Merge git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git
authorKalle Valo <kvalo@codeaurora.org>
Tue, 4 Aug 2020 11:02:54 +0000 (14:02 +0300)
committerKalle Valo <kvalo@codeaurora.org>
Tue, 4 Aug 2020 11:02:54 +0000 (14:02 +0300)
mt76 driver had major conflicts within mt7615 directory. To make it easier for
everyone, merge wireless-drivers to wireless-drivers-next and solve those
conflicts.

3142 files changed:
.gitignore
.mailmap
Documentation/ABI/testing/dev-kmsg
Documentation/ABI/testing/sysfs-bus-papr-pmem [new file with mode: 0644]
Documentation/ABI/testing/sysfs-platform-chipidea-usb-otg
Documentation/admin-guide/README.rst
Documentation/admin-guide/cgroup-v2.rst
Documentation/admin-guide/device-mapper/index.rst
Documentation/admin-guide/mm/transhuge.rst
Documentation/arm64/cpu-feature-registers.rst
Documentation/arm64/silicon-errata.rst
Documentation/arm64/sve.rst
Documentation/block/bfq-iosched.rst
Documentation/bpf/btf.rst
Documentation/bpf/prog_cgroup_sockopt.rst
Documentation/core-api/dma-api.rst
Documentation/core-api/pin_user_pages.rst
Documentation/dev-tools/kcsan.rst
Documentation/dev-tools/kunit/faq.rst
Documentation/devicetree/bindings/Makefile
Documentation/devicetree/bindings/arm/freescale/fsl,scu.txt
Documentation/devicetree/bindings/bus/socionext,uniphier-system-bus.yaml
Documentation/devicetree/bindings/clock/imx27-clock.yaml
Documentation/devicetree/bindings/clock/imx31-clock.yaml
Documentation/devicetree/bindings/clock/imx5-clock.yaml
Documentation/devicetree/bindings/display/bridge/sii902x.txt
Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt
Documentation/devicetree/bindings/display/imx/ldb.txt
Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.yaml
Documentation/devicetree/bindings/display/rockchip/rockchip-drm.yaml
Documentation/devicetree/bindings/gpio/mediatek,mt7621-gpio.txt
Documentation/devicetree/bindings/interrupt-controller/csky,mpintc.txt
Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.txt
Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
Documentation/devicetree/bindings/misc/olpc,xo1.75-ec.txt
Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
Documentation/devicetree/bindings/net/dsa/dsa.txt
Documentation/devicetree/bindings/net/dsa/dsa.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/net/dsa/ocelot.txt
Documentation/devicetree/bindings/net/ethernet-phy.yaml
Documentation/devicetree/bindings/net/mediatek-bluetooth.txt
Documentation/devicetree/bindings/net/mscc-phy-vsc8531.txt
Documentation/devicetree/bindings/net/ti,dp83867.yaml
Documentation/devicetree/bindings/net/ti,dp83869.yaml
Documentation/devicetree/bindings/net/wireless/microchip,wilc1000.yaml [moved from drivers/staging/wilc1000/microchip,wilc1000.yaml with 100% similarity]
Documentation/devicetree/bindings/pinctrl/renesas,pfc-pinctrl.txt
Documentation/devicetree/bindings/sound/audio-graph-card.txt
Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml
Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
Documentation/devicetree/bindings/thermal/thermal-sensor.yaml
Documentation/devicetree/bindings/thermal/thermal-zones.yaml
Documentation/devicetree/bindings/thermal/ti,am654-thermal.yaml
Documentation/devicetree/bindings/timer/csky,mptimer.txt
Documentation/devicetree/bindings/usb/aspeed,usb-vhub.yaml
Documentation/devicetree/writing-schema.rst
Documentation/driver-api/driver-model/devres.rst
Documentation/driver-api/media/v4l2-subdev.rst
Documentation/filesystems/dax.txt
Documentation/filesystems/debugfs.rst
Documentation/filesystems/ext4/verity.rst
Documentation/gpu/amdgpu.rst
Documentation/i2c/slave-eeprom-backend.rst
Documentation/i2c/smbus-protocol.rst
Documentation/kbuild/modules.rst
Documentation/kbuild/reproducible-builds.rst
Documentation/mips/ingenic-tcu.rst
Documentation/networking/arcnet.rst
Documentation/networking/ax25.rst
Documentation/networking/batman-adv.rst
Documentation/networking/can_ucan_protocol.rst
Documentation/networking/dccp.rst
Documentation/networking/device_drivers/appletalk/cops.rst [moved from Documentation/networking/cops.rst with 100% similarity]
Documentation/networking/device_drivers/appletalk/index.rst [new file with mode: 0644]
Documentation/networking/device_drivers/appletalk/ltpc.rst [moved from Documentation/networking/ltpc.rst with 100% similarity]
Documentation/networking/device_drivers/atm/cxacru-cf.py [moved from Documentation/networking/cxacru-cf.py with 100% similarity]
Documentation/networking/device_drivers/atm/cxacru.rst [moved from Documentation/networking/cxacru.rst with 100% similarity]
Documentation/networking/device_drivers/atm/fore200e.rst [moved from Documentation/networking/fore200e.rst with 100% similarity]
Documentation/networking/device_drivers/atm/index.rst [new file with mode: 0644]
Documentation/networking/device_drivers/atm/iphase.rst [moved from Documentation/networking/iphase.rst with 100% similarity]
Documentation/networking/device_drivers/cable/index.rst [new file with mode: 0644]
Documentation/networking/device_drivers/cable/sb1000.rst [moved from Documentation/networking/device_drivers/sb1000.rst with 100% similarity]
Documentation/networking/device_drivers/cellular/index.rst [new file with mode: 0644]
Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst [moved from Documentation/networking/device_drivers/qualcomm/rmnet.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/3com/3c509.rst [moved from Documentation/networking/device_drivers/3com/3c509.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/3com/vortex.rst [moved from Documentation/networking/device_drivers/3com/vortex.rst with 99% similarity]
Documentation/networking/device_drivers/ethernet/altera/altera_tse.rst [moved from Documentation/networking/altera_tse.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/amazon/ena.rst [moved from Documentation/networking/device_drivers/amazon/ena.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/aquantia/atlantic.rst [moved from Documentation/networking/device_drivers/aquantia/atlantic.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/chelsio/cxgb.rst [moved from Documentation/networking/device_drivers/chelsio/cxgb.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/cirrus/cs89x0.rst [moved from Documentation/networking/device_drivers/cirrus/cs89x0.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/davicom/dm9000.rst [moved from Documentation/networking/device_drivers/davicom/dm9000.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/dec/de4x5.rst [moved from Documentation/networking/device_drivers/dec/de4x5.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/dec/dmfe.rst [moved from Documentation/networking/device_drivers/dec/dmfe.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/dlink/dl2k.rst [moved from Documentation/networking/device_drivers/dlink/dl2k.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/freescale/dpaa.rst [moved from Documentation/networking/device_drivers/freescale/dpaa.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/freescale/dpaa2/dpio-driver.rst [moved from Documentation/networking/device_drivers/freescale/dpaa2/dpio-driver.rst with 97% similarity]
Documentation/networking/device_drivers/ethernet/freescale/dpaa2/ethernet-driver.rst [moved from Documentation/networking/device_drivers/freescale/dpaa2/ethernet-driver.rst with 98% similarity]
Documentation/networking/device_drivers/ethernet/freescale/dpaa2/index.rst [moved from Documentation/networking/device_drivers/freescale/dpaa2/index.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/freescale/dpaa2/mac-phy-support.rst [moved from Documentation/networking/device_drivers/freescale/dpaa2/mac-phy-support.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst [moved from Documentation/networking/device_drivers/freescale/dpaa2/overview.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/freescale/gianfar.rst [moved from Documentation/networking/device_drivers/freescale/gianfar.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/google/gve.rst [moved from Documentation/networking/device_drivers/google/gve.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/huawei/hinic.rst [moved from Documentation/networking/hinic.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/index.rst [new file with mode: 0644]
Documentation/networking/device_drivers/ethernet/intel/e100.rst [moved from Documentation/networking/device_drivers/intel/e100.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/intel/e1000.rst [moved from Documentation/networking/device_drivers/intel/e1000.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/intel/e1000e.rst [moved from Documentation/networking/device_drivers/intel/e1000e.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/intel/fm10k.rst [moved from Documentation/networking/device_drivers/intel/fm10k.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/intel/i40e.rst [moved from Documentation/networking/device_drivers/intel/i40e.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/intel/iavf.rst [moved from Documentation/networking/device_drivers/intel/iavf.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/intel/ice.rst [moved from Documentation/networking/device_drivers/intel/ice.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/intel/igb.rst [moved from Documentation/networking/device_drivers/intel/igb.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/intel/igbvf.rst [moved from Documentation/networking/device_drivers/intel/igbvf.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/intel/ixgb.rst [moved from Documentation/networking/device_drivers/intel/ixgb.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst [moved from Documentation/networking/device_drivers/intel/ixgbe.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/intel/ixgbevf.rst [moved from Documentation/networking/device_drivers/intel/ixgbevf.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst [moved from Documentation/networking/device_drivers/marvell/octeontx2.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/mellanox/mlx5.rst [moved from Documentation/networking/device_drivers/mellanox/mlx5.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/microsoft/netvsc.rst [moved from Documentation/networking/device_drivers/microsoft/netvsc.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/neterion/s2io.rst [moved from Documentation/networking/device_drivers/neterion/s2io.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/neterion/vxge.rst [moved from Documentation/networking/device_drivers/neterion/vxge.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/netronome/nfp.rst [moved from Documentation/networking/device_drivers/netronome/nfp.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/pensando/ionic.rst [moved from Documentation/networking/device_drivers/pensando/ionic.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/smsc/smc9.rst [moved from Documentation/networking/device_drivers/smsc/smc9.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/stmicro/stmmac.rst [moved from Documentation/networking/device_drivers/stmicro/stmmac.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/ti/cpsw.rst [moved from Documentation/networking/device_drivers/ti/cpsw.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/ti/cpsw_switchdev.rst [moved from Documentation/networking/device_drivers/ti/cpsw_switchdev.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/ti/tlan.rst [moved from Documentation/networking/device_drivers/ti/tlan.rst with 100% similarity]
Documentation/networking/device_drivers/ethernet/toshiba/spider_net.rst [moved from Documentation/networking/device_drivers/toshiba/spider_net.rst with 100% similarity]
Documentation/networking/device_drivers/fddi/defza.rst [moved from Documentation/networking/defza.rst with 100% similarity]
Documentation/networking/device_drivers/fddi/index.rst [new file with mode: 0644]
Documentation/networking/device_drivers/fddi/skfp.rst [moved from Documentation/networking/skfp.rst with 100% similarity]
Documentation/networking/device_drivers/hamradio/baycom.rst [moved from Documentation/networking/baycom.rst with 100% similarity]
Documentation/networking/device_drivers/hamradio/index.rst [new file with mode: 0644]
Documentation/networking/device_drivers/hamradio/z8530drv.rst [moved from Documentation/networking/z8530drv.rst with 100% similarity]
Documentation/networking/device_drivers/index.rst
Documentation/networking/device_drivers/wan/index.rst [new file with mode: 0644]
Documentation/networking/device_drivers/wan/z8530book.rst [moved from Documentation/networking/z8530book.rst with 100% similarity]
Documentation/networking/device_drivers/wifi/index.rst [new file with mode: 0644]
Documentation/networking/device_drivers/wifi/intel/ipw2100.rst [moved from Documentation/networking/device_drivers/intel/ipw2100.rst with 99% similarity]
Documentation/networking/device_drivers/wifi/intel/ipw2200.rst [moved from Documentation/networking/device_drivers/intel/ipw2200.rst with 100% similarity]
Documentation/networking/device_drivers/wifi/ray_cs.rst [moved from Documentation/networking/ray_cs.rst with 100% similarity]
Documentation/networking/devlink/devlink-info.rst
Documentation/networking/devlink/ice.rst
Documentation/networking/dsa/dsa.rst
Documentation/networking/ethtool-netlink.rst
Documentation/networking/ieee802154.rst
Documentation/networking/index.rst
Documentation/networking/ip-sysctl.rst
Documentation/networking/ipvs-sysctl.rst
Documentation/networking/rxrpc.rst
Documentation/networking/timestamping.rst
Documentation/networking/tls-offload.rst
Documentation/powerpc/papr_hcalls.rst
Documentation/process/changes.rst
Documentation/process/coding-style.rst
Documentation/sh/index.rst
Documentation/userspace-api/media/conf_nitpick.py
Documentation/virt/kvm/api.rst
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/include/asm/elf.h
arch/arc/include/asm/irqflags-compact.h
arch/arc/kernel/entry.S
arch/arc/kernel/head.S
arch/arc/kernel/setup.c
arch/arm/boot/dts/am335x-baltos.dtsi
arch/arm/boot/dts/am335x-boneblack-common.dtsi
arch/arm/boot/dts/am335x-boneblack-wireless.dts
arch/arm/boot/dts/am335x-boneblue.dts
arch/arm/boot/dts/am335x-bonegreen-wireless.dts
arch/arm/boot/dts/am335x-evm.dts
arch/arm/boot/dts/am335x-evmsk.dts
arch/arm/boot/dts/am335x-lxm.dts
arch/arm/boot/dts/am335x-moxa-uc-2100-common.dtsi
arch/arm/boot/dts/am335x-moxa-uc-8100-me-t.dts
arch/arm/boot/dts/am335x-pepper.dts
arch/arm/boot/dts/am335x-phycore-som.dtsi
arch/arm/boot/dts/am335x-pocketbeagle.dts
arch/arm/boot/dts/am33xx-l4.dtsi
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/am437x-cm-t43.dts
arch/arm/boot/dts/am437x-gp-evm.dts
arch/arm/boot/dts/am437x-l4.dtsi
arch/arm/boot/dts/am437x-sk-evm.dts
arch/arm/boot/dts/am43x-epos-evm.dts
arch/arm/boot/dts/am5729-beagleboneai.dts
arch/arm/boot/dts/bcm-nsp.dtsi
arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
arch/arm/boot/dts/bcm958522er.dts
arch/arm/boot/dts/bcm958525er.dts
arch/arm/boot/dts/bcm958525xmc.dts
arch/arm/boot/dts/bcm958622hr.dts
arch/arm/boot/dts/bcm958623hr.dts
arch/arm/boot/dts/bcm958625hr.dts
arch/arm/boot/dts/bcm958625k.dts
arch/arm/boot/dts/dra7-evm-common.dtsi
arch/arm/boot/dts/dra7-l4.dtsi
arch/arm/boot/dts/imx6ul-kontron-n6x1x-s.dtsi
arch/arm/boot/dts/imx6ul-kontron-n6x1x-som-common.dtsi
arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
arch/arm/boot/dts/omap4-duovero-parlor.dts
arch/arm/boot/dts/omap4.dtsi
arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
arch/arm/include/asm/efi.h
arch/arm/kernel/asm-offsets.c
arch/arm/kernel/ftrace.c
arch/arm/kernel/kgdb.c
arch/arm/kernel/traps.c
arch/arm/mach-bcm/Kconfig
arch/arm/mach-imx/pm-imx5.c
arch/arm/mach-imx/pm-imx6.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-sti/board-dt.c
arch/arm/mm/alignment.c
arch/arm/xen/enlighten.c
arch/arm64/Kconfig
arch/arm64/Kconfig.debug
arch/arm64/Makefile
arch/arm64/boot/dts/freescale/imx8mm-beacon-som.dtsi
arch/arm64/boot/dts/freescale/imx8mm-evk.dts
arch/arm64/boot/dts/freescale/imx8mn-ddr4-evk.dts
arch/arm64/include/asm/alternative.h
arch/arm64/include/asm/arch_gicv3.h
arch/arm64/include/asm/arch_timer.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/linkage.h
arch/arm64/include/asm/mmu.h
arch/arm64/include/asm/pgtable-prot.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/unistd32.h
arch/arm64/include/asm/vdso/clocksource.h
arch/arm64/include/asm/vdso/compat_gettimeofday.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/alternative.c
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry-common.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/hw_breakpoint.c
arch/arm64/kernel/insn.c
arch/arm64/kernel/kgdb.c
arch/arm64/kernel/machine_kexec_file.c
arch/arm64/kernel/perf_regs.c
arch/arm64/kernel/probes/kprobes.c
arch/arm64/kernel/signal32.c
arch/arm64/kernel/traps.c
arch/arm64/kernel/vdso.c
arch/arm64/kernel/vdso/Makefile
arch/arm64/kernel/vdso/sigreturn.S
arch/arm64/kernel/vdso32/Makefile
arch/arm64/kernel/vdso32/sigreturn.S [deleted file]
arch/arm64/kernel/vdso32/vdso.lds.S
arch/arm64/kernel/vmlinux.lds.S
arch/arm64/kvm/hyp-init.S
arch/arm64/kvm/pmu.c
arch/arm64/kvm/pvtime.c
arch/arm64/kvm/reset.c
arch/arm64/kvm/vgic/vgic-v4.c
arch/arm64/mm/init.c
arch/arm64/mm/mmu.c
arch/c6x/lib/checksum.c
arch/c6x/lib/csum_64plus.S
arch/csky/kernel/ftrace.c
arch/ia64/include/asm/sections.h
arch/ia64/kernel/ftrace.c
arch/ia64/kernel/unwind_i.h
arch/m68k/kernel/setup_no.c
arch/m68k/mm/mcfmmu.c
arch/mips/boot/dts/ingenic/gcw0.dts
arch/mips/boot/dts/mscc/ocelot_pcb120.dts
arch/mips/include/asm/unroll.h
arch/mips/kernel/kprobes.c
arch/mips/kernel/syscalls/syscall_n32.tbl
arch/mips/kernel/syscalls/syscall_o32.tbl
arch/mips/kernel/traps.c
arch/mips/kvm/emulate.c
arch/mips/kvm/mips.c
arch/mips/lantiq/xway/sysctrl.c
arch/nds32/kernel/ftrace.c
arch/nios2/include/asm/checksum.h
arch/openrisc/kernel/dma.c
arch/parisc/kernel/ftrace.c
arch/parisc/kernel/kgdb.c
arch/parisc/kernel/process.c
arch/parisc/kernel/syscalls/syscall.tbl
arch/parisc/lib/memcpy.c
arch/powerpc/include/asm/nohash/32/pgtable.h
arch/powerpc/include/asm/sections.h
arch/powerpc/include/uapi/asm/papr_pdsm.h [new file with mode: 0644]
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/kgdb.c
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/module_64.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/syscalls/syscall.tbl
arch/powerpc/kernel/trace/ftrace.c
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/powerpc/lib/inst.c
arch/powerpc/mm/book3s64/pkeys.c
arch/powerpc/mm/nohash/kaslr_booke.c
arch/powerpc/oprofile/backtrace.c
arch/powerpc/perf/callchain_32.c
arch/powerpc/perf/callchain_64.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/platforms/cell/spu_callbacks.c
arch/powerpc/platforms/pseries/papr_scm.c
arch/powerpc/sysdev/fsl_pci.c
arch/riscv/include/asm/cmpxchg.h
arch/riscv/kernel/ftrace.c
arch/riscv/kernel/kgdb.c
arch/riscv/kernel/patch.c
arch/riscv/kernel/sys_riscv.c
arch/riscv/kernel/traps.c
arch/riscv/kernel/vdso/Makefile
arch/riscv/kernel/vdso/vgettimeofday.c
arch/riscv/mm/pageattr.c
arch/riscv/net/bpf_jit.h
arch/riscv/net/bpf_jit_comp32.c
arch/riscv/net/bpf_jit_comp64.c
arch/riscv/net/bpf_jit_core.c
arch/s390/Kconfig
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/crypto/prng.c
arch/s390/include/asm/kvm_host.h
arch/s390/include/asm/syscall.h
arch/s390/include/asm/vdso.h
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/debug.c
arch/s390/kernel/early.c
arch/s390/kernel/entry.S
arch/s390/kernel/ftrace.c
arch/s390/kernel/ipl.c
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/ptrace.c
arch/s390/kernel/setup.c
arch/s390/kernel/syscalls/syscall.tbl
arch/s390/kernel/time.c
arch/s390/kernel/uv.c
arch/s390/kernel/vdso64/Makefile
arch/s390/kernel/vdso64/clock_getres.S
arch/s390/mm/fault.c
arch/s390/mm/hugetlbpage.c
arch/s390/mm/maccess.c
arch/s390/net/bpf_jit_comp.c
arch/s390/pci/pci_event.c
arch/sh/kernel/ftrace.c
arch/sh/kernel/traps.c
arch/sparc/kernel/sys32.S
arch/sparc/kernel/syscalls/syscall.tbl
arch/um/kernel/maccess.c
arch/x86/Kconfig
arch/x86/boot/compressed/head_64.S
arch/x86/entry/common.c
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64_compat.S
arch/x86/entry/syscall_x32.c
arch/x86/entry/syscalls/syscall_32.tbl
arch/x86/entry/syscalls/syscall_64.tbl
arch/x86/events/Makefile
arch/x86/hyperv/hv_init.c
arch/x86/include/asm/bitops.h
arch/x86/include/asm/bug.h
arch/x86/include/asm/cpu.h
arch/x86/include/asm/cpumask.h
arch/x86/include/asm/fpu/internal.h
arch/x86/include/asm/idtentry.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mwait.h
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/ptrace.h
arch/x86/include/uapi/asm/kvm.h
arch/x86/kernel/cpu/centaur.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpu.h
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/mce/core.c
arch/x86/kernel/cpu/resctrl/core.c
arch/x86/kernel/cpu/resctrl/internal.h
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kernel/cpu/umwait.c
arch/x86/kernel/cpu/zhaoxin.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/kgdb.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/kprobes/opt.c
arch/x86/kernel/ldt.c
arch/x86/kernel/nmi.c
arch/x86/kernel/probe_roms.c
arch/x86/kernel/traps.c
arch/x86/kvm/kvm_cache_regs.h
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmcs.h
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
arch/x86/lib/memcpy_64.S
arch/x86/lib/usercopy_64.c
arch/x86/mm/fault.c
arch/x86/mm/init_32.c
arch/x86/mm/maccess.c
arch/x86/pci/pcbios.c
arch/x86/platform/intel-mid/sfi.c
arch/x86/power/cpu.c
arch/x86/purgatory/Makefile
arch/x86/xen/enlighten_pv.c
arch/x86/xen/xen-asm_64.S
block/bio-integrity.c
block/blk-mq-debugfs.c
block/blk-mq-tag.c
block/blk-mq.c
block/keyslot-manager.c
block/partitions/ldm.c
block/partitions/ldm.h
crypto/af_alg.c
crypto/algboss.c
crypto/algif_aead.c
crypto/algif_hash.c
crypto/algif_rng.c
crypto/algif_skcipher.c
crypto/drbg.c
drivers/acpi/acpi_configfs.c
drivers/acpi/dptf/dptf_power.c
drivers/acpi/fan.c
drivers/acpi/sysfs.c
drivers/amba/tegra-ahb.c
drivers/android/binder.c
drivers/ata/libata-core.c
drivers/ata/libata-scsi.c
drivers/ata/sata_rcar.c
drivers/atm/Kconfig
drivers/atm/eni.c
drivers/atm/firestream.c
drivers/atm/fore200e.c
drivers/atm/horizon.c
drivers/atm/iphase.c
drivers/atm/lanai.c
drivers/atm/solos-pci.c
drivers/atm/zatm.c
drivers/base/power/trace.c
drivers/base/regmap/regmap.c
drivers/bcma/driver_gpio.c
drivers/bcma/scan.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_protocol.h
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/rbd.c
drivers/block/virtio_blk.c
drivers/bus/ti-sysc.c
drivers/char/hw_random/ks-sa-rng.c
drivers/char/mem.c
drivers/char/tpm/st33zp24/i2c.c
drivers/char/tpm/st33zp24/spi.c
drivers/char/tpm/st33zp24/st33zp24.c
drivers/char/tpm/tpm-dev-common.c
drivers/char/tpm/tpm_ibmvtpm.c
drivers/char/tpm/tpm_tis.c
drivers/char/tpm/tpm_tis_core.c
drivers/char/tpm/tpm_tis_spi_main.c
drivers/clk/sifive/fu540-prci.c
drivers/clocksource/arm_arch_timer.c
drivers/cpufreq/intel_pstate.c
drivers/cpuidle/cpuidle.c
drivers/crypto/caam/Kconfig
drivers/crypto/caam/ctrl.c
drivers/crypto/caam/desc.h
drivers/crypto/caam/pdb.h
drivers/crypto/ccp/sev-dev.c
drivers/crypto/chelsio/chcr_crypto.h
drivers/crypto/chelsio/chtls/chtls_cm.c
drivers/crypto/chelsio/chtls/chtls_main.c
drivers/crypto/hisilicon/sgl.c
drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
drivers/dio/dio.c
drivers/dma-buf/dma-buf.c
drivers/dma/milbeaut-hdmac.c
drivers/dma/milbeaut-xdmac.c
drivers/dma/moxart-dma.c
drivers/dma/tegra20-apb-dma.c
drivers/dma/ti/edma.c
drivers/dma/ti/k3-udma.c
drivers/dma/timb_dma.c
drivers/edac/amd64_edac.c
drivers/firewire/core-cdev.c
drivers/firewire/core-transaction.c
drivers/firewire/core.h
drivers/firewire/nosy.c
drivers/firewire/ohci.c
drivers/firmware/dmi-sysfs.c
drivers/firmware/efi/Kconfig
drivers/firmware/efi/arm-init.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/esrt.c
drivers/firmware/efi/libstub/Makefile
drivers/firmware/efi/libstub/arm32-stub.c
drivers/firmware/efi/libstub/efi-stub-helper.c
drivers/firmware/efi/libstub/efi-stub.c
drivers/firmware/efi/libstub/efistub.h
drivers/firmware/efi/libstub/file.c
drivers/firmware/efi/libstub/skip_spaces.c
drivers/firmware/google/memconsole-coreboot.c
drivers/firmware/google/vpd.c
drivers/firmware/iscsi_ibft.c
drivers/firmware/pcdp.h
drivers/firmware/psci/psci_checker.c
drivers/firmware/raspberrypi.c
drivers/fpga/Kconfig
drivers/gpio/gpio-arizona.c
drivers/gpio/gpio-pca953x.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/dsc/Makefile
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
drivers/gpu/drm/amd/display/modules/color/color_gamma.c
drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
drivers/gpu/drm/drm_encoder_slave.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_panel_orientation_quirks.c
drivers/gpu/drm/exynos/exynos_drm_dma.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_mic.c
drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display.h
drivers/gpu/drm/i915/display/intel_dp_mst.c
drivers/gpu/drm/i915/display/intel_fbc.c
drivers/gpu/drm/i915/gt/intel_context.c
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_ring.c
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
drivers/gpu/drm/i915/gt/selftest_lrc.c
drivers/gpu/drm/i915/gt/selftest_mocs.c
drivers/gpu/drm/i915/gt/selftest_ring.c [new file with mode: 0644]
drivers/gpu/drm/i915/gt/selftest_rps.c
drivers/gpu/drm/i915/gt/selftest_timeline.c
drivers/gpu/drm/i915/gt/selftest_workarounds.c
drivers/gpu/drm/i915/gt/shaders/README [new file with mode: 0644]
drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm [new file with mode: 0644]
drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm [new file with mode: 0644]
drivers/gpu/drm/i915/gvt/debugfs.c
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/mmio_context.h
drivers/gpu/drm/i915/gvt/reg.h
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/i915/i915_priolist_types.h
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
drivers/gpu/drm/mcde/mcde_display.c
drivers/gpu/drm/mcde/mcde_drv.c
drivers/gpu/drm/mediatek/Kconfig
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_drm_plane.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/mediatek/mtk_hdmi.c
drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
drivers/gpu/drm/meson/meson_registers.h
drivers/gpu/drm/meson/meson_viu.c
drivers/gpu/drm/msm/adreno/a2xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/msm_submitqueue.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/nouveau_dmem.c
drivers/gpu/drm/nouveau/nouveau_svm.c
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/radeon/ni_dpm.c
drivers/gpu/drm/rcar-du/Kconfig
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
drivers/gpu/drm/sun4i/sun8i_mixer.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/hub.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/host1x/bus.c
drivers/gpu/host1x/dev.c
drivers/hv/vmbus_drv.c
drivers/hwmon/acpi_power_meter.c
drivers/hwmon/bt1-pvt.c
drivers/hwmon/max6697.c
drivers/hwmon/pmbus/Kconfig
drivers/hwmon/pmbus/pmbus_core.c
drivers/hwtracing/stm/policy.c
drivers/hwtracing/stm/stm.h
drivers/i2c/Kconfig
drivers/i2c/algos/i2c-algo-pca.c
drivers/i2c/busses/i2c-designware-common.c
drivers/i2c/busses/i2c-designware-core.h
drivers/i2c/busses/i2c-designware-pcidrv.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-eg20t.c
drivers/i2c/busses/i2c-fsi.c
drivers/i2c/busses/i2c-mlxcpld.c
drivers/i2c/i2c-core-base.c
drivers/i2c/i2c-core-smbus.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/counters.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/rdma_core.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/hw/efa/efa_verbs.c
drivers/infiniband/hw/hfi1/debugfs.c
drivers/infiniband/hw/hfi1/init.c
drivers/infiniband/hw/hfi1/iowait.h
drivers/infiniband/hw/hfi1/ipoib.h
drivers/infiniband/hw/hfi1/ipoib_tx.c
drivers/infiniband/hw/hfi1/netdev_rx.c
drivers/infiniband/hw/hfi1/qp.c
drivers/infiniband/hw/hfi1/tid_rdma.c
drivers/infiniband/hw/hfi1/verbs_txreq.h
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_hw_v1.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/hns/hns_roce_mr.c
drivers/infiniband/hw/i40iw/Makefile
drivers/infiniband/hw/i40iw/i40iw.h
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/mlx5/qpc.c
drivers/infiniband/hw/qedr/main.c
drivers/infiniband/hw/qedr/qedr_iw_cm.c
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/sw/rdmavt/qp.c
drivers/infiniband/sw/siw/siw_main.c
drivers/infiniband/sw/siw/siw_qp_rx.c
drivers/input/serio/hp_sdc.c
drivers/iommu/Kconfig
drivers/iommu/intel/dmar.c
drivers/iommu/intel/iommu.c
drivers/irqchip/Kconfig
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic.c
drivers/irqchip/irq-riscv-intc.c
drivers/isdn/capi/Kconfig
drivers/isdn/hardware/mISDN/hfcpci.c
drivers/isdn/hardware/mISDN/hfcsusb.c
drivers/isdn/hardware/mISDN/netjet.c
drivers/isdn/mISDN/socket.c
drivers/md/bcache/btree.c
drivers/md/bcache/super.c
drivers/md/dm-ioctl.c
drivers/md/dm-rq.c
drivers/md/dm-writecache.c
drivers/md/dm-zoned-metadata.c
drivers/md/dm-zoned-reclaim.c
drivers/md/dm-zoned-target.c
drivers/md/dm.c
drivers/media/platform/omap3isp/isp.c
drivers/media/platform/omap3isp/ispvideo.c
drivers/media/usb/pwc/pwc.h
drivers/message/fusion/mptbase.c
drivers/message/fusion/mptscsih.c
drivers/mfd/mt6360-core.c
drivers/misc/habanalabs/command_submission.c
drivers/misc/habanalabs/debugfs.c
drivers/misc/habanalabs/gaudi/gaudi.c
drivers/misc/habanalabs/gaudi/gaudiP.h
drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
drivers/misc/kgdbts.c
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/hw-me.c
drivers/misc/mei/hw-me.h
drivers/misc/mei/pci-me.c
drivers/mmc/host/meson-gx-mmc.c
drivers/mmc/host/owl-mmc.c
drivers/mmc/host/sdhci-msm.c
drivers/mtd/mtdcore.c
drivers/mtd/nand/raw/nandsim.c
drivers/mtd/nand/raw/xway_nand.c
drivers/net/Kconfig
drivers/net/appletalk/Kconfig
drivers/net/bareudp.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_options.c
drivers/net/caif/caif_hsi.c
drivers/net/caif/caif_serial.c
drivers/net/caif/caif_spi.c
drivers/net/caif/caif_virtio.c
drivers/net/can/peak_canfd/peak_pciefd_main.c
drivers/net/dsa/Kconfig
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/b53/b53_spi.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/bcm_sf2_cfp.c
drivers/net/dsa/dsa_loop.c
drivers/net/dsa/lan9303-core.c
drivers/net/dsa/microchip/ksz8795.c
drivers/net/dsa/microchip/ksz9477.c
drivers/net/dsa/microchip/ksz9477_i2c.c
drivers/net/dsa/microchip/ksz_common.c
drivers/net/dsa/microchip/ksz_common.h
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/chip.h
drivers/net/dsa/mv88e6xxx/global1.c
drivers/net/dsa/mv88e6xxx/global1.h
drivers/net/dsa/mv88e6xxx/global2.c
drivers/net/dsa/mv88e6xxx/global2_scratch.c
drivers/net/dsa/ocelot/Kconfig
drivers/net/dsa/ocelot/Makefile
drivers/net/dsa/ocelot/felix.c
drivers/net/dsa/ocelot/felix.h
drivers/net/dsa/ocelot/felix_vsc9959.c
drivers/net/dsa/ocelot/seville_vsc9953.c [new file with mode: 0644]
drivers/net/dsa/qca/ar9331.c
drivers/net/dsa/qca8k.c
drivers/net/dsa/qca8k.h
drivers/net/dsa/rtl8366.c
drivers/net/dsa/rtl8366rb.c
drivers/net/dsa/sja1105/sja1105.h
drivers/net/dsa/sja1105/sja1105_dynamic_config.c
drivers/net/dsa/sja1105/sja1105_dynamic_config.h
drivers/net/dsa/sja1105/sja1105_flower.c
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/dsa/sja1105/sja1105_ptp.c
drivers/net/dsa/sja1105/sja1105_spi.c
drivers/net/dsa/sja1105/sja1105_static_config.c
drivers/net/dsa/sja1105/sja1105_static_config.h
drivers/net/dsa/sja1105/sja1105_tas.c
drivers/net/dsa/sja1105/sja1105_vl.c
drivers/net/dsa/vitesse-vsc73xx-platform.c
drivers/net/dsa/vitesse-vsc73xx-spi.c
drivers/net/ethernet/3com/3c59x.c
drivers/net/ethernet/3com/Kconfig
drivers/net/ethernet/3com/typhoon.c
drivers/net/ethernet/8390/8390.h
drivers/net/ethernet/8390/ne2k-pci.c
drivers/net/ethernet/adaptec/starfire.c
drivers/net/ethernet/aeroflex/greth.c
drivers/net/ethernet/agere/et131x.c
drivers/net/ethernet/alteon/acenic.c
drivers/net/ethernet/amazon/ena/ena_admin_defs.h
drivers/net/ethernet/amazon/ena/ena_com.c
drivers/net/ethernet/amazon/ena/ena_com.h
drivers/net/ethernet/amazon/ena/ena_eth_com.c
drivers/net/ethernet/amazon/ena/ena_eth_com.h
drivers/net/ethernet/amazon/ena/ena_ethtool.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amazon/ena/ena_netdev.h
drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h
drivers/net/ethernet/amd/amd8111e.c
drivers/net/ethernet/amd/au1000_eth.c
drivers/net/ethernet/amd/pcnet32.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe-main.c
drivers/net/ethernet/amd/xgbe/xgbe-pci.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/aquantia/atlantic/aq_common.h
drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h
drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
drivers/net/ethernet/aquantia/atlantic/aq_hw.h
drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
drivers/net/ethernet/aquantia/atlantic/aq_main.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.h
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
drivers/net/ethernet/aquantia/atlantic/aq_ptp.h
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.h
drivers/net/ethernet/aquantia/atlantic/aq_vec.c
drivers/net/ethernet/aquantia/atlantic/aq_vec.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c
drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
drivers/net/ethernet/arc/emac_main.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/aurora/nb8800.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnx2x/Makefile
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_self_test.c [new file with mode: 0644]
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/broadcom/tg3.h
drivers/net/ethernet/brocade/bna/bfa_ioc.c
drivers/net/ethernet/brocade/bna/bfi.h
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/cadence/macb_pci.c
drivers/net/ethernet/cadence/macb_ptp.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
drivers/net/ethernet/cavium/liquidio/octeon_network.h
drivers/net/ethernet/cavium/liquidio/request_manager.c
drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/chelsio/Kconfig
drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
drivers/net/ethernet/chelsio/cxgb4/l2t.c
drivers/net/ethernet/chelsio/cxgb4/sched.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/smt.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
drivers/net/ethernet/cirrus/Kconfig
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/cortina/Kconfig
drivers/net/ethernet/dec/tulip/Kconfig
drivers/net/ethernet/dec/tulip/de2104x.c
drivers/net/ethernet/dec/tulip/dmfe.c
drivers/net/ethernet/dec/tulip/tulip_core.c
drivers/net/ethernet/dec/tulip/uli526x.c
drivers/net/ethernet/dec/tulip/winbond-840.c
drivers/net/ethernet/dlink/dl2k.c
drivers/net/ethernet/dlink/sundance.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/fealnx.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
drivers/net/ethernet/freescale/dpaa2/dpni.c
drivers/net/ethernet/freescale/dpaa2/dpni.h
drivers/net/ethernet/freescale/enetc/Kconfig
drivers/net/ethernet/freescale/enetc/enetc.c
drivers/net/ethernet/freescale/enetc/enetc.h
drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
drivers/net/ethernet/freescale/enetc/enetc_hw.h
drivers/net/ethernet/freescale/enetc/enetc_pf.c
drivers/net/ethernet/freescale/enetc/enetc_pf.h
drivers/net/ethernet/freescale/enetc/enetc_qos.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/freescale/xgmac_mdio.c
drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/huawei/hinic/Makefile
drivers/net/ethernet/huawei/hinic/hinic_dev.h
drivers/net/ethernet/huawei/hinic/hinic_devlink.c [new file with mode: 0644]
drivers/net/ethernet/huawei/hinic/hinic_devlink.h [new file with mode: 0644]
drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
drivers/net/ethernet/huawei/hinic/hinic_main.c
drivers/net/ethernet/huawei/hinic/hinic_port.c
drivers/net/ethernet/huawei/hinic/hinic_port.h
drivers/net/ethernet/huawei/hinic/hinic_rx.c
drivers/net/ethernet/huawei/hinic/hinic_sriov.c
drivers/net/ethernet/huawei/hinic/hinic_tx.c
drivers/net/ethernet/huawei/hinic/hinic_tx.h
drivers/net/ethernet/ibm/ibmveth.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/Kconfig
drivers/net/ethernet/intel/e100.c
drivers/net/ethernet/intel/e1000/e1000.h
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/e1000/e1000_hw.c
drivers/net/ethernet/intel/e1000/e1000_main.c
drivers/net/ethernet/intel/e1000/e1000_param.c
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/param.c
drivers/net/ethernet/intel/e1000e/phy.c
drivers/net/ethernet/intel/e1000e/ptp.c
drivers/net/ethernet/intel/fm10k/fm10k.h
drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
drivers/net/ethernet/intel/fm10k/fm10k_pf.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq.c
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40e/i40e_client.c
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_dcb.h
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_devids.h
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_hmc.h
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_osdep.h
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/i40e/i40e_register.h
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/i40e/i40e_xsk.h
drivers/net/ethernet/intel/iavf/iavf.h
drivers/net/ethernet/intel/iavf/iavf_ethtool.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/iavf/iavf_txrx.c
drivers/net/ethernet/intel/iavf/iavf_type.h
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_common.c
drivers/net/ethernet/intel/ice/ice_common.h
drivers/net/ethernet/intel/ice/ice_dcb.c
drivers/net/ethernet/intel/ice/ice_dcb.h
drivers/net/ethernet/intel/ice/ice_devlink.c
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_flex_pipe.c
drivers/net/ethernet/intel/ice/ice_flex_type.h
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_lib.h
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_nvm.c
drivers/net/ethernet/intel/ice/ice_nvm.h
drivers/net/ethernet/intel/ice/ice_sched.c
drivers/net/ethernet/intel/ice/ice_sched.h
drivers/net/ethernet/intel/ice/ice_switch.c
drivers/net/ethernet/intel/ice/ice_type.h
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/e1000_nvm.c
drivers/net/ethernet/intel/igb/e1000_phy.c
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/igbvf/ethtool.c
drivers/net/ethernet/intel/igbvf/igbvf.h
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/igc/igc.h
drivers/net/ethernet/intel/igc/igc_defines.h
drivers/net/ethernet/intel/igc/igc_ethtool.c
drivers/net/ethernet/intel/igc/igc_hw.h
drivers/net/ethernet/intel/igc/igc_i225.c
drivers/net/ethernet/intel/igc/igc_i225.h
drivers/net/ethernet/intel/igc/igc_mac.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/igc/igc_ptp.c
drivers/net/ethernet/intel/igc/igc_regs.h
drivers/net/ethernet/intel/ixgb/ixgb.h
drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
drivers/net/ethernet/intel/ixgb/ixgb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/intel/ixgbevf/vf.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/jme.h
drivers/net/ethernet/marvell/Kconfig
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/octeontx2/af/common.h
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/marvell/sky2.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_star_emac.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
drivers/net/ethernet/mellanox/mlx5/core/Makefile
drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.c
drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec.h
drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/accel/tls.c
drivers/net/ethernet/mellanox/mlx5/core/accel/tls.h
drivers/net/ethernet/mellanox/mlx5/core/devlink.c
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.h
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
drivers/net/ethernet/mellanox/mlx5/core/en/health.c
drivers/net/ethernet/mellanox/mlx5/core/en/health.h
drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en/params.h
drivers/net/ethernet/mellanox/mlx5/core/en/port.c
drivers/net/ethernet/mellanox/mlx5/core/en/port.h
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/mellanox/mlx5/core/wq.h
drivers/net/ethernet/mellanox/mlxsw/Makefile
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/core.h
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
drivers/net/ethernet/mellanox/mlxsw/minimal.c
drivers/net/ethernet/mellanox/mlxsw/pci.c
drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/resources.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/spectrum_flow.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_matchall.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
drivers/net/ethernet/mellanox/mlxsw/switchib.c
drivers/net/ethernet/mellanox/mlxsw/switchx2.c
drivers/net/ethernet/mellanox/mlxsw/trap.h
drivers/net/ethernet/micrel/ksz884x.c
drivers/net/ethernet/microchip/lan743x_main.c
drivers/net/ethernet/mscc/Kconfig
drivers/net/ethernet/mscc/Makefile
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot.h
drivers/net/ethernet/mscc/ocelot_board.c [deleted file]
drivers/net/ethernet/mscc/ocelot_flower.c
drivers/net/ethernet/mscc/ocelot_io.c
drivers/net/ethernet/mscc/ocelot_net.c [new file with mode: 0644]
drivers/net/ethernet/mscc/ocelot_police.c
drivers/net/ethernet/mscc/ocelot_police.h
drivers/net/ethernet/mscc/ocelot_ptp.c
drivers/net/ethernet/mscc/ocelot_regs.c [deleted file]
drivers/net/ethernet/mscc/ocelot_tc.c [deleted file]
drivers/net/ethernet/mscc/ocelot_tc.h [deleted file]
drivers/net/ethernet/mscc/ocelot_vcap.c [moved from drivers/net/ethernet/mscc/ocelot_ace.c with 77% similarity]
drivers/net/ethernet/mscc/ocelot_vcap.h [moved from drivers/net/ethernet/mscc/ocelot_ace.h with 75% similarity]
drivers/net/ethernet/mscc/ocelot_vsc7514.c [new file with mode: 0644]
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/natsemi/natsemi.c
drivers/net/ethernet/neterion/Kconfig
drivers/net/ethernet/neterion/s2io.c
drivers/net/ethernet/neterion/vxge/vxge-config.c
drivers/net/ethernet/neterion/vxge/vxge-config.h
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/netronome/nfp/flower/main.c
drivers/net/ethernet/netronome/nfp/flower/main.h
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
drivers/net/ethernet/netronome/nfp/nfp_devlink.c
drivers/net/ethernet/netronome/nfp/nfp_net.h
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/net/ethernet/packetengines/hamachi.c
drivers/net/ethernet/packetengines/yellowfin.c
drivers/net/ethernet/pensando/Kconfig
drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
drivers/net/ethernet/pensando/ionic/ionic_dev.h
drivers/net/ethernet/pensando/ionic/ionic_devlink.c
drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
drivers/net/ethernet/pensando/ionic/ionic_if.h
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/pensando/ionic/ionic_lif.h
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qed/Makefile
drivers/net/ethernet/qlogic/qed/qed.h
drivers/net/ethernet/qlogic/qed/qed_chain.c [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_cxt.c
drivers/net/ethernet/qlogic/qed/qed_cxt.h
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qed/qed_dcbx.h
drivers/net/ethernet/qlogic/qed/qed_debug.c
drivers/net/ethernet/qlogic/qed/qed_debug.h
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_dev_api.h
drivers/net/ethernet/qlogic/qed/qed_fcoe.c
drivers/net/ethernet/qlogic/qed/qed_fcoe.h
drivers/net/ethernet/qlogic/qed/qed_hsi.h
drivers/net/ethernet/qlogic/qed/qed_hw.c
drivers/net/ethernet/qlogic/qed/qed_hw.h
drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
drivers/net/ethernet/qlogic/qed/qed_init_ops.c
drivers/net/ethernet/qlogic/qed/qed_init_ops.h
drivers/net/ethernet/qlogic/qed/qed_int.c
drivers/net/ethernet/qlogic/qed/qed_int.h
drivers/net/ethernet/qlogic/qed/qed_iscsi.c
drivers/net/ethernet/qlogic/qed/qed_iscsi.h
drivers/net/ethernet/qlogic/qed/qed_iwarp.c
drivers/net/ethernet/qlogic/qed/qed_iwarp.h
drivers/net/ethernet/qlogic/qed/qed_l2.c
drivers/net/ethernet/qlogic/qed/qed_l2.h
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/qlogic/qed/qed_ll2.h
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qed/qed_mcp.h
drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
drivers/net/ethernet/qlogic/qed/qed_ooo.c
drivers/net/ethernet/qlogic/qed/qed_ooo.h
drivers/net/ethernet/qlogic/qed/qed_ptp.c
drivers/net/ethernet/qlogic/qed/qed_ptp.h [new file with mode: 0644]
drivers/net/ethernet/qlogic/qed/qed_rdma.c
drivers/net/ethernet/qlogic/qed/qed_rdma.h
drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
drivers/net/ethernet/qlogic/qed/qed_roce.c
drivers/net/ethernet/qlogic/qed/qed_roce.h
drivers/net/ethernet/qlogic/qed/qed_selftest.c
drivers/net/ethernet/qlogic/qed/qed_selftest.h
drivers/net/ethernet/qlogic/qed/qed_sp.h
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
drivers/net/ethernet/qlogic/qed/qed_spq.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qed/qed_sriov.h
drivers/net/ethernet/qlogic/qed/qed_vf.c
drivers/net/ethernet/qlogic/qed/qed_vf.h
drivers/net/ethernet/qlogic/qede/Makefile
drivers/net/ethernet/qlogic/qede/qede.h
drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
drivers/net/ethernet/qlogic/qede/qede_filter.c
drivers/net/ethernet/qlogic/qede/qede_fp.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qlogic/qede/qede_ptp.c
drivers/net/ethernet/qlogic/qede/qede_ptp.h
drivers/net/ethernet/qlogic/qede/qede_rdma.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
drivers/net/ethernet/rdc/r6040.c
drivers/net/ethernet/realtek/r8169.h
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/realtek/r8169_phy_config.c
drivers/net/ethernet/rocker/rocker_main.c
drivers/net/ethernet/sfc/bitfield.h
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/efx_channels.c
drivers/net/ethernet/sfc/efx_channels.h
drivers/net/ethernet/sfc/efx_common.c
drivers/net/ethernet/sfc/efx_common.h
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/sfc/ethtool_common.c
drivers/net/ethernet/sfc/ethtool_common.h
drivers/net/ethernet/sfc/farch.c
drivers/net/ethernet/sfc/mcdi.c
drivers/net/ethernet/sfc/mcdi.h
drivers/net/ethernet/sfc/mcdi_filters.c
drivers/net/ethernet/sfc/mcdi_filters.h
drivers/net/ethernet/sfc/mcdi_functions.c
drivers/net/ethernet/sfc/mcdi_functions.h
drivers/net/ethernet/sfc/mcdi_pcol.h
drivers/net/ethernet/sfc/mcdi_port.c
drivers/net/ethernet/sfc/mcdi_port.h [new file with mode: 0644]
drivers/net/ethernet/sfc/mcdi_port_common.c
drivers/net/ethernet/sfc/mcdi_port_common.h
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/nic.c
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/sfc/nic_common.h [new file with mode: 0644]
drivers/net/ethernet/sfc/ptp.c
drivers/net/ethernet/sfc/ptp.h [new file with mode: 0644]
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/sfc/rx_common.c
drivers/net/ethernet/sfc/rx_common.h
drivers/net/ethernet/sfc/selftest.c
drivers/net/ethernet/sfc/siena.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/sfc/tx.h
drivers/net/ethernet/sfc/tx_common.c
drivers/net/ethernet/sfc/tx_common.h
drivers/net/ethernet/sis/sis190.c
drivers/net/ethernet/sis/sis900.c
drivers/net/ethernet/smsc/Kconfig
drivers/net/ethernet/smsc/epic100.c
drivers/net/ethernet/smsc/smsc9420.c
drivers/net/ethernet/socionext/netsec.c
drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/ti/am65-cpsw-ethtool.c
drivers/net/ethernet/ti/am65-cpsw-nuss.c
drivers/net/ethernet/ti/am65-cpsw-nuss.h
drivers/net/ethernet/ti/am65-cpsw-qos.c
drivers/net/ethernet/ti/tlan.c
drivers/net/ethernet/xilinx/xilinx_axienet.h
drivers/net/ethernet/xircom/xirc2ps_cs.c
drivers/net/fddi/Kconfig
drivers/net/fddi/skfp/ess.c
drivers/net/fddi/skfp/h/cmtdef.h
drivers/net/fddi/skfp/smt.c
drivers/net/geneve.c
drivers/net/hamradio/Kconfig
drivers/net/hamradio/scc.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc_drv.c
drivers/net/ipa/gsi.c
drivers/net/ipa/gsi.h
drivers/net/ipa/gsi_private.h
drivers/net/ipa/gsi_trans.h
drivers/net/ipa/ipa_clock.c
drivers/net/ipa/ipa_clock.h
drivers/net/ipa/ipa_cmd.c
drivers/net/ipa/ipa_cmd.h
drivers/net/ipa/ipa_data-sdm845.c
drivers/net/ipa/ipa_endpoint.c
drivers/net/ipa/ipa_gsi.c
drivers/net/ipa/ipa_gsi.h
drivers/net/ipa/ipa_interrupt.h
drivers/net/ipa/ipa_main.c
drivers/net/ipa/ipa_mem.c
drivers/net/ipa/ipa_qmi_msg.c
drivers/net/ipa/ipa_reg.h
drivers/net/ipa/ipa_smp2p.h
drivers/net/ipa/ipa_table.c
drivers/net/ipa/ipa_table.h
drivers/net/ipa/ipa_uc.c
drivers/net/macsec.c
drivers/net/macvlan.c
drivers/net/netdevsim/Makefile
drivers/net/netdevsim/dev.c
drivers/net/netdevsim/netdev.c
drivers/net/netdevsim/netdevsim.h
drivers/net/netdevsim/udp_tunnels.c [new file with mode: 0644]
drivers/net/phy/Kconfig
drivers/net/phy/Makefile
drivers/net/phy/adin.c
drivers/net/phy/at803x.c
drivers/net/phy/dp83640.c
drivers/net/phy/dp83822.c
drivers/net/phy/dp83869.c
drivers/net/phy/marvell.c
drivers/net/phy/marvell10g.c
drivers/net/phy/mdio-boardinfo.c
drivers/net/phy/mdio-cavium.h
drivers/net/phy/mdio-mux-gpio.c
drivers/net/phy/mdio-octeon.c
drivers/net/phy/mdio-thunder.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/mdio_device.c
drivers/net/phy/mdio_devres.c [new file with mode: 0644]
drivers/net/phy/mscc/Makefile
drivers/net/phy/mscc/mscc.h
drivers/net/phy/mscc/mscc_fc_buffer.h
drivers/net/phy/mscc/mscc_mac.h
drivers/net/phy/mscc/mscc_macsec.c
drivers/net/phy/mscc/mscc_macsec.h
drivers/net/phy/mscc/mscc_main.c
drivers/net/phy/mscc/mscc_ptp.c [new file with mode: 0644]
drivers/net/phy/mscc/mscc_ptp.h [new file with mode: 0644]
drivers/net/phy/phy-c45.c
drivers/net/phy/phy-core.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/phylink.c
drivers/net/phy/realtek.c
drivers/net/phy/sfp.c
drivers/net/phy/smsc.c
drivers/net/plip/plip.c
drivers/net/ppp/pppoe.c
drivers/net/ppp/pptp.c
drivers/net/thunderbolt.c
drivers/net/tun.c
drivers/net/usb/ax88179_178a.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/ipheth.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/smsc95xx.c
drivers/net/usb/usbnet.c
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wan/c101.c
drivers/net/wan/cosa.c
drivers/net/wan/farsync.c
drivers/net/wan/lapbether.c
drivers/net/wan/lmc/lmc_main.c
drivers/net/wan/n2.c
drivers/net/wan/pc300too.c
drivers/net/wan/pci200syn.c
drivers/net/wireguard/device.c
drivers/net/wireguard/device.h
drivers/net/wireguard/netlink.c
drivers/net/wireguard/noise.c
drivers/net/wireguard/queueing.h
drivers/net/wireguard/receive.c
drivers/net/wireguard/socket.c
drivers/net/wireless/Kconfig
drivers/net/wireless/Makefile
drivers/net/wireless/admtek/adm8211.c
drivers/net/wireless/ath/Kconfig
drivers/net/wireless/ath/ath10k/htt_tx.c
drivers/net/wireless/ath/ath10k/usb.c
drivers/net/wireless/ath/ath11k/Kconfig
drivers/net/wireless/ath/ath11k/Makefile
drivers/net/wireless/ath/ath11k/core.c
drivers/net/wireless/ath/ath11k/core.h
drivers/net/wireless/ath/ath11k/dbring.c [new file with mode: 0644]
drivers/net/wireless/ath/ath11k/dbring.h [new file with mode: 0644]
drivers/net/wireless/ath/ath11k/debug.c
drivers/net/wireless/ath/ath11k/dp.c
drivers/net/wireless/ath/ath11k/dp.h
drivers/net/wireless/ath/ath11k/dp_rx.c
drivers/net/wireless/ath/ath11k/dp_tx.c
drivers/net/wireless/ath/ath11k/mac.c
drivers/net/wireless/ath/ath11k/reg.c
drivers/net/wireless/ath/ath11k/spectral.c [new file with mode: 0644]
drivers/net/wireless/ath/ath11k/spectral.h [new file with mode: 0644]
drivers/net/wireless/ath/ath11k/wmi.c
drivers/net/wireless/ath/ath11k/wmi.h
drivers/net/wireless/ath/ath6kl/usb.c
drivers/net/wireless/ath/ath9k/Kconfig
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/ath/carl9170/Kconfig
drivers/net/wireless/ath/carl9170/usb.c
drivers/net/wireless/ath/spectral_common.h
drivers/net/wireless/ath/wil6210/Kconfig
drivers/net/wireless/ath/wil6210/txrx.c
drivers/net/wireless/atmel/at76c50x-usb.c
drivers/net/wireless/broadcom/b43/main.c
drivers/net/wireless/broadcom/b43/phy_common.c
drivers/net/wireless/broadcom/b43/phy_g.c
drivers/net/wireless/broadcom/b43/phy_ht.c
drivers/net/wireless/broadcom/b43/phy_lp.c
drivers/net/wireless/broadcom/b43/phy_n.c
drivers/net/wireless/broadcom/b43/radio_2056.c
drivers/net/wireless/broadcom/b43/tables_nphy.c
drivers/net/wireless/broadcom/b43legacy/main.c
drivers/net/wireless/broadcom/b43legacy/phy.c
drivers/net/wireless/broadcom/b43legacy/radio.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.c
drivers/net/wireless/broadcom/brcm80211/brcmsmac/led.h
drivers/net/wireless/cisco/airo.c
drivers/net/wireless/intel/ipw2x00/Kconfig
drivers/net/wireless/intel/ipw2x00/ipw2100.c
drivers/net/wireless/intel/ipw2x00/ipw2200.c
drivers/net/wireless/intel/iwlegacy/4965-mac.c
drivers/net/wireless/intel/iwlegacy/4965-rs.c
drivers/net/wireless/intel/iwlegacy/common.c
drivers/net/wireless/intel/iwlwifi/Kconfig
drivers/net/wireless/intel/iwlwifi/dvm/commands.h
drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intersil/Kconfig
drivers/net/wireless/intersil/hostap/hostap_hw.c
drivers/net/wireless/intersil/hostap/hostap_pci.c
drivers/net/wireless/intersil/orinoco/Kconfig
drivers/net/wireless/intersil/orinoco/orinoco_nortel.c
drivers/net/wireless/intersil/orinoco/orinoco_pci.c
drivers/net/wireless/intersil/orinoco/orinoco_pci.h
drivers/net/wireless/intersil/orinoco/orinoco_plx.c
drivers/net/wireless/intersil/orinoco/orinoco_tmd.c
drivers/net/wireless/intersil/orinoco/orinoco_usb.c
drivers/net/wireless/intersil/p54/Kconfig
drivers/net/wireless/intersil/p54/fwio.c
drivers/net/wireless/intersil/p54/p54pci.c
drivers/net/wireless/intersil/p54/p54usb.c
drivers/net/wireless/intersil/prism54/isl_oid.h
drivers/net/wireless/intersil/prism54/islpci_dev.c
drivers/net/wireless/intersil/prism54/islpci_eth.c
drivers/net/wireless/intersil/prism54/islpci_hotplug.c
drivers/net/wireless/intersil/prism54/islpci_mgt.c
drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
drivers/net/wireless/marvell/mwifiex/cfg80211.c
drivers/net/wireless/marvell/mwifiex/main.c
drivers/net/wireless/marvell/mwifiex/sta_cmd.c
drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
drivers/net/wireless/mediatek/mt76/Kconfig
drivers/net/wireless/mediatek/mt76/Makefile
drivers/net/wireless/mediatek/mt76/debugfs.c
drivers/net/wireless/mediatek/mt76/dma.c
drivers/net/wireless/mediatek/mt76/eeprom.c
drivers/net/wireless/mediatek/mt76/mac80211.c
drivers/net/wireless/mediatek/mt76/mt76.h
drivers/net/wireless/mediatek/mt76/mt7603/main.c
drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
drivers/net/wireless/mediatek/mt76/mt7615/Makefile
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
drivers/net/wireless/mediatek/mt76/mt7615/dma.c
drivers/net/wireless/mediatek/mt76/mt7615/init.c
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
drivers/net/wireless/mediatek/mt76/mt7615/mac.h
drivers/net/wireless/mediatek/mt76/mt7615/main.c
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
drivers/net/wireless/mediatek/mt76/mt7615/pci.c
drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
drivers/net/wireless/mediatek/mt76/mt7615/regs.h
drivers/net/wireless/mediatek/mt76/mt7615/sdio.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt7615/sdio.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt7615/testmode.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt7615/usb.c
drivers/net/wireless/mediatek/mt76/mt7615/usb_init.c [deleted file]
drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
drivers/net/wireless/mediatek/mt76/mt76x02.h
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
drivers/net/wireless/mediatek/mt76/mt7915/dma.c
drivers/net/wireless/mediatek/mt76/mt7915/init.c
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
drivers/net/wireless/mediatek/mt76/mt7915/mac.h
drivers/net/wireless/mediatek/mt76/mt7915/main.c
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
drivers/net/wireless/mediatek/mt76/mt7915/pci.c
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
drivers/net/wireless/mediatek/mt76/pci.c
drivers/net/wireless/mediatek/mt76/sdio.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/testmode.c [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/testmode.h [new file with mode: 0644]
drivers/net/wireless/mediatek/mt76/tx.c
drivers/net/wireless/mediatek/mt76/usb.c
drivers/net/wireless/mediatek/mt76/util.c
drivers/net/wireless/mediatek/mt7601u/mcu.c
drivers/net/wireless/microchip/Kconfig [new file with mode: 0644]
drivers/net/wireless/microchip/Makefile [new file with mode: 0644]
drivers/net/wireless/microchip/wilc1000/Kconfig [moved from drivers/staging/wilc1000/Kconfig with 100% similarity]
drivers/net/wireless/microchip/wilc1000/Makefile [moved from drivers/staging/wilc1000/Makefile with 72% similarity]
drivers/net/wireless/microchip/wilc1000/cfg80211.c [moved from drivers/staging/wilc1000/cfg80211.c with 99% similarity]
drivers/net/wireless/microchip/wilc1000/cfg80211.h [moved from drivers/staging/wilc1000/cfg80211.h with 100% similarity]
drivers/net/wireless/microchip/wilc1000/fw.h [moved from drivers/staging/wilc1000/fw.h with 100% similarity]
drivers/net/wireless/microchip/wilc1000/hif.c [moved from drivers/staging/wilc1000/hif.c with 100% similarity]
drivers/net/wireless/microchip/wilc1000/hif.h [moved from drivers/staging/wilc1000/hif.h with 100% similarity]
drivers/net/wireless/microchip/wilc1000/mon.c [moved from drivers/staging/wilc1000/mon.c with 98% similarity]
drivers/net/wireless/microchip/wilc1000/netdev.c [moved from drivers/staging/wilc1000/netdev.c with 96% similarity]
drivers/net/wireless/microchip/wilc1000/netdev.h [moved from drivers/staging/wilc1000/netdev.h with 100% similarity]
drivers/net/wireless/microchip/wilc1000/sdio.c [moved from drivers/staging/wilc1000/sdio.c with 99% similarity]
drivers/net/wireless/microchip/wilc1000/spi.c [moved from drivers/staging/wilc1000/spi.c with 100% similarity]
drivers/net/wireless/microchip/wilc1000/wlan.c [moved from drivers/staging/wilc1000/wlan.c with 100% similarity]
drivers/net/wireless/microchip/wilc1000/wlan.h [moved from drivers/staging/wilc1000/wlan.h with 100% similarity]
drivers/net/wireless/microchip/wilc1000/wlan_cfg.c [moved from drivers/staging/wilc1000/wlan_cfg.c with 100% similarity]
drivers/net/wireless/microchip/wilc1000/wlan_cfg.h [moved from drivers/staging/wilc1000/wlan_cfg.h with 100% similarity]
drivers/net/wireless/microchip/wilc1000/wlan_if.h [moved from drivers/staging/wilc1000/wlan_if.h with 100% similarity]
drivers/net/wireless/quantenna/qtnfmac/core.c
drivers/net/wireless/ralink/rt2x00/rt2400pci.c
drivers/net/wireless/ralink/rt2x00/rt2500pci.c
drivers/net/wireless/ralink/rt2x00/rt2800pci.c
drivers/net/wireless/ralink/rt2x00/rt2x00.h
drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
drivers/net/wireless/ralink/rt2x00/rt2x00pci.c
drivers/net/wireless/ralink/rt2x00/rt2x00pci.h
drivers/net/wireless/ralink/rt2x00/rt2x00soc.c
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
drivers/net/wireless/ralink/rt2x00/rt61pci.c
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
drivers/net/wireless/realtek/rtlwifi/ps.c
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
drivers/net/wireless/realtek/rtlwifi/usb.c
drivers/net/wireless/realtek/rtw88/Kconfig
drivers/net/wireless/realtek/rtw88/Makefile
drivers/net/wireless/realtek/rtw88/bf.c
drivers/net/wireless/realtek/rtw88/coex.c
drivers/net/wireless/realtek/rtw88/coex.h
drivers/net/wireless/realtek/rtw88/debug.c
drivers/net/wireless/realtek/rtw88/fw.c
drivers/net/wireless/realtek/rtw88/fw.h
drivers/net/wireless/realtek/rtw88/mac80211.c
drivers/net/wireless/realtek/rtw88/main.c
drivers/net/wireless/realtek/rtw88/main.h
drivers/net/wireless/realtek/rtw88/pci.c
drivers/net/wireless/realtek/rtw88/reg.h
drivers/net/wireless/realtek/rtw88/rtw8723d.c
drivers/net/wireless/realtek/rtw88/rtw8821c.c [new file with mode: 0644]
drivers/net/wireless/realtek/rtw88/rtw8821c.h [new file with mode: 0644]
drivers/net/wireless/realtek/rtw88/rtw8821c_table.c [new file with mode: 0644]
drivers/net/wireless/realtek/rtw88/rtw8821c_table.h [new file with mode: 0644]
drivers/net/wireless/realtek/rtw88/rtw8821ce.c [new file with mode: 0644]
drivers/net/wireless/realtek/rtw88/rtw8821ce.h [new file with mode: 0644]
drivers/net/wireless/realtek/rtw88/rtw8822b.c
drivers/net/wireless/realtek/rtw88/rtw8822c.c
drivers/net/wireless/realtek/rtw88/rtw8822ce.c
drivers/net/wireless/realtek/rtw88/tx.c
drivers/net/wireless/realtek/rtw88/tx.h
drivers/net/wireless/ti/wl1251/event.c
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/zydas/zd1211rw/zd_usb.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netback/rx.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/nvdimm/region_devs.c
drivers/nvdimm/security.c
drivers/nvme/host/core.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/loop.c
drivers/of/of_mdio.c
drivers/oprofile/cpu_buffer.h
drivers/phy/samsung/phy-samsung-usb2.h
drivers/pinctrl/freescale/pinctrl-imx.c
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/pinctrl-amd.h
drivers/pinctrl/pinctrl-mcp23s08_spi.c
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/qcom/pinctrl-ipq6018.c
drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
drivers/pinctrl/tegra/pinctrl-tegra.c
drivers/ptp/ptp_chardev.c
drivers/ptp/ptp_pch.c
drivers/rapidio/rio-scan.c
drivers/regulator/Kconfig
drivers/regulator/da9063-regulator.c
drivers/regulator/helpers.c
drivers/regulator/pfuze100-regulator.c
drivers/s390/cio/qdio.h
drivers/s390/cio/qdio_debug.c
drivers/s390/cio/qdio_main.c
drivers/s390/cio/vfio_ccw_chp.c
drivers/s390/crypto/zcrypt_ep11misc.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_mpc.c
drivers/s390/net/qeth_core_mpc.h
drivers/s390/net/qeth_core_sys.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/scsi/zfcp_erp.c
drivers/s390/virtio/virtio_ccw.c
drivers/scsi/aic94xx/aic94xx_init.c
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
drivers/scsi/ipr.c
drivers/scsi/isci/init.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/mvsas/mv_init.c
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/qedf/qedf_main.c
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_nvme.c
drivers/scsi/ufs/ufs_bsg.c
drivers/soc/imx/soc-imx8m.c
drivers/soc/ti/knav_qmss.h
drivers/soc/ti/omap_prm.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-rspi.c
drivers/spi/spi-sprd-adi.c
drivers/spi/spi-stm32-qspi.c
drivers/spi/spidev.c
drivers/ssb/driver_chipcommon.c
drivers/ssb/driver_chipcommon_pmu.c
drivers/ssb/sprom.c
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/staging/octeon/ethernet-mdio.c
drivers/staging/octeon/ethernet-mdio.h
drivers/staging/octeon/ethernet.c
drivers/staging/rtl8723bs/core/rtw_wlan_util.c
drivers/staging/wfx/hif_tx.c
drivers/staging/wfx/hif_tx.h
drivers/staging/wfx/queue.c
drivers/staging/wfx/scan.c
drivers/thermal/cpufreq_cooling.c
drivers/thermal/imx_thermal.c
drivers/thermal/mtk_thermal.c
drivers/thermal/qcom/tsens.c
drivers/thermal/rcar_gen3_thermal.c
drivers/thermal/sprd_thermal.c
drivers/tty/serial/kgdb_nmi.c
drivers/tty/serial/kgdboc.c
drivers/usb/cdns3/ep0.c
drivers/usb/cdns3/trace.h
drivers/usb/class/cdc-acm.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/platform.c
drivers/usb/dwc3/dwc3-exynos.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/early/ehci-dbgp.c
drivers/usb/gadget/udc/mv_udc_core.c
drivers/usb/host/ehci-exynos.c
drivers/usb/host/ehci-pci.c
drivers/usb/host/ohci-sm501.c
drivers/usb/host/xhci-mtk.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/usbtest.c
drivers/usb/phy/phy-tegra-usb.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/renesas_usbhs/fifo.h
drivers/usb/typec/mux/intel_pmc_mux.c
drivers/usb/typec/tcpm/tcpci_rt1711h.c
drivers/vdpa/vdpa.c
drivers/vfio/pci/vfio_pci.c
drivers/vfio/pci/vfio_pci_config.c
drivers/vhost/test.c
drivers/vhost/test.h
drivers/vhost/vdpa.c
drivers/video/backlight/tosa_lcd.c
drivers/video/fbdev/core/fbcon.c
drivers/video/fbdev/hpfb.c
drivers/video/fbdev/uvesafb.c
drivers/virtio/virtio_mem.c
drivers/w1/w1_netlink.h
drivers/xen/xenbus/xenbus_client.c
fs/afs/cell.c
fs/afs/dir.c
fs/afs/dir_silly.c
fs/afs/file.c
fs/afs/flock.c
fs/afs/fs_operation.c
fs/afs/fs_probe.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/main.c
fs/afs/misc.c
fs/afs/server.c
fs/afs/write.c
fs/afs/yfsclient.c
fs/aio.c
fs/autofs/waitq.c
fs/block_dev.c
fs/btrfs/block-group.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ref-verify.c
fs/btrfs/space-info.c
fs/btrfs/super.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.h
fs/cachefiles/rdwr.c
fs/cifs/cifs_debug.c
fs/cifs/cifsfs.h
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/ioctl.c
fs/cifs/misc.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/transport.c
fs/debugfs/file.c
fs/efivarfs/file.c
fs/erofs/zdata.h
fs/exec.c
fs/exfat/dir.c
fs/exfat/exfat_fs.h
fs/exfat/file.c
fs/exfat/namei.c
fs/exfat/super.c
fs/ext4/Makefile
fs/ext4/dir.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/mballoc.c
fs/ext4/super.c
fs/ext4/verity.c
fs/ext4/xattr.c
fs/ext4/xattr.h
fs/ext4/xattr_hurd.c [new file with mode: 0644]
fs/gfs2/aops.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/glops.c
fs/gfs2/incore.h
fs/gfs2/inode.c
fs/gfs2/log.c
fs/gfs2/log.h
fs/gfs2/main.c
fs/gfs2/ops_fstype.c
fs/gfs2/recovery.c
fs/gfs2/super.c
fs/io-wq.c
fs/io-wq.h
fs/io_uring.c
fs/jbd2/journal.c
fs/jffs2/nodelist.h
fs/jffs2/summary.h
fs/nfs/direct.c
fs/nfs/file.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/nfs4namespace.c
fs/nfsd/nfs4state.c
fs/nfsd/nfsctl.c
fs/nfsd/nfsd.h
fs/nfsd/vfs.c
fs/ocfs2/dlmglue.c
fs/ocfs2/ocfs2.h
fs/ocfs2/ocfs2_fs.h
fs/ocfs2/suballoc.c
fs/proc/bootconfig.c
fs/proc/kcore.c
fs/proc/proc_sysctl.c
fs/read_write.c
fs/squashfs/squashfs_fs.h
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_log_priv.h
include/asm-generic/cacheflush.h
include/asm-generic/checksum.h
include/asm-generic/hugetlb.h
include/asm-generic/vmlinux.lds.h
include/crypto/if_alg.h
include/drm/drm_displayid.h
include/keys/encrypted-type.h
include/keys/rxrpc-type.h
include/linux/atmdev.h
include/linux/atomic-fallback.h
include/linux/binfmts.h
include/linux/bits.h
include/linux/blkdev.h
include/linux/bpf-cgroup.h
include/linux/bpf-netns.h
include/linux/bpf.h
include/linux/bpf_types.h
include/linux/bpf_verifier.h
include/linux/bpfilter.h
include/linux/btf.h
include/linux/btf_ids.h [new file with mode: 0644]
include/linux/can/skb.h
include/linux/cb710.h
include/linux/ceph/libceph.h
include/linux/cgroup-defs.h
include/linux/cgroup.h
include/linux/compat.h
include/linux/compiler-clang.h
include/linux/compiler-gcc.h
include/linux/compiler.h
include/linux/compiler_attributes.h
include/linux/compiler_types.h
include/linux/debugfs.h
include/linux/dma-direct.h
include/linux/dma-mapping.h
include/linux/dmaengine.h
include/linux/efi.h
include/linux/ethtool.h
include/linux/filter.h
include/linux/fs.h
include/linux/fscache-cache.h
include/linux/fsl/enetc_mdio.h
include/linux/hashtable.h
include/linux/host1x.h
include/linux/i2c.h
include/linux/icmp.h
include/linux/icmpv6.h
include/linux/ieee80211.h
include/linux/if_bridge.h
include/linux/if_vlan.h
include/linux/indirect_call_wrapper.h
include/linux/intel-iommu.h
include/linux/ipv6.h
include/linux/irq_work.h
include/linux/jbd2.h
include/linux/kallsyms.h
include/linux/kexec.h
include/linux/kgdb.h
include/linux/kprobes.h
include/linux/kvm_host.h
include/linux/libata.h
include/linux/linkmode.h
include/linux/lsm_hook_defs.h
include/linux/marvell_phy.h
include/linux/mdio.h
include/linux/mlx5/accel.h
include/linux/mlx5/cq.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/mlx5/fs.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/port.h
include/linux/mlx5/qp.h
include/linux/mlx5/rsc_dump.h [new file with mode: 0644]
include/linux/mlx5/vport.h
include/linux/mmc/sdio_ids.h
include/linux/mmzone.h
include/linux/mroute.h
include/linux/mroute6.h
include/linux/net.h
include/linux/net/intel/i40e_client.h [moved from drivers/net/ethernet/intel/i40e/i40e_client.h with 93% similarity]
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/netfilter/x_tables.h
include/linux/netfilter_ipv4/ip_tables.h
include/linux/netfilter_ipv6/ip6_tables.h
include/linux/netpoll.h
include/linux/of_mdio.h
include/linux/overflow.h
include/linux/pci.h
include/linux/perf_event.h
include/linux/pgtable.h
include/linux/phy.h
include/linux/phylink.h
include/linux/psp-sev.h
include/linux/qed/common_hsi.h
include/linux/qed/eth_common.h
include/linux/qed/fcoe_common.h
include/linux/qed/iscsi_common.h
include/linux/qed/iwarp_common.h
include/linux/qed/qed_chain.h
include/linux/qed/qed_eth_if.h
include/linux/qed/qed_fcoe_if.h
include/linux/qed/qed_if.h
include/linux/qed/qed_iov_if.h
include/linux/qed/qed_iscsi_if.h
include/linux/qed/qed_ll2_if.h
include/linux/qed/qed_rdma_if.h
include/linux/qed/qede_rdma.h
include/linux/qed/rdma_common.h
include/linux/qed/roce_common.h
include/linux/qed/storage_common.h
include/linux/qed/tcp_common.h
include/linux/scatterlist.h
include/linux/sched.h
include/linux/sched/jobctl.h
include/linux/sched/signal.h
include/linux/sctp.h
include/linux/skbuff.h
include/linux/skmsg.h
include/linux/smp.h
include/linux/smp_types.h [new file with mode: 0644]
include/linux/sockptr.h [new file with mode: 0644]
include/linux/swap.h
include/linux/syscalls.h
include/linux/task_work.h
include/linux/tcp.h
include/linux/tifm.h
include/linux/timekeeping.h
include/linux/tpm_eventlog.h
include/linux/uaccess.h
include/linux/umh.h
include/linux/usb/usbnet.h
include/linux/usermode_driver.h [new file with mode: 0644]
include/linux/vmalloc.h
include/linux/wimax/debug.h
include/net/9p/transport.h
include/net/act_api.h
include/net/bonding.h
include/net/busy_poll.h
include/net/caif/caif_layer.h
include/net/cipso_ipv4.h
include/net/compat.h
include/net/devlink.h
include/net/dsa.h
include/net/dst.h
include/net/flow.h
include/net/flow_dissector.h
include/net/flow_offload.h
include/net/genetlink.h
include/net/gue.h
include/net/inet_connection_sock.h
include/net/inet_ecn.h
include/net/inet_sock.h
include/net/ip.h
include/net/ip6_checksum.h
include/net/ip6_fib.h
include/net/ip_tunnels.h
include/net/ip_vs.h
include/net/ipv6.h
include/net/l3mdev.h
include/net/mptcp.h
include/net/netfilter/nf_flow_table.h
include/net/netfilter/nf_tables.h
include/net/netns/bpf.h
include/net/pkt_cls.h
include/net/pkt_sched.h
include/net/rpl.h
include/net/sctp/constants.h
include/net/sctp/sctp.h
include/net/sctp/structs.h
include/net/sock.h
include/net/switchdev.h
include/net/tc_act/tc_ct.h
include/net/tc_act/tc_police.h
include/net/tcp.h
include/net/tls.h
include/net/transp_v6.h
include/net/tso.h
include/net/udp.h
include/net/udp_tunnel.h
include/net/wimax.h
include/net/xdp.h
include/net/xdp_sock.h
include/net/xfrm.h
include/net/xsk_buff_pool.h
include/soc/mscc/ocelot.h
include/soc/mscc/ocelot_dev.h
include/soc/mscc/ocelot_qsys.h
include/soc/mscc/ocelot_sys.h
include/sound/compress_driver.h
include/sound/dmaengine_pcm.h
include/sound/soc.h
include/trace/events/block.h
include/trace/events/rxrpc.h
include/trace/events/xdp.h
include/uapi/asm-generic/unistd.h
include/uapi/linux/atmioc.h
include/uapi/linux/batadv_packet.h
include/uapi/linux/batman_adv.h
include/uapi/linux/bpf.h
include/uapi/linux/caif/caif_socket.h
include/uapi/linux/devlink.h
include/uapi/linux/errqueue.h
include/uapi/linux/ethtool.h
include/uapi/linux/ethtool_netlink.h
include/uapi/linux/fb.h
include/uapi/linux/fs.h
include/uapi/linux/icmp.h
include/uapi/linux/icmpv6.h
include/uapi/linux/if_bridge.h
include/uapi/linux/if_link.h
include/uapi/linux/if_xdp.h
include/uapi/linux/in.h
include/uapi/linux/in6.h
include/uapi/linux/inet_diag.h
include/uapi/linux/io_uring.h
include/uapi/linux/mdio.h
include/uapi/linux/mptcp.h
include/uapi/linux/mrp_bridge.h
include/uapi/linux/ndctl.h
include/uapi/linux/neighbour.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/pkt_cls.h
include/uapi/linux/pkt_sched.h
include/uapi/linux/ptp_clock.h
include/uapi/linux/rds.h
include/uapi/linux/rtnetlink.h
include/uapi/linux/snmp.h
include/uapi/linux/spi/spidev.h
include/uapi/linux/vfio.h
include/uapi/linux/xattr.h
include/uapi/linux/xdp_diag.h
include/xen/interface/io/netif.h
init/Kconfig
kernel/Makefile
kernel/bpf/arraymap.c
kernel/bpf/bpf_struct_ops.c
kernel/bpf/btf.c
kernel/bpf/cgroup.c
kernel/bpf/core.c
kernel/bpf/cpumap.c
kernel/bpf/devmap.c
kernel/bpf/hashtab.c
kernel/bpf/local_storage.c
kernel/bpf/lpm_trie.c
kernel/bpf/map_iter.c
kernel/bpf/net_namespace.c
kernel/bpf/queue_stack_maps.c
kernel/bpf/reuseport_array.c
kernel/bpf/ringbuf.c
kernel/bpf/stackmap.c
kernel/bpf/syscall.c
kernel/bpf/task_iter.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup.c
kernel/debug/debug_core.c
kernel/debug/gdbstub.c
kernel/debug/kdb/kdb_io.c
kernel/debug/kdb/kdb_main.c
kernel/debug/kdb/kdb_support.c
kernel/dma/Kconfig
kernel/dma/direct.c
kernel/dma/mapping.c
kernel/dma/pool.c
kernel/dma/remap.c
kernel/events/callchain.c
kernel/exit.c
kernel/fork.c
kernel/kallsyms.c
kernel/kexec_file.c
kernel/kprobes.c
kernel/kthread.c
kernel/module.c
kernel/nsproxy.c
kernel/padata.c
kernel/printk/printk.c
kernel/rcu/rcuperf.c
kernel/rcu/tree.c
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/idle.c
kernel/sched/sched.h
kernel/signal.c
kernel/smp.c
kernel/task_work.c
kernel/trace/Makefile
kernel/trace/blktrace.c
kernel/trace/bpf_trace.c
kernel/trace/bpf_trace.h [new file with mode: 0644]
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_boot.c
kernel/trace/trace_entries.h
kernel/trace/trace_events_trigger.c
kernel/trace/trace_export.c
kernel/trace/trace_functions.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_probe.c
kernel/trace/trace_probe.h
kernel/umh.c
kernel/usermode_driver.c [new file with mode: 0644]
kernel/workqueue.c
lib/Kconfig.debug
lib/Kconfig.kasan
lib/checksum.c
lib/packing.c
lib/seq_buf.c
lib/test_bpf.c
lib/test_hmm.c
lib/test_lockup.c
lib/test_objagg.c
mm/cma.c
mm/cma.h
mm/cma_debug.c
mm/compaction.c
mm/debug.c
mm/debug_vm_pgtable.c
mm/filemap.c
mm/gup.c
mm/hugetlb.c
mm/maccess.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/migrate.c
mm/nommu.c
mm/page_alloc.c
mm/rodata_test.c
mm/slab.h
mm/slab_common.c
mm/slub.c
mm/swap.c
mm/swap_state.c
mm/vmalloc.c
mm/vmscan.c
mm/workingset.c
net/8021q/vlan_dev.c
net/9p/client.c
net/9p/mod.c
net/9p/trans_rdma.c
net/Kconfig
net/appletalk/ddp.c
net/atm/common.c
net/atm/common.h
net/atm/lec_arpc.h
net/atm/pvc.c
net/atm/svc.c
net/ax25/Kconfig
net/ax25/af_ax25.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bat_v_elp.c
net/batman-adv/bat_v_ogm.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/fragmentation.c
net/batman-adv/hard-interface.c
net/batman-adv/log.h
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/multicast.c
net/batman-adv/netlink.c
net/batman-adv/network-coding.c
net/batman-adv/originator.c
net/batman-adv/routing.c
net/batman-adv/send.c
net/batman-adv/soft-interface.c
net/batman-adv/tp_meter.c
net/batman-adv/translation-table.c
net/batman-adv/tvlv.c
net/batman-adv/types.h
net/bluetooth/bnep/sock.c
net/bluetooth/cmtp/sock.c
net/bluetooth/hci_sock.c
net/bluetooth/hidp/sock.c
net/bluetooth/l2cap_sock.c
net/bluetooth/rfcomm/sock.c
net/bluetooth/sco.c
net/bpf/test_run.c
net/bpfilter/Kconfig
net/bpfilter/Makefile
net/bpfilter/bpfilter_kern.c
net/bpfilter/bpfilter_umh_blob.S
net/bridge/br_fdb.c
net/bridge/br_mrp.c
net/bridge/br_mrp_netlink.c
net/bridge/br_mrp_switchdev.c
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/bridge/br_netlink_tunnel.c
net/bridge/br_private.h
net/bridge/br_private_mrp.h
net/bridge/netfilter/ebtables.c
net/bridge/netfilter/nft_meta_bridge.c
net/bridge/netfilter/nft_reject_bridge.c
net/caif/caif_socket.c
net/can/af_can.c
net/can/bcm.c
net/can/j1939/socket.c
net/can/raw.c
net/ceph/ceph_common.c
net/ceph/osd_client.c
net/compat.c
net/core/bpf_sk_storage.c
net/core/dev.c
net/core/dev_addr_lists.c
net/core/dev_ioctl.c
net/core/devlink.c
net/core/drop_monitor.c
net/core/filter.c
net/core/flow_dissector.c
net/core/flow_offload.c
net/core/neighbour.c
net/core/skmsg.c
net/core/sock.c
net/core/sock_map.c
net/core/sysctl_net_core.c
net/core/tso.c
net/core/xdp.c
net/dcb/dcbnl.c
net/dccp/Kconfig
net/dccp/ccids/Kconfig
net/dccp/ccids/ccid3.c
net/dccp/ccids/ccid3.h
net/dccp/ccids/lib/packet_history.c
net/dccp/ccids/lib/packet_history.h
net/dccp/dccp.h
net/dccp/feat.c
net/dccp/input.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/dccp/options.c
net/dccp/proto.c
net/dccp/timer.c
net/decnet/af_decnet.c
net/decnet/dn_dev.c
net/decnet/dn_route.c
net/devres.c
net/dsa/Kconfig
net/dsa/Makefile
net/dsa/dsa2.c
net/dsa/dsa_priv.h
net/dsa/master.c
net/dsa/slave.c
net/dsa/tag_edsa.c
net/dsa/tag_ksz.c
net/dsa/tag_lan9303.c
net/dsa/tag_mtk.c
net/dsa/tag_ocelot.c
net/dsa/tag_qca.c
net/dsa/tag_rtl4_a.c [new file with mode: 0644]
net/ethtool/Makefile
net/ethtool/cabletest.c
net/ethtool/common.c
net/ethtool/common.h
net/ethtool/ioctl.c
net/ethtool/linkmodes.c
net/ethtool/linkstate.c
net/ethtool/netlink.c
net/ethtool/netlink.h
net/ethtool/strset.c
net/ethtool/tunnels.c [new file with mode: 0644]
net/hsr/hsr_device.c
net/hsr/hsr_device.h
net/hsr/hsr_forward.c
net/hsr/hsr_main.c
net/hsr/hsr_netlink.c
net/ieee802154/socket.c
net/ipv4/Kconfig
net/ipv4/Makefile
net/ipv4/af_inet.c
net/ipv4/bpfilter/sockopt.c
net/ipv4/cipso_ipv4.c
net/ipv4/esp4_offload.c
net/ipv4/fib_semantics.c
net/ipv4/fib_trie.c
net/ipv4/fou.c
net/ipv4/icmp.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/inet_hashtables.c
net/ipv4/ip_options.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_tunnel_core.c
net/ipv4/ip_vti.c
net/ipv4/ipip.c
net/ipv4/ipmr.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/ipt_SYNPROXY.c
net/ipv4/netfilter/iptable_filter.c
net/ipv4/netfilter/iptable_mangle.c
net/ipv4/netfilter/iptable_nat.c
net/ipv4/netfilter/iptable_raw.c
net/ipv4/netfilter/iptable_security.c
net/ipv4/netfilter/nf_flow_table_ipv4.c
net/ipv4/netfilter/nf_reject_ipv4.c
net/ipv4/netfilter/nft_dup_ipv4.c
net/ipv4/netfilter/nft_fib_ipv4.c
net/ipv4/netfilter/nft_reject_ipv4.c
net/ipv4/ping.c
net/ipv4/proc.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_highspeed.c
net/ipv4/tcp_htcp.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/tcp_veno.c
net/ipv4/udp.c
net/ipv4/udp_impl.h
net/ipv4/udp_tunnel_core.c [moved from net/ipv4/udp_tunnel.c with 100% similarity]
net/ipv4/udp_tunnel_nic.c [new file with mode: 0644]
net/ipv4/udp_tunnel_stub.c [new file with mode: 0644]
net/ipv4/udplite.c
net/ipv6/Kconfig
net/ipv6/af_inet6.c
net/ipv6/datagram.c
net/ipv6/esp6_offload.c
net/ipv6/exthdrs.c
net/ipv6/fib6_rules.c
net/ipv6/fou6.c
net/ipv6/icmp.c
net/ipv6/ila/ila_main.c
net/ipv6/inet6_hashtables.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_icmp.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ip6mr.c
net/ipv6/ipv6_sockglue.c
net/ipv6/mcast.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_SYNPROXY.c
net/ipv6/netfilter/ip6table_filter.c
net/ipv6/netfilter/ip6table_mangle.c
net/ipv6/netfilter/ip6table_nat.c
net/ipv6/netfilter/ip6table_raw.c
net/ipv6/netfilter/ip6table_security.c
net/ipv6/netfilter/nf_flow_table_ipv6.c
net/ipv6/netfilter/nf_reject_ipv6.c
net/ipv6/netfilter/nft_dup_ipv6.c
net/ipv6/netfilter/nft_fib_ipv6.c
net/ipv6/netfilter/nft_reject_ipv6.c
net/ipv6/ping.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/rpl_iptunnel.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/ipv6/udp_impl.h
net/ipv6/udplite.c
net/iucv/af_iucv.c
net/kcm/kcmsock.c
net/key/af_key.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_debugfs.c
net/l2tp/l2tp_eth.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_netlink.c
net/l2tp/l2tp_ppp.c
net/l3mdev/l3mdev.c
net/llc/af_llc.c
net/llc/llc_conn.c
net/llc/llc_input.c
net/llc/llc_pdu.c
net/llc/llc_sap.c
net/mac80211/mesh_hwmp.c
net/mac80211/mesh_pathtbl.c
net/mac80211/rx.c
net/mac80211/status.c
net/mac80211/tx.c
net/mptcp/Kconfig
net/mptcp/Makefile
net/mptcp/crypto.c
net/mptcp/crypto_test.c [new file with mode: 0644]
net/mptcp/mptcp_diag.c [new file with mode: 0644]
net/mptcp/options.c
net/mptcp/pm.c
net/mptcp/pm_netlink.c
net/mptcp/protocol.c
net/mptcp/protocol.h
net/mptcp/subflow.c
net/mptcp/token.c
net/mptcp/token_test.c [new file with mode: 0644]
net/ncsi/ncsi-rsp.c
net/netfilter/ipset/ip_set_bitmap_ip.c
net/netfilter/ipset/ip_set_bitmap_ipmac.c
net/netfilter/ipset/ip_set_bitmap_port.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_gen.h
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_dup_netdev.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_flow_table_inet.c
net/netfilter/nf_flow_table_offload.c
net/netfilter/nf_sockopt.c
net/netfilter/nf_synproxy_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_offload.c
net/netfilter/nfnetlink.c
net/netfilter/nft_compat.c
net/netfilter/nft_connlimit.c
net/netfilter/nft_counter.c
net/netfilter/nft_ct.c
net/netfilter/nft_dup_netdev.c
net/netfilter/nft_fib_inet.c
net/netfilter/nft_fib_netdev.c
net/netfilter/nft_flow_offload.c
net/netfilter/nft_hash.c
net/netfilter/nft_immediate.c
net/netfilter/nft_limit.c
net/netfilter/nft_log.c
net/netfilter/nft_masq.c
net/netfilter/nft_nat.c
net/netfilter/nft_numgen.c
net/netfilter/nft_objref.c
net/netfilter/nft_osf.c
net/netfilter/nft_queue.c
net/netfilter/nft_quota.c
net/netfilter/nft_redir.c
net/netfilter/nft_reject.c
net/netfilter/nft_reject_inet.c
net/netfilter/nft_set_pipapo.c
net/netfilter/nft_set_rbtree.c
net/netfilter/nft_synproxy.c
net/netfilter/nft_tunnel.c
net/netfilter/x_tables.c
net/netfilter/xt_nat.c
net/netlabel/netlabel_domainhash.c
net/netlink/af_netlink.c
net/netlink/genetlink.c
net/netrom/af_netrom.c
net/nfc/core.c
net/nfc/llcp_sock.c
net/nfc/nci/core.c
net/nfc/rawsock.c
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/datapath.h
net/openvswitch/flow_netlink.c
net/openvswitch/flow_table.c
net/openvswitch/flow_table.h
net/openvswitch/vport.c
net/packet/af_packet.c
net/packet/internal.h
net/phonet/pep.c
net/phonet/socket.c
net/qrtr/qrtr.c
net/rds/af_rds.c
net/rds/connection.c
net/rds/ib.h
net/rds/rdma.c
net/rds/rdma_transport.h
net/rds/rds.h
net/rds/send.c
net/rds/transport.c
net/rose/af_rose.c
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_event.c
net/rxrpc/input.c
net/rxrpc/key.c
net/sched/Kconfig
net/sched/act_api.c
net/sched/act_connmark.c
net/sched/act_csum.c
net/sched/act_ct.c
net/sched/act_ctinfo.c
net/sched/act_gact.c
net/sched/act_gate.c
net/sched/act_mirred.c
net/sched/act_mpls.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sched/act_skbedit.c
net/sched/act_vlan.c
net/sched/cls_api.c
net/sched/cls_flow.c
net/sched/cls_flower.c
net/sched/cls_matchall.c
net/sched/cls_tcindex.c
net/sched/cls_u32.c
net/sched/em_canid.c
net/sched/em_ipset.c
net/sched/em_ipt.c
net/sched/em_meta.c
net/sched/ematch.c
net/sched/sch_api.c
net/sched/sch_atm.c
net/sched/sch_cake.c
net/sched/sch_cbq.c
net/sched/sch_drr.c
net/sched/sch_dsmark.c
net/sched/sch_ets.c
net/sched/sch_fq.c
net/sched/sch_fq_codel.c
net/sched/sch_fq_pie.c
net/sched/sch_hfsc.c
net/sched/sch_hhf.c
net/sched/sch_htb.c
net/sched/sch_multiq.c
net/sched/sch_prio.c
net/sched/sch_qfq.c
net/sched/sch_red.c
net/sched/sch_sfb.c
net/sched/sch_sfq.c
net/sched/sch_taprio.c
net/sched/sch_teql.c
net/sctp/associola.c
net/sctp/bind_addr.c
net/sctp/ipv6.c
net/sctp/protocol.c
net/sctp/socket.c
net/smc/af_smc.c
net/smc/smc_clc.c
net/smc/smc_clc.h
net/smc/smc_core.c
net/smc/smc_core.h
net/smc/smc_ib.c
net/smc/smc_ib.h
net/smc/smc_ism.c
net/smc/smc_ism.h
net/smc/smc_llc.c
net/smc/smc_pnet.c
net/smc/smc_wr.c
net/socket.c
net/sunrpc/rpc_pipe.c
net/sunrpc/svcsock.c
net/sunrpc/xdr.c
net/sunrpc/xprtrdma/frwr_ops.c
net/sunrpc/xprtrdma/rpc_rdma.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtrdma/verbs.c
net/sunrpc/xprtrdma/xprt_rdma.h
net/switchdev/switchdev.c
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/bearer.c
net/tipc/discover.c
net/tipc/link.c
net/tipc/msg.c
net/tipc/msg.h
net/tipc/name_distr.c
net/tipc/name_distr.h
net/tipc/name_table.c
net/tipc/name_table.h
net/tipc/node.c
net/tipc/node.h
net/tipc/socket.c
net/tipc/udp_media.c
net/tls/tls_device.c
net/tls/tls_main.c
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/wext-compat.c
net/x25/Kconfig
net/x25/af_x25.c
net/x25/x25_link.c
net/x25/x25_route.c
net/xdp/xsk.c
net/xdp/xsk_buff_pool.c
net/xdp/xsk_diag.c
net/xdp/xsk_queue.h
net/xdp/xskmap.c
net/xfrm/Kconfig
net/xfrm/xfrm_device.c
net/xfrm/xfrm_interface.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_state.c
samples/Kconfig
samples/bpf/Makefile
samples/bpf/fds_example.c
samples/bpf/map_perf_test_kern.c
samples/bpf/map_perf_test_user.c
samples/bpf/offwaketime_kern.c
samples/bpf/test_map_in_map_kern.c
samples/bpf/test_map_in_map_user.c
samples/bpf/test_overhead_kprobe_kern.c
samples/bpf/test_probe_write_user_kern.c
samples/bpf/tracex1_kern.c
samples/bpf/tracex5_kern.c
samples/bpf/xdp_monitor_user.c
samples/bpf/xdp_redirect_cpu_kern.c
samples/bpf/xdp_redirect_cpu_user.c
samples/bpf/xdp_rxq_info_user.c
samples/bpf/xdpsock_user.c
samples/ftrace/sample-trace-array.c
samples/mei/mei-amt-version.c
samples/vfs/test-statx.c
samples/watch_queue/Makefile
scripts/Kbuild.include
scripts/Kconfig.include
scripts/Makefile.extrawarn
scripts/Makefile.lib
scripts/atomic/gen-atomic-fallback.sh
scripts/bpf_helpers_doc.py
scripts/decode_stacktrace.sh
scripts/dtc/checks.c
scripts/dtc/dtc.h
scripts/dtc/flattree.c
scripts/dtc/libfdt/fdt_rw.c
scripts/dtc/libfdt/fdt_sw.c
scripts/dtc/libfdt/libfdt.h
scripts/dtc/treesource.c
scripts/dtc/version_gen.h
scripts/dtc/yamltree.c
scripts/gcc-plugins/Kconfig
scripts/headers_install.sh
scripts/kconfig/qconf.cc
scripts/kconfig/qconf.h
scripts/link-vmlinux.sh
scripts/recordmcount.h
security/integrity/iint.c
security/integrity/ima/ima.h
security/integrity/ima/ima_crypto.c
security/integrity/integrity.h
security/security.c
security/selinux/ss/conditional.c
security/selinux/ss/services.c
sound/core/compress_offload.c
sound/drivers/opl3/opl3_synth.c
sound/hda/intel-dsp-config.c
sound/pci/hda/hda_auto_parser.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/raven/acp3x-pcm-dma.c
sound/soc/amd/renoir/Makefile
sound/soc/codecs/hdac_hda.c
sound/soc/codecs/max98390.c
sound/soc/codecs/rt1015.c
sound/soc/codecs/rt1015.h
sound/soc/codecs/rt5682.c
sound/soc/fsl/fsl_asrc_common.h
sound/soc/fsl/fsl_asrc_dma.c
sound/soc/fsl/fsl_mqs.c
sound/soc/fsl/fsl_ssi.c
sound/soc/intel/boards/Kconfig
sound/soc/qcom/common.c
sound/soc/qcom/qdsp6/q6afe.c
sound/soc/qcom/qdsp6/q6afe.h
sound/soc/qcom/qdsp6/q6asm.c
sound/soc/rockchip/rockchip_pdm.c
sound/soc/soc-core.c
sound/soc/soc-devres.c
sound/soc/soc-generic-dmaengine-pcm.c
sound/soc/soc-pcm.c
sound/soc/soc-topology.c
sound/soc/sof/intel/Kconfig
sound/soc/sof/intel/hda-stream.c
sound/soc/sof/probe.h
sound/soc/sof/sof-pci-dev.c
sound/usb/card.h
sound/usb/endpoint.c
sound/usb/format.c
sound/usb/mixer.c
sound/usb/mixer.h
sound/usb/mixer_quirks.c
sound/usb/pcm.c
sound/usb/quirks-table.h
sound/usb/quirks.c
tools/Makefile
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/msr-index.h
tools/arch/x86/include/uapi/asm/kvm.h
tools/arch/x86/include/uapi/asm/unistd.h
tools/arch/x86/include/uapi/asm/vmx.h
tools/arch/x86/lib/memcpy_64.S
tools/bootconfig/main.c
tools/bootconfig/test-bootconfig.sh
tools/bpf/Makefile
tools/bpf/bpftool/.gitignore
tools/bpf/bpftool/Documentation/bpftool-btf.rst
tools/bpf/bpftool/Documentation/bpftool-link.rst
tools/bpf/bpftool/Documentation/bpftool-map.rst
tools/bpf/bpftool/Documentation/bpftool-prog.rst
tools/bpf/bpftool/Makefile
tools/bpf/bpftool/bash-completion/bpftool
tools/bpf/bpftool/btf.c
tools/bpf/bpftool/common.c
tools/bpf/bpftool/feature.c
tools/bpf/bpftool/gen.c
tools/bpf/bpftool/link.c
tools/bpf/bpftool/main.c
tools/bpf/bpftool/main.h
tools/bpf/bpftool/map.c
tools/bpf/bpftool/pids.c [new file with mode: 0644]
tools/bpf/bpftool/prog.c
tools/bpf/bpftool/skeleton/pid_iter.bpf.c [new file with mode: 0644]
tools/bpf/bpftool/skeleton/pid_iter.h [new file with mode: 0644]
tools/bpf/bpftool/skeleton/profiler.bpf.c
tools/bpf/bpftool/skeleton/profiler.h [deleted file]
tools/bpf/resolve_btfids/Build [new file with mode: 0644]
tools/bpf/resolve_btfids/Makefile [new file with mode: 0644]
tools/bpf/resolve_btfids/main.c [new file with mode: 0644]
tools/build/feature/Makefile
tools/build/feature/test-clang-bpf-co-re.c [new file with mode: 0644]
tools/build/feature/test-clang-bpf-global-var.c [deleted file]
tools/include/linux/bits.h
tools/include/linux/btf_ids.h [new file with mode: 0644]
tools/include/linux/compiler.h
tools/include/uapi/asm-generic/unistd.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/bpf.h
tools/include/uapi/linux/fcntl.h
tools/include/uapi/linux/fs.h
tools/include/uapi/linux/fscrypt.h
tools/include/uapi/linux/if_link.h
tools/include/uapi/linux/if_xdp.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/stat.h
tools/include/uapi/linux/vhost.h
tools/lib/bpf/bpf.h
tools/lib/bpf/bpf_core_read.h
tools/lib/bpf/bpf_endian.h
tools/lib/bpf/bpf_helpers.h
tools/lib/bpf/btf.c
tools/lib/bpf/btf.h
tools/lib/bpf/btf_dump.c
tools/lib/bpf/hashmap.h
tools/lib/bpf/libbpf.c
tools/lib/bpf/libbpf.h
tools/lib/bpf/libbpf.map
tools/lib/bpf/libbpf_probes.c
tools/lib/traceevent/event-parse.c
tools/lib/traceevent/kbuffer-parse.c
tools/lib/traceevent/kbuffer.h
tools/objtool/arch.h
tools/objtool/arch/x86/decode.c
tools/objtool/arch/x86/include/arch_elf.h [new file with mode: 0644]
tools/objtool/check.c
tools/objtool/elf.c
tools/objtool/elf.h
tools/objtool/orc_gen.c
tools/perf/Makefile.config
tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
tools/perf/arch/s390/entry/syscalls/syscall.tbl
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
tools/perf/arch/x86/util/intel-pt.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-script.c
tools/perf/scripts/python/export-to-postgresql.py
tools/perf/scripts/python/exported-sql-viewer.py
tools/perf/scripts/python/flamegraph.py
tools/perf/trace/beauty/statx.c
tools/perf/ui/browsers/hists.c
tools/perf/util/bpf-prologue.c
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/intel-pt.c
tools/perf/util/parse-events.y
tools/perf/util/pmu.h
tools/perf/util/probe-event.c
tools/perf/util/probe-file.c
tools/perf/util/stat-display.c
tools/spi/spidev_test.c
tools/testing/kunit/kunit.py
tools/testing/kunit/kunit_config.py
tools/testing/kunit/kunit_parser.py
tools/testing/kunit/kunit_tool_test.py
tools/testing/kunit/test_data/test_insufficient_memory.log [new file with mode: 0644]
tools/testing/nvdimm/test/nfit_test.h
tools/testing/selftests/Makefile
tools/testing/selftests/arm64/signal/Makefile
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/bpf_legacy.h
tools/testing/selftests/bpf/network_helpers.c
tools/testing/selftests/bpf/network_helpers.h
tools/testing/selftests/bpf/prog_tests/autoload.c [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c
tools/testing/selftests/bpf/prog_tests/connect_force_port.c
tools/testing/selftests/bpf/prog_tests/core_retro.c [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/endian.c [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/fentry_fexit.c
tools/testing/selftests/bpf/prog_tests/flow_dissector.c
tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c
tools/testing/selftests/bpf/prog_tests/ksyms.c [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c
tools/testing/selftests/bpf/prog_tests/map_ptr.c [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/perf_buffer.c
tools/testing/selftests/bpf/prog_tests/resolve_btfids.c [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/sk_lookup.c [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/skeleton.c
tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
tools/testing/selftests/bpf/prog_tests/trace_printk.c [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/udp_limit.c [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/varlen.c [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/bpf_cubic.c
tools/testing/selftests/bpf/progs/bpf_iter.h [new file with mode: 0644]
tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c
tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c
tools/testing/selftests/bpf/progs/bpf_iter_netlink.c
tools/testing/selftests/bpf/progs/bpf_iter_task.c
tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c
tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c
tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h
tools/testing/selftests/bpf/progs/bpf_iter_udp4.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/bpf_iter_udp6.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/bpf_tracing_net.h [new file with mode: 0644]
tools/testing/selftests/bpf/progs/btf_data.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/connect4_prog.c
tools/testing/selftests/bpf/progs/fentry_test.c
tools/testing/selftests/bpf/progs/fexit_test.c
tools/testing/selftests/bpf/progs/map_ptr_kern.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/sockopt_sk.c
tools/testing/selftests/bpf/progs/test_autoload.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_core_retro.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_endian.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c
tools/testing/selftests/bpf/progs/test_ksyms.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_perf_buffer.c
tools/testing/selftests/bpf/progs/test_sk_lookup.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_skeleton.c
tools/testing/selftests/bpf/progs/test_sockmap_kern.h
tools/testing/selftests/bpf/progs/test_varlen.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_vmlinux.c
tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c
tools/testing/selftests/bpf/progs/trace_printk.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/udp_limit.c [new file with mode: 0644]
tools/testing/selftests/bpf/test_kmod.sh
tools/testing/selftests/bpf/test_lwt_seg6local.sh
tools/testing/selftests/bpf/test_maps.c
tools/testing/selftests/bpf/test_progs.c
tools/testing/selftests/bpf/test_progs.h
tools/testing/selftests/bpf/test_sockmap.c
tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c [new file with mode: 0644]
tools/testing/selftests/bpf/verifier/map_ptr.c [new file with mode: 0644]
tools/testing/selftests/bpf/verifier/map_ptr_mixing.c
tools/testing/selftests/bpf/verifier/value_ptr_arith.c
tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
tools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh
tools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh
tools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh
tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_police_scale.sh [new file with mode: 0644]
tools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh
tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_police_scale.sh [new file with mode: 0644]
tools/testing/selftests/drivers/net/mlxsw/tc_police_occ.sh [new file with mode: 0755]
tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh [new file with mode: 0644]
tools/testing/selftests/drivers/net/mlxsw/tc_restrictions.sh
tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh [new file with mode: 0644]
tools/testing/selftests/ftrace/ftracetest
tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
tools/testing/selftests/ftrace/test.d/00basic/trace_pipe.tc
tools/testing/selftests/ftrace/test.d/direct/kprobe-direct.tc
tools/testing/selftests/ftrace/test.d/dynevent/add_remove_kprobe.tc
tools/testing/selftests/ftrace/test.d/dynevent/add_remove_synth.tc
tools/testing/selftests/ftrace/test.d/dynevent/clear_select_events.tc
tools/testing/selftests/ftrace/test.d/dynevent/generic_clear_event.tc
tools/testing/selftests/ftrace/test.d/event/event-enable.tc
tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc
tools/testing/selftests/ftrace/test.d/event/event-pid.tc
tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc
tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc
tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-notrace-pid.tc
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_cpumask.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_mod_trace.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_profile_stat.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_stack_tracer.tc
tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc
tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc
tools/testing/selftests/ftrace/test.d/functions
tools/testing/selftests/ftrace/test.d/instances/instance-event.tc
tools/testing/selftests/ftrace/test.d/instances/instance.tc
tools/testing/selftests/ftrace/test.d/kprobe/add_and_remove.tc
tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_comm.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_symbol.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_ftrace.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_module.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_multiprobe.tc
tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc
tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_maxactive.tc
tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc
tools/testing/selftests/ftrace/test.d/kprobe/profile.tc
tools/testing/selftests/ftrace/test.d/kprobe/uprobe_syntax_errors.tc
tools/testing/selftests/ftrace/test.d/preemptirq/irqsoff_tracer.tc
tools/testing/selftests/ftrace/test.d/template
tools/testing/selftests/ftrace/test.d/tracer/wakeup.tc
tools/testing/selftests/ftrace/test.d/tracer/wakeup_rt.tc
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-eventonoff.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-filter.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-syntax-errors.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-stacktrace.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-hist.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-snapshot.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-synthetic-kernel.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-synthetic.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-traceonoff.tc
tools/testing/selftests/kmod/kmod.sh
tools/testing/selftests/kselftest.h
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/devlink_port_split.py [new file with mode: 0755]
tools/testing/selftests/net/fib_nexthops.sh
tools/testing/selftests/net/forwarding/devlink_lib.sh
tools/testing/selftests/net/forwarding/ethtool.sh
tools/testing/selftests/net/forwarding/ethtool_extended_state.sh [new file with mode: 0755]
tools/testing/selftests/net/forwarding/ethtool_lib.sh
tools/testing/selftests/net/forwarding/forwarding.config.sample
tools/testing/selftests/net/forwarding/pedit_l4port.sh [new file with mode: 0755]
tools/testing/selftests/net/forwarding/sch_red.sh [new file with mode: 0755]
tools/testing/selftests/net/forwarding/tc_police.sh [new file with mode: 0755]
tools/testing/selftests/net/mptcp/Makefile
tools/testing/selftests/net/mptcp/diag.sh [new file with mode: 0755]
tools/testing/selftests/net/mptcp/mptcp_connect.c
tools/testing/selftests/net/mptcp/mptcp_connect.sh
tools/testing/selftests/net/rxtimestamp.c
tools/testing/selftests/net/rxtimestamp.sh [new file with mode: 0755]
tools/testing/selftests/net/so_txtime.c
tools/testing/selftests/net/vrf_strict_mode_test.sh [new file with mode: 0755]
tools/testing/selftests/netfilter/Makefile
tools/testing/selftests/netfilter/nft_conntrack_helper.sh [new file with mode: 0755]
tools/testing/selftests/pidfd/pidfd.h
tools/testing/selftests/pidfd/pidfd_getfd_test.c
tools/testing/selftests/pidfd/pidfd_setns_test.c
tools/testing/selftests/powerpc/pmu/ebb/Makefile
tools/testing/selftests/ptp/testptp.c
tools/testing/selftests/seccomp/seccomp_bpf.c
tools/testing/selftests/tc-testing/Makefile [moved from tools/testing/selftests/tc-testing/bpf/Makefile with 79% similarity]
tools/testing/selftests/tc-testing/action.c [moved from tools/testing/selftests/tc-testing/bpf/action.c with 100% similarity]
tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
tools/testing/selftests/tc-testing/tdc.sh [new file with mode: 0755]
tools/testing/selftests/tc-testing/tdc_config.py
tools/testing/selftests/tpm2/test_smoke.sh
tools/testing/selftests/tpm2/test_space.sh
tools/testing/selftests/tpm2/tpm2.py
tools/testing/selftests/tpm2/tpm2_tests.py
tools/testing/selftests/wireguard/netns.sh
tools/testing/selftests/x86/Makefile
tools/testing/selftests/x86/helpers.h [new file with mode: 0644]
tools/testing/selftests/x86/single_step_syscall.c
tools/testing/selftests/x86/syscall_arg_fault.c
tools/testing/selftests/x86/syscall_nt.c
tools/testing/selftests/x86/test_vsyscall.c
tools/testing/selftests/x86/unwind_vdso.c
tools/virtio/linux/kernel.h
tools/virtio/linux/virtio.h
tools/virtio/virtio_test.c
tools/virtio/vringh_test.c
virt/kvm/kvm_main.c

index 87b9dd8..d5f4804 100644 (file)
@@ -143,6 +143,9 @@ x509.genkey
 /allrandom.config
 /allyes.config
 
+# Kconfig savedefconfig output
+/defconfig
+
 # Kdevelop4
 *.kdev4
 
index c69d9c7..6da12df 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -90,11 +90,16 @@ Frank Rowand <frowand.list@gmail.com> <frank.rowand@sonymobile.com>
 Frank Zago <fzago@systemfabricworks.com>
 Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
 Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
+Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
+Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
+Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@linux.vnet.ibm.com>
 Greg Kroah-Hartman <greg@echidna.(none)>
 Greg Kroah-Hartman <gregkh@suse.de>
 Greg Kroah-Hartman <greg@kroah.com>
 Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
 Hanjun Guo <guohanjun@huawei.com> <hanjun.guo@linaro.org>
+Heiko Carstens <hca@linux.ibm.com> <h.carstens@de.ibm.com>
+Heiko Carstens <hca@linux.ibm.com> <heiko.carstens@de.ibm.com>
 Henk Vergonet <Henk.Vergonet@gmail.com>
 Henrik Kretzschmar <henne@nachtwindheim.de>
 Henrik Rydberg <rydberg@bitmath.org>
index 1e6c28b..f307506 100644 (file)
@@ -56,11 +56,6 @@ Description: The /dev/kmsg character device node provides userspace access
                  seek after the last record available at the time
                  the last SYSLOG_ACTION_CLEAR was issued.
 
-               Due to the record nature of this interface with a "read all"
-               behavior and the specific positions each seek operation sets,
-               SEEK_CUR is not supported, returning -ESPIPE (invalid seek) to
-               errno whenever requested.
-
                The output format consists of a prefix carrying the syslog
                prefix including priority and facility, the 64 bit message
                sequence number and the monotonic timestamp in microseconds,
diff --git a/Documentation/ABI/testing/sysfs-bus-papr-pmem b/Documentation/ABI/testing/sysfs-bus-papr-pmem
new file mode 100644 (file)
index 0000000..5b10d03
--- /dev/null
@@ -0,0 +1,27 @@
+What:          /sys/bus/nd/devices/nmemX/papr/flags
+Date:          Apr, 2020
+KernelVersion: v5.8
+Contact:       linuxppc-dev <linuxppc-dev@lists.ozlabs.org>, linux-nvdimm@lists.01.org,
+Description:
+               (RO) Report flags indicating various states of a
+               papr-pmem NVDIMM device. Each flag maps to a one or
+               more bits set in the dimm-health-bitmap retrieved in
+               response to H_SCM_HEALTH hcall. The details of the bit
+               flags returned in response to this hcall is available
+               at 'Documentation/powerpc/papr_hcalls.rst' . Below are
+               the flags reported in this sysfs file:
+
+               * "not_armed"   : Indicates that NVDIMM contents will not
+                                 survive a power cycle.
+               * "flush_fail"  : Indicates that NVDIMM contents
+                                 couldn't be flushed during last
+                                 shut-down event.
+               * "restore_fail": Indicates that NVDIMM contents
+                                 couldn't be restored during NVDIMM
+                                 initialization.
+               * "encrypted"   : NVDIMM contents are encrypted.
+               * "smart_notify": There is health event for the NVDIMM.
+               * "scrubbed"    : Indicating that contents of the
+                                 NVDIMM have been scrubbed.
+               * "locked"      : Indicating that NVDIMM contents cant
+                                 be modified until next power cycle.
index 151c595..f58cfb0 100644 (file)
@@ -1,6 +1,6 @@
 What:          /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_req
 Date:          Feb 2014
-Contact:       Li Jun <b47624@freescale.com>
+Contact:       Li Jun <jun.li@nxp.com>
 Description:
                Can be set and read.
                Set a_bus_req(A-device bus request) input to be 1 if
@@ -17,7 +17,7 @@ Description:
 
 What:          /sys/bus/platform/devices/ci_hdrc.0/inputs/a_bus_drop
 Date:          Feb 2014
-Contact:       Li Jun <b47624@freescale.com>
+Contact:       Li Jun <jun.li@nxp.com>
 Description:
                Can be set and read
                The a_bus_drop(A-device bus drop) input is 1 when the
@@ -32,7 +32,7 @@ Description:
 
 What:          /sys/bus/platform/devices/ci_hdrc.0/inputs/b_bus_req
 Date:          Feb 2014
-Contact:       Li Jun <b47624@freescale.com>
+Contact:       Li Jun <jun.li@nxp.com>
 Description:
                Can be set and read.
                The b_bus_req(B-device bus request) input is 1 during the time
@@ -47,7 +47,7 @@ Description:
 
 What:          /sys/bus/platform/devices/ci_hdrc.0/inputs/a_clr_err
 Date:          Feb 2014
-Contact:       Li Jun <b47624@freescale.com>
+Contact:       Li Jun <jun.li@nxp.com>
 Description:
                Only can be set.
                The a_clr_err(A-device Vbus error clear) input is used to clear
index 5fb5269..5aad534 100644 (file)
@@ -258,7 +258,7 @@ Configuring the kernel
 Compiling the kernel
 --------------------
 
- - Make sure you have at least gcc 4.6 available.
+ - Make sure you have at least gcc 4.9 available.
    For more information, refer to :ref:`Documentation/process/changes.rst <changes>`.
 
    Please note that you can still run a.out user programs with this kernel.
index ce3e05e..d09471a 100644 (file)
@@ -1356,8 +1356,8 @@ PAGE_SIZE multiple when read back.
 
          thp_fault_alloc
                Number of transparent hugepages which were allocated to satisfy
-               a page fault, including COW faults. This counter is not present
-               when CONFIG_TRANSPARENT_HUGEPAGE is not set.
+               a page fault. This counter is not present when CONFIG_TRANSPARENT_HUGEPAGE
+               is not set.
 
          thp_collapse_alloc
                Number of transparent hugepages which were allocated to allow
index ec62fcc..6cf8adc 100644 (file)
@@ -11,6 +11,7 @@ Device Mapper
     dm-clone
     dm-crypt
     dm-dust
+    dm-ebs
     dm-flakey
     dm-init
     dm-integrity
index 6a233e4..b2acd0d 100644 (file)
@@ -305,8 +305,7 @@ monitor how successfully the system is providing huge pages for use.
 
 thp_fault_alloc
        is incremented every time a huge page is successfully
-       allocated to handle a page fault. This applies to both the
-       first time a page is faulted and for COW faults.
+       allocated to handle a page fault.
 
 thp_collapse_alloc
        is incremented by khugepaged when it has found
index 314fa5b..f28853f 100644 (file)
@@ -171,6 +171,7 @@ infrastructure:
 
 
   3) ID_AA64PFR1_EL1 - Processor Feature Register 1
+
      +------------------------------+---------+---------+
      | Name                         |  bits   | visible |
      +------------------------------+---------+---------+
@@ -181,6 +182,7 @@ infrastructure:
 
 
   4) MIDR_EL1 - Main ID Register
+
      +------------------------------+---------+---------+
      | Name                         |  bits   | visible |
      +------------------------------+---------+---------+
index 936cf2a..3f7c3a7 100644 (file)
@@ -147,6 +147,14 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Qualcomm Tech. | Falkor v{1,2}   | E1041           | QCOM_FALKOR_ERRATUM_1041    |
 +----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Gold    | N/A             | ARM64_ERRATUM_1463225       |
++----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Gold    | N/A             | ARM64_ERRATUM_1418040       |
++----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Silver  | N/A             | ARM64_ERRATUM_1530923       |
++----------------+-----------------+-----------------+-----------------------------+
+| Qualcomm Tech. | Kryo4xx Silver  | N/A             | ARM64_ERRATUM_1024718       |
++----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
 | Fujitsu        | A64FX           | E#010001        | FUJITSU_ERRATUM_010001      |
 +----------------+-----------------+-----------------+-----------------------------+
index 5689c74..bfd55f4 100644 (file)
@@ -186,7 +186,7 @@ prctl(PR_SVE_SET_VL, unsigned long arg)
 
     flags:
 
-       PR_SVE_SET_VL_INHERIT
+       PR_SVE_VL_INHERIT
 
            Inherit the current vector length across execve().  Otherwise, the
            vector length is reset to the system default at execve().  (See
@@ -247,7 +247,7 @@ prctl(PR_SVE_GET_VL)
 
     The following flag may be OR-ed into the result:
 
-       PR_SVE_SET_VL_INHERIT
+       PR_SVE_VL_INHERIT
 
            Vector length will be inherited across execve().
 
@@ -393,7 +393,7 @@ The regset data starts with struct user_sve_header, containing:
 * At every execve() call, the new vector length of the new process is set to
   the system default vector length, unless
 
-    * PR_SVE_SET_VL_INHERIT (or equivalently SVE_PT_VL_INHERIT) is set for the
+    * PR_SVE_VL_INHERIT (or equivalently SVE_PT_VL_INHERIT) is set for the
       calling thread, or
 
     * a deferred vector length change is pending, established via the
index 0d237d4..19d4d15 100644 (file)
@@ -492,13 +492,6 @@ set max_budget to higher values than those to which BFQ would have set
 it with auto-tuning. An alternative way to achieve this goal is to
 just increase the value of timeout_sync, leaving max_budget equal to 0.
 
-weights
--------
-
-Read-only parameter, used to show the weights of the currently active
-BFQ queues.
-
-
 4. Group scheduling with BFQ
 ============================
 
@@ -566,7 +559,7 @@ Parameters to set
 For each group, there is only the following parameter to set.
 
 weight (namely blkio.bfq.weight or io.bfq-weight): the weight of the
-group inside its parent. Available values: 1..10000 (default 100). The
+group inside its parent. Available values: 1..1000 (default 100). The
 linear mapping between ioprio and weights, described at the beginning
 of the tunable section, is still valid, but all weights higher than
 IOPRIO_BE_NR*10 are mapped to ioprio 0.
index 4d565d2..b5361b8 100644 (file)
@@ -691,6 +691,42 @@ kernel API, the ``insn_off`` is the instruction offset in the unit of ``struct
 bpf_insn``. For ELF API, the ``insn_off`` is the byte offset from the
 beginning of section (``btf_ext_info_sec->sec_name_off``).
 
+4.2 .BTF_ids section
+====================
+
+The .BTF_ids section encodes BTF ID values that are used within the kernel.
+
+This section is created during the kernel compilation with the help of
+macros defined in ``include/linux/btf_ids.h`` header file. Kernel code can
+use them to create lists and sets (sorted lists) of BTF ID values.
+
+The ``BTF_ID_LIST`` and ``BTF_ID`` macros define unsorted list of BTF ID values,
+with following syntax::
+
+  BTF_ID_LIST(list)
+  BTF_ID(type1, name1)
+  BTF_ID(type2, name2)
+
+resulting in following layout in .BTF_ids section::
+
+  __BTF_ID__type1__name1__1:
+  .zero 4
+  __BTF_ID__type2__name2__2:
+  .zero 4
+
+The ``u32 list[];`` variable is defined to access the list.
+
+The ``BTF_ID_UNUSED`` macro defines 4 zero bytes. It's used when we
+want to define unused entry in BTF_ID_LIST, like::
+
+      BTF_ID_LIST(bpf_skb_output_btf_ids)
+      BTF_ID(struct, sk_buff)
+      BTF_ID_UNUSED
+      BTF_ID(struct, task_struct)
+
+All the BTF ID lists and sets are compiled in the .BTF_ids section and
+resolved during the linking phase of kernel build by ``resolve_btfids`` tool.
+
 5. Using BTF
 ************
 
index c47d974..172f957 100644 (file)
@@ -86,6 +86,20 @@ then the next program in the chain (A) will see those changes,
 *not* the original input ``setsockopt`` arguments. The potentially
 modified values will be then passed down to the kernel.
 
+Large optval
+============
+When the ``optval`` is greater than the ``PAGE_SIZE``, the BPF program
+can access only the first ``PAGE_SIZE`` of that data. So it has two options:
+
+* Set ``optlen`` to zero, which indicates that the kernel should
+  use the original buffer from the userspace. Any modifications
+  done by the BPF program to the ``optval`` are ignored.
+* Set ``optlen`` to the value less than ``PAGE_SIZE``, which
+  indicates that the kernel should use BPF's trimmed ``optval``.
+
+When the BPF program returns with the ``optlen`` greater than
+``PAGE_SIZE``, the userspace will receive ``EFAULT`` errno.
+
 Example
 =======
 
index 2d8d2fe..f416204 100644 (file)
@@ -206,6 +206,14 @@ others should not be larger than the returned value.
 
 ::
 
+       bool
+       dma_need_sync(struct device *dev, dma_addr_t dma_addr);
+
+Returns %true if dma_sync_single_for_{device,cpu} calls are required to
+transfer memory ownership.  Returns %false if those calls can be skipped.
+
+::
+
        unsigned long
        dma_get_merge_boundary(struct device *dev);
 
index 6068266..7ca8c7b 100644 (file)
@@ -33,7 +33,7 @@ all combinations of get*(), pin*(), FOLL_LONGTERM, and more. Also, the
 pin_user_pages*() APIs are clearly distinct from the get_user_pages*() APIs, so
 that's a natural dividing line, and a good point to make separate wrapper calls.
 In other words, use pin_user_pages*() for DMA-pinned pages, and
-get_user_pages*() for other cases. There are four cases described later on in
+get_user_pages*() for other cases. There are five cases described later on in
 this document, to further clarify that concept.
 
 FOLL_PIN and FOLL_GET are mutually exclusive for a given gup call. However,
index ce4bbd9..b38379f 100644 (file)
@@ -114,12 +114,6 @@ the below options are available:
   To dynamically limit for which functions to generate reports, see the
   `DebugFS interface`_ blacklist/whitelist feature.
 
-  For ``__always_inline`` functions, replace ``__always_inline`` with
-  ``__no_kcsan_or_inline`` (which implies ``__always_inline``)::
-
-    static __no_kcsan_or_inline void foo(void) {
-        ...
-
 * To disable data race detection for a particular compilation unit, add to the
   ``Makefile``::
 
index ea55b24..1628862 100644 (file)
@@ -61,3 +61,43 @@ test, or an end-to-end test.
   kernel by installing a production configuration of the kernel on production
   hardware with a production userspace and then trying to exercise some behavior
   that depends on interactions between the hardware, the kernel, and userspace.
+
+KUnit isn't working, what should I do?
+======================================
+
+Unfortunately, there are a number of things which can break, but here are some
+things to try.
+
+1. Try running ``./tools/testing/kunit/kunit.py run`` with the ``--raw_output``
+   parameter. This might show details or error messages hidden by the kunit_tool
+   parser.
+2. Instead of running ``kunit.py run``, try running ``kunit.py config``,
+   ``kunit.py build``, and ``kunit.py exec`` independently. This can help track
+   down where an issue is occurring. (If you think the parser is at fault, you
+   can run it manually against stdin or a file with ``kunit.py parse``.)
+3. Running the UML kernel directly can often reveal issues or error messages
+   kunit_tool ignores. This should be as simple as running ``./vmlinux`` after
+   building the UML kernel (e.g., by using ``kunit.py build``). Note that UML
+   has some unusual requirements (such as the host having a tmpfs filesystem
+   mounted), and has had issues in the past when built statically and the host
+   has KASLR enabled. (On older host kernels, you may need to run ``setarch
+   `uname -m` -R ./vmlinux`` to disable KASLR.)
+4. Make sure the kernel .config has ``CONFIG_KUNIT=y`` and at least one test
+   (e.g. ``CONFIG_KUNIT_EXAMPLE_TEST=y``). kunit_tool will keep its .config
+   around, so you can see what config was used after running ``kunit.py run``.
+   It also preserves any config changes you might make, so you can
+   enable/disable things with ``make ARCH=um menuconfig`` or similar, and then
+   re-run kunit_tool.
+5. Try to run ``make ARCH=um defconfig`` before running ``kunit.py run``. This
+   may help clean up any residual config items which could be causing problems.
+6. Finally, try running KUnit outside UML. KUnit and KUnit tests can be
+   built into any kernel, or can be built as a module and loaded at runtime.
+   Doing so should allow you to determine if UML is causing the issue you're
+   seeing. When tests are built-in, they will execute when the kernel boots, and
+   modules will automatically execute associated tests when loaded. Test results
+   can be collected from ``/sys/kernel/debug/kunit/<test suite>/results``, and
+   can be parsed with ``kunit.py parse``. For more details, see "KUnit on
+   non-UML architectures" in :doc:`usage`.
+
+If none of the above tricks help, you are always welcome to email any issues to
+kunit-dev@googlegroups.com.
index a638989..91c4d00 100644 (file)
@@ -2,7 +2,6 @@
 DT_DOC_CHECKER ?= dt-doc-validate
 DT_EXTRACT_EX ?= dt-extract-example
 DT_MK_SCHEMA ?= dt-mk-schema
-DT_MK_SCHEMA_USERONLY_FLAG := $(if $(DT_SCHEMA_FILES), -u)
 
 DT_SCHEMA_MIN_VERSION = 2020.5
 
@@ -35,21 +34,40 @@ quiet_cmd_mk_schema = SCHEMA  $@
 
 DT_DOCS = $(shell $(find_cmd) | sed -e 's|^$(srctree)/||')
 
-DT_SCHEMA_FILES ?= $(DT_DOCS)
-
-extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES))
-extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dt.yaml, $(DT_SCHEMA_FILES))
-extra-$(CHECK_DT_BINDING) += processed-schema-examples.yaml
-
 override DTC_FLAGS := \
        -Wno-avoid_unnecessary_addr_size \
-       -Wno-graph_child_address
+       -Wno-graph_child_address \
+       -Wno-interrupt_provider
 
 $(obj)/processed-schema-examples.yaml: $(DT_DOCS) check_dtschema_version FORCE
        $(call if_changed,mk_schema)
 
-$(obj)/processed-schema.yaml: DT_MK_SCHEMA_FLAGS := $(DT_MK_SCHEMA_USERONLY_FLAG)
+ifeq ($(DT_SCHEMA_FILES),)
+
+# Unless DT_SCHEMA_FILES is specified, use the full schema for dtbs_check too.
+# Just copy processed-schema-examples.yaml
+
+$(obj)/processed-schema.yaml: $(obj)/processed-schema-examples.yaml FORCE
+       $(call if_changed,copy)
+
+DT_SCHEMA_FILES = $(DT_DOCS)
+
+else
+
+# If DT_SCHEMA_FILES is specified, use it for processed-schema.yaml
+
+$(obj)/processed-schema.yaml: DT_MK_SCHEMA_FLAGS := -u
 $(obj)/processed-schema.yaml: $(DT_SCHEMA_FILES) check_dtschema_version FORCE
        $(call if_changed,mk_schema)
 
-extra-y += processed-schema.yaml
+endif
+
+extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES))
+extra-$(CHECK_DT_BINDING) += $(patsubst $(src)/%.yaml,%.example.dt.yaml, $(DT_SCHEMA_FILES))
+extra-$(CHECK_DT_BINDING) += processed-schema-examples.yaml
+extra-$(CHECK_DTBS) += processed-schema.yaml
+
+# Hack: avoid 'Argument list too long' error for 'make clean'. Remove most of
+# build artifacts here before they are processed by scripts/Makefile.clean
+clean-files = $(shell find $(obj) \( -name '*.example.dts' -o \
+                       -name '*.example.dt.yaml' \) -delete 2>/dev/null)
index 7150474..10b8459 100644 (file)
@@ -47,7 +47,7 @@ Required properties:
                          &lsio_mu1 1 2
                          &lsio_mu1 1 3
                          &lsio_mu1 3 3>;
-               See Documentation/devicetree/bindings/mailbox/fsl,mu.txt
+               See Documentation/devicetree/bindings/mailbox/fsl,mu.yaml
                for detailed mailbox binding.
 
 Note: Each mu which supports general interrupt should have an alias correctly
index c4c9119..a0c6c5d 100644 (file)
@@ -80,14 +80,14 @@ examples:
         ranges = <1 0x00000000 0x42000000 0x02000000>,
                  <5 0x00000000 0x46000000 0x01000000>;
 
-        ethernet@1,01f00000 {
+        ethernet@1,1f00000 {
             compatible = "smsc,lan9115";
             reg = <1 0x01f00000 0x1000>;
             interrupts = <0 48 4>;
             phy-mode = "mii";
         };
 
-        uart@5,00200000 {
+        serial@5,200000 {
             compatible = "ns16550a";
             reg = <5 0x00200000 0x20>;
             interrupts = <0 49 4>;
index b5f3ed0..a753654 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Clock bindings for Freescale i.MX27
 
 maintainers:
-  - Fabio Estevam <fabio.estevam@freescale.com>
+  - Fabio Estevam <fabio.estevam@nxp.com>
 
 description: |
   The clock consumer should specify the desired clock by having the clock
index 1b6f75d..a25a374 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Clock bindings for Freescale i.MX31
 
 maintainers:
-  - Fabio Estevam <fabio.estevam@freescale.com>
+  - Fabio Estevam <fabio.estevam@nxp.com>
 
 description: |
   The clock consumer should specify the desired clock by having the clock
index f5c2b3d..4d9e7c7 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Clock bindings for Freescale i.MX5
 
 maintainers:
-  - Fabio Estevam <fabio.estevam@freescale.com>
+  - Fabio Estevam <fabio.estevam@nxp.com>
 
 description: |
   The clock consumer should specify the desired clock by having the clock
index 6e14e08..0d1db3f 100644 (file)
@@ -37,7 +37,7 @@ Optional properties:
        simple-card or audio-graph-card binding. See their binding
        documents on how to describe the way the sii902x device is
        connected to the rest of the audio system:
-       Documentation/devicetree/bindings/sound/simple-card.txt
+       Documentation/devicetree/bindings/sound/simple-card.yaml
        Documentation/devicetree/bindings/sound/audio-graph-card.txt
        Note: In case of the audio-graph-card binding the used port
        index should be 3.
index 5bf77f6..5a99490 100644 (file)
@@ -68,7 +68,7 @@ Required properties:
   datasheet
 - clocks : phandle to the PRE axi clock input, as described
   in Documentation/devicetree/bindings/clock/clock-bindings.txt and
-  Documentation/devicetree/bindings/clock/imx6q-clock.txt.
+  Documentation/devicetree/bindings/clock/imx6q-clock.yaml.
 - clock-names: should be "axi"
 - interrupts: should contain the PRE interrupt
 - fsl,iram: phandle pointing to the mmio-sram device node, that should be
@@ -94,7 +94,7 @@ Required properties:
   datasheet
 - clocks : phandles to the PRG ipg and axi clock inputs, as described
   in Documentation/devicetree/bindings/clock/clock-bindings.txt and
-  Documentation/devicetree/bindings/clock/imx6q-clock.txt.
+  Documentation/devicetree/bindings/clock/imx6q-clock.yaml.
 - clock-names: should be "ipg" and "axi"
 - fsl,pres: phandles to the PRE units attached to this PRG, with the fixed
   PRE as the first entry and the muxable PREs following.
index 38c637f..8e6e7d7 100644 (file)
@@ -30,8 +30,8 @@ Required properties:
                 "di2_sel" - IPU2 DI0 mux
                 "di3_sel" - IPU2 DI1 mux
         The needed clock numbers for each are documented in
-        Documentation/devicetree/bindings/clock/imx5-clock.txt, and in
-        Documentation/devicetree/bindings/clock/imx6q-clock.txt.
+        Documentation/devicetree/bindings/clock/imx5-clock.yaml, and in
+        Documentation/devicetree/bindings/clock/imx6q-clock.yaml.
 
 Optional properties:
  - pinctrl-names : should be "default" on i.MX53, not used on i.MX6q
index 41fd571..be69e0c 100644 (file)
@@ -33,7 +33,7 @@ additionalProperties: false
 
 examples:
   - |
-    sysreg {
+    sysreg@0 {
         compatible = "arm,versatile-sysreg", "syscon", "simple-mfd";
         reg = <0x00000 0x1000>;
 
index ec8ae74..7204da5 100644 (file)
@@ -24,7 +24,7 @@ properties:
     description: |
       Should contain a list of phandles pointing to display interface port
       of vop devices. vop definitions as defined in
-      Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
+      Documentation/devicetree/bindings/display/rockchip/rockchip-vop.yaml
 
 required:
   - compatible
index ba45558..e1c49b6 100644 (file)
@@ -12,7 +12,7 @@ Required properties for the top level node:
    Only the GPIO_ACTIVE_HIGH and GPIO_ACTIVE_LOW flags are supported.
 - #interrupt-cells : Specifies the number of cells needed to encode an
    interrupt. Should be 2. The first cell defines the interrupt number,
-   the second encodes the triger flags encoded as described in
+   the second encodes the trigger flags encoded as described in
    Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
 - compatible:
   - "mediatek,mt7621-gpio" for Mediatek controllers
index e134053..e6bbcae 100644 (file)
@@ -10,7 +10,7 @@ Interrupt number definition:
  16-31  : private  irq, and we use 16 as the co-processor timer.
  31-1024: common irq for soc ip.
 
-Interrupt triger mode: (Defined in dt-bindings/interrupt-controller/irq.h)
+Interrupt trigger mode: (Defined in dt-bindings/interrupt-controller/irq.h)
  IRQ_TYPE_LEVEL_HIGH (default)
  IRQ_TYPE_LEVEL_LOW
  IRQ_TYPE_EDGE_RISING
index 4438432..ad76edc 100644 (file)
@@ -87,7 +87,7 @@ Example:
                ranges;
 
                /* APU<->RPU0 IPI mailbox controller */
-               ipi_mailbox_rpu0: mailbox@ff90400 {
+               ipi_mailbox_rpu0: mailbox@ff990400 {
                        reg = <0xff990400 0x20>,
                              <0xff990420 0x20>,
                              <0xff990080 0x20>,
index 9134e9b..b12f9be 100644 (file)
@@ -10,7 +10,7 @@ such as network interfaces, crypto accelerator instances, L2 switches,
 etc.
 
 For an overview of the DPAA2 architecture and fsl-mc bus see:
-Documentation/networking/device_drivers/freescale/dpaa2/overview.rst
+Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst
 
 As described in the above overview, all DPAA2 objects in a DPRC share the
 same hardware "isolation context" and a 10-bit value called an ICID
index 8c4d649..2d7cdf1 100644 (file)
@@ -8,7 +8,7 @@ The embedded controller requires the SPI controller driver to signal readiness
 to receive a transfer (that is, when TX FIFO contains the response data) by
 strobing the ACK pin with the ready signal. See the "ready-gpios" property of the
 SSP binding as documented in:
-<Documentation/devicetree/bindings/spi/spi-pxa2xx.txt>.
+<Documentation/devicetree/bindings/spi/marvell,mmp2-ssp.yaml>.
 
 Example:
        &ssp3 {
index 64c20c9..85fefe3 100644 (file)
@@ -22,6 +22,7 @@ select:
           - amlogic,meson8m2-dwmac
           - amlogic,meson-gxbb-dwmac
           - amlogic,meson-axg-dwmac
+          - amlogic,meson-g12a-dwmac
   required:
     - compatible
 
@@ -36,6 +37,7 @@ allOf:
               - amlogic,meson8m2-dwmac
               - amlogic,meson-gxbb-dwmac
               - amlogic,meson-axg-dwmac
+              - amlogic,meson-g12a-dwmac
 
     then:
       properties:
@@ -95,6 +97,7 @@ properties:
           - amlogic,meson8m2-dwmac
           - amlogic,meson-gxbb-dwmac
           - amlogic,meson-axg-dwmac
+          - amlogic,meson-g12a-dwmac
     contains:
       enum:
         - snps,dwmac-3.70a
index f66bb7e..bf7328a 100644 (file)
@@ -1,257 +1,4 @@
 Distributed Switch Architecture Device Tree Bindings
 ----------------------------------------------------
 
-Switches are true Linux devices and can be probed by any means. Once
-probed, they register to the DSA framework, passing a node
-pointer. This node is expected to fulfil the following binding, and
-may contain additional properties as required by the device it is
-embedded within.
-
-Required properties:
-
-- ports                : A container for child nodes representing switch ports.
-
-Optional properties:
-
-- dsa,member   : A two element list indicates which DSA cluster, and position
-                 within the cluster a switch takes. <0 0> is cluster 0,
-                 switch 0. <0 1> is cluster 0, switch 1. <1 0> is cluster 1,
-                 switch 0. A switch not part of any cluster (single device
-                 hanging off a CPU port) must not specify this property
-
-The ports container has the following properties
-
-Required properties:
-
-- #address-cells       : Must be 1
-- #size-cells          : Must be 0
-
-Each port children node must have the following mandatory properties:
-- reg                  : Describes the port address in the switch
-
-An uplink/downlink port between switches in the cluster has the following
-mandatory property:
-
-- link                 : Should be a list of phandles to other switch's DSA
-                         port. This port is used as the outgoing port
-                         towards the phandle ports. The full routing
-                         information must be given, not just the one hop
-                         routes to neighbouring switches.
-
-A CPU port has the following mandatory property:
-
-- ethernet             : Should be a phandle to a valid Ethernet device node.
-                          This host device is what the switch port is
-                         connected to.
-
-A user port has the following optional property:
-
-- label                        : Describes the label associated with this port, which
-                          will become the netdev name.
-
-Port child nodes may also contain the following optional standardised
-properties, described in binding documents:
-
-- phy-handle           : Phandle to a PHY on an MDIO bus. See
-                         Documentation/devicetree/bindings/net/ethernet.txt
-                         for details.
-
-- phy-mode             : See
-                         Documentation/devicetree/bindings/net/ethernet.txt
-                         for details.
-
-- fixed-link           : Fixed-link subnode describing a link to a non-MDIO
-                         managed entity. See
-                         Documentation/devicetree/bindings/net/fixed-link.txt
-                         for details.
-
-The MAC address will be determined using the optional properties
-defined in ethernet.txt.
-
-Example
-
-The following example shows three switches on three MDIO busses,
-linked into one DSA cluster.
-
-&mdio1 {
-       #address-cells = <1>;
-       #size-cells = <0>;
-
-       switch0: switch0@0 {
-               compatible = "marvell,mv88e6085";
-               reg = <0>;
-
-               dsa,member = <0 0>;
-
-               ports {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       port@0 {
-                               reg = <0>;
-                               label = "lan0";
-                       };
-
-                       port@1 {
-                               reg = <1>;
-                               label = "lan1";
-                               local-mac-address = [00 00 00 00 00 00];
-                       };
-
-                       port@2 {
-                               reg = <2>;
-                               label = "lan2";
-                       };
-
-                       switch0port5: port@5 {
-                               reg = <5>;
-                               phy-mode = "rgmii-txid";
-                               link = <&switch1port6
-                                       &switch2port9>;
-                               fixed-link {
-                                       speed = <1000>;
-                                       full-duplex;
-                               };
-                       };
-
-                       port@6 {
-                               reg = <6>;
-                               ethernet = <&fec1>;
-                               fixed-link {
-                                       speed = <100>;
-                                       full-duplex;
-                               };
-                       };
-               };
-       };
-};
-
-&mdio2 {
-       #address-cells = <1>;
-       #size-cells = <0>;
-
-       switch1: switch1@0 {
-               compatible = "marvell,mv88e6085";
-               reg = <0>;
-
-               dsa,member = <0 1>;
-
-               ports {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       port@0 {
-                               reg = <0>;
-                               label = "lan3";
-                               phy-handle = <&switch1phy0>;
-                       };
-
-                       port@1 {
-                               reg = <1>;
-                               label = "lan4";
-                               phy-handle = <&switch1phy1>;
-                       };
-
-                       port@2 {
-                               reg = <2>;
-                               label = "lan5";
-                               phy-handle = <&switch1phy2>;
-                       };
-
-                       switch1port5: port@5 {
-                               reg = <5>;
-                               link = <&switch2port9>;
-                               phy-mode = "rgmii-txid";
-                               fixed-link {
-                                       speed = <1000>;
-                                       full-duplex;
-                               };
-                       };
-
-                       switch1port6: port@6 {
-                               reg = <6>;
-                               phy-mode = "rgmii-txid";
-                               link = <&switch0port5>;
-                               fixed-link {
-                                       speed = <1000>;
-                                       full-duplex;
-                               };
-                       };
-               };
-               mdio-bus {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       switch1phy0: switch1phy0@0 {
-                               reg = <0>;
-                       };
-                       switch1phy1: switch1phy0@1 {
-                               reg = <1>;
-                       };
-                       switch1phy2: switch1phy0@2 {
-                               reg = <2>;
-                       };
-               };
-        };
-};
-
-&mdio4 {
-       #address-cells = <1>;
-       #size-cells = <0>;
-
-       switch2: switch2@0 {
-               compatible = "marvell,mv88e6085";
-               reg = <0>;
-
-               dsa,member = <0 2>;
-
-               ports {
-                       #address-cells = <1>;
-                       #size-cells = <0>;
-                       port@0 {
-                               reg = <0>;
-                               label = "lan6";
-                       };
-
-                       port@1 {
-                               reg = <1>;
-                               label = "lan7";
-                       };
-
-                       port@2 {
-                               reg = <2>;
-                               label = "lan8";
-                       };
-
-                       port@3 {
-                               reg = <3>;
-                               label = "optical3";
-                               fixed-link {
-                                       speed = <1000>;
-                                       full-duplex;
-                                       link-gpios = <&gpio6 2
-                                             GPIO_ACTIVE_HIGH>;
-                               };
-                       };
-
-                       port@4 {
-                               reg = <4>;
-                               label = "optical4";
-                               fixed-link {
-                                       speed = <1000>;
-                                       full-duplex;
-                                       link-gpios = <&gpio6 3
-                                             GPIO_ACTIVE_HIGH>;
-                               };
-                       };
-
-                       switch2port9: port@9 {
-                               reg = <9>;
-                               phy-mode = "rgmii-txid";
-                               link = <&switch1port5
-                                       &switch0port5>;
-                               fixed-link {
-                                       speed = <1000>;
-                                       full-duplex;
-                               };
-                       };
-               };
-       };
-};
+See Documentation/devicetree/bindings/net/dsa/dsa.yaml for the documentation.
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.yaml b/Documentation/devicetree/bindings/net/dsa/dsa.yaml
new file mode 100644 (file)
index 0000000..faea214
--- /dev/null
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/dsa/dsa.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ethernet Switch Device Tree Bindings
+
+maintainers:
+  - Andrew Lunn <andrew@lunn.ch>
+  - Florian Fainelli <f.fainelli@gmail.com>
+  - Vivien Didelot <vivien.didelot@gmail.com>
+
+description:
+  This binding represents Ethernet Switches which have a dedicated CPU
+  port. That port is usually connected to an Ethernet Controller of the
+  SoC. Such setups are typical for embedded devices.
+
+select: false
+
+properties:
+  $nodename:
+    pattern: "^switch(@.*)?$"
+
+  dsa,member:
+    minItems: 2
+    maxItems: 2
+    description:
+      A two element list indicates which DSA cluster, and position within the
+      cluster a switch takes. <0 0> is cluster 0, switch 0. <0 1> is cluster 0,
+      switch 1. <1 0> is cluster 1, switch 0. A switch not part of any cluster
+      (single device hanging off a CPU port) must not specify this property
+    $ref: /schemas/types.yaml#/definitions/uint32-array
+
+patternProperties:
+  "^(ethernet-)?ports$":
+    type: object
+    properties:
+      '#address-cells':
+        const: 1
+      '#size-cells':
+        const: 0
+
+    patternProperties:
+      "^(ethernet-)?port@[0-9]+$":
+        type: object
+        description: Ethernet switch ports
+
+        properties:
+          reg:
+            description: Port number
+
+          label:
+            description:
+              Describes the label associated with this port, which will become
+              the netdev name
+            $ref: /schemas/types.yaml#/definitions/string
+
+          link:
+            description:
+              Should be a list of phandles to other switch's DSA port. This
+              port is used as the outgoing port towards the phandle ports. The
+              full routing information must be given, not just the one hop
+              routes to neighbouring switches
+            $ref: /schemas/types.yaml#/definitions/phandle-array
+
+          ethernet:
+            description:
+              Should be a phandle to a valid Ethernet device node.  This host
+              device is what the switch port is connected to
+            $ref: /schemas/types.yaml#/definitions/phandle
+
+          phy-handle: true
+
+          phy-mode: true
+
+          fixed-link: true
+
+          mac-address: true
+
+        required:
+          - reg
+
+        additionalProperties: false
+
+oneOf:
+  - required:
+    - ports
+  - required:
+    - ethernet-ports
+
+...
index 66a129f..7a271d0 100644 (file)
@@ -4,10 +4,15 @@ Microchip Ocelot switch driver family
 Felix
 -----
 
-The VSC9959 core is currently the only switch supported by the driver, and is
-found in the NXP LS1028A. It is a PCI device, part of the larger ENETC root
-complex. As a result, the ethernet-switch node is a sub-node of the PCIe root
-complex node and its "reg" property conforms to the parent node bindings:
+Currently the switches supported by the felix driver are:
+
+- VSC9959 (Felix)
+- VSC9953 (Seville)
+
+The VSC9959 switch is found in the NXP LS1028A. It is a PCI device, part of the
+larger ENETC root complex. As a result, the ethernet-switch node is a sub-node
+of the PCIe root complex node and its "reg" property conforms to the parent
+node bindings:
 
 * reg: Specifies PCIe Device Number and Function Number of the endpoint device,
   in this case for the Ethernet L2Switch it is PF5 (of device 0, bus 0).
@@ -114,3 +119,95 @@ Example:
                };
        };
 };
+
+The VSC9953 switch is found inside NXP T1040. It is a platform device with the
+following required properties:
+
+- compatible:
+       Must be "mscc,vsc9953-switch".
+
+Supported PHY interface types (appropriate SerDes protocol setting changes are
+needed in the RCW binary):
+
+* phy_mode = "internal": on ports 8 and 9
+* phy_mode = "sgmii": on ports 0, 1, 2, 3, 4, 5, 6, 7
+* phy_mode = "qsgmii": on ports 0, 1, 2, 3, 4, 5, 6, 7
+
+Example:
+
+&soc {
+       ethernet-switch@800000 {
+               #address-cells = <0x1>;
+               #size-cells = <0x0>;
+               compatible = "mscc,vsc9953-switch";
+               little-endian;
+               reg = <0x800000 0x290000>;
+
+               ports {
+                       #address-cells = <0x1>;
+                       #size-cells = <0x0>;
+
+                       port@0 {
+                               reg = <0x0>;
+                               label = "swp0";
+                       };
+
+                       port@1 {
+                               reg = <0x1>;
+                               label = "swp1";
+                       };
+
+                       port@2 {
+                               reg = <0x2>;
+                               label = "swp2";
+                       };
+
+                       port@3 {
+                               reg = <0x3>;
+                               label = "swp3";
+                       };
+
+                       port@4 {
+                               reg = <0x4>;
+                               label = "swp4";
+                       };
+
+                       port@5 {
+                               reg = <0x5>;
+                               label = "swp5";
+                       };
+
+                       port@6 {
+                               reg = <0x6>;
+                               label = "swp6";
+                       };
+
+                       port@7 {
+                               reg = <0x7>;
+                               label = "swp7";
+                       };
+
+                       port@8 {
+                               reg = <0x8>;
+                               phy-mode = "internal";
+                               ethernet = <&enet0>;
+
+                               fixed-link {
+                                       speed = <2500>;
+                                       full-duplex;
+                               };
+                       };
+
+                       port@9 {
+                               reg = <0x9>;
+                               phy-mode = "internal";
+                               status = "disabled";
+
+                               fixed-link {
+                                       speed = <2500>;
+                                       full-duplex;
+                               };
+                       };
+               };
+       };
+};
index 9b1f114..a9e547a 100644 (file)
@@ -162,6 +162,18 @@ properties:
     description:
       Specifies a reference to a node representing a SFP cage.
 
+  rx-internal-delay-ps:
+    description: |
+      RGMII Receive PHY Clock Delay defined in picoseconds.  This is used for
+      PHYs that have configurable RX internal delays.  If this property is
+      present then the PHY applies the RX delay.
+
+  tx-internal-delay-ps:
+    description: |
+      RGMII Transmit PHY Clock Delay defined in picoseconds.  This is used for
+      PHYs that have configurable TX internal delays. If this property is
+      present then the PHY applies the TX delay.
+
 required:
   - reg
 
index 219bcbd..9ef5bac 100644 (file)
@@ -3,7 +3,7 @@ MediaTek SoC built-in Bluetooth Devices
 
 This device is a serial attached device to BTIF device and thus it must be a
 child node of the serial node with BTIF. The dt-bindings details for BTIF
-device can be known via Documentation/devicetree/bindings/serial/8250.txt.
+device can be known via Documentation/devicetree/bindings/serial/8250.yaml.
 
 Required properties:
 
index 5ff37c6..87a27d7 100644 (file)
@@ -31,6 +31,8 @@ Optional properties:
                          VSC8531_LINK_100_ACTIVITY (2),
                          VSC8531_LINK_ACTIVITY (0) and
                          VSC8531_DUPLEX_COLLISION (8).
+- load-save-gpios      : GPIO used for the load/save operation of the PTP
+                         hardware clock (PHC).
 
 
 Table: 1 - Edge rate change
@@ -67,4 +69,5 @@ Example:
                 vsc8531,edge-slowdown  = <7>;
                 vsc8531,led-0-mode     = <LINK_1000_ACTIVITY>;
                 vsc8531,led-1-mode     = <LINK_100_ACTIVITY>;
+               load-save-gpios         = <&gpio 10 GPIO_ACTIVE_HIGH>;
         };
index 554dcd7..c6716ac 100644 (file)
@@ -24,7 +24,7 @@ description: |
   IEEE 802.3 Standard Media Independent Interface (MII), the IEEE 802.3 Gigabit
   Media Independent Interface (GMII) or Reduced GMII (RGMII).
 
-  Specifications about the charger can be found at:
+  Specifications about the Ethernet PHY can be found at:
     https://www.ti.com/lit/gpn/dp83867ir
 
 properties:
index 5b69ef0..cf40b46 100644 (file)
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
 title: TI DP83869 ethernet PHY
 
 allOf:
-  - $ref: "ethernet-controller.yaml#"
+  - $ref: "ethernet-phy.yaml#"
 
 maintainers:
   - Dan Murphy <dmurphy@ti.com>
@@ -24,7 +24,7 @@ description: |
   conversions.  The DP83869HM can also support Bridge Conversion from RGMII to
   SGMII and SGMII to RGMII.
 
-  Specifications about the charger can be found at:
+  Specifications about the Ethernet PHY can be found at:
     http://www.ti.com/lit/ds/symlink/dp83869hm.pdf
 
 properties:
@@ -64,6 +64,18 @@ properties:
        Operational mode for the PHY.  If this is not set then the operational
        mode is set by the straps. see dt-bindings/net/ti-dp83869.h for values
 
+  rx-internal-delay-ps:
+    description: Delay is in picoseconds
+    enum: [ 250, 500, 750, 1000, 1250, 1500, 1750, 2000, 2250, 2500, 2750, 3000,
+            3250, 3500, 3750, 4000 ]
+    default: 2000
+
+  tx-internal-delay-ps:
+    description: Delay is in picoseconds
+    enum: [ 250, 500, 750, 1000, 1250, 1500, 1750, 2000, 2250, 2500, 2750, 3000,
+            3250, 3500, 3750, 4000 ]
+    default: 2000
+
 required:
   - reg
 
@@ -80,5 +92,7 @@ examples:
         ti,op-mode = <DP83869_RGMII_COPPER_ETHERNET>;
         ti,max-output-impedance = "true";
         ti,clk-output-sel = <DP83869_CLK_O_SEL_CHN_A_RCLK>;
+        rx-internal-delay-ps = <2000>;
+        tx-internal-delay-ps = <2000>;
       };
     };
index b686131..1b8e8b4 100644 (file)
@@ -114,7 +114,7 @@ with values derived from the SoC user manual.
    [flags]>
 
 On other mach-shmobile platforms GPIO is handled by the gpio-rcar driver.
-Please refer to Documentation/devicetree/bindings/gpio/renesas,gpio-rcar.txt
+Please refer to Documentation/devicetree/bindings/gpio/renesas,rcar-gpio.yaml
 for documentation of the GPIO device tree bindings on those platforms.
 
 
index 2696826..d5f6919 100644 (file)
@@ -5,7 +5,7 @@ It is based on common bindings for device graphs.
 see ${LINUX}/Documentation/devicetree/bindings/graph.txt
 
 Basically, Audio Graph Card property is same as Simple Card.
-see ${LINUX}/Documentation/devicetree/bindings/sound/simple-card.txt
+see ${LINUX}/Documentation/devicetree/bindings/sound/simple-card.yaml
 
 Below are same as Simple-Card.
 
index 4d51f3f..a6ffcde 100644 (file)
@@ -5,7 +5,7 @@ codec or external codecs.
 
 sti sound drivers allows to expose sti SoC audio interface through the
 generic ASoC simple card. For details about sound card declaration please refer to
-Documentation/devicetree/bindings/sound/simple-card.txt.
+Documentation/devicetree/bindings/sound/simple-card.yaml.
 
 1) sti-uniperiph-dai: audio dai device.
 ---------------------------------------
index 9147df2..38efb50 100644 (file)
@@ -34,12 +34,15 @@ properties:
     maxItems: 1
 
   clocks:
-    maxItems: 1
+    minItems: 1
+    maxItems: 2
+    items:
+      - description: controller register bus clock
+      - description: baud rate generator and delay control clock
 
   clock-names:
-    description: input clock for the baud rate generator
-    items:
-      - const: core
+    minItems: 1
+    maxItems: 2
 
 if:
   properties:
@@ -51,17 +54,22 @@ if:
 then:
   properties:
     clocks:
-      contains:
-        items:
-          - description: controller register bus clock
-          - description: baud rate generator and delay control clock
+      minItems: 2
 
     clock-names:
-      minItems: 2
       items:
         - const: core
         - const: pclk
 
+else:
+  properties:
+    clocks:
+      maxItems: 1
+
+    clock-names:
+      items:
+        - const: core
+
 required:
   - compatible
   - reg
index 790311a..c8c1e91 100644 (file)
@@ -19,7 +19,7 @@ Required properties:
 
 SPI Controller nodes must be child of GENI based Qualcomm Universal
 Peripharal. Please refer GENI based QUP wrapper controller node bindings
-described in Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt.
+described in Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.yaml.
 
 SPI slave nodes must be children of the SPI master node and conform to SPI bus
 binding as described in Documentation/devicetree/bindings/spi/spi-bus.txt.
index fcd25a0..727d045 100644 (file)
@@ -41,7 +41,7 @@ examples:
     #include <dt-bindings/interrupt-controller/arm-gic.h>
 
     // Example 1: SDM845 TSENS
-    soc: soc@0 {
+    soc: soc {
             #address-cells = <2>;
             #size-cells = <2>;
 
index b8515d3..3ec9cc8 100644 (file)
@@ -224,7 +224,7 @@ examples:
     #include <dt-bindings/thermal/thermal.h>
 
     // Example 1: SDM845 TSENS
-    soc: soc@0 {
+    soc {
             #address-cells = <2>;
             #size-cells = <2>;
 
index 25b9209..ea14de8 100644 (file)
@@ -35,7 +35,7 @@ examples:
     #include <dt-bindings/soc/ti,sci_pm_domain.h>
     vtm: thermal@42050000 {
         compatible = "ti,am654-vtm";
-        reg = <0x0 0x42050000 0x0 0x25c>;
+        reg = <0x42050000 0x25c>;
         power-domains = <&k3_pds 80 TI_SCI_PD_EXCLUSIVE>;
         #thermal-sensor-cells = <1>;
     };
index 15cfec0..f5c7e99 100644 (file)
@@ -8,7 +8,7 @@ regs is accessed by cpu co-processor 4 registers with mtcr/mfcr.
  - PTIM_CTLR "cr<0, 14>" Control reg to start reset timer.
  - PTIM_TSR  "cr<1, 14>" Interrupt cleanup status reg.
  - PTIM_CCVR "cr<3, 14>" Current counter value reg.
- - PTIM_LVR  "cr<6, 14>" Window value reg to triger next event.
+ - PTIM_LVR  "cr<6, 14>" Window value reg to trigger next event.
 
 ==============================
 timer node bindings definition
index e4e83d3..8b019ac 100644 (file)
@@ -127,8 +127,8 @@ examples:
                 #address-cells = <1>;
                 #size-cells = <0>;
 
-                string@0409 {
-                        reg = <0x0409>;
+                string@409 {
+                        reg = <0x409>;
                         manufacturer = "ASPEED";
                         product = "USB Virtual Hub";
                         serial-number = "0000";
index 220cf46..8c74a99 100644 (file)
@@ -1,4 +1,4 @@
-:orphan:
+.. SPDX-License-Identifier: GPL-2.0
 
 Writing DeviceTree Bindings in json-schema
 ==========================================
@@ -124,9 +124,12 @@ dtc must also be built with YAML output support enabled. This requires that
 libyaml and its headers be installed on the host system. For some distributions
 that involves installing the development package, such as:
 
-Debian:
+Debian::
+
   apt-get install libyaml-dev
-Fedora:
+
+Fedora::
+
   dnf -y install libyaml-devel
 
 Running checks
index e0b58c3..eaaaafc 100644 (file)
@@ -342,7 +342,8 @@ LED
 MDIO
   devm_mdiobus_alloc()
   devm_mdiobus_alloc_size()
-  devm_mdiobus_free()
+  devm_mdiobus_register()
+  devm_of_mdiobus_register()
 
 MEM
   devm_free_pages()
index 6e71f67..bc7e1fc 100644 (file)
@@ -451,7 +451,7 @@ The bridge driver also has some helper functions it can use:
                                        "module_foo", "chipid", 0x36, NULL);
 
 This loads the given module (can be ``NULL`` if no module needs to be loaded)
-and calls :c:func:`i2c_new_device` with the given ``i2c_adapter`` and
+and calls :c:func:`i2c_new_client_device` with the given ``i2c_adapter`` and
 chip/address arguments. If all goes well, then it registers the subdev with
 the v4l2_device.
 
index 8e26707..8fdb78f 100644 (file)
@@ -25,7 +25,7 @@ size when creating the filesystem.
 Currently 3 filesystems support DAX: ext2, ext4 and xfs.  Enabling DAX on them
 is different.
 
-Enabling DAX on ext4 and ext2
+Enabling DAX on ext2
 -----------------------------
 
 When mounting the filesystem, use the "-o dax" option on the command line or
@@ -33,8 +33,8 @@ add 'dax' to the options in /etc/fstab.  This works to enable DAX on all files
 within the filesystem.  It is equivalent to the '-o dax=always' behavior below.
 
 
-Enabling DAX on xfs
--------------------
+Enabling DAX on xfs and ext4
+----------------------------
 
 Summary
 -------
index 1da7a4b..728ab57 100644 (file)
@@ -185,13 +185,17 @@ byte offsets over a base for the register block.
 
 If you want to dump an u32 array in debugfs, you can create file with::
 
+    struct debugfs_u32_array {
+       u32 *array;
+       u32 n_elements;
+    };
+
     void debugfs_create_u32_array(const char *name, umode_t mode,
                        struct dentry *parent,
-                       u32 *array, u32 elements);
+                       struct debugfs_u32_array *array);
 
-The "array" argument provides data, and the "elements" argument is
-the number of elements in the array. Note: Once array is created its
-size can not be changed.
+The "array" argument wraps a pointer to the array's data and the number
+of its elements. Note: Once array is created its size can not be changed.
 
 There is a helper function to create device related seq_file::
 
index 3e4c0ee..e99ff3f 100644 (file)
@@ -39,3 +39,6 @@ is encrypted as well as the data itself.
 
 Verity files cannot have blocks allocated past the end of the verity
 metadata.
+
+Verity and DAX are not compatible and attempts to set both of these flags
+on a file will fail.
index 4cc7432..1711235 100644 (file)
@@ -197,11 +197,14 @@ pp_power_profile_mode
 .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
    :doc: pp_power_profile_mode
 
-busy_percent
-~~~~~~~~~~~~
+*_busy_percent
+~~~~~~~~~~~~~~
 
 .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
-   :doc: busy_percent
+   :doc: gpu_busy_percent
+
+.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+   :doc: mem_busy_percent
 
 GPU Product Information
 =======================
index 0b8cd83..38d951f 100644 (file)
@@ -1,14 +1,26 @@
 ==============================
-Linux I2C slave eeprom backend
+Linux I2C slave EEPROM backend
 ==============================
 
-by Wolfram Sang <wsa@sang-engineering.com> in 2014-15
+by Wolfram Sang <wsa@sang-engineering.com> in 2014-20
 
-This is a proof-of-concept backend which acts like an EEPROM on the connected
-I2C bus. The memory contents can be modified from userspace via this file
-located in sysfs::
+This backend simulates an EEPROM on the connected I2C bus. Its memory contents
+can be accessed from userspace via this file located in sysfs::
 
        /sys/bus/i2c/devices/<device-directory>/slave-eeprom
 
+The following types are available: 24c02, 24c32, 24c64, and 24c512. Read-only
+variants are also supported. The name needed for instantiating has the form
+'slave-<type>[ro]'. Examples follow:
+
+24c02, read/write, address 0x64:
+  # echo slave-24c02 0x1064 > /sys/bus/i2c/devices/i2c-1/new_device
+
+24c512, read-only, address 0x42:
+  # echo slave-24c512ro 0x1042 > /sys/bus/i2c/devices/i2c-1/new_device
+
+You can also preload data during boot if a device-property named
+'firmware-name' contains a valid filename (DT or ACPI only).
+
 As of 2015, Linux doesn't support poll on binary sysfs files, so there is no
 notification when another master changed the content.
index c2e2963..64689d1 100644 (file)
@@ -57,7 +57,7 @@ SMBus Quick Command
 
 This sends a single bit to the device, at the place of the Rd/Wr bit::
 
-  A Addr Rd/Wr [A] P
+  S Addr Rd/Wr [A] P
 
 Functionality flag: I2C_FUNC_SMBUS_QUICK
 
index a45cccf..85ccc87 100644 (file)
@@ -182,7 +182,8 @@ module 8123.ko, which is built from the following files::
        8123_pci.c
        8123_bin.o_shipped      <= Binary blob
 
---- 3.1 Shared Makefile
+3.1 Shared Makefile
+-------------------
 
        An external module always includes a wrapper makefile that
        supports building the module using "make" with no arguments.
@@ -470,9 +471,9 @@ build.
 
        The syntax of the Module.symvers file is::
 
-       <CRC>       <Symbol>         <Module>                         <Export Type>     <Namespace>
+               <CRC>       <Symbol>         <Module>                         <Export Type>     <Namespace>
 
-       0xe1cc2a05  usb_stor_suspend drivers/usb/storage/usb-storage  EXPORT_SYMBOL_GPL USB_STORAGE
+               0xe1cc2a05  usb_stor_suspend drivers/usb/storage/usb-storage  EXPORT_SYMBOL_GPL USB_STORAGE
 
        The fields are separated by tabs and values may be empty (e.g.
        if no namespace is defined for an exported symbol).
index 5033938..3b25655 100644 (file)
@@ -101,7 +101,7 @@ Structure randomisation
 
 If you enable ``CONFIG_GCC_PLUGIN_RANDSTRUCT``, you will need to
 pre-generate the random seed in
-``scripts/gcc-plgins/randomize_layout_seed.h`` so the same value
+``scripts/gcc-plugins/randomize_layout_seed.h`` so the same value
 is used in rebuilds.
 
 Debug info conflicts
index c5a646b..2b75760 100644 (file)
@@ -68,4 +68,4 @@ and frameworks can be controlled from the same registers, all of these
 drivers access their registers through the same regmap.
 
 For more information regarding the devicetree bindings of the TCU drivers,
-have a look at Documentation/devicetree/bindings/timer/ingenic,tcu.txt.
+have a look at Documentation/devicetree/bindings/timer/ingenic,tcu.yaml.
index e93d982..82fce60 100644 (file)
@@ -434,7 +434,7 @@ can set up your network then:
        ifconfig arc0 insight
        route add insight arc0
        route add freedom arc0  /* I would use the subnet here (like I said
-                                       to to in "single protocol" above),
+                                       to in "single protocol" above),
                                        but the rest of the subnet
                                        unfortunately lies across the PPP
                                        link on freedom, which confuses
index 824afd7..f060cfb 100644 (file)
@@ -6,7 +6,7 @@ AX.25
 
 To use the amateur radio protocols within Linux you will need to get a
 suitable copy of the AX.25 Utilities. More detailed information about
-AX.25, NET/ROM and ROSE, associated programs and and utilities can be
+AX.25, NET/ROM and ROSE, associated programs and utilities can be
 found on http://www.linux-ax25.org.
 
 There is an active mailing list for discussing Linux amateur radio matters
index 1802094..74821d2 100644 (file)
@@ -73,7 +73,7 @@ lower value. This will make the mesh more responsive to topology changes, but
 will also increase the overhead.
 
 Information about the current state can be accessed via the batadv generic
-netlink family. batctl provides human readable version via its debug tables
+netlink family. batctl provides human readable version via its debug tables
 subcommands.
 
 
@@ -115,8 +115,8 @@ are prefixed with "batman-adv:" So to see just these messages try::
   $ dmesg | grep batman-adv
 
 When investigating problems with your mesh network, it is sometimes necessary to
-see more detail debug messages. This must be enabled when compiling the
-batman-adv module. When building batman-adv as part of kernel, use "make
+see more detailed debug messages. This must be enabled when compiling the
+batman-adv module. When building batman-adv as part of the kernel, use "make
 menuconfig" and enable the option ``B.A.T.M.A.N. debugging``
 (``CONFIG_BATMAN_ADV_DEBUG=y``).
 
@@ -160,7 +160,7 @@ IRC:
   #batman on irc.freenode.org
 Mailing-list:
   b.a.t.m.a.n@open-mesh.org (optional subscription at
-  https://lists.open-mesh.org/mm/listinfo/b.a.t.m.a.n)
+  https://lists.open-mesh.org/mailman3/postorius/lists/b.a.t.m.a.n.lists.open-mesh.org/)
 
 You can also contact the Authors:
 
index 4cef88d..638ac1e 100644 (file)
@@ -144,7 +144,7 @@ UCAN_COMMAND_SET_BITTIMING
 
 *Host2Dev; mandatory*
 
-Setup bittiming by sending the the structure
+Setup bittiming by sending the structure
 ``ucan_ctl_payload_t.cmd_set_bittiming`` (see ``struct bittiming`` for
 details)
 
@@ -232,7 +232,7 @@ UCAN_IN_TX_COMPLETE
   zero
 
 The CAN device has sent a message to the CAN bus. It answers with a
-list of of tuples <echo-ids, flags>.
+list of tuples <echo-ids, flags>.
 
 The echo-id identifies the frame from (echos the id from a previous
 UCAN_OUT_TX message). The flag indicates the result of the
index dde16be..91e5c33 100644 (file)
@@ -192,6 +192,9 @@ FIONREAD
        Works as in udp(7): returns in the ``int`` argument pointer the size of
        the next pending datagram in bytes, or 0 when no datagram is pending.
 
+SIOCOUTQ
+       Returns the number of unsent data bytes in the socket send queue as ``int``
+       into the buffer specified by the argument pointer.
 
 Other tunables
 ==============
diff --git a/Documentation/networking/device_drivers/appletalk/index.rst b/Documentation/networking/device_drivers/appletalk/index.rst
new file mode 100644 (file)
index 0000000..de7507f
--- /dev/null
@@ -0,0 +1,19 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+AppleTalk Device Drivers
+========================
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   cops
+   ltpc
+
+.. only::  subproject and html
+
+   Indices
+   =======
+
+   * :ref:`genindex`
diff --git a/Documentation/networking/device_drivers/atm/index.rst b/Documentation/networking/device_drivers/atm/index.rst
new file mode 100644 (file)
index 0000000..7b593f0
--- /dev/null
@@ -0,0 +1,20 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+Asynchronous Transfer Mode (ATM) Device Drivers
+===============================================
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   cxacru
+   fore200e
+   iphase
+
+.. only::  subproject and html
+
+   Indices
+   =======
+
+   * :ref:`genindex`
diff --git a/Documentation/networking/device_drivers/cable/index.rst b/Documentation/networking/device_drivers/cable/index.rst
new file mode 100644 (file)
index 0000000..cce3c43
--- /dev/null
@@ -0,0 +1,18 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+Cable Modem Device Drivers
+==========================
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   sb1000
+
+.. only::  subproject and html
+
+   Indices
+   =======
+
+   * :ref:`genindex`
diff --git a/Documentation/networking/device_drivers/cellular/index.rst b/Documentation/networking/device_drivers/cellular/index.rst
new file mode 100644 (file)
index 0000000..fc1812d
--- /dev/null
@@ -0,0 +1,18 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+Cellular Modem Device Drivers
+=============================
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   qualcomm/rmnet
+
+.. only::  subproject and html
+
+   Indices
+   =======
+
+   * :ref:`genindex`
@@ -4,8 +4,6 @@
 3Com Vortex device driver
 =========================
 
-Documentation/networking/device_drivers/3com/vortex.rst
-
 Andrew Morton
 
 30 April 2000
@@ -19,8 +19,10 @@ pool management for network interfaces.
 This document provides an overview the Linux DPIO driver, its
 subcomponents, and its APIs.
 
-See Documentation/networking/device_drivers/freescale/dpaa2/overview.rst for
-a general overview of DPAA2 and the general DPAA2 driver architecture in Linux.
+See
+Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst
+for a general overview of DPAA2 and the general DPAA2 driver architecture
+in Linux.
 
 Driver Overview
 ---------------
@@ -33,7 +33,8 @@ hardware resources, like queues, do not have a corresponding MC object and
 are treated as internal resources of other objects.
 
 For a more detailed description of the DPAA2 architecture and its object
-abstractions see *Documentation/networking/device_drivers/freescale/dpaa2/overview.rst*.
+abstractions see
+*Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst*.
 
 Each Linux net device is built on top of a Datapath Network Interface (DPNI)
 object and uses Buffer Pools (DPBPs), I/O Portals (DPIOs) and Concentrators
diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst
new file mode 100644 (file)
index 0000000..cbb75a1
--- /dev/null
@@ -0,0 +1,60 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+Ethernet Device Drivers
+=======================
+
+Device drivers for Ethernet and Ethernet-based virtual function devices.
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   3com/3c509
+   3com/vortex
+   amazon/ena
+   altera/altera_tse
+   aquantia/atlantic
+   chelsio/cxgb
+   cirrus/cs89x0
+   dlink/dl2k
+   davicom/dm9000
+   dec/de4x5
+   dec/dmfe
+   freescale/dpaa
+   freescale/dpaa2/index
+   freescale/gianfar
+   google/gve
+   huawei/hinic
+   intel/e100
+   intel/e1000
+   intel/e1000e
+   intel/fm10k
+   intel/igb
+   intel/igbvf
+   intel/ixgb
+   intel/ixgbe
+   intel/ixgbevf
+   intel/i40e
+   intel/iavf
+   intel/ice
+   marvell/octeontx2
+   mellanox/mlx5
+   microsoft/netvsc
+   neterion/s2io
+   neterion/vxge
+   netronome/nfp
+   pensando/ionic
+   smsc/smc9
+   stmicro/stmmac
+   ti/cpsw
+   ti/cpsw_switchdev
+   ti/tlan
+   toshiba/spider_net
+
+.. only::  subproject and html
+
+   Indices
+   =======
+
+   * :ref:`genindex`
diff --git a/Documentation/networking/device_drivers/fddi/index.rst b/Documentation/networking/device_drivers/fddi/index.rst
new file mode 100644 (file)
index 0000000..0b75294
--- /dev/null
@@ -0,0 +1,19 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+Fiber Distributed Data Interface (FDDI) Device Drivers
+======================================================
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   defza
+   skfp
+
+.. only::  subproject and html
+
+   Indices
+   =======
+
+   * :ref:`genindex`
diff --git a/Documentation/networking/device_drivers/hamradio/index.rst b/Documentation/networking/device_drivers/hamradio/index.rst
new file mode 100644 (file)
index 0000000..7e73173
--- /dev/null
@@ -0,0 +1,19 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+Amateur Radio Device Drivers
+============================
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   baycom
+   z8530drv
+
+.. only::  subproject and html
+
+   Indices
+   =======
+
+   * :ref:`genindex`
index e18dad1..a3113ff 100644 (file)
@@ -1,56 +1,22 @@
 .. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 
-Vendor Device Drivers
-=====================
+Hardware Device Drivers
+=======================
 
 Contents:
 
 .. toctree::
    :maxdepth: 2
 
-   freescale/dpaa2/index
-   intel/e100
-   intel/e1000
-   intel/e1000e
-   intel/fm10k
-   intel/igb
-   intel/igbvf
-   intel/ixgb
-   intel/ixgbe
-   intel/ixgbevf
-   intel/i40e
-   intel/iavf
-   intel/ice
-   google/gve
-   marvell/octeontx2
-   mellanox/mlx5
-   netronome/nfp
-   pensando/ionic
-   stmicro/stmmac
-   3com/3c509
-   3com/vortex
-   amazon/ena
-   aquantia/atlantic
-   chelsio/cxgb
-   cirrus/cs89x0
-   davicom/dm9000
-   dec/de4x5
-   dec/dmfe
-   dlink/dl2k
-   freescale/dpaa
-   freescale/gianfar
-   intel/ipw2100
-   intel/ipw2200
-   microsoft/netvsc
-   neterion/s2io
-   neterion/vxge
-   qualcomm/rmnet
-   sb1000
-   smsc/smc9
-   ti/cpsw_switchdev
-   ti/cpsw
-   ti/tlan
-   toshiba/spider_net
+   appletalk/index
+   atm/index
+   cable/index
+   cellular/index
+   ethernet/index
+   fddi/index
+   hamradio/index
+   wan/index
+   wifi/index
 
 .. only::  subproject and html
 
diff --git a/Documentation/networking/device_drivers/wan/index.rst b/Documentation/networking/device_drivers/wan/index.rst
new file mode 100644 (file)
index 0000000..9d9ae94
--- /dev/null
@@ -0,0 +1,18 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+Classic WAN Device Drivers
+==========================
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   z8530book
+
+.. only::  subproject and html
+
+   Indices
+   =======
+
+   * :ref:`genindex`
diff --git a/Documentation/networking/device_drivers/wifi/index.rst b/Documentation/networking/device_drivers/wifi/index.rst
new file mode 100644 (file)
index 0000000..bf91a87
--- /dev/null
@@ -0,0 +1,20 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+Wi-Fi Device Drivers
+====================
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   intel/ipw2100
+   intel/ipw2200
+   ray_cs
+
+.. only::  subproject and html
+
+   Indices
+   =======
+
+   * :ref:`genindex`
@@ -78,7 +78,7 @@ such, if you are interested in deploying or shipping a driver as part of
 solution intended to be used for purposes other than development, please
 obtain a tested driver from Intel Customer Support at:
 
-http://www.intel.com/support/wireless/sb/CS-006408.htm
+https://www.intel.com/support/wireless/sb/CS-006408.htm
 
 1. Introduction
 ===============
index 3fe1140..7572bf6 100644 (file)
@@ -44,9 +44,11 @@ versions is generally discouraged - here, and via any other Linux API.
        reported for two ports of the same device or on two hosts of
        a multi-host device should be identical.
 
-       .. note:: ``devlink-info`` API should be extended with a new field
-         if devices want to report board/product serial number (often
-         reported in PCI *Vital Product Data* capability).
+   * - ``board.serial_number``
+     - Board serial number of the device.
+
+       This is usually the serial number of the board, often available in
+       PCI *Vital Product Data*.
 
    * - ``fixed``
      - Group for hardware identifiers, and versions of components
@@ -201,10 +203,6 @@ Future work
 
 The following extensions could be useful:
 
- - product serial number - NIC boards often get labeled with a board serial
-   number rather than ASIC serial number; it'd be useful to add board serial
-   numbers to the API if they can be retrieved from the device;
-
  - on-disk firmware file names - drivers list the file names of firmware they
    may need to load onto devices via the ``MODULE_FIRMWARE()`` macro. These,
    however, are per module, rather than per device. It'd be useful to list
index 72ea8d2..237848d 100644 (file)
@@ -84,8 +84,20 @@ The ``ice`` driver reports the following versions
 Regions
 =======
 
-The ``ice`` driver enables access to the contents of the Non Volatile Memory
-flash chip via the ``nvm-flash`` region.
+The ``ice`` driver implements the following regions for accessing internal
+device data.
+
+.. list-table:: regions implemented
+    :widths: 15 85
+
+    * - Name
+      - Description
+    * - ``nvm-flash``
+      - The contents of the entire flash chip, sometimes referred to as
+        the device's Non Volatile Memory.
+    * - ``device-caps``
+      - The contents of the device firmware's capabilities buffer. Useful to
+        determine the current state and configuration of the device.
 
 Users can request an immediate capture of a snapshot via the
 ``DEVLINK_CMD_REGION_NEW``
@@ -105,3 +117,42 @@ Users can request an immediate capture of a snapshot via the
     0000000000000000 0014 95dc 0014 9514 0035 1670 0034 db30
 
     $ devlink region delete pci/0000:01:00.0/nvm-flash snapshot 1
+
+    $ devlink region new pci/0000:01:00.0/device-caps snapshot 1
+    $ devlink region dump pci/0000:01:00.0/device-caps snapshot 1
+    0000000000000000 01 00 01 00 00 00 00 00 01 00 00 00 00 00 00 00
+    0000000000000010 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+    0000000000000020 02 00 02 01 32 03 00 00 0a 00 00 00 25 00 00 00
+    0000000000000030 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+    0000000000000040 04 00 01 00 01 00 00 00 00 00 00 00 00 00 00 00
+    0000000000000050 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+    0000000000000060 05 00 01 00 03 00 00 00 00 00 00 00 00 00 00 00
+    0000000000000070 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+    0000000000000080 06 00 01 00 01 00 00 00 00 00 00 00 00 00 00 00
+    0000000000000090 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+    00000000000000a0 08 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00
+    00000000000000b0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+    00000000000000c0 12 00 01 00 01 00 00 00 01 00 01 00 00 00 00 00
+    00000000000000d0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+    00000000000000e0 13 00 01 00 00 01 00 00 00 00 00 00 00 00 00 00
+    00000000000000f0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+    0000000000000100 14 00 01 00 01 00 00 00 00 00 00 00 00 00 00 00
+    0000000000000110 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+    0000000000000120 15 00 01 00 01 00 00 00 00 00 00 00 00 00 00 00
+    0000000000000130 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+    0000000000000140 16 00 01 00 01 00 00 00 00 00 00 00 00 00 00 00
+    0000000000000150 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+    0000000000000160 17 00 01 00 06 00 00 00 00 00 00 00 00 00 00 00
+    0000000000000170 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+    0000000000000180 18 00 01 00 01 00 00 00 01 00 00 00 08 00 00 00
+    0000000000000190 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+    00000000000001a0 22 00 01 00 01 00 00 00 00 00 00 00 00 00 00 00
+    00000000000001b0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+    00000000000001c0 40 00 01 00 00 08 00 00 08 00 00 00 00 00 00 00
+    00000000000001d0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+    00000000000001e0 41 00 01 00 00 08 00 00 00 00 00 00 00 00 00 00
+    00000000000001f0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+    0000000000000200 42 00 01 00 00 08 00 00 00 00 00 00 00 00 00 00
+    0000000000000210 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+
+    $ devlink region delete pci/0000:01:00.0/device-caps snapshot 1
index 563d56c..a8d15dd 100644 (file)
@@ -95,7 +95,7 @@ Ethernet switch.
 Networking stack hooks
 ----------------------
 
-When a master netdev is used with DSA, a small hook is placed in in the
+When a master netdev is used with DSA, a small hook is placed in the
 networking stack is in order to have the DSA subsystem process the Ethernet
 switch specific tagging protocol. DSA accomplishes this by registering a
 specific (and fake) Ethernet type (later becoming ``skb->protocol``) with the
index 82470c3..7d75f1e 100644 (file)
@@ -443,10 +443,11 @@ supports.
 LINKSTATE_GET
 =============
 
-Requests link state information. At the moment, only link up/down flag (as
-provided by ``ETHTOOL_GLINK`` ioctl command) is provided but some future
-extensions are planned (e.g. link down reason). This request does not have any
-attributes.
+Requests link state information. Link up/down flag (as provided by
+``ETHTOOL_GLINK`` ioctl command) is provided. Optionally, extended state might
+be provided as well. In general, extended state describes reasons for why a port
+is down, or why it operates in some non-obvious mode. This request does not have
+any attributes.
 
 Request contents:
 
@@ -461,16 +462,135 @@ Kernel response contents:
   ``ETHTOOL_A_LINKSTATE_LINK``          bool    link state (up/down)
   ``ETHTOOL_A_LINKSTATE_SQI``           u32     Current Signal Quality Index
   ``ETHTOOL_A_LINKSTATE_SQI_MAX``       u32     Max support SQI value
+  ``ETHTOOL_A_LINKSTATE_EXT_STATE``     u8      link extended state
+  ``ETHTOOL_A_LINKSTATE_EXT_SUBSTATE``  u8      link extended substate
   ====================================  ======  ============================
 
 For most NIC drivers, the value of ``ETHTOOL_A_LINKSTATE_LINK`` returns
 carrier flag provided by ``netif_carrier_ok()`` but there are drivers which
 define their own handler.
 
+``ETHTOOL_A_LINKSTATE_EXT_STATE`` and ``ETHTOOL_A_LINKSTATE_EXT_SUBSTATE`` are
+optional values. ethtool core can provide either both
+``ETHTOOL_A_LINKSTATE_EXT_STATE`` and ``ETHTOOL_A_LINKSTATE_EXT_SUBSTATE``,
+or only ``ETHTOOL_A_LINKSTATE_EXT_STATE``, or none of them.
+
 ``LINKSTATE_GET`` allows dump requests (kernel returns reply messages for all
 devices supporting the request).
 
 
+Link extended states:
+
+  ================================================      ============================================
+  ``ETHTOOL_LINK_EXT_STATE_AUTONEG``                    States relating to the autonegotiation or
+                                                        issues therein
+
+  ``ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE``      Failure during link training
+
+  ``ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH``      Logical mismatch in physical coding sublayer
+                                                        or forward error correction sublayer
+
+  ``ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY``       Signal integrity issues
+
+  ``ETHTOOL_LINK_EXT_STATE_NO_CABLE``                   No cable connected
+
+  ``ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE``                Failure is related to cable,
+                                                        e.g., unsupported cable
+
+  ``ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE``               Failure is related to EEPROM, e.g., failure
+                                                        during reading or parsing the data
+
+  ``ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE``        Failure during calibration algorithm
+
+  ``ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED``      The hardware is not able to provide the
+                                                        power required from cable or module
+
+  ``ETHTOOL_LINK_EXT_STATE_OVERHEAT``                   The module is overheated
+  ================================================      ============================================
+
+Link extended substates:
+
+  Autoneg substates:
+
+  ===============================================================   ================================
+  ``ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED``              Peer side is down
+
+  ``ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED``                 Ack not received from peer side
+
+  ``ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED``        Next page exchange failed
+
+  ``ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE``   Peer side is down during force
+                                                                    mode or there is no agreement of
+                                                                    speed
+
+  ``ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE``     Forward error correction modes
+                                                                    in both sides are mismatched
+
+  ``ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD``                           No Highest Common Denominator
+  ===============================================================   ================================
+
+  Link training substates:
+
+  ===========================================================================   ====================
+  ``ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED``                    Frames were not
+                                                                                 recognized, the
+                                                                                 lock failed
+
+  ``ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT``                       The lock did not
+                                                                                 occur before
+                                                                                 timeout
+
+  ``ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY``    Peer side did not
+                                                                                 send ready signal
+                                                                                 after training
+                                                                                 process
+
+  ``ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT``                                  Remote side is not
+                                                                                 ready yet
+  ===========================================================================   ====================
+
+  Link logical mismatch substates:
+
+  ================================================================   ===============================
+  ``ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK``   Physical coding sublayer was
+                                                                     not locked in first phase -
+                                                                     block lock
+
+  ``ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK``      Physical coding sublayer was
+                                                                     not locked in second phase -
+                                                                     alignment markers lock
+
+  ``ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS``     Physical coding sublayer did
+                                                                     not get align status
+
+  ``ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED``             FC forward error correction is
+                                                                     not locked
+
+  ``ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED``             RS forward error correction is
+                                                                     not locked
+  ================================================================   ===============================
+
+  Bad signal integrity substates:
+
+  =================================================================    =============================
+  ``ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS``    Large number of physical
+                                                                       errors
+
+  ``ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE``                   The system attempted to
+                                                                       operate the cable at a rate
+                                                                       that is not formally
+                                                                       supported, which led to
+                                                                       signal integrity issues
+  =================================================================    =============================
+
+  Cable issue substates:
+
+  ===================================================   ============================================
+  ``ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE``    Unsupported cable
+
+  ``ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE``   Cable test failure
+  ===================================================   ============================================
+
 DEBUG_GET
 =========
 
@@ -1110,6 +1230,39 @@ used to report the amplitude of the reflection for a given pair.
  | | | ``ETHTOOL_A_CABLE_AMPLITUDE_mV``        | s16    | Reflection amplitude |
  +-+-+-----------------------------------------+--------+----------------------+
 
+TUNNEL_INFO
+===========
+
+Gets information about the tunnel state the NIC is aware of.
+
+Request contents:
+
+  =====================================  ======  ==========================
+  ``ETHTOOL_A_TUNNEL_INFO_HEADER``       nested  request header
+  =====================================  ======  ==========================
+
+Kernel response contents:
+
+ +---------------------------------------------+--------+---------------------+
+ | ``ETHTOOL_A_TUNNEL_INFO_HEADER``            | nested | reply header        |
+ +---------------------------------------------+--------+---------------------+
+ | ``ETHTOOL_A_TUNNEL_INFO_UDP_PORTS``         | nested | all UDP port tables |
+ +-+-------------------------------------------+--------+---------------------+
+ | | ``ETHTOOL_A_TUNNEL_UDP_TABLE``            | nested | one UDP port table  |
+ +-+-+-----------------------------------------+--------+---------------------+
+ | | | ``ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE``     | u32    | max size of the     |
+ | | |                                         |        | table               |
+ +-+-+-----------------------------------------+--------+---------------------+
+ | | | ``ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES``    | bitset | tunnel types which  |
+ | | |                                         |        | table can hold      |
+ +-+-+-----------------------------------------+--------+---------------------+
+ | | | ``ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY``    | nested | offloaded UDP port  |
+ +-+-+-+---------------------------------------+--------+---------------------+
+ | | | | ``ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT``   | be16   | UDP port            |
+ +-+-+-+---------------------------------------+--------+---------------------+
+ | | | | ``ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE``   | u32    | tunnel type         |
+ +-+-+-+---------------------------------------+--------+---------------------+
+
 Request translation
 ===================
 
index 36ca823..6f4bf84 100644 (file)
@@ -30,8 +30,8 @@ Socket API
 
 The address family, socket addresses etc. are defined in the
 include/net/af_ieee802154.h header or in the special header
-in the userspace package (see either http://wpan.cakelab.org/ or the
-git tree at https://github.com/linux-wpan/wpan-tools).
+in the userspace package (see either https://linux-wpan.org/wpan-tools.html
+or the git tree at https://github.com/linux-wpan/wpan-tools).
 
 6LoWPAN Linux implementation
 ============================
index 0186e27..c29496f 100644 (file)
@@ -20,7 +20,6 @@ Contents:
    ieee802154
    j1939
    kapi
-   z8530book
    msg_zerocopy
    failover
    net_dim
@@ -39,36 +38,28 @@ Contents:
    nfc
    6lowpan
    6pack
-   altera_tse
    arcnet-hardware
    arcnet
    atm
    ax25
-   baycom
    bonding
    cdc_mbim
-   cops
-   cxacru
    dccp
    dctcp
    decnet
-   defza
    dns_resolver
    driver
    eql
    fib_trie
    filter
-   fore200e
    framerelay
    generic-hdlc
    generic_netlink
    gen_stats
    gtp
-   hinic
    ila
    ipddp
    ip_dynaddr
-   iphase
    ipsec
    ip-sysctl
    ipv6
@@ -77,7 +68,6 @@ Contents:
    kcm
    l2tp
    lapb-module
-   ltpc
    mac80211-injection
    mpls-sysctl
    multiqueue
@@ -97,14 +87,12 @@ Contents:
    ppp_generic
    proc_net_tcp
    radiotap-headers
-   ray_cs
    rds
    regulatory
    rxrpc
    sctp
    secid
    seg6-sysctl
-   skfp
    strparser
    switchdev
    tc-actions-env-rules
@@ -122,7 +110,6 @@ Contents:
    xfrm_proc
    xfrm_sync
    xfrm_sysctl
-   z8530drv
 
 .. only::  subproject and html
 
index b72f89d..837d51f 100644 (file)
@@ -741,7 +741,7 @@ tcp_fastopen - INTEGER
 
        Default: 0x1
 
-       Note that that additional client or server features are only
+       Note that additional client or server features are only
        effective if the basic support (0x1 and 0x2) are enabled respectively.
 
 tcp_fastopen_blackhole_timeout_sec - INTEGER
index be36c46..2afccc6 100644 (file)
@@ -114,7 +114,7 @@ drop_entry - INTEGER
        modes (when there is no enough available memory, the strategy
        is enabled and the variable is automatically set to 2,
        otherwise the strategy is disabled and the variable is set to
-       1), and 3 means that that the strategy is always enabled.
+       1), and 3 means that the strategy is always enabled.
 
 drop_packet - INTEGER
        - 0  - disabled (default)
index 68552b9..39c2249 100644 (file)
@@ -186,7 +186,7 @@ About the AF_RXRPC driver:
      time [tunable] after the last connection using it discarded, in case a new
      connection is made that could use it.
 
- (#) A client-side connection is only shared between calls if they have have
+ (#) A client-side connection is only shared between calls if they have
      the same key struct describing their security (and assuming the calls
      would otherwise share the connection).  Non-secured calls would also be
      able to share connections with each other.
index 1adead6..03f7bea 100644 (file)
@@ -589,3 +589,168 @@ Time stamps for outgoing packets are to be generated as follows:
   this would occur at a later time in the processing pipeline than other
   software time stamping and therefore could lead to unexpected deltas
   between time stamps.
+
+3.2 Special considerations for stacked PTP Hardware Clocks
+----------------------------------------------------------
+
+There are situations when there may be more than one PHC (PTP Hardware Clock)
+in the data path of a packet. The kernel has no explicit mechanism to allow the
+user to select which PHC to use for timestamping Ethernet frames. Instead, the
+assumption is that the outermost PHC is always the most preferable, and that
+kernel drivers collaborate towards achieving that goal. Currently there are 3
+cases of stacked PHCs, detailed below:
+
+3.2.1 DSA (Distributed Switch Architecture) switches
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+These are Ethernet switches which have one of their ports connected to an
+(otherwise completely unaware) host Ethernet interface, and perform the role of
+a port multiplier with optional forwarding acceleration features.  Each DSA
+switch port is visible to the user as a standalone (virtual) network interface,
+and its network I/O is performed, under the hood, indirectly through the host
+interface (redirecting to the host port on TX, and intercepting frames on RX).
+
+When a DSA switch is attached to a host port, PTP synchronization has to
+suffer, since the switch's variable queuing delay introduces a path delay
+jitter between the host port and its PTP partner. For this reason, some DSA
+switches include a timestamping clock of their own, and have the ability to
+perform network timestamping on their own MAC, such that path delays only
+measure wire and PHY propagation latencies. Timestamping DSA switches are
+supported in Linux and expose the same ABI as any other network interface (save
+for the fact that the DSA interfaces are in fact virtual in terms of network
+I/O, they do have their own PHC).  It is typical, but not mandatory, for all
+interfaces of a DSA switch to share the same PHC.
+
+By design, PTP timestamping with a DSA switch does not need any special
+handling in the driver for the host port it is attached to.  However, when the
+host port also supports PTP timestamping, DSA will take care of intercepting
+the ``.ndo_do_ioctl`` calls towards the host port, and block attempts to enable
+hardware timestamping on it. This is because the SO_TIMESTAMPING API does not
+allow the delivery of multiple hardware timestamps for the same packet, so
+anybody else except for the DSA switch port must be prevented from doing so.
+
+In code, DSA provides for most of the infrastructure for timestamping already,
+in generic code: a BPF classifier (``ptp_classify_raw``) is used to identify
+PTP event messages (any other packets, including PTP general messages, are not
+timestamped), and provides two hooks to drivers:
+
+- ``.port_txtstamp()``: The driver is passed a clone of the timestampable skb
+  to be transmitted, before actually transmitting it. Typically, a switch will
+  have a PTP TX timestamp register (or sometimes a FIFO) where the timestamp
+  becomes available. There may be an IRQ that is raised upon this timestamp's
+  availability, or the driver might have to poll after invoking
+  ``dev_queue_xmit()`` towards the host interface. Either way, in the
+  ``.port_txtstamp()`` method, the driver only needs to save the clone for
+  later use (when the timestamp becomes available). Each skb is annotated with
+  a pointer to its clone, in ``DSA_SKB_CB(skb)->clone``, to ease the driver's
+  job of keeping track of which clone belongs to which skb.
+
+- ``.port_rxtstamp()``: The original (and only) timestampable skb is provided
+  to the driver, for it to annotate it with a timestamp, if that is immediately
+  available, or defer to later. On reception, timestamps might either be
+  available in-band (through metadata in the DSA header, or attached in other
+  ways to the packet), or out-of-band (through another RX timestamping FIFO).
+  Deferral on RX is typically necessary when retrieving the timestamp needs a
+  sleepable context. In that case, it is the responsibility of the DSA driver
+  to call ``netif_rx_ni()`` on the freshly timestamped skb.
+
+3.2.2 Ethernet PHYs
+^^^^^^^^^^^^^^^^^^^
+
+These are devices that typically fulfill a Layer 1 role in the network stack,
+hence they do not have a representation in terms of a network interface as DSA
+switches do. However, PHYs may be able to detect and timestamp PTP packets, for
+performance reasons: timestamps taken as close as possible to the wire have the
+potential to yield a more stable and precise synchronization.
+
+A PHY driver that supports PTP timestamping must create a ``struct
+mii_timestamper`` and add a pointer to it in ``phydev->mii_ts``. The presence
+of this pointer will be checked by the networking stack.
+
+Since PHYs do not have network interface representations, the timestamping and
+ethtool ioctl operations for them need to be mediated by their respective MAC
+driver.  Therefore, as opposed to DSA switches, modifications need to be done
+to each individual MAC driver for PHY timestamping support. This entails:
+
+- Checking, in ``.ndo_do_ioctl``, whether ``phy_has_hwtstamp(netdev->phydev)``
+  is true or not. If it is, then the MAC driver should not process this request
+  but instead pass it on to the PHY using ``phy_mii_ioctl()``.
+
+- On RX, special intervention may or may not be needed, depending on the
+  function used to deliver skb's up the network stack. In the case of plain
+  ``netif_rx()`` and similar, MAC drivers must check whether
+  ``skb_defer_rx_timestamp(skb)`` is necessary or not - and if it is, don't
+  call ``netif_rx()`` at all.  If ``CONFIG_NETWORK_PHY_TIMESTAMPING`` is
+  enabled, and ``skb->dev->phydev->mii_ts`` exists, its ``.rxtstamp()`` hook
+  will be called now, to determine, using logic very similar to DSA, whether
+  deferral for RX timestamping is necessary.  Again like DSA, it becomes the
+  responsibility of the PHY driver to send the packet up the stack when the
+  timestamp is available.
+
+  For other skb receive functions, such as ``napi_gro_receive`` and
+  ``netif_receive_skb``, the stack automatically checks whether
+  ``skb_defer_rx_timestamp()`` is necessary, so this check is not needed inside
+  the driver.
+
+- On TX, again, special intervention might or might not be needed.  The
+  function that calls the ``mii_ts->txtstamp()`` hook is named
+  ``skb_clone_tx_timestamp()``. This function can either be called directly
+  (case in which explicit MAC driver support is indeed needed), but the
+  function also piggybacks from the ``skb_tx_timestamp()`` call, which many MAC
+  drivers already perform for software timestamping purposes. Therefore, if a
+  MAC supports software timestamping, it does not need to do anything further
+  at this stage.
+
+3.2.3 MII bus snooping devices
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+These perform the same role as timestamping Ethernet PHYs, save for the fact
+that they are discrete devices and can therefore be used in conjunction with
+any PHY even if it doesn't support timestamping. In Linux, they are
+discoverable and attachable to a ``struct phy_device`` through Device Tree, and
+for the rest, they use the same mii_ts infrastructure as those. See
+Documentation/devicetree/bindings/ptp/timestamper.txt for more details.
+
+3.2.4 Other caveats for MAC drivers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A caveat with stacked PHCs, especially DSA (but not only) - since that setup
+requires no modification to MAC drivers, it is more difficult to ensure
+correctness of all possible code paths - is that they uncover bugs which were
+impossible to trigger before the existence of stacked PTP clocks.  One example
+has to do with this line of code, already presented earlier::
+
+      skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+Any TX timestamping logic, be it a plain MAC driver, a DSA switch driver, a PHY
+driver or a MII bus snooping device driver, should set this flag.
+But a MAC driver that is unaware of PHC stacking might get tripped up by
+somebody other than itself setting this flag, and deliver a duplicate
+timestamp.
+For example, a typical driver design for TX timestamping might be to split the
+transmission part into 2 portions:
+
+1. "TX": checks whether PTP timestamping has been previously enabled through
+   the ``.ndo_do_ioctl`` ("``priv->hwtstamp_tx_enabled == true``") and the
+   current skb requires a TX timestamp ("``skb_shinfo(skb)->tx_flags &
+   SKBTX_HW_TSTAMP``"). If this is true, it sets the
+   "``skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS``" flag. Note: as
+   described above, in the case of a stacked PHC system, this condition should
+   never trigger, as this MAC is certainly not the outermost PHC. But this is
+   not where the typical issue is.  Transmission proceeds with this packet.
+
+2. "TX confirmation": Transmission has finished. The driver checks whether it
+   is necessary to collect any TX timestamp for it. Here is where the typical
+   issues are: the MAC driver takes a shortcut and only checks whether
+   "``skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS``" was set. With a stacked
+   PHC system, this is incorrect because this MAC driver is not the only entity
+   in the TX data path who could have enabled SKBTX_IN_PROGRESS in the first
+   place.
+
+The correct solution for this problem is for MAC drivers to have a compound
+check in their "TX confirmation" portion, not only for
+"``skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS``", but also for
+"``priv->hwtstamp_tx_enabled == true``". Because the rest of the system ensures
+that PTP timestamping is not enabled for anything other than the outermost PHC,
+this enhanced check will avoid delivering a duplicated TX timestamp to user
+space.
index f914e81..37773da 100644 (file)
@@ -428,6 +428,24 @@ by the driver:
    which were part of a TLS stream.
  * ``rx_tls_decrypted_bytes`` - number of TLS payload bytes in RX packets
    which were successfully decrypted.
+ * ``rx_tls_ctx`` - number of TLS RX HW offload contexts added to device for
+   decryption.
+ * ``rx_tls_del`` - number of TLS RX HW offload contexts deleted from device
+   (connection has finished).
+ * ``rx_tls_resync_req_pkt`` - number of received TLS packets with a resync
+    request.
+ * ``rx_tls_resync_req_start`` - number of times the TLS async resync request
+    was started.
+ * ``rx_tls_resync_req_end`` - number of times the TLS async resync request
+    properly ended with providing the HW tracked tcp-seq.
+ * ``rx_tls_resync_req_skip`` - number of times the TLS async resync request
+    procedure was started but not properly ended.
+ * ``rx_tls_resync_res_ok`` - number of times the TLS resync response call to
+    the driver was successfully handled.
+ * ``rx_tls_resync_res_skip`` - number of times the TLS resync response call to
+    the driver was terminated unsuccessfully.
+ * ``rx_tls_err`` - number of RX packets which were part of a TLS stream
+   but were not decrypted due to unexpected error in the state machine.
  * ``tx_tls_encrypted_packets`` - number of TX packets passed to the device
    for encryption of their TLS payload.
  * ``tx_tls_encrypted_bytes`` - number of TLS payload bytes in TX packets
index 3493631..48fcf12 100644 (file)
@@ -220,13 +220,51 @@ from the LPAR memory.
 **H_SCM_HEALTH**
 
 | Input: drcIndex
-| Out: *health-bitmap, health-bit-valid-bitmap*
+| Out: *health-bitmap (r4), health-bit-valid-bitmap (r5)*
 | Return Value: *H_Success, H_Parameter, H_Hardware*
 
 Given a DRC Index return the info on predictive failure and overall health of
-the NVDIMM. The asserted bits in the health-bitmap indicate a single predictive
-failure and health-bit-valid-bitmap indicate which bits in health-bitmap are
-valid.
+the PMEM device. The asserted bits in the health-bitmap indicate one or more states
+(described in table below) of the PMEM device and health-bit-valid-bitmap indicate
+which bits in health-bitmap are valid. The bits are reported in
+reverse bit ordering for example a value of 0xC400000000000000
+indicates bits 0, 1, and 5 are valid.
+
+Health Bitmap Flags:
+
++------+-----------------------------------------------------------------------+
+|  Bit |               Definition                                              |
++======+=======================================================================+
+|  00  | PMEM device is unable to persist memory contents.                     |
+|      | If the system is powered down, nothing will be saved.                 |
++------+-----------------------------------------------------------------------+
+|  01  | PMEM device failed to persist memory contents. Either contents were   |
+|      | not saved successfully on power down or were not restored properly on |
+|      | power up.                                                             |
++------+-----------------------------------------------------------------------+
+|  02  | PMEM device contents are persisted from previous IPL. The data from   |
+|      | the last boot were successfully restored.                             |
++------+-----------------------------------------------------------------------+
+|  03  | PMEM device contents are not persisted from previous IPL. There was no|
+|      | data to restore from the last boot.                                   |
++------+-----------------------------------------------------------------------+
+|  04  | PMEM device memory life remaining is critically low                   |
++------+-----------------------------------------------------------------------+
+|  05  | PMEM device will be garded off next IPL due to failure                |
++------+-----------------------------------------------------------------------+
+|  06  | PMEM device contents cannot persist due to current platform health    |
+|      | status. A hardware failure may prevent data from being saved or       |
+|      | restored.                                                             |
++------+-----------------------------------------------------------------------+
+|  07  | PMEM device is unable to persist memory contents in certain conditions|
++------+-----------------------------------------------------------------------+
+|  08  | PMEM device is encrypted                                              |
++------+-----------------------------------------------------------------------+
+|  09  | PMEM device has successfully completed a requested erase or secure    |
+|      | erase procedure.                                                      |
++------+-----------------------------------------------------------------------+
+|10:63 | Reserved / Unused                                                     |
++------+-----------------------------------------------------------------------+
 
 **H_SCM_PERFORMANCE_STATS**
 
index 5cfb54c..8f68e72 100644 (file)
@@ -29,7 +29,7 @@ you probably needn't concern yourself with pcmciautils.
 ====================== ===============  ========================================
         Program        Minimal version       Command to check the version
 ====================== ===============  ========================================
-GNU C                  4.8              gcc --version
+GNU C                  4.9              gcc --version
 GNU make               3.81             make --version
 binutils               2.23             ld -v
 flex                   2.5.35           flex --version
index 2657a55..1bee6f8 100644 (file)
@@ -319,6 +319,26 @@ If you are afraid to mix up your local variable names, you have another
 problem, which is called the function-growth-hormone-imbalance syndrome.
 See chapter 6 (Functions).
 
+For symbol names and documentation, avoid introducing new usage of
+'master / slave' (or 'slave' independent of 'master') and 'blacklist /
+whitelist'.
+
+Recommended replacements for 'master / slave' are:
+    '{primary,main} / {secondary,replica,subordinate}'
+    '{initiator,requester} / {target,responder}'
+    '{controller,host} / {device,worker,proxy}'
+    'leader / follower'
+    'director / performer'
+
+Recommended replacements for 'blacklist/whitelist' are:
+    'denylist / allowlist'
+    'blocklist / passlist'
+
+Exceptions for introducing new usage is to maintain a userspace ABI/API,
+or when updating code for an existing (as of 2020) hardware or protocol
+specification that mandates those terms. For new specifications
+translate specification usage of the terminology to the kernel coding
+standard where possible.
 
 5) Typedefs
 -----------
index bc8db7b..0bd405a 100644 (file)
@@ -16,18 +16,6 @@ Store Queue API
 .. kernel-doc:: arch/sh/kernel/cpu/sh4/sq.c
    :export:
 
-SH-5
-----
-
-TLB Interfaces
-~~~~~~~~~~~~~~
-
-.. kernel-doc:: arch/sh/mm/tlb-sh5.c
-   :internal:
-
-.. kernel-doc:: arch/sh/include/asm/tlb_64.h
-   :internal:
-
 Machine Specific Interfaces
 ===========================
 
index d0c50d7..0a8e236 100644 (file)
@@ -27,7 +27,7 @@ nitpick_ignore = [
     ("c:func", "copy_to_user"),
     ("c:func", "determine_valid_ioctls"),
     ("c:func", "ERR_PTR"),
-    ("c:func", "i2c_new_device"),
+    ("c:func", "i2c_new_client_device"),
     ("c:func", "ioctl"),
     ("c:func", "IS_ERR"),
     ("c:func", "KERNEL_VERSION"),
index 426f945..320788f 100644 (file)
@@ -4339,14 +4339,15 @@ Errors:
 #define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001
 
   struct kvm_vmx_nested_state_hdr {
-       __u32 flags;
        __u64 vmxon_pa;
        __u64 vmcs12_pa;
-       __u64 preemption_timer_deadline;
 
        struct {
                __u16 flags;
        } smm;
+
+       __u32 flags;
+       __u64 preemption_timer_deadline;
   };
 
   struct kvm_vmx_nested_state_data {
index 68f21d4..6200eb1 100644 (file)
@@ -147,7 +147,7 @@ Maintainers List
 M:     Steffen Klassert <klassert@kernel.org>
 L:     netdev@vger.kernel.org
 S:     Odd Fixes
-F:     Documentation/networking/device_drivers/3com/vortex.rst
+F:     Documentation/networking/device_drivers/ethernet/3com/vortex.rst
 F:     drivers/net/ethernet/3com/3c59x.c
 
 3CR990 NETWORK DRIVER
@@ -816,7 +816,7 @@ R:  Saeed Bishara <saeedb@amazon.com>
 R:     Zorik Machulsky <zorik@amazon.com>
 L:     netdev@vger.kernel.org
 S:     Supported
-F:     Documentation/networking/device_drivers/amazon/ena.rst
+F:     Documentation/networking/device_drivers/ethernet/amazon/ena.rst
 F:     drivers/net/ethernet/amazon/
 
 AMAZON RDMA EFA DRIVER
@@ -1295,7 +1295,7 @@ L:        netdev@vger.kernel.org
 S:     Supported
 W:     https://www.marvell.com/
 Q:     http://patchwork.ozlabs.org/project/netdev/list/
-F:     Documentation/networking/device_drivers/aquantia/atlantic.rst
+F:     Documentation/networking/device_drivers/ethernet/aquantia/atlantic.rst
 F:     drivers/net/ethernet/aquantia/atlantic/
 
 AQUANTIA ETHERNET DRIVER PTP SUBSYSTEM
@@ -2929,6 +2929,7 @@ F:        include/uapi/linux/atm*
 
 ATMEL MACB ETHERNET DRIVER
 M:     Nicolas Ferre <nicolas.ferre@microchip.com>
+M:     Claudiu Beznea <claudiu.beznea@microchip.com>
 S:     Supported
 F:     drivers/net/ethernet/cadence/
 
@@ -3306,7 +3307,7 @@ X:        arch/riscv/net/bpf_jit_comp32.c
 
 BPF JIT for S390
 M:     Ilya Leoshkevich <iii@linux.ibm.com>
-M:     Heiko Carstens <heiko.carstens@de.ibm.com>
+M:     Heiko Carstens <hca@linux.ibm.com>
 M:     Vasily Gorbik <gor@linux.ibm.com>
 L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
@@ -3946,7 +3947,7 @@ L:        linux-crypto@vger.kernel.org
 S:     Supported
 F:     drivers/char/hw_random/cctrng.c
 F:     drivers/char/hw_random/cctrng.h
-F:     Documentation/devicetree/bindings/rng/arm-cctrng.txt
+F:     Documentation/devicetree/bindings/rng/arm-cctrng.yaml
 W:     https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family
 
 CEC FRAMEWORK
@@ -4753,7 +4754,7 @@ F:        net/ax25/sysctl_net_ax25.c
 DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER
 L:     netdev@vger.kernel.org
 S:     Orphan
-F:     Documentation/networking/device_drivers/dec/dmfe.rst
+F:     Documentation/networking/device_drivers/ethernet/dec/dmfe.rst
 F:     drivers/net/ethernet/dec/tulip/dmfe.c
 
 DC390/AM53C974 SCSI driver
@@ -5241,8 +5242,8 @@ M:        Ioana Ciornei <ioana.ciornei@nxp.com>
 M:     Ioana Radulescu <ruxandra.radulescu@nxp.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
-F:     Documentation/networking/device_drivers/freescale/dpaa2/ethernet-driver.rst
-F:     Documentation/networking/device_drivers/freescale/dpaa2/mac-phy-support.rst
+F:     Documentation/networking/device_drivers/ethernet/freescale/dpaa2/ethernet-driver.rst
+F:     Documentation/networking/device_drivers/ethernet/freescale/dpaa2/mac-phy-support.rst
 F:     drivers/net/ethernet/freescale/dpaa2/Kconfig
 F:     drivers/net/ethernet/freescale/dpaa2/Makefile
 F:     drivers/net/ethernet/freescale/dpaa2/dpaa2-eth*
@@ -5490,7 +5491,7 @@ F:        include/uapi/drm/r128_drm.h
 DRM DRIVER FOR RAYDIUM RM67191 PANELS
 M:     Robert Chiras <robert.chiras@nxp.com>
 S:     Maintained
-F:     Documentation/devicetree/bindings/display/panel/raydium,rm67191.txt
+F:     Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml
 F:     drivers/gpu/drm/panel/panel-raydium-rm67191.c
 
 DRM DRIVER FOR ROCKTECH JH057N00900 PANELS
@@ -7302,7 +7303,7 @@ R:        Sagi Shahar <sagis@google.com>
 R:     Jon Olson <jonolson@google.com>
 L:     netdev@vger.kernel.org
 S:     Supported
-F:     Documentation/networking/device_drivers/google/gve.rst
+F:     Documentation/networking/device_drivers/ethernet/google/gve.rst
 F:     drivers/net/ethernet/google
 
 GPD POCKET FAN DRIVER
@@ -7913,7 +7914,7 @@ HUAWEI ETHERNET DRIVER
 M:     Bin Luo <luobin9@huawei.com>
 L:     netdev@vger.kernel.org
 S:     Supported
-F:     Documentation/networking/hinic.rst
+F:     Documentation/networking/device_drivers/ethernet/huawei/hinic.rst
 F:     drivers/net/ethernet/huawei/hinic/
 
 HUGETLB FILESYSTEM
@@ -7965,7 +7966,7 @@ S:        Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git
 F:     Documentation/ABI/stable/sysfs-bus-vmbus
 F:     Documentation/ABI/testing/debugfs-hyperv
-F:     Documentation/networking/device_drivers/microsoft/netvsc.rst
+F:     Documentation/networking/device_drivers/ethernet/microsoft/netvsc.rst
 F:     arch/x86/hyperv
 F:     arch/x86/include/asm/hyperv-tlfs.h
 F:     arch/x86/include/asm/mshyperv.h
@@ -8333,7 +8334,7 @@ M:        Alexander Aring <alex.aring@gmail.com>
 M:     Stefan Schmidt <stefan@datenfreihafen.org>
 L:     linux-wpan@vger.kernel.org
 S:     Maintained
-W:     http://wpan.cakelab.org/
+W:     https://linux-wpan.org/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan-next.git
 F:     Documentation/networking/ieee802154.rst
@@ -8647,18 +8648,7 @@ W:       http://e1000.sourceforge.net/
 Q:     http://patchwork.ozlabs.org/project/intel-wired-lan/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue.git
-F:     Documentation/networking/device_drivers/intel/e100.rst
-F:     Documentation/networking/device_drivers/intel/e1000.rst
-F:     Documentation/networking/device_drivers/intel/e1000e.rst
-F:     Documentation/networking/device_drivers/intel/fm10k.rst
-F:     Documentation/networking/device_drivers/intel/i40e.rst
-F:     Documentation/networking/device_drivers/intel/iavf.rst
-F:     Documentation/networking/device_drivers/intel/ice.rst
-F:     Documentation/networking/device_drivers/intel/igb.rst
-F:     Documentation/networking/device_drivers/intel/igbvf.rst
-F:     Documentation/networking/device_drivers/intel/ixgb.rst
-F:     Documentation/networking/device_drivers/intel/ixgbe.rst
-F:     Documentation/networking/device_drivers/intel/ixgbevf.rst
+F:     Documentation/networking/device_drivers/ethernet/intel/
 F:     drivers/net/ethernet/intel/
 F:     drivers/net/ethernet/intel/*/
 F:     include/linux/avf/virtchnl.h
@@ -8848,8 +8838,8 @@ INTEL PRO/WIRELESS 2100, 2200BG, 2915ABG NETWORK CONNECTION SUPPORT
 M:     Stanislav Yakovlev <stas.yakovlev@gmail.com>
 L:     linux-wireless@vger.kernel.org
 S:     Maintained
-F:     Documentation/networking/device_drivers/intel/ipw2100.rst
-F:     Documentation/networking/device_drivers/intel/ipw2200.rst
+F:     Documentation/networking/device_drivers/wifi/intel/ipw2100.rst
+F:     Documentation/networking/device_drivers/wifi/intel/ipw2200.rst
 F:     drivers/net/wireless/intel/ipw2x00/
 
 INTEL PSTATE DRIVER
@@ -10362,7 +10352,7 @@ M:      Geetha sowjanya <gakula@marvell.com>
 M:     Jerin Jacob <jerinj@marvell.com>
 L:     netdev@vger.kernel.org
 S:     Supported
-F:     Documentation/networking/device_drivers/marvell/octeontx2.rst
+F:     Documentation/networking/device_drivers/ethernet/marvell/octeontx2.rst
 F:     drivers/net/ethernet/marvell/octeontx2/af/
 
 MARVELL SOC MMC/SD/SDIO CONTROLLER DRIVER
@@ -10808,7 +10798,7 @@ F:      Documentation/devicetree/bindings/dma/mtk-*
 F:     drivers/dma/mediatek/
 
 MEDIATEK ETHERNET DRIVER
-M:     Felix Fietkau <nbd@openwrt.org>
+M:     Felix Fietkau <nbd@nbd.name>
 M:     John Crispin <john@phrozen.org>
 M:     Sean Wang <sean.wang@mediatek.com>
 M:     Mark Lee <Mark-MC.Lee@mediatek.com>
@@ -11031,7 +11021,7 @@ L:      linux-rdma@vger.kernel.org
 S:     Supported
 W:     http://www.mellanox.com
 Q:     http://patchwork.ozlabs.org/project/netdev/list/
-F:     Documentation/networking/device_drivers/mellanox/
+F:     Documentation/networking/device_drivers/ethernet/mellanox/
 F:     drivers/net/ethernet/mellanox/mlx5/core/
 F:     include/linux/mlx5/
 
@@ -11362,6 +11352,13 @@ L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
 F:     drivers/usb/gadget/udc/atmel_usba_udc.*
 
+MICROCHIP WILC1000 WIFI DRIVER
+M:     Ajay Singh <ajay.kathat@microchip.com>
+M:     Claudiu Beznea <claudiu.beznea@microchip.com>
+L:     linux-wireless@vger.kernel.org
+S:     Supported
+F:     drivers/net/wireless/microchip/wilc1000/
+
 MICROCHIP XDMA DRIVER
 M:     Ludovic Desroches <ludovic.desroches@microchip.com>
 L:     linux-arm-kernel@lists.infradead.org
@@ -11369,14 +11366,6 @@ L:     dmaengine@vger.kernel.org
 S:     Supported
 F:     drivers/dma/at_xdmac.c
 
-MICROSEMI ETHERNET SWITCH DRIVER
-M:     Alexandre Belloni <alexandre.belloni@bootlin.com>
-M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
-L:     netdev@vger.kernel.org
-S:     Supported
-F:     drivers/net/ethernet/mscc/
-F:     include/soc/mscc/ocelot*
-
 MICROSEMI MIPS SOCS
 M:     Alexandre Belloni <alexandre.belloni@bootlin.com>
 M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
@@ -11814,8 +11803,8 @@ NETERION 10GbE DRIVERS (s2io/vxge)
 M:     Jon Mason <jdmason@kudzu.us>
 L:     netdev@vger.kernel.org
 S:     Supported
-F:     Documentation/networking/device_drivers/neterion/s2io.rst
-F:     Documentation/networking/device_drivers/neterion/vxge.rst
+F:     Documentation/networking/device_drivers/ethernet/neterion/s2io.rst
+F:     Documentation/networking/device_drivers/ethernet/neterion/vxge.rst
 F:     drivers/net/ethernet/neterion/
 
 NETFILTER
@@ -12335,6 +12324,18 @@ M:     Peter Zijlstra <peterz@infradead.org>
 S:     Supported
 F:     tools/objtool/
 
+OCELOT ETHERNET SWITCH DRIVER
+M:     Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
+M:     Vladimir Oltean <vladimir.oltean@nxp.com>
+M:     Claudiu Manoil <claudiu.manoil@nxp.com>
+M:     Alexandre Belloni <alexandre.belloni@bootlin.com>
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     drivers/net/dsa/ocelot/*
+F:     drivers/net/ethernet/mscc/
+F:     include/soc/mscc/ocelot*
+F:     net/dsa/tag_ocelot.c
+
 OCXL (Open Coherent Accelerator Processor Interface OpenCAPI) DRIVER
 M:     Frederic Barrat <fbarrat@linux.ibm.com>
 M:     Andrew Donnellan <ajd@linux.ibm.com>
@@ -12691,13 +12692,13 @@ F:    arch/mips/boot/dts/ralink/omega2p.dts
 
 OP-TEE DRIVER
 M:     Jens Wiklander <jens.wiklander@linaro.org>
-L:     tee-dev@lists.linaro.org
+L:     op-tee@lists.trustedfirmware.org
 S:     Maintained
 F:     drivers/tee/optee/
 
 OP-TEE RANDOM NUMBER GENERATOR (RNG) DRIVER
 M:     Sumit Garg <sumit.garg@linaro.org>
-L:     tee-dev@lists.linaro.org
+L:     op-tee@lists.trustedfirmware.org
 S:     Maintained
 F:     drivers/char/hw_random/optee-rng.c
 
@@ -13366,7 +13367,7 @@ M:      Shannon Nelson <snelson@pensando.io>
 M:     Pensando Drivers <drivers@pensando.io>
 L:     netdev@vger.kernel.org
 S:     Supported
-F:     Documentation/networking/device_drivers/pensando/ionic.rst
+F:     Documentation/networking/device_drivers/ethernet/pensando/ionic.rst
 F:     drivers/net/ethernet/pensando/
 
 PER-CPU MEMORY ALLOCATOR
@@ -14049,7 +14050,7 @@ QLOGIC QLA3XXX NETWORK DRIVER
 M:     GR-Linux-NIC-Dev@marvell.com
 L:     netdev@vger.kernel.org
 S:     Supported
-F:     Documentation/networking/device_drivers/qlogic/LICENSE.qla3xxx
+F:     Documentation/networking/device_drivers/ethernet/qlogic/LICENSE.qla3xxx
 F:     drivers/net/ethernet/qlogic/qla3xxx.*
 
 QLOGIC QLA4XXX iSCSI DRIVER
@@ -14100,7 +14101,7 @@ M:      Laurentiu Tudor <laurentiu.tudor@nxp.com>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
-F:     Documentation/networking/device_drivers/freescale/dpaa2/overview.rst
+F:     Documentation/networking/device_drivers/ethernet/freescale/dpaa2/overview.rst
 F:     drivers/bus/fsl-mc/
 
 QT1010 MEDIA DRIVER
@@ -14192,6 +14193,15 @@ L:     dmaengine@vger.kernel.org
 S:     Supported
 F:     drivers/dma/qcom/hidma*
 
+QUALCOMM I2C CCI DRIVER
+M:     Loic Poulain <loic.poulain@linaro.org>
+M:     Robert Foss <robert.foss@linaro.org>
+L:     linux-i2c@vger.kernel.org
+L:     linux-arm-msm@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/i2c/i2c-qcom-cci.txt
+F:     drivers/i2c/busses/i2c-qcom-cci.c
+
 QUALCOMM IOMMU
 M:     Rob Clark <robdclark@gmail.com>
 L:     iommu@lists.linux-foundation.org
@@ -14212,7 +14222,7 @@ M:      Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
 M:     Sean Tranchetti <stranche@codeaurora.org>
 L:     netdev@vger.kernel.org
 S:     Maintained
-F:     Documentation/networking/device_drivers/qualcomm/rmnet.rst
+F:     Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
 F:     drivers/net/ethernet/qualcomm/rmnet/
 F:     include/linux/if_rmnet.h
 
@@ -14534,7 +14544,7 @@ F:      Documentation/devicetree/bindings/i2c/renesas,iic-emev2.txt
 F:     drivers/i2c/busses/i2c-emev2.c
 
 RENESAS ETHERNET DRIVERS
-R:     Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+R:     Sergei Shtylyov <sergei.shtylyov@gmail.com>
 L:     netdev@vger.kernel.org
 L:     linux-renesas-soc@vger.kernel.org
 F:     Documentation/devicetree/bindings/net/renesas,*.txt
@@ -14561,8 +14571,8 @@ RENESAS R-CAR THERMAL DRIVERS
 M:     Niklas Söderlund <niklas.soderlund@ragnatech.se>
 L:     linux-renesas-soc@vger.kernel.org
 S:     Supported
-F:     Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt
-F:     Documentation/devicetree/bindings/thermal/rcar-thermal.txt
+F:     Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.yaml
+F:     Documentation/devicetree/bindings/thermal/rcar-thermal.yaml
 F:     drivers/thermal/rcar_gen3_thermal.c
 F:     drivers/thermal/rcar_thermal.c
 
@@ -14818,7 +14828,7 @@ S:      Maintained
 F:     drivers/video/fbdev/savage/
 
 S390
-M:     Heiko Carstens <heiko.carstens@de.ibm.com>
+M:     Heiko Carstens <hca@linux.ibm.com>
 M:     Vasily Gorbik <gor@linux.ibm.com>
 M:     Christian Borntraeger <borntraeger@de.ibm.com>
 L:     linux-s390@vger.kernel.org
@@ -14849,7 +14859,7 @@ F:      drivers/s390/block/dasd*
 F:     include/linux/dasd_mod.h
 
 S390 IOMMU (PCI)
-M:     Gerald Schaefer <gerald.schaefer@de.ibm.com>
+M:     Gerald Schaefer <gerald.schaefer@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -14877,7 +14887,7 @@ F:      drivers/s390/net/
 
 S390 PCI SUBSYSTEM
 M:     Niklas Schnelle <schnelle@linux.ibm.com>
-M:     Gerald Schaefer <gerald.schaefer@de.ibm.com>
+M:     Gerald Schaefer <gerald.schaefer@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -16045,8 +16055,10 @@ SPARSE CHECKER
 M:     "Luc Van Oostenryck" <luc.vanoostenryck@gmail.com>
 L:     linux-sparse@vger.kernel.org
 S:     Maintained
-W:     https://sparse.wiki.kernel.org/
+W:     https://sparse.docs.kernel.org/
 T:     git git://git.kernel.org/pub/scm/devel/sparse/sparse.git
+Q:     https://patchwork.kernel.org/project/linux-sparse/list/
+B:     https://bugzilla.kernel.org/enter_bug.cgi?component=Sparse&product=Tools
 F:     include/linux/compiler.h
 
 SPEAR CLOCK FRAMEWORK SUPPORT
@@ -16093,7 +16105,7 @@ SPIDERNET NETWORK DRIVER for CELL
 M:     Ishizaki Kou <kou.ishizaki@toshiba.co.jp>
 L:     netdev@vger.kernel.org
 S:     Supported
-F:     Documentation/networking/device_drivers/toshiba/spider_net.rst
+F:     Documentation/networking/device_drivers/ethernet/toshiba/spider_net.rst
 F:     drivers/net/ethernet/toshiba/spider_net*
 
 SPMI SUBSYSTEM
@@ -16251,13 +16263,6 @@ M:     Forest Bond <forest@alittletooquiet.net>
 S:     Odd Fixes
 F:     drivers/staging/vt665?/
 
-STAGING - WILC1000 WIFI DRIVER
-M:     Adham Abozaeid <adham.abozaeid@microchip.com>
-M:     Ajay Singh <ajay.kathat@microchip.com>
-L:     linux-wireless@vger.kernel.org
-S:     Supported
-F:     drivers/staging/wilc1000/
-
 STAGING SUBSYSTEM
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:     devel@driverdev.osuosl.org
@@ -16320,7 +16325,7 @@ M:      Jose Abreu <joabreu@synopsys.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 W:     http://www.stlinux.com
-F:     Documentation/networking/device_drivers/stmicro/
+F:     Documentation/networking/device_drivers/ethernet/stmicro/
 F:     drivers/net/ethernet/stmicro/stmmac/
 
 SUN3/3X
@@ -16759,7 +16764,7 @@ F:      include/media/i2c/tw9910.h
 
 TEE SUBSYSTEM
 M:     Jens Wiklander <jens.wiklander@linaro.org>
-L:     tee-dev@lists.linaro.org
+L:     op-tee@lists.trustedfirmware.org
 S:     Maintained
 F:     Documentation/tee.txt
 F:     drivers/tee/
@@ -17208,7 +17213,7 @@ M:      Samuel Chessman <chessman@tux.org>
 L:     tlan-devel@lists.sourceforge.net (subscribers-only)
 S:     Maintained
 W:     http://sourceforge.net/projects/tlan/
-F:     Documentation/networking/device_drivers/ti/tlan.rst
+F:     Documentation/networking/device_drivers/ethernet/ti/tlan.rst
 F:     drivers/net/ethernet/ti/tlan.*
 
 TM6000 VIDEO4LINUX DRIVER
@@ -18254,14 +18259,6 @@ S:     Maintained
 F:     drivers/input/serio/userio.c
 F:     include/uapi/linux/userio.h
 
-VITESSE FELIX ETHERNET SWITCH DRIVER
-M:     Vladimir Oltean <vladimir.oltean@nxp.com>
-M:     Claudiu Manoil <claudiu.manoil@nxp.com>
-L:     netdev@vger.kernel.org
-S:     Maintained
-F:     drivers/net/dsa/ocelot/*
-F:     net/dsa/tag_ocelot.c
-
 VIVID VIRTUAL VIDEO DRIVER
 M:     Hans Verkuil <hverkuil@xs4all.nl>
 L:     linux-media@vger.kernel.org
@@ -18908,7 +18905,7 @@ L:      linux-hams@vger.kernel.org
 S:     Maintained
 W:     http://yaina.de/jreuter/
 W:     http://www.qsl.net/dl1bke/
-F:     Documentation/networking/z8530drv.rst
+F:     Documentation/networking/device_drivers/hamradio/z8530drv.rst
 F:     drivers/net/hamradio/*scc.c
 F:     drivers/net/hamradio/z8530.h
 
index ae5d822..d653f59 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc4
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
@@ -448,6 +448,7 @@ OBJSIZE             = $(CROSS_COMPILE)size
 STRIP          = $(CROSS_COMPILE)strip
 endif
 PAHOLE         = pahole
+RESOLVE_BTFIDS = $(objtree)/tools/bpf/resolve_btfids/resolve_btfids
 LEX            = flex
 YACC           = bison
 AWK            = awk
@@ -510,7 +511,7 @@ GCC_PLUGINS_CFLAGS :=
 CLANG_FLAGS :=
 
 export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC
-export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE LEX YACC AWK INSTALLKERNEL
+export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL
 export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
 export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ
 export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
@@ -828,7 +829,7 @@ endif
 
 ifdef CONFIG_DEBUG_INFO_COMPRESSED
 DEBUG_CFLAGS   += -gz=zlib
-KBUILD_AFLAGS  += -Wa,--compress-debug-sections=zlib
+KBUILD_AFLAGS  += -gz=zlib
 KBUILD_LDFLAGS += --compress-debug-sections=zlib
 endif
 
@@ -970,8 +971,8 @@ LDFLAGS_vmlinux     += --pack-dyn-relocs=relr
 endif
 
 # Align the bit size of userspace programs with the kernel
-KBUILD_USERCFLAGS  += $(filter -m32 -m64, $(KBUILD_CFLAGS))
-KBUILD_USERLDFLAGS += $(filter -m32 -m64, $(KBUILD_CFLAGS))
+KBUILD_USERCFLAGS  += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
+KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS))
 
 # make the checker run with the right architecture
 CHECKFLAGS += --arch=$(ARCH)
@@ -1053,9 +1054,10 @@ export mod_sign_cmd
 
 HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
 
+has_libelf = $(call try-run,\
+               echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
+
 ifdef CONFIG_STACK_VALIDATION
-  has_libelf := $(call try-run,\
-               echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
@@ -1064,6 +1066,14 @@ ifdef CONFIG_STACK_VALIDATION
   endif
 endif
 
+ifdef CONFIG_DEBUG_INFO_BTF
+  ifeq ($(has_libelf),1)
+    resolve_btfids_target := tools/bpf/resolve_btfids FORCE
+  else
+    ERROR_RESOLVE_BTFIDS := 1
+  endif
+endif
+
 PHONY += prepare0
 
 export MODORDER := $(extmod-prefix)modules.order
@@ -1175,7 +1185,7 @@ prepare0: archprepare
        $(Q)$(MAKE) $(build)=.
 
 # All the preparing..
-prepare: prepare0 prepare-objtool
+prepare: prepare0 prepare-objtool prepare-resolve_btfids
 
 # Support for using generic headers in asm-generic
 asm-generic := -f $(srctree)/scripts/Makefile.asm-generic obj
@@ -1188,7 +1198,7 @@ uapi-asm-generic:
        $(Q)$(MAKE) $(asm-generic)=arch/$(SRCARCH)/include/generated/uapi/asm \
        generic=include/uapi/asm-generic
 
-PHONY += prepare-objtool
+PHONY += prepare-objtool prepare-resolve_btfids
 prepare-objtool: $(objtool_target)
 ifeq ($(SKIP_STACK_VALIDATION),1)
 ifdef CONFIG_UNWINDER_ORC
@@ -1199,6 +1209,11 @@ else
 endif
 endif
 
+prepare-resolve_btfids: $(resolve_btfids_target)
+ifeq ($(ERROR_RESOLVE_BTFIDS),1)
+       @echo "error: Cannot resolve BTF IDs for CONFIG_DEBUG_INFO_BTF, please install libelf-dev, libelf-devel or elfutils-libelf-devel" >&2
+       @false
+endif
 # Generate some files
 # ---------------------------------------------------------------------------
 
@@ -1336,16 +1351,6 @@ dt_binding_check: scripts_dtc
 # ---------------------------------------------------------------------------
 # Modules
 
-# install modules.builtin regardless of CONFIG_MODULES
-PHONY += _builtin_inst_
-_builtin_inst_:
-       @mkdir -p $(MODLIB)/
-       @cp -f modules.builtin $(MODLIB)/
-       @cp -f $(objtree)/modules.builtin.modinfo $(MODLIB)/
-
-PHONY += install
-install: _builtin_inst_
-
 ifdef CONFIG_MODULES
 
 # By default, build modules as well
@@ -1389,7 +1394,7 @@ PHONY += modules_install
 modules_install: _modinst_ _modinst_post
 
 PHONY += _modinst_
-_modinst_: _builtin_inst_
+_modinst_:
        @rm -rf $(MODLIB)/kernel
        @rm -f $(MODLIB)/source
        @mkdir -p $(MODLIB)/kernel
@@ -1399,6 +1404,8 @@ _modinst_: _builtin_inst_
                ln -s $(CURDIR) $(MODLIB)/build ; \
        fi
        @sed 's:^:kernel/:' modules.order > $(MODLIB)/modules.order
+       @cp -f modules.builtin $(MODLIB)/
+       @cp -f $(objtree)/modules.builtin.modinfo $(MODLIB)/
        $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst
 
 # This depmod is only for convenience to give the initial
index fddc700..197896c 100644 (file)
@@ -170,6 +170,15 @@ config ARC_CPU_HS
 
 endchoice
 
+config ARC_TUNE_MCPU
+       string "Override default -mcpu compiler flag"
+       default ""
+       help
+         Override default -mcpu=xxx compiler flag (which is set depending on
+         the ISA version) with the specified value.
+         NOTE: If specified flag isn't supported by current compiler the
+         ISA default value will be used as a fallback.
+
 config CPU_BIG_ENDIAN
        bool "Enable Big Endian Mode"
        help
@@ -465,6 +474,12 @@ config ARC_IRQ_NO_AUTOSAVE
          This is programmable and can be optionally disabled in which case
          software INTERRUPT_PROLOGUE/EPILGUE do the needed work
 
+config ARC_LPB_DISABLE
+       bool "Disable loop buffer (LPB)"
+       help
+         On HS cores, loop buffer (LPB) is programmable in runtime and can
+         be optionally disabled.
+
 endif # ISA_ARCV2
 
 endmenu   # "ARC CPU Configuration"
index 20e9ab6..d00f8b8 100644 (file)
@@ -10,8 +10,25 @@ CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux-)
 endif
 
 cflags-y       += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
-cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
-cflags-$(CONFIG_ISA_ARCV2)     += -mcpu=hs38
+
+tune-mcpu-def-$(CONFIG_ISA_ARCOMPACT)  := -mcpu=arc700
+tune-mcpu-def-$(CONFIG_ISA_ARCV2)      := -mcpu=hs38
+
+ifeq ($(CONFIG_ARC_TUNE_MCPU),"")
+cflags-y                               += $(tune-mcpu-def-y)
+else
+tune-mcpu                              := $(shell echo $(CONFIG_ARC_TUNE_MCPU))
+tune-mcpu-ok                           := $(call cc-option-yn, $(tune-mcpu))
+ifeq ($(tune-mcpu-ok),y)
+cflags-y                               += $(tune-mcpu)
+else
+# The flag provided by 'CONFIG_ARC_TUNE_MCPU' option isn't known by this compiler
+# (probably the compiler is too old). Use ISA default mcpu flag instead as a safe option.
+$(warning ** WARNING ** CONFIG_ARC_TUNE_MCPU flag '$(tune-mcpu)' is unknown, fallback to '$(tune-mcpu-def-y)')
+cflags-y                               += $(tune-mcpu-def-y)
+endif
+endif
+
 
 ifdef CONFIG_ARC_CURR_IN_REG
 # For a global register defintion, make sure it gets passed to every file
index c77a0e3..0284ace 100644 (file)
@@ -19,7 +19,7 @@
 #define  R_ARC_32_PCREL                0x31
 
 /*to set parameters in the core dumps */
-#define ELF_ARCH               EM_ARCOMPACT
+#define ELF_ARCH               EM_ARC_INUSE
 #define ELF_CLASS              ELFCLASS32
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
index 7fc73fe..863d63a 100644 (file)
@@ -90,6 +90,9 @@ static inline void arch_local_irq_restore(unsigned long flags)
 /*
  * Unconditionally Enable IRQs
  */
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+extern void arch_local_irq_enable(void);
+#else
 static inline void arch_local_irq_enable(void)
 {
        unsigned long temp;
@@ -102,7 +105,7 @@ static inline void arch_local_irq_enable(void)
        : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
        : "cc", "memory");
 }
-
+#endif
 
 /*
  * Unconditionally Disable IRQs
index 60406ec..ea00c8a 100644 (file)
@@ -165,7 +165,6 @@ END(EV_Extension)
 tracesys:
        ; save EFA in case tracer wants the PC of traced task
        ; using ERET won't work since next-PC has already committed
-       lr  r12, [efa]
        GET_CURR_TASK_FIELD_PTR   TASK_THREAD, r11
        st  r12, [r11, THREAD_FAULT_ADDR]       ; thread.fault_address
 
@@ -208,15 +207,9 @@ tracesys_exit:
 ; Breakpoint TRAP
 ; ---------------------------------------------
 trap_with_param:
-
-       ; stop_pc info by gdb needs this info
-       lr  r0, [efa]
+       mov r0, r12     ; EFA in case ptracer/gdb wants stop_pc
        mov r1, sp
 
-       ; Now that we have read EFA, it is safe to do "fake" rtie
-       ;   and get out of CPU exception mode
-       FAKE_RET_FROM_EXCPN
-
        ; Save callee regs in case gdb wants to have a look
        ; SP will grow up by size of CALLEE Reg-File
        ; NOTE: clobbers r12
@@ -243,6 +236,10 @@ ENTRY(EV_Trap)
 
        EXCEPTION_PROLOGUE
 
+       lr  r12, [efa]
+
+       FAKE_RET_FROM_EXCPN
+
        ;============ TRAP 1   :breakpoints
        ; Check ECR for trap with arg (PROLOGUE ensures r10 has ECR)
        bmsk.f 0, r10, 7
@@ -250,9 +247,6 @@ ENTRY(EV_Trap)
 
        ;============ TRAP  (no param): syscall top level
 
-       ; First return from Exception to pure K mode (Exception/IRQs renabled)
-       FAKE_RET_FROM_EXCPN
-
        ; If syscall tracing ongoing, invoke pre-post-hooks
        GET_CURR_THR_INFO_FLAGS   r10
        btst r10, TIF_SYSCALL_TRACE
index 6eb23f1..17fd1ed 100644 (file)
        bclr    r5, r5, STATUS_AD_BIT
 #endif
        kflag   r5
+
+#ifdef CONFIG_ARC_LPB_DISABLE
+       lr      r5, [ARC_REG_LPB_BUILD]
+       breq    r5, 0, 1f               ; LPB doesn't exist
+       mov     r5, 1
+       sr      r5, [ARC_REG_LPB_CTRL]
+1:
+#endif /* CONFIG_ARC_LPB_DISABLE */
 #endif
        ; Config DSP_CTRL properly, so kernel may use integer multiply,
        ; multiply-accumulate, and divide operations
index dad8a65..41f07b3 100644 (file)
@@ -58,10 +58,12 @@ static const struct id_to_str arc_legacy_rel[] = {
        { 0x00,         NULL   }
 };
 
-static const struct id_to_str arc_cpu_rel[] = {
+static const struct id_to_str arc_hs_ver54_rel[] = {
        /* UARCH.MAJOR, Release */
        {  0,           "R3.10a"},
        {  1,           "R3.50a"},
+       {  2,           "R3.60a"},
+       {  3,           "R4.00a"},
        {  0xFF,        NULL   }
 };
 
@@ -117,12 +119,6 @@ static void decode_arc_core(struct cpuinfo_arc *cpu)
        struct bcr_uarch_build_arcv2 uarch;
        const struct id_to_str *tbl;
 
-       /*
-        * Up until (including) the first core4 release (0x54) things were
-        * simple: AUX IDENTITY.ARCVER was sufficient to identify arc family
-        * and release: 0x50 to 0x53 was HS38, 0x54 was HS48 (dual issue)
-        */
-
        if (cpu->core.family < 0x54) { /* includes arc700 */
 
                for (tbl = &arc_legacy_rel[0]; tbl->id != 0; tbl++) {
@@ -143,11 +139,10 @@ static void decode_arc_core(struct cpuinfo_arc *cpu)
        }
 
        /*
-        * However the subsequent HS release (same 0x54) allow HS38 or HS48
-        * configurations and encode this info in a different BCR.
-        * The BCR was introduced in 0x54 so can't be read unconditionally.
+        * Initial HS cores bumped AUX IDENTITY.ARCVER for each release until
+        * ARCVER 0x54 which introduced AUX MICRO_ARCH_BUILD and subsequent
+        * releases only update it.
         */
-
        READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch);
 
        if (uarch.prod == 4) {
@@ -158,7 +153,7 @@ static void decode_arc_core(struct cpuinfo_arc *cpu)
                cpu->name = "HS38";
        }
 
-       for (tbl = &arc_cpu_rel[0]; tbl->id != 0xFF; tbl++) {
+       for (tbl = &arc_hs_ver54_rel[0]; tbl->id != 0xFF; tbl++) {
                if (uarch.maj == tbl->id) {
                        cpu->release = tbl->str;
                        break;
index 05e7b5d..04f0b12 100644 (file)
 &mmc2 {
        status = "okay";
        vmmc-supply = <&wl12xx_vmmc>;
-       ti,non-removable;
+       non-removable;
        bus-width = <4>;
        cap-power-off-card;
        pinctrl-names = "default";
index 91f93bc..dd93222 100644 (file)
@@ -22,6 +22,7 @@
        pinctrl-0 = <&emmc_pins>;
        bus-width = <8>;
        status = "okay";
+       non-removable;
 };
 
 &am33xx_pinmux {
index 3124d94..e07dd79 100644 (file)
@@ -75,7 +75,6 @@
        bus-width = <4>;
        non-removable;
        cap-power-off-card;
-       ti,needs-special-hs-handling;
        keep-power-in-suspend;
        pinctrl-names = "default";
        pinctrl-0 = <&mmc3_pins &wl18xx_pins>;
index 5811fb8..83f9452 100644 (file)
        bus-width = <4>;
        non-removable;
        cap-power-off-card;
-       ti,needs-special-hs-handling;
        keep-power-in-suspend;
        pinctrl-names = "default";
        pinctrl-0 = <&mmc3_pins &wl18xx_pins>;
index 4092cd1..609c8db 100644 (file)
@@ -75,7 +75,6 @@
        bus-width = <4>;
        non-removable;
        cap-power-off-card;
-       ti,needs-special-hs-handling;
        keep-power-in-suspend;
        pinctrl-names = "default";
        pinctrl-0 = <&mmc3_pins &wl18xx_pins>;
index 68252da..a4fc6b1 100644 (file)
        bus-width = <4>;
        pinctrl-names = "default";
        pinctrl-0 = <&mmc3_pins &wlan_pins>;
-       ti,non-removable;
-       ti,needs-special-hs-handling;
+       non-removable;
        cap-power-off-card;
        keep-power-in-suspend;
 
index 32f515a..78b6e1f 100644 (file)
 &mmc2 {
        status = "okay";
        vmmc-supply = <&wl12xx_vmmc>;
-       ti,non-removable;
+       non-removable;
        bus-width = <4>;
        cap-power-off-card;
        keep-power-in-suspend;
index fef5828..dbedf72 100644 (file)
        pinctrl-0 = <&emmc_pins>;
        vmmc-supply = <&vmmcsd_fixed>;
        bus-width = <8>;
-       ti,non-removable;
+       non-removable;
        status = "okay";
 };
 
index 6495a12..4e90f9c 100644 (file)
        vmmc-supply = <&vmmcsd_fixed>;
        bus-width = <8>;
        pinctrl-0 = <&mmc1_pins_default>;
-       ti,non-removable;
+       non-removable;
        status = "okay";
 };
 
index 244df9c..f03e72c 100644 (file)
        vmmc-supply = <&vmmcsd_fixed>;
        bus-width = <8>;
        pinctrl-0 = <&mmc2_pins_default>;
-       ti,non-removable;
+       non-removable;
        status = "okay";
 };
 
index 6d7608d..f9a027b 100644 (file)
        pinctrl-0 = <&emmc_pins>;
        vmmc-supply = <&ldo3_reg>;
        bus-width = <8>;
-       ti,non-removable;
+       non-removable;
 };
 
 &mmc3 {
        pinctrl-0 = <&wireless_pins>;
        vmmmc-supply = <&v3v3c_reg>;
        bus-width = <4>;
-       ti,non-removable;
+       non-removable;
        dmas = <&edma_xbar 12 0 1
                &edma_xbar 13 0 2>;
        dma-names = "tx", "rx";
index 3d0672b..7e46b4c 100644 (file)
@@ -69,7 +69,7 @@
        pinctrl-0 = <&emmc_pins>;
        vmmc-supply = <&vmmc_reg>;
        bus-width = <8>;
-       ti,non-removable;
+       non-removable;
        status = "disabled";
 };
 
index 4da7190..f0b2222 100644 (file)
@@ -88,7 +88,6 @@
                        AM33XX_PADCONF(AM335X_PIN_MMC0_DAT3, PIN_INPUT_PULLUP, MUX_MODE0)
                        AM33XX_PADCONF(AM335X_PIN_MMC0_CMD, PIN_INPUT_PULLUP, MUX_MODE0)
                        AM33XX_PADCONF(AM335X_PIN_MMC0_CLK, PIN_INPUT_PULLUP, MUX_MODE0)
-                       AM33XX_PADCONF(AM335X_PIN_MCASP0_ACLKR, PIN_INPUT, MUX_MODE4)           /* (B12) mcasp0_aclkr.mmc0_sdwp */
                >;
        };
 
index 7ff11d6..a9cbefc 100644 (file)
                        ranges = <0x0 0x60000 0x1000>;
 
                        mmc1: mmc@0 {
-                               compatible = "ti,omap4-hsmmc";
-                               ti,dual-volt;
+                               compatible = "ti,am335-sdhci";
                                ti,needs-special-reset;
-                               ti,needs-special-hs-handling;
                                dmas = <&edma_xbar 24 0 0
                                        &edma_xbar 25 0 0>;
                                dma-names = "tx", "rx";
                        ranges = <0x0 0xd8000 0x1000>;
 
                        mmc2: mmc@0 {
-                               compatible = "ti,omap4-hsmmc";
+                               compatible = "ti,am335-sdhci";
                                ti,needs-special-reset;
                                dmas = <&edma 2 0
                                        &edma 3 0>;
index 3b177c9..5fdce10 100644 (file)
                        ranges = <0x0 0x47810000 0x1000>;
 
                        mmc3: mmc@0 {
-                               compatible = "ti,omap4-hsmmc";
+                               compatible = "ti,am335-sdhci";
                                ti,needs-special-reset;
                                interrupts = <29>;
                                reg = <0x0 0x1000>;
+                               status = "disabled";
                        };
                };
 
                              <0x47400010 0x4>;
                        reg-names = "rev", "sysc";
                        ti,sysc-mask = <(SYSC_OMAP4_FREEEMU |
-                                        SYSC_OMAP2_SOFTRESET)>;
+                                        SYSC_OMAP4_SOFTRESET)>;
                        ti,sysc-midle = <SYSC_IDLE_FORCE>,
                                        <SYSC_IDLE_NO>,
                                        <SYSC_IDLE_SMART>;
                        clock-names = "fck";
                        #address-cells = <1>;
                        #size-cells = <1>;
-                       ranges = <0x0 0x47400000 0x5000>;
+                       ranges = <0x0 0x47400000 0x8000>;
 
                        usb0_phy: usb-phy@1300 {
                                compatible = "ti,am335x-usb-phy";
index b4861f7..51ad9e8 100644 (file)
                        ranges = <0x0 0x47810000 0x1000>;
 
                        mmc3: mmc@0 {
-                               compatible = "ti,omap4-hsmmc";
+                               compatible = "ti,am437-sdhci";
                                ti,needs-special-reset;
                                interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
                                reg = <0x0 0x1000>;
+                               status = "disabled";
                        };
                };
 
index 063113a..a6b4fca 100644 (file)
        pinctrl-0 = <&emmc_pins>;
        vmmc-supply = <&vmmc_3v3>;
        bus-width = <8>;
-       ti,non-removable;
+       non-removable;
 };
 
 &spi0 {
index d692e3b..7737863 100644 (file)
 
                backlight = <&lcd_bl>;
 
-               panel-timing {
-                       clock-frequency = <33000000>;
-                       hactive = <800>;
-                       vactive = <480>;
-                       hfront-porch = <210>;
-                       hback-porch = <16>;
-                       hsync-len = <30>;
-                       vback-porch = <10>;
-                       vfront-porch = <22>;
-                       vsync-len = <13>;
-                       hsync-active = <0>;
-                       vsync-active = <0>;
-                       de-active = <1>;
-                       pixelclk-active = <1>;
-               };
-
                port {
                        lcd_in: endpoint {
                                remote-endpoint = <&dpi_out>;
        pinctrl-names = "default", "sleep";
        pinctrl-0 = <&emmc_pins_default>;
        pinctrl-1 = <&emmc_pins_sleep>;
-       ti,non-removable;
+       non-removable;
 };
 
 &mmc3 {
        pinctrl-1 = <&mmc3_pins_sleep>;
        cap-power-off-card;
        keep-power-in-suspend;
-       ti,non-removable;
+       non-removable;
 
        #address-cells = <1>;
        #size-cells = <0>;
index 0d0f9fe..7d19395 100644 (file)
                        ranges = <0x0 0x60000 0x1000>;
 
                        mmc1: mmc@0 {
-                               compatible = "ti,omap4-hsmmc";
+                               compatible = "ti,am437-sdhci";
                                reg = <0x0 0x1000>;
-                               ti,dual-volt;
                                ti,needs-special-reset;
                                dmas = <&edma 24 0>,
                                        <&edma 25 0>;
                        ranges = <0x0 0xd8000 0x1000>;
 
                        mmc2: mmc@0 {
-                               compatible = "ti,omap4-hsmmc";
+                               compatible = "ti,am437-sdhci";
                                reg = <0x0 0x1000>;
                                ti,needs-special-reset;
                                dmas = <&edma 2 0>,
index 4d5a7ca..08eabf0 100644 (file)
 
                enable-gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
 
-               panel-timing {
-                       clock-frequency = <9000000>;
-                       hactive = <480>;
-                       vactive = <272>;
-                       hfront-porch = <2>;
-                       hback-porch = <2>;
-                       hsync-len = <41>;
-                       vfront-porch = <2>;
-                       vback-porch = <2>;
-                       vsync-len = <10>;
-                       hsync-active = <0>;
-                       vsync-active = <0>;
-                       de-active = <1>;
-                       pixelclk-active = <1>;
-               };
-
                port {
                        lcd_in: endpoint {
                                remote-endpoint = <&dpi_out>;
        pinctrl-1 = <&mmc3_pins_sleep>;
        cap-power-off-card;
        keep-power-in-suspend;
-       ti,non-removable;
+       non-removable;
 
        #address-cells = <1>;
        #size-cells = <0>;
index 27259fd..7d4e0df 100644 (file)
 
                backlight = <&lcd_bl>;
 
-               panel-timing {
-                       clock-frequency = <33000000>;
-                       hactive = <800>;
-                       vactive = <480>;
-                       hfront-porch = <210>;
-                       hback-porch = <16>;
-                       hsync-len = <30>;
-                       vback-porch = <10>;
-                       vfront-porch = <22>;
-                       vsync-len = <13>;
-                       hsync-active = <0>;
-                       vsync-active = <0>;
-                       de-active = <1>;
-                       pixelclk-active = <1>;
-               };
-
                port {
                        lcd_in: endpoint {
                                remote-endpoint = <&dpi_out>;
index 9877d77..4c51c6b 100644 (file)
 
 &cpsw_emac0 {
        phy-handle = <&phy0>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-rxid";
 };
 
 &ocp {
index da6d70f..3175266 100644 (file)
                        status = "disabled";
                };
 
-               dma@20000 {
+               dma: dma@20000 {
                        compatible = "arm,pl330", "arm,primecell";
                        reg = <0x20000 0x1000>;
                        interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
                        clocks = <&iprocslow>;
                        clock-names = "apb_pclk";
                        #dma-cells = <1>;
+                       dma-coherent;
+                       status = "disabled";
                };
 
                sdio: sdhci@21000 {
                        status = "disabled";
                };
 
-               mailbox: mailbox@25000 {
+               mailbox: mailbox@25c00 {
                        compatible = "brcm,iproc-fa2-mbox";
-                       reg = <0x25000 0x445>;
-                       interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
+                       reg = <0x25c00 0x400>;
+                       interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
                        #mbox-cells = <1>;
                        brcm,rx-status-len = <32>;
                        brcm,use-bcm-hdr;
index 3343253..29bbecd 100644 (file)
@@ -17,6 +17,7 @@
        };
 
        memory {
+               device_type = "memory";
                reg = <0x00000000 0x08000000
                       0x88000000 0x18000000>;
        };
index 8c388eb..7be4c4e 100644 (file)
 
 /* USB 3 support needed to be complete */
 
+&dma {
+       status = "okay";
+};
+
 &amac0 {
        status = "okay";
 };
index c339771..e58ed7e 100644 (file)
 
 /* USB 3 support needed to be complete */
 
+&dma {
+       status = "okay";
+};
+
 &amac0 {
        status = "okay";
 };
index 1c72ec8..716da62 100644 (file)
 
 /* XHCI support needed to be complete */
 
+&dma {
+       status = "okay";
+};
+
 &amac0 {
        status = "okay";
 };
index 96a021c..a49c2fd 100644 (file)
 
 /* USB 3 and SLIC support needed to be complete */
 
+&dma {
+       status = "okay";
+};
+
 &amac0 {
        status = "okay";
 };
index b2c7f21..dd6dff6 100644 (file)
 
 /* USB 3 and SLIC support needed to be complete */
 
+&dma {
+       status = "okay";
+};
+
 &amac0 {
        status = "okay";
 };
index 536fb24..a71371b 100644 (file)
        status = "okay";
 };
 
+&dma {
+       status = "okay";
+};
+
 &amac0 {
        status = "okay";
 };
index 3fcca12..7b84b54 100644 (file)
        };
 };
 
+&dma {
+       status = "okay";
+};
+
 &amac0 {
        status = "okay";
 };
index f89a64c..2cf6a52 100644 (file)
        rx-num-evt = <32>;
 };
 
-&mailbox5 {
-       status = "okay";
-       mbox_ipu1_ipc3x: mbox_ipu1_ipc3x {
-               status = "okay";
-       };
-       mbox_dsp1_ipc3x: mbox_dsp1_ipc3x {
-               status = "okay";
-       };
-};
-
-&mailbox6 {
-       status = "okay";
-       mbox_ipu2_ipc3x: mbox_ipu2_ipc3x {
-               status = "okay";
-       };
-       mbox_dsp2_ipc3x: mbox_dsp2_ipc3x {
-               status = "okay";
-       };
-};
-
 &pcie1_rc {
        status = "okay";
 };
index 62ca895..0c6f266 100644 (file)
                                        <SYSC_IDLE_SMART>,
                                        <SYSC_IDLE_SMART_WKUP>;
                        /* Domains (P, C): l4per_pwrdm, l4per_clkdm */
-                       clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 0>,
-                                <&timer_sys_clk_div>;
-                       clock-names = "fck", "timer_sys_ck";
+                       clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 0>;
+                       clock-names = "fck";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0x36000 0x1000>;
                                        <SYSC_IDLE_SMART>,
                                        <SYSC_IDLE_SMART_WKUP>;
                        /* Domains (P, C): ipu_pwrdm, ipu_clkdm */
-                       clocks = <&ipu_clkctrl DRA7_IPU_TIMER5_CLKCTRL 0>, <&timer_sys_clk_div>;
-                       clock-names = "fck", "timer_sys_ck";
+                       clocks = <&ipu_clkctrl DRA7_IPU_TIMER5_CLKCTRL 0>;
+                       clock-names = "fck";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0x20000 0x1000>;
                        timer5: timer@0 {
                                compatible = "ti,omap5430-timer";
                                reg = <0x0 0x80>;
-                               clocks = <&ipu_clkctrl DRA7_IPU_TIMER5_CLKCTRL 24>;
-                               clock-names = "fck";
+                               clocks = <&ipu_clkctrl DRA7_IPU_TIMER5_CLKCTRL 24>, <&timer_sys_clk_div>;
+                               clock-names = "fck", "timer_sys_ck";
                                interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
                        };
                };
                                        <SYSC_IDLE_SMART>,
                                        <SYSC_IDLE_SMART_WKUP>;
                        /* Domains (P, C): ipu_pwrdm, ipu_clkdm */
-                       clocks = <&ipu_clkctrl DRA7_IPU_TIMER6_CLKCTRL 0>,
-                                <&timer_sys_clk_div>;
-                       clock-names = "fck", "timer_sys_ck";
+                       clocks = <&ipu_clkctrl DRA7_IPU_TIMER6_CLKCTRL 0>;
+                       clock-names = "fck";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0x22000 0x1000>;
                        timer6: timer@0 {
                                compatible = "ti,omap5430-timer";
                                reg = <0x0 0x80>;
-                               clocks = <&ipu_clkctrl DRA7_IPU_TIMER6_CLKCTRL 24>;
-                               clock-names = "fck";
+                               clocks = <&ipu_clkctrl DRA7_IPU_TIMER6_CLKCTRL 24>, <&timer_sys_clk_div>;
+                               clock-names = "fck", "timer_sys_ck";
                                interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
                        };
                };
                        timer14: timer@0 {
                                compatible = "ti,omap5430-timer";
                                reg = <0x0 0x80>;
-                               clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER14_CLKCTRL 24>;
-                               clock-names = "fck";
+                               clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER14_CLKCTRL 24>, <&timer_sys_clk_div>;
+                               clock-names = "fck", "timer_sys_ck";
                                interrupts = <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>;
                                ti,timer-pwm;
                        };
                        timer15: timer@0 {
                                compatible = "ti,omap5430-timer";
                                reg = <0x0 0x80>;
-                               clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER15_CLKCTRL 24>;
-                               clock-names = "fck";
+                               clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER15_CLKCTRL 24>, <&timer_sys_clk_div>;
+                               clock-names = "fck", "timer_sys_ck";
                                interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
                                ti,timer-pwm;
                        };
                        timer16: timer@0 {
                                compatible = "ti,omap5430-timer";
                                reg = <0x0 0x80>;
-                               clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER16_CLKCTRL 24>;
-                               clock-names = "fck";
+                               clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER16_CLKCTRL 24>, <&timer_sys_clk_div>;
+                               clock-names = "fck", "timer_sys_ck";
                                interrupts = <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>;
                                ti,timer-pwm;
                        };
index f05e918..53a25fb 100644 (file)
        status = "okay";
 };
 
-&wdog1 {
-       pinctrl-names = "default";
-       pinctrl-0 = <&pinctrl_wdog>;
-       fsl,ext-reset-output;
-       status = "okay";
-};
-
 &iomuxc {
        pinctrl-0 = <&pinctrl_reset_out &pinctrl_gpio>;
 
                        MX6UL_PAD_NAND_DATA03__USDHC2_DATA3     0x170f9
                >;
        };
-
-       pinctrl_wdog: wdoggrp {
-               fsl,pins = <
-                       MX6UL_PAD_GPIO1_IO09__WDOG1_WDOG_ANY    0x30b0
-               >;
-       };
 };
index a17af4d..61ba21a 100644 (file)
        status = "okay";
 };
 
+&wdog1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_wdog>;
+       fsl,ext-reset-output;
+       status = "okay";
+};
+
 &iomuxc {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_reset_out>;
                        MX6UL_PAD_SNVS_TAMPER9__GPIO5_IO09      0x1b0b0
                >;
        };
+
+       pinctrl_wdog: wdoggrp {
+               fsl,pins = <
+                       MX6UL_PAD_GPIO1_IO09__WDOG1_WDOG_ANY    0x18b0
+               >;
+       };
 };
index e39eee6..08a7d3c 100644 (file)
                #interrupt-cells = <2>;
                #address-cells = <1>;
                #size-cells = <0>;
-               spi-max-frequency = <3000000>;
+               spi-max-frequency = <9600000>;
                spi-cs-high;
+               spi-cpol;
+               spi-cpha;
 
                cpcap_adc: adc {
                        compatible = "motorola,mapphone-cpcap-adc";
index 8047e8c..4548d87 100644 (file)
        ethernet@gpmc {
                reg = <5 0 0xff>;
                interrupt-parent = <&gpio2>;
-               interrupts = <12 IRQ_TYPE_EDGE_FALLING>;        /* gpio_44 */
+               interrupts = <12 IRQ_TYPE_LEVEL_LOW>;           /* gpio_44 */
 
                phy-mode = "mii";
 
index 6c2b07f..4400f5f 100644 (file)
        ti,no-idle;
        timer@0 {
                assigned-clocks = <&l4_wkup_clkctrl OMAP4_TIMER1_CLKCTRL 24>;
-               assigned-clock-parents = <&sys_clkin_ck>;
+               assigned-clock-parents = <&sys_32k_ck>;
        };
 };
index e6308fb..a88ee52 100644 (file)
                };
        };
 
-       mcc {
-               compatible = "arm,vexpress,config-bus";
-               arm,vexpress,config-bridge = <&v2m_sysreg>;
-
-               oscclk0 {
-                       /* MCC static memory clock */
-                       compatible = "arm,vexpress-osc";
-                       arm,vexpress-sysreg,func = <1 0>;
-                       freq-range = <25000000 60000000>;
-                       #clock-cells = <0>;
-                       clock-output-names = "v2m:oscclk0";
-               };
-
-               v2m_oscclk1: oscclk1 {
-                       /* CLCD clock */
-                       compatible = "arm,vexpress-osc";
-                       arm,vexpress-sysreg,func = <1 1>;
-                       freq-range = <23750000 65000000>;
-                       #clock-cells = <0>;
-                       clock-output-names = "v2m:oscclk1";
-               };
-
-               v2m_oscclk2: oscclk2 {
-                       /* IO FPGA peripheral clock */
-                       compatible = "arm,vexpress-osc";
-                       arm,vexpress-sysreg,func = <1 2>;
-                       freq-range = <24000000 24000000>;
-                       #clock-cells = <0>;
-                       clock-output-names = "v2m:oscclk2";
-               };
-
-               volt-vio {
-                       /* Logic level voltage */
-                       compatible = "arm,vexpress-volt";
-                       arm,vexpress-sysreg,func = <2 0>;
-                       regulator-name = "VIO";
-                       regulator-always-on;
-                       label = "VIO";
-               };
-
-               temp-mcc {
-                       /* MCC internal operating temperature */
-                       compatible = "arm,vexpress-temp";
-                       arm,vexpress-sysreg,func = <4 0>;
-                       label = "MCC";
-               };
-
-               reset {
-                       compatible = "arm,vexpress-reset";
-                       arm,vexpress-sysreg,func = <5 0>;
-               };
-
-               muxfpga {
-                       compatible = "arm,vexpress-muxfpga";
-                       arm,vexpress-sysreg,func = <7 0>;
-               };
-
-               shutdown {
-                       compatible = "arm,vexpress-shutdown";
-                       arm,vexpress-sysreg,func = <8 0>;
-               };
-
-               reboot {
-                       compatible = "arm,vexpress-reboot";
-                       arm,vexpress-sysreg,func = <9 0>;
-               };
-
-               dvimode {
-                       compatible = "arm,vexpress-dvimode";
-                       arm,vexpress-sysreg,func = <11 0>;
-               };
-       };
-
        bus@8000000 {
                motherboard-bus {
                        model = "V2M-P1";
                                                };
                                        };
                                };
+
+                               mcc {
+                                       compatible = "arm,vexpress,config-bus";
+                                       arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+                                       oscclk0 {
+                                               /* MCC static memory clock */
+                                               compatible = "arm,vexpress-osc";
+                                               arm,vexpress-sysreg,func = <1 0>;
+                                               freq-range = <25000000 60000000>;
+                                               #clock-cells = <0>;
+                                               clock-output-names = "v2m:oscclk0";
+                                       };
+
+                                       v2m_oscclk1: oscclk1 {
+                                               /* CLCD clock */
+                                               compatible = "arm,vexpress-osc";
+                                               arm,vexpress-sysreg,func = <1 1>;
+                                               freq-range = <23750000 65000000>;
+                                               #clock-cells = <0>;
+                                               clock-output-names = "v2m:oscclk1";
+                                       };
+
+                                       v2m_oscclk2: oscclk2 {
+                                               /* IO FPGA peripheral clock */
+                                               compatible = "arm,vexpress-osc";
+                                               arm,vexpress-sysreg,func = <1 2>;
+                                               freq-range = <24000000 24000000>;
+                                               #clock-cells = <0>;
+                                               clock-output-names = "v2m:oscclk2";
+                                       };
+
+                                       volt-vio {
+                                               /* Logic level voltage */
+                                               compatible = "arm,vexpress-volt";
+                                               arm,vexpress-sysreg,func = <2 0>;
+                                               regulator-name = "VIO";
+                                               regulator-always-on;
+                                               label = "VIO";
+                                       };
+
+                                       temp-mcc {
+                                               /* MCC internal operating temperature */
+                                               compatible = "arm,vexpress-temp";
+                                               arm,vexpress-sysreg,func = <4 0>;
+                                               label = "MCC";
+                                       };
+
+                                       reset {
+                                               compatible = "arm,vexpress-reset";
+                                               arm,vexpress-sysreg,func = <5 0>;
+                                       };
+
+                                       muxfpga {
+                                               compatible = "arm,vexpress-muxfpga";
+                                               arm,vexpress-sysreg,func = <7 0>;
+                                       };
+
+                                       shutdown {
+                                               compatible = "arm,vexpress-shutdown";
+                                               arm,vexpress-sysreg,func = <8 0>;
+                                       };
+
+                                       reboot {
+                                               compatible = "arm,vexpress-reboot";
+                                               arm,vexpress-sysreg,func = <9 0>;
+                                       };
+
+                                       dvimode {
+                                               compatible = "arm,vexpress-dvimode";
+                                               arm,vexpress-sysreg,func = <11 0>;
+                                       };
+                               };
                        };
                };
        };
index 84dc0ba..5dcf3c6 100644 (file)
@@ -87,4 +87,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
        return dram_base + SZ_512M;
 }
 
+struct efi_arm_entry_state {
+       u32     cpsr_before_ebs;
+       u32     sctlr_before_ebs;
+       u32     cpsr_after_ebs;
+       u32     sctlr_after_ebs;
+};
+
 #endif /* _ASM_ARM_EFI_H */
index c036a4a..a1570c8 100644 (file)
 #if defined(__APCS_26__)
 #error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32
 #endif
-/*
- * GCC 4.8.0-4.8.2: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854
- *           miscompiles find_get_entry(), and can result in EXT3 and EXT4
- *           filesystem corruption (possibly other FS too).
- */
-#if defined(GCC_VERSION) && GCC_VERSION >= 40800 && GCC_VERSION < 40803
-#error Your compiler is too buggy; it is known to miscompile kernels
-#error and result in filesystem corruption and oopses.
-#endif
 
 int main(void)
 {
index 10499d4..9a79ef6 100644 (file)
@@ -84,7 +84,8 @@ static int ftrace_modify_code(unsigned long pc, unsigned long old,
                old = __opcode_to_mem_arm(old);
 
        if (validate) {
-               if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
+               if (copy_from_kernel_nofault(&replaced, (void *)pc,
+                               MCOUNT_INSN_SIZE))
                        return -EFAULT;
 
                if (replaced != old)
index 6a95b92..7bd30c0 100644 (file)
@@ -236,7 +236,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
        /* patch_text() only supports int-sized breakpoints */
        BUILD_BUG_ON(sizeof(int) != BREAK_INSTR_SIZE);
 
-       err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+       err = copy_from_kernel_nofault(bpt->saved_instr, (char *)bpt->bpt_addr,
                                BREAK_INSTR_SIZE);
        if (err)
                return err;
index 65a3b1e..17d5a78 100644 (file)
@@ -396,7 +396,7 @@ int is_valid_bugaddr(unsigned long pc)
        u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
 #endif
 
-       if (probe_kernel_address((unsigned *)pc, bkpt))
+       if (get_kernel_nofault(bkpt, (void *)pc))
                return 0;
 
        return bkpt == insn;
index 6aa938b..1df0ee0 100644 (file)
@@ -53,6 +53,7 @@ config ARCH_BCM_NSP
        select ARM_ERRATA_754322
        select ARM_ERRATA_775420
        select ARM_ERRATA_764369 if SMP
+       select ARM_TIMER_SP804
        select THERMAL
        select THERMAL_OF
        help
index f057df8..e9962b4 100644 (file)
@@ -295,14 +295,14 @@ static int __init imx_suspend_alloc_ocram(
        if (!ocram_pool) {
                pr_warn("%s: ocram pool unavailable!\n", __func__);
                ret = -ENODEV;
-               goto put_node;
+               goto put_device;
        }
 
        ocram_base = gen_pool_alloc(ocram_pool, size);
        if (!ocram_base) {
                pr_warn("%s: unable to alloc ocram!\n", __func__);
                ret = -ENOMEM;
-               goto put_node;
+               goto put_device;
        }
 
        phys = gen_pool_virt_to_phys(ocram_pool, ocram_base);
@@ -312,6 +312,8 @@ static int __init imx_suspend_alloc_ocram(
        if (virt_out)
                *virt_out = virt;
 
+put_device:
+       put_device(&pdev->dev);
 put_node:
        of_node_put(node);
 
index dd34dff..40c74b4 100644 (file)
@@ -493,14 +493,14 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
        if (!ocram_pool) {
                pr_warn("%s: ocram pool unavailable!\n", __func__);
                ret = -ENODEV;
-               goto put_node;
+               goto put_device;
        }
 
        ocram_base = gen_pool_alloc(ocram_pool, MX6Q_SUSPEND_OCRAM_SIZE);
        if (!ocram_base) {
                pr_warn("%s: unable to alloc ocram!\n", __func__);
                ret = -ENOMEM;
-               goto put_node;
+               goto put_device;
        }
 
        ocram_pbase = gen_pool_virt_to_phys(ocram_pool, ocram_base);
@@ -523,7 +523,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
        ret = imx6_pm_get_base(&pm_info->mmdc_base, socdata->mmdc_compat);
        if (ret) {
                pr_warn("%s: failed to get mmdc base %d!\n", __func__, ret);
-               goto put_node;
+               goto put_device;
        }
 
        ret = imx6_pm_get_base(&pm_info->src_base, socdata->src_compat);
@@ -570,7 +570,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
                &imx6_suspend,
                MX6Q_SUSPEND_OCRAM_SIZE - sizeof(*pm_info));
 
-       goto put_node;
+       goto put_device;
 
 pl310_cache_map_failed:
        iounmap(pm_info->gpc_base.vbase);
@@ -580,6 +580,8 @@ iomuxc_map_failed:
        iounmap(pm_info->src_base.vbase);
 src_map_failed:
        iounmap(pm_info->mmdc_base.vbase);
+put_device:
+       put_device(&pdev->dev);
 put_node:
        of_node_put(node);
 
index 82706af..c630457 100644 (file)
@@ -3489,7 +3489,7 @@ static const struct omap_hwmod_reset dra7_reset_quirks[] = {
 };
 
 static const struct omap_hwmod_reset omap_reset_quirks[] = {
-       { .match = "dss", .len = 3, .reset = omap_dss_reset, },
+       { .match = "dss_core", .len = 8, .reset = omap_dss_reset, },
        { .match = "hdq1w", .len = 5, .reset = omap_hdq1w_reset, },
        { .match = "i2c", .len = 3, .reset = omap_i2c_reset, },
        { .match = "wd_timer", .len = 8, .reset = omap2_wd_timer_reset, },
index dcb9893..ffecbf2 100644 (file)
@@ -20,14 +20,6 @@ static const char *const stih41x_dt_match[] __initconst = {
        NULL
 };
 
-static void sti_l2_write_sec(unsigned long val, unsigned reg)
-{
-       /*
-        * We can't write to secure registers as we are in non-secure
-        * mode, until we have some SMI service available.
-        */
-}
-
 DT_MACHINE_START(STM, "STi SoC with Flattened Device Tree")
        .dt_compat      = stih41x_dt_match,
        .l2c_aux_val    = L2C_AUX_CTRL_SHARED_OVERRIDE |
@@ -36,5 +28,4 @@ DT_MACHINE_START(STM, "STi SoC with Flattened Device Tree")
                          L2C_AUX_CTRL_WAY_SIZE(4),
        .l2c_aux_mask   = 0xc0000fff,
        .smp            = smp_ops(sti_smp_ops),
-       .l2c_write_sec  = sti_l2_write_sec,
 MACHINE_END
index 84718ed..81a627e 100644 (file)
@@ -774,7 +774,7 @@ static int alignment_get_arm(struct pt_regs *regs, u32 *ip, u32 *inst)
        if (user_mode(regs))
                fault = get_user(instr, ip);
        else
-               fault = probe_kernel_address(ip, instr);
+               fault = get_kernel_nofault(instr, ip);
 
        *inst = __mem_to_opcode_arm(instr);
 
@@ -789,7 +789,7 @@ static int alignment_get_thumb(struct pt_regs *regs, u16 *ip, u16 *inst)
        if (user_mode(regs))
                fault = get_user(instr, ip);
        else
-               fault = probe_kernel_address(ip, instr);
+               fault = get_kernel_nofault(instr, ip);
 
        *inst = __mem_to_opcode_thumb16(instr);
 
index fd4e1ce..e93145d 100644 (file)
@@ -241,7 +241,6 @@ static int __init fdt_find_hyper_node(unsigned long node, const char *uname,
  * see Documentation/devicetree/bindings/arm/xen.txt for the
  * documentation of the Xen Device Tree format.
  */
-#define GRANT_TABLE_PHYSADDR 0
 void __init xen_early_init(void)
 {
        of_scan_flat_dt(fdt_find_hyper_node, NULL);
index 31380da..66dc41f 100644 (file)
@@ -1518,9 +1518,9 @@ config ARM64_PTR_AUTH
        default y
        depends on !KVM || ARM64_VHE
        depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
-       # GCC 9.1 and later inserts a .note.gnu.property section note for PAC
+       # Modern compilers insert a .note.gnu.property section note for PAC
        # which is only understood by binutils starting with version 2.33.1.
-       depends on !CC_IS_GCC || GCC_VERSION < 90100 || LD_VERSION >= 233010000
+       depends on LD_IS_LLD || LD_VERSION >= 233010000 || (CC_IS_GCC && GCC_VERSION < 90100)
        depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
        depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
        help
@@ -1564,7 +1564,7 @@ config CC_HAS_SIGN_RETURN_ADDRESS
        def_bool $(cc-option,-msign-return-address=all)
 
 config AS_HAS_PAC
-       def_bool $(as-option,-Wa$(comma)-march=armv8.3-a)
+       def_bool $(cc-option,-Wa$(comma)-march=armv8.3-a)
 
 config AS_HAS_CFI_NEGATE_RA_STATE
        def_bool $(as-instr,.cfi_startproc\n.cfi_negate_ra_state\n.cfi_endproc\n)
@@ -1630,6 +1630,8 @@ config ARM64_BTI_KERNEL
        depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI
        # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
        depends on !CC_IS_GCC || GCC_VERSION >= 100100
+       # https://reviews.llvm.org/rGb8ae3fdfa579dbf366b1bb1cbfdbf8c51db7fa55
+       depends on !CC_IS_CLANG || CLANG_VERSION >= 100001
        depends on !(CC_IS_CLANG && GCOV_KERNEL)
        depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
        help
index cdf7ec0..265c446 100644 (file)
@@ -8,21 +8,6 @@ config PID_IN_CONTEXTIDR
          instructions during context switch. Say Y here only if you are
          planning to use hardware trace tools with this kernel.
 
-config ARM64_RANDOMIZE_TEXT_OFFSET
-       bool "Randomize TEXT_OFFSET at build time"
-       help
-         Say Y here if you want the image load offset (AKA TEXT_OFFSET)
-         of the kernel to be randomized at build-time. When selected,
-         this option will cause TEXT_OFFSET to be randomized upon any
-         build of the kernel, and the offset will be reflected in the
-         text_offset field of the resulting Image. This can be used to
-         fuzz-test bootloaders which respect text_offset.
-
-         This option is intended for bootloader and/or kernel testing
-         only. Bootloaders must make no assumptions regarding the value
-         of TEXT_OFFSET and platforms must not require a specific
-         value.
-
 config DEBUG_EFI
        depends on EFI && DEBUG_INFO
        bool "UEFI debugging"
index 76359cf..a0d94d0 100644 (file)
@@ -121,13 +121,7 @@ endif
 head-y         := arch/arm64/kernel/head.o
 
 # The byte offset of the kernel image in RAM from the start of RAM.
-ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
-TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
-                int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
-                rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
-else
 TEXT_OFFSET := 0x0
-endif
 
 ifeq ($(CONFIG_KASAN_SW_TAGS), y)
 KASAN_SHADOW_SCALE_SHIFT := 4
index fb0137a..94911b1 100644 (file)
 
                        ldo1_reg: LDO1 {
                                regulator-name = "LDO1";
-                               regulator-min-microvolt = <3000000>;
+                               regulator-min-microvolt = <1600000>;
                                regulator-max-microvolt = <3300000>;
                                regulator-boot-on;
                                regulator-always-on;
 
                        ldo2_reg: LDO2 {
                                regulator-name = "LDO2";
-                               regulator-min-microvolt = <900000>;
+                               regulator-min-microvolt = <800000>;
                                regulator-max-microvolt = <900000>;
                                regulator-boot-on;
                                regulator-always-on;
index e5ec832..0f1d7f8 100644 (file)
 
                        ldo1_reg: LDO1 {
                                regulator-name = "LDO1";
-                               regulator-min-microvolt = <3000000>;
+                               regulator-min-microvolt = <1600000>;
                                regulator-max-microvolt = <3300000>;
                                regulator-boot-on;
                                regulator-always-on;
 
                        ldo2_reg: LDO2 {
                                regulator-name = "LDO2";
-                               regulator-min-microvolt = <900000>;
+                               regulator-min-microvolt = <800000>;
                                regulator-max-microvolt = <900000>;
                                regulator-boot-on;
                                regulator-always-on;
index d07e0e6..a1e5483 100644 (file)
 
                        ldo1_reg: LDO1 {
                                regulator-name = "LDO1";
-                               regulator-min-microvolt = <3000000>;
+                               regulator-min-microvolt = <1600000>;
                                regulator-max-microvolt = <3300000>;
                                regulator-boot-on;
                                regulator-always-on;
 
                        ldo2_reg: LDO2 {
                                regulator-name = "LDO2";
-                               regulator-min-microvolt = <900000>;
+                               regulator-min-microvolt = <800000>;
                                regulator-max-microvolt = <900000>;
                                regulator-boot-on;
                                regulator-always-on;
index 5e5dc05..12f0eb5 100644 (file)
@@ -73,11 +73,11 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
        ".pushsection .altinstructions,\"a\"\n"                         \
        ALTINSTR_ENTRY(feature)                                         \
        ".popsection\n"                                                 \
-       ".pushsection .altinstr_replacement, \"a\"\n"                   \
+       ".subsection 1\n"                                               \
        "663:\n\t"                                                      \
        newinstr "\n"                                                   \
        "664:\n\t"                                                      \
-       ".popsection\n\t"                                               \
+       ".previous\n\t"                                                 \
        ".org   . - (664b-663b) + (662b-661b)\n\t"                      \
        ".org   . - (662b-661b) + (664b-663b)\n"                        \
        ".endif\n"
@@ -117,9 +117,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
 662:   .pushsection .altinstructions, "a"
        altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
        .popsection
-       .pushsection .altinstr_replacement, "ax"
+       .subsection 1
 663:   \insn2
-664:   .popsection
+664:   .previous
        .org    . - (664b-663b) + (662b-661b)
        .org    . - (662b-661b) + (664b-663b)
        .endif
@@ -160,7 +160,7 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
        .pushsection .altinstructions, "a"
        altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f
        .popsection
-       .pushsection .altinstr_replacement, "ax"
+       .subsection 1
        .align 2        /* So GAS knows label 661 is suitably aligned */
 661:
 .endm
@@ -179,9 +179,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
 .macro alternative_else
 662:
        .if .Lasm_alt_mode==0
-       .pushsection .altinstr_replacement, "ax"
+       .subsection 1
        .else
-       .popsection
+       .previous
        .endif
 663:
 .endm
@@ -192,7 +192,7 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
 .macro alternative_endif
 664:
        .if .Lasm_alt_mode==0
-       .popsection
+       .previous
        .endif
        .org    . - (664b-663b) + (662b-661b)
        .org    . - (662b-661b) + (664b-663b)
index a358e97..6647ae4 100644 (file)
@@ -109,7 +109,7 @@ static inline u32 gic_read_pmr(void)
        return read_sysreg_s(SYS_ICC_PMR_EL1);
 }
 
-static inline void gic_write_pmr(u32 val)
+static __always_inline void gic_write_pmr(u32 val)
 {
        write_sysreg_s(val, SYS_ICC_PMR_EL1);
 }
index 7ae54d7..9f0ec21 100644 (file)
@@ -58,6 +58,7 @@ struct arch_timer_erratum_workaround {
        u64 (*read_cntvct_el0)(void);
        int (*set_next_event_phys)(unsigned long, struct clock_event_device *);
        int (*set_next_event_virt)(unsigned long, struct clock_event_device *);
+       bool disable_compat_vdso;
 };
 
 DECLARE_PER_CPU(const struct arch_timer_erratum_workaround *,
index 5d1f4ae..f7c3d1f 100644 (file)
@@ -675,7 +675,7 @@ static inline bool system_supports_generic_auth(void)
                cpus_have_const_cap(ARM64_HAS_GENERIC_AUTH);
 }
 
-static inline bool system_uses_irq_prio_masking(void)
+static __always_inline bool system_uses_irq_prio_masking(void)
 {
        return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
               cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
index a87a93f..7219cdd 100644 (file)
@@ -86,6 +86,7 @@
 #define QCOM_CPU_PART_FALKOR           0xC00
 #define QCOM_CPU_PART_KRYO             0x200
 #define QCOM_CPU_PART_KRYO_3XX_SILVER  0x803
+#define QCOM_CPU_PART_KRYO_4XX_GOLD    0x804
 #define QCOM_CPU_PART_KRYO_4XX_SILVER  0x805
 
 #define NVIDIA_CPU_PART_DENVER         0x003
 #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
 #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
 #define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
+#define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
 #define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
 #define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
 #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
index 81fefd2..ba89a9a 100644 (file)
@@ -12,7 +12,6 @@
  * instead.
  */
 #define BTI_C hint 34 ;
-#define BTI_J hint 36 ;
 
 /*
  * When using in-kernel BTI we need to ensure that PCS-conformant assembly
        SYM_START(name, SYM_L_WEAK, SYM_A_NONE)         \
        BTI_C
 
-#define SYM_INNER_LABEL(name, linkage)                 \
-       .type name SYM_T_NONE ASM_NL                    \
-       SYM_ENTRY(name, linkage, SYM_A_NONE)            \
-       BTI_J
-
 #endif
 
 /*
index 68140fd..8444df0 100644 (file)
@@ -19,6 +19,9 @@
 
 typedef struct {
        atomic64_t      id;
+#ifdef CONFIG_COMPAT
+       void            *sigpage;
+#endif
        void            *vdso;
        unsigned long   flags;
 } mm_context_t;
index 2e7e0f4..4d867c6 100644 (file)
@@ -67,7 +67,7 @@ extern bool arm64_use_ng_mappings;
 #define PAGE_HYP               __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
 #define PAGE_HYP_EXEC          __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
 #define PAGE_HYP_RO            __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
-#define PAGE_HYP_DEVICE                __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
+#define PAGE_HYP_DEVICE                __pgprot(_PROT_DEFAULT | PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_HYP | PTE_HYP_XN)
 
 #define PAGE_S2_MEMATTR(attr)                                          \
        ({                                                              \
index 6dbd267..758e2d1 100644 (file)
@@ -416,7 +416,7 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
        __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
 
 #define pgprot_nx(prot) \
-       __pgprot_modify(prot, 0, PTE_PXN)
+       __pgprot_modify(prot, PTE_MAYBE_GP, PTE_PXN)
 
 /*
  * Mark the prot value as uncacheable and unbufferable.
index 6d95d0c..166e369 100644 (file)
@@ -599,9 +599,9 @@ __SYSCALL(__NR_recvfrom, compat_sys_recvfrom)
 #define __NR_shutdown 293
 __SYSCALL(__NR_shutdown, sys_shutdown)
 #define __NR_setsockopt 294
-__SYSCALL(__NR_setsockopt, compat_sys_setsockopt)
+__SYSCALL(__NR_setsockopt, sys_setsockopt)
 #define __NR_getsockopt 295
-__SYSCALL(__NR_getsockopt, compat_sys_getsockopt)
+__SYSCALL(__NR_getsockopt, sys_getsockopt)
 #define __NR_sendmsg 296
 __SYSCALL(__NR_sendmsg, compat_sys_sendmsg)
 #define __NR_recvmsg 297
index df6ea65..b054d9f 100644 (file)
@@ -2,7 +2,10 @@
 #ifndef __ASM_VDSOCLOCKSOURCE_H
 #define __ASM_VDSOCLOCKSOURCE_H
 
-#define VDSO_ARCH_CLOCKMODES   \
-       VDSO_CLOCKMODE_ARCHTIMER
+#define VDSO_ARCH_CLOCKMODES                                   \
+       /* vdso clocksource for both 32 and 64bit tasks */      \
+       VDSO_CLOCKMODE_ARCHTIMER,                               \
+       /* vdso clocksource for 64bit tasks only */             \
+       VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT
 
 #endif
index b6907ae..9a625e8 100644 (file)
@@ -111,7 +111,7 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
         * update. Return something. Core will do another round and then
         * see the mode change and fallback to the syscall.
         */
-       if (clock_mode == VDSO_CLOCKMODE_NONE)
+       if (clock_mode != VDSO_CLOCKMODE_ARCHTIMER)
                return 0;
 
        /*
@@ -152,6 +152,12 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
        return ret;
 }
 
+static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
+{
+       return vd->clock_mode == VDSO_CLOCKMODE_ARCHTIMER;
+}
+#define vdso_clocksource_ok    vdso_clocksource_ok
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
index 151f285..a561cbb 100644 (file)
@@ -29,9 +29,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE
 
 obj-$(CONFIG_COMPAT)                   += sys32.o signal32.o                   \
                                           sys_compat.o
-ifneq ($(CONFIG_COMPAT_VDSO), y)
 obj-$(CONFIG_COMPAT)                   += sigreturn32.o
-endif
 obj-$(CONFIG_KUSER_HELPERS)            += kuser32.o
 obj-$(CONFIG_FUNCTION_TRACER)          += ftrace.o entry-ftrace.o
 obj-$(CONFIG_MODULES)                  += module.o
index d1757ef..7303994 100644 (file)
@@ -43,20 +43,8 @@ bool alternative_is_applied(u16 cpufeature)
  */
 static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
 {
-       unsigned long replptr;
-
-       if (kernel_text_address(pc))
-               return true;
-
-       replptr = (unsigned long)ALT_REPL_PTR(alt);
-       if (pc >= replptr && pc <= (replptr + alt->alt_len))
-               return false;
-
-       /*
-        * Branching into *another* alternate sequence is doomed, and
-        * we're not even trying to fix it up.
-        */
-       BUG();
+       unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
+       return !(pc >= replptr && pc <= (replptr + alt->alt_len));
 }
 
 #define align_down(x, a)       ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
index ad06d68..79728bf 100644 (file)
@@ -460,6 +460,8 @@ static const struct midr_range arm64_ssb_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
        MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+       MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+       MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
        {},
 };
 
@@ -470,12 +472,7 @@ static bool
 has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
                               int scope)
 {
-       u32 midr = read_cpuid_id();
-       /* Cortex-A76 r0p0 - r3p1 */
-       struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
-
-       WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-       return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
+       return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
 }
 #endif
 
@@ -726,6 +723,8 @@ static const struct midr_range erratum_1418040_list[] = {
        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
        /* Neoverse-N1 r0p0 to r3p1 */
        MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
+       /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
+       MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
        {},
 };
 #endif
@@ -770,11 +769,23 @@ static const struct midr_range erratum_speculative_at_list[] = {
 #ifdef CONFIG_ARM64_ERRATUM_1530923
        /* Cortex A55 r0p0 to r2p0 */
        MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
+       /* Kryo4xx Silver (rdpe => r1p0) */
+       MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
 #endif
        {},
 };
 #endif
 
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+static const struct midr_range erratum_1463225[] = {
+       /* Cortex-A76 r0p0 - r3p1 */
+       MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
+       /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
+       MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
+       {},
+};
+#endif
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
        {
@@ -914,6 +925,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                .capability = ARM64_WORKAROUND_1463225,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_cortex_a76_erratum_1463225,
+               .midr_range_list = erratum_1463225,
        },
 #endif
 #ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
index 4ae4167..9fae0ef 100644 (file)
@@ -1290,6 +1290,8 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
                MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
                MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+               MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+               MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
                { /* sentinel */ }
        };
        char const *str = "kpti command line option";
@@ -1406,6 +1408,8 @@ static bool cpu_has_broken_dbm(void)
        static const struct midr_range cpus[] = {
 #ifdef CONFIG_ARM64_ERRATUM_1024718
                MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0),  // A55 r0p0 -r1p0
+               /* Kryo4xx Silver (rdpe => r1p0) */
+               MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
 #endif
                {},
        };
index 3dbdf97..d3be9db 100644 (file)
@@ -57,7 +57,7 @@ static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
        /*
         * The CPU masked interrupts, and we are leaving them masked during
         * do_debug_exception(). Update PMR as if we had called
-        * local_mask_daif().
+        * local_daif_mask().
         */
        if (system_uses_irq_prio_masking())
                gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
index 5304d19..35de8ba 100644 (file)
@@ -126,8 +126,10 @@ alternative_else_nop_endif
        add     \dst, \dst, #(\sym - .entry.tramp.text)
        .endm
 
-       // This macro corrupts x0-x3. It is the caller's duty
-       // to save/restore them if required.
+       /*
+        * This macro corrupts x0-x3. It is the caller's duty  to save/restore
+        * them if required.
+        */
        .macro  apply_ssbd, state, tmp1, tmp2
 #ifdef CONFIG_ARM64_SSBD
 alternative_cb arm64_enable_wa2_handling
@@ -167,13 +169,28 @@ alternative_cb_end
        stp     x28, x29, [sp, #16 * 14]
 
        .if     \el == 0
+       .if     \regsize == 32
+       /*
+        * If we're returning from a 32-bit task on a system affected by
+        * 1418040 then re-enable userspace access to the virtual counter.
+        */
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+alternative_if ARM64_WORKAROUND_1418040
+       mrs     x0, cntkctl_el1
+       orr     x0, x0, #2      // ARCH_TIMER_USR_VCT_ACCESS_EN
+       msr     cntkctl_el1, x0
+alternative_else_nop_endif
+#endif
+       .endif
        clear_gp_regs
        mrs     x21, sp_el0
        ldr_this_cpu    tsk, __entry_task, x20
        msr     sp_el0, tsk
 
-       // Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
-       // when scheduling.
+       /*
+        * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
+        * when scheduling.
+        */
        ldr     x19, [tsk, #TSK_TI_FLAGS]
        disable_step_tsk x19, x20
 
@@ -320,6 +337,14 @@ alternative_else_nop_endif
        tst     x22, #PSR_MODE32_BIT            // native task?
        b.eq    3f
 
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+alternative_if ARM64_WORKAROUND_1418040
+       mrs     x0, cntkctl_el1
+       bic     x0, x0, #2                      // ARCH_TIMER_USR_VCT_ACCESS_EN
+       msr     cntkctl_el1, x0
+alternative_else_nop_endif
+#endif
+
 #ifdef CONFIG_ARM64_ERRATUM_845719
 alternative_if ARM64_WORKAROUND_845719
 #ifdef CONFIG_PID_IN_CONTEXTIDR
@@ -331,21 +356,6 @@ alternative_if ARM64_WORKAROUND_845719
 alternative_else_nop_endif
 #endif
 3:
-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if_not ARM64_WORKAROUND_1418040
-       b       4f
-alternative_else_nop_endif
-       /*
-        * if (x22.mode32 == cntkctl_el1.el0vcten)
-        *     cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten
-        */
-       mrs     x1, cntkctl_el1
-       eon     x0, x1, x22, lsr #3
-       tbz     x0, #1, 4f
-       eor     x1, x1, #2      // ARCH_TIMER_USR_VCT_ACCESS_EN
-       msr     cntkctl_el1, x1
-4:
-#endif
        scs_save tsk, x0
 
        /* No kernel C function calls after this as user keys are set. */
@@ -377,11 +387,11 @@ alternative_else_nop_endif
        .if     \el == 0
 alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-       bne     5f
+       bne     4f
        msr     far_el1, x30
        tramp_alias     x30, tramp_exit_native
        br      x30
-5:
+4:
        tramp_alias     x30, tramp_exit_compat
        br      x30
 #endif
index 35cb5e6..55c8f3e 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/bug.h>
 #include <linux/cache.h>
 #include <linux/compat.h>
+#include <linux/compiler.h>
 #include <linux/cpu.h>
 #include <linux/cpu_pm.h>
 #include <linux/kernel.h>
@@ -119,10 +120,20 @@ struct fpsimd_last_state_struct {
 static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
 
 /* Default VL for tasks that don't set it explicitly: */
-static int sve_default_vl = -1;
+static int __sve_default_vl = -1;
+
+static int get_sve_default_vl(void)
+{
+       return READ_ONCE(__sve_default_vl);
+}
 
 #ifdef CONFIG_ARM64_SVE
 
+static void set_sve_default_vl(int val)
+{
+       WRITE_ONCE(__sve_default_vl, val);
+}
+
 /* Maximum supported vector length across all CPUs (initially poisoned) */
 int __ro_after_init sve_max_vl = SVE_VL_MIN;
 int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
@@ -338,13 +349,13 @@ static unsigned int find_supported_vector_length(unsigned int vl)
        return sve_vl_from_vq(__bit_to_vq(bit));
 }
 
-#ifdef CONFIG_SYSCTL
+#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)
 
 static int sve_proc_do_default_vl(struct ctl_table *table, int write,
                                  void *buffer, size_t *lenp, loff_t *ppos)
 {
        int ret;
-       int vl = sve_default_vl;
+       int vl = get_sve_default_vl();
        struct ctl_table tmp_table = {
                .data = &vl,
                .maxlen = sizeof(vl),
@@ -361,7 +372,7 @@ static int sve_proc_do_default_vl(struct ctl_table *table, int write,
        if (!sve_vl_valid(vl))
                return -EINVAL;
 
-       sve_default_vl = find_supported_vector_length(vl);
+       set_sve_default_vl(find_supported_vector_length(vl));
        return 0;
 }
 
@@ -383,9 +394,9 @@ static int __init sve_sysctl_init(void)
        return 0;
 }
 
-#else /* ! CONFIG_SYSCTL */
+#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
 static int __init sve_sysctl_init(void) { return 0; }
-#endif /* ! CONFIG_SYSCTL */
+#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
 
 #define ZREG(sve_state, vq, n) ((char *)(sve_state) +          \
        (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
@@ -868,7 +879,7 @@ void __init sve_setup(void)
         * For the default VL, pick the maximum supported value <= 64.
         * VL == 64 is guaranteed not to grow the signal frame.
         */
-       sve_default_vl = find_supported_vector_length(64);
+       set_sve_default_vl(find_supported_vector_length(64));
 
        bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
                      SVE_VQ_MAX);
@@ -889,7 +900,7 @@ void __init sve_setup(void)
        pr_info("SVE: maximum available vector length %u bytes per vector\n",
                sve_max_vl);
        pr_info("SVE: default vector length %u bytes per vector\n",
-               sve_default_vl);
+               get_sve_default_vl());
 
        /* KVM decides whether to support mismatched systems. Just warn here: */
        if (sve_max_virtualisable_vl < sve_max_vl)
@@ -1029,13 +1040,13 @@ void fpsimd_flush_thread(void)
                 * vector length configured: no kernel task can become a user
                 * task without an exec and hence a call to this function.
                 * By the time the first call to this function is made, all
-                * early hardware probing is complete, so sve_default_vl
+                * early hardware probing is complete, so __sve_default_vl
                 * should be valid.
                 * If a bug causes this to go wrong, we make some noise and
                 * try to fudge thread.sve_vl to a safe value here.
                 */
                vl = current->thread.sve_vl_onexec ?
-                       current->thread.sve_vl_onexec : sve_default_vl;
+                       current->thread.sve_vl_onexec : get_sve_default_vl();
 
                if (WARN_ON(!sve_vl_valid(vl)))
                        vl = SVE_VL_MIN;
index 0b727ed..af234a1 100644 (file)
@@ -730,6 +730,27 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val,
                return 0;
 }
 
+static int watchpoint_report(struct perf_event *wp, unsigned long addr,
+                            struct pt_regs *regs)
+{
+       int step = is_default_overflow_handler(wp);
+       struct arch_hw_breakpoint *info = counter_arch_bp(wp);
+
+       info->trigger = addr;
+
+       /*
+        * If we triggered a user watchpoint from a uaccess routine, then
+        * handle the stepping ourselves since userspace really can't help
+        * us with this.
+        */
+       if (!user_mode(regs) && info->ctrl.privilege == AARCH64_BREAKPOINT_EL0)
+               step = 1;
+       else
+               perf_bp_event(wp, regs);
+
+       return step;
+}
+
 static int watchpoint_handler(unsigned long addr, unsigned int esr,
                              struct pt_regs *regs)
 {
@@ -739,7 +760,6 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
        u64 val;
        struct perf_event *wp, **slots;
        struct debug_info *debug_info;
-       struct arch_hw_breakpoint *info;
        struct arch_hw_breakpoint_ctrl ctrl;
 
        slots = this_cpu_ptr(wp_on_reg);
@@ -777,25 +797,13 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
                if (dist != 0)
                        continue;
 
-               info = counter_arch_bp(wp);
-               info->trigger = addr;
-               perf_bp_event(wp, regs);
-
-               /* Do we need to handle the stepping? */
-               if (is_default_overflow_handler(wp))
-                       step = 1;
+               step = watchpoint_report(wp, addr, regs);
        }
-       if (min_dist > 0 && min_dist != -1) {
-               /* No exact match found. */
-               wp = slots[closest_match];
-               info = counter_arch_bp(wp);
-               info->trigger = addr;
-               perf_bp_event(wp, regs);
 
-               /* Do we need to handle the stepping? */
-               if (is_default_overflow_handler(wp))
-                       step = 1;
-       }
+       /* No exact match found? */
+       if (min_dist > 0 && min_dist != -1)
+               step = watchpoint_report(slots[closest_match], addr, regs);
+
        rcu_read_unlock();
 
        if (!step)
index 684d871..a107375 100644 (file)
@@ -135,7 +135,7 @@ int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
        int ret;
        __le32 val;
 
-       ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
+       ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
        if (!ret)
                *insnp = le32_to_cpu(val);
 
@@ -151,7 +151,7 @@ static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
        raw_spin_lock_irqsave(&patch_lock, flags);
        waddr = patch_map(addr, FIX_TEXT_POKE0);
 
-       ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
+       ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);
 
        patch_unmap(FIX_TEXT_POKE0);
        raw_spin_unlock_irqrestore(&patch_lock, flags);
index 4311992..1a157ca 100644 (file)
@@ -252,7 +252,7 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
        if (!kgdb_single_step)
                return DBG_HOOK_ERROR;
 
-       kgdb_handle_exception(1, SIGTRAP, 0, regs);
+       kgdb_handle_exception(0, SIGTRAP, 0, regs);
        return DBG_HOOK_HANDLED;
 }
 NOKPROBE_SYMBOL(kgdb_step_brk_fn);
index 522e6f5..361a114 100644 (file)
@@ -219,8 +219,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
                                        MEMBLOCK_NONE, &start, &end, NULL)
                nr_ranges++;
 
-       cmem = kmalloc(sizeof(struct crash_mem) +
-                       sizeof(struct crash_mem_range) * nr_ranges, GFP_KERNEL);
+       cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
        if (!cmem)
                return -ENOMEM;
 
index 0bbac61..666b225 100644 (file)
@@ -15,15 +15,34 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
                return 0;
 
        /*
-        * Compat (i.e. 32 bit) mode:
-        * - PC has been set in the pt_regs struct in kernel_entry,
-        * - Handle SP and LR here.
+        * Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but
+        * we're stuck with it for ABI compatability reasons.
+        *
+        * For a 32-bit consumer inspecting a 32-bit task, then it will look at
+        * the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h).
+        * These correspond directly to a prefix of the registers saved in our
+        * 'struct pt_regs', with the exception of the PC, so we copy that down
+        * (x15 corresponds to SP_hyp in the architecture).
+        *
+        * So far, so good.
+        *
+        * The oddity arises when a 64-bit consumer looks at a 32-bit task and
+        * asks for registers beyond PERF_REG_ARM_MAX. In this case, we return
+        * SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and
+        * PC registers would normally live. The initial idea was to allow a
+        * 64-bit unwinder to unwind a 32-bit task and, although it's not clear
+        * how well that works in practice, somebody might be relying on it.
+        *
+        * At the time we make a sample, we don't know whether the consumer is
+        * 32-bit or 64-bit, so we have to cater for both possibilities.
         */
        if (compat_user_mode(regs)) {
                if ((u32)idx == PERF_REG_ARM64_SP)
                        return regs->compat_sp;
                if ((u32)idx == PERF_REG_ARM64_LR)
                        return regs->compat_lr;
+               if (idx == 15)
+                       return regs->pc;
        }
 
        if ((u32)idx == PERF_REG_ARM64_SP)
index d1c95dc..5290f17 100644 (file)
@@ -120,15 +120,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 
 void *alloc_insn_page(void)
 {
-       void *page;
-
-       page = vmalloc_exec(PAGE_SIZE);
-       if (page) {
-               set_memory_ro((unsigned long)page, 1);
-               set_vm_flush_reset_perms(page);
-       }
-
-       return page;
+       return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
+                       GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS,
+                       NUMA_NO_NODE, __builtin_return_address(0));
 }
 
 /* arm kprobe: install breakpoint in text */
index 82feca6..2f507f5 100644 (file)
@@ -342,38 +342,13 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
                retcode = ptr_to_compat(ka->sa.sa_restorer);
        } else {
                /* Set up sigreturn pointer */
-#ifdef CONFIG_COMPAT_VDSO
-               void *vdso_base = current->mm->context.vdso;
-               void *vdso_trampoline;
-
-               if (ka->sa.sa_flags & SA_SIGINFO) {
-                       if (thumb) {
-                               vdso_trampoline = VDSO_SYMBOL(vdso_base,
-                                                       compat_rt_sigreturn_thumb);
-                       } else {
-                               vdso_trampoline = VDSO_SYMBOL(vdso_base,
-                                                       compat_rt_sigreturn_arm);
-                       }
-               } else {
-                       if (thumb) {
-                               vdso_trampoline = VDSO_SYMBOL(vdso_base,
-                                                       compat_sigreturn_thumb);
-                       } else {
-                               vdso_trampoline = VDSO_SYMBOL(vdso_base,
-                                                       compat_sigreturn_arm);
-                       }
-               }
-
-               retcode = ptr_to_compat(vdso_trampoline) + thumb;
-#else
                unsigned int idx = thumb << 1;
 
                if (ka->sa.sa_flags & SA_SIGINFO)
                        idx += 3;
 
-               retcode = (unsigned long)current->mm->context.vdso +
+               retcode = (unsigned long)current->mm->context.sigpage +
                          (idx << 2) + thumb;
-#endif
        }
 
        regs->regs[0]   = usig;
index 50cc30a..47f651d 100644 (file)
@@ -376,7 +376,7 @@ static int call_undef_hook(struct pt_regs *regs)
 
        if (!user_mode(regs)) {
                __le32 instr_le;
-               if (probe_kernel_address((__force __le32 *)pc, instr_le))
+               if (get_kernel_nofault(instr_le, (__force __le32 *)pc))
                        goto exit;
                instr = le32_to_cpu(instr_le);
        } else if (compat_thumb_mode(regs)) {
@@ -813,6 +813,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
                handler[reason], smp_processor_id(), esr,
                esr_get_class_string(esr));
 
+       __show_regs(regs);
        local_daif_mask();
        panic("bad mode");
 }
index 4e01657..e546df0 100644 (file)
@@ -191,15 +191,12 @@ enum aarch32_map {
 #ifdef CONFIG_COMPAT_VDSO
        AA32_MAP_VVAR,
        AA32_MAP_VDSO,
-#else
-       AA32_MAP_SIGPAGE
 #endif
+       AA32_MAP_SIGPAGE
 };
 
 static struct page *aarch32_vectors_page __ro_after_init;
-#ifndef CONFIG_COMPAT_VDSO
 static struct page *aarch32_sig_page __ro_after_init;
-#endif
 
 static struct vm_special_mapping aarch32_vdso_maps[] = {
        [AA32_MAP_VECTORS] = {
@@ -214,12 +211,11 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
                .name = "[vdso]",
                .mremap = aarch32_vdso_mremap,
        },
-#else
+#endif /* CONFIG_COMPAT_VDSO */
        [AA32_MAP_SIGPAGE] = {
                .name   = "[sigpage]", /* ABI */
                .pages  = &aarch32_sig_page,
        },
-#endif /* CONFIG_COMPAT_VDSO */
 };
 
 static int aarch32_alloc_kuser_vdso_page(void)
@@ -242,27 +238,11 @@ static int aarch32_alloc_kuser_vdso_page(void)
        return 0;
 }
 
-#ifdef CONFIG_COMPAT_VDSO
-static int __aarch32_alloc_vdso_pages(void)
-{
-       int ret;
-
-       vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
-       vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
-
-       ret = __vdso_init(VDSO_ABI_AA32);
-       if (ret)
-               return ret;
-
-       return aarch32_alloc_kuser_vdso_page();
-}
-#else
-static int __aarch32_alloc_vdso_pages(void)
+static int aarch32_alloc_sigpage(void)
 {
        extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
        int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
        unsigned long sigpage;
-       int ret;
 
        sigpage = get_zeroed_page(GFP_ATOMIC);
        if (!sigpage)
@@ -271,18 +251,34 @@ static int __aarch32_alloc_vdso_pages(void)
        memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
        aarch32_sig_page = virt_to_page(sigpage);
        flush_dcache_page(aarch32_sig_page);
+       return 0;
+}
 
-       ret = aarch32_alloc_kuser_vdso_page();
-       if (ret)
-               free_page(sigpage);
+#ifdef CONFIG_COMPAT_VDSO
+static int __aarch32_alloc_vdso_pages(void)
+{
+       vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
+       vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
 
-       return ret;
+       return __vdso_init(VDSO_ABI_AA32);
 }
 #endif /* CONFIG_COMPAT_VDSO */
 
 static int __init aarch32_alloc_vdso_pages(void)
 {
-       return __aarch32_alloc_vdso_pages();
+       int ret;
+
+#ifdef CONFIG_COMPAT_VDSO
+       ret = __aarch32_alloc_vdso_pages();
+       if (ret)
+               return ret;
+#endif
+
+       ret = aarch32_alloc_sigpage();
+       if (ret)
+               return ret;
+
+       return aarch32_alloc_kuser_vdso_page();
 }
 arch_initcall(aarch32_alloc_vdso_pages);
 
@@ -305,7 +301,6 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
        return PTR_ERR_OR_ZERO(ret);
 }
 
-#ifndef CONFIG_COMPAT_VDSO
 static int aarch32_sigreturn_setup(struct mm_struct *mm)
 {
        unsigned long addr;
@@ -328,12 +323,11 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm)
        if (IS_ERR(ret))
                goto out;
 
-       mm->context.vdso = (void *)addr;
+       mm->context.sigpage = (void *)addr;
 
 out:
        return PTR_ERR_OR_ZERO(ret);
 }
-#endif /* !CONFIG_COMPAT_VDSO */
 
 int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
@@ -352,10 +346,11 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
                                       mm,
                                       bprm,
                                       uses_interp);
-#else
-       ret = aarch32_sigreturn_setup(mm);
+       if (ret)
+               goto out;
 #endif /* CONFIG_COMPAT_VDSO */
 
+       ret = aarch32_sigreturn_setup(mm);
 out:
        mmap_write_unlock(mm);
        return ret;
index 556d424..45d5cfe 100644 (file)
@@ -23,13 +23,14 @@ btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti
 # potential future proofing if we end up with internal calls to the exported
 # routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so
 # preparation in build-time C")).
-ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
-               -Bsymbolic --eh-frame-hdr --build-id -n $(btildflags-y) -T
+ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv       \
+            -Bsymbolic $(call ld-option, --no-eh-frame-hdr) --build-id -n      \
+            $(btildflags-y) -T
 
 ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
 ccflags-y += -DDISABLE_BRANCH_PROFILING
 
-CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS)
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS)
 KBUILD_CFLAGS                  += $(DISABLE_LTO)
 KASAN_SANITIZE                 := n
 UBSAN_SANITIZE                 := n
index 620a3ef..0e18729 100644 (file)
 
        .text
 
+/*
+ * NOTE!!!  You may notice that all of the .cfi directives in this file have
+ * been commented out. This is because they have been shown to trigger segfaults
+ * in libgcc when unwinding out of a SIGCANCEL handler to invoke pthread
+ * cleanup handlers during the thread cancellation dance. By omitting the
+ * directives, we trigger an arm64-specific fallback path in the unwinder which
+ * recognises the signal frame and restores many of the registers directly from
+ * the sigcontext. Re-enabling the cfi directives here therefore needs to be
+ * much more comprehensive to reduce the risk of further regressions.
+ */
+
 /* Ensure that the mysterious NOP can be associated with a function. */
-       .cfi_startproc
+//     .cfi_startproc
 
 /*
- * .cfi_signal_frame causes the corresponding Frame Description Entry in the
- * .eh_frame section to be annotated as a signal frame. This allows DWARF
- * unwinders (e.g. libstdc++) to implement _Unwind_GetIPInfo(), which permits
- * unwinding out of the signal trampoline without the need for the mysterious
- * NOP.
+ * .cfi_signal_frame causes the corresponding Frame Description Entry (FDE) in
+ * the .eh_frame section to be annotated as a signal frame. This allows DWARF
+ * unwinders (e.g. libstdc++) to implement _Unwind_GetIPInfo() and identify
+ * the next frame using the unmodified return address instead of subtracting 1,
+ * which may yield the wrong FDE.
  */
-       .cfi_signal_frame
+//     .cfi_signal_frame
 
 /*
  * Tell the unwinder where to locate the frame record linking back to the
- * interrupted context. We don't provide unwind info for registers other
- * than the frame pointer and the link register here; in practice, this
- * is sufficient for unwinding in C/C++ based runtimes and the values in
- * the sigcontext may have been modified by this point anyway. Debuggers
+ * interrupted context. We don't provide unwind info for registers other than
+ * the frame pointer and the link register here; in practice, this is likely to
+ * be insufficient for unwinding in C/C++ based runtimes, especially without a
+ * means to restore the stack pointer. Thankfully, unwinders and debuggers
  * already have baked-in strategies for attempting to unwind out of signals.
  */
-       .cfi_def_cfa    x29, 0
-       .cfi_offset     x29, 0 * 8
-       .cfi_offset     x30, 1 * 8
+//     .cfi_def_cfa    x29, 0
+//     .cfi_offset     x29, 0 * 8
+//     .cfi_offset     x30, 1 * 8
 
 /*
  * This mysterious NOP is required for some unwinders (e.g. libc++) that
        nop     // Mysterious NOP
 
 /*
- * GDB relies on being able to identify the sigreturn instruction sequence to
- * unwind from signal handlers. We cannot, therefore, use SYM_FUNC_START()
- * here, as it will emit a BTI C instruction and break the unwinder. Thankfully,
- * this function is only ever called from a RET and so omitting the landing pad
- * is perfectly fine.
+ * GDB, libgcc and libunwind rely on being able to identify the sigreturn
+ * instruction sequence to unwind from signal handlers. We cannot, therefore,
+ * use SYM_FUNC_START() here, as it will emit a BTI C instruction and break the
+ * unwinder. Thankfully, this function is only ever called from a RET and so
+ * omitting the landing pad is perfectly fine.
  */
 SYM_CODE_START(__kernel_rt_sigreturn)
+//     PLEASE DO NOT MODIFY
        mov     x8, #__NR_rt_sigreturn
+//     PLEASE DO NOT MODIFY
        svc     #0
-       .cfi_endproc
+//     PLEASE DO NOT MODIFY
+//     .cfi_endproc
 SYM_CODE_END(__kernel_rt_sigreturn)
 
 emit_aarch64_feature_1_and
index 7ea1e82..d88148b 100644 (file)
@@ -140,7 +140,6 @@ hostprogs := $(munge)
 
 c-obj-vdso := note.o
 c-obj-vdso-gettimeofday := vgettimeofday.o
-asm-obj-vdso := sigreturn.o
 
 ifneq ($(c-gettimeofday-y),)
 VDSO_CFLAGS_gettimeofday_o += -include $(c-gettimeofday-y)
diff --git a/arch/arm64/kernel/vdso32/sigreturn.S b/arch/arm64/kernel/vdso32/sigreturn.S
deleted file mode 100644 (file)
index b009106..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file provides both A32 and T32 versions, in accordance with the
- * arm sigreturn code.
- *
- * Please read the comments in arch/arm64/kernel/vdso/sigreturn.S to
- * understand some of the craziness in here.
- *
- * Copyright (C) 2018 ARM Limited
- */
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-
-       .text
-
-       .arm
-       .fnstart
-       .save {r0-r15}
-       .pad #COMPAT_SIGFRAME_REGS_OFFSET
-       nop
-SYM_CODE_START(__kernel_sigreturn_arm)
-       mov r7, #__NR_compat_sigreturn
-       svc #0
-       .fnend
-SYM_CODE_END(__kernel_sigreturn_arm)
-
-       .fnstart
-       .save {r0-r15}
-       .pad #COMPAT_RT_SIGFRAME_REGS_OFFSET
-       nop
-SYM_CODE_START(__kernel_rt_sigreturn_arm)
-       mov r7, #__NR_compat_rt_sigreturn
-       svc #0
-       .fnend
-SYM_CODE_END(__kernel_rt_sigreturn_arm)
-
-       .thumb
-       .fnstart
-       .save {r0-r15}
-       .pad #COMPAT_SIGFRAME_REGS_OFFSET
-       nop
-SYM_CODE_START(__kernel_sigreturn_thumb)
-       mov r7, #__NR_compat_sigreturn
-       svc #0
-       .fnend
-SYM_CODE_END(__kernel_sigreturn_thumb)
-
-       .fnstart
-       .save {r0-r15}
-       .pad #COMPAT_RT_SIGFRAME_REGS_OFFSET
-       nop
-SYM_CODE_START(__kernel_rt_sigreturn_thumb)
-       mov r7, #__NR_compat_rt_sigreturn
-       svc #0
-       .fnend
-SYM_CODE_END(__kernel_rt_sigreturn_thumb)
index a394492..337d035 100644 (file)
@@ -64,19 +64,7 @@ VERSION
                __vdso_clock_gettime;
                __vdso_gettimeofday;
                __vdso_clock_getres;
-               __kernel_sigreturn_arm;
-               __kernel_sigreturn_thumb;
-               __kernel_rt_sigreturn_arm;
-               __kernel_rt_sigreturn_thumb;
                __vdso_clock_gettime64;
        local: *;
        };
 }
-
-/*
- * Make the sigreturn code visible to the kernel.
- */
-VDSO_compat_sigreturn_arm      = __kernel_sigreturn_arm;
-VDSO_compat_sigreturn_thumb    = __kernel_sigreturn_thumb;
-VDSO_compat_rt_sigreturn_arm   = __kernel_rt_sigreturn_arm;
-VDSO_compat_rt_sigreturn_thumb = __kernel_rt_sigreturn_thumb;
index 6827da7..5423ffe 100644 (file)
@@ -165,9 +165,6 @@ SECTIONS
                *(.altinstructions)
                __alt_instructions_end = .;
        }
-       .altinstr_replacement : {
-               *(.altinstr_replacement)
-       }
 
        . = ALIGN(SEGMENT_ALIGN);
        __inittext_end = .;
index 6e6ed55..e76c0e8 100644 (file)
@@ -136,11 +136,15 @@ SYM_CODE_START(__kvm_handle_stub_hvc)
 
 1:     cmp     x0, #HVC_RESET_VECTORS
        b.ne    1f
-reset:
+
        /*
-        * Reset kvm back to the hyp stub. Do not clobber x0-x4 in
-        * case we coming via HVC_SOFT_RESTART.
+        * Set the HVC_RESET_VECTORS return code before entering the common
+        * path so that we do not clobber x0-x2 in case we are coming via
+        * HVC_SOFT_RESTART.
         */
+       mov     x0, xzr
+reset:
+       /* Reset kvm back to the hyp stub. */
        mrs     x5, sctlr_el2
        mov_q   x6, SCTLR_ELx_FLAGS
        bic     x5, x5, x6              // Clear SCTL_M and etc
@@ -151,7 +155,6 @@ reset:
        /* Install stub vectors */
        adr_l   x5, __hyp_stub_vectors
        msr     vbar_el2, x5
-       mov     x0, xzr
        eret
 
 1:     /* Bad stub call */
index b5ae3a5..3c22416 100644 (file)
@@ -159,7 +159,10 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
 }
 
 /*
- * On VHE ensure that only guest events have EL0 counting enabled
+ * On VHE ensure that only guest events have EL0 counting enabled.
+ * This is called from both vcpu_{load,put} and the sysreg handling.
+ * Since the latter is preemptible, special care must be taken to
+ * disable preemption.
  */
 void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
 {
@@ -169,12 +172,14 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
        if (!has_vhe())
                return;
 
+       preempt_disable();
        host = this_cpu_ptr(&kvm_host_data);
        events_guest = host->pmu_events.events_guest;
        events_host = host->pmu_events.events_host;
 
        kvm_vcpu_pmu_enable_el0(events_guest);
        kvm_vcpu_pmu_disable_el0(events_host);
+       preempt_enable();
 }
 
 /*
index 1e0f4c2..f7b52ce 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/arm-smccc.h>
 #include <linux/kvm_host.h>
+#include <linux/sched/stat.h>
 
 #include <asm/kvm_mmu.h>
 #include <asm/pvclock-abi.h>
@@ -73,6 +74,11 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
        return base;
 }
 
+static bool kvm_arm_pvtime_supported(void)
+{
+       return !!sched_info_on();
+}
+
 int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr)
 {
@@ -82,7 +88,8 @@ int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
        int ret = 0;
        int idx;
 
-       if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+       if (!kvm_arm_pvtime_supported() ||
+           attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
                return -ENXIO;
 
        if (get_user(ipa, user))
@@ -110,7 +117,8 @@ int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
        u64 __user *user = (u64 __user *)attr->addr;
        u64 ipa;
 
-       if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
+       if (!kvm_arm_pvtime_supported() ||
+           attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
                return -ENXIO;
 
        ipa = vcpu->arch.steal.base;
@@ -125,7 +133,8 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
 {
        switch (attr->attr) {
        case KVM_ARM_VCPU_PVTIME_IPA:
-               return 0;
+               if (kvm_arm_pvtime_supported())
+                       return 0;
        }
        return -ENXIO;
 }
index d3b2090..6ed36be 100644 (file)
@@ -245,7 +245,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
  */
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
-       int ret = -EINVAL;
+       int ret;
        bool loaded;
        u32 pstate;
 
@@ -269,15 +269,19 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 
        if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) ||
            test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) {
-               if (kvm_vcpu_enable_ptrauth(vcpu))
+               if (kvm_vcpu_enable_ptrauth(vcpu)) {
+                       ret = -EINVAL;
                        goto out;
+               }
        }
 
        switch (vcpu->arch.target) {
        default:
                if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
-                       if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
+                       if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
+                               ret = -EINVAL;
                                goto out;
+                       }
                        pstate = VCPU_RESET_PSTATE_SVC;
                } else {
                        pstate = VCPU_RESET_PSTATE_EL1;
index 27ac833..b5fa73c 100644 (file)
@@ -90,7 +90,15 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
            !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
                disable_irq_nosync(irq);
 
+       /*
+        * The v4.1 doorbell can fire concurrently with the vPE being
+        * made non-resident. Ensure we only update pending_last
+        * *after* the non-residency sequence has completed.
+        */
+       raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
        vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
+       raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
+
        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
        kvm_vcpu_kick(vcpu);
 
index e631e64..1e93cfc 100644 (file)
@@ -404,11 +404,6 @@ void __init arm64_memblock_init(void)
        high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
 
        dma_contiguous_reserve(arm64_dma32_phys_limit);
-
-#ifdef CONFIG_ARM64_4K_PAGES
-       hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
-#endif
-
 }
 
 void __init bootmem_init(void)
@@ -424,6 +419,16 @@ void __init bootmem_init(void)
        min_low_pfn = min;
 
        arm64_numa_init();
+
+       /*
+        * must be done after arm64_numa_init() which calls numa_init() to
+        * initialize node_online_map that gets used in hugetlb_cma_reserve()
+        * while allocating required CMA size across online nodes.
+        */
+#ifdef CONFIG_ARM64_4K_PAGES
+       hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
+#endif
+
        /*
         * Sparsemem tries to allocate bootmem in memory_present(), so must be
         * done after the fixed reservations.
index 990929c..1df25f2 100644 (file)
@@ -723,6 +723,7 @@ int kern_addr_valid(unsigned long addr)
        pmd_t *pmdp, pmd;
        pte_t *ptep, pte;
 
+       addr = arch_kasan_reset_tag(addr);
        if ((((long)addr) >> VA_BITS) != -1UL)
                return 0;
 
index 335ca49..dff2e2e 100644 (file)
@@ -6,6 +6,6 @@
 
 /* These are from csum_64plus.S */
 EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(csum_partial_copy);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
 EXPORT_SYMBOL(ip_compute_csum);
 EXPORT_SYMBOL(ip_fast_csum);
index 8e625a3..9c07127 100644 (file)
@@ -10,8 +10,8 @@
 #include <linux/linkage.h>
 
 ;
-;unsigned int csum_partial_copy(const char *src, char * dst,
-;                              int len, int sum)
+;unsigned int csum_partial_copy_nocheck(const char *src, char * dst,
+;                                      int len, int sum)
 ;
 ; A4:  src
 ; B4:  dst
@@ -21,7 +21,7 @@
 ;
 
        .text
-ENTRY(csum_partial_copy)
+ENTRY(csum_partial_copy_nocheck)
        MVC     .S2     ILC,B30
 
        MV      .D1X    B6,A31          ; given csum
@@ -149,7 +149,7 @@ L10:        ADD     .D1     A31,A9,A9
 
        BNOP    .S2     B3,4
        MVC     .S2     B30,ILC
-ENDPROC(csum_partial_copy)
+ENDPROC(csum_partial_copy_nocheck)
 
 ;
 ;unsigned short
index 3c425b8..b4a7ec1 100644 (file)
@@ -72,7 +72,8 @@ static int ftrace_check_current_nop(unsigned long hook)
        uint16_t olds[7];
        unsigned long hook_pos = hook - 2;
 
-       if (probe_kernel_read((void *)olds, (void *)hook_pos, sizeof(nops)))
+       if (copy_from_kernel_nofault((void *)olds, (void *)hook_pos,
+                       sizeof(nops)))
                return -EFAULT;
 
        if (memcmp((void *)nops, (void *)olds, sizeof(nops))) {
@@ -97,7 +98,7 @@ static int ftrace_modify_code(unsigned long hook, unsigned long target,
 
        make_jbsr(target, hook, call, nolr);
 
-       ret = probe_kernel_write((void *)hook_pos, enable ? call : nops,
+       ret = copy_to_kernel_nofault((void *)hook_pos, enable ? call : nops,
                                 sizeof(nops));
        if (ret)
                return -EPERM;
index cea15f2..3a033d2 100644 (file)
@@ -35,7 +35,7 @@ static inline void *dereference_function_descriptor(void *ptr)
        struct fdesc *desc = ptr;
        void *p;
 
-       if (!probe_kernel_address(&desc->ip, p))
+       if (!get_kernel_nofault(p, (void *)&desc->ip))
                ptr = p;
        return ptr;
 }
index cee411e..b2ab2d5 100644 (file)
@@ -108,7 +108,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
                goto skip_check;
 
        /* read the text we want to modify */
-       if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+       if (copy_from_kernel_nofault(replaced, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;
 
        /* Make sure it is what we expect it to be */
@@ -117,7 +117,7 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
 
 skip_check:
        /* replace the text with the new text */
-       if (probe_kernel_write(((void *)ip), new_code, MCOUNT_INSN_SIZE))
+       if (copy_to_kernel_nofault(((void *)ip), new_code, MCOUNT_INSN_SIZE))
                return -EPERM;
        flush_icache_range(ip, ip + MCOUNT_INSN_SIZE);
 
@@ -129,7 +129,7 @@ static int ftrace_make_nop_check(struct dyn_ftrace *rec, unsigned long addr)
        unsigned char __attribute__((aligned(8))) replaced[MCOUNT_INSN_SIZE];
        unsigned long ip = rec->ip;
 
-       if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+       if (copy_from_kernel_nofault(replaced, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;
        if (rec->flags & FTRACE_FL_CONVERTED) {
                struct ftrace_call_insn *call_insn, *tmp_call;
index 67994a7..1dd57ba 100644 (file)
@@ -42,7 +42,7 @@ enum unw_register_index {
 
 struct unw_info_block {
        u64 header;
-       u64 desc[0];            /* unwind descriptors */
+       u64 desc[];             /* unwind descriptors */
        /* personality routine and language-specific data follow behind descriptors */
 };
 
index e779b19..f66f4b1 100644 (file)
@@ -138,7 +138,8 @@ void __init setup_arch(char **cmdline_p)
        pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
                 __bss_stop, memory_start, memory_start, memory_end);
 
-       memblock_add(memory_start, memory_end - memory_start);
+       memblock_add(_rambase, memory_end - _rambase);
+       memblock_reserve(_rambase, memory_start - _rambase);
 
        /* Keep a copy of command line */
        *cmdline_p = &command_line[0];
index 29f4792..7d04210 100644 (file)
@@ -174,7 +174,7 @@ void __init cf_bootmem_alloc(void)
        m68k_memory[0].addr = _rambase;
        m68k_memory[0].size = _ramend - _rambase;
 
-       memblock_add(m68k_memory[0].addr, m68k_memory[0].size);
+       memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0);
 
        /* compute total pages in system */
        num_pages = PFN_DOWN(_ramend - _rambase);
index 8d22828..bc72304 100644 (file)
@@ -92,7 +92,7 @@
                        "MIC1N", "Built-in Mic";
                simple-audio-card,pin-switches = "Speaker", "Headphones";
 
-               simple-audio-card,hp-det-gpio = <&gpf 21 GPIO_ACTIVE_HIGH>;
+               simple-audio-card,hp-det-gpio = <&gpf 21 GPIO_ACTIVE_LOW>;
                simple-audio-card,aux-devs = <&speaker_amp>, <&headphones_amp>;
 
                simple-audio-card,bitclock-master = <&dai_codec>;
index 33991fd..897de50 100644 (file)
@@ -3,6 +3,7 @@
 
 /dts-v1/;
 
+#include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/phy/phy-ocelot-serdes.h>
 #include "ocelot.dtsi"
                pins = "GPIO_4";
                function = "gpio";
        };
+
+       phy_load_save_pins: phy_load_save_pins {
+               pins = "GPIO_10";
+               function = "ptp2";
+       };
 };
 
 &mdio0 {
 &mdio1 {
        status = "okay";
        pinctrl-names = "default";
-       pinctrl-0 = <&miim1>, <&phy_int_pins>;
+       pinctrl-0 = <&miim1>, <&phy_int_pins>, <&phy_load_save_pins>;
 
        phy7: ethernet-phy@0 {
                reg = <0>;
                interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-parent = <&gpio>;
+               load-save-gpios = <&gpio 10 GPIO_ACTIVE_HIGH>;
        };
        phy6: ethernet-phy@1 {
                reg = <1>;
                interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-parent = <&gpio>;
+               load-save-gpios = <&gpio 10 GPIO_ACTIVE_HIGH>;
        };
        phy5: ethernet-phy@2 {
                reg = <2>;
                interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-parent = <&gpio>;
+               load-save-gpios = <&gpio 10 GPIO_ACTIVE_HIGH>;
        };
        phy4: ethernet-phy@3 {
                reg = <3>;
                interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-parent = <&gpio>;
+               load-save-gpios = <&gpio 10 GPIO_ACTIVE_HIGH>;
        };
 };
 
index c628747..7dd4a80 100644 (file)
                                                                \
        /*                                                      \
         * We can't unroll if the number of iterations isn't    \
-        * compile-time constant. Unfortunately GCC versions    \
-        * up until 4.6 tend to miss obvious constants & cause  \
+        * compile-time constant. Unfortunately clang versions  \
+        * up until 8.0 tend to miss obvious constants & cause  \
         * this check to fail, even though they go on to        \
         * generate reasonable code for the switch statement,   \
         * so we skip the sanity check for those compilers.     \
         */                                                     \
-       BUILD_BUG_ON((CONFIG_GCC_VERSION >= 40700 ||            \
-                     CONFIG_CLANG_VERSION >= 80000) &&         \
-                    !__builtin_constant_p(times));             \
+       BUILD_BUG_ON(!__builtin_constant_p(times));             \
                                                                \
        switch (times) {                                        \
        case 32: fn(__VA_ARGS__); /* fall through */            \
index 6cfae24..d043c2f 100644 (file)
@@ -86,9 +86,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
                goto out;
        }
 
-       if ((probe_kernel_read(&prev_insn, p->addr - 1,
-                               sizeof(mips_instruction)) == 0) &&
-                               insn_has_delayslot(prev_insn)) {
+       if (copy_from_kernel_nofault(&prev_insn, p->addr - 1,
+                       sizeof(mips_instruction)) == 0 &&
+           insn_has_delayslot(prev_insn)) {
                pr_notice("Kprobes for branch delayslot are not supported\n");
                ret = -EINVAL;
                goto out;
index f777141..8488b0d 100644 (file)
@@ -60,8 +60,8 @@
 50     n32     getsockname                     sys_getsockname
 51     n32     getpeername                     sys_getpeername
 52     n32     socketpair                      sys_socketpair
-53     n32     setsockopt                      compat_sys_setsockopt
-54     n32     getsockopt                      compat_sys_getsockopt
+53     n32     setsockopt                      sys_setsockopt
+54     n32     getsockopt                      sys_getsockopt
 55     n32     clone                           __sys_clone
 56     n32     fork                            __sys_fork
 57     n32     execve                          compat_sys_execve
index 1328062..b20522f 100644 (file)
 170    o32     connect                         sys_connect
 171    o32     getpeername                     sys_getpeername
 172    o32     getsockname                     sys_getsockname
-173    o32     getsockopt                      sys_getsockopt                  compat_sys_getsockopt
+173    o32     getsockopt                      sys_getsockopt                  sys_getsockopt
 174    o32     listen                          sys_listen
 175    o32     recv                            sys_recv                        compat_sys_recv
 176    o32     recvfrom                        sys_recvfrom                    compat_sys_recvfrom
 178    o32     send                            sys_send
 179    o32     sendmsg                         sys_sendmsg                     compat_sys_sendmsg
 180    o32     sendto                          sys_sendto
-181    o32     setsockopt                      sys_setsockopt                  compat_sys_setsockopt
+181    o32     setsockopt                      sys_setsockopt                  sys_setsockopt
 182    o32     shutdown                        sys_shutdown
 183    o32     socket                          sys_socket
 184    o32     socketpair                      sys_socketpair
index 7c32c95..f655af6 100644 (file)
@@ -723,12 +723,14 @@ static int simulate_loongson3_cpucfg(struct pt_regs *regs,
                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
                /* Do not emulate on unsupported core models. */
-               if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data))
+               preempt_disable();
+               if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data)) {
+                       preempt_enable();
                        return -1;
-
+               }
                regs->regs[rd] = loongson3_cpucfg_read_synthesized(
                        &current_cpu_data, sel);
-
+               preempt_enable();
                return 0;
        }
 
@@ -2169,6 +2171,7 @@ static void configure_status(void)
 
        change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
                         status_set);
+       back_to_back_c0_hazard();
 }
 
 unsigned int hwrena;
index 5ae82d9..d242300 100644 (file)
@@ -1722,6 +1722,7 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
                          vcpu->arch.gprs[rt], *(u32 *)data);
                break;
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
        case sdl_op:
                run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
                                        vcpu->arch.host_cp0_badvaddr) & (~0x7);
@@ -1815,6 +1816,7 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
                          vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
                          vcpu->arch.gprs[rt], *(u64 *)data);
                break;
+#endif
 
 #ifdef CONFIG_CPU_LOONGSON64
        case sdc2_op:
@@ -2002,6 +2004,7 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
                }
                break;
 
+#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
        case ldl_op:
                run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
                                        vcpu->arch.host_cp0_badvaddr) & (~0x7);
@@ -2073,6 +2076,7 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
                        break;
                }
                break;
+#endif
 
 #ifdef CONFIG_CPU_LOONGSON64
        case ldc2_op:
index 521bd58..666d335 100644 (file)
@@ -67,8 +67,10 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        VCPU_STAT("vz_ghfc", vz_ghfc_exits),
        VCPU_STAT("vz_gpa", vz_gpa_exits),
        VCPU_STAT("vz_resvd", vz_resvd_exits),
+#ifdef CONFIG_CPU_LOONGSON64
        VCPU_STAT("vz_cpucfg", vz_cpucfg_exits),
 #endif
+#endif
        VCPU_STAT("halt_successful_poll", halt_successful_poll),
        VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
        VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
index aa37545..b103420 100644 (file)
@@ -514,8 +514,8 @@ void __init ltq_soc_init(void)
                clkdev_add_pmu("1e10b308.eth", NULL, 0, 0, PMU_SWITCH |
                               PMU_PPE_DP | PMU_PPE_TC);
                clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
-               clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY);
-               clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY);
+               clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY);
+               clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY);
                clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
                clkdev_add_pmu("1e116000.mei", "afe", 1, 2, PMU_ANALOG_DSL_AFE);
                clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
@@ -538,8 +538,8 @@ void __init ltq_soc_init(void)
                                PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
                                PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
                                PMU_PPE_QSB | PMU_PPE_TOP);
-               clkdev_add_pmu("1e108000.gswip", "gphy0", 0, 0, PMU_GPHY);
-               clkdev_add_pmu("1e108000.gswip", "gphy1", 0, 0, PMU_GPHY);
+               clkdev_add_pmu("1e108000.switch", "gphy0", 0, 0, PMU_GPHY);
+               clkdev_add_pmu("1e108000.switch", "gphy1", 0, 0, PMU_GPHY);
                clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
                clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
                clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
index 22ab77e..3763b3f 100644 (file)
@@ -131,13 +131,14 @@ static int __ftrace_modify_code(unsigned long pc, unsigned long *old_insn,
        unsigned long orig_insn[3];
 
        if (validate) {
-               if (probe_kernel_read(orig_insn, (void *)pc, MCOUNT_INSN_SIZE))
+               if (copy_from_kernel_nofault(orig_insn, (void *)pc,
+                               MCOUNT_INSN_SIZE))
                        return -EFAULT;
                if (memcmp(orig_insn, old_insn, MCOUNT_INSN_SIZE))
                        return -EINVAL;
        }
 
-       if (probe_kernel_write((void *)pc, new_insn, MCOUNT_INSN_SIZE))
+       if (copy_to_kernel_nofault((void *)pc, new_insn, MCOUNT_INSN_SIZE))
                return -EPERM;
 
        return 0;
index ec39698..b4316c3 100644 (file)
 
 /* Take these from lib/checksum.c */
 extern __wsum csum_partial(const void *buff, int len, __wsum sum);
-extern __wsum csum_partial_copy(const void *src, void *dst, int len,
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len,
                                __wsum sum);
-#define csum_partial_copy_nocheck(src, dst, len, sum)  \
-       csum_partial_copy((src), (dst), (len), (sum))
+#define csum_partial_copy_nocheck csum_partial_copy_nocheck
 
 extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 extern __sum16 ip_compute_csum(const void *buff, int len);
index c152a68..3457276 100644 (file)
@@ -74,8 +74,11 @@ void *arch_dma_set_uncached(void *cpu_addr, size_t size)
         * We need to iterate through the pages, clearing the dcache for
         * them and setting the cache-inhibit bit.
         */
+       mmap_read_lock(&init_mm);
        error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
                        NULL);
+       mmap_read_unlock(&init_mm);
+
        if (error)
                return ERR_PTR(error);
        return cpu_addr;
@@ -85,9 +88,11 @@ void arch_dma_clear_uncached(void *cpu_addr, size_t size)
 {
        unsigned long va = (unsigned long)cpu_addr;
 
+       mmap_read_lock(&init_mm);
        /* walk_page_range shouldn't be able to fail here */
        WARN_ON(walk_page_range(&init_mm, va, va + size,
                        &clear_nocache_walk_ops, NULL));
+       mmap_read_unlock(&init_mm);
 }
 
 void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
index b836fc6..1df0f67 100644 (file)
@@ -172,7 +172,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
        ip = (void *)(rec->ip + 4 - size);
 
-       ret = probe_kernel_read(insn, ip, size);
+       ret = copy_from_kernel_nofault(insn, ip, size);
        if (ret)
                return ret;
 
index 664278d..c4554ac 100644 (file)
@@ -154,8 +154,8 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 
 int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
 {
-       int ret = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
-                               BREAK_INSTR_SIZE);
+       int ret = copy_from_kernel_nofault(bpt->saved_instr,
+                       (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
        if (ret)
                return ret;
 
index 230a642..b7abb12 100644 (file)
@@ -293,7 +293,7 @@ void *dereference_function_descriptor(void *ptr)
        Elf64_Fdesc *desc = ptr;
        void *p;
 
-       if (!probe_kernel_address(&desc->addr, p))
+       if (!get_kernel_nofault(p, (void *)&desc->addr))
                ptr = p;
        return ptr;
 }
index 5a758fa..3494e4f 100644 (file)
 178    common  rt_sigqueueinfo         sys_rt_sigqueueinfo             compat_sys_rt_sigqueueinfo
 179    common  rt_sigsuspend           sys_rt_sigsuspend               compat_sys_rt_sigsuspend
 180    common  chown                   sys_chown
-181    common  setsockopt              sys_setsockopt                  compat_sys_setsockopt
-182    common  getsockopt              sys_getsockopt                  compat_sys_getsockopt
+181    common  setsockopt              sys_setsockopt                  sys_setsockopt
+182    common  getsockopt              sys_getsockopt                  sys_getsockopt
 183    common  sendmsg                 sys_sendmsg                     compat_sys_sendmsg
 184    common  recvmsg                 sys_recvmsg                     compat_sys_recvmsg
 185    common  semop                   sys_semop
index 94a9fe2..4b75388 100644 (file)
@@ -57,7 +57,7 @@ void * memcpy(void * dst,const void *src, size_t count)
 EXPORT_SYMBOL(raw_copy_in_user);
 EXPORT_SYMBOL(memcpy);
 
-bool probe_kernel_read_allowed(const void *unsafe_src, size_t size)
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
 {
        if ((unsigned long)unsafe_src < PAGE_SIZE)
                return false;
index b56f141..b0afbdd 100644 (file)
@@ -205,10 +205,6 @@ static inline void pmd_clear(pmd_t *pmdp)
        *pmdp = __pmd(0);
 }
 
-/* to find an entry in a page-table-directory */
-#define pgd_index(address)      ((address) >> PGDIR_SHIFT)
-#define pgd_offset(mm, address)         ((mm)->pgd + pgd_index(address))
-
 /*
  * PTE updates. This function is called whenever an existing
  * valid PTE is updated. This does -not- include set_pte_at()
@@ -230,6 +226,8 @@ static inline void pmd_clear(pmd_t *pmdp)
  * For other page sizes, we have a single entry in the table.
  */
 #ifdef CONFIG_PPC_8xx
+static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr);
+
 static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
                                     unsigned long clr, unsigned long set, int huge)
 {
@@ -237,7 +235,7 @@ static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, p
        pte_basic_t old = pte_val(*p);
        pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
        int num, i;
-       pmd_t *pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);
+       pmd_t *pmd = pmd_off(mm, addr);
 
        if (!huge)
                num = PAGE_SIZE / SZ_4K;
@@ -286,6 +284,16 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
        return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
 }
 
+#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
+#define __HAVE_ARCH_PTEP_GET
+static inline pte_t ptep_get(pte_t *ptep)
+{
+       pte_t pte = {READ_ONCE(ptep->pte), 0, 0, 0};
+
+       return pte;
+}
+#endif
+
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
                                      pte_t *ptep)
index d198717..324d7b2 100644 (file)
@@ -85,7 +85,7 @@ static inline void *dereference_function_descriptor(void *ptr)
        struct ppc64_opd_entry *desc = ptr;
        void *p;
 
-       if (!probe_kernel_address(&desc->funcaddr, p))
+       if (!get_kernel_nofault(p, (void *)&desc->funcaddr))
                ptr = p;
        return ptr;
 }
diff --git a/arch/powerpc/include/uapi/asm/papr_pdsm.h b/arch/powerpc/include/uapi/asm/papr_pdsm.h
new file mode 100644 (file)
index 0000000..9ccecc1
--- /dev/null
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * PAPR nvDimm Specific Methods (PDSM) and structs for libndctl
+ *
+ * (C) Copyright IBM 2020
+ *
+ * Author: Vaibhav Jain <vaibhav at linux.ibm.com>
+ */
+
+#ifndef _UAPI_ASM_POWERPC_PAPR_PDSM_H_
+#define _UAPI_ASM_POWERPC_PAPR_PDSM_H_
+
+#include <linux/types.h>
+#include <linux/ndctl.h>
+
+/*
+ * PDSM Envelope:
+ *
+ * The ioctl ND_CMD_CALL exchanges data between user-space and kernel via
+ * envelope which consists of 2 headers sections and payload sections as
+ * illustrated below:
+ *  +-----------------+---------------+---------------------------+
+ *  |   64-Bytes      |   8-Bytes     |       Max 184-Bytes       |
+ *  +-----------------+---------------+---------------------------+
+ *  | ND-HEADER       |  PDSM-HEADER  |      PDSM-PAYLOAD         |
+ *  +-----------------+---------------+---------------------------+
+ *  | nd_family       |               |                           |
+ *  | nd_size_out     | cmd_status    |                           |
+ *  | nd_size_in      | reserved      |     nd_pdsm_payload       |
+ *  | nd_command      | payload   --> |                           |
+ *  | nd_fw_size      |               |                           |
+ *  | nd_payload ---> |               |                           |
+ *  +---------------+-----------------+---------------------------+
+ *
+ * ND Header:
+ * This is the generic libnvdimm header described as 'struct nd_cmd_pkg'
+ * which is interpreted by libnvdimm before passed on to papr_scm. Important
+ * member fields used are:
+ * 'nd_family'         : (In) NVDIMM_FAMILY_PAPR_SCM
+ * 'nd_size_in'                : (In) PDSM-HEADER + PDSM-IN-PAYLOAD (usually 0)
+ * 'nd_size_out'        : (In) PDSM-HEADER + PDSM-RETURN-PAYLOAD
+ * 'nd_command'         : (In) One of PAPR_PDSM_XXX
+ * 'nd_fw_size'         : (Out) PDSM-HEADER + size of actual payload returned
+ *
+ * PDSM Header:
+ * This is papr-scm specific header that precedes the payload. This is defined
+ * as nd_cmd_pdsm_pkg.  The following fields are available in this header:
+ *
+ * 'cmd_status'                : (Out) Errors if any encountered while servicing PDSM.
+ * 'reserved'          : Not used, reserved for future and should be set to 0.
+ * 'payload'            : A union of all the possible payload structs
+ *
+ * PDSM Payload:
+ *
+ * The layout of the PDSM Payload is defined by various structs shared between
+ * papr_scm and libndctl so that contents of payload can be interpreted. As such
+ * its defined as a union of all possible payload structs as
+ * 'union nd_pdsm_payload'. Based on the value of 'nd_cmd_pkg.nd_command'
+ * appropriate member of the union is accessed.
+ */
+
+/* Max payload size that we can handle */
+#define ND_PDSM_PAYLOAD_MAX_SIZE 184
+
+/* Size of the PDSM header that precedes the payload */
+#define ND_PDSM_HDR_SIZE \
+       (sizeof(struct nd_pkg_pdsm) - ND_PDSM_PAYLOAD_MAX_SIZE)
+
+/* Various nvdimm health indicators */
+#define PAPR_PDSM_DIMM_HEALTHY       0
+#define PAPR_PDSM_DIMM_UNHEALTHY     1
+#define PAPR_PDSM_DIMM_CRITICAL      2
+#define PAPR_PDSM_DIMM_FATAL         3
+
+/*
+ * Struct exchanged between kernel & ndctl in for PAPR_PDSM_HEALTH
+ * Various flags indicate the health status of the dimm.
+ *
+ * extension_flags     : Any extension fields present in the struct.
+ * dimm_unarmed                : Dimm not armed. So contents won't persist.
+ * dimm_bad_shutdown   : Previous shutdown did not persist contents.
+ * dimm_bad_restore    : Contents from previous shutdown weren't restored.
+ * dimm_scrubbed       : Contents of the dimm have been scrubbed.
+ * dimm_locked         : Contents of the dimm can't be modified until CEC reboot
+ * dimm_encrypted      : Contents of dimm are encrypted.
+ * dimm_health         : Dimm health indicator. One of PAPR_PDSM_DIMM_XXXX
+ */
+struct nd_papr_pdsm_health {
+       union {
+               struct {
+                       __u32 extension_flags;
+                       __u8 dimm_unarmed;
+                       __u8 dimm_bad_shutdown;
+                       __u8 dimm_bad_restore;
+                       __u8 dimm_scrubbed;
+                       __u8 dimm_locked;
+                       __u8 dimm_encrypted;
+                       __u16 dimm_health;
+               };
+               __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE];
+       };
+};
+
+/*
+ * Methods to be embedded in ND_CMD_CALL request. These are sent to the kernel
+ * via 'nd_cmd_pkg.nd_command' member of the ioctl struct
+ */
+enum papr_pdsm {
+       PAPR_PDSM_MIN = 0x0,
+       PAPR_PDSM_HEALTH,
+       PAPR_PDSM_MAX,
+};
+
+/* Maximal union that can hold all possible payload types */
+union nd_pdsm_payload {
+       struct nd_papr_pdsm_health health;
+       __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE];
+} __packed;
+
+/*
+ * PDSM-header + payload expected with ND_CMD_CALL ioctl from libnvdimm
+ * Valid member of union 'payload' is identified via 'nd_cmd_pkg.nd_command'
+ * that should always precede this struct when sent to papr_scm via CMD_CALL
+ * interface.
+ */
+struct nd_pkg_pdsm {
+       __s32 cmd_status;       /* Out: Sub-cmd status returned back */
+       __u16 reserved[2];      /* Ignored and to be set as '0' */
+       union nd_pdsm_payload payload;
+} __packed;
+
+#endif /* _UAPI_ASM_POWERPC_PAPR_PDSM_H_ */
index e70ebb5..fa08069 100644 (file)
@@ -270,7 +270,7 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
        .endif
 
-       ld      r10,PACA_EXGEN+EX_CTR(r13)
+       ld      r10,IAREA+EX_CTR(r13)
        mtctr   r10
 BEGIN_FTR_SECTION
        ld      r10,IAREA+EX_PPR(r13)
@@ -298,7 +298,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 
        .if IKVM_SKIP
 89:    mtocrf  0x80,r9
-       ld      r10,PACA_EXGEN+EX_CTR(r13)
+       ld      r10,IAREA+EX_CTR(r13)
        mtctr   r10
        ld      r9,IAREA+EX_R9(r13)
        ld      r10,IAREA+EX_R10(r13)
index 652b285..4090802 100644 (file)
@@ -421,7 +421,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
        unsigned int instr;
        struct ppc_inst *addr = (struct ppc_inst *)bpt->bpt_addr;
 
-       err = probe_kernel_address(addr, instr);
+       err = get_kernel_nofault(instr, (unsigned *) addr);
        if (err)
                return err;
 
index 6f96f65..9cc792a 100644 (file)
@@ -289,7 +289,7 @@ int kprobe_handler(struct pt_regs *regs)
        if (!p) {
                unsigned int instr;
 
-               if (probe_kernel_address(addr, instr))
+               if (get_kernel_nofault(instr, addr))
                        goto no_kprobe;
 
                if (instr != BREAKPOINT_INSTRUCTION) {
index f4c2fa1..ae2b188 100644 (file)
@@ -756,7 +756,8 @@ int module_trampoline_target(struct module *mod, unsigned long addr,
 
        stub = (struct ppc64_stub_entry *)addr;
 
-       if (probe_kernel_read(&magic, &stub->magic, sizeof(magic))) {
+       if (copy_from_kernel_nofault(&magic, &stub->magic,
+                       sizeof(magic))) {
                pr_err("%s: fault reading magic for stub %lx for %s\n", __func__, addr, mod->name);
                return -EFAULT;
        }
@@ -766,7 +767,8 @@ int module_trampoline_target(struct module *mod, unsigned long addr,
                return -EFAULT;
        }
 
-       if (probe_kernel_read(&funcdata, &stub->funcdata, sizeof(funcdata))) {
+       if (copy_from_kernel_nofault(&funcdata, &stub->funcdata,
+                       sizeof(funcdata))) {
                pr_err("%s: fault reading funcdata for stub %lx for %s\n", __func__, addr, mod->name);
                 return -EFAULT;
        }
index 7bb7faf..4650b9b 100644 (file)
@@ -1252,29 +1252,31 @@ struct task_struct *__switch_to(struct task_struct *prev,
 static void show_instructions(struct pt_regs *regs)
 {
        int i;
+       unsigned long nip = regs->nip;
        unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
 
        printk("Instruction dump:");
 
+       /*
+        * If we were executing with the MMU off for instructions, adjust pc
+        * rather than printing XXXXXXXX.
+        */
+       if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) {
+               pc = (unsigned long)phys_to_virt(pc);
+               nip = (unsigned long)phys_to_virt(regs->nip);
+       }
+
        for (i = 0; i < NR_INSN_TO_PRINT; i++) {
                int instr;
 
                if (!(i % 8))
                        pr_cont("\n");
 
-#if !defined(CONFIG_BOOKE)
-               /* If executing with the IMMU off, adjust pc rather
-                * than print XXXXXXXX.
-                */
-               if (!(regs->msr & MSR_IR))
-                       pc = (unsigned long)phys_to_virt(pc);
-#endif
-
                if (!__kernel_text_address(pc) ||
-                   probe_kernel_address((const void *)pc, instr)) {
+                   get_kernel_nofault(instr, (const void *)pc)) {
                        pr_cont("XXXXXXXX ");
                } else {
-                       if (regs->nip == pc)
+                       if (nip == pc)
                                pr_cont("<%08x> ", instr);
                        else
                                pr_cont("%08x ", instr);
@@ -1305,7 +1307,8 @@ void show_user_instructions(struct pt_regs *regs)
                for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
                        int instr;
 
-                       if (probe_user_read(&instr, (void __user *)pc, sizeof(instr))) {
+                       if (copy_from_user_nofault(&instr, (void __user *)pc,
+                                       sizeof(instr))) {
                                seq_buf_printf(&s, "XXXXXXXX ");
                                continue;
                        }
index f833a31..94eb5b2 100644 (file)
 336    common  recv                            sys_recv                        compat_sys_recv
 337    common  recvfrom                        sys_recvfrom                    compat_sys_recvfrom
 338    common  shutdown                        sys_shutdown
-339    common  setsockopt                      sys_setsockopt                  compat_sys_setsockopt
-340    common  getsockopt                      sys_getsockopt                  compat_sys_getsockopt
+339    common  setsockopt                      sys_setsockopt                  sys_setsockopt
+340    common  getsockopt                      sys_getsockopt                  sys_getsockopt
 341    common  sendmsg                         sys_sendmsg                     compat_sys_sendmsg
 342    common  recvmsg                         sys_recvmsg                     compat_sys_recvmsg
 343    32      recvmmsg                        sys_recvmmsg_time32             compat_sys_recvmmsg_time32
index 5e39962..c1fede6 100644 (file)
@@ -226,7 +226,7 @@ __ftrace_make_nop(struct module *mod,
        unsigned long ip = rec->ip;
        unsigned long tramp;
 
-       if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
+       if (copy_from_kernel_nofault(&op, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;
 
        /* Make sure that that this is still a 24bit jump */
@@ -249,7 +249,7 @@ __ftrace_make_nop(struct module *mod,
        pr_devel("ip:%lx jumps to %lx", ip, tramp);
 
        /* Find where the trampoline jumps to */
-       if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
+       if (copy_from_kernel_nofault(jmp, (void *)tramp, sizeof(jmp))) {
                pr_err("Failed to read %lx\n", tramp);
                return -EFAULT;
        }
index 3cb0c98..6a73714 100644 (file)
@@ -40,7 +40,8 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
        /* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
        if (kvmhv_on_pseries())
                return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
-                                         __pa(to), __pa(from), n);
+                                         (to != NULL) ? __pa(to): 0,
+                                         (from != NULL) ? __pa(from): 0, n);
 
        quadrant = 1;
        if (!pid)
@@ -64,9 +65,9 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
        isync();
 
        if (is_load)
-               ret = probe_user_read(to, (const void __user *)from, n);
+               ret = copy_from_user_nofault(to, (const void __user *)from, n);
        else
-               ret = probe_user_write((void __user *)to, from, n);
+               ret = copy_to_user_nofault((void __user *)to, from, n);
 
        /* switch the pid first to avoid running host with unallocated pid */
        if (quadrant == 1 && pid != old_pid)
index aedfd6e..9cc17eb 100644 (file)
@@ -15,11 +15,11 @@ int probe_user_read_inst(struct ppc_inst *inst,
        unsigned int val, suffix;
        int err;
 
-       err = probe_user_read(&val, nip, sizeof(val));
+       err = copy_from_user_nofault(&val, nip, sizeof(val));
        if (err)
                return err;
        if (get_op(val) == OP_PREFIX) {
-               err = probe_user_read(&suffix, (void __user *)nip + 4, 4);
+               err = copy_from_user_nofault(&suffix, (void __user *)nip + 4, 4);
                *inst = ppc_inst_prefix(val, suffix);
        } else {
                *inst = ppc_inst(val);
@@ -33,11 +33,11 @@ int probe_kernel_read_inst(struct ppc_inst *inst,
        unsigned int val, suffix;
        int err;
 
-       err = probe_kernel_read(&val, src, sizeof(val));
+       err = copy_from_kernel_nofault(&val, src, sizeof(val));
        if (err)
                return err;
        if (get_op(val) == OP_PREFIX) {
-               err = probe_kernel_read(&suffix, (void *)src + 4, 4);
+               err = copy_from_kernel_nofault(&suffix, (void *)src + 4, 4);
                *inst = ppc_inst_prefix(val, suffix);
        } else {
                *inst = ppc_inst(val);
@@ -51,7 +51,7 @@ int probe_user_read_inst(struct ppc_inst *inst,
        unsigned int val;
        int err;
 
-       err = probe_user_read(&val, nip, sizeof(val));
+       err = copy_from_user_nofault(&val, nip, sizeof(val));
        if (!err)
                *inst = ppc_inst(val);
 
@@ -64,7 +64,7 @@ int probe_kernel_read_inst(struct ppc_inst *inst,
        unsigned int val;
        int err;
 
-       err = probe_kernel_read(&val, src, sizeof(val));
+       err = copy_from_kernel_nofault(&val, src, sizeof(val));
        if (!err)
                *inst = ppc_inst(val);
 
index 1199fc2..ca5fcb4 100644 (file)
@@ -353,9 +353,6 @@ static bool pkey_access_permitted(int pkey, bool write, bool execute)
        int pkey_shift;
        u64 amr;
 
-       if (!is_pkey_enabled(pkey))
-               return true;
-
        pkey_shift = pkeyshift(pkey);
        if (execute && !(read_iamr() & (IAMR_EX_BIT << pkey_shift)))
                return true;
index 4a75f2d..bce0e53 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/memblock.h>
 #include <linux/libfdt.h>
 #include <linux/crash_core.h>
+#include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
 #include <asm/kdump.h>
index 6f347fa..9db7ada 100644 (file)
@@ -33,7 +33,8 @@ static unsigned int user_getsp32(unsigned int sp, int is_first)
         * which means that we've done all that we can do from
         * interrupt context.
         */
-       if (probe_user_read(stack_frame, (void __user *)p, sizeof(stack_frame)))
+       if (copy_from_user_nofault(stack_frame, (void __user *)p,
+                       sizeof(stack_frame)))
                return 0;
 
        if (!is_first)
@@ -51,7 +52,8 @@ static unsigned long user_getsp64(unsigned long sp, int is_first)
 {
        unsigned long stack_frame[3];
 
-       if (probe_user_read(stack_frame, (void __user *)sp, sizeof(stack_frame)))
+       if (copy_from_user_nofault(stack_frame, (void __user *)sp,
+                       sizeof(stack_frame)))
                return 0;
 
        if (!is_first)
index f7d888d..542e68b 100644 (file)
@@ -44,7 +44,7 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
            ((unsigned long)ptr & 3))
                return -EFAULT;
 
-       rc = probe_user_read(ret, ptr, sizeof(*ret));
+       rc = copy_from_user_nofault(ret, ptr, sizeof(*ret));
 
        if (IS_ENABLED(CONFIG_PPC64) && rc)
                return read_user_stack_slow(ptr, ret, 4);
index 814d1c2..fa2a1b8 100644 (file)
@@ -50,7 +50,7 @@ static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
            ((unsigned long)ptr & 7))
                return -EFAULT;
 
-       if (!probe_user_read(ret, ptr, sizeof(*ret)))
+       if (!copy_from_user_nofault(ret, ptr, sizeof(*ret)))
                return 0;
 
        return read_user_stack_slow(ptr, ret, 8);
index 13b9dd5..cd6a742 100644 (file)
@@ -418,14 +418,16 @@ static __u64 power_pmu_bhrb_to(u64 addr)
        __u64 target;
 
        if (is_kernel_addr(addr)) {
-               if (probe_kernel_read(&instr, (void *)addr, sizeof(instr)))
+               if (copy_from_kernel_nofault(&instr, (void *)addr,
+                               sizeof(instr)))
                        return 0;
 
                return branch_target((struct ppc_inst *)&instr);
        }
 
        /* Userspace: need copy instruction here then translate it */
-       if (probe_user_read(&instr, (unsigned int __user *)addr, sizeof(instr)))
+       if (copy_from_user_nofault(&instr, (unsigned int __user *)addr,
+                       sizeof(instr)))
                return 0;
 
        target = branch_target((struct ppc_inst *)&instr);
index cbee366..abdef9b 100644 (file)
@@ -35,7 +35,7 @@
  */
 
 static void *spu_syscall_table[] = {
-#define __SYSCALL(nr, entry)   entry,
+#define __SYSCALL(nr, entry) [nr] = entry,
 #include <asm/syscall_table_spu.h>
 #undef __SYSCALL
 };
index f355924..9c56907 100644 (file)
 #include <linux/libnvdimm.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
+#include <linux/seq_buf.h>
 
 #include <asm/plpar_wrappers.h>
+#include <asm/papr_pdsm.h>
 
 #define BIND_ANY_ADDR (~0ul)
 
 #define PAPR_SCM_DIMM_CMD_MASK \
        ((1ul << ND_CMD_GET_CONFIG_SIZE) | \
         (1ul << ND_CMD_GET_CONFIG_DATA) | \
-        (1ul << ND_CMD_SET_CONFIG_DATA))
-
+        (1ul << ND_CMD_SET_CONFIG_DATA) | \
+        (1ul << ND_CMD_CALL))
+
+/* DIMM health bitmap bitmap indicators */
+/* SCM device is unable to persist memory contents */
+#define PAPR_PMEM_UNARMED                   (1ULL << (63 - 0))
+/* SCM device failed to persist memory contents */
+#define PAPR_PMEM_SHUTDOWN_DIRTY            (1ULL << (63 - 1))
+/* SCM device contents are persisted from previous IPL */
+#define PAPR_PMEM_SHUTDOWN_CLEAN            (1ULL << (63 - 2))
+/* SCM device contents are not persisted from previous IPL */
+#define PAPR_PMEM_EMPTY                     (1ULL << (63 - 3))
+/* SCM device memory life remaining is critically low */
+#define PAPR_PMEM_HEALTH_CRITICAL           (1ULL << (63 - 4))
+/* SCM device will be garded off next IPL due to failure */
+#define PAPR_PMEM_HEALTH_FATAL              (1ULL << (63 - 5))
+/* SCM contents cannot persist due to current platform health status */
+#define PAPR_PMEM_HEALTH_UNHEALTHY          (1ULL << (63 - 6))
+/* SCM device is unable to persist memory contents in certain conditions */
+#define PAPR_PMEM_HEALTH_NON_CRITICAL       (1ULL << (63 - 7))
+/* SCM device is encrypted */
+#define PAPR_PMEM_ENCRYPTED                 (1ULL << (63 - 8))
+/* SCM device has been scrubbed and locked */
+#define PAPR_PMEM_SCRUBBED_AND_LOCKED       (1ULL << (63 - 9))
+
+/* Bits status indicators for health bitmap indicating unarmed dimm */
+#define PAPR_PMEM_UNARMED_MASK (PAPR_PMEM_UNARMED |            \
+                               PAPR_PMEM_HEALTH_UNHEALTHY)
+
+/* Bits status indicators for health bitmap indicating unflushed dimm */
+#define PAPR_PMEM_BAD_SHUTDOWN_MASK (PAPR_PMEM_SHUTDOWN_DIRTY)
+
+/* Bits status indicators for health bitmap indicating unrestored dimm */
+#define PAPR_PMEM_BAD_RESTORE_MASK  (PAPR_PMEM_EMPTY)
+
+/* Bit status indicators for smart event notification */
+#define PAPR_PMEM_SMART_EVENT_MASK (PAPR_PMEM_HEALTH_CRITICAL | \
+                                   PAPR_PMEM_HEALTH_FATAL |    \
+                                   PAPR_PMEM_HEALTH_UNHEALTHY)
+
+/* private struct associated with each region */
 struct papr_scm_priv {
        struct platform_device *pdev;
        struct device_node *dn;
@@ -39,6 +80,15 @@ struct papr_scm_priv {
        struct resource res;
        struct nd_region *region;
        struct nd_interleave_set nd_set;
+
+       /* Protect dimm health data from concurrent read/writes */
+       struct mutex health_mutex;
+
+       /* Last time the health information of the dimm was updated */
+       unsigned long lasthealth_jiffies;
+
+       /* Health information for the dimm */
+       u64 health_bitmap;
 };
 
 static int drc_pmem_bind(struct papr_scm_priv *p)
@@ -144,6 +194,61 @@ err_out:
        return drc_pmem_bind(p);
 }
 
+/*
+ * Issue hcall to retrieve dimm health info and populate papr_scm_priv with the
+ * health information.
+ */
+static int __drc_pmem_query_health(struct papr_scm_priv *p)
+{
+       unsigned long ret[PLPAR_HCALL_BUFSIZE];
+       long rc;
+
+       /* issue the hcall */
+       rc = plpar_hcall(H_SCM_HEALTH, ret, p->drc_index);
+       if (rc != H_SUCCESS) {
+               dev_err(&p->pdev->dev,
+                       "Failed to query health information, Err:%ld\n", rc);
+               return -ENXIO;
+       }
+
+       p->lasthealth_jiffies = jiffies;
+       p->health_bitmap = ret[0] & ret[1];
+
+       dev_dbg(&p->pdev->dev,
+               "Queried dimm health info. Bitmap:0x%016lx Mask:0x%016lx\n",
+               ret[0], ret[1]);
+
+       return 0;
+}
+
+/* Min interval in seconds for assuming stable dimm health */
+#define MIN_HEALTH_QUERY_INTERVAL 60
+
+/* Query cached health info and if needed call drc_pmem_query_health */
+static int drc_pmem_query_health(struct papr_scm_priv *p)
+{
+       unsigned long cache_timeout;
+       int rc;
+
+       /* Protect concurrent modifications to papr_scm_priv */
+       rc = mutex_lock_interruptible(&p->health_mutex);
+       if (rc)
+               return rc;
+
+       /* Jiffies offset for which the health data is assumed to be same */
+       cache_timeout = p->lasthealth_jiffies +
+               msecs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL * 1000);
+
+       /* Fetch new health info is its older than MIN_HEALTH_QUERY_INTERVAL */
+       if (time_after(jiffies, cache_timeout))
+               rc = __drc_pmem_query_health(p);
+       else
+               /* Assume cached health data is valid */
+               rc = 0;
+
+       mutex_unlock(&p->health_mutex);
+       return rc;
+}
 
 static int papr_scm_meta_get(struct papr_scm_priv *p,
                             struct nd_cmd_get_config_data_hdr *hdr)
@@ -246,16 +351,250 @@ static int papr_scm_meta_set(struct papr_scm_priv *p,
        return 0;
 }
 
+/*
+ * Do a sanity checks on the inputs args to dimm-control function and return
+ * '0' if valid. Validation of PDSM payloads happens later in
+ * papr_scm_service_pdsm.
+ */
+static int is_cmd_valid(struct nvdimm *nvdimm, unsigned int cmd, void *buf,
+                       unsigned int buf_len)
+{
+       unsigned long cmd_mask = PAPR_SCM_DIMM_CMD_MASK;
+       struct nd_cmd_pkg *nd_cmd;
+       struct papr_scm_priv *p;
+       enum papr_pdsm pdsm;
+
+       /* Only dimm-specific calls are supported atm */
+       if (!nvdimm)
+               return -EINVAL;
+
+       /* get the provider data from struct nvdimm */
+       p = nvdimm_provider_data(nvdimm);
+
+       if (!test_bit(cmd, &cmd_mask)) {
+               dev_dbg(&p->pdev->dev, "Unsupported cmd=%u\n", cmd);
+               return -EINVAL;
+       }
+
+       /* For CMD_CALL verify pdsm request */
+       if (cmd == ND_CMD_CALL) {
+               /* Verify the envelope and envelop size */
+               if (!buf ||
+                   buf_len < (sizeof(struct nd_cmd_pkg) + ND_PDSM_HDR_SIZE)) {
+                       dev_dbg(&p->pdev->dev, "Invalid pkg size=%u\n",
+                               buf_len);
+                       return -EINVAL;
+               }
+
+               /* Verify that the nd_cmd_pkg.nd_family is correct */
+               nd_cmd = (struct nd_cmd_pkg *)buf;
+
+               if (nd_cmd->nd_family != NVDIMM_FAMILY_PAPR) {
+                       dev_dbg(&p->pdev->dev, "Invalid pkg family=0x%llx\n",
+                               nd_cmd->nd_family);
+                       return -EINVAL;
+               }
+
+               pdsm = (enum papr_pdsm)nd_cmd->nd_command;
+
+               /* Verify if the pdsm command is valid */
+               if (pdsm <= PAPR_PDSM_MIN || pdsm >= PAPR_PDSM_MAX) {
+                       dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid PDSM\n",
+                               pdsm);
+                       return -EINVAL;
+               }
+
+               /* Have enough space to hold returned 'nd_pkg_pdsm' header */
+               if (nd_cmd->nd_size_out < ND_PDSM_HDR_SIZE) {
+                       dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid payload\n",
+                               pdsm);
+                       return -EINVAL;
+               }
+       }
+
+       /* Let the command be further processed */
+       return 0;
+}
+
+/* Fetch the DIMM health info and populate it in provided package. */
+static int papr_pdsm_health(struct papr_scm_priv *p,
+                           union nd_pdsm_payload *payload)
+{
+       int rc;
+
+       /* Ensure dimm health mutex is taken preventing concurrent access */
+       rc = mutex_lock_interruptible(&p->health_mutex);
+       if (rc)
+               goto out;
+
+       /* Always fetch upto date dimm health data ignoring cached values */
+       rc = __drc_pmem_query_health(p);
+       if (rc) {
+               mutex_unlock(&p->health_mutex);
+               goto out;
+       }
+
+       /* update health struct with various flags derived from health bitmap */
+       payload->health = (struct nd_papr_pdsm_health) {
+               .extension_flags = 0,
+               .dimm_unarmed = !!(p->health_bitmap & PAPR_PMEM_UNARMED_MASK),
+               .dimm_bad_shutdown = !!(p->health_bitmap & PAPR_PMEM_BAD_SHUTDOWN_MASK),
+               .dimm_bad_restore = !!(p->health_bitmap & PAPR_PMEM_BAD_RESTORE_MASK),
+               .dimm_scrubbed = !!(p->health_bitmap & PAPR_PMEM_SCRUBBED_AND_LOCKED),
+               .dimm_locked = !!(p->health_bitmap & PAPR_PMEM_SCRUBBED_AND_LOCKED),
+               .dimm_encrypted = !!(p->health_bitmap & PAPR_PMEM_ENCRYPTED),
+               .dimm_health = PAPR_PDSM_DIMM_HEALTHY,
+       };
+
+       /* Update field dimm_health based on health_bitmap flags */
+       if (p->health_bitmap & PAPR_PMEM_HEALTH_FATAL)
+               payload->health.dimm_health = PAPR_PDSM_DIMM_FATAL;
+       else if (p->health_bitmap & PAPR_PMEM_HEALTH_CRITICAL)
+               payload->health.dimm_health = PAPR_PDSM_DIMM_CRITICAL;
+       else if (p->health_bitmap & PAPR_PMEM_HEALTH_UNHEALTHY)
+               payload->health.dimm_health = PAPR_PDSM_DIMM_UNHEALTHY;
+
+       /* struct populated hence can release the mutex now */
+       mutex_unlock(&p->health_mutex);
+       rc = sizeof(struct nd_papr_pdsm_health);
+
+out:
+       return rc;
+}
+
+/*
+ * 'struct pdsm_cmd_desc'
+ * Identifies supported PDSMs' expected length of in/out payloads
+ * and pdsm service function.
+ *
+ * size_in     : Size of input payload if any in the PDSM request.
+ * size_out    : Size of output payload if any in the PDSM request.
+ * service     : Service function for the PDSM request. Return semantics:
+ *               rc < 0 : Error servicing PDSM and rc indicates the error.
+ *               rc >=0 : Serviced successfully and 'rc' indicate number of
+ *                     bytes written to payload.
+ */
+struct pdsm_cmd_desc {
+       u32 size_in;
+       u32 size_out;
+       int (*service)(struct papr_scm_priv *dimm,
+                      union nd_pdsm_payload *payload);
+};
+
+/* Holds all supported PDSMs' command descriptors */
+static const struct pdsm_cmd_desc __pdsm_cmd_descriptors[] = {
+       [PAPR_PDSM_MIN] = {
+               .size_in = 0,
+               .size_out = 0,
+               .service = NULL,
+       },
+       /* New PDSM command descriptors to be added below */
+
+       [PAPR_PDSM_HEALTH] = {
+               .size_in = 0,
+               .size_out = sizeof(struct nd_papr_pdsm_health),
+               .service = papr_pdsm_health,
+       },
+       /* Empty */
+       [PAPR_PDSM_MAX] = {
+               .size_in = 0,
+               .size_out = 0,
+               .service = NULL,
+       },
+};
+
+/* Given a valid pdsm cmd return its command descriptor else return NULL */
+static inline const struct pdsm_cmd_desc *pdsm_cmd_desc(enum papr_pdsm cmd)
+{
+       if (cmd >= 0 || cmd < ARRAY_SIZE(__pdsm_cmd_descriptors))
+               return &__pdsm_cmd_descriptors[cmd];
+
+       return NULL;
+}
+
+/*
+ * For a given pdsm request call an appropriate service function.
+ * Returns errors if any while handling the pdsm command package.
+ */
+static int papr_scm_service_pdsm(struct papr_scm_priv *p,
+                                struct nd_cmd_pkg *pkg)
+{
+       /* Get the PDSM header and PDSM command */
+       struct nd_pkg_pdsm *pdsm_pkg = (struct nd_pkg_pdsm *)pkg->nd_payload;
+       enum papr_pdsm pdsm = (enum papr_pdsm)pkg->nd_command;
+       const struct pdsm_cmd_desc *pdsc;
+       int rc;
+
+       /* Fetch corresponding pdsm descriptor for validation and servicing */
+       pdsc = pdsm_cmd_desc(pdsm);
+
+       /* Validate pdsm descriptor */
+       /* Ensure that reserved fields are 0 */
+       if (pdsm_pkg->reserved[0] || pdsm_pkg->reserved[1]) {
+               dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid reserved field\n",
+                       pdsm);
+               return -EINVAL;
+       }
+
+       /* If pdsm expects some input, then ensure that the size_in matches */
+       if (pdsc->size_in &&
+           pkg->nd_size_in != (pdsc->size_in + ND_PDSM_HDR_SIZE)) {
+               dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Mismatched size_in=%d\n",
+                       pdsm, pkg->nd_size_in);
+               return -EINVAL;
+       }
+
+       /* If pdsm wants to return data, then ensure that  size_out matches */
+       if (pdsc->size_out &&
+           pkg->nd_size_out != (pdsc->size_out + ND_PDSM_HDR_SIZE)) {
+               dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Mismatched size_out=%d\n",
+                       pdsm, pkg->nd_size_out);
+               return -EINVAL;
+       }
+
+       /* Service the pdsm */
+       if (pdsc->service) {
+               dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Servicing..\n", pdsm);
+
+               rc = pdsc->service(p, &pdsm_pkg->payload);
+
+               if (rc < 0) {
+                       /* error encountered while servicing pdsm */
+                       pdsm_pkg->cmd_status = rc;
+                       pkg->nd_fw_size = ND_PDSM_HDR_SIZE;
+               } else {
+                       /* pdsm serviced and 'rc' bytes written to payload */
+                       pdsm_pkg->cmd_status = 0;
+                       pkg->nd_fw_size = ND_PDSM_HDR_SIZE + rc;
+               }
+       } else {
+               dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Unsupported PDSM request\n",
+                       pdsm);
+               pdsm_pkg->cmd_status = -ENOENT;
+               pkg->nd_fw_size = ND_PDSM_HDR_SIZE;
+       }
+
+       return pdsm_pkg->cmd_status;
+}
+
 static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc,
                          struct nvdimm *nvdimm, unsigned int cmd, void *buf,
                          unsigned int buf_len, int *cmd_rc)
 {
        struct nd_cmd_get_config_size *get_size_hdr;
+       struct nd_cmd_pkg *call_pkg = NULL;
        struct papr_scm_priv *p;
+       int rc;
 
-       /* Only dimm-specific calls are supported atm */
-       if (!nvdimm)
-               return -EINVAL;
+       rc = is_cmd_valid(nvdimm, cmd, buf, buf_len);
+       if (rc) {
+               pr_debug("Invalid cmd=0x%x. Err=%d\n", cmd, rc);
+               return rc;
+       }
+
+       /* Use a local variable in case cmd_rc pointer is NULL */
+       if (!cmd_rc)
+               cmd_rc = &rc;
 
        p = nvdimm_provider_data(nvdimm);
 
@@ -277,7 +616,13 @@ static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc,
                *cmd_rc = papr_scm_meta_set(p, buf);
                break;
 
+       case ND_CMD_CALL:
+               call_pkg = (struct nd_cmd_pkg *)buf;
+               *cmd_rc = papr_scm_service_pdsm(p, call_pkg);
+               break;
+
        default:
+               dev_dbg(&p->pdev->dev, "Unknown command = %d\n", cmd);
                return -EINVAL;
        }
 
@@ -286,6 +631,64 @@ static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc,
        return 0;
 }
 
+static ssize_t flags_show(struct device *dev,
+                         struct device_attribute *attr, char *buf)
+{
+       struct nvdimm *dimm = to_nvdimm(dev);
+       struct papr_scm_priv *p = nvdimm_provider_data(dimm);
+       struct seq_buf s;
+       u64 health;
+       int rc;
+
+       rc = drc_pmem_query_health(p);
+       if (rc)
+               return rc;
+
+       /* Copy health_bitmap locally, check masks & update out buffer */
+       health = READ_ONCE(p->health_bitmap);
+
+       seq_buf_init(&s, buf, PAGE_SIZE);
+       if (health & PAPR_PMEM_UNARMED_MASK)
+               seq_buf_printf(&s, "not_armed ");
+
+       if (health & PAPR_PMEM_BAD_SHUTDOWN_MASK)
+               seq_buf_printf(&s, "flush_fail ");
+
+       if (health & PAPR_PMEM_BAD_RESTORE_MASK)
+               seq_buf_printf(&s, "restore_fail ");
+
+       if (health & PAPR_PMEM_ENCRYPTED)
+               seq_buf_printf(&s, "encrypted ");
+
+       if (health & PAPR_PMEM_SMART_EVENT_MASK)
+               seq_buf_printf(&s, "smart_notify ");
+
+       if (health & PAPR_PMEM_SCRUBBED_AND_LOCKED)
+               seq_buf_printf(&s, "scrubbed locked ");
+
+       if (seq_buf_used(&s))
+               seq_buf_printf(&s, "\n");
+
+       return seq_buf_used(&s);
+}
+DEVICE_ATTR_RO(flags);
+
+/* papr_scm specific dimm attributes */
+static struct attribute *papr_nd_attributes[] = {
+       &dev_attr_flags.attr,
+       NULL,
+};
+
+static struct attribute_group papr_nd_attribute_group = {
+       .name = "papr",
+       .attrs = papr_nd_attributes,
+};
+
+static const struct attribute_group *papr_nd_attr_groups[] = {
+       &papr_nd_attribute_group,
+       NULL,
+};
+
 static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
 {
        struct device *dev = &p->pdev->dev;
@@ -312,8 +715,8 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
        dimm_flags = 0;
        set_bit(NDD_LABELING, &dimm_flags);
 
-       p->nvdimm = nvdimm_create(p->bus, p, NULL, dimm_flags,
-                                 PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
+       p->nvdimm = nvdimm_create(p->bus, p, papr_nd_attr_groups,
+                                 dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
        if (!p->nvdimm) {
                dev_err(dev, "Error creating DIMM object for %pOF\n", p->dn);
                goto err;
@@ -399,6 +802,9 @@ static int papr_scm_probe(struct platform_device *pdev)
        if (!p)
                return -ENOMEM;
 
+       /* Initialize the dimm mutex */
+       mutex_init(&p->health_mutex);
+
        /* optional DT properties */
        of_property_read_u32(dn, "ibm,metadata-size", &metadata_size);
 
index 4a8874b..040b9d0 100644 (file)
@@ -1066,10 +1066,10 @@ int fsl_pci_mcheck_exception(struct pt_regs *regs)
 
        if (is_in_pci_mem_space(addr)) {
                if (user_mode(regs))
-                       ret = probe_user_read(&inst, (void __user *)regs->nip,
-                                             sizeof(inst));
+                       ret = copy_from_user_nofault(&inst,
+                                       (void __user *)regs->nip, sizeof(inst));
                else
-                       ret = probe_kernel_address((void *)regs->nip, inst);
+                       ret = get_kernel_nofault(inst, (void *)regs->nip);
 
                if (!ret && mcheck_handle_load(regs, inst)) {
                        regs->nip += 4;
index d969bab..262e5bb 100644 (file)
                        "       bnez %1, 0b\n"                          \
                        "1:\n"                                          \
                        : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)    \
-                       : "rJ" (__old), "rJ" (__new)                    \
+                       : "rJ" ((long)__old), "rJ" (__new)              \
                        : "memory");                                    \
                break;                                                  \
        case 8:                                                         \
                        RISCV_ACQUIRE_BARRIER                           \
                        "1:\n"                                          \
                        : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)    \
-                       : "rJ" (__old), "rJ" (__new)                    \
+                       : "rJ" ((long)__old), "rJ" (__new)              \
                        : "memory");                                    \
                break;                                                  \
        case 8:                                                         \
                        "       bnez %1, 0b\n"                          \
                        "1:\n"                                          \
                        : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)    \
-                       : "rJ" (__old), "rJ" (__new)                    \
+                       : "rJ" ((long)__old), "rJ" (__new)              \
                        : "memory");                                    \
                break;                                                  \
        case 8:                                                         \
                        "       fence rw, rw\n"                         \
                        "1:\n"                                          \
                        : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr)    \
-                       : "rJ" (__old), "rJ" (__new)                    \
+                       : "rJ" ((long)__old), "rJ" (__new)              \
                        : "memory");                                    \
                break;                                                  \
        case 8:                                                         \
index 0839661..2ff63d0 100644 (file)
@@ -38,7 +38,8 @@ static int ftrace_check_current_call(unsigned long hook_pos,
         * Read the text we want to modify;
         * return must be -EFAULT on read error
         */
-       if (probe_kernel_read(replaced, (void *)hook_pos, MCOUNT_INSN_SIZE))
+       if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
+                       MCOUNT_INSN_SIZE))
                return -EFAULT;
 
        /*
index f16ade8..c3275f4 100644 (file)
@@ -62,7 +62,7 @@ int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
        unsigned int rs1_num, rs2_num;
        int op_code;
 
-       if (probe_kernel_address((void *)pc, op_code))
+       if (get_kernel_nofault(op_code, (void *)pc))
                return -EINVAL;
        if ((op_code & __INSN_LENGTH_MASK) != __INSN_LENGTH_GE_32) {
                if (is_c_jalr_insn(op_code) || is_c_jr_insn(op_code)) {
@@ -146,14 +146,14 @@ int do_single_step(struct pt_regs *regs)
                return error;
 
        /* Store the op code in the stepped address */
-       error = probe_kernel_address((void *)addr, stepped_opcode);
+       error = get_kernel_nofault(stepped_opcode, (void *)addr);
        if (error)
                return error;
 
        stepped_address = addr;
 
        /* Replace the op code with the break instruction */
-       error = probe_kernel_write((void *)stepped_address,
+       error = copy_to_kernel_nofault((void *)stepped_address,
                                   arch_kgdb_ops.gdb_bpt_instr,
                                   BREAK_INSTR_SIZE);
        /* Flush and return */
@@ -173,7 +173,7 @@ int do_single_step(struct pt_regs *regs)
 static void undo_single_step(struct pt_regs *regs)
 {
        if (stepped_opcode != 0) {
-               probe_kernel_write((void *)stepped_address,
+               copy_to_kernel_nofault((void *)stepped_address,
                                   (void *)&stepped_opcode, BREAK_INSTR_SIZE);
                flush_icache_range(stepped_address,
                                   stepped_address + BREAK_INSTR_SIZE);
index d4a64df..3fe7a52 100644 (file)
@@ -63,7 +63,7 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
 
        waddr = patch_map(addr, FIX_TEXT_POKE0);
 
-       ret = probe_kernel_write(waddr, insn, len);
+       ret = copy_to_kernel_nofault(waddr, insn, len);
 
        patch_unmap(FIX_TEXT_POKE0);
 
@@ -76,7 +76,7 @@ NOKPROBE_SYMBOL(patch_insn_write);
 #else
 static int patch_insn_write(void *addr, const void *insn, size_t len)
 {
-       return probe_kernel_write(addr, insn, len);
+       return copy_to_kernel_nofault(addr, insn, len);
 }
 NOKPROBE_SYMBOL(patch_insn_write);
 #endif /* CONFIG_MMU */
index f3619f5..12f8a7f 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/syscalls.h>
 #include <asm/unistd.h>
 #include <asm/cacheflush.h>
+#include <asm-generic/mman-common.h>
 
 static long riscv_sys_mmap(unsigned long addr, unsigned long len,
                           unsigned long prot, unsigned long flags,
@@ -16,6 +17,11 @@ static long riscv_sys_mmap(unsigned long addr, unsigned long len,
 {
        if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
                return -EINVAL;
+
+       if ((prot & PROT_WRITE) && (prot & PROT_EXEC))
+               if (unlikely(!(prot & PROT_READ)))
+                       return -EINVAL;
+
        return ksys_mmap_pgoff(addr, len, prot, flags, fd,
                               offset >> (PAGE_SHIFT - page_shift_offset));
 }
index ecec177..7d95cce 100644 (file)
@@ -137,7 +137,7 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
 {
        bug_insn_t insn;
 
-       if (probe_kernel_address((bug_insn_t *)pc, insn))
+       if (get_kernel_nofault(insn, (bug_insn_t *)pc))
                return 0;
 
        return GET_INSN_LENGTH(insn);
@@ -165,7 +165,7 @@ int is_valid_bugaddr(unsigned long pc)
 
        if (pc < VMALLOC_START)
                return 0;
-       if (probe_kernel_address((bug_insn_t *)pc, insn))
+       if (get_kernel_nofault(insn, (bug_insn_t *)pc))
                return 0;
        if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
                return (insn == __BUG_INSN_32);
index 38ba55b..e4c7c2c 100644 (file)
@@ -17,7 +17,7 @@ vdso-syms += flush_icache
 obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o
 
 ifneq ($(c-gettimeofday-y),)
-  CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
+  CFLAGS_vgettimeofday.o += -fPIC -include $(c-gettimeofday-y)
 endif
 
 # Build rules
@@ -27,6 +27,9 @@ obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
 obj-y += vdso.o vdso-syms.o
 CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
 
+# Disable -pg to prevent insert call site
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os
+
 # Disable gcov profiling for VDSO code
 GCOV_PROFILE := n
 
index d264943..cc0d806 100644 (file)
@@ -9,16 +9,22 @@
 #include <linux/time.h>
 #include <linux/types.h>
 
+extern
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
 int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
 {
        return __cvdso_clock_gettime(clock, ts);
 }
 
+extern
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
 int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
 {
        return __cvdso_gettimeofday(tv, tz);
 }
 
+extern
+int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res);
 int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res)
 {
        return __cvdso_clock_getres(clock_id, res);
index ec2c70f..289a9a5 100644 (file)
@@ -151,6 +151,7 @@ int set_memory_nx(unsigned long addr, int numpages)
 
 int set_direct_map_invalid_noflush(struct page *page)
 {
+       int ret;
        unsigned long start = (unsigned long)page_address(page);
        unsigned long end = start + PAGE_SIZE;
        struct pageattr_masks masks = {
@@ -158,11 +159,16 @@ int set_direct_map_invalid_noflush(struct page *page)
                .clear_mask = __pgprot(_PAGE_PRESENT)
        };
 
-       return walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
+       mmap_read_lock(&init_mm);
+       ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
+       mmap_read_unlock(&init_mm);
+
+       return ret;
 }
 
 int set_direct_map_default_noflush(struct page *page)
 {
+       int ret;
        unsigned long start = (unsigned long)page_address(page);
        unsigned long end = start + PAGE_SIZE;
        struct pageattr_masks masks = {
@@ -170,7 +176,11 @@ int set_direct_map_default_noflush(struct page *page)
                .clear_mask = __pgprot(0)
        };
 
-       return walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
+       mmap_read_lock(&init_mm);
+       ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
+       mmap_read_unlock(&init_mm);
+
+       return ret;
 }
 
 void __kernel_map_pages(struct page *page, int numpages, int enable)
index 20e235d..75c1e99 100644 (file)
 #include <linux/filter.h>
 #include <asm/cacheflush.h>
 
+static inline bool rvc_enabled(void)
+{
+       return IS_ENABLED(CONFIG_RISCV_ISA_C);
+}
+
 enum {
        RV_REG_ZERO =   0,      /* The constant value 0 */
        RV_REG_RA =     1,      /* Return address */
@@ -48,9 +53,21 @@ enum {
        RV_REG_T6 =     31,
 };
 
+static inline bool is_creg(u8 reg)
+{
+       return (1 << reg) & (BIT(RV_REG_FP) |
+                            BIT(RV_REG_S1) |
+                            BIT(RV_REG_A0) |
+                            BIT(RV_REG_A1) |
+                            BIT(RV_REG_A2) |
+                            BIT(RV_REG_A3) |
+                            BIT(RV_REG_A4) |
+                            BIT(RV_REG_A5));
+}
+
 struct rv_jit_context {
        struct bpf_prog *prog;
-       u32 *insns;             /* RV insns */
+       u16 *insns;             /* RV insns */
        int ninsns;
        int epilogue_offset;
        int *offset;            /* BPF to RV */
@@ -58,6 +75,12 @@ struct rv_jit_context {
        int stack_size;
 };
 
+/* Convert from ninsns to bytes. */
+static inline int ninsns_rvoff(int ninsns)
+{
+       return ninsns << 1;
+}
+
 struct rv_jit_data {
        struct bpf_binary_header *header;
        u8 *image;
@@ -74,8 +97,22 @@ static inline void bpf_flush_icache(void *start, void *end)
        flush_icache_range((unsigned long)start, (unsigned long)end);
 }
 
+/* Emit a 4-byte riscv instruction. */
 static inline void emit(const u32 insn, struct rv_jit_context *ctx)
 {
+       if (ctx->insns) {
+               ctx->insns[ctx->ninsns] = insn;
+               ctx->insns[ctx->ninsns + 1] = (insn >> 16);
+       }
+
+       ctx->ninsns += 2;
+}
+
+/* Emit a 2-byte riscv compressed instruction. */
+static inline void emitc(const u16 insn, struct rv_jit_context *ctx)
+{
+       BUILD_BUG_ON(!rvc_enabled());
+
        if (ctx->insns)
                ctx->insns[ctx->ninsns] = insn;
 
@@ -86,7 +123,7 @@ static inline int epilogue_offset(struct rv_jit_context *ctx)
 {
        int to = ctx->epilogue_offset, from = ctx->ninsns;
 
-       return (to - from) << 2;
+       return ninsns_rvoff(to - from);
 }
 
 /* Return -1 or inverted cond. */
@@ -117,6 +154,36 @@ static inline int invert_bpf_cond(u8 cond)
        return -1;
 }
 
+static inline bool is_6b_int(long val)
+{
+       return -(1L << 5) <= val && val < (1L << 5);
+}
+
+static inline bool is_7b_uint(unsigned long val)
+{
+       return val < (1UL << 7);
+}
+
+static inline bool is_8b_uint(unsigned long val)
+{
+       return val < (1UL << 8);
+}
+
+static inline bool is_9b_uint(unsigned long val)
+{
+       return val < (1UL << 9);
+}
+
+static inline bool is_10b_int(long val)
+{
+       return -(1L << 9) <= val && val < (1L << 9);
+}
+
+static inline bool is_10b_uint(unsigned long val)
+{
+       return val < (1UL << 10);
+}
+
 static inline bool is_12b_int(long val)
 {
        return -(1L << 11) <= val && val < (1L << 11);
@@ -149,7 +216,7 @@ static inline int rv_offset(int insn, int off, struct rv_jit_context *ctx)
        off++; /* BPF branch is from PC+1, RV is from PC */
        from = (insn > 0) ? ctx->offset[insn - 1] : 0;
        to = (insn + off > 0) ? ctx->offset[insn + off - 1] : 0;
-       return (to - from) << 2;
+       return ninsns_rvoff(to - from);
 }
 
 /* Instruction formats. */
@@ -207,6 +274,59 @@ static inline u32 rv_amo_insn(u8 funct5, u8 aq, u8 rl, u8 rs2, u8 rs1,
        return rv_r_insn(funct7, rs2, rs1, funct3, rd, opcode);
 }
 
+/* RISC-V compressed instruction formats. */
+
+static inline u16 rv_cr_insn(u8 funct4, u8 rd, u8 rs2, u8 op)
+{
+       return (funct4 << 12) | (rd << 7) | (rs2 << 2) | op;
+}
+
+static inline u16 rv_ci_insn(u8 funct3, u32 imm6, u8 rd, u8 op)
+{
+       u32 imm;
+
+       imm = ((imm6 & 0x20) << 7) | ((imm6 & 0x1f) << 2);
+       return (funct3 << 13) | (rd << 7) | op | imm;
+}
+
+static inline u16 rv_css_insn(u8 funct3, u32 uimm, u8 rs2, u8 op)
+{
+       return (funct3 << 13) | (uimm << 7) | (rs2 << 2) | op;
+}
+
+static inline u16 rv_ciw_insn(u8 funct3, u32 uimm, u8 rd, u8 op)
+{
+       return (funct3 << 13) | (uimm << 5) | ((rd & 0x7) << 2) | op;
+}
+
+static inline u16 rv_cl_insn(u8 funct3, u32 imm_hi, u8 rs1, u32 imm_lo, u8 rd,
+                            u8 op)
+{
+       return (funct3 << 13) | (imm_hi << 10) | ((rs1 & 0x7) << 7) |
+               (imm_lo << 5) | ((rd & 0x7) << 2) | op;
+}
+
+static inline u16 rv_cs_insn(u8 funct3, u32 imm_hi, u8 rs1, u32 imm_lo, u8 rs2,
+                            u8 op)
+{
+       return (funct3 << 13) | (imm_hi << 10) | ((rs1 & 0x7) << 7) |
+               (imm_lo << 5) | ((rs2 & 0x7) << 2) | op;
+}
+
+static inline u16 rv_ca_insn(u8 funct6, u8 rd, u8 funct2, u8 rs2, u8 op)
+{
+       return (funct6 << 10) | ((rd & 0x7) << 7) | (funct2 << 5) |
+               ((rs2 & 0x7) << 2) | op;
+}
+
+static inline u16 rv_cb_insn(u8 funct3, u32 imm6, u8 funct2, u8 rd, u8 op)
+{
+       u32 imm;
+
+       imm = ((imm6 & 0x20) << 7) | ((imm6 & 0x1f) << 2);
+       return (funct3 << 13) | (funct2 << 10) | ((rd & 0x7) << 7) | op | imm;
+}
+
 /* Instructions shared by both RV32 and RV64. */
 
 static inline u32 rv_addi(u8 rd, u8 rs1, u16 imm11_0)
@@ -414,6 +534,135 @@ static inline u32 rv_amoadd_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
        return rv_amo_insn(0, aq, rl, rs2, rs1, 2, rd, 0x2f);
 }
 
+/* RVC instructions. */
+
+static inline u16 rvc_addi4spn(u8 rd, u32 imm10)
+{
+       u32 imm;
+
+       imm = ((imm10 & 0x30) << 2) | ((imm10 & 0x3c0) >> 4) |
+               ((imm10 & 0x4) >> 1) | ((imm10 & 0x8) >> 3);
+       return rv_ciw_insn(0x0, imm, rd, 0x0);
+}
+
+static inline u16 rvc_lw(u8 rd, u32 imm7, u8 rs1)
+{
+       u32 imm_hi, imm_lo;
+
+       imm_hi = (imm7 & 0x38) >> 3;
+       imm_lo = ((imm7 & 0x4) >> 1) | ((imm7 & 0x40) >> 6);
+       return rv_cl_insn(0x2, imm_hi, rs1, imm_lo, rd, 0x0);
+}
+
+static inline u16 rvc_sw(u8 rs1, u32 imm7, u8 rs2)
+{
+       u32 imm_hi, imm_lo;
+
+       imm_hi = (imm7 & 0x38) >> 3;
+       imm_lo = ((imm7 & 0x4) >> 1) | ((imm7 & 0x40) >> 6);
+       return rv_cs_insn(0x6, imm_hi, rs1, imm_lo, rs2, 0x0);
+}
+
+static inline u16 rvc_addi(u8 rd, u32 imm6)
+{
+       return rv_ci_insn(0, imm6, rd, 0x1);
+}
+
+static inline u16 rvc_li(u8 rd, u32 imm6)
+{
+       return rv_ci_insn(0x2, imm6, rd, 0x1);
+}
+
+static inline u16 rvc_addi16sp(u32 imm10)
+{
+       u32 imm;
+
+       imm = ((imm10 & 0x200) >> 4) | (imm10 & 0x10) | ((imm10 & 0x40) >> 3) |
+               ((imm10 & 0x180) >> 6) | ((imm10 & 0x20) >> 5);
+       return rv_ci_insn(0x3, imm, RV_REG_SP, 0x1);
+}
+
+static inline u16 rvc_lui(u8 rd, u32 imm6)
+{
+       return rv_ci_insn(0x3, imm6, rd, 0x1);
+}
+
+static inline u16 rvc_srli(u8 rd, u32 imm6)
+{
+       return rv_cb_insn(0x4, imm6, 0, rd, 0x1);
+}
+
+static inline u16 rvc_srai(u8 rd, u32 imm6)
+{
+       return rv_cb_insn(0x4, imm6, 0x1, rd, 0x1);
+}
+
+static inline u16 rvc_andi(u8 rd, u32 imm6)
+{
+       return rv_cb_insn(0x4, imm6, 0x2, rd, 0x1);
+}
+
+static inline u16 rvc_sub(u8 rd, u8 rs)
+{
+       return rv_ca_insn(0x23, rd, 0, rs, 0x1);
+}
+
+static inline u16 rvc_xor(u8 rd, u8 rs)
+{
+       return rv_ca_insn(0x23, rd, 0x1, rs, 0x1);
+}
+
+static inline u16 rvc_or(u8 rd, u8 rs)
+{
+       return rv_ca_insn(0x23, rd, 0x2, rs, 0x1);
+}
+
+static inline u16 rvc_and(u8 rd, u8 rs)
+{
+       return rv_ca_insn(0x23, rd, 0x3, rs, 0x1);
+}
+
+static inline u16 rvc_slli(u8 rd, u32 imm6)
+{
+       return rv_ci_insn(0, imm6, rd, 0x2);
+}
+
+static inline u16 rvc_lwsp(u8 rd, u32 imm8)
+{
+       u32 imm;
+
+       imm = ((imm8 & 0xc0) >> 6) | (imm8 & 0x3c);
+       return rv_ci_insn(0x2, imm, rd, 0x2);
+}
+
+static inline u16 rvc_jr(u8 rs1)
+{
+       return rv_cr_insn(0x8, rs1, RV_REG_ZERO, 0x2);
+}
+
+static inline u16 rvc_mv(u8 rd, u8 rs)
+{
+       return rv_cr_insn(0x8, rd, rs, 0x2);
+}
+
+static inline u16 rvc_jalr(u8 rs1)
+{
+       return rv_cr_insn(0x9, rs1, RV_REG_ZERO, 0x2);
+}
+
+static inline u16 rvc_add(u8 rd, u8 rs)
+{
+       return rv_cr_insn(0x9, rd, rs, 0x2);
+}
+
+static inline u16 rvc_swsp(u32 imm8, u8 rs2)
+{
+       u32 imm;
+
+       imm = (imm8 & 0x3c) | ((imm8 & 0xc0) >> 6);
+       return rv_css_insn(0x6, imm, rs2, 0x2);
+}
+
 /*
  * RV64-only instructions.
  *
@@ -503,6 +752,234 @@ static inline u32 rv_amoadd_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
        return rv_amo_insn(0, aq, rl, rs2, rs1, 3, rd, 0x2f);
 }
 
+/* RV64-only RVC instructions. */
+
+static inline u16 rvc_ld(u8 rd, u32 imm8, u8 rs1)
+{
+       u32 imm_hi, imm_lo;
+
+       imm_hi = (imm8 & 0x38) >> 3;
+       imm_lo = (imm8 & 0xc0) >> 6;
+       return rv_cl_insn(0x3, imm_hi, rs1, imm_lo, rd, 0x0);
+}
+
+static inline u16 rvc_sd(u8 rs1, u32 imm8, u8 rs2)
+{
+       u32 imm_hi, imm_lo;
+
+       imm_hi = (imm8 & 0x38) >> 3;
+       imm_lo = (imm8 & 0xc0) >> 6;
+       return rv_cs_insn(0x7, imm_hi, rs1, imm_lo, rs2, 0x0);
+}
+
+static inline u16 rvc_subw(u8 rd, u8 rs)
+{
+       return rv_ca_insn(0x27, rd, 0, rs, 0x1);
+}
+
+static inline u16 rvc_addiw(u8 rd, u32 imm6)
+{
+       return rv_ci_insn(0x1, imm6, rd, 0x1);
+}
+
+static inline u16 rvc_ldsp(u8 rd, u32 imm9)
+{
+       u32 imm;
+
+       imm = ((imm9 & 0x1c0) >> 6) | (imm9 & 0x38);
+       return rv_ci_insn(0x3, imm, rd, 0x2);
+}
+
+static inline u16 rvc_sdsp(u32 imm9, u8 rs2)
+{
+       u32 imm;
+
+       imm = (imm9 & 0x38) | ((imm9 & 0x1c0) >> 6);
+       return rv_css_insn(0x7, imm, rs2, 0x2);
+}
+
+#endif /* __riscv_xlen == 64 */
+
+/* Helper functions that emit RVC instructions when possible. */
+
+static inline void emit_jalr(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+       if (rvc_enabled() && rd == RV_REG_RA && rs && !imm)
+               emitc(rvc_jalr(rs), ctx);
+       else if (rvc_enabled() && !rd && rs && !imm)
+               emitc(rvc_jr(rs), ctx);
+       else
+               emit(rv_jalr(rd, rs, imm), ctx);
+}
+
+static inline void emit_mv(u8 rd, u8 rs, struct rv_jit_context *ctx)
+{
+       if (rvc_enabled() && rd && rs)
+               emitc(rvc_mv(rd, rs), ctx);
+       else
+               emit(rv_addi(rd, rs, 0), ctx);
+}
+
+static inline void emit_add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+       if (rvc_enabled() && rd && rd == rs1 && rs2)
+               emitc(rvc_add(rd, rs2), ctx);
+       else
+               emit(rv_add(rd, rs1, rs2), ctx);
+}
+
+static inline void emit_addi(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+       if (rvc_enabled() && rd == RV_REG_SP && rd == rs && is_10b_int(imm) && imm && !(imm & 0xf))
+               emitc(rvc_addi16sp(imm), ctx);
+       else if (rvc_enabled() && is_creg(rd) && rs == RV_REG_SP && is_10b_uint(imm) &&
+                !(imm & 0x3) && imm)
+               emitc(rvc_addi4spn(rd, imm), ctx);
+       else if (rvc_enabled() && rd && rd == rs && imm && is_6b_int(imm))
+               emitc(rvc_addi(rd, imm), ctx);
+       else
+               emit(rv_addi(rd, rs, imm), ctx);
+}
+
+static inline void emit_li(u8 rd, s32 imm, struct rv_jit_context *ctx)
+{
+       if (rvc_enabled() && rd && is_6b_int(imm))
+               emitc(rvc_li(rd, imm), ctx);
+       else
+               emit(rv_addi(rd, RV_REG_ZERO, imm), ctx);
+}
+
+static inline void emit_lui(u8 rd, s32 imm, struct rv_jit_context *ctx)
+{
+       if (rvc_enabled() && rd && rd != RV_REG_SP && is_6b_int(imm) && imm)
+               emitc(rvc_lui(rd, imm), ctx);
+       else
+               emit(rv_lui(rd, imm), ctx);
+}
+
+static inline void emit_slli(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+       if (rvc_enabled() && rd && rd == rs && imm && (u32)imm < __riscv_xlen)
+               emitc(rvc_slli(rd, imm), ctx);
+       else
+               emit(rv_slli(rd, rs, imm), ctx);
+}
+
+static inline void emit_andi(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+       if (rvc_enabled() && is_creg(rd) && rd == rs && is_6b_int(imm))
+               emitc(rvc_andi(rd, imm), ctx);
+       else
+               emit(rv_andi(rd, rs, imm), ctx);
+}
+
+static inline void emit_srli(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+       if (rvc_enabled() && is_creg(rd) && rd == rs && imm && (u32)imm < __riscv_xlen)
+               emitc(rvc_srli(rd, imm), ctx);
+       else
+               emit(rv_srli(rd, rs, imm), ctx);
+}
+
+static inline void emit_srai(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+       if (rvc_enabled() && is_creg(rd) && rd == rs && imm && (u32)imm < __riscv_xlen)
+               emitc(rvc_srai(rd, imm), ctx);
+       else
+               emit(rv_srai(rd, rs, imm), ctx);
+}
+
+static inline void emit_sub(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+       if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2))
+               emitc(rvc_sub(rd, rs2), ctx);
+       else
+               emit(rv_sub(rd, rs1, rs2), ctx);
+}
+
+static inline void emit_or(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+       if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2))
+               emitc(rvc_or(rd, rs2), ctx);
+       else
+               emit(rv_or(rd, rs1, rs2), ctx);
+}
+
+static inline void emit_and(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+       if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2))
+               emitc(rvc_and(rd, rs2), ctx);
+       else
+               emit(rv_and(rd, rs1, rs2), ctx);
+}
+
+static inline void emit_xor(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+       if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2))
+               emitc(rvc_xor(rd, rs2), ctx);
+       else
+               emit(rv_xor(rd, rs1, rs2), ctx);
+}
+
+static inline void emit_lw(u8 rd, s32 off, u8 rs1, struct rv_jit_context *ctx)
+{
+       if (rvc_enabled() && rs1 == RV_REG_SP && rd && is_8b_uint(off) && !(off & 0x3))
+               emitc(rvc_lwsp(rd, off), ctx);
+       else if (rvc_enabled() && is_creg(rd) && is_creg(rs1) && is_7b_uint(off) && !(off & 0x3))
+               emitc(rvc_lw(rd, off, rs1), ctx);
+       else
+               emit(rv_lw(rd, off, rs1), ctx);
+}
+
+static inline void emit_sw(u8 rs1, s32 off, u8 rs2, struct rv_jit_context *ctx)
+{
+       if (rvc_enabled() && rs1 == RV_REG_SP && is_8b_uint(off) && !(off & 0x3))
+               emitc(rvc_swsp(off, rs2), ctx);
+       else if (rvc_enabled() && is_creg(rs1) && is_creg(rs2) && is_7b_uint(off) && !(off & 0x3))
+               emitc(rvc_sw(rs1, off, rs2), ctx);
+       else
+               emit(rv_sw(rs1, off, rs2), ctx);
+}
+
+/* RV64-only helper functions. */
+#if __riscv_xlen == 64
+
+static inline void emit_addiw(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+       if (rvc_enabled() && rd && rd == rs && is_6b_int(imm))
+               emitc(rvc_addiw(rd, imm), ctx);
+       else
+               emit(rv_addiw(rd, rs, imm), ctx);
+}
+
+static inline void emit_ld(u8 rd, s32 off, u8 rs1, struct rv_jit_context *ctx)
+{
+       if (rvc_enabled() && rs1 == RV_REG_SP && rd && is_9b_uint(off) && !(off & 0x7))
+               emitc(rvc_ldsp(rd, off), ctx);
+       else if (rvc_enabled() && is_creg(rd) && is_creg(rs1) && is_8b_uint(off) && !(off & 0x7))
+               emitc(rvc_ld(rd, off, rs1), ctx);
+       else
+               emit(rv_ld(rd, off, rs1), ctx);
+}
+
+static inline void emit_sd(u8 rs1, s32 off, u8 rs2, struct rv_jit_context *ctx)
+{
+       if (rvc_enabled() && rs1 == RV_REG_SP && is_9b_uint(off) && !(off & 0x7))
+               emitc(rvc_sdsp(off, rs2), ctx);
+       else if (rvc_enabled() && is_creg(rs1) && is_creg(rs2) && is_8b_uint(off) && !(off & 0x7))
+               emitc(rvc_sd(rs1, off, rs2), ctx);
+       else
+               emit(rv_sd(rs1, off, rs2), ctx);
+}
+
+static inline void emit_subw(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+       if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2))
+               emitc(rvc_subw(rd, rs2), ctx);
+       else
+               emit(rv_subw(rd, rs1, rs2), ctx);
+}
+
 #endif /* __riscv_xlen == 64 */
 
 void bpf_jit_build_prologue(struct rv_jit_context *ctx);
index b198eaa..bc5f220 100644 (file)
@@ -644,7 +644,7 @@ static int emit_branch_r64(const s8 *src1, const s8 *src2, s32 rvoff,
 
        e = ctx->ninsns;
        /* Adjust for extra insns. */
-       rvoff -= (e - s) << 2;
+       rvoff -= ninsns_rvoff(e - s);
        emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
        return 0;
 }
@@ -713,7 +713,7 @@ static int emit_bcc(u8 op, u8 rd, u8 rs, int rvoff, struct rv_jit_context *ctx)
        if (far) {
                e = ctx->ninsns;
                /* Adjust for extra insns. */
-               rvoff -= (e - s) << 2;
+               rvoff -= ninsns_rvoff(e - s);
                emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
        }
        return 0;
@@ -731,7 +731,7 @@ static int emit_branch_r32(const s8 *src1, const s8 *src2, s32 rvoff,
 
        e = ctx->ninsns;
        /* Adjust for extra insns. */
-       rvoff -= (e - s) << 2;
+       rvoff -= ninsns_rvoff(e - s);
 
        if (emit_bcc(op, lo(rs1), lo(rs2), rvoff, ctx))
                return -1;
@@ -795,7 +795,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
         * if (index >= max_entries)
         *   goto out;
         */
-       off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
+       off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
        emit_bcc(BPF_JGE, lo(idx_reg), RV_REG_T1, off, ctx);
 
        /*
@@ -804,7 +804,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
         *   goto out;
         */
        emit(rv_addi(RV_REG_T1, RV_REG_TCC, -1), ctx);
-       off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
+       off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
        emit_bcc(BPF_JSLT, RV_REG_TCC, RV_REG_ZERO, off, ctx);
 
        /*
@@ -818,7 +818,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
        if (is_12b_check(off, insn))
                return -1;
        emit(rv_lw(RV_REG_T0, off, RV_REG_T0), ctx);
-       off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
+       off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
        emit_bcc(BPF_JEQ, RV_REG_T0, RV_REG_ZERO, off, ctx);
 
        /*
@@ -1214,7 +1214,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
                        emit_imm32(tmp2, imm, ctx);
                        src = tmp2;
                        e = ctx->ninsns;
-                       rvoff -= (e - s) << 2;
+                       rvoff -= ninsns_rvoff(e - s);
                }
 
                if (is64)
index 6cfd164..8a56b52 100644 (file)
@@ -132,19 +132,23 @@ static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
         *
         * This also means that we need to process LSB to MSB.
         */
-       s64 upper = (val + (1 << 11)) >> 12, lower = val & 0xfff;
+       s64 upper = (val + (1 << 11)) >> 12;
+       /* Sign-extend lower 12 bits to 64 bits since immediates for li, addiw,
+        * and addi are signed and RVC checks will perform signed comparisons.
+        */
+       s64 lower = ((val & 0xfff) << 52) >> 52;
        int shift;
 
        if (is_32b_int(val)) {
                if (upper)
-                       emit(rv_lui(rd, upper), ctx);
+                       emit_lui(rd, upper, ctx);
 
                if (!upper) {
-                       emit(rv_addi(rd, RV_REG_ZERO, lower), ctx);
+                       emit_li(rd, lower, ctx);
                        return;
                }
 
-               emit(rv_addiw(rd, rd, lower), ctx);
+               emit_addiw(rd, rd, lower, ctx);
                return;
        }
 
@@ -154,9 +158,9 @@ static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
 
        emit_imm(rd, upper, ctx);
 
-       emit(rv_slli(rd, rd, shift), ctx);
+       emit_slli(rd, rd, shift, ctx);
        if (lower)
-               emit(rv_addi(rd, rd, lower), ctx);
+               emit_addi(rd, rd, lower, ctx);
 }
 
 static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
@@ -164,43 +168,43 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
        int stack_adjust = ctx->stack_size, store_offset = stack_adjust - 8;
 
        if (seen_reg(RV_REG_RA, ctx)) {
-               emit(rv_ld(RV_REG_RA, store_offset, RV_REG_SP), ctx);
+               emit_ld(RV_REG_RA, store_offset, RV_REG_SP, ctx);
                store_offset -= 8;
        }
-       emit(rv_ld(RV_REG_FP, store_offset, RV_REG_SP), ctx);
+       emit_ld(RV_REG_FP, store_offset, RV_REG_SP, ctx);
        store_offset -= 8;
        if (seen_reg(RV_REG_S1, ctx)) {
-               emit(rv_ld(RV_REG_S1, store_offset, RV_REG_SP), ctx);
+               emit_ld(RV_REG_S1, store_offset, RV_REG_SP, ctx);
                store_offset -= 8;
        }
        if (seen_reg(RV_REG_S2, ctx)) {
-               emit(rv_ld(RV_REG_S2, store_offset, RV_REG_SP), ctx);
+               emit_ld(RV_REG_S2, store_offset, RV_REG_SP, ctx);
                store_offset -= 8;
        }
        if (seen_reg(RV_REG_S3, ctx)) {
-               emit(rv_ld(RV_REG_S3, store_offset, RV_REG_SP), ctx);
+               emit_ld(RV_REG_S3, store_offset, RV_REG_SP, ctx);
                store_offset -= 8;
        }
        if (seen_reg(RV_REG_S4, ctx)) {
-               emit(rv_ld(RV_REG_S4, store_offset, RV_REG_SP), ctx);
+               emit_ld(RV_REG_S4, store_offset, RV_REG_SP, ctx);
                store_offset -= 8;
        }
        if (seen_reg(RV_REG_S5, ctx)) {
-               emit(rv_ld(RV_REG_S5, store_offset, RV_REG_SP), ctx);
+               emit_ld(RV_REG_S5, store_offset, RV_REG_SP, ctx);
                store_offset -= 8;
        }
        if (seen_reg(RV_REG_S6, ctx)) {
-               emit(rv_ld(RV_REG_S6, store_offset, RV_REG_SP), ctx);
+               emit_ld(RV_REG_S6, store_offset, RV_REG_SP, ctx);
                store_offset -= 8;
        }
 
-       emit(rv_addi(RV_REG_SP, RV_REG_SP, stack_adjust), ctx);
+       emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx);
        /* Set return value. */
        if (!is_tail_call)
-               emit(rv_addi(RV_REG_A0, RV_REG_A5, 0), ctx);
-       emit(rv_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
-                    is_tail_call ? 4 : 0), /* skip TCC init */
-            ctx);
+               emit_mv(RV_REG_A0, RV_REG_A5, ctx);
+       emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
+                 is_tail_call ? 4 : 0, /* skip TCC init */
+                 ctx);
 }
 
 static void emit_bcc(u8 cond, u8 rd, u8 rs, int rvoff,
@@ -280,8 +284,8 @@ static void emit_branch(u8 cond, u8 rd, u8 rs, int rvoff,
 
 static void emit_zext_32(u8 reg, struct rv_jit_context *ctx)
 {
-       emit(rv_slli(reg, reg, 32), ctx);
-       emit(rv_srli(reg, reg, 32), ctx);
+       emit_slli(reg, reg, 32, ctx);
+       emit_srli(reg, reg, 32, ctx);
 }
 
 static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
@@ -304,35 +308,35 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
        if (is_12b_check(off, insn))
                return -1;
        emit(rv_lwu(RV_REG_T1, off, RV_REG_A1), ctx);
-       off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
+       off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
        emit_branch(BPF_JGE, RV_REG_A2, RV_REG_T1, off, ctx);
 
        /* if (TCC-- < 0)
         *     goto out;
         */
-       emit(rv_addi(RV_REG_T1, tcc, -1), ctx);
-       off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
+       emit_addi(RV_REG_T1, tcc, -1, ctx);
+       off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
        emit_branch(BPF_JSLT, tcc, RV_REG_ZERO, off, ctx);
 
        /* prog = array->ptrs[index];
         * if (!prog)
         *     goto out;
         */
-       emit(rv_slli(RV_REG_T2, RV_REG_A2, 3), ctx);
-       emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_A1), ctx);
+       emit_slli(RV_REG_T2, RV_REG_A2, 3, ctx);
+       emit_add(RV_REG_T2, RV_REG_T2, RV_REG_A1, ctx);
        off = offsetof(struct bpf_array, ptrs);
        if (is_12b_check(off, insn))
                return -1;
-       emit(rv_ld(RV_REG_T2, off, RV_REG_T2), ctx);
-       off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
+       emit_ld(RV_REG_T2, off, RV_REG_T2, ctx);
+       off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
        emit_branch(BPF_JEQ, RV_REG_T2, RV_REG_ZERO, off, ctx);
 
        /* goto *(prog->bpf_func + 4); */
        off = offsetof(struct bpf_prog, bpf_func);
        if (is_12b_check(off, insn))
                return -1;
-       emit(rv_ld(RV_REG_T3, off, RV_REG_T2), ctx);
-       emit(rv_addi(RV_REG_TCC, RV_REG_T1, 0), ctx);
+       emit_ld(RV_REG_T3, off, RV_REG_T2, ctx);
+       emit_mv(RV_REG_TCC, RV_REG_T1, ctx);
        __build_epilogue(true, ctx);
        return 0;
 }
@@ -360,9 +364,9 @@ static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn,
 
 static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
 {
-       emit(rv_addi(RV_REG_T2, *rd, 0), ctx);
+       emit_mv(RV_REG_T2, *rd, ctx);
        emit_zext_32(RV_REG_T2, ctx);
-       emit(rv_addi(RV_REG_T1, *rs, 0), ctx);
+       emit_mv(RV_REG_T1, *rs, ctx);
        emit_zext_32(RV_REG_T1, ctx);
        *rd = RV_REG_T2;
        *rs = RV_REG_T1;
@@ -370,15 +374,15 @@ static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
 
 static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
 {
-       emit(rv_addiw(RV_REG_T2, *rd, 0), ctx);
-       emit(rv_addiw(RV_REG_T1, *rs, 0), ctx);
+       emit_addiw(RV_REG_T2, *rd, 0, ctx);
+       emit_addiw(RV_REG_T1, *rs, 0, ctx);
        *rd = RV_REG_T2;
        *rs = RV_REG_T1;
 }
 
 static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx)
 {
-       emit(rv_addi(RV_REG_T2, *rd, 0), ctx);
+       emit_mv(RV_REG_T2, *rd, ctx);
        emit_zext_32(RV_REG_T2, ctx);
        emit_zext_32(RV_REG_T1, ctx);
        *rd = RV_REG_T2;
@@ -386,7 +390,7 @@ static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx)
 
 static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
 {
-       emit(rv_addiw(RV_REG_T2, *rd, 0), ctx);
+       emit_addiw(RV_REG_T2, *rd, 0, ctx);
        *rd = RV_REG_T2;
 }
 
@@ -432,7 +436,7 @@ static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
        if (ret)
                return ret;
        rd = bpf_to_rv_reg(BPF_REG_0, ctx);
-       emit(rv_addi(rd, RV_REG_A0, 0), ctx);
+       emit_mv(rd, RV_REG_A0, ctx);
        return 0;
 }
 
@@ -458,7 +462,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
                        emit_zext_32(rd, ctx);
                        break;
                }
-               emit(is64 ? rv_addi(rd, rs, 0) : rv_addiw(rd, rs, 0), ctx);
+               emit_mv(rd, rs, ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
@@ -466,31 +470,35 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
        /* dst = dst OP src */
        case BPF_ALU | BPF_ADD | BPF_X:
        case BPF_ALU64 | BPF_ADD | BPF_X:
-               emit(is64 ? rv_add(rd, rd, rs) : rv_addw(rd, rd, rs), ctx);
+               emit_add(rd, rd, rs, ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_SUB | BPF_X:
        case BPF_ALU64 | BPF_SUB | BPF_X:
-               emit(is64 ? rv_sub(rd, rd, rs) : rv_subw(rd, rd, rs), ctx);
+               if (is64)
+                       emit_sub(rd, rd, rs, ctx);
+               else
+                       emit_subw(rd, rd, rs, ctx);
+
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_AND | BPF_X:
        case BPF_ALU64 | BPF_AND | BPF_X:
-               emit(rv_and(rd, rd, rs), ctx);
+               emit_and(rd, rd, rs, ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_OR | BPF_X:
        case BPF_ALU64 | BPF_OR | BPF_X:
-               emit(rv_or(rd, rd, rs), ctx);
+               emit_or(rd, rd, rs, ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_XOR | BPF_X:
        case BPF_ALU64 | BPF_XOR | BPF_X:
-               emit(rv_xor(rd, rd, rs), ctx);
+               emit_xor(rd, rd, rs, ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
@@ -534,8 +542,7 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
        /* dst = -dst */
        case BPF_ALU | BPF_NEG:
        case BPF_ALU64 | BPF_NEG:
-               emit(is64 ? rv_sub(rd, RV_REG_ZERO, rd) :
-                    rv_subw(rd, RV_REG_ZERO, rd), ctx);
+               emit_sub(rd, RV_REG_ZERO, rd, ctx);
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
@@ -544,8 +551,8 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
        case BPF_ALU | BPF_END | BPF_FROM_LE:
                switch (imm) {
                case 16:
-                       emit(rv_slli(rd, rd, 48), ctx);
-                       emit(rv_srli(rd, rd, 48), ctx);
+                       emit_slli(rd, rd, 48, ctx);
+                       emit_srli(rd, rd, 48, ctx);
                        break;
                case 32:
                        if (!aux->verifier_zext)
@@ -558,51 +565,51 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
                break;
 
        case BPF_ALU | BPF_END | BPF_FROM_BE:
-               emit(rv_addi(RV_REG_T2, RV_REG_ZERO, 0), ctx);
+               emit_li(RV_REG_T2, 0, ctx);
 
-               emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
-               emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
-               emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
-               emit(rv_srli(rd, rd, 8), ctx);
+               emit_andi(RV_REG_T1, rd, 0xff, ctx);
+               emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+               emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+               emit_srli(rd, rd, 8, ctx);
                if (imm == 16)
                        goto out_be;
 
-               emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
-               emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
-               emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
-               emit(rv_srli(rd, rd, 8), ctx);
+               emit_andi(RV_REG_T1, rd, 0xff, ctx);
+               emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+               emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+               emit_srli(rd, rd, 8, ctx);
 
-               emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
-               emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
-               emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
-               emit(rv_srli(rd, rd, 8), ctx);
+               emit_andi(RV_REG_T1, rd, 0xff, ctx);
+               emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+               emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+               emit_srli(rd, rd, 8, ctx);
                if (imm == 32)
                        goto out_be;
 
-               emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
-               emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
-               emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
-               emit(rv_srli(rd, rd, 8), ctx);
-
-               emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
-               emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
-               emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
-               emit(rv_srli(rd, rd, 8), ctx);
-
-               emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
-               emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
-               emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
-               emit(rv_srli(rd, rd, 8), ctx);
-
-               emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
-               emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
-               emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx);
-               emit(rv_srli(rd, rd, 8), ctx);
+               emit_andi(RV_REG_T1, rd, 0xff, ctx);
+               emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+               emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+               emit_srli(rd, rd, 8, ctx);
+
+               emit_andi(RV_REG_T1, rd, 0xff, ctx);
+               emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+               emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+               emit_srli(rd, rd, 8, ctx);
+
+               emit_andi(RV_REG_T1, rd, 0xff, ctx);
+               emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+               emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+               emit_srli(rd, rd, 8, ctx);
+
+               emit_andi(RV_REG_T1, rd, 0xff, ctx);
+               emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+               emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+               emit_srli(rd, rd, 8, ctx);
 out_be:
-               emit(rv_andi(RV_REG_T1, rd, 0xff), ctx);
-               emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx);
+               emit_andi(RV_REG_T1, rd, 0xff, ctx);
+               emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
 
-               emit(rv_addi(rd, RV_REG_T2, 0), ctx);
+               emit_mv(rd, RV_REG_T2, ctx);
                break;
 
        /* dst = imm */
@@ -617,12 +624,10 @@ out_be:
        case BPF_ALU | BPF_ADD | BPF_K:
        case BPF_ALU64 | BPF_ADD | BPF_K:
                if (is_12b_int(imm)) {
-                       emit(is64 ? rv_addi(rd, rd, imm) :
-                            rv_addiw(rd, rd, imm), ctx);
+                       emit_addi(rd, rd, imm, ctx);
                } else {
                        emit_imm(RV_REG_T1, imm, ctx);
-                       emit(is64 ? rv_add(rd, rd, RV_REG_T1) :
-                            rv_addw(rd, rd, RV_REG_T1), ctx);
+                       emit_add(rd, rd, RV_REG_T1, ctx);
                }
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
@@ -630,12 +635,10 @@ out_be:
        case BPF_ALU | BPF_SUB | BPF_K:
        case BPF_ALU64 | BPF_SUB | BPF_K:
                if (is_12b_int(-imm)) {
-                       emit(is64 ? rv_addi(rd, rd, -imm) :
-                            rv_addiw(rd, rd, -imm), ctx);
+                       emit_addi(rd, rd, -imm, ctx);
                } else {
                        emit_imm(RV_REG_T1, imm, ctx);
-                       emit(is64 ? rv_sub(rd, rd, RV_REG_T1) :
-                            rv_subw(rd, rd, RV_REG_T1), ctx);
+                       emit_sub(rd, rd, RV_REG_T1, ctx);
                }
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
@@ -643,10 +646,10 @@ out_be:
        case BPF_ALU | BPF_AND | BPF_K:
        case BPF_ALU64 | BPF_AND | BPF_K:
                if (is_12b_int(imm)) {
-                       emit(rv_andi(rd, rd, imm), ctx);
+                       emit_andi(rd, rd, imm, ctx);
                } else {
                        emit_imm(RV_REG_T1, imm, ctx);
-                       emit(rv_and(rd, rd, RV_REG_T1), ctx);
+                       emit_and(rd, rd, RV_REG_T1, ctx);
                }
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
@@ -657,7 +660,7 @@ out_be:
                        emit(rv_ori(rd, rd, imm), ctx);
                } else {
                        emit_imm(RV_REG_T1, imm, ctx);
-                       emit(rv_or(rd, rd, RV_REG_T1), ctx);
+                       emit_or(rd, rd, RV_REG_T1, ctx);
                }
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
@@ -668,7 +671,7 @@ out_be:
                        emit(rv_xori(rd, rd, imm), ctx);
                } else {
                        emit_imm(RV_REG_T1, imm, ctx);
-                       emit(rv_xor(rd, rd, RV_REG_T1), ctx);
+                       emit_xor(rd, rd, RV_REG_T1, ctx);
                }
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
@@ -699,19 +702,28 @@ out_be:
                break;
        case BPF_ALU | BPF_LSH | BPF_K:
        case BPF_ALU64 | BPF_LSH | BPF_K:
-               emit(is64 ? rv_slli(rd, rd, imm) : rv_slliw(rd, rd, imm), ctx);
+               emit_slli(rd, rd, imm, ctx);
+
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_RSH | BPF_K:
        case BPF_ALU64 | BPF_RSH | BPF_K:
-               emit(is64 ? rv_srli(rd, rd, imm) : rv_srliw(rd, rd, imm), ctx);
+               if (is64)
+                       emit_srli(rd, rd, imm, ctx);
+               else
+                       emit(rv_srliw(rd, rd, imm), ctx);
+
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
        case BPF_ALU | BPF_ARSH | BPF_K:
        case BPF_ALU64 | BPF_ARSH | BPF_K:
-               emit(is64 ? rv_srai(rd, rd, imm) : rv_sraiw(rd, rd, imm), ctx);
+               if (is64)
+                       emit_srai(rd, rd, imm, ctx);
+               else
+                       emit(rv_sraiw(rd, rd, imm), ctx);
+
                if (!is64 && !aux->verifier_zext)
                        emit_zext_32(rd, ctx);
                break;
@@ -757,13 +769,13 @@ out_be:
                        e = ctx->ninsns;
 
                        /* Adjust for extra insns */
-                       rvoff -= (e - s) << 2;
+                       rvoff -= ninsns_rvoff(e - s);
                }
 
                if (BPF_OP(code) == BPF_JSET) {
                        /* Adjust for and */
                        rvoff -= 4;
-                       emit(rv_and(RV_REG_T1, rd, rs), ctx);
+                       emit_and(RV_REG_T1, rd, rs, ctx);
                        emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff,
                                    ctx);
                } else {
@@ -810,7 +822,7 @@ out_be:
                e = ctx->ninsns;
 
                /* Adjust for extra insns */
-               rvoff -= (e - s) << 2;
+               rvoff -= ninsns_rvoff(e - s);
                emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
                break;
 
@@ -819,19 +831,19 @@ out_be:
                rvoff = rv_offset(i, off, ctx);
                s = ctx->ninsns;
                if (is_12b_int(imm)) {
-                       emit(rv_andi(RV_REG_T1, rd, imm), ctx);
+                       emit_andi(RV_REG_T1, rd, imm, ctx);
                } else {
                        emit_imm(RV_REG_T1, imm, ctx);
-                       emit(rv_and(RV_REG_T1, rd, RV_REG_T1), ctx);
+                       emit_and(RV_REG_T1, rd, RV_REG_T1, ctx);
                }
                /* For jset32, we should clear the upper 32 bits of t1, but
                 * sign-extension is sufficient here and saves one instruction,
                 * as t1 is used only in comparison against zero.
                 */
                if (!is64 && imm < 0)
-                       emit(rv_addiw(RV_REG_T1, RV_REG_T1, 0), ctx);
+                       emit_addiw(RV_REG_T1, RV_REG_T1, 0, ctx);
                e = ctx->ninsns;
-               rvoff -= (e - s) << 2;
+               rvoff -= ninsns_rvoff(e - s);
                emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx);
                break;
 
@@ -887,7 +899,7 @@ out_be:
                }
 
                emit_imm(RV_REG_T1, off, ctx);
-               emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
+               emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
                emit(rv_lbu(rd, 0, RV_REG_T1), ctx);
                if (insn_is_zext(&insn[1]))
                        return 1;
@@ -899,7 +911,7 @@ out_be:
                }
 
                emit_imm(RV_REG_T1, off, ctx);
-               emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
+               emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
                emit(rv_lhu(rd, 0, RV_REG_T1), ctx);
                if (insn_is_zext(&insn[1]))
                        return 1;
@@ -911,20 +923,20 @@ out_be:
                }
 
                emit_imm(RV_REG_T1, off, ctx);
-               emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
+               emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
                emit(rv_lwu(rd, 0, RV_REG_T1), ctx);
                if (insn_is_zext(&insn[1]))
                        return 1;
                break;
        case BPF_LDX | BPF_MEM | BPF_DW:
                if (is_12b_int(off)) {
-                       emit(rv_ld(rd, off, rs), ctx);
+                       emit_ld(rd, off, rs, ctx);
                        break;
                }
 
                emit_imm(RV_REG_T1, off, ctx);
-               emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx);
-               emit(rv_ld(rd, 0, RV_REG_T1), ctx);
+               emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
+               emit_ld(rd, 0, RV_REG_T1, ctx);
                break;
 
        /* ST: *(size *)(dst + off) = imm */
@@ -936,7 +948,7 @@ out_be:
                }
 
                emit_imm(RV_REG_T2, off, ctx);
-               emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx);
+               emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
                emit(rv_sb(RV_REG_T2, 0, RV_REG_T1), ctx);
                break;
 
@@ -948,30 +960,30 @@ out_be:
                }
 
                emit_imm(RV_REG_T2, off, ctx);
-               emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx);
+               emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
                emit(rv_sh(RV_REG_T2, 0, RV_REG_T1), ctx);
                break;
        case BPF_ST | BPF_MEM | BPF_W:
                emit_imm(RV_REG_T1, imm, ctx);
                if (is_12b_int(off)) {
-                       emit(rv_sw(rd, off, RV_REG_T1), ctx);
+                       emit_sw(rd, off, RV_REG_T1, ctx);
                        break;
                }
 
                emit_imm(RV_REG_T2, off, ctx);
-               emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx);
-               emit(rv_sw(RV_REG_T2, 0, RV_REG_T1), ctx);
+               emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
+               emit_sw(RV_REG_T2, 0, RV_REG_T1, ctx);
                break;
        case BPF_ST | BPF_MEM | BPF_DW:
                emit_imm(RV_REG_T1, imm, ctx);
                if (is_12b_int(off)) {
-                       emit(rv_sd(rd, off, RV_REG_T1), ctx);
+                       emit_sd(rd, off, RV_REG_T1, ctx);
                        break;
                }
 
                emit_imm(RV_REG_T2, off, ctx);
-               emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx);
-               emit(rv_sd(RV_REG_T2, 0, RV_REG_T1), ctx);
+               emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
+               emit_sd(RV_REG_T2, 0, RV_REG_T1, ctx);
                break;
 
        /* STX: *(size *)(dst + off) = src */
@@ -982,7 +994,7 @@ out_be:
                }
 
                emit_imm(RV_REG_T1, off, ctx);
-               emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
+               emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
                emit(rv_sb(RV_REG_T1, 0, rs), ctx);
                break;
        case BPF_STX | BPF_MEM | BPF_H:
@@ -992,28 +1004,28 @@ out_be:
                }
 
                emit_imm(RV_REG_T1, off, ctx);
-               emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
+               emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
                emit(rv_sh(RV_REG_T1, 0, rs), ctx);
                break;
        case BPF_STX | BPF_MEM | BPF_W:
                if (is_12b_int(off)) {
-                       emit(rv_sw(rd, off, rs), ctx);
+                       emit_sw(rd, off, rs, ctx);
                        break;
                }
 
                emit_imm(RV_REG_T1, off, ctx);
-               emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
-               emit(rv_sw(RV_REG_T1, 0, rs), ctx);
+               emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+               emit_sw(RV_REG_T1, 0, rs, ctx);
                break;
        case BPF_STX | BPF_MEM | BPF_DW:
                if (is_12b_int(off)) {
-                       emit(rv_sd(rd, off, rs), ctx);
+                       emit_sd(rd, off, rs, ctx);
                        break;
                }
 
                emit_imm(RV_REG_T1, off, ctx);
-               emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
-               emit(rv_sd(RV_REG_T1, 0, rs), ctx);
+               emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+               emit_sd(RV_REG_T1, 0, rs, ctx);
                break;
        /* STX XADD: lock *(u32 *)(dst + off) += src */
        case BPF_STX | BPF_XADD | BPF_W:
@@ -1021,10 +1033,10 @@ out_be:
        case BPF_STX | BPF_XADD | BPF_DW:
                if (off) {
                        if (is_12b_int(off)) {
-                               emit(rv_addi(RV_REG_T1, rd, off), ctx);
+                               emit_addi(RV_REG_T1, rd, off, ctx);
                        } else {
                                emit_imm(RV_REG_T1, off, ctx);
-                               emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx);
+                               emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
                        }
 
                        rd = RV_REG_T1;
@@ -1073,52 +1085,53 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx)
 
        /* First instruction is always setting the tail-call-counter
         * (TCC) register. This instruction is skipped for tail calls.
+        * Force using a 4-byte (non-compressed) instruction.
         */
        emit(rv_addi(RV_REG_TCC, RV_REG_ZERO, MAX_TAIL_CALL_CNT), ctx);
 
-       emit(rv_addi(RV_REG_SP, RV_REG_SP, -stack_adjust), ctx);
+       emit_addi(RV_REG_SP, RV_REG_SP, -stack_adjust, ctx);
 
        if (seen_reg(RV_REG_RA, ctx)) {
-               emit(rv_sd(RV_REG_SP, store_offset, RV_REG_RA), ctx);
+               emit_sd(RV_REG_SP, store_offset, RV_REG_RA, ctx);
                store_offset -= 8;
        }
-       emit(rv_sd(RV_REG_SP, store_offset, RV_REG_FP), ctx);
+       emit_sd(RV_REG_SP, store_offset, RV_REG_FP, ctx);
        store_offset -= 8;
        if (seen_reg(RV_REG_S1, ctx)) {
-               emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S1), ctx);
+               emit_sd(RV_REG_SP, store_offset, RV_REG_S1, ctx);
                store_offset -= 8;
        }
        if (seen_reg(RV_REG_S2, ctx)) {
-               emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S2), ctx);
+               emit_sd(RV_REG_SP, store_offset, RV_REG_S2, ctx);
                store_offset -= 8;
        }
        if (seen_reg(RV_REG_S3, ctx)) {
-               emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S3), ctx);
+               emit_sd(RV_REG_SP, store_offset, RV_REG_S3, ctx);
                store_offset -= 8;
        }
        if (seen_reg(RV_REG_S4, ctx)) {
-               emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S4), ctx);
+               emit_sd(RV_REG_SP, store_offset, RV_REG_S4, ctx);
                store_offset -= 8;
        }
        if (seen_reg(RV_REG_S5, ctx)) {
-               emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S5), ctx);
+               emit_sd(RV_REG_SP, store_offset, RV_REG_S5, ctx);
                store_offset -= 8;
        }
        if (seen_reg(RV_REG_S6, ctx)) {
-               emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S6), ctx);
+               emit_sd(RV_REG_SP, store_offset, RV_REG_S6, ctx);
                store_offset -= 8;
        }
 
-       emit(rv_addi(RV_REG_FP, RV_REG_SP, stack_adjust), ctx);
+       emit_addi(RV_REG_FP, RV_REG_SP, stack_adjust, ctx);
 
        if (bpf_stack_adjust)
-               emit(rv_addi(RV_REG_S5, RV_REG_SP, bpf_stack_adjust), ctx);
+               emit_addi(RV_REG_S5, RV_REG_SP, bpf_stack_adjust, ctx);
 
        /* Program contains calls and tail calls, so RV_REG_TCC need
         * to be saved across calls.
         */
        if (seen_tail_call(ctx) && seen_call(ctx))
-               emit(rv_addi(RV_REG_TCC_SAVED, RV_REG_TCC, 0), ctx);
+               emit_mv(RV_REG_TCC_SAVED, RV_REG_TCC, ctx);
 
        ctx->stack_size = stack_adjust;
 }
index 709b94e..3630d44 100644 (file)
@@ -73,7 +73,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
        if (ctx->offset) {
                extra_pass = true;
-               image_size = sizeof(u32) * ctx->ninsns;
+               image_size = sizeof(*ctx->insns) * ctx->ninsns;
                goto skip_init_ctx;
        }
 
@@ -103,7 +103,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
                        if (jit_data->header)
                                break;
 
-                       image_size = sizeof(u32) * ctx->ninsns;
+                       image_size = sizeof(*ctx->insns) * ctx->ninsns;
                        jit_data->header =
                                bpf_jit_binary_alloc(image_size,
                                                     &jit_data->image,
@@ -114,7 +114,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
                                goto out_offset;
                        }
 
-                       ctx->insns = (u32 *)jit_data->image;
+                       ctx->insns = (u16 *)jit_data->image;
                        /*
                         * Now, when the image is allocated, the image can
                         * potentially shrink more (auipc/jalr -> jal).
index 1948249..c7d7ede 100644 (file)
@@ -462,6 +462,7 @@ config NUMA
 
 config NODES_SHIFT
        int
+       depends on NEED_MULTIPLE_NODES
        default "1"
 
 config SCHED_SMT
index 46038bc..0cf9a82 100644 (file)
@@ -1,5 +1,6 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_WATCH_QUEUE=y
 CONFIG_AUDIT=y
 CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -14,7 +15,6 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
 CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
 CONFIG_CFS_BANDWIDTH=y
 CONFIG_RT_GROUP_SCHED=y
@@ -31,9 +31,9 @@ CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_SCHED_AUTOGROUP=y
-CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
+CONFIG_BPF_LSM=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -51,14 +51,11 @@ CONFIG_CHSC_SCH=y
 CONFIG_VFIO_CCW=m
 CONFIG_VFIO_AP=m
 CONFIG_CRASH_DUMP=y
-CONFIG_HIBERNATION=y
-CONFIG_PM_DEBUG=y
 CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
 CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
 CONFIG_KVM=m
-CONFIG_VHOST_NET=m
-CONFIG_VHOST_VSOCK=m
+CONFIG_S390_UNWIND_SELFTEST=y
 CONFIG_OPROFILE=m
 CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
@@ -77,6 +74,8 @@ CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
 CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -96,7 +95,6 @@ CONFIG_CMA_DEBUG=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZSWAP=y
-CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
 CONFIG_ZSMALLOC_STAT=y
 CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
@@ -130,6 +128,7 @@ CONFIG_SYN_COOKIES=y
 CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESPINTCP=y
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
@@ -144,6 +143,7 @@ CONFIG_TCP_CONG_ILLINOIS=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESPINTCP=y
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_MIP6=m
 CONFIG_IPV6_VTI=m
@@ -151,7 +151,10 @@ CONFIG_IPV6_SIT=m
 CONFIG_IPV6_GRE=m
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_RPL_LWTUNNEL=y
+CONFIG_MPTCP=y
 CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
@@ -317,6 +320,7 @@ CONFIG_L2TP_V3=y
 CONFIG_L2TP_IP=m
 CONFIG_L2TP_ETH=m
 CONFIG_BRIDGE=m
+CONFIG_BRIDGE_MRP=y
 CONFIG_VLAN_8021Q=m
 CONFIG_VLAN_8021Q_GVRP=y
 CONFIG_NET_SCHED=y
@@ -341,6 +345,7 @@ CONFIG_NET_SCH_CODEL=m
 CONFIG_NET_SCH_FQ_CODEL=m
 CONFIG_NET_SCH_INGRESS=m
 CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_SCH_ETS=m
 CONFIG_NET_CLS_BASIC=m
 CONFIG_NET_CLS_TCINDEX=m
 CONFIG_NET_CLS_ROUTE4=m
@@ -364,6 +369,7 @@ CONFIG_NET_ACT_PEDIT=m
 CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_GATE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_OPENVSWITCH=m
 CONFIG_VSOCKETS=m
@@ -374,6 +380,7 @@ CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
 # CONFIG_NET_DROP_MONITOR is not set
 CONFIG_PCI=y
+# CONFIG_PCIEASPM is not set
 CONFIG_PCI_DEBUG=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
@@ -435,6 +442,7 @@ CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_MULTIPATH_QL=m
 CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_MULTIPATH_HST=m
 CONFIG_DM_DELAY=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_FLAKEY=m
@@ -448,6 +456,8 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_BAREUDP=m
 CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
@@ -481,7 +491,6 @@ CONFIG_NLMON=m
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
 CONFIG_MLX5_CORE_EN=y
-# CONFIG_MLXFW is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_MICROCHIP is not set
 # CONFIG_NET_VENDOR_MICROSEMI is not set
@@ -514,6 +523,7 @@ CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_TI is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -561,6 +571,8 @@ CONFIG_VFIO_MDEV_DEVICE=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
 CONFIG_S390_CCW_IOMMU=y
 CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
@@ -608,6 +620,7 @@ CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
+CONFIG_EXFAT_FS=m
 CONFIG_NTFS_FS=m
 CONFIG_NTFS_RW=y
 CONFIG_PROC_KCORE=y
@@ -650,8 +663,8 @@ CONFIG_NLS_UTF8=m
 CONFIG_DLM=m
 CONFIG_UNICODE=y
 CONFIG_PERSISTENT_KEYRINGS=y
-CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEY_NOTIFICATIONS=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
 CONFIG_FORTIFY_SOURCE=y
@@ -675,8 +688,11 @@ CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
+CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_SEQIV=y
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -685,6 +701,7 @@ CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -701,6 +718,7 @@ CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
 CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SALSA20=m
@@ -719,6 +737,9 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_CRYPTO_STATS=y
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
 CONFIG_CRYPTO_PAES_S390=m
@@ -774,6 +795,7 @@ CONFIG_DEBUG_SHIRQ=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_WQ_WATCHDOG=y
+CONFIG_TEST_LOCKUP=m
 CONFIG_DEBUG_TIMEKEEPING=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_LOCK_STAT=y
@@ -786,7 +808,9 @@ CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_DEBUG_CREDENTIALS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=300
+# CONFIG_RCU_TRACE is not set
 CONFIG_LATENCYTOP=y
+CONFIG_BOOTTIME_TRACING=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_STACK_TRACER=y
 CONFIG_IRQSOFF_TRACER=y
@@ -808,10 +832,12 @@ CONFIG_FAULT_INJECTION_DEBUG_FS=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LKDTM=m
 CONFIG_TEST_LIST_SORT=y
+CONFIG_TEST_MIN_HEAP=y
 CONFIG_TEST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_TEST_BITOPS=m
 CONFIG_TEST_BPF=m
index 7cd0648..5df9759 100644 (file)
@@ -1,5 +1,6 @@
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_WATCH_QUEUE=y
 CONFIG_AUDIT=y
 CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -13,7 +14,6 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
 CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
 CONFIG_CFS_BANDWIDTH=y
 CONFIG_RT_GROUP_SCHED=y
@@ -30,9 +30,9 @@ CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_SCHED_AUTOGROUP=y
-CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
+CONFIG_BPF_LSM=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -41,7 +41,6 @@ CONFIG_LIVEPATCH=y
 CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=512
 CONFIG_NUMA=y
-# CONFIG_NUMA_EMU is not set
 CONFIG_HZ_100=y
 CONFIG_KEXEC_FILE=y
 CONFIG_KEXEC_SIG=y
@@ -51,14 +50,11 @@ CONFIG_CHSC_SCH=y
 CONFIG_VFIO_CCW=m
 CONFIG_VFIO_AP=m
 CONFIG_CRASH_DUMP=y
-CONFIG_HIBERNATION=y
-CONFIG_PM_DEBUG=y
 CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
 CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y
 CONFIG_KVM=m
-CONFIG_VHOST_NET=m
-CONFIG_VHOST_VSOCK=m
+CONFIG_S390_UNWIND_SELFTEST=m
 CONFIG_OPROFILE=m
 CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
@@ -74,6 +70,8 @@ CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
 CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -91,7 +89,6 @@ CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
 CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZSWAP=y
-CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
 CONFIG_ZSMALLOC_STAT=y
 CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
@@ -125,6 +122,7 @@ CONFIG_SYN_COOKIES=y
 CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
+CONFIG_INET_ESPINTCP=y
 CONFIG_INET_IPCOMP=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
@@ -139,6 +137,7 @@ CONFIG_TCP_CONG_ILLINOIS=m
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
+CONFIG_INET6_ESPINTCP=y
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_MIP6=m
 CONFIG_IPV6_VTI=m
@@ -146,7 +145,10 @@ CONFIG_IPV6_SIT=m
 CONFIG_IPV6_GRE=m
 CONFIG_IPV6_MULTIPLE_TABLES=y
 CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_RPL_LWTUNNEL=y
+CONFIG_MPTCP=y
 CONFIG_NETFILTER=y
+CONFIG_BRIDGE_NETFILTER=m
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
@@ -311,6 +313,7 @@ CONFIG_L2TP_V3=y
 CONFIG_L2TP_IP=m
 CONFIG_L2TP_ETH=m
 CONFIG_BRIDGE=m
+CONFIG_BRIDGE_MRP=y
 CONFIG_VLAN_8021Q=m
 CONFIG_VLAN_8021Q_GVRP=y
 CONFIG_NET_SCHED=y
@@ -335,6 +338,7 @@ CONFIG_NET_SCH_CODEL=m
 CONFIG_NET_SCH_FQ_CODEL=m
 CONFIG_NET_SCH_INGRESS=m
 CONFIG_NET_SCH_PLUG=m
+CONFIG_NET_SCH_ETS=m
 CONFIG_NET_CLS_BASIC=m
 CONFIG_NET_CLS_TCINDEX=m
 CONFIG_NET_CLS_ROUTE4=m
@@ -358,6 +362,7 @@ CONFIG_NET_ACT_PEDIT=m
 CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
+CONFIG_NET_ACT_GATE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_OPENVSWITCH=m
 CONFIG_VSOCKETS=m
@@ -368,6 +373,7 @@ CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
 # CONFIG_NET_DROP_MONITOR is not set
 CONFIG_PCI=y
+# CONFIG_PCIEASPM is not set
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
 CONFIG_UEVENT_HELPER=y
@@ -430,6 +436,7 @@ CONFIG_DM_ZERO=m
 CONFIG_DM_MULTIPATH=m
 CONFIG_DM_MULTIPATH_QL=m
 CONFIG_DM_MULTIPATH_ST=m
+CONFIG_DM_MULTIPATH_HST=m
 CONFIG_DM_DELAY=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_FLAKEY=m
@@ -444,6 +451,8 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
+CONFIG_VXLAN=m
+CONFIG_BAREUDP=m
 CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
@@ -477,7 +486,6 @@ CONFIG_NLMON=m
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
 CONFIG_MLX5_CORE_EN=y
-# CONFIG_MLXFW is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_MICROCHIP is not set
 # CONFIG_NET_VENDOR_MICROSEMI is not set
@@ -510,6 +518,7 @@ CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_TI is not set
 # CONFIG_NET_VENDOR_VIA is not set
 # CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
@@ -557,6 +566,8 @@ CONFIG_VFIO_MDEV_DEVICE=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
 CONFIG_S390_CCW_IOMMU=y
 CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
@@ -600,6 +611,7 @@ CONFIG_ZISOFS=y
 CONFIG_UDF_FS=m
 CONFIG_MSDOS_FS=m
 CONFIG_VFAT_FS=m
+CONFIG_EXFAT_FS=m
 CONFIG_NTFS_FS=m
 CONFIG_NTFS_RW=y
 CONFIG_PROC_KCORE=y
@@ -642,8 +654,8 @@ CONFIG_NLS_UTF8=m
 CONFIG_DLM=m
 CONFIG_UNICODE=y
 CONFIG_PERSISTENT_KEYRINGS=y
-CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
+CONFIG_KEY_NOTIFICATIONS=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
 CONFIG_SECURITY_SELINUX=y
@@ -667,8 +679,11 @@ CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CURVE25519=m
+CONFIG_CRYPTO_GCM=y
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_SEQIV=y
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_OFB=m
@@ -678,6 +693,7 @@ CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_BLAKE2S=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
@@ -694,6 +710,7 @@ CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
 CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
 CONFIG_CRYPTO_FCRYPT=m
 CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SALSA20=m
@@ -712,6 +729,9 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_CRYPTO_STATS=y
+CONFIG_CRYPTO_LIB_BLAKE2S=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
 CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
 CONFIG_CRYPTO_PAES_S390=m
@@ -725,6 +745,7 @@ CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CORDIC=m
+CONFIG_PRIME_NUMBERS=m
 CONFIG_CRC4=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
@@ -739,10 +760,12 @@ CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_PANIC_ON_OOPS=y
+CONFIG_TEST_LOCKUP=m
 CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
+CONFIG_BOOTTIME_TRACING=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_STACK_TRACER=y
 CONFIG_SCHED_TRACER=y
index 20c51e5..4091c50 100644 (file)
@@ -30,6 +30,7 @@ CONFIG_IBM_PARTITION=y
 # CONFIG_BOUNCE is not set
 CONFIG_NET=y
 # CONFIG_IUCV is not set
+# CONFIG_ETHTOOL_NETLINK is not set
 CONFIG_DEVTMPFS=y
 CONFIG_BLK_DEV_RAM=y
 # CONFIG_BLK_DEV_XPRAM is not set
@@ -55,6 +56,8 @@ CONFIG_RAW_DRIVER=y
 # CONFIG_MONWRITER is not set
 # CONFIG_S390_VMUR is not set
 # CONFIG_HID is not set
+# CONFIG_VIRTIO_MENU is not set
+# CONFIG_VHOST_MENU is not set
 # CONFIG_IOMMU_SUPPORT is not set
 # CONFIG_DNOTIFY is not set
 # CONFIG_INOTIFY_USER is not set
@@ -62,7 +65,9 @@ CONFIG_CONFIGFS_FS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_LSM="yama,loadpin,safesetid,integrity"
+# CONFIG_ZLIB_DFLTCC is not set
 CONFIG_PRINTK_TIME=y
+# CONFIG_SYMBOLIC_ERRNAME is not set
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
index d977643..e1ae239 100644 (file)
@@ -693,7 +693,7 @@ static ssize_t prng_chunksize_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "%u\n", prng_chunk_size);
+       return scnprintf(buf, PAGE_SIZE, "%u\n", prng_chunk_size);
 }
 static DEVICE_ATTR(chunksize, 0444, prng_chunksize_show, NULL);
 
@@ -712,7 +712,7 @@ static ssize_t prng_counter_show(struct device *dev,
                counter = prng_data->prngws.byte_counter;
        mutex_unlock(&prng_data->mutex);
 
-       return snprintf(buf, PAGE_SIZE, "%llu\n", counter);
+       return scnprintf(buf, PAGE_SIZE, "%llu\n", counter);
 }
 static DEVICE_ATTR(byte_counter, 0444, prng_counter_show, NULL);
 
@@ -721,7 +721,7 @@ static ssize_t prng_errorflag_show(struct device *dev,
                                   struct device_attribute *attr,
                                   char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "%d\n", prng_errorflag);
+       return scnprintf(buf, PAGE_SIZE, "%d\n", prng_errorflag);
 }
 static DEVICE_ATTR(errorflag, 0444, prng_errorflag_show, NULL);
 
@@ -731,9 +731,9 @@ static ssize_t prng_mode_show(struct device *dev,
                              char *buf)
 {
        if (prng_mode == PRNG_MODE_TDES)
-               return snprintf(buf, PAGE_SIZE, "TDES\n");
+               return scnprintf(buf, PAGE_SIZE, "TDES\n");
        else
-               return snprintf(buf, PAGE_SIZE, "SHA512\n");
+               return scnprintf(buf, PAGE_SIZE, "SHA512\n");
 }
 static DEVICE_ATTR(mode, 0444, prng_mode_show, NULL);
 
@@ -756,7 +756,7 @@ static ssize_t prng_reseed_limit_show(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "%u\n", prng_reseed_limit);
+       return scnprintf(buf, PAGE_SIZE, "%u\n", prng_reseed_limit);
 }
 static ssize_t prng_reseed_limit_store(struct device *dev,
                                       struct device_attribute *attr,
@@ -787,7 +787,7 @@ static ssize_t prng_strength_show(struct device *dev,
                                  struct device_attribute *attr,
                                  char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "256\n");
+       return scnprintf(buf, PAGE_SIZE, "256\n");
 }
 static DEVICE_ATTR(strength, 0444, prng_strength_show, NULL);
 
index cee3cb6..6ea0820 100644 (file)
 #define KVM_USER_MEM_SLOTS 32
 
 /*
- * These seem to be used for allocating ->chip in the routing table,
- * which we don't use. 4096 is an out-of-thin-air value. If we need
- * to look at ->chip later on, we'll need to revisit this.
+ * These seem to be used for allocating ->chip in the routing table, which we
+ * don't use. 1 is as small as we can get to reduce the needed memory. If we
+ * need to look at ->chip later on, we'll need to revisit this.
  */
 #define KVM_NR_IRQCHIPS 1
-#define KVM_IRQCHIP_NUM_PINS 4096
+#define KVM_IRQCHIP_NUM_PINS 1
 #define KVM_HALT_POLL_NS_DEFAULT 50000
 
 /* s390-specific vcpu->requests bit members */
index f073292..d9d5de0 100644 (file)
@@ -33,7 +33,17 @@ static inline void syscall_rollback(struct task_struct *task,
 static inline long syscall_get_error(struct task_struct *task,
                                     struct pt_regs *regs)
 {
-       return IS_ERR_VALUE(regs->gprs[2]) ? regs->gprs[2] : 0;
+       unsigned long error = regs->gprs[2];
+#ifdef CONFIG_COMPAT
+       if (test_tsk_thread_flag(task, TIF_31BIT)) {
+               /*
+                * Sign-extend the value so (int)-EFOO becomes (long)-EFOO
+                * and will match correctly in comparisons.
+                */
+               error = (long)(int)error;
+       }
+#endif
+       return IS_ERR_VALUE(error) ? error : 0;
 }
 
 static inline long syscall_get_return_value(struct task_struct *task,
index 3bcfdeb..0cd085c 100644 (file)
@@ -36,6 +36,7 @@ struct vdso_data {
        __u32 tk_shift;                 /* Shift used for xtime_nsec    0x60 */
        __u32 ts_dir;                   /* TOD steering direction       0x64 */
        __u64 ts_end;                   /* TOD steering end             0x68 */
+       __u32 hrtimer_res;              /* hrtimer resolution           0x70 */
 };
 
 struct vdso_per_cpu_data {
index 165031b..5d8cc18 100644 (file)
@@ -76,6 +76,7 @@ int main(void)
        OFFSET(__VDSO_TK_SHIFT, vdso_data, tk_shift);
        OFFSET(__VDSO_TS_DIR, vdso_data, ts_dir);
        OFFSET(__VDSO_TS_END, vdso_data, ts_end);
+       OFFSET(__VDSO_CLOCK_REALTIME_RES, vdso_data, hrtimer_res);
        OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base);
        OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time);
        OFFSET(__VDSO_GETCPU_VAL, vdso_per_cpu_data, getcpu_val);
@@ -86,7 +87,6 @@ int main(void)
        DEFINE(__CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
        DEFINE(__CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
        DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
-       DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
        DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC);
        BLANK();
        /* idle data offsets */
index 6364460..263075a 100644 (file)
@@ -198,9 +198,10 @@ static debug_entry_t ***debug_areas_alloc(int pages_per_area, int nr_areas)
        if (!areas)
                goto fail_malloc_areas;
        for (i = 0; i < nr_areas; i++) {
+               /* GFP_NOWARN to avoid user triggerable WARN, we handle fails */
                areas[i] = kmalloc_array(pages_per_area,
                                         sizeof(debug_entry_t *),
-                                        GFP_KERNEL);
+                                        GFP_KERNEL | __GFP_NOWARN);
                if (!areas[i])
                        goto fail_malloc_areas2;
                for (j = 0; j < pages_per_area; j++) {
index cd241ee..0782772 100644 (file)
@@ -170,6 +170,8 @@ static noinline __init void setup_lowcore_early(void)
        psw_t psw;
 
        psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
+       if (IS_ENABLED(CONFIG_KASAN))
+               psw.mask |= PSW_MASK_DAT;
        psw.addr = (unsigned long) s390_base_ext_handler;
        S390_lowcore.external_new_psw = psw;
        psw.addr = (unsigned long) s390_base_pgm_handler;
index 50ff6dd..969b35b 100644 (file)
@@ -378,9 +378,9 @@ ENTRY(system_call)
        stmg    %r8,%r15,__LC_SAVE_AREA_SYNC
        BPOFF
        lg      %r12,__LC_CURRENT
-       lghi    %r13,__TASK_thread
        lghi    %r14,_PIF_SYSCALL
 .Lsysc_per:
+       lghi    %r13,__TASK_thread
        lg      %r15,__LC_KERNEL_STACK
        la      %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
        UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
@@ -401,9 +401,9 @@ ENTRY(system_call)
        jnz     .Lsysc_nr_ok
        # svc 0: system call number in %r1
        llgfr   %r1,%r1                         # clear high word in r1
+       sth     %r1,__PT_INT_CODE+2(%r11)
        cghi    %r1,NR_syscalls
        jnl     .Lsysc_nr_ok
-       sth     %r1,__PT_INT_CODE+2(%r11)
        slag    %r8,%r1,3
 .Lsysc_nr_ok:
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
index 44e01dd..b388e87 100644 (file)
@@ -83,7 +83,7 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 {
        struct ftrace_insn orig, new, old;
 
-       if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
+       if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
                return -EFAULT;
        if (addr == MCOUNT_ADDR) {
                /* Initial code replacement */
@@ -105,7 +105,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
        struct ftrace_insn orig, new, old;
 
-       if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
+       if (copy_from_kernel_nofault(&old, (void *) rec->ip, sizeof(old)))
                return -EFAULT;
        /* Replace nop with an ftrace call. */
        ftrace_generate_nop_insn(&orig);
index ccea9a2..90a2a17 100644 (file)
@@ -181,7 +181,7 @@ static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
                struct kobj_attribute *attr,                            \
                char *page)                                             \
 {                                                                      \
-       return snprintf(page, PAGE_SIZE, _format, ##args);              \
+       return scnprintf(page, PAGE_SIZE, _format, ##args);             \
 }
 
 #define IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk)                        \
index 85a711d..4f9e462 100644 (file)
@@ -881,12 +881,21 @@ out:
        return err;
 }
 
+static bool is_callchain_event(struct perf_event *event)
+{
+       u64 sample_type = event->attr.sample_type;
+
+       return sample_type & (PERF_SAMPLE_CALLCHAIN | PERF_SAMPLE_REGS_USER |
+                             PERF_SAMPLE_STACK_USER);
+}
+
 static int cpumsf_pmu_event_init(struct perf_event *event)
 {
        int err;
 
        /* No support for taken branch sampling */
-       if (has_branch_stack(event))
+       /* No support for callchain, stacks and registers */
+       if (has_branch_stack(event) || is_callchain_event(event))
                return -EOPNOTSUPP;
 
        switch (event->attr.type) {
index ce60a45..3cc15c0 100644 (file)
@@ -323,6 +323,25 @@ static inline void __poke_user_per(struct task_struct *child,
                child->thread.per_user.end = data;
 }
 
+static void fixup_int_code(struct task_struct *child, addr_t data)
+{
+       struct pt_regs *regs = task_pt_regs(child);
+       int ilc = regs->int_code >> 16;
+       u16 insn;
+
+       if (ilc > 6)
+               return;
+
+       if (ptrace_access_vm(child, regs->psw.addr - (regs->int_code >> 16),
+                       &insn, sizeof(insn), FOLL_FORCE) != sizeof(insn))
+               return;
+
+       /* double check that tracee stopped on svc instruction */
+       if ((insn >> 8) != 0xa)
+               return;
+
+       regs->int_code = 0x20000 | (data & 0xffff);
+}
 /*
  * Write a word to the user area of a process at location addr. This
  * operation does have an additional problem compared to peek_user.
@@ -334,7 +353,9 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
        struct user *dummy = NULL;
        addr_t offset;
 
+
        if (addr < (addr_t) &dummy->regs.acrs) {
+               struct pt_regs *regs = task_pt_regs(child);
                /*
                 * psw and gprs are stored on the stack
                 */
@@ -352,7 +373,11 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
                                /* Invalid addressing mode bits */
                                return -EINVAL;
                }
-               *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
+
+               if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
+                       addr == offsetof(struct user, regs.gprs[2]))
+                       fixup_int_code(child, data);
+               *(addr_t *)((addr_t) &regs->psw + addr) = data;
 
        } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
                /*
@@ -718,6 +743,10 @@ static int __poke_user_compat(struct task_struct *child,
                        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
                                (__u64)(tmp & PSW32_ADDR_AMODE);
                } else {
+
+                       if (test_pt_regs_flag(regs, PIF_SYSCALL) &&
+                               addr == offsetof(struct compat_user, regs.gprs[2]))
+                               fixup_int_code(child, data);
                        /* gpr 0-15 */
                        *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
                }
@@ -837,40 +866,66 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
 {
        unsigned long mask = -1UL;
+       long ret = -1;
+
+       if (is_compat_task())
+               mask = 0xffffffff;
 
        /*
         * The sysc_tracesys code in entry.S stored the system
         * call number to gprs[2].
         */
        if (test_thread_flag(TIF_SYSCALL_TRACE) &&
-           (tracehook_report_syscall_entry(regs) ||
-            regs->gprs[2] >= NR_syscalls)) {
+           tracehook_report_syscall_entry(regs)) {
                /*
-                * Tracing decided this syscall should not happen or the
-                * debugger stored an invalid system call number. Skip
+                * Tracing decided this syscall should not happen. Skip
                 * the system call and the system call restart handling.
                 */
-               clear_pt_regs_flag(regs, PIF_SYSCALL);
-               return -1;
+               goto skip;
        }
 
+#ifdef CONFIG_SECCOMP
        /* Do the secure computing check after ptrace. */
-       if (secure_computing()) {
-               /* seccomp failures shouldn't expose any additional code. */
-               return -1;
+       if (unlikely(test_thread_flag(TIF_SECCOMP))) {
+               struct seccomp_data sd;
+
+               if (is_compat_task()) {
+                       sd.instruction_pointer = regs->psw.addr & 0x7fffffff;
+                       sd.arch = AUDIT_ARCH_S390;
+               } else {
+                       sd.instruction_pointer = regs->psw.addr;
+                       sd.arch = AUDIT_ARCH_S390X;
+               }
+
+               sd.nr = regs->int_code & 0xffff;
+               sd.args[0] = regs->orig_gpr2 & mask;
+               sd.args[1] = regs->gprs[3] & mask;
+               sd.args[2] = regs->gprs[4] & mask;
+               sd.args[3] = regs->gprs[5] & mask;
+               sd.args[4] = regs->gprs[6] & mask;
+               sd.args[5] = regs->gprs[7] & mask;
+
+               if (__secure_computing(&sd) == -1)
+                       goto skip;
        }
+#endif /* CONFIG_SECCOMP */
 
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
-               trace_sys_enter(regs, regs->gprs[2]);
+               trace_sys_enter(regs, regs->int_code & 0xffff);
 
-       if (is_compat_task())
-               mask = 0xffffffff;
 
-       audit_syscall_entry(regs->gprs[2], regs->orig_gpr2 & mask,
+       audit_syscall_entry(regs->int_code & 0xffff, regs->orig_gpr2 & mask,
                            regs->gprs[3] &mask, regs->gprs[4] &mask,
                            regs->gprs[5] &mask);
 
+       if ((signed long)regs->gprs[2] >= NR_syscalls) {
+               regs->gprs[2] = -ENOSYS;
+               ret = -ENOSYS;
+       }
        return regs->gprs[2];
+skip:
+       clear_pt_regs_flag(regs, PIF_SYSCALL);
+       return ret;
 }
 
 asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
index 5853c98..07aa15b 100644 (file)
@@ -1100,6 +1100,7 @@ void __init setup_arch(char **cmdline_p)
        if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
                nospec_auto_detect();
 
+       jump_label_init();
        parse_early_param();
 #ifdef CONFIG_CRASH_DUMP
        /* Deactivate elfcorehdr= kernel parameter */
index bfdcb76..0d63c71 100644 (file)
 362  common    connect                 sys_connect                     sys_connect
 363  common    listen                  sys_listen                      sys_listen
 364  common    accept4                 sys_accept4                     sys_accept4
-365  common    getsockopt              sys_getsockopt                  compat_sys_getsockopt
-366  common    setsockopt              sys_setsockopt                  compat_sys_setsockopt
+365  common    getsockopt              sys_getsockopt                  sys_getsockopt
+366  common    setsockopt              sys_setsockopt                  sys_setsockopt
 367  common    getsockname             sys_getsockname                 sys_getsockname
 368  common    getpeername             sys_getpeername                 sys_getpeername
 369  common    sendto                  sys_sendto                      sys_sendto
index f9d070d..b1113b5 100644 (file)
@@ -301,6 +301,7 @@ void update_vsyscall(struct timekeeper *tk)
 
        vdso_data->tk_mult = tk->tkr_mono.mult;
        vdso_data->tk_shift = tk->tkr_mono.shift;
+       vdso_data->hrtimer_res = hrtimer_resolution;
        smp_wmb();
        ++vdso_data->tb_update_count;
 }
index 66e89b2..c296e5c 100644 (file)
@@ -331,7 +331,7 @@ EXPORT_SYMBOL_GPL(arch_make_page_accessible);
 static ssize_t uv_query_facilities(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *page)
 {
-       return snprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
+       return scnprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
                        uv_info.inst_calls_list[0],
                        uv_info.inst_calls_list[1],
                        uv_info.inst_calls_list[2],
@@ -344,7 +344,7 @@ static struct kobj_attribute uv_query_facilities_attr =
 static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *page)
 {
-       return snprintf(page, PAGE_SIZE, "%d\n",
+       return scnprintf(page, PAGE_SIZE, "%d\n",
                        uv_info.max_guest_cpus);
 }
 
@@ -354,7 +354,7 @@ static struct kobj_attribute uv_query_max_guest_cpus_attr =
 static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
                                      struct kobj_attribute *attr, char *page)
 {
-       return snprintf(page, PAGE_SIZE, "%d\n",
+       return scnprintf(page, PAGE_SIZE, "%d\n",
                        uv_info.max_num_sec_conf);
 }
 
@@ -364,7 +364,7 @@ static struct kobj_attribute uv_query_max_guest_vms_attr =
 static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *page)
 {
-       return snprintf(page, PAGE_SIZE, "%lx\n",
+       return scnprintf(page, PAGE_SIZE, "%lx\n",
                        uv_info.max_sec_stor_addr);
 }
 
index bec19e7..4a66a1c 100644 (file)
@@ -18,8 +18,8 @@ KBUILD_AFLAGS_64 += -m64 -s
 
 KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
 KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin
-KBUILD_CFLAGS_64 += -nostdlib -Wl,-soname=linux-vdso64.so.1 \
-                   -Wl,--hash-style=both
+ldflags-y := -fPIC -shared -nostdlib -soname=linux-vdso64.so.1 \
+            --hash-style=both --build-id -T
 
 $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
 $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_64)
@@ -37,8 +37,8 @@ KASAN_SANITIZE := n
 $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
 
 # link rule for the .so file, .lds has to be first
-$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
-       $(call if_changed,vdso64ld)
+$(obj)/vdso64.so.dbg: $(obj)/vdso64.lds $(obj-vdso64) FORCE
+       $(call if_changed,ld)
 
 # strip rule for the .so file
 $(obj)/%.so: OBJCOPYFLAGS := -S
@@ -50,8 +50,6 @@ $(obj-vdso64): %.o: %.S FORCE
        $(call if_changed_dep,vdso64as)
 
 # actual build commands
-quiet_cmd_vdso64ld = VDSO64L $@
-      cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
 quiet_cmd_vdso64as = VDSO64A $@
       cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
 
index 0814353..0c79caa 100644 (file)
        .type  __kernel_clock_getres,@function
 __kernel_clock_getres:
        CFI_STARTPROC
-       larl    %r1,4f
+       larl    %r1,3f
+       lg      %r0,0(%r1)
        cghi    %r2,__CLOCK_REALTIME_COARSE
        je      0f
        cghi    %r2,__CLOCK_MONOTONIC_COARSE
        je      0f
-       larl    %r1,3f
+       larl    %r1,_vdso_data
+       llgf    %r0,__VDSO_CLOCK_REALTIME_RES(%r1)
        cghi    %r2,__CLOCK_REALTIME
        je      0f
        cghi    %r2,__CLOCK_MONOTONIC
@@ -36,7 +38,6 @@ __kernel_clock_getres:
        jz      2f
 0:     ltgr    %r3,%r3
        jz      1f                              /* res == NULL */
-       lg      %r0,0(%r1)
        xc      0(8,%r3),0(%r3)                 /* set tp->tv_sec to zero */
        stg     %r0,8(%r3)                      /* store tp->tv_usec */
 1:     lghi    %r2,0
@@ -45,6 +46,5 @@ __kernel_clock_getres:
        svc     0
        br      %r14
        CFI_ENDPROC
-3:     .quad   __CLOCK_REALTIME_RES
-4:     .quad   __CLOCK_COARSE_RES
+3:     .quad   __CLOCK_COARSE_RES
        .size   __kernel_clock_getres,.-__kernel_clock_getres
index 6a24751..d53c2e2 100644 (file)
@@ -105,7 +105,7 @@ static int bad_address(void *p)
 {
        unsigned long dummy;
 
-       return probe_kernel_address((unsigned long *)p, dummy);
+       return get_kernel_nofault(dummy, (unsigned long *)p);
 }
 
 static void dump_pagetable(unsigned long asce, unsigned long address)
index 82df06d..3b5a4d2 100644 (file)
@@ -117,7 +117,7 @@ static inline pte_t __rste_to_pte(unsigned long rste)
                                             _PAGE_YOUNG);
 #ifdef CONFIG_MEM_SOFT_DIRTY
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
-                                            _PAGE_DIRTY);
+                                            _PAGE_SOFT_DIRTY);
 #endif
                pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC,
                                             _PAGE_NOEXEC);
index 22a0be6..1d17413 100644 (file)
@@ -62,11 +62,15 @@ notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
        long copied;
 
        spin_lock_irqsave(&s390_kernel_write_lock, flags);
-       while (size) {
-               copied = s390_kernel_write_odd(tmp, src, size);
-               tmp += copied;
-               src += copied;
-               size -= copied;
+       if (!(flags & PSW_MASK_DAT)) {
+               memcpy(dst, src, size);
+       } else {
+               while (size) {
+                       copied = s390_kernel_write_odd(tmp, src, size);
+                       tmp += copied;
+                       src += copied;
+                       size -= copied;
+               }
        }
        spin_unlock_irqrestore(&s390_kernel_write_lock, flags);
 
index f4242b8..26f97a1 100644 (file)
@@ -489,6 +489,24 @@ static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth)
        } while (re <= last);
 }
 
+static void bpf_skip(struct bpf_jit *jit, int size)
+{
+       if (size >= 6 && !is_valid_rel(size)) {
+               /* brcl 0xf,size */
+               EMIT6_PCREL_RIL(0xc0f4000000, size);
+               size -= 6;
+       } else if (size >= 4 && is_valid_rel(size)) {
+               /* brc 0xf,size */
+               EMIT4_PCREL(0xa7f40000, size);
+               size -= 4;
+       }
+       while (size >= 2) {
+               /* bcr 0,%0 */
+               _EMIT2(0x0700);
+               size -= 2;
+       }
+}
+
 /*
  * Emit function prologue
  *
@@ -501,10 +519,11 @@ static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
                /* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
                _EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
        } else {
-               /* j tail_call_start: NOP if no tail calls are used */
-               EMIT4_PCREL(0xa7f40000, 6);
-               /* bcr 0,%0 */
-               EMIT2(0x0700, 0, REG_0);
+               /*
+                * There are no tail calls. Insert nops in order to have
+                * tail_call_start at a predictable offset.
+                */
+               bpf_skip(jit, 6);
        }
        /* Tail calls have to skip above initialization */
        jit->tail_call_start = jit->prg;
@@ -1268,8 +1287,12 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
                last = (i == fp->len - 1) ? 1 : 0;
                if (last)
                        break;
-               /* j <exit> */
-               EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg);
+               if (!is_first_pass(jit) && can_use_rel(jit, jit->exit_ip))
+                       /* brc 0xf, <exit> */
+                       EMIT4_PCREL_RIC(0xa7040000, 0xf, jit->exit_ip);
+               else
+                       /* brcl 0xf, <exit> */
+                       EMIT6_PCREL_RILC(0xc0040000, 0xf, jit->exit_ip);
                break;
        /*
         * Branch relative (number of skipped instructions) to offset on
@@ -1417,21 +1440,10 @@ branch_ks:
                }
                break;
 branch_ku:
-               is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
-               /* clfi or clgfi %dst,imm */
-               EMIT6_IMM(is_jmp32 ? 0xc20f0000 : 0xc20e0000,
-                         dst_reg, imm);
-               if (!is_first_pass(jit) &&
-                   can_use_rel(jit, addrs[i + off + 1])) {
-                       /* brc mask,off */
-                       EMIT4_PCREL_RIC(0xa7040000,
-                                       mask >> 12, addrs[i + off + 1]);
-               } else {
-                       /* brcl mask,off */
-                       EMIT6_PCREL_RILC(0xc0040000,
-                                        mask >> 12, addrs[i + off + 1]);
-               }
-               break;
+               /* lgfi %w1,imm (load sign extend imm) */
+               src_reg = REG_1;
+               EMIT6_IMM(0xc0010000, src_reg, imm);
+               goto branch_xu;
 branch_xs:
                is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32;
                if (!is_first_pass(jit) &&
@@ -1510,7 +1522,14 @@ static bool bpf_is_new_addr_sane(struct bpf_jit *jit, int i)
  */
 static int bpf_set_addr(struct bpf_jit *jit, int i)
 {
-       if (!bpf_is_new_addr_sane(jit, i))
+       int delta;
+
+       if (is_codegen_pass(jit)) {
+               delta = jit->prg - jit->addrs[i];
+               if (delta < 0)
+                       bpf_skip(jit, -delta);
+       }
+       if (WARN_ON_ONCE(!bpf_is_new_addr_sane(jit, i)))
                return -1;
        jit->addrs[i] = jit->prg;
        return 0;
index 08e1d61..fdebd28 100644 (file)
@@ -94,7 +94,18 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
                }
                zdev->fh = ccdf->fh;
                zdev->state = ZPCI_FN_STATE_CONFIGURED;
-               zpci_create_device(zdev);
+               ret = zpci_enable_device(zdev);
+               if (ret)
+                       break;
+
+               pdev = pci_scan_single_device(zdev->zbus->bus, zdev->devfn);
+               if (!pdev)
+                       break;
+
+               pci_bus_add_device(pdev);
+               pci_lock_rescan_remove();
+               pci_bus_add_devices(zdev->zbus->bus);
+               pci_unlock_rescan_remove();
                break;
        case 0x0302: /* Reserved -> Standby */
                if (!zdev) {
index 1b04270..0646c59 100644 (file)
@@ -119,7 +119,7 @@ static void ftrace_mod_code(void)
         * But if one were to fail, then they all should, and if one were
         * to succeed, then they all should.
         */
-       mod_code_status = probe_kernel_write(mod_code_ip, mod_code_newcode,
+       mod_code_status = copy_to_kernel_nofault(mod_code_ip, mod_code_newcode,
                                             MCOUNT_INSN_SIZE);
 
        /* if we fail, then kill any new writers */
@@ -203,7 +203,7 @@ static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
         */
 
        /* read the text we want to modify */
-       if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
+       if (copy_from_kernel_nofault(replaced, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;
 
        /* Make sure it is what we expect it to be */
@@ -268,7 +268,7 @@ static int ftrace_mod(unsigned long ip, unsigned long old_addr,
 {
        unsigned char code[MCOUNT_INSN_SIZE];
 
-       if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
+       if (copy_from_kernel_nofault(code, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;
 
        if (old_addr != __raw_readl((unsigned long *)code))
index a330254..9c3d32b 100644 (file)
@@ -118,7 +118,7 @@ int is_valid_bugaddr(unsigned long addr)
 
        if (addr < PAGE_OFFSET)
                return 0;
-       if (probe_kernel_address((insn_size_t *)addr, opcode))
+       if (get_kernel_nofault(opcode, (insn_size_t *)addr))
                return 0;
        if (opcode == TRAPA_BUG_OPCODE)
                return 1;
index 489ffab..a45f0f3 100644 (file)
@@ -157,22 +157,22 @@ do_sys_shutdown: /* sys_shutdown(int, int) */
        nop
        nop
        nop
-do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */
+do_sys_setsockopt: /* sys_setsockopt(int, int, int, char *, int) */
 47:    ldswa           [%o1 + 0x0] %asi, %o0
-       sethi           %hi(compat_sys_setsockopt), %g1
+       sethi           %hi(sys_setsockopt), %g1
 48:    ldswa           [%o1 + 0x8] %asi, %o2
 49:    lduwa           [%o1 + 0xc] %asi, %o3
 50:    ldswa           [%o1 + 0x10] %asi, %o4
-       jmpl            %g1 + %lo(compat_sys_setsockopt), %g0
+       jmpl            %g1 + %lo(sys_setsockopt), %g0
 51:     ldswa          [%o1 + 0x4] %asi, %o1
        nop
-do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */
+do_sys_getsockopt: /* sys_getsockopt(int, int, int, u32, u32) */
 52:    ldswa           [%o1 + 0x0] %asi, %o0
-       sethi           %hi(compat_sys_getsockopt), %g1
+       sethi           %hi(sys_getsockopt), %g1
 53:    ldswa           [%o1 + 0x8] %asi, %o2
 54:    lduwa           [%o1 + 0xc] %asi, %o3
 55:    lduwa           [%o1 + 0x10] %asi, %o4
-       jmpl            %g1 + %lo(compat_sys_getsockopt), %g0
+       jmpl            %g1 + %lo(sys_getsockopt), %g0
 56:     ldswa          [%o1 + 0x4] %asi, %o1
        nop
 do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
index 8004a27..c59b379 100644 (file)
 115    32      getgroups32             sys_getgroups
 116    common  gettimeofday            sys_gettimeofday                compat_sys_gettimeofday
 117    common  getrusage               sys_getrusage                   compat_sys_getrusage
-118    common  getsockopt              sys_getsockopt                  compat_sys_getsockopt
+118    common  getsockopt              sys_getsockopt                  sys_getsockopt
 119    common  getcwd                  sys_getcwd
 120    common  readv                   sys_readv                       compat_sys_readv
 121    common  writev                  sys_writev                      compat_sys_writev
 352    common  userfaultfd             sys_userfaultfd
 353    common  bind                    sys_bind
 354    common  listen                  sys_listen
-355    common  setsockopt              sys_setsockopt                  compat_sys_setsockopt
+355    common  setsockopt              sys_setsockopt                  sys_setsockopt
 356    common  mlock2                  sys_mlock2
 357    common  copy_file_range         sys_copy_file_range
 358    common  preadv2                 sys_preadv2                     compat_sys_preadv2
index e929c09..8ccd568 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/kernel.h>
 #include <os.h>
 
-bool probe_kernel_read_allowed(const void *src, size_t size)
+bool copy_from_kernel_nofault_allowed(const void *src, size_t size)
 {
        void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE);
 
index 6a0cc52..883da0a 100644 (file)
@@ -67,7 +67,7 @@ config X86
        select ARCH_HAS_FILTER_PGPROT
        select ARCH_HAS_FORTIFY_SOURCE
        select ARCH_HAS_GCOV_PROFILE_ALL
-       select ARCH_HAS_KCOV                    if X86_64
+       select ARCH_HAS_KCOV                    if X86_64 && STACK_VALIDATION
        select ARCH_HAS_MEM_ENCRYPT
        select ARCH_HAS_MEMBARRIER_SYNC_CORE
        select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
index e821a7d..97d37f0 100644 (file)
@@ -213,7 +213,6 @@ SYM_FUNC_START(startup_32)
         * We place all of the values on our mini stack so lret can
         * used to perform that far jump.
         */
-       pushl   $__KERNEL_CS
        leal    startup_64(%ebp), %eax
 #ifdef CONFIG_EFI_MIXED
        movl    efi32_boot_args(%ebp), %edi
@@ -224,11 +223,20 @@ SYM_FUNC_START(startup_32)
        movl    efi32_boot_args+8(%ebp), %edx   // saved bootparams pointer
        cmpl    $0, %edx
        jnz     1f
+       /*
+        * efi_pe_entry uses MS calling convention, which requires 32 bytes of
+        * shadow space on the stack even if all arguments are passed in
+        * registers. We also need an additional 8 bytes for the space that
+        * would be occupied by the return address, and this also results in
+        * the correct stack alignment for entry.
+        */
+       subl    $40, %esp
        leal    efi_pe_entry(%ebp), %eax
        movl    %edi, %ecx                      // MS calling convention
        movl    %esi, %edx
 1:
 #endif
+       pushl   $__KERNEL_CS
        pushl   %eax
 
        /* Enter paged protected Mode, activating Long Mode */
@@ -784,6 +792,7 @@ SYM_DATA_LOCAL(boot_heap,   .fill BOOT_HEAP_SIZE, 1, 0)
 
 SYM_DATA_START_LOCAL(boot_stack)
        .fill BOOT_STACK_SIZE, 1, 0
+       .balign 16
 SYM_DATA_END_LABEL(boot_stack, SYM_L_LOCAL, boot_stack_end)
 
 /*
index bd3f141..e83b3f1 100644 (file)
 #define CREATE_TRACE_POINTS
 #include <trace/events/syscalls.h>
 
+/* Check that the stack and regs on entry from user mode are sane. */
+static void check_user_regs(struct pt_regs *regs)
+{
+       if (IS_ENABLED(CONFIG_DEBUG_ENTRY)) {
+               /*
+                * Make sure that the entry code gave us a sensible EFLAGS
+                * register.  Native because we want to check the actual CPU
+                * state, not the interrupt state as imagined by Xen.
+                */
+               unsigned long flags = native_save_fl();
+               WARN_ON_ONCE(flags & (X86_EFLAGS_AC | X86_EFLAGS_DF |
+                                     X86_EFLAGS_NT));
+
+               /* We think we came from user mode. Make sure pt_regs agrees. */
+               WARN_ON_ONCE(!user_mode(regs));
+
+               /*
+                * All entries from user mode (except #DF) should be on the
+                * normal thread stack and should have user pt_regs in the
+                * correct location.
+                */
+               WARN_ON_ONCE(!on_thread_stack());
+               WARN_ON_ONCE(regs != task_pt_regs(current));
+       }
+}
+
 #ifdef CONFIG_CONTEXT_TRACKING
 /**
  * enter_from_user_mode - Establish state when coming from user mode
@@ -127,9 +153,6 @@ static long syscall_trace_enter(struct pt_regs *regs)
        unsigned long ret = 0;
        u32 work;
 
-       if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
-               BUG_ON(regs != task_pt_regs(current));
-
        work = READ_ONCE(ti->flags);
 
        if (work & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU)) {
@@ -346,6 +369,8 @@ __visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
 {
        struct thread_info *ti;
 
+       check_user_regs(regs);
+
        enter_from_user_mode();
        instrumentation_begin();
 
@@ -409,6 +434,8 @@ static void do_syscall_32_irqs_on(struct pt_regs *regs)
 /* Handles int $0x80 */
 __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
 {
+       check_user_regs(regs);
+
        enter_from_user_mode();
        instrumentation_begin();
 
@@ -460,6 +487,8 @@ __visible noinstr long do_fast_syscall_32(struct pt_regs *regs)
                                        vdso_image_32.sym_int80_landing_pad;
        bool success;
 
+       check_user_regs(regs);
+
        /*
         * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
         * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
@@ -510,6 +539,18 @@ __visible noinstr long do_fast_syscall_32(struct pt_regs *regs)
                (regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
 #endif
 }
+
+/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
+__visible noinstr long do_SYSENTER_32(struct pt_regs *regs)
+{
+       /* SYSENTER loses RSP, but the vDSO saved it in RBP. */
+       regs->sp = regs->bp;
+
+       /* SYSENTER clobbers EFLAGS.IF.  Assume it was set in usermode. */
+       regs->flags |= X86_EFLAGS_IF;
+
+       return do_fast_syscall_32(regs);
+}
 #endif
 
 SYSCALL_DEFINE0(ni_syscall)
@@ -553,6 +594,7 @@ SYSCALL_DEFINE0(ni_syscall)
 bool noinstr idtentry_enter_cond_rcu(struct pt_regs *regs)
 {
        if (user_mode(regs)) {
+               check_user_regs(regs);
                enter_from_user_mode();
                return false;
        }
@@ -686,6 +728,7 @@ void noinstr idtentry_exit_cond_rcu(struct pt_regs *regs, bool rcu_exit)
  */
 void noinstr idtentry_enter_user(struct pt_regs *regs)
 {
+       check_user_regs(regs);
        enter_from_user_mode();
 }
 
index 024d7d2..2d0bd5d 100644 (file)
@@ -933,9 +933,8 @@ SYM_FUNC_START(entry_SYSENTER_32)
 
 .Lsysenter_past_esp:
        pushl   $__USER_DS              /* pt_regs->ss */
-       pushl   %ebp                    /* pt_regs->sp (stashed in bp) */
+       pushl   $0                      /* pt_regs->sp (placeholder) */
        pushfl                          /* pt_regs->flags (except IF = 0) */
-       orl     $X86_EFLAGS_IF, (%esp)  /* Fix IF */
        pushl   $__USER_CS              /* pt_regs->cs */
        pushl   $0                      /* pt_regs->ip = 0 (placeholder) */
        pushl   %eax                    /* pt_regs->orig_ax */
@@ -965,7 +964,7 @@ SYM_FUNC_START(entry_SYSENTER_32)
 .Lsysenter_flags_fixed:
 
        movl    %esp, %eax
-       call    do_fast_syscall_32
+       call    do_SYSENTER_32
        /* XEN PV guests always use IRET path */
        ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
                    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
index 0f974ae..541fdaf 100644 (file)
@@ -57,29 +57,30 @@ SYM_CODE_START(entry_SYSENTER_compat)
 
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
-       /*
-        * User tracing code (ptrace or signal handlers) might assume that
-        * the saved RAX contains a 32-bit number when we're invoking a 32-bit
-        * syscall.  Just in case the high bits are nonzero, zero-extend
-        * the syscall number.  (This could almost certainly be deleted
-        * with no ill effects.)
-        */
-       movl    %eax, %eax
-
        /* Construct struct pt_regs on stack */
        pushq   $__USER32_DS            /* pt_regs->ss */
-       pushq   %rbp                    /* pt_regs->sp (stashed in bp) */
+       pushq   $0                      /* pt_regs->sp = 0 (placeholder) */
 
        /*
         * Push flags.  This is nasty.  First, interrupts are currently
-        * off, but we need pt_regs->flags to have IF set.  Second, even
-        * if TF was set when SYSENTER started, it's clear by now.  We fix
-        * that later using TIF_SINGLESTEP.
+        * off, but we need pt_regs->flags to have IF set.  Second, if TS
+        * was set in usermode, it's still set, and we're singlestepping
+        * through this code.  do_SYSENTER_32() will fix up IF.
         */
        pushfq                          /* pt_regs->flags (except IF = 0) */
-       orl     $X86_EFLAGS_IF, (%rsp)  /* Fix saved flags */
        pushq   $__USER32_CS            /* pt_regs->cs */
        pushq   $0                      /* pt_regs->ip = 0 (placeholder) */
+SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL)
+
+       /*
+        * User tracing code (ptrace or signal handlers) might assume that
+        * the saved RAX contains a 32-bit number when we're invoking a 32-bit
+        * syscall.  Just in case the high bits are nonzero, zero-extend
+        * the syscall number.  (This could almost certainly be deleted
+        * with no ill effects.)
+        */
+       movl    %eax, %eax
+
        pushq   %rax                    /* pt_regs->orig_ax */
        pushq   %rdi                    /* pt_regs->di */
        pushq   %rsi                    /* pt_regs->si */
@@ -135,7 +136,7 @@ SYM_CODE_START(entry_SYSENTER_compat)
 .Lsysenter_flags_fixed:
 
        movq    %rsp, %rdi
-       call    do_fast_syscall_32
+       call    do_SYSENTER_32
        /* XEN PV guests always use IRET path */
        ALTERNATIVE "testl %eax, %eax; jz swapgs_restore_regs_and_return_to_usermode", \
                    "jmp swapgs_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
index 3d8d70d..1583831 100644 (file)
@@ -8,6 +8,13 @@
 #include <asm/unistd.h>
 #include <asm/syscall.h>
 
+/*
+ * Reuse the 64-bit entry points for the x32 versions that occupy different
+ * slots in the syscall table.
+ */
+#define __x32_sys_getsockopt   __x64_sys_getsockopt
+#define __x32_sys_setsockopt   __x64_sys_setsockopt
+
 #define __SYSCALL_64(nr, sym)
 
 #define __SYSCALL_X32(nr, sym) extern long __x32_##sym(const struct pt_regs *);
index d8f8a1a..43742a6 100644 (file)
 362    i386    connect                 sys_connect
 363    i386    listen                  sys_listen
 364    i386    accept4                 sys_accept4
-365    i386    getsockopt              sys_getsockopt                  compat_sys_getsockopt
-366    i386    setsockopt              sys_setsockopt                  compat_sys_setsockopt
+365    i386    getsockopt              sys_getsockopt                  sys_getsockopt
+366    i386    setsockopt              sys_setsockopt                  sys_setsockopt
 367    i386    getsockname             sys_getsockname
 368    i386    getpeername             sys_getpeername
 369    i386    sendto                  sys_sendto
index 78847b3..e008d63 100644 (file)
 538    x32     sendmmsg                compat_sys_sendmmsg
 539    x32     process_vm_readv        compat_sys_process_vm_readv
 540    x32     process_vm_writev       compat_sys_process_vm_writev
-541    x32     setsockopt              compat_sys_setsockopt
-542    x32     getsockopt              compat_sys_getsockopt
+541    x32     setsockopt              sys_setsockopt
+542    x32     getsockopt              sys_getsockopt
 543    x32     io_setup                compat_sys_io_setup
 544    x32     io_submit               compat_sys_io_submit
 545    x32     execveat                compat_sys_execveat
index 12c42eb..9933c0e 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-y                                  += core.o probe.o
-obj-$(PERF_EVENTS_INTEL_RAPL)          += rapl.o
+obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL)   += rapl.o
 obj-y                                  += amd/
 obj-$(CONFIG_X86_LOCAL_APIC)            += msr.o
 obj-$(CONFIG_CPU_SUP_INTEL)            += intel/
index a54c6a4..6035df1 100644 (file)
@@ -375,7 +375,10 @@ void __init hyperv_init(void)
        guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
        wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
 
-       hv_hypercall_pg = vmalloc_exec(PAGE_SIZE);
+       hv_hypercall_pg = __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START,
+                       VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_ROX,
+                       VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
+                       __builtin_return_address(0));
        if (hv_hypercall_pg == NULL) {
                wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
                goto remove_cpuhp_state;
index 35460fe..0367efd 100644 (file)
@@ -201,12 +201,8 @@ arch_test_and_change_bit(long nr, volatile unsigned long *addr)
        return GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), *addr, c, "Ir", nr);
 }
 
-static __no_kcsan_or_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
+static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
 {
-       /*
-        * Because this is a plain access, we need to disable KCSAN here to
-        * avoid double instrumentation via instrumented bitops.
-        */
        return ((1UL << (nr & (BITS_PER_LONG-1))) &
                (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
 }
index fb34ff6..0281895 100644 (file)
@@ -75,6 +75,12 @@ do {                                                         \
        unreachable();                                          \
 } while (0)
 
+/*
+ * This instrumentation_begin() is strictly speaking incorrect; but it
+ * suppresses the complaints from WARN()s in noinstr code. If such a WARN()
+ * were to trigger, we'd rather wreck the machine in an attempt to get the
+ * message out than not know about it.
+ */
 #define __WARN_FLAGS(flags)                                    \
 do {                                                           \
        instrumentation_begin();                                \
index dd17c2d..da78ccb 100644 (file)
@@ -58,4 +58,9 @@ static inline bool handle_guest_split_lock(unsigned long ip)
        return false;
 }
 #endif
+#ifdef CONFIG_IA32_FEAT_CTL
+void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
+#else
+static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {}
+#endif
 #endif /* _ASM_X86_CPU_H */
index 6722ffc..3afa990 100644 (file)
@@ -11,5 +11,23 @@ extern cpumask_var_t cpu_sibling_setup_mask;
 
 extern void setup_cpu_local_masks(void);
 
+/*
+ * NMI and MCE exceptions need cpu_is_offline() _really_ early,
+ * provide an arch_ special for them to avoid instrumentation.
+ */
+#if NR_CPUS > 1
+static __always_inline bool arch_cpu_online(int cpu)
+{
+       return arch_test_bit(cpu, cpumask_bits(cpu_online_mask));
+}
+#else
+static __always_inline bool arch_cpu_online(int cpu)
+{
+       return cpu == 0;
+}
+#endif
+
+#define arch_cpu_is_offline(cpu)       unlikely(!arch_cpu_online(cpu))
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_CPUMASK_H */
index 42159f4..845e748 100644 (file)
@@ -623,6 +623,11 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
  * MXCSR and XCR definitions:
  */
 
+static inline void ldmxcsr(u32 mxcsr)
+{
+       asm volatile("ldmxcsr %0" :: "m" (mxcsr));
+}
+
 extern unsigned int mxcsr_feature_mask;
 
 #define XCR_XFEATURE_ENABLED_MASK      0x00000000
index cf51c50..eeac6dc 100644 (file)
@@ -353,10 +353,6 @@ static __always_inline void __##func(struct pt_regs *regs)
 
 #else  /* CONFIG_X86_64 */
 
-/* Maps to a regular IDTENTRY on 32bit for now */
-# define DECLARE_IDTENTRY_IST          DECLARE_IDTENTRY
-# define DEFINE_IDTENTRY_IST           DEFINE_IDTENTRY
-
 /**
  * DECLARE_IDTENTRY_DF - Declare functions for double fault 32bit variant
  * @vector:    Vector number (ignored for C)
@@ -387,28 +383,18 @@ __visible noinstr void func(struct pt_regs *regs,                 \
 #endif /* !CONFIG_X86_64 */
 
 /* C-Code mapping */
+#define DECLARE_IDTENTRY_NMI           DECLARE_IDTENTRY_RAW
+#define DEFINE_IDTENTRY_NMI            DEFINE_IDTENTRY_RAW
+
+#ifdef CONFIG_X86_64
 #define DECLARE_IDTENTRY_MCE           DECLARE_IDTENTRY_IST
 #define DEFINE_IDTENTRY_MCE            DEFINE_IDTENTRY_IST
 #define DEFINE_IDTENTRY_MCE_USER       DEFINE_IDTENTRY_NOIST
 
-#define DECLARE_IDTENTRY_NMI           DECLARE_IDTENTRY_RAW
-#define DEFINE_IDTENTRY_NMI            DEFINE_IDTENTRY_RAW
-
 #define DECLARE_IDTENTRY_DEBUG         DECLARE_IDTENTRY_IST
 #define DEFINE_IDTENTRY_DEBUG          DEFINE_IDTENTRY_IST
 #define DEFINE_IDTENTRY_DEBUG_USER     DEFINE_IDTENTRY_NOIST
-
-/**
- * DECLARE_IDTENTRY_XEN - Declare functions for XEN redirect IDT entry points
- * @vector:    Vector number (ignored for C)
- * @func:      Function name of the entry point
- *
- * Used for xennmi and xendebug redirections. No DEFINE as this is all ASM
- * indirection magic.
- */
-#define DECLARE_IDTENTRY_XEN(vector, func)                             \
-       asmlinkage void xen_asm_exc_xen##func(void);                    \
-       asmlinkage void asm_exc_xen##func(void)
+#endif
 
 #else /* !__ASSEMBLY__ */
 
@@ -455,9 +441,6 @@ __visible noinstr void func(struct pt_regs *regs,                   \
 # define DECLARE_IDTENTRY_MCE(vector, func)                            \
        DECLARE_IDTENTRY(vector, func)
 
-# define DECLARE_IDTENTRY_DEBUG(vector, func)                          \
-       DECLARE_IDTENTRY(vector, func)
-
 /* No ASM emitted for DF as this goes through a C shim */
 # define DECLARE_IDTENTRY_DF(vector, func)
 
@@ -469,10 +452,6 @@ __visible noinstr void func(struct pt_regs *regs,                  \
 /* No ASM code emitted for NMI */
 #define DECLARE_IDTENTRY_NMI(vector, func)
 
-/* XEN NMI and DB wrapper */
-#define DECLARE_IDTENTRY_XEN(vector, func)                             \
-       idtentry vector asm_exc_xen##func exc_##func has_error_code=0
-
 /*
  * ASM code to emit the common vector entry stubs where each stub is
  * packed into 8 bytes.
@@ -565,16 +544,28 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_BP,         exc_int3);
 DECLARE_IDTENTRY_RAW_ERRORCODE(X86_TRAP_PF,    exc_page_fault);
 
 #ifdef CONFIG_X86_MCE
+#ifdef CONFIG_X86_64
 DECLARE_IDTENTRY_MCE(X86_TRAP_MC,      exc_machine_check);
+#else
+DECLARE_IDTENTRY_RAW(X86_TRAP_MC,      exc_machine_check);
+#endif
 #endif
 
 /* NMI */
 DECLARE_IDTENTRY_NMI(X86_TRAP_NMI,     exc_nmi);
-DECLARE_IDTENTRY_XEN(X86_TRAP_NMI,     nmi);
+#ifdef CONFIG_XEN_PV
+DECLARE_IDTENTRY_RAW(X86_TRAP_NMI,     xenpv_exc_nmi);
+#endif
 
 /* #DB */
+#ifdef CONFIG_X86_64
 DECLARE_IDTENTRY_DEBUG(X86_TRAP_DB,    exc_debug);
-DECLARE_IDTENTRY_XEN(X86_TRAP_DB,      debug);
+#else
+DECLARE_IDTENTRY_RAW(X86_TRAP_DB,      exc_debug);
+#endif
+#ifdef CONFIG_XEN_PV
+DECLARE_IDTENTRY_RAW(X86_TRAP_DB,      xenpv_exc_debug);
+#endif
 
 /* #DF */
 DECLARE_IDTENTRY_DF(X86_TRAP_DF,       exc_double_fault);
index f8998e9..be5363b 100644 (file)
@@ -943,7 +943,7 @@ struct kvm_arch {
        atomic_t vapics_in_nmi_mode;
        struct mutex apic_map_lock;
        struct kvm_apic_map *apic_map;
-       bool apic_map_dirty;
+       atomic_t apic_map_dirty;
 
        bool apic_access_page_done;
        unsigned long apicv_inhibit_reasons;
@@ -1220,7 +1220,7 @@ struct kvm_x86_ops {
        void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
                                           struct kvm_memory_slot *slot,
                                           gfn_t offset, unsigned long mask);
-       int (*write_log_dirty)(struct kvm_vcpu *vcpu);
+       int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
 
        /* pmu operations of sub-arch */
        const struct kvm_pmu_ops *pmu_ops;
index 73d997a..e039a93 100644 (file)
@@ -25,8 +25,6 @@
 #define TPAUSE_C01_STATE               1
 #define TPAUSE_C02_STATE               0
 
-u32 get_umwait_control_msr(void);
-
 static inline void __monitor(const void *eax, unsigned long ecx,
                             unsigned long edx)
 {
index 2da1f95..816b31c 100644 (file)
@@ -194,6 +194,7 @@ enum page_cache_mode {
 #define _PAGE_TABLE_NOENC       (__PP|__RW|_USR|___A|   0|___D|   0|   0)
 #define _PAGE_TABLE             (__PP|__RW|_USR|___A|   0|___D|   0|   0| _ENC)
 #define __PAGE_KERNEL_RO        (__PP|   0|   0|___A|__NX|___D|   0|___G)
+#define __PAGE_KERNEL_ROX       (__PP|   0|   0|___A|   0|___D|   0|___G)
 #define __PAGE_KERNEL_NOCACHE   (__PP|__RW|   0|___A|__NX|___D|   0|___G| __NC)
 #define __PAGE_KERNEL_VVAR      (__PP|   0|_USR|___A|__NX|___D|   0|___G)
 #define __PAGE_KERNEL_LARGE     (__PP|__RW|   0|___A|__NX|___D|_PSE|___G)
@@ -219,6 +220,7 @@ enum page_cache_mode {
 #define PAGE_KERNEL_RO         __pgprot_mask(__PAGE_KERNEL_RO         | _ENC)
 #define PAGE_KERNEL_EXEC       __pgprot_mask(__PAGE_KERNEL_EXEC       | _ENC)
 #define PAGE_KERNEL_EXEC_NOENC __pgprot_mask(__PAGE_KERNEL_EXEC       |    0)
+#define PAGE_KERNEL_ROX                __pgprot_mask(__PAGE_KERNEL_ROX        | _ENC)
 #define PAGE_KERNEL_NOCACHE    __pgprot_mask(__PAGE_KERNEL_NOCACHE    | _ENC)
 #define PAGE_KERNEL_LARGE      __pgprot_mask(__PAGE_KERNEL_LARGE      | _ENC)
 #define PAGE_KERNEL_LARGE_EXEC __pgprot_mask(__PAGE_KERNEL_LARGE_EXEC | _ENC)
index 42cd333..03b7c4c 100644 (file)
@@ -370,7 +370,7 @@ struct x86_hw_tss {
 #define IO_BITMAP_OFFSET_INVALID       (__KERNEL_TSS_LIMIT + 1)
 
 struct entry_stack {
-       unsigned long           words[64];
+       char    stack[PAGE_SIZE];
 };
 
 struct entry_stack_page {
index ebedeab..255b2dd 100644 (file)
@@ -278,7 +278,7 @@ static inline unsigned long *regs_get_kernel_stack_nth_addr(struct pt_regs *regs
 }
 
 /* To avoid include hell, we can't include uaccess.h */
-extern long probe_kernel_read(void *dst, const void *src, size_t size);
+extern long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
 
 /**
  * regs_get_kernel_stack_nth() - get Nth entry of the stack
@@ -298,7 +298,7 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
 
        addr = regs_get_kernel_stack_nth_addr(regs, n);
        if (addr) {
-               ret = probe_kernel_read(&val, addr, sizeof(val));
+               ret = copy_from_kernel_nofault(&val, addr, sizeof(val));
                if (!ret)
                        return val;
        }
index 17c5a03..0780f97 100644 (file)
@@ -408,14 +408,15 @@ struct kvm_vmx_nested_state_data {
 };
 
 struct kvm_vmx_nested_state_hdr {
-       __u32 flags;
        __u64 vmxon_pa;
        __u64 vmcs12_pa;
-       __u64 preemption_timer_deadline;
 
        struct {
                __u16 flags;
        } smm;
+
+       __u32 flags;
+       __u64 preemption_timer_deadline;
 };
 
 struct kvm_svm_nested_state_data {
index 4267925..c5cf336 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/sched.h>
 #include <linux/sched/clock.h>
 
+#include <asm/cpu.h>
 #include <asm/cpufeature.h>
 #include <asm/e820/api.h>
 #include <asm/mtrr.h>
index 043d93c..95c090a 100644 (file)
@@ -347,6 +347,9 @@ out:
        cr4_clear_bits(X86_CR4_UMIP);
 }
 
+/* These bits should not change their value after CPU init is finished. */
+static const unsigned long cr4_pinned_mask =
+       X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP | X86_CR4_FSGSBASE;
 static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
 static unsigned long cr4_pinned_bits __ro_after_init;
 
@@ -371,20 +374,20 @@ EXPORT_SYMBOL(native_write_cr0);
 
 void native_write_cr4(unsigned long val)
 {
-       unsigned long bits_missing = 0;
+       unsigned long bits_changed = 0;
 
 set_register:
        asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));
 
        if (static_branch_likely(&cr_pinning)) {
-               if (unlikely((val & cr4_pinned_bits) != cr4_pinned_bits)) {
-                       bits_missing = ~val & cr4_pinned_bits;
-                       val |= bits_missing;
+               if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
+                       bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
+                       val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
                        goto set_register;
                }
-               /* Warn after we've set the missing bits. */
-               WARN_ONCE(bits_missing, "CR4 bits went missing: %lx!?\n",
-                         bits_missing);
+               /* Warn after we've corrected the changed bits. */
+               WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
+                         bits_changed);
        }
 }
 #if IS_MODULE(CONFIG_LKDTM)
@@ -419,7 +422,7 @@ void cr4_init(void)
        if (boot_cpu_has(X86_FEATURE_PCID))
                cr4 |= X86_CR4_PCIDE;
        if (static_branch_likely(&cr_pinning))
-               cr4 |= cr4_pinned_bits;
+               cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;
 
        __write_cr4(cr4);
 
@@ -434,10 +437,7 @@ void cr4_init(void)
  */
 static void __init setup_cr_pinning(void)
 {
-       unsigned long mask;
-
-       mask = (X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP);
-       cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & mask;
+       cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
        static_key_enable(&cr_pinning.key);
 }
 
index fb538fc..9d03369 100644 (file)
@@ -81,8 +81,4 @@ extern void update_srbds_msr(void);
 
 extern u64 x86_read_arch_cap_msr(void);
 
-#ifdef CONFIG_IA32_FEAT_CTL
-void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
-#endif
-
 #endif /* ARCH_X86_CPU_H */
index c25a67a..0ab48f1 100644 (file)
@@ -50,6 +50,13 @@ static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
 static u64 msr_test_ctrl_cache __ro_after_init;
 
 /*
+ * With a name like MSR_TEST_CTL it should go without saying, but don't touch
+ * MSR_TEST_CTL unless the CPU is one of the whitelisted models.  Writing it
+ * on CPUs that do not support SLD can cause fireworks, even when writing '0'.
+ */
+static bool cpu_model_supports_sld __ro_after_init;
+
+/*
  * Processors which have self-snooping capability can handle conflicting
  * memory type across CPUs by snooping its own cache. However, there exists
  * CPU models in which having conflicting memory types still leads to
@@ -1071,7 +1078,8 @@ static void sld_update_msr(bool on)
 
 static void split_lock_init(void)
 {
-       split_lock_verify_msr(sld_state != sld_off);
+       if (cpu_model_supports_sld)
+               split_lock_verify_msr(sld_state != sld_off);
 }
 
 static void split_lock_warn(unsigned long ip)
@@ -1177,5 +1185,6 @@ void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
                return;
        }
 
+       cpu_model_supports_sld = true;
        split_lock_setup();
 }
index ce9120c..14e4b4d 100644 (file)
@@ -1083,7 +1083,7 @@ static noinstr bool mce_check_crashing_cpu(void)
 {
        unsigned int cpu = smp_processor_id();
 
-       if (cpu_is_offline(cpu) ||
+       if (arch_cpu_is_offline(cpu) ||
            (crashing_cpu != -1 && crashing_cpu != cpu)) {
                u64 mcgstatus;
 
@@ -1901,6 +1901,8 @@ void (*machine_check_vector)(struct pt_regs *) = unexpected_machine_check;
 
 static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
 {
+       WARN_ON_ONCE(user_mode(regs));
+
        /*
         * Only required when from kernel mode. See
         * mce_check_crashing_cpu() for details.
@@ -1954,7 +1956,7 @@ DEFINE_IDTENTRY_MCE_USER(exc_machine_check)
 }
 #else
 /* 32bit unified entry point */
-DEFINE_IDTENTRY_MCE(exc_machine_check)
+DEFINE_IDTENTRY_RAW(exc_machine_check)
 {
        unsigned long dr7;
 
index 12f967c..6a9df71 100644 (file)
@@ -981,10 +981,10 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c)
 
                c->x86_cache_max_rmid  = ecx;
                c->x86_cache_occ_scale = ebx;
-               if (c->x86_vendor == X86_VENDOR_INTEL)
-                       c->x86_cache_mbm_width_offset = eax & 0xff;
-               else
-                       c->x86_cache_mbm_width_offset = -1;
+               c->x86_cache_mbm_width_offset = eax & 0xff;
+
+               if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset)
+                       c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
        }
 }
 
index f20a47d..5ffa322 100644 (file)
@@ -37,6 +37,7 @@
 #define MBA_IS_LINEAR                  0x4
 #define MBA_MAX_MBPS                   U32_MAX
 #define MAX_MBA_BW_AMD                 0x800
+#define MBM_CNTR_WIDTH_OFFSET_AMD      20
 
 #define RMID_VAL_ERROR                 BIT_ULL(63)
 #define RMID_VAL_UNAVAIL               BIT_ULL(62)
index 23b4b61..3f844f1 100644 (file)
@@ -1117,6 +1117,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
        _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
        if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
                _r_cdp = NULL;
+               _d_cdp = NULL;
                ret = -EINVAL;
        }
 
index 300e3fd..ec8064c 100644 (file)
  */
 static u32 umwait_control_cached = UMWAIT_CTRL_VAL(100000, UMWAIT_C02_ENABLE);
 
-u32 get_umwait_control_msr(void)
-{
-       return umwait_control_cached;
-}
-EXPORT_SYMBOL_GPL(get_umwait_control_msr);
-
 /*
  * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by
  * hardware or BIOS before kernel boot.
index df1358b..05fa4ef 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/sched.h>
 #include <linux/sched/clock.h>
 
+#include <asm/cpu.h>
 #include <asm/cpufeature.h>
 
 #include "cpu.h"
index 456511b..b037cfa 100644 (file)
@@ -106,7 +106,7 @@ void show_opcodes(struct pt_regs *regs, const char *loglvl)
        bad_ip = user_mode(regs) &&
                __chk_range_not_ok(prologue, OPCODE_BUFSIZE, TASK_SIZE_MAX);
 
-       if (bad_ip || probe_kernel_read(opcodes, (u8 *)prologue,
+       if (bad_ip || copy_from_kernel_nofault(opcodes, (u8 *)prologue,
                                        OPCODE_BUFSIZE)) {
                printk("%sCode: Bad RIP value.\n", loglvl);
        } else {
index 06c8189..15247b9 100644 (file)
@@ -101,6 +101,12 @@ void kernel_fpu_begin(void)
                copy_fpregs_to_fpstate(&current->thread.fpu);
        }
        __cpu_invalidate_fpregs_state();
+
+       if (boot_cpu_has(X86_FEATURE_XMM))
+               ldmxcsr(MXCSR_DEFAULT);
+
+       if (boot_cpu_has(X86_FEATURE_FPU))
+               asm volatile ("fninit");
 }
 EXPORT_SYMBOL_GPL(kernel_fpu_begin);
 
index c84d28e..5150456 100644 (file)
@@ -86,7 +86,7 @@ static int ftrace_verify_code(unsigned long ip, const char *old_code)
         * sure what we read is what we expected it to be before modifying it.
         */
        /* read the text we want to modify */
-       if (probe_kernel_read(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
+       if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
                WARN_ON(1);
                return -EFAULT;
        }
@@ -355,7 +355,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
        npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);
 
        /* Copy ftrace_caller onto the trampoline memory */
-       ret = probe_kernel_read(trampoline, (void *)start_offset, size);
+       ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
        if (WARN_ON(ret < 0))
                goto fail;
 
@@ -363,13 +363,13 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
 
        /* The trampoline ends with ret(q) */
        retq = (unsigned long)ftrace_stub;
-       ret = probe_kernel_read(ip, (void *)retq, RET_SIZE);
+       ret = copy_from_kernel_nofault(ip, (void *)retq, RET_SIZE);
        if (WARN_ON(ret < 0))
                goto fail;
 
        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
                ip = trampoline + (ftrace_regs_caller_ret - ftrace_regs_caller);
-               ret = probe_kernel_read(ip, (void *)retq, RET_SIZE);
+               ret = copy_from_kernel_nofault(ip, (void *)retq, RET_SIZE);
                if (WARN_ON(ret < 0))
                        goto fail;
        }
@@ -506,7 +506,7 @@ static void *addr_from_call(void *ptr)
        union text_poke_insn call;
        int ret;
 
-       ret = probe_kernel_read(&call, ptr, CALL_INSN_SIZE);
+       ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
        if (WARN_ON_ONCE(ret < 0))
                return NULL;
 
index c44fe7d..68acd30 100644 (file)
@@ -732,11 +732,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
        int err;
 
        bpt->type = BP_BREAKPOINT;
-       err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+       err = copy_from_kernel_nofault(bpt->saved_instr, (char *)bpt->bpt_addr,
                                BREAK_INSTR_SIZE);
        if (err)
                return err;
-       err = probe_kernel_write((char *)bpt->bpt_addr,
+       err = copy_to_kernel_nofault((char *)bpt->bpt_addr,
                                 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
        if (!err)
                return err;
@@ -768,7 +768,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
        return 0;
 
 knl_write:
-       return probe_kernel_write((char *)bpt->bpt_addr,
+       return copy_to_kernel_nofault((char *)bpt->bpt_addr,
                                  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
 }
 
index 3bafe1b..ada39dd 100644 (file)
@@ -243,7 +243,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
         * Fortunately, we know that the original code is the ideal 5-byte
         * long NOP.
         */
-       if (probe_kernel_read(buf, (void *)addr,
+       if (copy_from_kernel_nofault(buf, (void *)addr,
                MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
                return 0UL;
 
@@ -346,7 +346,8 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
                return 0;
 
        /* This can access kernel text if given address is not recovered */
-       if (probe_kernel_read(dest, (void *)recovered_insn, MAX_INSN_SIZE))
+       if (copy_from_kernel_nofault(dest, (void *)recovered_insn,
+                       MAX_INSN_SIZE))
                return 0;
 
        kernel_insn_init(insn, dest, MAX_INSN_SIZE);
@@ -753,16 +754,11 @@ asm(
 NOKPROBE_SYMBOL(kretprobe_trampoline);
 STACK_FRAME_NON_STANDARD(kretprobe_trampoline);
 
-static struct kprobe kretprobe_kprobe = {
-       .addr = (void *)kretprobe_trampoline,
-};
-
 /*
  * Called from kretprobe_trampoline
  */
 __used __visible void *trampoline_handler(struct pt_regs *regs)
 {
-       struct kprobe_ctlblk *kcb;
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *tmp;
@@ -772,16 +768,12 @@ __used __visible void *trampoline_handler(struct pt_regs *regs)
        void *frame_pointer;
        bool skipped = false;
 
-       preempt_disable();
-
        /*
         * Set a dummy kprobe for avoiding kretprobe recursion.
         * Since kretprobe never run in kprobe handler, kprobe must not
         * be running at this point.
         */
-       kcb = get_kprobe_ctlblk();
-       __this_cpu_write(current_kprobe, &kretprobe_kprobe);
-       kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+       kprobe_busy_begin();
 
        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);
@@ -857,7 +849,7 @@ __used __visible void *trampoline_handler(struct pt_regs *regs)
                        __this_cpu_write(current_kprobe, &ri->rp->kp);
                        ri->ret_addr = correct_ret_addr;
                        ri->rp->handler(ri, regs);
-                       __this_cpu_write(current_kprobe, &kretprobe_kprobe);
+                       __this_cpu_write(current_kprobe, &kprobe_busy);
                }
 
                recycle_rp_inst(ri, &empty_rp);
@@ -873,8 +865,7 @@ __used __visible void *trampoline_handler(struct pt_regs *regs)
 
        kretprobe_hash_unlock(current, &flags);
 
-       __this_cpu_write(current_kprobe, NULL);
-       preempt_enable();
+       kprobe_busy_end();
 
        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
index 321c199..7af4c61 100644 (file)
@@ -56,7 +56,7 @@ found:
         * overwritten by jump destination address. In this case, original
         * bytes must be recovered from op->optinsn.copied_insn buffer.
         */
-       if (probe_kernel_read(buf, (void *)addr,
+       if (copy_from_kernel_nofault(buf, (void *)addr,
                MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
                return 0UL;
 
index 8748321..b8aee71 100644 (file)
@@ -29,6 +29,8 @@
 #include <asm/mmu_context.h>
 #include <asm/pgtable_areas.h>
 
+#include <xen/xen.h>
+
 /* This is a multiple of PAGE_SIZE. */
 #define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)
 
@@ -543,6 +545,28 @@ static int read_default_ldt(void __user *ptr, unsigned long bytecount)
        return bytecount;
 }
 
+static bool allow_16bit_segments(void)
+{
+       if (!IS_ENABLED(CONFIG_X86_16BIT))
+               return false;
+
+#ifdef CONFIG_XEN_PV
+       /*
+        * Xen PV does not implement ESPFIX64, which means that 16-bit
+        * segments will not work correctly.  Until either Xen PV implements
+        * ESPFIX64 and can signal this fact to the guest or unless someone
+        * provides compelling evidence that allowing broken 16-bit segments
+        * is worthwhile, disallow 16-bit segments under Xen PV.
+        */
+       if (xen_pv_domain()) {
+               pr_info_once("Warning: 16-bit segments do not work correctly in a Xen PV guest\n");
+               return false;
+       }
+#endif
+
+       return true;
+}
+
 static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 {
        struct mm_struct *mm = current->mm;
@@ -574,7 +598,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
                /* The user wants to clear the entry. */
                memset(&ldt, 0, sizeof(ldt));
        } else {
-               if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+               if (!ldt_info.seg_32bit && !allow_16bit_segments()) {
                        error = -EINVAL;
                        goto out;
                }
index 2de365f..d7c5e44 100644 (file)
@@ -478,7 +478,7 @@ static DEFINE_PER_CPU(unsigned long, nmi_dr7);
 
 DEFINE_IDTENTRY_RAW(exc_nmi)
 {
-       if (IS_ENABLED(CONFIG_SMP) && cpu_is_offline(smp_processor_id()))
+       if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id()))
                return;
 
        if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
index ee02863..9e1def3 100644 (file)
@@ -94,12 +94,12 @@ static bool match_id(struct pci_dev *pdev, unsigned short vendor, unsigned short
 }
 
 static bool probe_list(struct pci_dev *pdev, unsigned short vendor,
-                      const unsigned char *rom_list)
+                      const void *rom_list)
 {
        unsigned short device;
 
        do {
-               if (probe_kernel_address(rom_list, device) != 0)
+               if (get_kernel_nofault(device, rom_list) != 0)
                        device = 0;
 
                if (device && match_id(pdev, vendor, device))
@@ -119,19 +119,19 @@ static struct resource *find_oprom(struct pci_dev *pdev)
        for (i = 0; i < ARRAY_SIZE(adapter_rom_resources); i++) {
                struct resource *res = &adapter_rom_resources[i];
                unsigned short offset, vendor, device, list, rev;
-               const unsigned char *rom;
+               const void *rom;
 
                if (res->end == 0)
                        break;
 
                rom = isa_bus_to_virt(res->start);
-               if (probe_kernel_address(rom + 0x18, offset) != 0)
+               if (get_kernel_nofault(offset, rom + 0x18) != 0)
                        continue;
 
-               if (probe_kernel_address(rom + offset + 0x4, vendor) != 0)
+               if (get_kernel_nofault(vendor, rom + offset + 0x4) != 0)
                        continue;
 
-               if (probe_kernel_address(rom + offset + 0x6, device) != 0)
+               if (get_kernel_nofault(device, rom + offset + 0x6) != 0)
                        continue;
 
                if (match_id(pdev, vendor, device)) {
@@ -139,8 +139,8 @@ static struct resource *find_oprom(struct pci_dev *pdev)
                        break;
                }
 
-               if (probe_kernel_address(rom + offset + 0x8, list) == 0 &&
-                   probe_kernel_address(rom + offset + 0xc, rev) == 0 &&
+               if (get_kernel_nofault(list, rom + offset + 0x8) == 0 &&
+                   get_kernel_nofault(rev, rom + offset + 0xc) == 0 &&
                    rev >= 3 && list &&
                    probe_list(pdev, vendor, rom + offset + list)) {
                        oprom = res;
@@ -183,14 +183,14 @@ static int __init romsignature(const unsigned char *rom)
        const unsigned short * const ptr = (const unsigned short *)rom;
        unsigned short sig;
 
-       return probe_kernel_address(ptr, sig) == 0 && sig == ROMSIGNATURE;
+       return get_kernel_nofault(sig, ptr) == 0 && sig == ROMSIGNATURE;
 }
 
 static int __init romchecksum(const unsigned char *rom, unsigned long length)
 {
        unsigned char sum, c;
 
-       for (sum = 0; length && probe_kernel_address(rom++, c) == 0; length--)
+       for (sum = 0; length && get_kernel_nofault(c, rom++) == 0; length--)
                sum += c;
        return !length && !sum;
 }
@@ -211,7 +211,7 @@ void __init probe_roms(void)
 
                video_rom_resource.start = start;
 
-               if (probe_kernel_address(rom + 2, c) != 0)
+               if (get_kernel_nofault(c, rom + 2) != 0)
                        continue;
 
                /* 0 < length <= 0x7f * 512, historically */
@@ -249,7 +249,7 @@ void __init probe_roms(void)
                if (!romsignature(rom))
                        continue;
 
-               if (probe_kernel_address(rom + 2, c) != 0)
+               if (get_kernel_nofault(c, rom + 2) != 0)
                        continue;
 
                /* 0 < length <= 0x7f * 512, historically */
index af75109..b038695 100644 (file)
@@ -84,17 +84,16 @@ static inline void cond_local_irq_disable(struct pt_regs *regs)
                local_irq_disable();
 }
 
-int is_valid_bugaddr(unsigned long addr)
+__always_inline int is_valid_bugaddr(unsigned long addr)
 {
-       unsigned short ud;
-
        if (addr < TASK_SIZE_MAX)
                return 0;
 
-       if (probe_kernel_address((unsigned short *)addr, ud))
-               return 0;
-
-       return ud == INSN_UD0 || ud == INSN_UD2;
+       /*
+        * We got #UD, if the text isn't readable we'd have gotten
+        * a different exception.
+        */
+       return *(unsigned short *)addr == INSN_UD2;
 }
 
 static nokprobe_inline int
@@ -216,40 +215,45 @@ static inline void handle_invalid_op(struct pt_regs *regs)
                      ILL_ILLOPN, error_get_trap_addr(regs));
 }
 
-DEFINE_IDTENTRY_RAW(exc_invalid_op)
+static noinstr bool handle_bug(struct pt_regs *regs)
 {
-       bool rcu_exit;
+       bool handled = false;
+
+       if (!is_valid_bugaddr(regs->ip))
+               return handled;
 
        /*
-        * Handle BUG/WARN like NMIs instead of like normal idtentries:
-        * if we bugged/warned in a bad RCU context, for example, the last
-        * thing we want is to BUG/WARN again in the idtentry code, ad
-        * infinitum.
+        * All lies, just get the WARN/BUG out.
+        */
+       instrumentation_begin();
+       /*
+        * Since we're emulating a CALL with exceptions, restore the interrupt
+        * state to what it was at the exception site.
         */
-       if (!user_mode(regs) && is_valid_bugaddr(regs->ip)) {
-               enum bug_trap_type type;
+       if (regs->flags & X86_EFLAGS_IF)
+               raw_local_irq_enable();
+       if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
+               regs->ip += LEN_UD2;
+               handled = true;
+       }
+       if (regs->flags & X86_EFLAGS_IF)
+               raw_local_irq_disable();
+       instrumentation_end();
 
-               nmi_enter();
-               instrumentation_begin();
-               trace_hardirqs_off_finish();
-               type = report_bug(regs->ip, regs);
-               if (regs->flags & X86_EFLAGS_IF)
-                       trace_hardirqs_on_prepare();
-               instrumentation_end();
-               nmi_exit();
+       return handled;
+}
 
-               if (type == BUG_TRAP_TYPE_WARN) {
-                       /* Skip the ud2. */
-                       regs->ip += LEN_UD2;
-                       return;
-               }
+DEFINE_IDTENTRY_RAW(exc_invalid_op)
+{
+       bool rcu_exit;
 
-               /*
-                * Else, if this was a BUG and report_bug returns or if this
-                * was just a normal #UD, we want to continue onward and
-                * crash.
-                */
-       }
+       /*
+        * We use UD2 as a short encoding for 'CALL __WARN', as such
+        * handle it before exception entry to avoid recursive WARN
+        * in case exception entry is the one triggering WARNs.
+        */
+       if (!user_mode(regs) && handle_bug(regs))
+               return;
 
        rcu_exit = idtentry_enter_cond_rcu(regs);
        instrumentation_begin();
@@ -488,7 +492,8 @@ static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
        u8 insn_buf[MAX_INSN_SIZE];
        struct insn insn;
 
-       if (probe_kernel_read(insn_buf, (void *)regs->ip, MAX_INSN_SIZE))
+       if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip,
+                       MAX_INSN_SIZE))
                return GP_NO_HINT;
 
        kernel_insn_init(&insn, insn_buf, MAX_INSN_SIZE);
@@ -690,13 +695,13 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
                (struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
        /* Copy the IRET target to the temporary storage. */
-       memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);
+       __memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);
 
        /* Copy the remainder of the stack from the current stack. */
-       memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));
+       __memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));
 
        /* Update the entry stack */
-       memcpy(new_stack, &tmp, sizeof(tmp));
+       __memcpy(new_stack, &tmp, sizeof(tmp));
 
        BUG_ON(!user_mode(&new_stack->regs));
        return new_stack;
@@ -865,6 +870,12 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
        trace_hardirqs_off_finish();
 
        /*
+        * If something gets miswired and we end up here for a user mode
+        * #DB, we will malfunction.
+        */
+       WARN_ON_ONCE(user_mode(regs));
+
+       /*
         * Catch SYSENTER with TF set and clear DR_STEP. If this hit a
         * watchpoint at the same time then that will still be handled.
         */
@@ -882,6 +893,12 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
 static __always_inline void exc_debug_user(struct pt_regs *regs,
                                           unsigned long dr6)
 {
+       /*
+        * If something gets miswired and we end up here for a kernel mode
+        * #DB, we will malfunction.
+        */
+       WARN_ON_ONCE(!user_mode(regs));
+
        idtentry_enter_user(regs);
        instrumentation_begin();
 
@@ -912,7 +929,7 @@ DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
 }
 #else
 /* 32 bit does not have separate entry points. */
-DEFINE_IDTENTRY_DEBUG(exc_debug)
+DEFINE_IDTENTRY_RAW(exc_debug)
 {
        unsigned long dr6, dr7;
 
index ff2d0e9..cfe83d4 100644 (file)
@@ -7,7 +7,7 @@
 #define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
 #define KVM_POSSIBLE_CR4_GUEST_BITS                              \
        (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
-        | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE)
+        | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_PGE | X86_CR4_TSD)
 
 #define BUILD_KVM_GPR_ACCESSORS(lname, uname)                                \
 static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
index 34a7e05..5bf72fc 100644 (file)
@@ -169,6 +169,18 @@ static void kvm_apic_map_free(struct rcu_head *rcu)
        kvfree(map);
 }
 
+/*
+ * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
+ *
+ * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
+ * apic_map_lock_held.
+ */
+enum {
+       CLEAN,
+       UPDATE_IN_PROGRESS,
+       DIRTY
+};
+
 void kvm_recalculate_apic_map(struct kvm *kvm)
 {
        struct kvm_apic_map *new, *old = NULL;
@@ -176,17 +188,17 @@ void kvm_recalculate_apic_map(struct kvm *kvm)
        int i;
        u32 max_id = 255; /* enough space for any xAPIC ID */
 
-       if (!kvm->arch.apic_map_dirty) {
-               /*
-                * Read kvm->arch.apic_map_dirty before
-                * kvm->arch.apic_map
-                */
-               smp_rmb();
+       /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map.  */
+       if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
                return;
-       }
 
        mutex_lock(&kvm->arch.apic_map_lock);
-       if (!kvm->arch.apic_map_dirty) {
+       /*
+        * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map
+        * (if clean) or the APIC registers (if dirty).
+        */
+       if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
+                                  DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
                /* Someone else has updated the map. */
                mutex_unlock(&kvm->arch.apic_map_lock);
                return;
@@ -256,11 +268,11 @@ out:
                        lockdep_is_held(&kvm->arch.apic_map_lock));
        rcu_assign_pointer(kvm->arch.apic_map, new);
        /*
-        * Write kvm->arch.apic_map before
-        * clearing apic->apic_map_dirty
+        * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
+        * If another update has come in, leave it DIRTY.
         */
-       smp_wmb();
-       kvm->arch.apic_map_dirty = false;
+       atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
+                              UPDATE_IN_PROGRESS, CLEAN);
        mutex_unlock(&kvm->arch.apic_map_lock);
 
        if (old)
@@ -282,20 +294,20 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
                else
                        static_key_slow_inc(&apic_sw_disabled.key);
 
-               apic->vcpu->kvm->arch.apic_map_dirty = true;
+               atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
        }
 }
 
 static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
 {
        kvm_lapic_set_reg(apic, APIC_ID, id << 24);
-       apic->vcpu->kvm->arch.apic_map_dirty = true;
+       atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 }
 
 static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
 {
        kvm_lapic_set_reg(apic, APIC_LDR, id);
-       apic->vcpu->kvm->arch.apic_map_dirty = true;
+       atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 }
 
 static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
@@ -311,7 +323,7 @@ static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
 
        kvm_lapic_set_reg(apic, APIC_ID, id);
        kvm_lapic_set_reg(apic, APIC_LDR, ldr);
-       apic->vcpu->kvm->arch.apic_map_dirty = true;
+       atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
 }
 
 static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
@@ -1976,7 +1988,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
        case APIC_DFR:
                if (!apic_x2apic_mode(apic)) {
                        kvm_lapic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
-                       apic->vcpu->kvm->arch.apic_map_dirty = true;
+                       atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
                } else
                        ret = 1;
                break;
@@ -2232,7 +2244,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
                        static_key_slow_dec_deferred(&apic_hw_disabled);
                } else {
                        static_key_slow_inc(&apic_hw_disabled.key);
-                       vcpu->kvm->arch.apic_map_dirty = true;
+                       atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
                }
        }
 
@@ -2273,7 +2285,6 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
        if (!apic)
                return;
 
-       vcpu->kvm->arch.apic_map_dirty = false;
        /* Stop the timer in case it's a reset to an active apic */
        hrtimer_cancel(&apic->lapic_timer.timer);
 
@@ -2567,6 +2578,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
        }
        memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
 
+       atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
        kvm_recalculate_apic_map(vcpu->kvm);
        kvm_apic_set_version(vcpu);
 
index 0ad06bf..444bb9c 100644 (file)
@@ -222,7 +222,7 @@ void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
                                    struct kvm_memory_slot *slot, u64 gfn);
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
 
 int kvm_mmu_post_init_vm(struct kvm *kvm);
 void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
index fdd05c2..6d6a0ae 100644 (file)
@@ -1745,10 +1745,10 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
  * Emulate arch specific page modification logging for the
  * nested hypervisor
  */
-int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
+int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu, gpa_t l2_gpa)
 {
        if (kvm_x86_ops.write_log_dirty)
-               return kvm_x86_ops.write_log_dirty(vcpu);
+               return kvm_x86_ops.write_log_dirty(vcpu, l2_gpa);
 
        return 0;
 }
@@ -4449,7 +4449,7 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
                        nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
                        rsvd_bits(maxphyaddr, 51);
                rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
-                       nonleaf_bit8_rsvd | gbpages_bit_rsvd |
+                       gbpages_bit_rsvd |
                        rsvd_bits(maxphyaddr, 51);
                rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
                        rsvd_bits(maxphyaddr, 51);
index a6d484e..bd70ece 100644 (file)
@@ -235,7 +235,7 @@ static inline unsigned FNAME(gpte_access)(u64 gpte)
 static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
                                             struct kvm_mmu *mmu,
                                             struct guest_walker *walker,
-                                            int write_fault)
+                                            gpa_t addr, int write_fault)
 {
        unsigned level, index;
        pt_element_t pte, orig_pte;
@@ -260,7 +260,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
                                !(pte & PT_GUEST_DIRTY_MASK)) {
                        trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
 #if PTTYPE == PTTYPE_EPT
-                       if (kvm_arch_write_log_dirty(vcpu))
+                       if (kvm_arch_write_log_dirty(vcpu, addr))
                                return -EINVAL;
 #endif
                        pte |= PT_GUEST_DIRTY_MASK;
@@ -360,7 +360,6 @@ retry_walk:
        ++walker->level;
 
        do {
-               gfn_t real_gfn;
                unsigned long host_addr;
 
                pt_access = pte_access;
@@ -375,7 +374,7 @@ retry_walk:
                walker->table_gfn[walker->level - 1] = table_gfn;
                walker->pte_gpa[walker->level - 1] = pte_gpa;
 
-               real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
+               real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
                                              nested_access,
                                              &walker->fault);
 
@@ -389,12 +388,10 @@ retry_walk:
                 * information to fix the exit_qualification or exit_info_1
                 * fields.
                 */
-               if (unlikely(real_gfn == UNMAPPED_GVA))
+               if (unlikely(real_gpa == UNMAPPED_GVA))
                        return 0;
 
-               real_gfn = gpa_to_gfn(real_gfn);
-
-               host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, real_gfn,
+               host_addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gpa_to_gfn(real_gpa),
                                            &walker->pte_writable[walker->level - 1]);
                if (unlikely(kvm_is_error_hva(host_addr)))
                        goto error;
@@ -457,7 +454,8 @@ retry_walk:
                        (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
 
        if (unlikely(!accessed_dirty)) {
-               ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
+               ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
+                                                       addr, write_fault);
                if (unlikely(ret < 0))
                        goto error;
                else if (ret)
index 8ccfa41..c0da4dd 100644 (file)
@@ -3344,7 +3344,7 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 
 void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
 
-static fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
+static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
        fastpath_t exit_fastpath;
        struct vcpu_svm *svm = to_svm(vcpu);
index d1af20b..d4a4cec 100644 (file)
@@ -4109,7 +4109,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
         * CR0_GUEST_HOST_MASK is already set in the original vmcs01
         * (KVM doesn't change it);
         */
-       vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+       vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
        vmx_set_cr0(vcpu, vmcs12->host_cr0);
 
        /* Same as above - no reason to call set_cr4_guest_host_mask().  */
@@ -4259,7 +4259,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
         */
        vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
 
-       vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+       vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
        vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
 
        vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
@@ -6176,6 +6176,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
                        goto error_guest_mode;
        }
 
+       vmx->nested.has_preemption_timer_deadline = false;
        if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) {
                vmx->nested.has_preemption_timer_deadline = true;
                vmx->nested.preemption_timer_deadline =
index 5c0ff80..7a3675f 100644 (file)
@@ -72,11 +72,24 @@ struct loaded_vmcs {
        struct vmcs_controls_shadow controls_shadow;
 };
 
+static inline bool is_intr_type(u32 intr_info, u32 type)
+{
+       const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK;
+
+       return (intr_info & mask) == (INTR_INFO_VALID_MASK | type);
+}
+
+static inline bool is_intr_type_n(u32 intr_info, u32 type, u8 vector)
+{
+       const u32 mask = INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK |
+                        INTR_INFO_VECTOR_MASK;
+
+       return (intr_info & mask) == (INTR_INFO_VALID_MASK | type | vector);
+}
+
 static inline bool is_exception_n(u32 intr_info, u8 vector)
 {
-       return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
-                            INTR_INFO_VALID_MASK)) ==
-               (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
+       return is_intr_type_n(intr_info, INTR_TYPE_HARD_EXCEPTION, vector);
 }
 
 static inline bool is_debug(u32 intr_info)
@@ -106,28 +119,23 @@ static inline bool is_gp_fault(u32 intr_info)
 
 static inline bool is_machine_check(u32 intr_info)
 {
-       return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
-                            INTR_INFO_VALID_MASK)) ==
-               (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
+       return is_exception_n(intr_info, MC_VECTOR);
 }
 
 /* Undocumented: icebp/int1 */
 static inline bool is_icebp(u32 intr_info)
 {
-       return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
-               == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK);
+       return is_intr_type(intr_info, INTR_TYPE_PRIV_SW_EXCEPTION);
 }
 
 static inline bool is_nmi(u32 intr_info)
 {
-       return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
-               == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
+       return is_intr_type(intr_info, INTR_TYPE_NMI_INTR);
 }
 
 static inline bool is_external_intr(u32 intr_info)
 {
-       return (intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
-               == (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR);
+       return is_intr_type(intr_info, INTR_TYPE_EXT_INTR);
 }
 
 enum vmcs_field_width {
index 36c7717..13745f2 100644 (file)
@@ -133,9 +133,6 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
 #define KVM_VM_CR0_ALWAYS_ON                           \
        (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST |      \
         X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
-#define KVM_CR4_GUEST_OWNED_BITS                                     \
-       (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
-        | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)
 
 #define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
@@ -4034,9 +4031,9 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
 
 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
 {
-       vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
-       if (enable_ept)
-               vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
+       vmx->vcpu.arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS;
+       if (!enable_ept)
+               vmx->vcpu.arch.cr4_guest_owned_bits &= ~X86_CR4_PGE;
        if (is_guest_mode(&vmx->vcpu))
                vmx->vcpu.arch.cr4_guest_owned_bits &=
                        ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
@@ -4333,8 +4330,8 @@ static void init_vmcs(struct vcpu_vmx *vmx)
        /* 22.2.1, 20.8.1 */
        vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
 
-       vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS;
-       vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS);
+       vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
+       vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
 
        set_cr4_guest_host_mask(vmx);
 
@@ -6606,23 +6603,6 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
                                        msrs[i].host, false);
 }
 
-static void atomic_switch_umwait_control_msr(struct vcpu_vmx *vmx)
-{
-       u32 host_umwait_control;
-
-       if (!vmx_has_waitpkg(vmx))
-               return;
-
-       host_umwait_control = get_umwait_control_msr();
-
-       if (vmx->msr_ia32_umwait_control != host_umwait_control)
-               add_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL,
-                       vmx->msr_ia32_umwait_control,
-                       host_umwait_control, false);
-       else
-               clear_atomic_switch_msr(vmx, MSR_IA32_UMWAIT_CONTROL);
-}
-
 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6728,9 +6708,7 @@ reenter_guest:
 
        pt_guest_enter(vmx);
 
-       if (vcpu_to_pmu(vcpu)->version)
-               atomic_switch_perf_msrs(vmx);
-       atomic_switch_umwait_control_msr(vmx);
+       atomic_switch_perf_msrs(vmx);
 
        if (enable_preemption_timer)
                vmx_update_hv_timer(vcpu);
@@ -7501,11 +7479,11 @@ static void vmx_flush_log_dirty(struct kvm *kvm)
        kvm_flush_pml_buffers(kvm);
 }
 
-static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
+static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
 {
        struct vmcs12 *vmcs12;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       gpa_t gpa, dst;
+       gpa_t dst;
 
        if (is_guest_mode(vcpu)) {
                WARN_ON_ONCE(vmx->nested.pml_full);
@@ -7524,7 +7502,7 @@ static int vmx_write_pml_buffer(struct kvm_vcpu *vcpu)
                        return 1;
                }
 
-               gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS) & ~0xFFFull;
+               gpa &= ~0xFFFull;
                dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
 
                if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
index 8a83b5e..639798e 100644 (file)
@@ -288,8 +288,6 @@ struct vcpu_vmx {
 
        u64 current_tsc_ratio;
 
-       u32 host_pkru;
-
        unsigned long host_debugctlmsr;
 
        /*
index 00c88c2..88c593f 100644 (file)
@@ -975,6 +975,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE))
                        return 1;
+               if ((cr4 ^ old_cr4) & X86_CR4_LA57)
+                       return 1;
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & pdptr_bits)
                   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
@@ -2693,6 +2695,9 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
        if (data & 0x30)
                return 1;
 
+       if (!lapic_in_kernel(vcpu))
+               return 1;
+
        vcpu->arch.apf.msr_en_val = data;
 
        if (!kvm_pv_async_pf_enabled(vcpu)) {
@@ -2856,7 +2861,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                return kvm_mtrr_set_msr(vcpu, msr, data);
        case MSR_IA32_APICBASE:
                return kvm_set_apic_base(vcpu, msr_info);
-       case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
+       case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
                return kvm_x2apic_msr_write(vcpu, msr, data);
        case MSR_IA32_TSCDEADLINE:
                kvm_set_lapic_tscdeadline_msr(vcpu, data);
@@ -3196,7 +3201,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_IA32_APICBASE:
                msr_info->data = kvm_get_apic_base(vcpu);
                break;
-       case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
+       case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
                return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
        case MSR_IA32_TSCDEADLINE:
                msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
@@ -4603,7 +4608,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                r = -EINVAL;
                user_tsc_khz = (u32)arg;
 
-               if (user_tsc_khz >= kvm_max_guest_tsc_khz)
+               if (kvm_has_tsc_control &&
+                   user_tsc_khz >= kvm_max_guest_tsc_khz)
                        goto out;
 
                if (user_tsc_khz == 0)
index 56b243b..bbcc05b 100644 (file)
@@ -8,6 +8,8 @@
 #include <asm/alternative-asm.h>
 #include <asm/export.h>
 
+.pushsection .noinstr.text, "ax"
+
 /*
  * We build a jump to memcpy_orig by default which gets NOPped out on
  * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
@@ -184,6 +186,8 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
        retq
 SYM_FUNC_END(memcpy_orig)
 
+.popsection
+
 #ifndef CONFIG_UML
 
 MCSAFE_TEST_CTL
index fff28c6..b0dfac3 100644 (file)
@@ -24,6 +24,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
        asm volatile(
                "       testq  %[size8],%[size8]\n"
                "       jz     4f\n"
+               "       .align 16\n"
                "0:     movq $0,(%[dst])\n"
                "       addq   $8,%[dst]\n"
                "       decl %%ecx ; jnz   0b\n"
index 66be9bd..1ead568 100644 (file)
@@ -99,7 +99,7 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
                return !instr_lo || (instr_lo>>1) == 1;
        case 0x00:
                /* Prefetch instruction is 0x0F0D or 0x0F18 */
-               if (probe_kernel_address(instr, opcode))
+               if (get_kernel_nofault(opcode, instr))
                        return 0;
 
                *prefetch = (instr_lo == 0xF) &&
@@ -133,7 +133,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
        while (instr < max_instr) {
                unsigned char opcode;
 
-               if (probe_kernel_address(instr, opcode))
+               if (get_kernel_nofault(opcode, instr))
                        break;
 
                instr++;
@@ -301,7 +301,7 @@ static int bad_address(void *p)
 {
        unsigned long dummy;
 
-       return probe_kernel_address((unsigned long *)p, dummy);
+       return get_kernel_nofault(dummy, (unsigned long *)p);
 }
 
 static void dump_pagetable(unsigned long address)
@@ -442,7 +442,7 @@ static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index)
                return;
        }
 
-       if (probe_kernel_read(&desc, (void *)(gdt->address + offset),
+       if (copy_from_kernel_nofault(&desc, (void *)(gdt->address + offset),
                              sizeof(struct ldttss_desc))) {
                pr_alert("%s: 0x%hx -- GDT entry is not readable\n",
                         name, index);
index bda909e..8b4afad 100644 (file)
@@ -737,7 +737,7 @@ static void __init test_wp_bit(void)
 
        __set_fixmap(FIX_WP_TEST, __pa_symbol(empty_zero_page), PAGE_KERNEL_RO);
 
-       if (probe_kernel_write((char *)fix_to_virt(FIX_WP_TEST), &z, 1)) {
+       if (copy_to_kernel_nofault((char *)fix_to_virt(FIX_WP_TEST), &z, 1)) {
                clear_fixmap(FIX_WP_TEST);
                printk(KERN_CONT "Ok.\n");
                return;
index e1d7d74..92ec176 100644 (file)
@@ -9,7 +9,7 @@ static __always_inline u64 canonical_address(u64 vaddr, u8 vaddr_bits)
        return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
 }
 
-bool probe_kernel_read_allowed(const void *unsafe_src, size_t size)
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
 {
        unsigned long vaddr = (unsigned long)unsafe_src;
 
@@ -22,7 +22,7 @@ bool probe_kernel_read_allowed(const void *unsafe_src, size_t size)
               canonical_address(vaddr, boot_cpu_data.x86_virt_bits) == vaddr;
 }
 #else
-bool probe_kernel_read_allowed(const void *unsafe_src, size_t size)
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
 {
        return (unsigned long)unsafe_src >= TASK_SIZE_MAX;
 }
index 9c97d81..4f15280 100644 (file)
@@ -302,7 +302,7 @@ static const struct pci_raw_ops *__init pci_find_bios(void)
             check <= (union bios32 *) __va(0xffff0);
             ++check) {
                long sig;
-               if (probe_kernel_address(&check->fields.signature, sig))
+               if (get_kernel_nofault(sig, &check->fields.signature))
                        continue;
 
                if (check->fields.signature != BIOS32_SIGNATURE)
index b8f7f19..30bd571 100644 (file)
@@ -287,8 +287,8 @@ void intel_scu_devices_create(void)
 
                adapter = i2c_get_adapter(i2c_bus[i]);
                if (adapter) {
-                       client = i2c_new_device(adapter, i2c_devs[i]);
-                       if (!client)
+                       client = i2c_new_client_device(adapter, i2c_devs[i]);
+                       if (IS_ERR(client))
                                pr_err("can't create i2c device %s\n",
                                        i2c_devs[i]->type);
                } else
index 7c65102..db1378c 100644 (file)
@@ -193,6 +193,8 @@ static void fix_processor_context(void)
  */
 static void notrace __restore_processor_state(struct saved_context *ctxt)
 {
+       struct cpuinfo_x86 *c;
+
        if (ctxt->misc_enable_saved)
                wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
        /*
@@ -263,6 +265,10 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
        mtrr_bp_restore();
        perf_restore_debug_store();
        msr_restore_context(ctxt);
+
+       c = &cpu_data(smp_processor_id());
+       if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
+               init_ia32_feat_ctl(c);
 }
 
 /* Needed by apm.c */
index b04e6e7..088bd76 100644 (file)
@@ -34,6 +34,7 @@ KCOV_INSTRUMENT := n
 PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
 PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
 PURGATORY_CFLAGS += $(DISABLE_STACKLEAK_PLUGIN) -DDISABLE_BRANCH_PROFILING
+PURGATORY_CFLAGS += $(call cc-option,-fno-stack-protector)
 
 # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
 # in turn leaves some undefined symbols like __fentry__ in purgatory and not
index 33b309d..0d68948 100644 (file)
@@ -386,7 +386,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
 
        preempt_disable();
 
-       probe_kernel_read(&dummy, v, 1);
+       copy_from_kernel_nofault(&dummy, v, 1);
 
        if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
                BUG();
@@ -598,6 +598,26 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
 }
 
 #ifdef CONFIG_X86_64
+void noist_exc_debug(struct pt_regs *regs);
+
+DEFINE_IDTENTRY_RAW(xenpv_exc_nmi)
+{
+       /* On Xen PV, NMI doesn't use IST.  The C part is the sane as native. */
+       exc_nmi(regs);
+}
+
+DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
+{
+       /*
+        * There's no IST on Xen PV, but we still need to dispatch
+        * to the correct handler.
+        */
+       if (user_mode(regs))
+               noist_exc_debug(regs);
+       else
+               exc_debug(regs);
+}
+
 struct trap_array_entry {
        void (*orig)(void);
        void (*xen)(void);
@@ -609,18 +629,18 @@ struct trap_array_entry {
        .xen            = xen_asm_##func,               \
        .ist_okay       = ist_ok }
 
-#define TRAP_ENTRY_REDIR(func, xenfunc, ist_ok) {      \
+#define TRAP_ENTRY_REDIR(func, ist_ok) {               \
        .orig           = asm_##func,                   \
-       .xen            = xen_asm_##xenfunc,            \
+       .xen            = xen_asm_xenpv_##func,         \
        .ist_okay       = ist_ok }
 
 static struct trap_array_entry trap_array[] = {
-       TRAP_ENTRY_REDIR(exc_debug, exc_xendebug,       true  ),
+       TRAP_ENTRY_REDIR(exc_debug,                     true  ),
        TRAP_ENTRY(exc_double_fault,                    true  ),
 #ifdef CONFIG_X86_MCE
        TRAP_ENTRY(exc_machine_check,                   true  ),
 #endif
-       TRAP_ENTRY_REDIR(exc_nmi, exc_xennmi,           true  ),
+       TRAP_ENTRY_REDIR(exc_nmi,                       true  ),
        TRAP_ENTRY(exc_int3,                            false ),
        TRAP_ENTRY(exc_overflow,                        false ),
 #ifdef CONFIG_IA32_EMULATION
index 5d252aa..aab1d99 100644 (file)
@@ -29,10 +29,9 @@ _ASM_NOKPROBE(xen_\name)
 .endm
 
 xen_pv_trap asm_exc_divide_error
-xen_pv_trap asm_exc_debug
-xen_pv_trap asm_exc_xendebug
+xen_pv_trap asm_xenpv_exc_debug
 xen_pv_trap asm_exc_int3
-xen_pv_trap asm_exc_xennmi
+xen_pv_trap asm_xenpv_exc_nmi
 xen_pv_trap asm_exc_overflow
 xen_pv_trap asm_exc_bounds
 xen_pv_trap asm_exc_invalid_op
@@ -161,10 +160,22 @@ SYM_FUNC_END(xen_syscall32_target)
 
 /* 32-bit compat sysenter target */
 SYM_FUNC_START(xen_sysenter_target)
-       mov 0*8(%rsp), %rcx
-       mov 1*8(%rsp), %r11
-       mov 5*8(%rsp), %rsp
-       jmp entry_SYSENTER_compat
+       /*
+        * NB: Xen is polite and clears TF from EFLAGS for us.  This means
+        * that we don't need to guard against single step exceptions here.
+        */
+       popq %rcx
+       popq %r11
+
+       /*
+        * Neither Xen nor the kernel really knows what the old SS and
+        * CS were.  The kernel expects __USER32_DS and __USER32_CS, so
+        * report those values even though Xen will guess its own values.
+        */
+       movq $__USER32_DS, 4*8(%rsp)
+       movq $__USER32_CS, 1*8(%rsp)
+
+       jmp entry_SYSENTER_compat_after_hwframe
 SYM_FUNC_END(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
index 23632a3..9ffd7e2 100644 (file)
@@ -24,6 +24,19 @@ void blk_flush_integrity(void)
        flush_workqueue(kintegrityd_wq);
 }
 
+static void __bio_integrity_free(struct bio_set *bs,
+                                struct bio_integrity_payload *bip)
+{
+       if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
+               if (bip->bip_vec)
+                       bvec_free(&bs->bvec_integrity_pool, bip->bip_vec,
+                                 bip->bip_slab);
+               mempool_free(bip, &bs->bio_integrity_pool);
+       } else {
+               kfree(bip);
+       }
+}
+
 /**
  * bio_integrity_alloc - Allocate integrity payload and attach it to bio
  * @bio:       bio to attach integrity metadata to
@@ -78,7 +91,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
 
        return bip;
 err:
-       mempool_free(bip, &bs->bio_integrity_pool);
+       __bio_integrity_free(bs, bip);
        return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL(bio_integrity_alloc);
@@ -99,14 +112,7 @@ void bio_integrity_free(struct bio *bio)
                kfree(page_address(bip->bip_vec->bv_page) +
                      bip->bip_vec->bv_offset);
 
-       if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
-               bvec_free(&bs->bvec_integrity_pool, bip->bip_vec, bip->bip_slab);
-
-               mempool_free(bip, &bs->bio_integrity_pool);
-       } else {
-               kfree(bip);
-       }
-
+       __bio_integrity_free(bs, bip);
        bio->bi_integrity = NULL;
        bio->bi_opf &= ~REQ_INTEGRITY;
 }
index 15df3a3..e0b2bc1 100644 (file)
@@ -125,6 +125,9 @@ static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(REGISTERED),
        QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
        QUEUE_FLAG_NAME(QUIESCED),
+       QUEUE_FLAG_NAME(PCI_P2PDMA),
+       QUEUE_FLAG_NAME(ZONE_RESETALL),
+       QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
 };
 #undef QUEUE_FLAG_NAME
 
index 44f3d09..ae722f8 100644 (file)
@@ -376,7 +376,7 @@ static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
 void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
                void *priv)
 {
-       return __blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
+       __blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
 }
 
 /**
index 4f57d27..4e0d173 100644 (file)
@@ -828,10 +828,10 @@ static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
                               void *priv, bool reserved)
 {
        /*
-        * If we find a request that is inflight and the queue matches,
+        * If we find a request that isn't idle and the queue matches,
         * we know the queue is busy. Return false to stop the iteration.
         */
-       if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) {
+       if (blk_mq_request_started(rq) && rq->q == hctx->queue) {
                bool *busy = priv;
 
                *busy = true;
@@ -3479,7 +3479,9 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 
        if (set->nr_maps == 1 && nr_hw_queues > nr_cpu_ids)
                nr_hw_queues = nr_cpu_ids;
-       if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
+       if (nr_hw_queues < 1)
+               return;
+       if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
                return;
 
        list_for_each_entry(q, &set->tag_list, tag_set_list)
index c2ef41b..35abcb1 100644 (file)
@@ -374,8 +374,7 @@ void blk_ksm_destroy(struct blk_keyslot_manager *ksm)
        if (!ksm)
                return;
        kvfree(ksm->slot_hashtable);
-       memzero_explicit(ksm->slots, sizeof(ksm->slots[0]) * ksm->num_slots);
-       kvfree(ksm->slots);
+       kvfree_sensitive(ksm->slots, sizeof(ksm->slots[0]) * ksm->num_slots);
        memzero_explicit(ksm, sizeof(*ksm));
 }
 EXPORT_SYMBOL_GPL(blk_ksm_destroy);
index 6fdfcb4..d333786 100644 (file)
@@ -910,7 +910,7 @@ static bool ldm_parse_dsk4 (const u8 *buffer, int buflen, struct vblk *vb)
                return false;
 
        disk = &vb->vblk.disk;
-       uuid_copy(&disk->disk_id, (uuid_t *)(buffer + 0x18 + r_name));
+       import_uuid(&disk->disk_id, buffer + 0x18 + r_name);
        return true;
 }
 
index 841580a..d8d6bea 100644 (file)
@@ -93,7 +93,7 @@ struct frag {                         /* VBLK Fragment handling */
        u8              num;            /* Total number of records */
        u8              rec;            /* This is record number n */
        u8              map;            /* Which portions are in use */
-       u8              data[0];
+       u8              data[];
 };
 
 /* In memory LDM database structures. */
index b1cd353..892242a 100644 (file)
@@ -128,21 +128,15 @@ EXPORT_SYMBOL_GPL(af_alg_release);
 void af_alg_release_parent(struct sock *sk)
 {
        struct alg_sock *ask = alg_sk(sk);
-       unsigned int nokey = ask->nokey_refcnt;
-       bool last = nokey && !ask->refcnt;
+       unsigned int nokey = atomic_read(&ask->nokey_refcnt);
 
        sk = ask->parent;
        ask = alg_sk(sk);
 
-       local_bh_disable();
-       bh_lock_sock(sk);
-       ask->nokey_refcnt -= nokey;
-       if (!last)
-               last = !--ask->refcnt;
-       bh_unlock_sock(sk);
-       local_bh_enable();
+       if (nokey)
+               atomic_dec(&ask->nokey_refcnt);
 
-       if (last)
+       if (atomic_dec_and_test(&ask->refcnt))
                sock_put(sk);
 }
 EXPORT_SYMBOL_GPL(af_alg_release_parent);
@@ -187,7 +181,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 
        err = -EBUSY;
        lock_sock(sk);
-       if (ask->refcnt | ask->nokey_refcnt)
+       if (atomic_read(&ask->refcnt))
                goto unlock;
 
        swap(ask->type, type);
@@ -203,8 +197,7 @@ unlock:
        return err;
 }
 
-static int alg_setkey(struct sock *sk, char __user *ukey,
-                     unsigned int keylen)
+static int alg_setkey(struct sock *sk, sockptr_t ukey, unsigned int keylen)
 {
        struct alg_sock *ask = alg_sk(sk);
        const struct af_alg_type *type = ask->type;
@@ -216,7 +209,7 @@ static int alg_setkey(struct sock *sk, char __user *ukey,
                return -ENOMEM;
 
        err = -EFAULT;
-       if (copy_from_user(key, ukey, keylen))
+       if (copy_from_sockptr(key, ukey, keylen))
                goto out;
 
        err = type->setkey(ask->private, key, keylen);
@@ -228,7 +221,7 @@ out:
 }
 
 static int alg_setsockopt(struct socket *sock, int level, int optname,
-                         char __user *optval, unsigned int optlen)
+                         sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
@@ -236,7 +229,7 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
        int err = -EBUSY;
 
        lock_sock(sk);
-       if (ask->refcnt)
+       if (atomic_read(&ask->refcnt) != atomic_read(&ask->nokey_refcnt))
                goto unlock;
 
        type = ask->type;
@@ -301,12 +294,14 @@ int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
        if (err)
                goto unlock;
 
-       if (nokey || !ask->refcnt++)
+       if (atomic_inc_return_relaxed(&ask->refcnt) == 1)
                sock_hold(sk);
-       ask->nokey_refcnt += nokey;
+       if (nokey) {
+               atomic_inc(&ask->nokey_refcnt);
+               atomic_set(&alg_sk(sk2)->nokey_refcnt, 1);
+       }
        alg_sk(sk2)->parent = sk;
        alg_sk(sk2)->type = type;
-       alg_sk(sk2)->nokey_refcnt = nokey;
 
        newsock->ops = type->ops;
        newsock->state = SS_CONNECTED;
@@ -339,7 +334,6 @@ static const struct proto_ops alg_proto_ops = {
        .ioctl          =       sock_no_ioctl,
        .listen         =       sock_no_listen,
        .shutdown       =       sock_no_shutdown,
-       .getsockopt     =       sock_no_getsockopt,
        .mmap           =       sock_no_mmap,
        .sendpage       =       sock_no_sendpage,
        .sendmsg        =       sock_no_sendmsg,
index 535f1f8..5ebccbd 100644 (file)
@@ -178,8 +178,6 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
        if (IS_ERR(thread))
                goto err_put_larval;
 
-       wait_for_completion_interruptible(&larval->completion);
-
        return NOTIFY_STOP;
 
 err_put_larval:
index eb1910b..527d09a 100644 (file)
@@ -361,11 +361,9 @@ static struct proto_ops algif_aead_ops = {
        .ioctl          =       sock_no_ioctl,
        .listen         =       sock_no_listen,
        .shutdown       =       sock_no_shutdown,
-       .getsockopt     =       sock_no_getsockopt,
        .mmap           =       sock_no_mmap,
        .bind           =       sock_no_bind,
        .accept         =       sock_no_accept,
-       .setsockopt     =       sock_no_setsockopt,
 
        .release        =       af_alg_release,
        .sendmsg        =       aead_sendmsg,
@@ -384,7 +382,7 @@ static int aead_check_key(struct socket *sock)
        struct alg_sock *ask = alg_sk(sk);
 
        lock_sock(sk);
-       if (ask->refcnt)
+       if (!atomic_read(&ask->nokey_refcnt))
                goto unlock_child;
 
        psk = ask->parent;
@@ -396,11 +394,8 @@ static int aead_check_key(struct socket *sock)
        if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
                goto unlock;
 
-       if (!pask->refcnt++)
-               sock_hold(psk);
-
-       ask->refcnt = 1;
-       sock_put(psk);
+       atomic_dec(&pask->nokey_refcnt);
+       atomic_set(&ask->nokey_refcnt, 0);
 
        err = 0;
 
@@ -457,11 +452,9 @@ static struct proto_ops algif_aead_ops_nokey = {
        .ioctl          =       sock_no_ioctl,
        .listen         =       sock_no_listen,
        .shutdown       =       sock_no_shutdown,
-       .getsockopt     =       sock_no_getsockopt,
        .mmap           =       sock_no_mmap,
        .bind           =       sock_no_bind,
        .accept         =       sock_no_accept,
-       .setsockopt     =       sock_no_setsockopt,
 
        .release        =       af_alg_release,
        .sendmsg        =       aead_sendmsg_nokey,
index da1ffa4..50f7b22 100644 (file)
@@ -279,10 +279,8 @@ static struct proto_ops algif_hash_ops = {
        .ioctl          =       sock_no_ioctl,
        .listen         =       sock_no_listen,
        .shutdown       =       sock_no_shutdown,
-       .getsockopt     =       sock_no_getsockopt,
        .mmap           =       sock_no_mmap,
        .bind           =       sock_no_bind,
-       .setsockopt     =       sock_no_setsockopt,
 
        .release        =       af_alg_release,
        .sendmsg        =       hash_sendmsg,
@@ -301,7 +299,7 @@ static int hash_check_key(struct socket *sock)
        struct alg_sock *ask = alg_sk(sk);
 
        lock_sock(sk);
-       if (ask->refcnt)
+       if (!atomic_read(&ask->nokey_refcnt))
                goto unlock_child;
 
        psk = ask->parent;
@@ -313,11 +311,8 @@ static int hash_check_key(struct socket *sock)
        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                goto unlock;
 
-       if (!pask->refcnt++)
-               sock_hold(psk);
-
-       ask->refcnt = 1;
-       sock_put(psk);
+       atomic_dec(&pask->nokey_refcnt);
+       atomic_set(&ask->nokey_refcnt, 0);
 
        err = 0;
 
@@ -386,10 +381,8 @@ static struct proto_ops algif_hash_ops_nokey = {
        .ioctl          =       sock_no_ioctl,
        .listen         =       sock_no_listen,
        .shutdown       =       sock_no_shutdown,
-       .getsockopt     =       sock_no_getsockopt,
        .mmap           =       sock_no_mmap,
        .bind           =       sock_no_bind,
-       .setsockopt     =       sock_no_setsockopt,
 
        .release        =       af_alg_release,
        .sendmsg        =       hash_sendmsg_nokey,
index 087c0ad..6300e05 100644 (file)
@@ -101,11 +101,9 @@ static struct proto_ops algif_rng_ops = {
        .ioctl          =       sock_no_ioctl,
        .listen         =       sock_no_listen,
        .shutdown       =       sock_no_shutdown,
-       .getsockopt     =       sock_no_getsockopt,
        .mmap           =       sock_no_mmap,
        .bind           =       sock_no_bind,
        .accept         =       sock_no_accept,
-       .setsockopt     =       sock_no_setsockopt,
        .sendmsg        =       sock_no_sendmsg,
        .sendpage       =       sock_no_sendpage,
 
index e2c8ab4..c487887 100644 (file)
@@ -74,14 +74,10 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
                return PTR_ERR(areq);
 
        /* convert iovecs of output buffers into RX SGL */
-       err = af_alg_get_rsgl(sk, msg, flags, areq, -1, &len);
+       err = af_alg_get_rsgl(sk, msg, flags, areq, ctx->used, &len);
        if (err)
                goto free;
 
-       /* Process only as much RX buffers for which we have TX data */
-       if (len > ctx->used)
-               len = ctx->used;
-
        /*
         * If more buffers are to be expected to be processed, process only
         * full block size buffers.
@@ -192,11 +188,9 @@ static struct proto_ops algif_skcipher_ops = {
        .ioctl          =       sock_no_ioctl,
        .listen         =       sock_no_listen,
        .shutdown       =       sock_no_shutdown,
-       .getsockopt     =       sock_no_getsockopt,
        .mmap           =       sock_no_mmap,
        .bind           =       sock_no_bind,
        .accept         =       sock_no_accept,
-       .setsockopt     =       sock_no_setsockopt,
 
        .release        =       af_alg_release,
        .sendmsg        =       skcipher_sendmsg,
@@ -215,7 +209,7 @@ static int skcipher_check_key(struct socket *sock)
        struct alg_sock *ask = alg_sk(sk);
 
        lock_sock(sk);
-       if (ask->refcnt)
+       if (!atomic_read(&ask->nokey_refcnt))
                goto unlock_child;
 
        psk = ask->parent;
@@ -227,11 +221,8 @@ static int skcipher_check_key(struct socket *sock)
        if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                goto unlock;
 
-       if (!pask->refcnt++)
-               sock_hold(psk);
-
-       ask->refcnt = 1;
-       sock_put(psk);
+       atomic_dec(&pask->nokey_refcnt);
+       atomic_set(&ask->nokey_refcnt, 0);
 
        err = 0;
 
@@ -288,11 +279,9 @@ static struct proto_ops algif_skcipher_ops_nokey = {
        .ioctl          =       sock_no_ioctl,
        .listen         =       sock_no_listen,
        .shutdown       =       sock_no_shutdown,
-       .getsockopt     =       sock_no_getsockopt,
        .mmap           =       sock_no_mmap,
        .bind           =       sock_no_bind,
        .accept         =       sock_no_accept,
-       .setsockopt     =       sock_no_setsockopt,
 
        .release        =       af_alg_release,
        .sendmsg        =       skcipher_sendmsg_nokey,
index 37526eb..8d80d93 100644 (file)
@@ -1631,10 +1631,12 @@ static int drbg_uninstantiate(struct drbg_state *drbg)
        if (drbg->random_ready.func) {
                del_random_ready_callback(&drbg->random_ready);
                cancel_work_sync(&drbg->seed_work);
-               crypto_free_rng(drbg->jent);
-               drbg->jent = NULL;
        }
 
+       if (!IS_ERR_OR_NULL(drbg->jent))
+               crypto_free_rng(drbg->jent);
+       drbg->jent = NULL;
+
        if (drbg->d_ops)
                drbg->d_ops->crypto_fini(drbg);
        drbg_dealloc_state(drbg);
index ece8c1a..88c8af4 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/configfs.h>
 #include <linux/acpi.h>
+#include <linux/security.h>
 
 #include "acpica/accommon.h"
 #include "acpica/actables.h"
@@ -28,7 +29,10 @@ static ssize_t acpi_table_aml_write(struct config_item *cfg,
 {
        const struct acpi_table_header *header = data;
        struct acpi_table *table;
-       int ret;
+       int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
+
+       if (ret)
+               return ret;
 
        table = container_of(cfg, struct acpi_table, cfg);
 
index 5fab7e3..92b996a 100644 (file)
@@ -228,6 +228,7 @@ static const struct acpi_device_id int3407_device_ids[] = {
        {"INT3407", 0},
        {"INT3532", 0},
        {"INTC1047", 0},
+       {"INTC1050", 0},
        {"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
index 873e039..6287338 100644 (file)
@@ -25,8 +25,8 @@ static int acpi_fan_remove(struct platform_device *pdev);
 
 static const struct acpi_device_id fan_device_ids[] = {
        {"PNP0C0B", 0},
-       {"INT1044", 0},
        {"INT3404", 0},
+       {"INTC1044", 0},
        {"", 0},
 };
 MODULE_DEVICE_TABLE(acpi, fan_device_ids);
index 3a89909..76c668c 100644 (file)
@@ -938,13 +938,13 @@ static void __exit interrupt_stats_exit(void)
 }
 
 static ssize_t
-acpi_show_profile(struct device *dev, struct device_attribute *attr,
+acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr,
                  char *buf)
 {
        return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
 }
 
-static const struct device_attribute pm_profile_attr =
+static const struct kobj_attribute pm_profile_attr =
        __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
 
 static ssize_t hotplug_enabled_show(struct kobject *kobj,
index 57d3b2e..0b2c20f 100644 (file)
@@ -120,7 +120,7 @@ static const u32 tegra_ahb_gizmo[] = {
 struct tegra_ahb {
        void __iomem    *regs;
        struct device   *dev;
-       u32             ctx[0];
+       u32             ctx[];
 };
 
 static inline u32 gizmo_readl(struct tegra_ahb *ahb, u32 offset)
index e47c8a4..f50c5f1 100644 (file)
@@ -4686,8 +4686,15 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
 
 static void binder_free_proc(struct binder_proc *proc)
 {
+       struct binder_device *device;
+
        BUG_ON(!list_empty(&proc->todo));
        BUG_ON(!list_empty(&proc->delivered_death));
+       device = container_of(proc->context, struct binder_device, context);
+       if (refcount_dec_and_test(&device->ref)) {
+               kfree(proc->context->name);
+               kfree(device);
+       }
        binder_alloc_deferred_release(&proc->alloc);
        put_task_struct(proc->tsk);
        binder_stats_deleted(BINDER_STAT_PROC);
@@ -5406,7 +5413,6 @@ static int binder_node_release(struct binder_node *node, int refs)
 static void binder_deferred_release(struct binder_proc *proc)
 {
        struct binder_context *context = proc->context;
-       struct binder_device *device;
        struct rb_node *n;
        int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
 
@@ -5423,12 +5429,6 @@ static void binder_deferred_release(struct binder_proc *proc)
                context->binder_context_mgr_node = NULL;
        }
        mutex_unlock(&context->context_mgr_node_lock);
-       device = container_of(proc->context, struct binder_device, context);
-       if (refcount_dec_and_test(&device->ref)) {
-               kfree(context->name);
-               kfree(device);
-       }
-       proc->context = NULL;
        binder_inner_proc_lock(proc);
        /*
         * Make sure proc stays alive after we
index 69361ec..b1cd4d9 100644 (file)
@@ -42,7 +42,6 @@
 #include <linux/workqueue.h>
 #include <linux/scatterlist.h>
 #include <linux/io.h>
-#include <linux/async.h>
 #include <linux/log2.h>
 #include <linux/slab.h>
 #include <linux/glob.h>
@@ -5778,7 +5777,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
        /* perform each probe asynchronously */
        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
-               async_schedule(async_port_probe, ap);
+               ap->cookie = async_schedule(async_port_probe, ap);
        }
 
        return 0;
@@ -5920,11 +5919,11 @@ void ata_host_detach(struct ata_host *host)
 {
        int i;
 
-       /* Ensure ata_port probe has completed */
-       async_synchronize_full();
-
-       for (i = 0; i < host->n_ports; i++)
+       for (i = 0; i < host->n_ports; i++) {
+               /* Ensure ata_port probe has completed */
+               async_synchronize_cookie(host->ports[i]->cookie + 1);
                ata_port_detach(host->ports[i]);
+       }
 
        /* the host is dead now, dissociate ACPI */
        ata_acpi_dissociate(host);
index 435781a..4633608 100644 (file)
@@ -3684,12 +3684,13 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
 {
        struct scsi_cmnd *scmd = qc->scsicmd;
        const u8 *cdb = scmd->cmnd;
-       const u8 *p;
        u8 pg, spg;
        unsigned six_byte, pg_len, hdr_len, bd_len;
        int len;
        u16 fp = (u16)-1;
        u8 bp = 0xff;
+       u8 buffer[64];
+       const u8 *p = buffer;
 
        VPRINTK("ENTER\n");
 
@@ -3723,12 +3724,14 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
        if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len)
                goto invalid_param_len;
 
-       p = page_address(sg_page(scsi_sglist(scmd)));
-
        /* Move past header and block descriptors.  */
        if (len < hdr_len)
                goto invalid_param_len;
 
+       if (!sg_copy_to_buffer(scsi_sglist(scmd), scsi_sg_count(scmd),
+                              buffer, sizeof(buffer)))
+               goto invalid_param_len;
+
        if (six_byte)
                bd_len = p[3];
        else
index 980aacd..141ac60 100644 (file)
@@ -907,7 +907,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
        pm_runtime_enable(dev);
        ret = pm_runtime_get_sync(dev);
        if (ret < 0)
-               goto err_pm_disable;
+               goto err_pm_put;
 
        host = ata_host_alloc(dev, 1);
        if (!host) {
@@ -937,7 +937,6 @@ static int sata_rcar_probe(struct platform_device *pdev)
 
 err_pm_put:
        pm_runtime_put(dev);
-err_pm_disable:
        pm_runtime_disable(dev);
        return ret;
 }
@@ -991,8 +990,10 @@ static int sata_rcar_resume(struct device *dev)
        int ret;
 
        ret = pm_runtime_get_sync(dev);
-       if (ret < 0)
+       if (ret < 0) {
+               pm_runtime_put(dev);
                return ret;
+       }
 
        if (priv->type == RCAR_GEN3_SATA) {
                sata_rcar_init_module(priv);
@@ -1017,8 +1018,10 @@ static int sata_rcar_restore(struct device *dev)
        int ret;
 
        ret = pm_runtime_get_sync(dev);
-       if (ret < 0)
+       if (ret < 0) {
+               pm_runtime_put(dev);
                return ret;
+       }
 
        sata_rcar_setup_port(host);
 
index 8007e05..b9370bb 100644 (file)
@@ -306,8 +306,9 @@ config ATM_IA
          for more info about the cards. Say Y (or M to compile as a module
          named iphase) here if you have one of these cards.
 
-         See the file <file:Documentation/networking/iphase.rst> for further
-         details.
+         See the file
+         <file:Documentation/networking/device_drivers/atm/iphase.rst>
+         for further details.
 
 config ATM_IA_DEBUG
        bool "Enable debugging messages"
@@ -336,7 +337,8 @@ config ATM_FORE200E
          on PCI and SBUS hosts. Say Y (or M to compile as a module
          named fore_200e) here if you have one of these ATM adapters.
 
-         See the file <file:Documentation/networking/fore200e.rst> for
+         See the file
+         <file:Documentation/networking/device_drivers/atm/fore200e.rst> for
          further details.
 
 config ATM_FORE200E_USE_TASKLET
index 17d47ad..b3d8e00 100644 (file)
@@ -2027,21 +2027,6 @@ static int eni_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
        return dev->phy->ioctl(dev,cmd,arg);
 }
 
-
-static int eni_getsockopt(struct atm_vcc *vcc,int level,int optname,
-    void __user *optval,int optlen)
-{
-       return -EINVAL;
-}
-
-
-static int eni_setsockopt(struct atm_vcc *vcc,int level,int optname,
-    void __user *optval,unsigned int optlen)
-{
-       return -EINVAL;
-}
-
-
 static int eni_send(struct atm_vcc *vcc,struct sk_buff *skb)
 {
        enum enq_res res;
@@ -2215,8 +2200,6 @@ static const struct atmdev_ops ops = {
        .open           = eni_open,
        .close          = eni_close,
        .ioctl          = eni_ioctl,
-       .getsockopt     = eni_getsockopt,
-       .setsockopt     = eni_setsockopt,
        .send           = eni_send,
        .phy_put        = eni_phy_put,
        .phy_get        = eni_phy_get,
index cc87004..2ca9ec8 100644 (file)
@@ -1277,8 +1277,6 @@ static const struct atmdev_ops ops = {
        .send =         fs_send,
        .owner =        THIS_MODULE,
        /* ioctl:          fs_ioctl, */
-       /* getsockopt:     fs_getsockopt, */
-       /* setsockopt:     fs_setsockopt, */
        /* change_qos:     fs_change_qos, */
 
        /* For now implement these internally here... */  
index f4ad7ce..a81bc49 100644 (file)
@@ -1710,31 +1710,6 @@ fore200e_getstats(struct fore200e* fore200e)
     return 0;
 }
 
-
-static int
-fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
-{
-    /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
-
-    DPRINTK(2, "getsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
-           vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
-
-    return -EINVAL;
-}
-
-
-static int
-fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, unsigned int optlen)
-{
-    /* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */
-    
-    DPRINTK(2, "setsockopt %d.%d.%d, level = %d, optname = 0x%x, optval = 0x%p, optlen = %d\n",
-           vcc->itf, vcc->vpi, vcc->vci, level, optname, optval, optlen);
-    
-    return -EINVAL;
-}
-
-
 #if 0 /* currently unused */
 static int
 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
@@ -3026,8 +3001,6 @@ static const struct atmdev_ops fore200e_ops = {
        .open       = fore200e_open,
        .close      = fore200e_close,
        .ioctl      = fore200e_ioctl,
-       .getsockopt = fore200e_getsockopt,
-       .setsockopt = fore200e_setsockopt,
        .send       = fore200e_send,
        .change_qos = fore200e_change_qos,
        .proc_read  = fore200e_proc_read,
index e5da51f..4f2951c 100644 (file)
@@ -2528,46 +2528,6 @@ static void hrz_close (struct atm_vcc * atm_vcc) {
 }
 
 #if 0
-static int hrz_getsockopt (struct atm_vcc * atm_vcc, int level, int optname,
-                          void *optval, int optlen) {
-  hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
-  PRINTD (DBG_FLOW|DBG_VCC, "hrz_getsockopt");
-  switch (level) {
-    case SOL_SOCKET:
-      switch (optname) {
-//     case SO_BCTXOPT:
-//       break;
-//     case SO_BCRXOPT:
-//       break;
-       default:
-         return -ENOPROTOOPT;
-      };
-      break;
-  }
-  return -EINVAL;
-}
-
-static int hrz_setsockopt (struct atm_vcc * atm_vcc, int level, int optname,
-                          void *optval, unsigned int optlen) {
-  hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
-  PRINTD (DBG_FLOW|DBG_VCC, "hrz_setsockopt");
-  switch (level) {
-    case SOL_SOCKET:
-      switch (optname) {
-//     case SO_BCTXOPT:
-//       break;
-//     case SO_BCRXOPT:
-//       break;
-       default:
-         return -ENOPROTOOPT;
-      };
-      break;
-  }
-  return -EINVAL;
-}
-#endif
-
-#if 0
 static int hrz_ioctl (struct atm_dev * atm_dev, unsigned int cmd, void *arg) {
   hrz_dev * dev = HRZ_DEV(atm_dev);
   PRINTD (DBG_FLOW, "hrz_ioctl");
index 8c7a996..eef637f 100644 (file)
@@ -2880,20 +2880,6 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
    return 0;  
 }  
   
-static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,   
-       void __user *optval, int optlen)  
-{  
-       IF_EVENT(printk(">ia_getsockopt\n");)  
-       return -EINVAL;  
-}  
-  
-static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,   
-       void __user *optval, unsigned int optlen)  
-{  
-       IF_EVENT(printk(">ia_setsockopt\n");)  
-       return -EINVAL;  
-}  
-  
 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
         IADEV *iadev;
         struct dle *wr_ptr;
@@ -3164,8 +3150,6 @@ static const struct atmdev_ops ops = {
        .open           = ia_open,  
        .close          = ia_close,  
        .ioctl          = ia_ioctl,  
-       .getsockopt     = ia_getsockopt,  
-       .setsockopt     = ia_setsockopt,  
        .send           = ia_send,  
        .phy_put        = ia_phy_put,  
        .phy_get        = ia_phy_get,  
index 645a6bc..986c131 100644 (file)
@@ -2537,8 +2537,6 @@ static const struct atmdev_ops ops = {
        .dev_close      = lanai_dev_close,
        .open           = lanai_open,
        .close          = lanai_close,
-       .getsockopt     = NULL,
-       .setsockopt     = NULL,
        .send           = lanai_send,
        .phy_put        = NULL,
        .phy_get        = NULL,
index c32f7dd..94fbc3a 100644 (file)
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Driver for the Solos PCI ADSL2+ card, designed to support Linux by
- *  Traverse Technologies -- http://www.traverse.com.au/
+ *  Traverse Technologies -- https://www.traverse.com.au/
  *  Xrio Limited          -- http://www.xrio.com/
  *
  * Copyright Â© 2008 Traverse Technologies
@@ -1179,8 +1179,6 @@ static const struct atmdev_ops fpga_ops = {
        .open =         popen,
        .close =        pclose,
        .ioctl =        NULL,
-       .getsockopt =   NULL,
-       .setsockopt =   NULL,
        .send =         psend,
        .send_oam =     NULL,
        .phy_put =      NULL,
index 57f97b9..2788b98 100644 (file)
@@ -1515,20 +1515,6 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
        }
 }
 
-
-static int zatm_getsockopt(struct atm_vcc *vcc,int level,int optname,
-    void __user *optval,int optlen)
-{
-       return -EINVAL;
-}
-
-
-static int zatm_setsockopt(struct atm_vcc *vcc,int level,int optname,
-    void __user *optval,unsigned int optlen)
-{
-       return -EINVAL;
-}
-
 static int zatm_send(struct atm_vcc *vcc,struct sk_buff *skb)
 {
        int error;
@@ -1582,8 +1568,6 @@ static const struct atmdev_ops ops = {
        .open           = zatm_open,
        .close          = zatm_close,
        .ioctl          = zatm_ioctl,
-       .getsockopt     = zatm_getsockopt,
-       .setsockopt     = zatm_setsockopt,
        .send           = zatm_send,
        .phy_put        = zatm_phy_put,
        .phy_get        = zatm_phy_get,
index 977d27b..a97f33d 100644 (file)
@@ -265,14 +265,14 @@ static struct notifier_block pm_trace_nb = {
        .notifier_call = pm_trace_notify,
 };
 
-static int early_resume_init(void)
+static int __init early_resume_init(void)
 {
        hash_value_early_read = read_magic_time();
        register_pm_notifier(&pm_trace_nb);
        return 0;
 }
 
-static int late_resume_init(void)
+static int __init late_resume_init(void)
 {
        unsigned int val = hash_value_early_read;
        unsigned int user, file, dev;
index c472f62..06a7968 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/delay.h>
 #include <linux/log2.h>
 #include <linux/hwspinlock.h>
+#include <asm/unaligned.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -249,22 +250,20 @@ static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
 
 static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
 {
-       __be16 *b = buf;
-
-       b[0] = cpu_to_be16(val << shift);
+       put_unaligned_be16(val << shift, buf);
 }
 
 static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
 {
-       __le16 *b = buf;
-
-       b[0] = cpu_to_le16(val << shift);
+       put_unaligned_le16(val << shift, buf);
 }
 
 static void regmap_format_16_native(void *buf, unsigned int val,
                                    unsigned int shift)
 {
-       *(u16 *)buf = val << shift;
+       u16 v = val << shift;
+
+       memcpy(buf, &v, sizeof(v));
 }
 
 static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
@@ -280,43 +279,39 @@ static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
 
 static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
 {
-       __be32 *b = buf;
-
-       b[0] = cpu_to_be32(val << shift);
+       put_unaligned_be32(val << shift, buf);
 }
 
 static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
 {
-       __le32 *b = buf;
-
-       b[0] = cpu_to_le32(val << shift);
+       put_unaligned_le32(val << shift, buf);
 }
 
 static void regmap_format_32_native(void *buf, unsigned int val,
                                    unsigned int shift)
 {
-       *(u32 *)buf = val << shift;
+       u32 v = val << shift;
+
+       memcpy(buf, &v, sizeof(v));
 }
 
 #ifdef CONFIG_64BIT
 static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
 {
-       __be64 *b = buf;
-
-       b[0] = cpu_to_be64((u64)val << shift);
+       put_unaligned_be64((u64) val << shift, buf);
 }
 
 static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
 {
-       __le64 *b = buf;
-
-       b[0] = cpu_to_le64((u64)val << shift);
+       put_unaligned_le64((u64) val << shift, buf);
 }
 
 static void regmap_format_64_native(void *buf, unsigned int val,
                                    unsigned int shift)
 {
-       *(u64 *)buf = (u64)val << shift;
+       u64 v = (u64) val << shift;
+
+       memcpy(buf, &v, sizeof(v));
 }
 #endif
 
@@ -333,35 +328,34 @@ static unsigned int regmap_parse_8(const void *buf)
 
 static unsigned int regmap_parse_16_be(const void *buf)
 {
-       const __be16 *b = buf;
-
-       return be16_to_cpu(b[0]);
+       return get_unaligned_be16(buf);
 }
 
 static unsigned int regmap_parse_16_le(const void *buf)
 {
-       const __le16 *b = buf;
-
-       return le16_to_cpu(b[0]);
+       return get_unaligned_le16(buf);
 }
 
 static void regmap_parse_16_be_inplace(void *buf)
 {
-       __be16 *b = buf;
+       u16 v = get_unaligned_be16(buf);
 
-       b[0] = be16_to_cpu(b[0]);
+       memcpy(buf, &v, sizeof(v));
 }
 
 static void regmap_parse_16_le_inplace(void *buf)
 {
-       __le16 *b = buf;
+       u16 v = get_unaligned_le16(buf);
 
-       b[0] = le16_to_cpu(b[0]);
+       memcpy(buf, &v, sizeof(v));
 }
 
 static unsigned int regmap_parse_16_native(const void *buf)
 {
-       return *(u16 *)buf;
+       u16 v;
+
+       memcpy(&v, buf, sizeof(v));
+       return v;
 }
 
 static unsigned int regmap_parse_24(const void *buf)
@@ -376,69 +370,67 @@ static unsigned int regmap_parse_24(const void *buf)
 
 static unsigned int regmap_parse_32_be(const void *buf)
 {
-       const __be32 *b = buf;
-
-       return be32_to_cpu(b[0]);
+       return get_unaligned_be32(buf);
 }
 
 static unsigned int regmap_parse_32_le(const void *buf)
 {
-       const __le32 *b = buf;
-
-       return le32_to_cpu(b[0]);
+       return get_unaligned_le32(buf);
 }
 
 static void regmap_parse_32_be_inplace(void *buf)
 {
-       __be32 *b = buf;
+       u32 v = get_unaligned_be32(buf);
 
-       b[0] = be32_to_cpu(b[0]);
+       memcpy(buf, &v, sizeof(v));
 }
 
 static void regmap_parse_32_le_inplace(void *buf)
 {
-       __le32 *b = buf;
+       u32 v = get_unaligned_le32(buf);
 
-       b[0] = le32_to_cpu(b[0]);
+       memcpy(buf, &v, sizeof(v));
 }
 
 static unsigned int regmap_parse_32_native(const void *buf)
 {
-       return *(u32 *)buf;
+       u32 v;
+
+       memcpy(&v, buf, sizeof(v));
+       return v;
 }
 
 #ifdef CONFIG_64BIT
 static unsigned int regmap_parse_64_be(const void *buf)
 {
-       const __be64 *b = buf;
-
-       return be64_to_cpu(b[0]);
+       return get_unaligned_be64(buf);
 }
 
 static unsigned int regmap_parse_64_le(const void *buf)
 {
-       const __le64 *b = buf;
-
-       return le64_to_cpu(b[0]);
+       return get_unaligned_le64(buf);
 }
 
 static void regmap_parse_64_be_inplace(void *buf)
 {
-       __be64 *b = buf;
+       u64 v =  get_unaligned_be64(buf);
 
-       b[0] = be64_to_cpu(b[0]);
+       memcpy(buf, &v, sizeof(v));
 }
 
 static void regmap_parse_64_le_inplace(void *buf)
 {
-       __le64 *b = buf;
+       u64 v = get_unaligned_le64(buf);
 
-       b[0] = le64_to_cpu(b[0]);
+       memcpy(buf, &v, sizeof(v));
 }
 
 static unsigned int regmap_parse_64_native(const void *buf)
 {
-       return *(u64 *)buf;
+       u64 v;
+
+       memcpy(&v, buf, sizeof(v));
+       return v;
 }
 #endif
 
@@ -1357,6 +1349,7 @@ void regmap_exit(struct regmap *map)
        if (map->hwlock)
                hwspin_lock_free(map->hwlock);
        kfree_const(map->name);
+       kfree(map->patch);
        kfree(map);
 }
 EXPORT_SYMBOL_GPL(regmap_exit);
@@ -2944,8 +2937,9 @@ EXPORT_SYMBOL_GPL(regmap_update_bits_base);
  * @reg: Register to read from
  * @bits: Bits to test
  *
- * Returns -1 if the underlying regmap_read() fails, 0 if at least one of the
- * tested bits is not set and 1 if all tested bits are set.
+ * Returns 0 if at least one of the tested bits is not set, 1 if all tested
+ * bits are set and a negative error number if the underlying regmap_read()
+ * fails.
  */
 int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
 {
index a5df3d1..8a1e470 100644 (file)
@@ -122,6 +122,7 @@ static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
 static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
 {
        struct gpio_chip *chip = &cc->gpio;
+       struct gpio_irq_chip *girq = &chip->irq;
        int hwirq, err;
 
        if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
@@ -136,15 +137,13 @@ static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
        bcma_chipco_gpio_intmask(cc, ~0, 0);
        bcma_cc_set32(cc, BCMA_CC_IRQMASK, BCMA_CC_IRQ_GPIO);
 
-       err =  gpiochip_irqchip_add(chip,
-                                   &bcma_gpio_irq_chip,
-                                   0,
-                                   handle_simple_irq,
-                                   IRQ_TYPE_NONE);
-       if (err) {
-               free_irq(hwirq, cc);
-               return err;
-       }
+       girq->chip = &bcma_gpio_irq_chip;
+       /* This will let us handle the parent IRQ in the driver */
+       girq->parent_handler = NULL;
+       girq->num_parents = 0;
+       girq->parents = NULL;
+       girq->default_type = IRQ_TYPE_NONE;
+       girq->handler = handle_simple_irq;
 
        return 0;
 }
@@ -212,13 +211,13 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
        else
                chip->base              = -1;
 
-       err = gpiochip_add_data(chip, cc);
+       err = bcma_gpio_irq_init(cc);
        if (err)
                return err;
 
-       err = bcma_gpio_irq_init(cc);
+       err = gpiochip_add_data(chip, cc);
        if (err) {
-               gpiochip_remove(chip);
+               bcma_gpio_irq_exit(cc);
                return err;
        }
 
index 1a942f7..d49e7c0 100644 (file)
@@ -219,7 +219,7 @@ static s32 bcma_erom_get_mst_port(struct bcma_bus *bus, u32 __iomem **eromptr)
 static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
                                  u32 type, u8 port)
 {
-       u32 addrl, addrh, sizeh = 0;
+       u32 addrl;
        u32 size;
 
        u32 ent = bcma_erom_get_ent(bus, eromptr);
@@ -233,14 +233,12 @@ static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
 
        addrl = ent & SCAN_ADDR_ADDR;
        if (ent & SCAN_ADDR_AG32)
-               addrh = bcma_erom_get_ent(bus, eromptr);
-       else
-               addrh = 0;
+               bcma_erom_get_ent(bus, eromptr);
 
        if ((ent & SCAN_ADDR_SZ) == SCAN_ADDR_SZ_SZD) {
                size = bcma_erom_get_ent(bus, eromptr);
                if (size & SCAN_SIZE_SG32)
-                       sizeh = bcma_erom_get_ent(bus, eromptr);
+                       bcma_erom_get_ent(bus, eromptr);
        }
 
        return addrl;
index 14345a8..33d0831 100644 (file)
@@ -620,7 +620,7 @@ struct fifo_buffer {
        unsigned int head_index;
        unsigned int size;
        int total; /* sum of all values */
-       int values[0];
+       int values[];
 };
 extern struct fifo_buffer *fifo_alloc(unsigned int fifo_size);
 
index e6fc5ad..dea59c9 100644 (file)
@@ -271,7 +271,7 @@ struct p_rs_param {
        u32 resync_rate;
 
              /* Since protocol version 88 and higher. */
-       char verify_alg[0];
+       char verify_alg[];
 } __packed;
 
 struct p_rs_param_89 {
@@ -305,7 +305,7 @@ struct p_protocol {
        u32 two_primaries;
 
        /* Since protocol version 87 and higher. */
-       char integrity_alg[0];
+       char integrity_alg[];
 
 } __packed;
 
@@ -360,7 +360,7 @@ struct p_sizes {
        u16         dds_flags; /* use enum dds_flags here. */
 
        /* optional queue_limits if (agreed_features & DRBD_FF_WSAME) */
-       struct o_qlim qlim[0];
+       struct o_qlim qlim[];
 } __packed;
 
 struct p_state {
@@ -409,7 +409,7 @@ struct p_compressed_bm {
         */
        u8 encoding;
 
-       u8 code[0];
+       u8 code[];
 } __packed;
 
 struct p_delay_probe93 {
index c33bbbf..475e1a7 100644 (file)
@@ -1368,14 +1368,14 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
            lo->lo_sizelimit != info->lo_sizelimit) {
                size_changed = true;
                sync_blockdev(lo->lo_device);
-               kill_bdev(lo->lo_device);
+               invalidate_bdev(lo->lo_device);
        }
 
        /* I/O need to be drained during transfer transition */
        blk_mq_freeze_queue(lo->lo_queue);
 
        if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) {
-               /* If any pages were dirtied after kill_bdev(), try again */
+               /* If any pages were dirtied after invalidate_bdev(), try again */
                err = -EAGAIN;
                pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
                        __func__, lo->lo_number, lo->lo_file_name,
@@ -1615,11 +1615,11 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
                return 0;
 
        sync_blockdev(lo->lo_device);
-       kill_bdev(lo->lo_device);
+       invalidate_bdev(lo->lo_device);
 
        blk_mq_freeze_queue(lo->lo_queue);
 
-       /* kill_bdev should have truncated all the pages */
+       /* invalidate_bdev should have truncated all the pages */
        if (lo->lo_device->bd_inode->i_mapping->nrpages) {
                err = -EAGAIN;
                pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
index 43cff01..ce7e9f2 100644 (file)
@@ -1033,25 +1033,26 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
             test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
                dev_err(disk_to_dev(nbd->disk),
                        "Device being setup by another task");
-               sockfd_put(sock);
-               return -EBUSY;
+               err = -EBUSY;
+               goto put_socket;
+       }
+
+       nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
+       if (!nsock) {
+               err = -ENOMEM;
+               goto put_socket;
        }
 
        socks = krealloc(config->socks, (config->num_connections + 1) *
                         sizeof(struct nbd_sock *), GFP_KERNEL);
        if (!socks) {
-               sockfd_put(sock);
-               return -ENOMEM;
+               kfree(nsock);
+               err = -ENOMEM;
+               goto put_socket;
        }
 
        config->socks = socks;
 
-       nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
-       if (!nsock) {
-               sockfd_put(sock);
-               return -ENOMEM;
-       }
-
        nsock->fallback_index = -1;
        nsock->dead = false;
        mutex_init(&nsock->tx_lock);
@@ -1063,6 +1064,10 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
        atomic_inc(&config->live_connections);
 
        return 0;
+
+put_socket:
+       sockfd_put(sock);
+       return err;
 }
 
 static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
index 7420648..4f61e92 100644 (file)
@@ -1451,8 +1451,10 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
 static void rbd_osd_format_read(struct ceph_osd_request *osd_req)
 {
        struct rbd_obj_request *obj_request = osd_req->r_priv;
+       struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
+       struct ceph_options *opt = rbd_dev->rbd_client->client->options;
 
-       osd_req->r_flags = CEPH_OSD_FLAG_READ;
+       osd_req->r_flags = CEPH_OSD_FLAG_READ | opt->read_from_replica;
        osd_req->r_snapid = obj_request->img_request->snap_id;
 }
 
index 9d21bf0..980df85 100644 (file)
@@ -878,6 +878,7 @@ out_put_disk:
        put_disk(vblk->disk);
 out_free_vq:
        vdev->config->del_vqs(vdev);
+       kfree(vblk->vqs);
 out_free_vblk:
        kfree(vblk);
 out_free_index:
index 3affd18..bb54fb5 100644 (file)
@@ -221,6 +221,35 @@ static u32 sysc_read_sysstatus(struct sysc *ddata)
        return sysc_read(ddata, offset);
 }
 
+/* Poll on reset status */
+static int sysc_wait_softreset(struct sysc *ddata)
+{
+       u32 sysc_mask, syss_done, rstval;
+       int syss_offset, error = 0;
+
+       syss_offset = ddata->offsets[SYSC_SYSSTATUS];
+       sysc_mask = BIT(ddata->cap->regbits->srst_shift);
+
+       if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
+               syss_done = 0;
+       else
+               syss_done = ddata->cfg.syss_mask;
+
+       if (syss_offset >= 0) {
+               error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval,
+                                          (rstval & ddata->cfg.syss_mask) ==
+                                          syss_done,
+                                          100, MAX_MODULE_SOFTRESET_WAIT);
+
+       } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
+               error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval,
+                                          !(rstval & sysc_mask),
+                                          100, MAX_MODULE_SOFTRESET_WAIT);
+       }
+
+       return error;
+}
+
 static int sysc_add_named_clock_from_child(struct sysc *ddata,
                                           const char *name,
                                           const char *optfck_name)
@@ -925,18 +954,47 @@ static int sysc_enable_module(struct device *dev)
        struct sysc *ddata;
        const struct sysc_regbits *regbits;
        u32 reg, idlemodes, best_mode;
+       int error;
 
        ddata = dev_get_drvdata(dev);
+
+       /*
+        * Some modules like DSS reset automatically on idle. Enable optional
+        * reset clocks and wait for OCP softreset to complete.
+        */
+       if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) {
+               error = sysc_enable_opt_clocks(ddata);
+               if (error) {
+                       dev_err(ddata->dev,
+                               "Optional clocks failed for enable: %i\n",
+                               error);
+                       return error;
+               }
+       }
+       error = sysc_wait_softreset(ddata);
+       if (error)
+               dev_warn(ddata->dev, "OCP softreset timed out\n");
+       if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
+               sysc_disable_opt_clocks(ddata);
+
+       /*
+        * Some subsystem private interconnects, like DSS top level module,
+        * need only the automatic OCP softreset handling with no sysconfig
+        * register bits to configure.
+        */
        if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
                return 0;
 
        regbits = ddata->cap->regbits;
        reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
 
-       /* Set CLOCKACTIVITY, we only use it for ick */
+       /*
+        * Set CLOCKACTIVITY, we only use it for ick. And we only configure it
+        * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware
+        * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag.
+        */
        if (regbits->clkact_shift >= 0 &&
-           (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT ||
-            ddata->cfg.sysc_val & BIT(regbits->clkact_shift)))
+           (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT))
                reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift;
 
        /* Set SIDLE mode */
@@ -991,6 +1049,9 @@ set_autoidle:
                sysc_write_sysconfig(ddata, reg);
        }
 
+       /* Flush posted write */
+       sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
        if (ddata->module_enable_quirk)
                ddata->module_enable_quirk(ddata);
 
@@ -1071,6 +1132,9 @@ set_sidle:
                reg |= 1 << regbits->autoidle_shift;
        sysc_write_sysconfig(ddata, reg);
 
+       /* Flush posted write */
+       sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
        return 0;
 }
 
@@ -1488,7 +1552,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
        bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false;
        const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1);
        int manager_count;
-       bool framedonetv_irq;
+       bool framedonetv_irq = true;
        u32 val, irq_mask = 0;
 
        switch (sysc_soc->soc) {
@@ -1505,6 +1569,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
                break;
        case SOC_AM4:
                manager_count = 1;
+               framedonetv_irq = false;
                break;
        case SOC_UNKNOWN:
        default:
@@ -1822,11 +1887,10 @@ static int sysc_legacy_init(struct sysc *ddata)
  */
 static int sysc_reset(struct sysc *ddata)
 {
-       int sysc_offset, syss_offset, sysc_val, rstval, error = 0;
-       u32 sysc_mask, syss_done;
+       int sysc_offset, sysc_val, error;
+       u32 sysc_mask;
 
        sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
-       syss_offset = ddata->offsets[SYSC_SYSSTATUS];
 
        if (ddata->legacy_mode ||
            ddata->cap->regbits->srst_shift < 0 ||
@@ -1835,11 +1899,6 @@ static int sysc_reset(struct sysc *ddata)
 
        sysc_mask = BIT(ddata->cap->regbits->srst_shift);
 
-       if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
-               syss_done = 0;
-       else
-               syss_done = ddata->cfg.syss_mask;
-
        if (ddata->pre_reset_quirk)
                ddata->pre_reset_quirk(ddata);
 
@@ -1856,18 +1915,9 @@ static int sysc_reset(struct sysc *ddata)
        if (ddata->post_reset_quirk)
                ddata->post_reset_quirk(ddata);
 
-       /* Poll on reset status */
-       if (syss_offset >= 0) {
-               error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval,
-                                          (rstval & ddata->cfg.syss_mask) ==
-                                          syss_done,
-                                          100, MAX_MODULE_SOFTRESET_WAIT);
-
-       } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
-               error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval,
-                                          !(rstval & sysc_mask),
-                                          100, MAX_MODULE_SOFTRESET_WAIT);
-       }
+       error = sysc_wait_softreset(ddata);
+       if (error)
+               dev_warn(ddata->dev, "OCP softreset timed out\n");
 
        if (ddata->reset_done_quirk)
                ddata->reset_done_quirk(ddata);
index e2330e7..0016170 100644 (file)
@@ -244,6 +244,7 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                dev_err(dev, "Failed to enable SA power-domain\n");
+               pm_runtime_put_noidle(dev);
                pm_runtime_disable(dev);
                return ret;
        }
index 31cae88..934c92d 100644 (file)
@@ -171,7 +171,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
                        if (!ptr)
                                goto failed;
 
-                       probe = probe_kernel_read(bounce, ptr, sz);
+                       probe = copy_from_kernel_nofault(bounce, ptr, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (probe)
                                goto failed;
index 35333b6..7c617ed 100644 (file)
@@ -210,7 +210,7 @@ static int st33zp24_i2c_request_resources(struct i2c_client *client)
 
 /*
  * st33zp24_i2c_probe initialize the TPM device
- * @param: client, the i2c_client drescription (TPM I2C description).
+ * @param: client, the i2c_client description (TPM I2C description).
  * @param: id, the i2c_device_id struct.
  * @return: 0 in case of success.
  *      -1 in other case.
index 26e09de..a75dafd 100644 (file)
@@ -329,7 +329,7 @@ static int st33zp24_spi_request_resources(struct spi_device *dev)
 
 /*
  * st33zp24_spi_probe initialize the TPM device
- * @param: dev, the spi_device drescription (TPM SPI description).
+ * @param: dev, the spi_device description (TPM SPI description).
  * @return: 0 in case of success.
  *      or a negative value describing the error.
  */
@@ -378,7 +378,7 @@ static int st33zp24_spi_probe(struct spi_device *dev)
 
 /*
  * st33zp24_spi_remove remove the TPM device
- * @param: client, the spi_device drescription (TPM SPI description).
+ * @param: client, the spi_device description (TPM SPI description).
  * @return: 0 in case of success.
  */
 static int st33zp24_spi_remove(struct spi_device *dev)
index 37bb13f..4ec10ab 100644 (file)
@@ -502,7 +502,7 @@ static const struct tpm_class_ops st33zp24_tpm = {
 
 /*
  * st33zp24_probe initialize the TPM device
- * @param: client, the i2c_client drescription (TPM I2C description).
+ * @param: client, the i2c_client description (TPM I2C description).
  * @param: id, the i2c_device_id struct.
  * @return: 0 in case of success.
  *      -1 in other case.
index 87f4493..1784530 100644 (file)
@@ -189,15 +189,6 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
                goto out;
        }
 
-       /* atomic tpm command send and result receive. We only hold the ops
-        * lock during this period so that the tpm can be unregistered even if
-        * the char dev is held open.
-        */
-       if (tpm_try_get_ops(priv->chip)) {
-               ret = -EPIPE;
-               goto out;
-       }
-
        priv->response_length = 0;
        priv->response_read = false;
        *off = 0;
@@ -211,11 +202,19 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
        if (file->f_flags & O_NONBLOCK) {
                priv->command_enqueued = true;
                queue_work(tpm_dev_wq, &priv->async_work);
-               tpm_put_ops(priv->chip);
                mutex_unlock(&priv->buffer_mutex);
                return size;
        }
 
+       /* atomic tpm command send and result receive. We only hold the ops
+        * lock during this period so that the tpm can be unregistered even if
+        * the char dev is held open.
+        */
+       if (tpm_try_get_ops(priv->chip)) {
+               ret = -EPIPE;
+               goto out;
+       }
+
        ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
                               sizeof(priv->data_buffer));
        tpm_put_ops(priv->chip);
index 09fe452..994385b 100644 (file)
@@ -683,13 +683,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
        if (rc)
                goto init_irq_cleanup;
 
-       if (!strcmp(id->compat, "IBM,vtpm20")) {
-               chip->flags |= TPM_CHIP_FLAG_TPM2;
-               rc = tpm2_get_cc_attrs_tbl(chip);
-               if (rc)
-                       goto init_irq_cleanup;
-       }
-
        if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
                                ibmvtpm->rtce_buf != NULL,
                                HZ)) {
@@ -697,6 +690,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
                goto init_irq_cleanup;
        }
 
+       if (!strcmp(id->compat, "IBM,vtpm20")) {
+               chip->flags |= TPM_CHIP_FLAG_TPM2;
+               rc = tpm2_get_cc_attrs_tbl(chip);
+               if (rc)
+                       goto init_irq_cleanup;
+       }
+
        return tpm_chip_register(chip);
 init_irq_cleanup:
        do {
index e7df342..0b21496 100644 (file)
@@ -235,6 +235,13 @@ static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
        return tpm_tis_init(&pnp_dev->dev, &tpm_info);
 }
 
+/*
+ * There is a known bug caused by 93e1b7d42e1e ("[PATCH] tpm: add HID module
+ * parameter"). This commit added IFX0102 device ID, which is also used by
+ * tpm_infineon but ignored to add quirks to probe which driver ought to be
+ * used.
+ */
+
 static struct pnp_device_id tpm_pnp_tbl[] = {
        {"PNP0C31", 0},         /* TPM */
        {"ATM1200", 0},         /* Atmel */
index 2435216..65ab1b0 100644 (file)
@@ -1085,7 +1085,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
 
        return 0;
 out_err:
-       if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
+       if (chip->ops->clk_enable != NULL)
                chip->ops->clk_enable(chip, false);
 
        tpm_tis_remove(chip);
index d967559..3856f6e 100644 (file)
@@ -53,8 +53,6 @@ static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
 
        if ((phy->iobuf[3] & 0x01) == 0) {
                // handle SPI wait states
-               phy->iobuf[0] = 0;
-
                for (i = 0; i < TPM_RETRY; i++) {
                        spi_xfer->len = 1;
                        spi_message_init(&m);
@@ -104,6 +102,8 @@ int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
                if (ret < 0)
                        goto exit;
 
+               /* Flow control transfers are receive only */
+               spi_xfer.tx_buf = NULL;
                ret = phy->flow_control(phy, &spi_xfer);
                if (ret < 0)
                        goto exit;
@@ -113,9 +113,8 @@ int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
                spi_xfer.delay.value = 5;
                spi_xfer.delay.unit = SPI_DELAY_UNIT_USECS;
 
-               if (in) {
-                       spi_xfer.tx_buf = NULL;
-               } else if (out) {
+               if (out) {
+                       spi_xfer.tx_buf = phy->iobuf;
                        spi_xfer.rx_buf = NULL;
                        memcpy(phy->iobuf, out, transfer_len);
                        out += transfer_len;
@@ -288,6 +287,7 @@ static struct spi_driver tpm_tis_spi_driver = {
                .pm = &tpm_tis_pm,
                .of_match_table = of_match_ptr(of_tis_spi_match),
                .acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
+               .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        },
        .probe = tpm_tis_spi_driver_probe,
        .remove = tpm_tis_spi_remove,
index 6282ee2..a8901f9 100644 (file)
@@ -586,7 +586,10 @@ static int sifive_fu540_prci_probe(struct platform_device *pdev)
        struct __prci_data *pd;
        int r;
 
-       pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+       pd = devm_kzalloc(dev,
+                         struct_size(pd, hw_clks.hws,
+                                     ARRAY_SIZE(__prci_init_clocks)),
+                         GFP_KERNEL);
        if (!pd)
                return -ENOMEM;
 
index ecf7b7d..6c3e841 100644 (file)
@@ -480,6 +480,14 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
                .set_next_event_virt = erratum_set_next_event_tval_virt,
        },
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+       {
+               .match_type = ate_match_local_cap_id,
+               .id = (void *)ARM64_WORKAROUND_1418040,
+               .desc = "ARM erratum 1418040",
+               .disable_compat_vdso = true,
+       },
+#endif
 };
 
 typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
@@ -566,6 +574,9 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
        if (wa->read_cntvct_el0) {
                clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
                vdso_default = VDSO_CLOCKMODE_NONE;
+       } else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) {
+               vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT;
+               clocksource_counter.vdso_clock_mode = vdso_default;
        }
 }
 
index 8e23a69..e771e8b 100644 (file)
@@ -2677,6 +2677,8 @@ static struct acpi_platform_list plat_info[] __initdata = {
        { } /* End */
 };
 
+#define BITMASK_OOB    (BIT(8) | BIT(18))
+
 static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
 {
        const struct x86_cpu_id *id;
@@ -2686,8 +2688,9 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
        id = x86_match_cpu(intel_pstate_cpu_oob_ids);
        if (id) {
                rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
-               if (misc_pwr & (1 << 8)) {
-                       pr_debug("Bit 8 in the MISC_PWR_MGMT MSR set\n");
+               if (misc_pwr & BITMASK_OOB) {
+                       pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
+                       pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
                        return true;
                }
        }
index c149d9e..8719731 100644 (file)
@@ -186,9 +186,10 @@ int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         * be frozen safely.
         */
        index = find_deepest_state(drv, dev, U64_MAX, 0, true);
-       if (index > 0)
+       if (index > 0) {
                enter_s2idle_proper(drv, dev, index);
-
+               local_irq_enable();
+       }
        return index;
 }
 #endif /* CONFIG_SUSPEND */
index a62f228..bc35aa0 100644 (file)
@@ -147,7 +147,7 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
        select HW_RANDOM
        help
          Selecting this will register the SEC4 hardware rng to
-         the hw_random API for suppying the kernel entropy pool.
+         the hw_random API for supplying the kernel entropy pool.
 
 endif # CRYPTO_DEV_FSL_CAAM_JR
 
index 4fcdd26..f3d20b7 100644 (file)
@@ -54,7 +54,7 @@ static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
 
                /*
                 * load 1 to clear written reg:
-                * resets the done interrrupt and returns the RNG to idle.
+                * resets the done interrupt and returns the RNG to idle.
                 */
                append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
 
@@ -156,7 +156,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
                                     DESC_DER_DECO_STAT_SHIFT;
 
                /*
-                * If an error occured in the descriptor, then
+                * If an error occurred in the descriptor, then
                 * the DECO status field will be set to 0x0D
                 */
                if (deco_state == DECO_STAT_HOST_ERR)
@@ -264,7 +264,7 @@ static void devm_deinstantiate_rng(void *data)
  *        - -ENODEV if DECO0 couldn't be acquired
  *        - -EAGAIN if an error occurred when executing the descriptor
  *           f.i. there was a RNG hardware error due to not "good enough"
- *           entropy being aquired.
+ *           entropy being acquired.
  */
 static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
                           int gen_sk)
@@ -733,8 +733,8 @@ static int caam_probe(struct platform_device *pdev)
        handle_imx6_err005766(&ctrl->mcr);
 
        /*
-        *  Read the Compile Time paramters and SCFGR to determine
-        * if Virtualization is enabled for this platform
+        *  Read the Compile Time parameters and SCFGR to determine
+        * if virtualization is enabled for this platform
         */
        scfgr = rd_reg32(&ctrl->scfgr);
 
@@ -863,9 +863,9 @@ static int caam_probe(struct platform_device *pdev)
                        }
                        /*
                         * if instantiate_rng(...) fails, the loop will rerun
-                        * and the kick_trng(...) function will modfiy the
+                        * and the kick_trng(...) function will modify the
                         * upper and lower limits of the entropy sampling
-                        * interval, leading to a sucessful initialization of
+                        * interval, leading to a successful initialization of
                         * the RNG.
                         */
                        ret = instantiate_rng(dev, inst_handles,
@@ -882,8 +882,8 @@ static int caam_probe(struct platform_device *pdev)
                        return ret;
                }
                /*
-                * Set handles init'ed by this module as the complement of the
-                * already initialized ones
+                * Set handles initialized by this module as the complement of
+                * the already initialized ones
                 */
                ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK;
 
index e796d3c..e134709 100644 (file)
@@ -18,7 +18,7 @@
  */
 
 #define SEC4_SG_LEN_EXT                0x80000000      /* Entry points to table */
-#define SEC4_SG_LEN_FIN                0x40000000      /* Last ent in table */
+#define SEC4_SG_LEN_FIN                0x40000000      /* Last entry in table */
 #define SEC4_SG_BPID_MASK      0x000000ff
 #define SEC4_SG_BPID_SHIFT     16
 #define SEC4_SG_LEN_MASK       0x3fffffff      /* Excludes EXT and FINAL */
  */
 #define HDR_REVERSE            0x00000800
 
-/* Propogate DNR property to SharedDesc */
+/* Propagate DNR property to SharedDesc */
 #define HDR_PROP_DNR           0x00000800
 
 /* JobDesc/SharedDesc share property */
index 68c1fd5..8ccc220 100644 (file)
@@ -453,7 +453,7 @@ struct srtp_decap_pdb {
 #define DSA_PDB_N_MASK         0x7f
 
 struct dsa_sign_pdb {
-       u32 sgf_ln; /* Use DSA_PDB_ defintions per above */
+       u32 sgf_ln; /* Use DSA_PDB_ definitions per above */
        u8 *q;
        u8 *r;
        u8 *g;  /* or Gx,y */
index a242633..476113e 100644 (file)
@@ -376,6 +376,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
        struct sev_device *sev = psp_master->sev_data;
        struct sev_user_data_pek_csr input;
        struct sev_data_pek_csr *data;
+       void __user *input_address;
        void *blob = NULL;
        int ret;
 
@@ -394,6 +395,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
                goto cmd;
 
        /* allocate a physically contiguous buffer to store the CSR blob */
+       input_address = (void __user *)input.address;
        if (input.length > SEV_FW_BLOB_MAX_SIZE) {
                ret = -EFAULT;
                goto e_free;
@@ -426,7 +428,7 @@ cmd:
        }
 
        if (blob) {
-               if (copy_to_user((void __user *)input.address, blob, input.length))
+               if (copy_to_user(input_address, blob, input.length))
                        ret = -EFAULT;
        }
 
@@ -437,7 +439,7 @@ e_free:
        return ret;
 }
 
-void *psp_copy_user_blob(u64 __user uaddr, u32 len)
+void *psp_copy_user_blob(u64 uaddr, u32 len)
 {
        if (!uaddr || !len)
                return ERR_PTR(-EINVAL);
@@ -446,7 +448,7 @@ void *psp_copy_user_blob(u64 __user uaddr, u32 len)
        if (len > SEV_FW_BLOB_MAX_SIZE)
                return ERR_PTR(-EINVAL);
 
-       return memdup_user((void __user *)(uintptr_t)uaddr, len);
+       return memdup_user((void __user *)uaddr, len);
 }
 EXPORT_SYMBOL_GPL(psp_copy_user_blob);
 
@@ -621,6 +623,7 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
 {
        struct sev_user_data_get_id2 input;
        struct sev_data_get_id *data;
+       void __user *input_address;
        void *id_blob = NULL;
        int ret;
 
@@ -631,6 +634,8 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
        if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
                return -EFAULT;
 
+       input_address = (void __user *)input.address;
+
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
@@ -660,8 +665,7 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
        }
 
        if (id_blob) {
-               if (copy_to_user((void __user *)input.address,
-                                id_blob, data->len)) {
+               if (copy_to_user(input_address, id_blob, data->len)) {
                        ret = -EFAULT;
                        goto e_free;
                }
@@ -720,6 +724,8 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
        struct sev_user_data_pdh_cert_export input;
        void *pdh_blob = NULL, *cert_blob = NULL;
        struct sev_data_pdh_cert_export *data;
+       void __user *input_cert_chain_address;
+       void __user *input_pdh_cert_address;
        int ret;
 
        /* If platform is not in INIT state then transition it to INIT. */
@@ -745,6 +751,9 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
            !input.cert_chain_address)
                goto cmd;
 
+       input_pdh_cert_address = (void __user *)input.pdh_cert_address;
+       input_cert_chain_address = (void __user *)input.cert_chain_address;
+
        /* Allocate a physically contiguous buffer to store the PDH blob. */
        if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) {
                ret = -EFAULT;
@@ -788,7 +797,7 @@ cmd:
        }
 
        if (pdh_blob) {
-               if (copy_to_user((void __user *)input.pdh_cert_address,
+               if (copy_to_user(input_pdh_cert_address,
                                 pdh_blob, input.pdh_cert_len)) {
                        ret = -EFAULT;
                        goto e_free_cert;
@@ -796,7 +805,7 @@ cmd:
        }
 
        if (cert_blob) {
-               if (copy_to_user((void __user *)input.cert_chain_address,
+               if (copy_to_user(input_cert_chain_address,
                                 cert_blob, input.cert_chain_len))
                        ret = -EFAULT;
        }
index b3fdbdc..31e427e 100644 (file)
@@ -223,7 +223,7 @@ struct chcr_authenc_ctx {
 
 struct __aead_ctx {
        struct chcr_gcm_ctx gcm[0];
-       struct chcr_authenc_ctx authenc[0];
+       struct chcr_authenc_ctx authenc[];
 };
 
 struct chcr_aead_ctx {
@@ -235,7 +235,7 @@ struct chcr_aead_ctx {
        u8 nonce[4];
        u16 hmac_ctrl;
        u16 mayverify;
-       struct  __aead_ctx ctx[0];
+       struct  __aead_ctx ctx[];
 };
 
 struct hmac_ctx {
@@ -247,7 +247,7 @@ struct hmac_ctx {
 struct __crypto_ctx {
        struct hmac_ctx hmacctx[0];
        struct ablk_ctx ablkctx[0];
-       struct chcr_aead_ctx aeadctx[0];
+       struct chcr_aead_ctx aeadctx[];
 };
 
 struct chcr_context {
@@ -257,7 +257,7 @@ struct chcr_context {
        unsigned int  ntxq;
        unsigned int  nrxq;
        struct completion cbc_aes_aio_done;
-       struct __crypto_ctx crypto_ctx[0];
+       struct __crypto_ctx crypto_ctx[];
 };
 
 struct chcr_hctx_per_wr {
index f200fae..fd5c558 100644 (file)
@@ -1056,6 +1056,7 @@ static void chtls_pass_accept_rpl(struct sk_buff *skb,
        opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
        opt2 |= T5_ISS_F;
        opt2 |= T5_OPT_2_VALID_F;
+       opt2 |= WND_SCALE_EN_V(WSCALE_OK(tp));
        rpl5->opt0 = cpu_to_be64(opt0);
        rpl5->opt2 = cpu_to_be32(opt2);
        rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
index d98b89d..c3058dc 100644 (file)
@@ -488,7 +488,7 @@ static int chtls_getsockopt(struct sock *sk, int level, int optname,
 }
 
 static int do_chtls_setsockopt(struct sock *sk, int optname,
-                              char __user *optval, unsigned int optlen)
+                              sockptr_t optval, unsigned int optlen)
 {
        struct tls_crypto_info *crypto_info, tmp_crypto_info;
        struct chtls_sock *csk;
@@ -498,12 +498,12 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
 
        csk = rcu_dereference_sk_user_data(sk);
 
-       if (!optval || optlen < sizeof(*crypto_info)) {
+       if (sockptr_is_null(optval) || optlen < sizeof(*crypto_info)) {
                rc = -EINVAL;
                goto out;
        }
 
-       rc = copy_from_user(&tmp_crypto_info, optval, sizeof(*crypto_info));
+       rc = copy_from_sockptr(&tmp_crypto_info, optval, sizeof(*crypto_info));
        if (rc) {
                rc = -EFAULT;
                goto out;
@@ -525,8 +525,9 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
                /* Obtain version and type from previous copy */
                crypto_info[0] = tmp_crypto_info;
                /* Now copy the following data */
-               rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
-                               optval + sizeof(*crypto_info),
+               sockptr_advance(optval, sizeof(*crypto_info));
+               rc = copy_from_sockptr((char *)crypto_info + sizeof(*crypto_info),
+                               optval,
                                sizeof(struct tls12_crypto_info_aes_gcm_128)
                                - sizeof(*crypto_info));
 
@@ -541,8 +542,9 @@ static int do_chtls_setsockopt(struct sock *sk, int optname,
        }
        case TLS_CIPHER_AES_GCM_256: {
                crypto_info[0] = tmp_crypto_info;
-               rc = copy_from_user((char *)crypto_info + sizeof(*crypto_info),
-                                   optval + sizeof(*crypto_info),
+               sockptr_advance(optval, sizeof(*crypto_info));
+               rc = copy_from_sockptr((char *)crypto_info + sizeof(*crypto_info),
+                                   optval,
                                sizeof(struct tls12_crypto_info_aes_gcm_256)
                                - sizeof(*crypto_info));
 
@@ -565,7 +567,7 @@ out:
 }
 
 static int chtls_setsockopt(struct sock *sk, int level, int optname,
-                           char __user *optval, unsigned int optlen)
+                           sockptr_t optval, unsigned int optlen)
 {
        struct tls_context *ctx = tls_get_ctx(sk);
 
index 0e8c7e3..725a739 100644 (file)
@@ -66,7 +66,8 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
 
        sgl_size = sizeof(struct acc_hw_sge) * sge_nr +
                   sizeof(struct hisi_acc_hw_sgl);
-       block_size = PAGE_SIZE * (1 << (MAX_ORDER - 1));
+       block_size = 1 << (PAGE_SHIFT + MAX_ORDER <= 32 ?
+                          PAGE_SHIFT + MAX_ORDER - 1 : 31);
        sgl_num_per_block = block_size / sgl_size;
        block_num = count / sgl_num_per_block;
        remain_sgl = count % sgl_num_per_block;
index 60e744f..1e0a1d7 100644 (file)
@@ -118,6 +118,9 @@ static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
        struct otx_cpt_req_info *cpt_req;
        struct pci_dev *pdev;
 
+       if (!cpt_info)
+               goto complete;
+
        cpt_req = cpt_info->req;
        if (!status) {
                /*
@@ -129,10 +132,10 @@ static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
                    !cpt_req->is_enc)
                        status = validate_hmac_cipher_null(cpt_req);
        }
-       if (cpt_info) {
-               pdev = cpt_info->pdev;
-               do_request_cleanup(pdev, cpt_info);
-       }
+       pdev = cpt_info->pdev;
+       do_request_cleanup(pdev, cpt_info);
+
+complete:
        if (areq)
                areq->complete(areq, status);
 }
index c9aa15f..193b40e 100644 (file)
@@ -135,7 +135,8 @@ int __init dio_find(int deviceid)
                else
                        va = ioremap(pa, PAGE_SIZE);
 
-                if (probe_kernel_read(&i, (unsigned char *)va + DIO_IDOFF, 1)) {
+               if (copy_from_kernel_nofault(&i,
+                               (unsigned char *)va + DIO_IDOFF, 1)) {
                        if (scode >= DIOII_SCBASE)
                                iounmap(va);
                         continue;             /* no board present at that select code */
@@ -208,7 +209,8 @@ static int __init dio_init(void)
                else
                        va = ioremap(pa, PAGE_SIZE);
 
-                if (probe_kernel_read(&i, (unsigned char *)va + DIO_IDOFF, 1)) {
+               if (copy_from_kernel_nofault(&i,
+                               (unsigned char *)va + DIO_IDOFF, 1)) {
                        if (scode >= DIOII_SCBASE)
                                iounmap(va);
                         continue;              /* no board present at that select code */
index 01ce125..4126296 100644 (file)
@@ -54,37 +54,11 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
                             dentry->d_name.name, ret > 0 ? name : "");
 }
 
-static const struct dentry_operations dma_buf_dentry_ops = {
-       .d_dname = dmabuffs_dname,
-};
-
-static struct vfsmount *dma_buf_mnt;
-
-static int dma_buf_fs_init_context(struct fs_context *fc)
-{
-       struct pseudo_fs_context *ctx;
-
-       ctx = init_pseudo(fc, DMA_BUF_MAGIC);
-       if (!ctx)
-               return -ENOMEM;
-       ctx->dops = &dma_buf_dentry_ops;
-       return 0;
-}
-
-static struct file_system_type dma_buf_fs_type = {
-       .name = "dmabuf",
-       .init_fs_context = dma_buf_fs_init_context,
-       .kill_sb = kill_anon_super,
-};
-
-static int dma_buf_release(struct inode *inode, struct file *file)
+static void dma_buf_release(struct dentry *dentry)
 {
        struct dma_buf *dmabuf;
 
-       if (!is_dma_buf_file(file))
-               return -EINVAL;
-
-       dmabuf = file->private_data;
+       dmabuf = dentry->d_fsdata;
 
        BUG_ON(dmabuf->vmapping_counter);
 
@@ -110,9 +84,32 @@ static int dma_buf_release(struct inode *inode, struct file *file)
        module_put(dmabuf->owner);
        kfree(dmabuf->name);
        kfree(dmabuf);
+}
+
+static const struct dentry_operations dma_buf_dentry_ops = {
+       .d_dname = dmabuffs_dname,
+       .d_release = dma_buf_release,
+};
+
+static struct vfsmount *dma_buf_mnt;
+
+static int dma_buf_fs_init_context(struct fs_context *fc)
+{
+       struct pseudo_fs_context *ctx;
+
+       ctx = init_pseudo(fc, DMA_BUF_MAGIC);
+       if (!ctx)
+               return -ENOMEM;
+       ctx->dops = &dma_buf_dentry_ops;
        return 0;
 }
 
+static struct file_system_type dma_buf_fs_type = {
+       .name = "dmabuf",
+       .init_fs_context = dma_buf_fs_init_context,
+       .kill_sb = kill_anon_super,
+};
+
 static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
 {
        struct dma_buf *dmabuf;
@@ -412,7 +409,6 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
 }
 
 static const struct file_operations dma_buf_fops = {
-       .release        = dma_buf_release,
        .mmap           = dma_buf_mmap_internal,
        .llseek         = dma_buf_llseek,
        .poll           = dma_buf_poll,
index 8853d44..a8cfb59 100644 (file)
@@ -77,7 +77,7 @@ struct milbeaut_hdmac_device {
        struct dma_device ddev;
        struct clk *clk;
        void __iomem *reg_base;
-       struct milbeaut_hdmac_chan channels[0];
+       struct milbeaut_hdmac_chan channels[];
 };
 
 static struct milbeaut_hdmac_chan *
index ab3d2f3..85a5972 100644 (file)
@@ -74,7 +74,7 @@ struct milbeaut_xdmac_chan {
 struct milbeaut_xdmac_device {
        struct dma_device ddev;
        void __iomem *reg_base;
-       struct milbeaut_xdmac_chan channels[0];
+       struct milbeaut_xdmac_chan channels[];
 };
 
 static struct milbeaut_xdmac_chan *
index 4ab493d..347146a 100644 (file)
@@ -127,7 +127,7 @@ struct moxart_desc {
        unsigned int                    dma_cycles;
        struct virt_dma_desc            vd;
        uint8_t                         es;
-       struct moxart_sg                sg[0];
+       struct moxart_sg                sg[];
 };
 
 struct moxart_chan {
index b9f0d96..55fc740 100644 (file)
@@ -225,7 +225,7 @@ struct tegra_dma {
        u32                             global_pause_count;
 
        /* Last member of the structure */
-       struct tegra_dma_channel channels[0];
+       struct tegra_dma_channel channels[];
 };
 
 static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
index c4a5c17..35d81bd 100644 (file)
@@ -211,7 +211,7 @@ struct edma_desc {
        u32                             residue;
        u32                             residue_stat;
 
-       struct edma_pset                pset[0];
+       struct edma_pset                pset[];
 };
 
 struct edma_cc;
index 945b7c6..c91e2dc 100644 (file)
@@ -170,7 +170,7 @@ struct udma_desc {
        void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */
 
        unsigned int hwdesc_count;
-       struct udma_hwdesc hwdesc[0];
+       struct udma_hwdesc hwdesc[];
 };
 
 enum udma_chan_state {
index 3938269..68e48bf 100644 (file)
@@ -88,7 +88,7 @@ struct timb_dma {
        struct dma_device       dma;
        void __iomem            *membase;
        struct tasklet_struct   tasklet;
-       struct timb_dma_chan    channels[0];
+       struct timb_dma_chan    channels[];
 };
 
 static struct device *chan2dev(struct dma_chan *chan)
index ef90070..6262f63 100644 (file)
@@ -269,6 +269,8 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
 
                if (pvt->model == 0x60)
                        amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
+               else
+                       amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
        } else {
                amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
        }
index c7ea4f2..fb6c651 100644 (file)
@@ -117,7 +117,7 @@ struct inbound_transaction_resource {
 struct descriptor_resource {
        struct client_resource resource;
        struct fw_descriptor descriptor;
-       u32 data[0];
+       u32 data[];
 };
 
 struct iso_resource {
index 404a035..439d918 100644 (file)
@@ -620,7 +620,7 @@ struct fw_request {
        u32 request_header[4];
        int ack;
        u32 length;
-       u32 data[0];
+       u32 data[];
 };
 
 static void free_response_callback(struct fw_packet *packet,
index 4b0e4ee..71d5f16 100644 (file)
@@ -191,7 +191,7 @@ struct fw_node {
        /* Upper layer specific data. */
        void *data;
 
-       struct fw_node *ports[0];
+       struct fw_node *ports[];
 };
 
 static inline struct fw_node *fw_node_get(struct fw_node *node)
index 6ca2f5a..5fd6a60 100644 (file)
@@ -52,7 +52,7 @@ struct pcl {
 
 struct packet {
        unsigned int length;
-       char data[0];
+       char data[];
 };
 
 struct packet_buffer {
index 3326931..54fdc39 100644 (file)
@@ -111,7 +111,7 @@ struct descriptor_buffer {
        dma_addr_t buffer_bus;
        size_t buffer_size;
        size_t used;
-       struct descriptor buffer[0];
+       struct descriptor buffer[];
 };
 
 struct context {
index b618002..8b8127f 100644 (file)
@@ -262,7 +262,7 @@ struct dmi_system_event_log {
        u8      header_format;
        u8      type_descriptors_supported_count;
        u8      per_log_type_descriptor_length;
-       u8      supported_log_type_descriptos[0];
+       u8      supported_log_type_descriptos[];
 } __packed;
 
 #define DMI_SYSFS_SEL_FIELD(_field) \
index e6fc022..3939699 100644 (file)
@@ -278,3 +278,14 @@ config EFI_EARLYCON
        depends on SERIAL_EARLYCON && !ARM && !IA64
        select FONT_SUPPORT
        select ARCH_USE_MEMREMAP_PROT
+
+config EFI_CUSTOM_SSDT_OVERLAYS
+       bool "Load custom ACPI SSDT overlay from an EFI variable"
+       depends on EFI_VARS && ACPI
+       default ACPI_TABLE_UPGRADE
+       help
+         Allow loading of an ACPI SSDT overlay from an EFI variable specified
+         by a kernel command line option.
+
+         See Documentation/admin-guide/acpi/ssdt-overlays.rst for more
+         information.
index c697e70..71c445d 100644 (file)
@@ -52,9 +52,11 @@ static phys_addr_t __init efi_to_phys(unsigned long addr)
 }
 
 static __initdata unsigned long screen_info_table = EFI_INVALID_TABLE_ADDR;
+static __initdata unsigned long cpu_state_table = EFI_INVALID_TABLE_ADDR;
 
 static const efi_config_table_type_t arch_tables[] __initconst = {
        {LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, &screen_info_table},
+       {LINUX_EFI_ARM_CPU_STATE_TABLE_GUID, &cpu_state_table},
        {}
 };
 
@@ -62,7 +64,8 @@ static void __init init_screen_info(void)
 {
        struct screen_info *si;
 
-       if (screen_info_table != EFI_INVALID_TABLE_ADDR) {
+       if (IS_ENABLED(CONFIG_ARM) &&
+           screen_info_table != EFI_INVALID_TABLE_ADDR) {
                si = early_memremap_ro(screen_info_table, sizeof(*si));
                if (!si) {
                        pr_err("Could not map screen_info config table\n");
@@ -116,7 +119,8 @@ static int __init uefi_init(u64 efi_system_table)
                goto out;
        }
        retval = efi_config_parse_tables(config_tables, systab->nr_tables,
-                                        arch_tables);
+                                        IS_ENABLED(CONFIG_ARM) ? arch_tables
+                                                               : NULL);
 
        early_memunmap(config_tables, table_size);
 out:
@@ -238,9 +242,37 @@ void __init efi_init(void)
 
        init_screen_info();
 
+#ifdef CONFIG_ARM
        /* ARM does not permit early mappings to persist across paging_init() */
-       if (IS_ENABLED(CONFIG_ARM))
-               efi_memmap_unmap();
+       efi_memmap_unmap();
+
+       if (cpu_state_table != EFI_INVALID_TABLE_ADDR) {
+               struct efi_arm_entry_state *state;
+               bool dump_state = true;
+
+               state = early_memremap_ro(cpu_state_table,
+                                         sizeof(struct efi_arm_entry_state));
+               if (state == NULL) {
+                       pr_warn("Unable to map CPU entry state table.\n");
+                       return;
+               }
+
+               if ((state->sctlr_before_ebs & 1) == 0)
+                       pr_warn(FW_BUG "EFI stub was entered with MMU and Dcache disabled, please fix your firmware!\n");
+               else if ((state->sctlr_after_ebs & 1) == 0)
+                       pr_warn(FW_BUG "ExitBootServices() returned with MMU and Dcache disabled, please fix your firmware!\n");
+               else
+                       dump_state = false;
+
+               if (dump_state || efi_enabled(EFI_DBG)) {
+                       pr_info("CPSR at EFI stub entry        : 0x%08x\n", state->cpsr_before_ebs);
+                       pr_info("SCTLR at EFI stub entry       : 0x%08x\n", state->sctlr_before_ebs);
+                       pr_info("CPSR after ExitBootServices() : 0x%08x\n", state->cpsr_after_ebs);
+                       pr_info("SCTLR after ExitBootServices(): 0x%08x\n", state->sctlr_after_ebs);
+               }
+               early_memunmap(state, sizeof(struct efi_arm_entry_state));
+       }
+#endif
 }
 
 static bool efifb_overlaps_pci_range(const struct of_pci_range *range)
index 7f1657b..5114cae 100644 (file)
@@ -189,7 +189,7 @@ static void generic_ops_unregister(void)
        efivars_unregister(&generic_efivars);
 }
 
-#if IS_ENABLED(CONFIG_ACPI)
+#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
 #define EFIVAR_SSDT_NAME_MAX   16
 static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
 static int __init efivar_ssdt_setup(char *str)
@@ -622,7 +622,8 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
                        rsv = (void *)(p + prsv % PAGE_SIZE);
 
                        /* reserve the entry itself */
-                       memblock_reserve(prsv, EFI_MEMRESERVE_SIZE(rsv->size));
+                       memblock_reserve(prsv,
+                                        struct_size(rsv, entry, rsv->size));
 
                        for (i = 0; i < atomic_read(&rsv->count); i++) {
                                memblock_reserve(rsv->entry[i].base,
index e3d6926..d591527 100644 (file)
@@ -181,7 +181,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
                rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL,
                                          "entry%d", entry_num);
                if (rc) {
-                       kfree(entry);
+                       kobject_put(&entry->kobj);
                        return rc;
                }
        }
index 75daaf2..4cce372 100644 (file)
@@ -6,7 +6,8 @@
 # enabled, even if doing so doesn't break the build.
 #
 cflags-$(CONFIG_X86_32)                := -march=i386
-cflags-$(CONFIG_X86_64)                := -mcmodel=small
+cflags-$(CONFIG_X86_64)                := -mcmodel=small \
+                                  $(call cc-option,-maccumulate-outgoing-args)
 cflags-$(CONFIG_X86)           += -m$(BITS) -D__KERNEL__ \
                                   -fPIC -fno-strict-aliasing -mno-red-zone \
                                   -mno-mmx -mno-sse -fshort-wchar \
index 40243f5..d08e5d5 100644 (file)
@@ -7,10 +7,49 @@
 
 #include "efistub.h"
 
+static efi_guid_t cpu_state_guid = LINUX_EFI_ARM_CPU_STATE_TABLE_GUID;
+
+struct efi_arm_entry_state *efi_entry_state;
+
+static void get_cpu_state(u32 *cpsr, u32 *sctlr)
+{
+       asm("mrs %0, cpsr" : "=r"(*cpsr));
+       if ((*cpsr & MODE_MASK) == HYP_MODE)
+               asm("mrc p15, 4, %0, c1, c0, 0" : "=r"(*sctlr));
+       else
+               asm("mrc p15, 0, %0, c1, c0, 0" : "=r"(*sctlr));
+}
+
 efi_status_t check_platform_features(void)
 {
+       efi_status_t status;
+       u32 cpsr, sctlr;
        int block;
 
+       get_cpu_state(&cpsr, &sctlr);
+
+       efi_info("Entering in %s mode with MMU %sabled\n",
+                ((cpsr & MODE_MASK) == HYP_MODE) ? "HYP" : "SVC",
+                (sctlr & 1) ? "en" : "dis");
+
+       status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+                            sizeof(*efi_entry_state),
+                            (void **)&efi_entry_state);
+       if (status != EFI_SUCCESS) {
+               efi_err("allocate_pool() failed\n");
+               return status;
+       }
+
+       efi_entry_state->cpsr_before_ebs = cpsr;
+       efi_entry_state->sctlr_before_ebs = sctlr;
+
+       status = efi_bs_call(install_configuration_table, &cpu_state_guid,
+                            efi_entry_state);
+       if (status != EFI_SUCCESS) {
+               efi_err("install_configuration_table() failed\n");
+               goto free_state;
+       }
+
        /* non-LPAE kernels can run anywhere */
        if (!IS_ENABLED(CONFIG_ARM_LPAE))
                return EFI_SUCCESS;
@@ -19,9 +58,22 @@ efi_status_t check_platform_features(void)
        block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
        if (block < 5) {
                efi_err("This LPAE kernel is not supported by your CPU\n");
-               return EFI_UNSUPPORTED;
+               status = EFI_UNSUPPORTED;
+               goto drop_table;
        }
        return EFI_SUCCESS;
+
+drop_table:
+       efi_bs_call(install_configuration_table, &cpu_state_guid, NULL);
+free_state:
+       efi_bs_call(free_pool, efi_entry_state);
+       return status;
+}
+
+void efi_handle_post_ebs_state(void)
+{
+       get_cpu_state(&efi_entry_state->cpsr_after_ebs,
+                     &efi_entry_state->sctlr_after_ebs);
 }
 
 static efi_guid_t screen_info_guid = LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID;
index 89f0752..d40fd68 100644 (file)
@@ -32,6 +32,10 @@ bool __pure __efi_soft_reserve_enabled(void)
        return !efi_nosoftreserve;
 }
 
+/**
+ * efi_char16_puts() - Write a UCS-2 encoded string to the console
+ * @str:       UCS-2 encoded string
+ */
 void efi_char16_puts(efi_char16_t *str)
 {
        efi_call_proto(efi_table_attr(efi_system_table, con_out),
@@ -83,6 +87,10 @@ u32 utf8_to_utf32(const u8 **s8)
        return c32;
 }
 
+/**
+ * efi_puts() - Write a UTF-8 encoded string to the console
+ * @str:       UTF-8 encoded string
+ */
 void efi_puts(const char *str)
 {
        efi_char16_t buf[128];
@@ -113,6 +121,16 @@ void efi_puts(const char *str)
        }
 }
 
+/**
+ * efi_printk() - Print a kernel message
+ * @fmt:       format string
+ *
+ * The first letter of the format string is used to determine the logging level
+ * of the message. If the level is less then the current EFI logging level, the
+ * message is suppressed. The message will be truncated to 255 bytes.
+ *
+ * Return:     number of printed characters
+ */
 int efi_printk(const char *fmt, ...)
 {
        char printf_buf[256];
@@ -154,13 +172,18 @@ int efi_printk(const char *fmt, ...)
        return printed;
 }
 
-/*
- * Parse the ASCII string 'cmdline' for EFI options, denoted by the efi=
+/**
+ * efi_parse_options() - Parse EFI command line options
+ * @cmdline:   kernel command line
+ *
+ * Parse the ASCII string @cmdline for EFI options, denoted by the efi=
  * option, e.g. efi=nochunk.
  *
  * It should be noted that efi= is parsed in two very different
  * environments, first in the early boot environment of the EFI boot
  * stub, and subsequently during the kernel boot.
+ *
+ * Return:     status code
  */
 efi_status_t efi_parse_options(char const *cmdline)
 {
@@ -286,13 +309,21 @@ char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len)
        return (char *)cmdline_addr;
 }
 
-/*
+/**
+ * efi_exit_boot_services() - Exit boot services
+ * @handle:    handle of the exiting image
+ * @map:       pointer to receive the memory map
+ * @priv:      argument to be passed to @priv_func
+ * @priv_func: function to process the memory map before exiting boot services
+ *
  * Handle calling ExitBootServices according to the requirements set out by the
  * spec.  Obtains the current memory map, and returns that info after calling
  * ExitBootServices.  The client must specify a function to perform any
  * processing of the memory map data prior to ExitBootServices.  A client
  * specific structure may be passed to the function via priv.  The client
  * function may be called multiple times.
+ *
+ * Return:     status code
  */
 efi_status_t efi_exit_boot_services(void *handle,
                                    struct efi_boot_memmap *map,
@@ -361,6 +392,11 @@ fail:
        return status;
 }
 
+/**
+ * get_efi_config_table() - retrieve UEFI configuration table
+ * @guid:      GUID of the configuration table to be retrieved
+ * Return:     pointer to the configuration table or NULL
+ */
 void *get_efi_config_table(efi_guid_t guid)
 {
        unsigned long tables = efi_table_attr(efi_system_table, tables);
@@ -408,17 +444,18 @@ static const struct {
 };
 
 /**
- * efi_load_initrd_dev_path - load the initrd from the Linux initrd device path
+ * efi_load_initrd_dev_path() - load the initrd from the Linux initrd device path
  * @load_addr: pointer to store the address where the initrd was loaded
  * @load_size: pointer to store the size of the loaded initrd
  * @max:       upper limit for the initrd memory allocation
- * @return:    %EFI_SUCCESS if the initrd was loaded successfully, in which
- *             case @load_addr and @load_size are assigned accordingly
- *             %EFI_NOT_FOUND if no LoadFile2 protocol exists on the initrd
- *             device path
- *             %EFI_INVALID_PARAMETER if load_addr == NULL or load_size == NULL
- *             %EFI_OUT_OF_RESOURCES if memory allocation failed
- *             %EFI_LOAD_ERROR in all other cases
+ *
+ * Return:
+ * * %EFI_SUCCESS if the initrd was loaded successfully, in which
+ *   case @load_addr and @load_size are assigned accordingly
+ * * %EFI_NOT_FOUND if no LoadFile2 protocol exists on the initrd device path
+ * * %EFI_INVALID_PARAMETER if load_addr == NULL or load_size == NULL
+ * * %EFI_OUT_OF_RESOURCES if memory allocation failed
+ * * %EFI_LOAD_ERROR in all other cases
  */
 static
 efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr,
@@ -481,6 +518,16 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
                                    load_addr, load_size);
 }
 
+/**
+ * efi_load_initrd() - Load initial RAM disk
+ * @image:     EFI loaded image protocol
+ * @load_addr: pointer to loaded initrd
+ * @load_size: size of loaded initrd
+ * @soft_limit:        preferred size of allocated memory for loading the initrd
+ * @hard_limit:        minimum size of allocated memory
+ *
+ * Return:     status code
+ */
 efi_status_t efi_load_initrd(efi_loaded_image_t *image,
                             unsigned long *load_addr,
                             unsigned long *load_size,
@@ -505,6 +552,15 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image,
        return status;
 }
 
+/**
+ * efi_wait_for_key() - Wait for key stroke
+ * @usec:      number of microseconds to wait for key stroke
+ * @key:       key entered
+ *
+ * Wait for up to @usec microseconds for a key stroke.
+ *
+ * Return:     status code, EFI_SUCCESS if key received
+ */
 efi_status_t efi_wait_for_key(unsigned long usec, efi_input_key_t *key)
 {
        efi_event_t events[2], timer;
index e97370b..3318ec3 100644 (file)
@@ -329,6 +329,9 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
        if (status != EFI_SUCCESS)
                goto fail_free_initrd;
 
+       if (IS_ENABLED(CONFIG_ARM))
+               efi_handle_post_ebs_state();
+
        efi_enter_kernel(image_addr, fdt_addr, fdt_totalsize((void *)fdt_addr));
        /* not reached */
 
index bcd8c0a..2c9d422 100644 (file)
@@ -157,8 +157,14 @@ typedef void (__efiapi *efi_event_notify_t)(efi_event_t, void *);
 #define EFI_EVT_NOTIFY_WAIT    0x00000100U
 #define EFI_EVT_NOTIFY_SIGNAL  0x00000200U
 
-/*
- * boottime->wait_for_event takes an array of events as input.
+/**
+ * efi_set_event_at() - add event to events array
+ *
+ * @events:    array of UEFI events
+ * @ids:       index where to put the event in the array
+ * @event:     event to add to the aray
+ *
+ * boottime->wait_for_event() takes an array of events as input.
  * Provide a helper to set it up correctly for mixed mode.
  */
 static inline
@@ -771,4 +777,6 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image,
                             unsigned long soft_limit,
                             unsigned long hard_limit);
 
+void efi_handle_post_ebs_state(void);
+
 #endif
index 2005e33..630caa6 100644 (file)
@@ -102,12 +102,20 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
        if (!found)
                return 0;
 
+       /* Skip any leading slashes */
+       while (cmdline[i] == L'/' || cmdline[i] == L'\\')
+               i++;
+
        while (--result_len > 0 && i < cmdline_len) {
-               if (cmdline[i] == L'\0' ||
-                   cmdline[i] == L'\n' ||
-                   cmdline[i] == L' ')
+               efi_char16_t c = cmdline[i++];
+
+               if (c == L'\0' || c == L'\n' || c == L' ')
                        break;
-               *result++ = cmdline[i++];
+               else if (c == L'/')
+                       /* Replace UNIX dir separators with EFI standard ones */
+                       *result++ = L'\\';
+               else
+                       *result++ = c;
        }
        *result = L'\0';
        return i;
index a700b3c..159fb4e 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include <linux/ctype.h>
+#include <linux/string.h>
 #include <linux/types.h>
 
 char *skip_spaces(const char *str)
index fd7f0fb..d17e4d6 100644 (file)
@@ -21,7 +21,7 @@
 struct cbmem_cons {
        u32 size_dont_access_after_boot;
        u32 cursor;
-       u8  body[0];
+       u8  body[];
 } __packed;
 
 #define CURSOR_MASK ((1 << 28) - 1)
index db08122..d23c5c6 100644 (file)
@@ -32,7 +32,7 @@ struct vpd_cbmem {
        u32 version;
        u32 ro_size;
        u32 rw_size;
-       u8  blob[0];
+       u8  blob[];
 };
 
 struct vpd_section {
index 96758b7..7127a04 100644 (file)
@@ -104,7 +104,7 @@ struct ibft_control {
        u16 tgt0_off;
        u16 nic1_off;
        u16 tgt1_off;
-       u16 expansion[0];
+       u16 expansion[];
 } __attribute__((__packed__));
 
 struct ibft_initiator {
index ce75d1d..e025405 100644 (file)
@@ -103,6 +103,6 @@ struct pcdp {
        u8                      creator_id[4];
        u32                     creator_rev;
        u32                     num_uarts;
-       struct pcdp_uart        uart[0];        /* actual size is num_uarts */
+       struct pcdp_uart        uart[]; /* actual size is num_uarts */
        /* remainder of table is pcdp_device structures */
 } __attribute__((packed));
index 873841a..3d6ba42 100644 (file)
@@ -157,8 +157,10 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
 
        cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups),
                             GFP_KERNEL);
-       if (!cpu_groups)
+       if (!cpu_groups) {
+               free_cpumask_var(tmp);
                return -ENOMEM;
+       }
 
        cpumask_copy(tmp, cpu_online_mask);
 
@@ -167,6 +169,7 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
                        topology_core_cpumask(cpumask_any(tmp));
 
                if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
+                       free_cpumask_var(tmp);
                        free_cpu_groups(num_groups, &cpu_groups);
                        return -ENOMEM;
                }
@@ -196,13 +199,12 @@ static int hotplug_tests(void)
        if (!page_buf)
                goto out_free_cpu_groups;
 
-       err = 0;
        /*
         * Of course the last CPU cannot be powered down and cpu_down() should
         * refuse doing that.
         */
        pr_info("Trying to turn off and on again all CPUs\n");
-       err += down_and_up_cpus(cpu_online_mask, offlined_cpus);
+       err = down_and_up_cpus(cpu_online_mask, offlined_cpus);
 
        /*
         * Take down CPUs by cpu group this time. When the last CPU is turned
index ef80988..625c8fd 100644 (file)
@@ -181,6 +181,7 @@ EXPORT_SYMBOL_GPL(rpi_firmware_property);
 static void
 rpi_firmware_print_firmware_revision(struct rpi_firmware *fw)
 {
+       time64_t date_and_time;
        u32 packet;
        int ret = rpi_firmware_property(fw,
                                        RPI_FIRMWARE_GET_FIRMWARE_REVISION,
@@ -189,7 +190,9 @@ rpi_firmware_print_firmware_revision(struct rpi_firmware *fw)
        if (ret)
                return;
 
-       dev_info(fw->cl.dev, "Attached to firmware from %ptT\n", &packet);
+       /* This is not compatible with y2038 */
+       date_and_time = packet;
+       dev_info(fw->cl.dev, "Attached to firmware from %ptT\n", &date_and_time);
 }
 
 static void
index b2408a7..7cd5a29 100644 (file)
@@ -208,7 +208,7 @@ config FPGA_DFL_PCI
 
 config FPGA_MGR_ZYNQMP_FPGA
        tristate "Xilinx ZynqMP FPGA"
-       depends on ARCH_ZYNQMP || COMPILE_TEST
+       depends on ZYNQMP_FIRMWARE || (!ZYNQMP_FIRMWARE && COMPILE_TEST)
        help
          FPGA manager driver support for Xilinx ZynqMP FPGAs.
          This driver uses the processor configuration port(PCAP)
index 5640efe..5bda38e 100644 (file)
@@ -64,6 +64,7 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
                ret = pm_runtime_get_sync(chip->parent);
                if (ret < 0) {
                        dev_err(chip->parent, "Failed to resume: %d\n", ret);
+                       pm_runtime_put_autosuspend(chip->parent);
                        return ret;
                }
 
@@ -72,12 +73,15 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
                if (ret < 0) {
                        dev_err(chip->parent, "Failed to drop cache: %d\n",
                                ret);
+                       pm_runtime_put_autosuspend(chip->parent);
                        return ret;
                }
 
                ret = regmap_read(arizona->regmap, reg, &val);
-               if (ret < 0)
+               if (ret < 0) {
+                       pm_runtime_put_autosuspend(chip->parent);
                        return ret;
+               }
 
                pm_runtime_mark_last_busy(chip->parent);
                pm_runtime_put_autosuspend(chip->parent);
@@ -106,6 +110,7 @@ static int arizona_gpio_direction_out(struct gpio_chip *chip,
                ret = pm_runtime_get_sync(chip->parent);
                if (ret < 0) {
                        dev_err(chip->parent, "Failed to resume: %d\n", ret);
+                       pm_runtime_put(chip->parent);
                        return ret;
                }
        }
index 1fca8dd..a3b9bde 100644 (file)
@@ -107,6 +107,84 @@ static const struct i2c_device_id pca953x_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, pca953x_id);
 
+#ifdef CONFIG_GPIO_PCA953X_IRQ
+
+#include <linux/dmi.h>
+#include <linux/gpio.h>
+#include <linux/list.h>
+
+static const struct dmi_system_id pca953x_dmi_acpi_irq_info[] = {
+       {
+               /*
+                * On Intel Galileo Gen 2 board the IRQ pin of one of
+                * the I²C GPIO expanders, which has GpioInt() resource,
+                * is provided as an absolute number instead of being
+                * relative. Since first controller (gpio-sch.c) and
+                * second (gpio-dwapb.c) are at the fixed bases, we may
+                * safely refer to the number in the global space to get
+                * an IRQ out of it.
+                */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
+               },
+       },
+       {}
+};
+
+#ifdef CONFIG_ACPI
+static int pca953x_acpi_get_pin(struct acpi_resource *ares, void *data)
+{
+       struct acpi_resource_gpio *agpio;
+       int *pin = data;
+
+       if (acpi_gpio_get_irq_resource(ares, &agpio))
+               *pin = agpio->pin_table[0];
+       return 1;
+}
+
+static int pca953x_acpi_find_pin(struct device *dev)
+{
+       struct acpi_device *adev = ACPI_COMPANION(dev);
+       int pin = -ENOENT, ret;
+       LIST_HEAD(r);
+
+       ret = acpi_dev_get_resources(adev, &r, pca953x_acpi_get_pin, &pin);
+       acpi_dev_free_resource_list(&r);
+       if (ret < 0)
+               return ret;
+
+       return pin;
+}
+#else
+static inline int pca953x_acpi_find_pin(struct device *dev) { return -ENXIO; }
+#endif
+
+static int pca953x_acpi_get_irq(struct device *dev)
+{
+       int pin, ret;
+
+       pin = pca953x_acpi_find_pin(dev);
+       if (pin < 0)
+               return pin;
+
+       dev_info(dev, "Applying ACPI interrupt quirk (GPIO %d)\n", pin);
+
+       if (!gpio_is_valid(pin))
+               return -EINVAL;
+
+       ret = gpio_request(pin, "pca953x interrupt");
+       if (ret)
+               return ret;
+
+       ret = gpio_to_irq(pin);
+
+       /* When pin is used as an IRQ, no need to keep it requested */
+       gpio_free(pin);
+
+       return ret;
+}
+#endif
+
 static const struct acpi_device_id pca953x_acpi_ids[] = {
        { "INT3491", 16 | PCA953X_TYPE | PCA_LATCH_INT, },
        { }
@@ -322,6 +400,7 @@ static const struct regmap_config pca953x_ai_i2c_regmap = {
        .writeable_reg = pca953x_writeable_register,
        .volatile_reg = pca953x_volatile_register,
 
+       .disable_locking = true,
        .cache_type = REGCACHE_RBTREE,
        .max_register = 0x7f,
 };
@@ -623,8 +702,6 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
        DECLARE_BITMAP(reg_direction, MAX_LINE);
        int level;
 
-       pca953x_read_regs(chip, chip->regs->direction, reg_direction);
-
        if (chip->driver_data & PCA_PCAL) {
                /* Enable latch on interrupt-enabled inputs */
                pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask);
@@ -635,7 +712,11 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
                pca953x_write_regs(chip, PCAL953X_INT_MASK, irq_mask);
        }
 
+       /* Switch direction to input if needed */
+       pca953x_read_regs(chip, chip->regs->direction, reg_direction);
+
        bitmap_or(irq_mask, chip->irq_trig_fall, chip->irq_trig_raise, gc->ngpio);
+       bitmap_complement(reg_direction, reg_direction, gc->ngpio);
        bitmap_and(irq_mask, irq_mask, reg_direction, gc->ngpio);
 
        /* Look for any newly setup interrupt */
@@ -734,14 +815,16 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
        struct gpio_chip *gc = &chip->gpio_chip;
        DECLARE_BITMAP(pending, MAX_LINE);
        int level;
+       bool ret;
 
-       if (!pca953x_irq_pending(chip, pending))
-               return IRQ_NONE;
+       mutex_lock(&chip->i2c_lock);
+       ret = pca953x_irq_pending(chip, pending);
+       mutex_unlock(&chip->i2c_lock);
 
        for_each_set_bit(level, pending, gc->ngpio)
                handle_nested_irq(irq_find_mapping(gc->irq.domain, level));
 
-       return IRQ_HANDLED;
+       return IRQ_RETVAL(ret);
 }
 
 static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base)
@@ -752,6 +835,12 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base)
        DECLARE_BITMAP(irq_stat, MAX_LINE);
        int ret;
 
+       if (dmi_first_match(pca953x_dmi_acpi_irq_info)) {
+               ret = pca953x_acpi_get_irq(&client->dev);
+               if (ret > 0)
+                       client->irq = ret;
+       }
+
        if (!client->irq)
                return 0;
 
index 58f9d8c..44f9276 100644 (file)
@@ -204,6 +204,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
                                (mode_info->atom_context->bios + data_offset);
                        switch (crev) {
                        case 11:
+                       case 12:
                                mem_channel_number = igp_info->v11.umachannelnumber;
                                /* channel width is 64 */
                                if (vram_width)
index 4720718..4fb4c3b 100644 (file)
@@ -37,7 +37,8 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
 
        memset(&ti, 0, sizeof(struct amdgpu_task_info));
 
-       if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
+       if (amdgpu_gpu_recovery &&
+           amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
                DRM_ERROR("ring %s timeout, but soft recovered\n",
                          s_job->sched->name);
                return;
index 775e389..02e6f8c 100644 (file)
@@ -696,7 +696,7 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
  * default power levels, write "r" (reset) to the file to reset them.
  *
  *
- * < For Vega20 >
+ * < For Vega20 and newer ASICs >
  *
  * Reading the file will display:
  *
@@ -1668,7 +1668,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 }
 
 /**
- * DOC: busy_percent
+ * DOC: gpu_busy_percent
  *
  * The amdgpu driver provides a sysfs API for reading how busy the GPU
  * is as a percentage.  The file gpu_busy_percent is used for this.
@@ -2784,7 +2784,7 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000);
+       return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
 }
 
 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
@@ -2819,7 +2819,7 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000);
+       return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
 }
 
 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
index 7301fdc..ef3269c 100644 (file)
@@ -372,6 +372,52 @@ static int psp_tmr_load(struct psp_context *psp)
        return ret;
 }
 
+static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
+                                       struct psp_gfx_cmd_resp *cmd)
+{
+       if (amdgpu_sriov_vf(psp->adev))
+               cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
+       else
+               cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
+}
+
+static int psp_tmr_unload(struct psp_context *psp)
+{
+       int ret;
+       struct psp_gfx_cmd_resp *cmd;
+
+       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+       if (!cmd)
+               return -ENOMEM;
+
+       psp_prep_tmr_unload_cmd_buf(psp, cmd);
+       DRM_INFO("free PSP TMR buffer\n");
+
+       ret = psp_cmd_submit_buf(psp, NULL, cmd,
+                                psp->fence_buf_mc_addr);
+
+       kfree(cmd);
+
+       return ret;
+}
+
+static int psp_tmr_terminate(struct psp_context *psp)
+{
+       int ret;
+       void *tmr_buf;
+       void **pptr;
+
+       ret = psp_tmr_unload(psp);
+       if (ret)
+               return ret;
+
+       /* free TMR memory buffer */
+       pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
+       amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+
+       return 0;
+}
+
 static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
                                uint64_t asd_mc, uint32_t size)
 {
@@ -1779,8 +1825,6 @@ static int psp_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct psp_context *psp = &adev->psp;
-       void *tmr_buf;
-       void **pptr;
 
        if (psp->adev->psp.ta_fw) {
                psp_ras_terminate(psp);
@@ -1790,10 +1834,9 @@ static int psp_hw_fini(void *handle)
 
        psp_asd_unload(psp);
 
+       psp_tmr_terminate(psp);
        psp_ring_destroy(psp, PSP_RING_TYPE__KM);
 
-       pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
-       amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
        amdgpu_bo_free_kernel(&psp->fw_pri_bo,
                              &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
        amdgpu_bo_free_kernel(&psp->fence_buf_bo,
@@ -1840,6 +1883,18 @@ static int psp_suspend(void *handle)
                }
        }
 
+       ret = psp_asd_unload(psp);
+       if (ret) {
+               DRM_ERROR("Failed to unload asd\n");
+               return ret;
+       }
+
+       ret = psp_tmr_terminate(psp);
+       if (ret) {
+               DRM_ERROR("Falied to terminate tmr\n");
+               return ret;
+       }
+
        ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
        if (ret) {
                DRM_ERROR("PSP ring stop failed\n");
index b544baf..5d71c23 100644 (file)
@@ -1298,8 +1298,12 @@ static int sdma_v5_0_sw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i;
 
-       for (i = 0; i < adev->sdma.num_instances; i++)
+       for (i = 0; i < adev->sdma.num_instances; i++) {
+               if (adev->sdma.instance[i].fw != NULL)
+                       release_firmware(adev->sdma.instance[i].fw);
+
                amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+       }
 
        return 0;
 }
index f0587d9..fee6092 100644 (file)
@@ -40,6 +40,7 @@
 #include <drm/drm_file.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_device.h>
+#include <drm/drm_ioctl.h>
 #include <kgd_kfd_interface.h>
 #include <linux/swap.h>
 
@@ -1076,7 +1077,7 @@ static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
 #if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
        struct drm_device *ddev = kfd->ddev;
 
-       return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major,
+       return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
                                          ddev->render->index,
                                          DEVCG_ACC_WRITE | DEVCG_ACC_READ);
 #else
index d27221d..0e0c42e 100644 (file)
@@ -428,6 +428,7 @@ struct kfd_process *kfd_create_process(struct file *filep)
                                           (int)process->lead_thread->pid);
                if (ret) {
                        pr_warn("Creating procfs pid directory failed");
+                       kobject_put(process->kobj);
                        goto out;
                }
 
index 7ced9f8..db5e0bb 100644 (file)
@@ -1358,7 +1358,7 @@ static int dm_late_init(void *handle)
        struct dmcu *dmcu = NULL;
        bool ret;
 
-       if (!adev->dm.fw_dmcu)
+       if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
                return detect_mst_link_for_all_connectors(adev->ddev);
 
        dmcu = adev->dm.dc->res_pool->dmcu;
@@ -5024,7 +5024,8 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        struct drm_connector *connector = &aconnector->base;
        struct amdgpu_device *adev = connector->dev->dev_private;
        struct dc_stream_state *stream;
-       int requested_bpc = connector->state ? connector->state->max_requested_bpc : 8;
+       const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
+       int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
        enum dc_status dc_result = DC_OK;
 
        do {
index 076af26..1d692f4 100644 (file)
@@ -1058,7 +1058,6 @@ static const struct {
                {"link_settings", &dp_link_settings_debugfs_fops},
                {"phy_settings", &dp_phy_settings_debugfs_fop},
                {"test_pattern", &dp_phy_test_pattern_fops},
-               {"output_bpc", &output_bpc_fops},
                {"vrr_range", &vrr_range_fops},
 #ifdef CONFIG_DRM_AMD_DC_HDCP
                {"hdcp_sink_capability", &hdcp_sink_capability_fops},
@@ -1142,6 +1141,9 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
        debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
                                   &force_yuv420_output_fops);
 
+       debugfs_create_file("output_bpc", 0644, dir, connector,
+                           &output_bpc_fops);
+
        connector->debugfs_dpcd_address = 0;
        connector->debugfs_dpcd_size = 0;
 
index dcf84a6..949d10e 100644 (file)
@@ -510,8 +510,10 @@ static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin
 
        srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size);
 
-       if (!srm)
-               return -EINVAL;
+       if (!srm) {
+               ret = -EINVAL;
+               goto ret;
+       }
 
        if (pos >= srm_size)
                ret = 0;
index 6f93a6c..d016f50 100644 (file)
@@ -2538,10 +2538,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
 
        copy_stream_update_to_stream(dc, context, stream, stream_update);
 
-       if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
-               DC_ERROR("Mode validation failed for stream update!\n");
-               dc_release_state(context);
-               return;
+       if (update_type > UPDATE_TYPE_FAST) {
+               if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+                       DC_ERROR("Mode validation failed for stream update!\n");
+                       dc_release_state(context);
+                       return;
+               }
        }
 
        commit_planes_for_stream(
index 3f66868..ea29cf9 100644 (file)
@@ -28,8 +28,6 @@ endif
 endif
 
 CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc_dpi.o := $(dsc_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dsc/dc_dsc.o := $(dsc_ccflags)
 
 DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o
 
index 0ea6662..0c7f247 100644 (file)
  * Author: AMD
  */
 
+#include <drm/drm_dsc.h>
 #include "dc_hw_types.h"
 #include "dsc.h"
 #include <drm/drm_dp_helper.h>
 #include "dc.h"
+#include "rc_calc.h"
 
 /* This module's internal functions */
 
@@ -304,22 +306,6 @@ static inline uint32_t dsc_div_by_10_round_up(uint32_t value)
        return (value + 9) / 10;
 }
 
-static inline uint32_t calc_dsc_bpp_x16(uint32_t stream_bandwidth_kbps, uint32_t pix_clk_100hz, uint32_t bpp_increment_div)
-{
-       uint32_t dsc_target_bpp_x16;
-       float f_dsc_target_bpp;
-       float f_stream_bandwidth_100bps = stream_bandwidth_kbps * 10.0f;
-       uint32_t precision = bpp_increment_div; // bpp_increment_div is actually precision
-
-       f_dsc_target_bpp = f_stream_bandwidth_100bps / pix_clk_100hz;
-
-       // Round down to the nearest precision stop to bring it into DSC spec range
-       dsc_target_bpp_x16 = (uint32_t)(f_dsc_target_bpp * precision);
-       dsc_target_bpp_x16 = (dsc_target_bpp_x16 * 16) / precision;
-
-       return dsc_target_bpp_x16;
-}
-
 /* Get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range, and timing's pixel clock
  * and uncompressed bandwidth.
  */
index 03ae159..667afbc 100644 (file)
@@ -23,6 +23,7 @@
  * Authors: AMD
  *
  */
+#include <drm/drm_dsc.h>
 
 #include "os_types.h"
 #include "rc_calc.h"
@@ -40,7 +41,8 @@
        break
 
 
-void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, enum max_min max_min, float bpp)
+static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc,
+                      enum max_min max_min, float bpp)
 {
        int mode = MODE_SELECT(444, 422, 420);
        int sel = table_hash(mode, bpc, max_min);
@@ -85,7 +87,7 @@ void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, enum ma
        memcpy(qps, table[index].qps, sizeof(qp_set));
 }
 
-double dsc_roundf(double num)
+static double dsc_roundf(double num)
 {
        if (num < 0.0)
                num = num - 0.5;
@@ -95,7 +97,7 @@ double dsc_roundf(double num)
        return (int)(num);
 }
 
-double dsc_ceil(double num)
+static double dsc_ceil(double num)
 {
        double retval = (int)num;
 
@@ -105,7 +107,7 @@ double dsc_ceil(double num)
        return (int)retval;
 }
 
-void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp)
+static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp)
 {
        int   *p = ofs;
 
@@ -160,7 +162,7 @@ void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp)
        }
 }
 
-int median3(int a, int b, int c)
+static int median3(int a, int b, int c)
 {
        if (a > b)
                swap(a, b);
@@ -172,13 +174,25 @@ int median3(int a, int b, int c)
        return b;
 }
 
-void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_comp bpc, float bpp, int slice_width, int slice_height, int minor_version)
+static void _do_calc_rc_params(struct rc_params *rc, enum colour_mode cm,
+                              enum bits_per_comp bpc, u8 drm_bpp,
+                              bool is_navite_422_or_420,
+                              int slice_width, int slice_height,
+                              int minor_version)
 {
+       float bpp;
        float bpp_group;
        float initial_xmit_delay_factor;
        int padding_pixels;
        int i;
 
+       bpp = ((float)drm_bpp / 16.0);
+       /* in native_422 or native_420 modes, the bits_per_pixel is double the
+        * target bpp (the latter is what calc_rc_params expects)
+        */
+       if (is_navite_422_or_420)
+               bpp /= 2.0;
+
        rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
        rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
 
@@ -251,3 +265,128 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com
        rc->rc_buf_thresh[13] = 8064;
 }
 
+static u32 _do_bytes_per_pixel_calc(int slice_width, u8 drm_bpp,
+                                   bool is_navite_422_or_420)
+{
+       float bpp;
+       u32 bytes_per_pixel;
+       double d_bytes_per_pixel;
+
+       bpp = ((float)drm_bpp / 16.0);
+       d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width;
+       // TODO: Make sure the formula for calculating this is precise (ceiling
+       // vs. floor, and at what point they should be applied)
+       if (is_navite_422_or_420)
+               d_bytes_per_pixel /= 2;
+
+       bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000);
+
+       return bytes_per_pixel;
+}
+
+static u32 _do_calc_dsc_bpp_x16(u32 stream_bandwidth_kbps, u32 pix_clk_100hz,
+                               u32 bpp_increment_div)
+{
+       u32 dsc_target_bpp_x16;
+       float f_dsc_target_bpp;
+       float f_stream_bandwidth_100bps;
+       // bpp_increment_div is actually precision
+       u32 precision = bpp_increment_div;
+
+       f_stream_bandwidth_100bps = stream_bandwidth_kbps * 10.0f;
+       f_dsc_target_bpp = f_stream_bandwidth_100bps / pix_clk_100hz;
+
+       // Round down to the nearest precision stop to bring it into DSC spec
+       // range
+       dsc_target_bpp_x16 = (u32)(f_dsc_target_bpp * precision);
+       dsc_target_bpp_x16 = (dsc_target_bpp_x16 * 16) / precision;
+
+       return dsc_target_bpp_x16;
+}
+
+/**
+ * calc_rc_params - reads the user's cmdline mode
+ * @rc: DC internal DSC parameters
+ * @pps: DRM struct with all required DSC values
+ *
+ * This function expects a drm_dsc_config data struct with all the required DSC
+ * values previously filled out by our driver and based on this information it
+ * computes some of the DSC values.
+ *
+ * @note This calculation requires float point operation, most of it executes
+ * under kernel_fpu_{begin,end}.
+ */
+void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps)
+{
+       enum colour_mode mode;
+       enum bits_per_comp bpc;
+       bool is_navite_422_or_420;
+       u8 drm_bpp = pps->bits_per_pixel;
+       int slice_width  = pps->slice_width;
+       int slice_height = pps->slice_height;
+
+       mode = pps->convert_rgb ? CM_RGB : (pps->simple_422  ? CM_444 :
+                                          (pps->native_422  ? CM_422 :
+                                           pps->native_420  ? CM_420 : CM_444));
+       bpc = (pps->bits_per_component == 8) ? BPC_8 : (pps->bits_per_component == 10)
+                                            ? BPC_10 : BPC_12;
+
+       is_navite_422_or_420 = pps->native_422 || pps->native_420;
+
+       DC_FP_START();
+       _do_calc_rc_params(rc, mode, bpc, drm_bpp, is_navite_422_or_420,
+                          slice_width, slice_height,
+                          pps->dsc_version_minor);
+       DC_FP_END();
+}
+
+/**
+ * calc_dsc_bytes_per_pixel - calculate bytes per pixel
+ * @pps: DRM struct with all required DSC values
+ *
+ * Based on the information inside drm_dsc_config, this function calculates the
+ * total of bytes per pixel.
+ *
+ * @note This calculation requires float point operation, most of it executes
+ * under kernel_fpu_{begin,end}.
+ *
+ * Return:
+ * Return the number of bytes per pixel
+ */
+u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps)
+
+{
+       u32 ret;
+       u8 drm_bpp = pps->bits_per_pixel;
+       int slice_width  = pps->slice_width;
+       bool is_navite_422_or_420 = pps->native_422 || pps->native_420;
+
+       DC_FP_START();
+       ret = _do_bytes_per_pixel_calc(slice_width, drm_bpp,
+                                      is_navite_422_or_420);
+       DC_FP_END();
+       return ret;
+}
+
+/**
+ * calc_dsc_bpp_x16 - retrieve the dsc bits per pixel
+ * @stream_bandwidth_kbps:
+ * @pix_clk_100hz:
+ * @bpp_increment_div:
+ *
+ * Calculate the total of bits per pixel for DSC configuration.
+ *
+ * @note This calculation requires float point operation, most of it executes
+ * under kernel_fpu_{begin,end}.
+ */
+u32 calc_dsc_bpp_x16(u32 stream_bandwidth_kbps, u32 pix_clk_100hz,
+                    u32 bpp_increment_div)
+{
+       u32 dsc_bpp;
+
+       DC_FP_START();
+       dsc_bpp =  _do_calc_dsc_bpp_x16(stream_bandwidth_kbps, pix_clk_100hz,
+                                       bpp_increment_div);
+       DC_FP_END();
+       return dsc_bpp;
+}
index b6b1f09..21723fa 100644 (file)
@@ -77,7 +77,10 @@ struct qp_entry {
 
 typedef struct qp_entry qp_table[];
 
-void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_comp bpc, float bpp, int slice_width, int slice_height, int minor_version);
+void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps);
+u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps);
+u32 calc_dsc_bpp_x16(u32 stream_bandwidth_kbps, u32 pix_clk_100hz,
+                    u32 bpp_increment_div);
 
 #endif
 
index 1f6e63b..ef830ad 100644 (file)
@@ -27,8 +27,6 @@
 #include "dscc_types.h"
 #include "rc_calc.h"
 
-double dsc_ceil(double num);
-
 static void copy_pps_fields(struct drm_dsc_config *to, const struct drm_dsc_config *from)
 {
        to->line_buf_depth           = from->line_buf_depth;
@@ -100,34 +98,13 @@ static void copy_rc_to_cfg(struct drm_dsc_config *dsc_cfg, const struct rc_param
 
 int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_parameters *dsc_params)
 {
-       enum colour_mode  mode = pps->convert_rgb ? CM_RGB :
-                                                       (pps->simple_422  ? CM_444 :
-                                                       (pps->native_422  ? CM_422 :
-                                                       pps->native_420  ? CM_420 : CM_444));
-       enum bits_per_comp bpc = (pps->bits_per_component == 8) ? BPC_8 :
-                                                       (pps->bits_per_component == 10) ? BPC_10 : BPC_12;
-       float            bpp = ((float) pps->bits_per_pixel / 16.0);
-       int              slice_width  = pps->slice_width;
-       int              slice_height = pps->slice_height;
        int              ret;
        struct rc_params rc;
        struct drm_dsc_config   dsc_cfg;
 
-       double d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width;
-
-       // TODO: Make sure the formula for calculating this is precise (ceiling vs. floor, and at what point they should be applied)
-       if (pps->native_422 || pps->native_420)
-               d_bytes_per_pixel /= 2;
-
-       dsc_params->bytes_per_pixel = (uint32_t)dsc_ceil(d_bytes_per_pixel * 0x10000000);
-
-       /* in native_422 or native_420 modes, the bits_per_pixel is double the target bpp
-        * (the latter is what calc_rc_params expects)
-        */
-       if (pps->native_422 || pps->native_420)
-               bpp /= 2.0;
+       dsc_params->bytes_per_pixel = calc_dsc_bytes_per_pixel(pps);
 
-       calc_rc_params(&rc, mode, bpc, bpp, slice_width, slice_height, pps->dsc_version_minor);
+       calc_rc_params(&rc, pps);
        dsc_params->pps = *pps;
        dsc_params->pps.initial_scale_value = 8 * rc.rc_model_size / (rc.rc_model_size - rc.initial_fullness_offset);
 
index 9431b48..bcfe34e 100644 (file)
@@ -843,7 +843,7 @@ static bool build_regamma(struct pwl_float_data_ex *rgb_regamma,
        pow_buffer_ptr = -1; // reset back to no optimize
        ret = true;
 release:
-       kfree(coeff);
+       kvfree(coeff);
        return ret;
 }
 
@@ -1777,7 +1777,7 @@ bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
 
        kfree(rgb_regamma);
 rgb_regamma_alloc_fail:
-       kvfree(rgb_user);
+       kfree(rgb_user);
 rgb_user_alloc_fail:
        return ret;
 }
index 85e5b1e..56923a9 100644 (file)
@@ -239,7 +239,7 @@ static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
 
        switch (dev_id) {
        case 0x67BA:
-       case 0x66B1:
+       case 0x67B1:
                smu_data->power_tune_defaults = &defaults_hawaii_pro;
                break;
        case 0x67B8:
index 2fb9755..c2e0fbb 100644 (file)
@@ -522,9 +522,11 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
        priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
        priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);
 
-       ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
-       if (ret)
-               goto err4;
+       if (adev->psp.ras.ras) {
+               ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
+               if (ret)
+                       goto err4;
+       }
 
        return 0;
 
@@ -560,7 +562,8 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
                        (struct vega20_smumgr *)(hwmgr->smu_backend);
        struct amdgpu_device *adev = hwmgr->adev;
 
-       smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
+       if (adev->psp.ras.ras)
+               smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
 
        if (priv) {
                amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
index cf80438..e464429 100644 (file)
@@ -61,13 +61,8 @@ int drm_i2c_encoder_init(struct drm_device *dev,
 
        request_module("%s%s", I2C_MODULE_PREFIX, info->type);
 
-       client = i2c_new_device(adap, info);
-       if (!client) {
-               err = -ENOMEM;
-               goto fail;
-       }
-
-       if (!client->dev.driver) {
+       client = i2c_new_client_device(adap, info);
+       if (!i2c_client_has_driver(client)) {
                err = -ENODEV;
                goto fail_unregister;
        }
@@ -84,7 +79,7 @@ int drm_i2c_encoder_init(struct drm_device *dev,
 
        err = encoder_drv->encoder_init(client, dev, encoder);
        if (err)
-               goto fail_unregister;
+               goto fail_module_put;
 
        if (info->platform_data)
                encoder->slave_funcs->set_config(&encoder->base,
@@ -92,10 +87,10 @@ int drm_i2c_encoder_init(struct drm_device *dev,
 
        return 0;
 
+fail_module_put:
+       module_put(module);
 fail_unregister:
        i2c_unregister_device(client);
-       module_put(module);
-fail:
        return err;
 }
 EXPORT_SYMBOL(drm_i2c_encoder_init);
index 170aa76..5609e16 100644 (file)
@@ -227,18 +227,9 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
 }
 EXPORT_SYMBOL(drm_fb_helper_debug_leave);
 
-/**
- * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
- * @fb_helper: driver-allocated fbdev helper, can be NULL
- *
- * This should be called from driver's drm &drm_driver.lastclose callback
- * when implementing an fbcon on top of kms using this helper. This ensures that
- * the user isn't greeted with a black screen when e.g. X dies.
- *
- * RETURNS:
- * Zero if everything went ok, negative error code otherwise.
- */
-int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+static int
+__drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
+                                           bool force)
 {
        bool do_delayed;
        int ret;
@@ -250,7 +241,16 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
                return 0;
 
        mutex_lock(&fb_helper->lock);
-       ret = drm_client_modeset_commit(&fb_helper->client);
+       if (force) {
+               /*
+                * Yes this is the _locked version which expects the master lock
+                * to be held. But for forced restores we're intentionally
+                * racing here, see drm_fb_helper_set_par().
+                */
+               ret = drm_client_modeset_commit_locked(&fb_helper->client);
+       } else {
+               ret = drm_client_modeset_commit(&fb_helper->client);
+       }
 
        do_delayed = fb_helper->delayed_hotplug;
        if (do_delayed)
@@ -262,6 +262,22 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
 
        return ret;
 }
+
+/**
+ * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
+ * @fb_helper: driver-allocated fbdev helper, can be NULL
+ *
+ * This should be called from driver's drm &drm_driver.lastclose callback
+ * when implementing an fbcon on top of kms using this helper. This ensures that
+ * the user isn't greeted with a black screen when e.g. X dies.
+ *
+ * RETURNS:
+ * Zero if everything went ok, negative error code otherwise.
+ */
+int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+{
+       return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, false);
+}
 EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
 
 #ifdef CONFIG_MAGIC_SYSRQ
@@ -1318,6 +1334,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
 {
        struct drm_fb_helper *fb_helper = info->par;
        struct fb_var_screeninfo *var = &info->var;
+       bool force;
 
        if (oops_in_progress)
                return -EBUSY;
@@ -1327,7 +1344,25 @@ int drm_fb_helper_set_par(struct fb_info *info)
                return -EINVAL;
        }
 
-       drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+       /*
+        * Normally we want to make sure that a kms master takes precedence over
+        * fbdev, to avoid fbdev flickering and occasionally stealing the
+        * display status. But Xorg first sets the vt back to text mode using
+        * the KDSET IOCTL with KD_TEXT, and only after that drops the master
+        * status when exiting.
+        *
+        * In the past this was caught by drm_fb_helper_lastclose(), but on
+        * modern systems where logind always keeps a drm fd open to orchestrate
+        * the vt switching, this doesn't work.
+        *
+        * To not break the userspace ABI we have this special case here, which
+        * is only used for the above case. Everything else uses the normal
+        * commit function, which ensures that we never steal the display from
+        * an active drm master.
+        */
+       force = var->activate & FB_ACTIVATE_KD_TEXT;
+
+       __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force);
 
        return 0;
 }
index ffd95bf..d00ea38 100644 (file)
@@ -30,12 +30,6 @@ struct drm_dmi_panel_orientation_data {
        int orientation;
 };
 
-static const struct drm_dmi_panel_orientation_data acer_s1003 = {
-       .width = 800,
-       .height = 1280,
-       .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
-};
-
 static const struct drm_dmi_panel_orientation_data asus_t100ha = {
        .width = 800,
        .height = 1280,
@@ -114,13 +108,19 @@ static const struct dmi_system_id orientation_data[] = {
                  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
                  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
                },
-               .driver_data = (void *)&acer_s1003,
+               .driver_data = (void *)&lcd800x1280_rightside_up,
        }, {    /* Asus T100HA */
                .matches = {
                  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
                },
                .driver_data = (void *)&asus_t100ha,
+       }, {    /* Asus T101HA */
+               .matches = {
+                 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"),
+               },
+               .driver_data = (void *)&lcd800x1280_rightside_up,
        }, {    /* GPD MicroPC (generic strings, also match on bios date) */
                .matches = {
                  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
index 619f814..58b89ec 100644 (file)
@@ -61,7 +61,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
                                struct device *subdrv_dev, void **dma_priv)
 {
        struct exynos_drm_private *priv = drm_dev->dev_private;
-       int ret;
+       int ret = 0;
 
        if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
                DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n",
@@ -92,7 +92,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
        if (ret)
                clear_dma_max_seg_size(subdrv_dev);
 
-       return 0;
+       return ret;
 }
 
 /*
index fcee33a..03be314 100644 (file)
@@ -1498,7 +1498,6 @@ static int g2d_probe(struct platform_device *pdev)
 
        g2d->irq = platform_get_irq(pdev, 0);
        if (g2d->irq < 0) {
-               dev_err(dev, "failed to get irq\n");
                ret = g2d->irq;
                goto err_put_clk;
        }
index a86abc1..3821ea7 100644 (file)
@@ -269,8 +269,10 @@ static void mic_pre_enable(struct drm_bridge *bridge)
                goto unlock;
 
        ret = pm_runtime_get_sync(mic->dev);
-       if (ret < 0)
+       if (ret < 0) {
+               pm_runtime_put_noidle(mic->dev);
                goto unlock;
+       }
 
        mic_set_path(mic, 1);
 
index a6fd0c2..544b999 100644 (file)
@@ -307,8 +307,6 @@ static int hibmc_load(struct drm_device *dev)
        /* reset all the states of crtc/plane/encoder/connector */
        drm_mode_config_reset(dev);
 
-       drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
-
        return 0;
 
 err:
@@ -355,6 +353,9 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
                          ret);
                goto err_unload;
        }
+
+       drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
+
        return 0;
 
 err_unload:
index aa22465..0575a1e 100644 (file)
@@ -2579,14 +2579,14 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
 
 static void
 tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
-                               u32 level)
+                               u32 level, enum intel_output_type type)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
        const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations;
        u32 n_entries, val, ln, dpcnt_mask, dpcnt_val;
 
-       if (encoder->type == INTEL_OUTPUT_HDMI) {
+       if (type == INTEL_OUTPUT_HDMI) {
                n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
                ddi_translations = tgl_dkl_phy_hdmi_ddi_trans;
        } else {
@@ -2638,7 +2638,7 @@ static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder,
        if (intel_phy_is_combo(dev_priv, phy))
                icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
        else
-               tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level);
+               tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level, type);
 }
 
 static u32 translate_signal_level(struct intel_dp *intel_dp, int signal_levels)
@@ -2987,7 +2987,7 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
                ln1 = intel_de_read(dev_priv, MG_DP_MODE(1, tc_port));
        }
 
-       ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X1_MODE);
+       ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
        ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
 
        /* DPPATC */
@@ -3472,7 +3472,9 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
                                          INTEL_OUTPUT_DP_MST);
        enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
 
-       intel_dp_set_infoframes(encoder, false, old_crtc_state, old_conn_state);
+       if (!is_mst)
+               intel_dp_set_infoframes(encoder, false,
+                                       old_crtc_state, old_conn_state);
 
        /*
         * Power down sink before disabling the port, otherwise we end
index 9ea1a39..26996e1 100644 (file)
@@ -3822,6 +3822,17 @@ skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
        return true;
 }
 
+unsigned int
+intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
+{
+       int x = 0, y = 0;
+
+       intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
+                                         plane_state->color_plane[0].offset, 0);
+
+       return y;
+}
+
 static int skl_check_main_surface(struct intel_plane_state *plane_state)
 {
        struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
index efb4da2..3a06f72 100644 (file)
@@ -608,6 +608,7 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
                                   u32 pixel_format, u64 modifier,
                                   unsigned int rotation);
 int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
+unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
 
 struct intel_display_error_state *
 intel_display_capture_error_state(struct drm_i915_private *dev_priv);
index d18b406..f29e51c 100644 (file)
@@ -397,6 +397,14 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
         */
        drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
                                     false);
+
+       /*
+        * BSpec 4287: disable DIP after the transcoder is disabled and before
+        * the transcoder clock select is set to none.
+        */
+       if (last_mst_stream)
+               intel_dp_set_infoframes(&intel_dig_port->base, false,
+                                       old_crtc_state, NULL);
        /*
         * From TGL spec: "If multi-stream slave transcoder: Configure
         * Transcoder Clock Select to direct no clock to the transcoder"
index 1c26673..a65d9d8 100644 (file)
 #include "intel_frontbuffer.h"
 
 /*
- * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
- * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
- * origin so the x and y offsets can actually fit the registers. As a
- * consequence, the fence doesn't really start exactly at the display plane
- * address we program because it starts at the real start of the buffer, so we
- * have to take this into consideration here.
- */
-static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
-{
-       return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
-}
-
-/*
  * For SKL+, the plane source size used by the hardware is based on the value we
  * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
  * we wrote to PIPESRC.
@@ -141,7 +128,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
                        fbc_ctl2 |= FBC_CTL_CPU_FENCE;
                intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2);
                intel_de_write(dev_priv, FBC_FENCE_OFF,
-                              params->crtc.fence_y_offset);
+                              params->fence_y_offset);
        }
 
        /* enable it... */
@@ -175,7 +162,7 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
        if (params->fence_id >= 0) {
                dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
                intel_de_write(dev_priv, DPFC_FENCE_YOFF,
-                              params->crtc.fence_y_offset);
+                              params->fence_y_offset);
        } else {
                intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0);
        }
@@ -243,7 +230,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
                        intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
                                       SNB_CPU_FENCE_ENABLE | params->fence_id);
                        intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
-                                      params->crtc.fence_y_offset);
+                                      params->fence_y_offset);
                }
        } else {
                if (IS_GEN(dev_priv, 6)) {
@@ -253,7 +240,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
        }
 
        intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF,
-                      params->crtc.fence_y_offset);
+                      params->fence_y_offset);
        /* enable it... */
        intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
@@ -320,7 +307,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
                intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
                               SNB_CPU_FENCE_ENABLE | params->fence_id);
                intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
-                              params->crtc.fence_y_offset);
+                              params->fence_y_offset);
        } else if (dev_priv->ggtt.num_fences) {
                intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
                intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
@@ -631,8 +618,8 @@ static bool rotation_is_valid(struct drm_i915_private *dev_priv,
 /*
  * For some reason, the hardware tracking starts looking at whatever we
  * programmed as the display plane base address register. It does not look at
- * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
- * variables instead of just looking at the pipe/plane size.
+ * the X and Y offset registers. That's why we include the src x/y offsets
+ * instead of just looking at the plane size.
  */
 static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
 {
@@ -705,7 +692,6 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
        cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
        cache->plane.adjusted_x = plane_state->color_plane[0].x;
        cache->plane.adjusted_y = plane_state->color_plane[0].y;
-       cache->plane.y = plane_state->uapi.src.y1 >> 16;
 
        cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;
 
@@ -713,6 +699,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
        cache->fb.stride = fb->pitches[0];
        cache->fb.modifier = fb->modifier;
 
+       cache->fence_y_offset = intel_plane_fence_y_offset(plane_state);
+
        drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
                    !plane_state->vma->fence);
 
@@ -883,10 +871,10 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
        memset(params, 0, sizeof(*params));
 
        params->fence_id = cache->fence_id;
+       params->fence_y_offset = cache->fence_y_offset;
 
        params->crtc.pipe = crtc->pipe;
        params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
-       params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);
 
        params->fb.format = cache->fb.format;
        params->fb.stride = cache->fb.stride;
index e4aece2..52db2bd 100644 (file)
@@ -204,25 +204,25 @@ static int __ring_active(struct intel_ring *ring)
 {
        int err;
 
-       err = i915_active_acquire(&ring->vma->active);
+       err = intel_ring_pin(ring);
        if (err)
                return err;
 
-       err = intel_ring_pin(ring);
+       err = i915_active_acquire(&ring->vma->active);
        if (err)
-               goto err_active;
+               goto err_pin;
 
        return 0;
 
-err_active:
-       i915_active_release(&ring->vma->active);
+err_pin:
+       intel_ring_unpin(ring);
        return err;
 }
 
 static void __ring_retire(struct intel_ring *ring)
 {
-       intel_ring_unpin(ring);
        i915_active_release(&ring->vma->active);
+       intel_ring_unpin(ring);
 }
 
 __i915_active_call
index da5b610..8691eb6 100644 (file)
@@ -646,7 +646,7 @@ static int engine_setup_common(struct intel_engine_cs *engine)
 struct measure_breadcrumb {
        struct i915_request rq;
        struct intel_ring ring;
-       u32 cs[1024];
+       u32 cs[2048];
 };
 
 static int measure_breadcrumb_dw(struct intel_context *ce)
@@ -668,6 +668,8 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
 
        frame->ring.vaddr = frame->cs;
        frame->ring.size = sizeof(frame->cs);
+       frame->ring.wrap =
+               BITS_PER_TYPE(frame->ring.size) - ilog2(frame->ring.size);
        frame->ring.effective_size = frame->ring.size;
        intel_ring_update_space(&frame->ring);
        frame->rq.ring = &frame->ring;
index 87e6c5b..7c3d8ef 100644 (file)
@@ -1134,6 +1134,13 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
                        list_move(&rq->sched.link, pl);
                        set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 
+                       /* Check in case we rollback so far we wrap [size/2] */
+                       if (intel_ring_direction(rq->ring,
+                                                intel_ring_wrap(rq->ring,
+                                                                rq->tail),
+                                                rq->ring->tail) > 0)
+                               rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
+
                        active = rq;
                } else {
                        struct intel_engine_cs *owner = rq->context->engine;
@@ -1498,8 +1505,9 @@ static u64 execlists_update_context(struct i915_request *rq)
         * HW has a tendency to ignore us rewinding the TAIL to the end of
         * an earlier request.
         */
+       GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
+       prev = rq->ring->tail;
        tail = intel_ring_set_tail(rq->ring, rq->tail);
-       prev = ce->lrc_reg_state[CTX_RING_TAIL];
        if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
                desc |= CTX_DESC_FORCE_RESTORE;
        ce->lrc_reg_state[CTX_RING_TAIL] = tail;
@@ -1895,7 +1903,8 @@ static void defer_active(struct intel_engine_cs *engine)
 
 static bool
 need_timeslice(const struct intel_engine_cs *engine,
-              const struct i915_request *rq)
+              const struct i915_request *rq,
+              const struct rb_node *rb)
 {
        int hint;
 
@@ -1903,9 +1912,28 @@ need_timeslice(const struct intel_engine_cs *engine,
                return false;
 
        hint = engine->execlists.queue_priority_hint;
+
+       if (rb) {
+               const struct virtual_engine *ve =
+                       rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+               const struct intel_engine_cs *inflight =
+                       intel_context_inflight(&ve->context);
+
+               if (!inflight || inflight == engine) {
+                       struct i915_request *next;
+
+                       rcu_read_lock();
+                       next = READ_ONCE(ve->request);
+                       if (next)
+                               hint = max(hint, rq_prio(next));
+                       rcu_read_unlock();
+               }
+       }
+
        if (!list_is_last(&rq->sched.link, &engine->active.requests))
                hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
 
+       GEM_BUG_ON(hint >= I915_PRIORITY_UNPREEMPTABLE);
        return hint >= effective_prio(rq);
 }
 
@@ -1977,10 +2005,9 @@ static void set_timeslice(struct intel_engine_cs *engine)
        set_timer_ms(&engine->execlists.timer, duration);
 }
 
-static void start_timeslice(struct intel_engine_cs *engine)
+static void start_timeslice(struct intel_engine_cs *engine, int prio)
 {
        struct intel_engine_execlists *execlists = &engine->execlists;
-       const int prio = queue_prio(execlists);
        unsigned long duration;
 
        if (!intel_engine_has_timeslices(engine))
@@ -2140,7 +2167,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                        __unwind_incomplete_requests(engine);
 
                        last = NULL;
-               } else if (need_timeslice(engine, last) &&
+               } else if (need_timeslice(engine, last, rb) &&
                           timeslice_expired(execlists, last)) {
                        if (i915_request_completed(last)) {
                                tasklet_hi_schedule(&execlists->tasklet);
@@ -2188,7 +2215,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                 * Even if ELSP[1] is occupied and not worthy
                                 * of timeslices, our queue might be.
                                 */
-                               start_timeslice(engine);
+                               start_timeslice(engine, queue_prio(execlists));
                                return;
                        }
                }
@@ -2223,7 +2250,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
                        if (last && !can_merge_rq(last, rq)) {
                                spin_unlock(&ve->base.active.lock);
-                               start_timeslice(engine);
+                               start_timeslice(engine, rq_prio(rq));
                                return; /* leave this for another sibling */
                        }
 
@@ -4739,6 +4766,14 @@ static int gen12_emit_flush(struct i915_request *request, u32 mode)
        return 0;
 }
 
+static void assert_request_valid(struct i915_request *rq)
+{
+       struct intel_ring *ring __maybe_unused = rq->ring;
+
+       /* Can we unwind this request without appearing to go forwards? */
+       GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
+}
+
 /*
  * Reserve space for 2 NOOPs at the end of each request to be
  * used as a workaround for not being allowed to do lite
@@ -4751,6 +4786,9 @@ static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
        *cs++ = MI_NOOP;
        request->wa_tail = intel_ring_offset(request, cs);
 
+       /* Check that entire request is less than half the ring */
+       assert_request_valid(request);
+
        return cs;
 }
 
index 8cda1b7..bdb3241 100644 (file)
@@ -315,3 +315,7 @@ int intel_ring_cacheline_align(struct i915_request *rq)
        GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
        return 0;
 }
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_ring.c"
+#endif
index 90a2b9e..85d2bef 100644 (file)
@@ -179,6 +179,12 @@ wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
 }
 
 static void
+wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
+{
+       wa_write_masked_or(wal, reg, clr, 0);
+}
+
+static void
 wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 {
        wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val);
@@ -687,6 +693,227 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq)
 }
 
 static void
+gen4_gt_workarounds_init(struct drm_i915_private *i915,
+                        struct i915_wa_list *wal)
+{
+       /* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
+       wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
+}
+
+static void
+g4x_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+       gen4_gt_workarounds_init(i915, wal);
+
+       /* WaDisableRenderCachePipelinedFlush:g4x,ilk */
+       wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
+}
+
+static void
+ilk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+       g4x_gt_workarounds_init(i915, wal);
+
+       wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
+}
+
+static void
+snb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+       /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
+       wa_masked_en(wal,
+                    _3D_CHICKEN,
+                    _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
+
+       /* WaDisable_RenderCache_OperationalFlush:snb */
+       wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
+
+       /*
+        * BSpec recommends 8x4 when MSAA is used,
+        * however in practice 16x4 seems fastest.
+        *
+        * Note that PS/WM thread counts depend on the WIZ hashing
+        * disable bit, which we don't touch here, but it's good
+        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+        */
+       wa_add(wal,
+              GEN6_GT_MODE, 0,
+              _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
+              GEN6_WIZ_HASHING_16x4);
+
+       wa_masked_dis(wal, CACHE_MODE_0, CM0_STC_EVICT_DISABLE_LRA_SNB);
+
+       wa_masked_en(wal,
+                    _3D_CHICKEN3,
+                    /* WaStripsFansDisableFastClipPerformanceFix:snb */
+                    _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
+                    /*
+                     * Bspec says:
+                     * "This bit must be set if 3DSTATE_CLIP clip mode is set
+                     * to normal and 3DSTATE_SF number of SF output attributes
+                     * is more than 16."
+                     */
+                    _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
+}
+
+static void
+ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+       /* WaDisableEarlyCull:ivb */
+       wa_masked_en(wal, _3D_CHICKEN3, _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
+
+       /* WaDisablePSDDualDispatchEnable:ivb */
+       if (IS_IVB_GT1(i915))
+               wa_masked_en(wal,
+                            GEN7_HALF_SLICE_CHICKEN1,
+                            GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
+
+       /* WaDisable_RenderCache_OperationalFlush:ivb */
+       wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
+
+       /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
+       wa_masked_dis(wal,
+                     GEN7_COMMON_SLICE_CHICKEN1,
+                     GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+       /* WaApplyL3ControlAndL3ChickenMode:ivb */
+       wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
+       wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
+
+       /* WaForceL3Serialization:ivb */
+       wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+       /*
+        * WaVSThreadDispatchOverride:ivb,vlv
+        *
+        * This actually overrides the dispatch
+        * mode for all thread types.
+        */
+       wa_write_masked_or(wal, GEN7_FF_THREAD_MODE,
+                          GEN7_FF_SCHED_MASK,
+                          GEN7_FF_TS_SCHED_HW |
+                          GEN7_FF_VS_SCHED_HW |
+                          GEN7_FF_DS_SCHED_HW);
+
+       if (0) { /* causes HiZ corruption on ivb:gt1 */
+               /* enable HiZ Raw Stall Optimization */
+               wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+       }
+
+       /* WaDisable4x2SubspanOptimization:ivb */
+       wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+
+       /*
+        * BSpec recommends 8x4 when MSAA is used,
+        * however in practice 16x4 seems fastest.
+        *
+        * Note that PS/WM thread counts depend on the WIZ hashing
+        * disable bit, which we don't touch here, but it's good
+        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+        */
+       wa_add(wal, GEN7_GT_MODE, 0,
+              _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
+              GEN6_WIZ_HASHING_16x4);
+}
+
+static void
+vlv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+       /* WaDisableEarlyCull:vlv */
+       wa_masked_en(wal, _3D_CHICKEN3, _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
+
+       /* WaPsdDispatchEnable:vlv */
+       /* WaDisablePSDDualDispatchEnable:vlv */
+       wa_masked_en(wal,
+                    GEN7_HALF_SLICE_CHICKEN1,
+                    GEN7_MAX_PS_THREAD_DEP |
+                    GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
+
+       /* WaDisable_RenderCache_OperationalFlush:vlv */
+       wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
+
+       /* WaForceL3Serialization:vlv */
+       wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+       /*
+        * WaVSThreadDispatchOverride:ivb,vlv
+        *
+        * This actually overrides the dispatch
+        * mode for all thread types.
+        */
+       wa_write_masked_or(wal,
+                          GEN7_FF_THREAD_MODE,
+                          GEN7_FF_SCHED_MASK,
+                          GEN7_FF_TS_SCHED_HW |
+                          GEN7_FF_VS_SCHED_HW |
+                          GEN7_FF_DS_SCHED_HW);
+
+       /*
+        * BSpec says this must be set, even though
+        * WaDisable4x2SubspanOptimization isn't listed for VLV.
+        */
+       wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+
+       /*
+        * BSpec recommends 8x4 when MSAA is used,
+        * however in practice 16x4 seems fastest.
+        *
+        * Note that PS/WM thread counts depend on the WIZ hashing
+        * disable bit, which we don't touch here, but it's good
+        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+        */
+       wa_add(wal, GEN7_GT_MODE, 0,
+              _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
+              GEN6_WIZ_HASHING_16x4);
+
+       /*
+        * WaIncreaseL3CreditsForVLVB0:vlv
+        * This is the hardware default actually.
+        */
+       wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
+}
+
+static void
+hsw_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+       /* L3 caching of data atomics doesn't work -- disable it. */
+       wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
+
+       wa_add(wal,
+              HSW_ROW_CHICKEN3, 0,
+              _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
+               0 /* XXX does this reg exist? */);
+
+       /* WaVSRefCountFullforceMissDisable:hsw */
+       wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
+
+       wa_masked_dis(wal,
+                     CACHE_MODE_0_GEN7,
+                     /* WaDisable_RenderCache_OperationalFlush:hsw */
+                     RC_OP_FLUSH_ENABLE |
+                     /* enable HiZ Raw Stall Optimization */
+                     HIZ_RAW_STALL_OPT_DISABLE);
+
+       /* WaDisable4x2SubspanOptimization:hsw */
+       wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+
+       /*
+        * BSpec recommends 8x4 when MSAA is used,
+        * however in practice 16x4 seems fastest.
+        *
+        * Note that PS/WM thread counts depend on the WIZ hashing
+        * disable bit, which we don't touch here, but it's good
+        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+        */
+       wa_add(wal, GEN7_GT_MODE, 0,
+              _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
+              GEN6_WIZ_HASHING_16x4);
+
+       /* WaSampleCChickenBitEnable:hsw */
+       wa_masked_en(wal, HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
+}
+
+static void
 gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
        /* WaDisableKillLogic:bxt,skl,kbl */
@@ -963,6 +1190,20 @@ gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
                bxt_gt_workarounds_init(i915, wal);
        else if (IS_SKYLAKE(i915))
                skl_gt_workarounds_init(i915, wal);
+       else if (IS_HASWELL(i915))
+               hsw_gt_workarounds_init(i915, wal);
+       else if (IS_VALLEYVIEW(i915))
+               vlv_gt_workarounds_init(i915, wal);
+       else if (IS_IVYBRIDGE(i915))
+               ivb_gt_workarounds_init(i915, wal);
+       else if (IS_GEN(i915, 6))
+               snb_gt_workarounds_init(i915, wal);
+       else if (IS_GEN(i915, 5))
+               ilk_gt_workarounds_init(i915, wal);
+       else if (IS_G4X(i915))
+               g4x_gt_workarounds_init(i915, wal);
+       else if (IS_GEN(i915, 4))
+               gen4_gt_workarounds_init(i915, wal);
        else if (INTEL_GEN(i915) <= 8)
                return;
        else
index 2b2efff..4aa4cc9 100644 (file)
@@ -310,22 +310,20 @@ static bool wait_until_running(struct hang *h, struct i915_request *rq)
                          1000));
 }
 
-static void engine_heartbeat_disable(struct intel_engine_cs *engine,
-                                    unsigned long *saved)
+static void engine_heartbeat_disable(struct intel_engine_cs *engine)
 {
-       *saved = engine->props.heartbeat_interval_ms;
        engine->props.heartbeat_interval_ms = 0;
 
        intel_engine_pm_get(engine);
        intel_engine_park_heartbeat(engine);
 }
 
-static void engine_heartbeat_enable(struct intel_engine_cs *engine,
-                                   unsigned long saved)
+static void engine_heartbeat_enable(struct intel_engine_cs *engine)
 {
        intel_engine_pm_put(engine);
 
-       engine->props.heartbeat_interval_ms = saved;
+       engine->props.heartbeat_interval_ms =
+               engine->defaults.heartbeat_interval_ms;
 }
 
 static int igt_hang_sanitycheck(void *arg)
@@ -473,7 +471,6 @@ static int igt_reset_nop_engine(void *arg)
        for_each_engine(engine, gt, id) {
                unsigned int reset_count, reset_engine_count, count;
                struct intel_context *ce;
-               unsigned long heartbeat;
                IGT_TIMEOUT(end_time);
                int err;
 
@@ -485,7 +482,7 @@ static int igt_reset_nop_engine(void *arg)
                reset_engine_count = i915_reset_engine_count(global, engine);
                count = 0;
 
-               engine_heartbeat_disable(engine, &heartbeat);
+               engine_heartbeat_disable(engine);
                set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
                do {
                        int i;
@@ -529,7 +526,7 @@ static int igt_reset_nop_engine(void *arg)
                        }
                } while (time_before(jiffies, end_time));
                clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
-               engine_heartbeat_enable(engine, heartbeat);
+               engine_heartbeat_enable(engine);
 
                pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
 
@@ -564,7 +561,6 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 
        for_each_engine(engine, gt, id) {
                unsigned int reset_count, reset_engine_count;
-               unsigned long heartbeat;
                IGT_TIMEOUT(end_time);
 
                if (active && !intel_engine_can_store_dword(engine))
@@ -580,7 +576,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
                reset_count = i915_reset_count(global);
                reset_engine_count = i915_reset_engine_count(global, engine);
 
-               engine_heartbeat_disable(engine, &heartbeat);
+               engine_heartbeat_disable(engine);
                set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
                do {
                        if (active) {
@@ -632,7 +628,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
                        }
                } while (time_before(jiffies, end_time));
                clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
-               engine_heartbeat_enable(engine, heartbeat);
+               engine_heartbeat_enable(engine);
 
                if (err)
                        break;
@@ -789,7 +785,6 @@ static int __igt_reset_engines(struct intel_gt *gt,
                struct active_engine threads[I915_NUM_ENGINES] = {};
                unsigned long device = i915_reset_count(global);
                unsigned long count = 0, reported;
-               unsigned long heartbeat;
                IGT_TIMEOUT(end_time);
 
                if (flags & TEST_ACTIVE &&
@@ -832,7 +827,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
 
                yield(); /* start all threads before we begin */
 
-               engine_heartbeat_disable(engine, &heartbeat);
+               engine_heartbeat_disable(engine);
                set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
                do {
                        struct i915_request *rq = NULL;
@@ -906,7 +901,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
                        }
                } while (time_before(jiffies, end_time));
                clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
-               engine_heartbeat_enable(engine, heartbeat);
+               engine_heartbeat_enable(engine);
 
                pr_info("i915_reset_engine(%s:%s): %lu resets\n",
                        engine->name, test_name, count);
index 824f99c..924bc01 100644 (file)
@@ -51,22 +51,20 @@ static struct i915_vma *create_scratch(struct intel_gt *gt)
        return vma;
 }
 
-static void engine_heartbeat_disable(struct intel_engine_cs *engine,
-                                    unsigned long *saved)
+static void engine_heartbeat_disable(struct intel_engine_cs *engine)
 {
-       *saved = engine->props.heartbeat_interval_ms;
        engine->props.heartbeat_interval_ms = 0;
 
        intel_engine_pm_get(engine);
        intel_engine_park_heartbeat(engine);
 }
 
-static void engine_heartbeat_enable(struct intel_engine_cs *engine,
-                                   unsigned long saved)
+static void engine_heartbeat_enable(struct intel_engine_cs *engine)
 {
        intel_engine_pm_put(engine);
 
-       engine->props.heartbeat_interval_ms = saved;
+       engine->props.heartbeat_interval_ms =
+               engine->defaults.heartbeat_interval_ms;
 }
 
 static bool is_active(struct i915_request *rq)
@@ -224,7 +222,6 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
                struct intel_context *ce[2] = {};
                struct i915_request *rq[2];
                struct igt_live_test t;
-               unsigned long saved;
                int n;
 
                if (prio && !intel_engine_has_preemption(engine))
@@ -237,7 +234,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
                        err = -EIO;
                        break;
                }
-               engine_heartbeat_disable(engine, &saved);
+               engine_heartbeat_disable(engine);
 
                for (n = 0; n < ARRAY_SIZE(ce); n++) {
                        struct intel_context *tmp;
@@ -345,7 +342,7 @@ err_ce:
                        intel_context_put(ce[n]);
                }
 
-               engine_heartbeat_enable(engine, saved);
+               engine_heartbeat_enable(engine);
                if (igt_live_test_end(&t))
                        err = -EIO;
                if (err)
@@ -466,7 +463,6 @@ static int live_hold_reset(void *arg)
 
        for_each_engine(engine, gt, id) {
                struct intel_context *ce;
-               unsigned long heartbeat;
                struct i915_request *rq;
 
                ce = intel_context_create(engine);
@@ -475,7 +471,7 @@ static int live_hold_reset(void *arg)
                        break;
                }
 
-               engine_heartbeat_disable(engine, &heartbeat);
+               engine_heartbeat_disable(engine);
 
                rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
                if (IS_ERR(rq)) {
@@ -535,7 +531,7 @@ static int live_hold_reset(void *arg)
                i915_request_put(rq);
 
 out:
-               engine_heartbeat_enable(engine, heartbeat);
+               engine_heartbeat_enable(engine);
                intel_context_put(ce);
                if (err)
                        break;
@@ -580,10 +576,9 @@ static int live_error_interrupt(void *arg)
 
        for_each_engine(engine, gt, id) {
                const struct error_phase *p;
-               unsigned long heartbeat;
                int err = 0;
 
-               engine_heartbeat_disable(engine, &heartbeat);
+               engine_heartbeat_disable(engine);
 
                for (p = phases; p->error[0] != GOOD; p++) {
                        struct i915_request *client[ARRAY_SIZE(phases->error)];
@@ -682,7 +677,7 @@ out:
                        }
                }
 
-               engine_heartbeat_enable(engine, heartbeat);
+               engine_heartbeat_enable(engine);
                if (err) {
                        intel_gt_set_wedged(gt);
                        return err;
@@ -828,7 +823,7 @@ slice_semaphore_queue(struct intel_engine_cs *outer,
                }
        }
 
-       err = release_queue(outer, vma, n, INT_MAX);
+       err = release_queue(outer, vma, n, I915_PRIORITY_BARRIER);
        if (err)
                goto out;
 
@@ -895,16 +890,14 @@ static int live_timeslice_preempt(void *arg)
                enum intel_engine_id id;
 
                for_each_engine(engine, gt, id) {
-                       unsigned long saved;
-
                        if (!intel_engine_has_preemption(engine))
                                continue;
 
                        memset(vaddr, 0, PAGE_SIZE);
 
-                       engine_heartbeat_disable(engine, &saved);
+                       engine_heartbeat_disable(engine);
                        err = slice_semaphore_queue(engine, vma, count);
-                       engine_heartbeat_enable(engine, saved);
+                       engine_heartbeat_enable(engine);
                        if (err)
                                goto err_pin;
 
@@ -1009,7 +1002,6 @@ static int live_timeslice_rewind(void *arg)
                enum { X = 1, Z, Y };
                struct i915_request *rq[3] = {};
                struct intel_context *ce;
-               unsigned long heartbeat;
                unsigned long timeslice;
                int i, err = 0;
                u32 *slot;
@@ -1028,7 +1020,7 @@ static int live_timeslice_rewind(void *arg)
                 * Expect execution/evaluation order XZY
                 */
 
-               engine_heartbeat_disable(engine, &heartbeat);
+               engine_heartbeat_disable(engine);
                timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
 
                slot = memset32(engine->status_page.addr + 1000, 0, 4);
@@ -1122,7 +1114,7 @@ err:
                wmb();
 
                engine->props.timeslice_duration_ms = timeslice;
-               engine_heartbeat_enable(engine, heartbeat);
+               engine_heartbeat_enable(engine);
                for (i = 0; i < 3; i++)
                        i915_request_put(rq[i]);
                if (igt_flush_test(gt->i915))
@@ -1202,12 +1194,11 @@ static int live_timeslice_queue(void *arg)
                        .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
                };
                struct i915_request *rq, *nop;
-               unsigned long saved;
 
                if (!intel_engine_has_preemption(engine))
                        continue;
 
-               engine_heartbeat_disable(engine, &saved);
+               engine_heartbeat_disable(engine);
                memset(vaddr, 0, PAGE_SIZE);
 
                /* ELSP[0]: semaphore wait */
@@ -1284,7 +1275,7 @@ static int live_timeslice_queue(void *arg)
 err_rq:
                i915_request_put(rq);
 err_heartbeat:
-               engine_heartbeat_enable(engine, saved);
+               engine_heartbeat_enable(engine);
                if (err)
                        break;
        }
@@ -1298,6 +1289,121 @@ err_obj:
        return err;
 }
 
+static int live_timeslice_nopreempt(void *arg)
+{
+       struct intel_gt *gt = arg;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       struct igt_spinner spin;
+       int err = 0;
+
+       /*
+        * We should not timeslice into a request that is marked with
+        * I915_REQUEST_NOPREEMPT.
+        */
+       if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+               return 0;
+
+       if (igt_spinner_init(&spin, gt))
+               return -ENOMEM;
+
+       for_each_engine(engine, gt, id) {
+               struct intel_context *ce;
+               struct i915_request *rq;
+               unsigned long timeslice;
+
+               if (!intel_engine_has_preemption(engine))
+                       continue;
+
+               ce = intel_context_create(engine);
+               if (IS_ERR(ce)) {
+                       err = PTR_ERR(ce);
+                       break;
+               }
+
+               engine_heartbeat_disable(engine);
+               timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
+
+               /* Create an unpreemptible spinner */
+
+               rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+               intel_context_put(ce);
+               if (IS_ERR(rq)) {
+                       err = PTR_ERR(rq);
+                       goto out_heartbeat;
+               }
+
+               i915_request_get(rq);
+               i915_request_add(rq);
+
+               if (!igt_wait_for_spinner(&spin, rq)) {
+                       i915_request_put(rq);
+                       err = -ETIME;
+                       goto out_spin;
+               }
+
+               set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
+               i915_request_put(rq);
+
+               /* Followed by a maximum priority barrier (heartbeat) */
+
+               ce = intel_context_create(engine);
+               if (IS_ERR(ce)) {
+                       err = PTR_ERR(ce);
+                       goto out_spin;
+               }
+
+               rq = intel_context_create_request(ce);
+               intel_context_put(ce);
+               if (IS_ERR(rq)) {
+                       err = PTR_ERR(rq);
+                       goto out_spin;
+               }
+
+               rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+               i915_request_get(rq);
+               i915_request_add(rq);
+
+               /*
+                * Wait until the barrier is in ELSP, and we know timeslicing
+                * will have been activated.
+                */
+               if (wait_for_submit(engine, rq, HZ / 2)) {
+                       i915_request_put(rq);
+                       err = -ETIME;
+                       goto out_spin;
+               }
+
+               /*
+                * Since the ELSP[0] request is unpreemptible, it should not
+                * allow the maximum priority barrier through. Wait long
+                * enough to see if it is timesliced in by mistake.
+                */
+               if (i915_request_wait(rq, 0, timeslice_threshold(engine)) >= 0) {
+                       pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n",
+                              engine->name);
+                       err = -EINVAL;
+               }
+               i915_request_put(rq);
+
+out_spin:
+               igt_spinner_end(&spin);
+out_heartbeat:
+               xchg(&engine->props.timeslice_duration_ms, timeslice);
+               engine_heartbeat_enable(engine);
+               if (err)
+                       break;
+
+               if (igt_flush_test(gt->i915)) {
+                       err = -EIO;
+                       break;
+               }
+       }
+
+       igt_spinner_fini(&spin);
+       return err;
+}
+
 static int live_busywait_preempt(void *arg)
 {
        struct intel_gt *gt = arg;
@@ -4153,7 +4259,6 @@ static int reset_virtual_engine(struct intel_gt *gt,
 {
        struct intel_engine_cs *engine;
        struct intel_context *ve;
-       unsigned long *heartbeat;
        struct igt_spinner spin;
        struct i915_request *rq;
        unsigned int n;
@@ -4165,15 +4270,9 @@ static int reset_virtual_engine(struct intel_gt *gt,
         * descendents are not executed while the capture is in progress.
         */
 
-       heartbeat = kmalloc_array(nsibling, sizeof(*heartbeat), GFP_KERNEL);
-       if (!heartbeat)
+       if (igt_spinner_init(&spin, gt))
                return -ENOMEM;
 
-       if (igt_spinner_init(&spin, gt)) {
-               err = -ENOMEM;
-               goto out_free;
-       }
-
        ve = intel_execlists_create_virtual(siblings, nsibling);
        if (IS_ERR(ve)) {
                err = PTR_ERR(ve);
@@ -4181,7 +4280,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
        }
 
        for (n = 0; n < nsibling; n++)
-               engine_heartbeat_disable(siblings[n], &heartbeat[n]);
+               engine_heartbeat_disable(siblings[n]);
 
        rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
        if (IS_ERR(rq)) {
@@ -4252,13 +4351,11 @@ out_rq:
        i915_request_put(rq);
 out_heartbeat:
        for (n = 0; n < nsibling; n++)
-               engine_heartbeat_enable(siblings[n], heartbeat[n]);
+               engine_heartbeat_enable(siblings[n]);
 
        intel_context_put(ve);
 out_spin:
        igt_spinner_fini(&spin);
-out_free:
-       kfree(heartbeat);
        return err;
 }
 
@@ -4314,6 +4411,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
                SUBTEST(live_timeslice_preempt),
                SUBTEST(live_timeslice_rewind),
                SUBTEST(live_timeslice_queue),
+               SUBTEST(live_timeslice_nopreempt),
                SUBTEST(live_busywait_preempt),
                SUBTEST(live_preempt),
                SUBTEST(live_late_preempt),
@@ -4932,9 +5030,7 @@ static int live_lrc_gpr(void *arg)
                return PTR_ERR(scratch);
 
        for_each_engine(engine, gt, id) {
-               unsigned long heartbeat;
-
-               engine_heartbeat_disable(engine, &heartbeat);
+               engine_heartbeat_disable(engine);
 
                err = __live_lrc_gpr(engine, scratch, false);
                if (err)
@@ -4945,7 +5041,7 @@ static int live_lrc_gpr(void *arg)
                        goto err;
 
 err:
-               engine_heartbeat_enable(engine, heartbeat);
+               engine_heartbeat_enable(engine);
                if (igt_flush_test(gt->i915))
                        err = -EIO;
                if (err)
@@ -5092,10 +5188,9 @@ static int live_lrc_timestamp(void *arg)
         */
 
        for_each_engine(data.engine, gt, id) {
-               unsigned long heartbeat;
                int i, err = 0;
 
-               engine_heartbeat_disable(data.engine, &heartbeat);
+               engine_heartbeat_disable(data.engine);
 
                for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
                        struct intel_context *tmp;
@@ -5128,7 +5223,7 @@ static int live_lrc_timestamp(void *arg)
                }
 
 err:
-               engine_heartbeat_enable(data.engine, heartbeat);
+               engine_heartbeat_enable(data.engine);
                for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
                        if (!data.ce[i])
                                break;
index 8831ffe..63f87d8 100644 (file)
@@ -18,6 +18,20 @@ struct live_mocs {
        void *vaddr;
 };
 
+static struct intel_context *mocs_context_create(struct intel_engine_cs *engine)
+{
+       struct intel_context *ce;
+
+       ce = intel_context_create(engine);
+       if (IS_ERR(ce))
+               return ce;
+
+       /* We build large requests to read the registers from the ring */
+       ce->ring = __intel_context_ring_size(SZ_16K);
+
+       return ce;
+}
+
 static int request_add_sync(struct i915_request *rq, int err)
 {
        i915_request_get(rq);
@@ -301,7 +315,7 @@ static int live_mocs_clean(void *arg)
        for_each_engine(engine, gt, id) {
                struct intel_context *ce;
 
-               ce = intel_context_create(engine);
+               ce = mocs_context_create(engine);
                if (IS_ERR(ce)) {
                        err = PTR_ERR(ce);
                        break;
@@ -395,7 +409,7 @@ static int live_mocs_reset(void *arg)
        for_each_engine(engine, gt, id) {
                struct intel_context *ce;
 
-               ce = intel_context_create(engine);
+               ce = mocs_context_create(engine);
                if (IS_ERR(ce)) {
                        err = PTR_ERR(ce);
                        break;
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring.c b/drivers/gpu/drm/i915/gt/selftest_ring.c
new file mode 100644 (file)
index 0000000..2a8c534
--- /dev/null
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+static struct intel_ring *mock_ring(unsigned long sz)
+{
+       struct intel_ring *ring;
+
+       ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
+       if (!ring)
+               return NULL;
+
+       kref_init(&ring->ref);
+       ring->size = sz;
+       ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(sz);
+       ring->effective_size = sz;
+       ring->vaddr = (void *)(ring + 1);
+       atomic_set(&ring->pin_count, 1);
+
+       intel_ring_update_space(ring);
+
+       return ring;
+}
+
+static void mock_ring_free(struct intel_ring *ring)
+{
+       kfree(ring);
+}
+
+static int check_ring_direction(struct intel_ring *ring,
+                               u32 next, u32 prev,
+                               int expected)
+{
+       int result;
+
+       result = intel_ring_direction(ring, next, prev);
+       if (result < 0)
+               result = -1;
+       else if (result > 0)
+               result = 1;
+
+       if (result != expected) {
+               pr_err("intel_ring_direction(%u, %u):%d != %d\n",
+                      next, prev, result, expected);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int check_ring_step(struct intel_ring *ring, u32 x, u32 step)
+{
+       u32 prev = x, next = intel_ring_wrap(ring, x + step);
+       int err = 0;
+
+       err |= check_ring_direction(ring, next, next,  0);
+       err |= check_ring_direction(ring, prev, prev,  0);
+       err |= check_ring_direction(ring, next, prev,  1);
+       err |= check_ring_direction(ring, prev, next, -1);
+
+       return err;
+}
+
+static int check_ring_offset(struct intel_ring *ring, u32 x, u32 step)
+{
+       int err = 0;
+
+       err |= check_ring_step(ring, x, step);
+       err |= check_ring_step(ring, intel_ring_wrap(ring, x + 1), step);
+       err |= check_ring_step(ring, intel_ring_wrap(ring, x - 1), step);
+
+       return err;
+}
+
+static int igt_ring_direction(void *dummy)
+{
+       struct intel_ring *ring;
+       unsigned int half = 2048;
+       int step, err = 0;
+
+       ring = mock_ring(2 * half);
+       if (!ring)
+               return -ENOMEM;
+
+       GEM_BUG_ON(ring->size != 2 * half);
+
+       /* Precision of wrap detection is limited to ring->size / 2 */
+       for (step = 1; step < half; step <<= 1) {
+               err |= check_ring_offset(ring, 0, step);
+               err |= check_ring_offset(ring, half, step);
+       }
+       err |= check_ring_step(ring, 0, half - 64);
+
+       /* And check unwrapped handling for good measure */
+       err |= check_ring_offset(ring, 0, 2 * half + 64);
+       err |= check_ring_offset(ring, 3 * half, 1);
+
+       mock_ring_free(ring);
+       return err;
+}
+
+int intel_ring_mock_selftests(void)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(igt_ring_direction),
+       };
+
+       return i915_subtests(tests, NULL);
+}
index 6275d69..5049c3d 100644 (file)
 /* Try to isolate the impact of cstates from determing frequency response */
 #define CPU_LATENCY 0 /* -1 to disable pm_qos, 0 to disable cstates */
 
-static unsigned long engine_heartbeat_disable(struct intel_engine_cs *engine)
+static void engine_heartbeat_disable(struct intel_engine_cs *engine)
 {
-       unsigned long old;
-
-       old = fetch_and_zero(&engine->props.heartbeat_interval_ms);
+       engine->props.heartbeat_interval_ms = 0;
 
        intel_engine_pm_get(engine);
        intel_engine_park_heartbeat(engine);
-
-       return old;
 }
 
-static void engine_heartbeat_enable(struct intel_engine_cs *engine,
-                                   unsigned long saved)
+static void engine_heartbeat_enable(struct intel_engine_cs *engine)
 {
        intel_engine_pm_put(engine);
 
-       engine->props.heartbeat_interval_ms = saved;
+       engine->props.heartbeat_interval_ms =
+               engine->defaults.heartbeat_interval_ms;
 }
 
 static void dummy_rps_work(struct work_struct *wrk)
@@ -246,7 +242,6 @@ int live_rps_clock_interval(void *arg)
        intel_gt_check_clock_frequency(gt);
 
        for_each_engine(engine, gt, id) {
-               unsigned long saved_heartbeat;
                struct i915_request *rq;
                u32 cycles;
                u64 dt;
@@ -254,13 +249,13 @@ int live_rps_clock_interval(void *arg)
                if (!intel_engine_can_store_dword(engine))
                        continue;
 
-               saved_heartbeat = engine_heartbeat_disable(engine);
+               engine_heartbeat_disable(engine);
 
                rq = igt_spinner_create_request(&spin,
                                                engine->kernel_context,
                                                MI_NOOP);
                if (IS_ERR(rq)) {
-                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       engine_heartbeat_enable(engine);
                        err = PTR_ERR(rq);
                        break;
                }
@@ -271,7 +266,7 @@ int live_rps_clock_interval(void *arg)
                        pr_err("%s: RPS spinner did not start\n",
                               engine->name);
                        igt_spinner_end(&spin);
-                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       engine_heartbeat_enable(engine);
                        intel_gt_set_wedged(engine->gt);
                        err = -EIO;
                        break;
@@ -327,7 +322,7 @@ int live_rps_clock_interval(void *arg)
                intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
 
                igt_spinner_end(&spin);
-               engine_heartbeat_enable(engine, saved_heartbeat);
+               engine_heartbeat_enable(engine);
 
                if (err == 0) {
                        u64 time = intel_gt_pm_interval_to_ns(gt, cycles);
@@ -405,7 +400,6 @@ int live_rps_control(void *arg)
 
        intel_gt_pm_get(gt);
        for_each_engine(engine, gt, id) {
-               unsigned long saved_heartbeat;
                struct i915_request *rq;
                ktime_t min_dt, max_dt;
                int f, limit;
@@ -414,7 +408,7 @@ int live_rps_control(void *arg)
                if (!intel_engine_can_store_dword(engine))
                        continue;
 
-               saved_heartbeat = engine_heartbeat_disable(engine);
+               engine_heartbeat_disable(engine);
 
                rq = igt_spinner_create_request(&spin,
                                                engine->kernel_context,
@@ -430,7 +424,7 @@ int live_rps_control(void *arg)
                        pr_err("%s: RPS spinner did not start\n",
                               engine->name);
                        igt_spinner_end(&spin);
-                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       engine_heartbeat_enable(engine);
                        intel_gt_set_wedged(engine->gt);
                        err = -EIO;
                        break;
@@ -440,7 +434,7 @@ int live_rps_control(void *arg)
                        pr_err("%s: could not set minimum frequency [%x], only %x!\n",
                               engine->name, rps->min_freq, read_cagf(rps));
                        igt_spinner_end(&spin);
-                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       engine_heartbeat_enable(engine);
                        show_pstate_limits(rps);
                        err = -EINVAL;
                        break;
@@ -457,7 +451,7 @@ int live_rps_control(void *arg)
                        pr_err("%s: could not restore minimum frequency [%x], only %x!\n",
                               engine->name, rps->min_freq, read_cagf(rps));
                        igt_spinner_end(&spin);
-                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       engine_heartbeat_enable(engine);
                        show_pstate_limits(rps);
                        err = -EINVAL;
                        break;
@@ -472,7 +466,7 @@ int live_rps_control(void *arg)
                min_dt = ktime_sub(ktime_get(), min_dt);
 
                igt_spinner_end(&spin);
-               engine_heartbeat_enable(engine, saved_heartbeat);
+               engine_heartbeat_enable(engine);
 
                pr_info("%s: range:[%x:%uMHz, %x:%uMHz] limit:[%x:%uMHz], %x:%x response %lluns:%lluns\n",
                        engine->name,
@@ -635,7 +629,6 @@ int live_rps_frequency_cs(void *arg)
        rps->work.func = dummy_rps_work;
 
        for_each_engine(engine, gt, id) {
-               unsigned long saved_heartbeat;
                struct i915_request *rq;
                struct i915_vma *vma;
                u32 *cancel, *cntr;
@@ -644,14 +637,14 @@ int live_rps_frequency_cs(void *arg)
                        int freq;
                } min, max;
 
-               saved_heartbeat = engine_heartbeat_disable(engine);
+               engine_heartbeat_disable(engine);
 
                vma = create_spin_counter(engine,
                                          engine->kernel_context->vm, false,
                                          &cancel, &cntr);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
-                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       engine_heartbeat_enable(engine);
                        break;
                }
 
@@ -732,7 +725,7 @@ err_vma:
                i915_vma_unpin(vma);
                i915_vma_put(vma);
 
-               engine_heartbeat_enable(engine, saved_heartbeat);
+               engine_heartbeat_enable(engine);
                if (igt_flush_test(gt->i915))
                        err = -EIO;
                if (err)
@@ -778,7 +771,6 @@ int live_rps_frequency_srm(void *arg)
        rps->work.func = dummy_rps_work;
 
        for_each_engine(engine, gt, id) {
-               unsigned long saved_heartbeat;
                struct i915_request *rq;
                struct i915_vma *vma;
                u32 *cancel, *cntr;
@@ -787,14 +779,14 @@ int live_rps_frequency_srm(void *arg)
                        int freq;
                } min, max;
 
-               saved_heartbeat = engine_heartbeat_disable(engine);
+               engine_heartbeat_disable(engine);
 
                vma = create_spin_counter(engine,
                                          engine->kernel_context->vm, true,
                                          &cancel, &cntr);
                if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
-                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       engine_heartbeat_enable(engine);
                        break;
                }
 
@@ -874,7 +866,7 @@ err_vma:
                i915_vma_unpin(vma);
                i915_vma_put(vma);
 
-               engine_heartbeat_enable(engine, saved_heartbeat);
+               engine_heartbeat_enable(engine);
                if (igt_flush_test(gt->i915))
                        err = -EIO;
                if (err)
@@ -1066,16 +1058,14 @@ int live_rps_interrupt(void *arg)
        for_each_engine(engine, gt, id) {
                /* Keep the engine busy with a spinner; expect an UP! */
                if (pm_events & GEN6_PM_RP_UP_THRESHOLD) {
-                       unsigned long saved_heartbeat;
-
                        intel_gt_pm_wait_for_idle(engine->gt);
                        GEM_BUG_ON(intel_rps_is_active(rps));
 
-                       saved_heartbeat = engine_heartbeat_disable(engine);
+                       engine_heartbeat_disable(engine);
 
                        err = __rps_up_interrupt(rps, engine, &spin);
 
-                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       engine_heartbeat_enable(engine);
                        if (err)
                                goto out;
 
@@ -1084,15 +1074,13 @@ int live_rps_interrupt(void *arg)
 
                /* Keep the engine awake but idle and check for DOWN */
                if (pm_events & GEN6_PM_RP_DOWN_THRESHOLD) {
-                       unsigned long saved_heartbeat;
-
-                       saved_heartbeat = engine_heartbeat_disable(engine);
+                       engine_heartbeat_disable(engine);
                        intel_rc6_disable(&gt->rc6);
 
                        err = __rps_down_interrupt(rps, engine);
 
                        intel_rc6_enable(&gt->rc6);
-                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       engine_heartbeat_enable(engine);
                        if (err)
                                goto out;
                }
@@ -1168,7 +1156,6 @@ int live_rps_power(void *arg)
        rps->work.func = dummy_rps_work;
 
        for_each_engine(engine, gt, id) {
-               unsigned long saved_heartbeat;
                struct i915_request *rq;
                struct {
                        u64 power;
@@ -1178,13 +1165,13 @@ int live_rps_power(void *arg)
                if (!intel_engine_can_store_dword(engine))
                        continue;
 
-               saved_heartbeat = engine_heartbeat_disable(engine);
+               engine_heartbeat_disable(engine);
 
                rq = igt_spinner_create_request(&spin,
                                                engine->kernel_context,
                                                MI_NOOP);
                if (IS_ERR(rq)) {
-                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       engine_heartbeat_enable(engine);
                        err = PTR_ERR(rq);
                        break;
                }
@@ -1195,7 +1182,7 @@ int live_rps_power(void *arg)
                        pr_err("%s: RPS spinner did not start\n",
                               engine->name);
                        igt_spinner_end(&spin);
-                       engine_heartbeat_enable(engine, saved_heartbeat);
+                       engine_heartbeat_enable(engine);
                        intel_gt_set_wedged(engine->gt);
                        err = -EIO;
                        break;
@@ -1208,7 +1195,7 @@ int live_rps_power(void *arg)
                min.power = measure_power_at(rps, &min.freq);
 
                igt_spinner_end(&spin);
-               engine_heartbeat_enable(engine, saved_heartbeat);
+               engine_heartbeat_enable(engine);
 
                pr_info("%s: min:%llumW @ %uMHz, max:%llumW @ %uMHz\n",
                        engine->name,
index c2578a0..ef1c350 100644 (file)
@@ -751,22 +751,20 @@ out_free:
        return err;
 }
 
-static void engine_heartbeat_disable(struct intel_engine_cs *engine,
-                                    unsigned long *saved)
+static void engine_heartbeat_disable(struct intel_engine_cs *engine)
 {
-       *saved = engine->props.heartbeat_interval_ms;
        engine->props.heartbeat_interval_ms = 0;
 
        intel_engine_pm_get(engine);
        intel_engine_park_heartbeat(engine);
 }
 
-static void engine_heartbeat_enable(struct intel_engine_cs *engine,
-                                   unsigned long saved)
+static void engine_heartbeat_enable(struct intel_engine_cs *engine)
 {
        intel_engine_pm_put(engine);
 
-       engine->props.heartbeat_interval_ms = saved;
+       engine->props.heartbeat_interval_ms =
+               engine->defaults.heartbeat_interval_ms;
 }
 
 static int live_hwsp_rollover_kernel(void *arg)
@@ -785,10 +783,9 @@ static int live_hwsp_rollover_kernel(void *arg)
                struct intel_context *ce = engine->kernel_context;
                struct intel_timeline *tl = ce->timeline;
                struct i915_request *rq[3] = {};
-               unsigned long heartbeat;
                int i;
 
-               engine_heartbeat_disable(engine, &heartbeat);
+               engine_heartbeat_disable(engine);
                if (intel_gt_wait_for_idle(gt, HZ / 2)) {
                        err = -EIO;
                        goto out;
@@ -839,7 +836,7 @@ static int live_hwsp_rollover_kernel(void *arg)
 out:
                for (i = 0; i < ARRAY_SIZE(rq); i++)
                        i915_request_put(rq[i]);
-               engine_heartbeat_enable(engine, heartbeat);
+               engine_heartbeat_enable(engine);
                if (err)
                        break;
        }
index 5ed3232..3278546 100644 (file)
@@ -623,6 +623,8 @@ err_request:
                                err = -EINVAL;
                                goto out_unpin;
                        }
+               } else {
+                       rsvd = 0;
                }
 
                expect = results[0];
diff --git a/drivers/gpu/drm/i915/gt/shaders/README b/drivers/gpu/drm/i915/gt/shaders/README
new file mode 100644 (file)
index 0000000..e7e96d7
--- /dev/null
@@ -0,0 +1,46 @@
+ASM sources for auto generated shaders
+======================================
+
+The i915/gt/hsw_clear_kernel.c and i915/gt/ivb_clear_kernel.c files contain
+pre-compiled batch chunks that will clear any residual render cache during
+context switch.
+
+They are generated from their respective platform ASM files present on
+i915/gt/shaders/clear_kernel directory.
+
+The generated .c files should never be modified directly. Instead, any modification
+needs to be done on their respective ASM files, and the build instructions below
+need to be followed.
+
+Building
+========
+
+Environment
+-----------
+
+IGT GPU tool scripts and Mesa's i965 instruction assembler tool are used
+for building.
+
+Please make sure your Mesa tool is compiled with "-Dtools=intel" and
+"-Ddri-drivers=i965", and run this script from the IGT source root directory.
+
+The instructions below assume:
+    *  IGT gpu tools source code is located on your home directory (~) as ~/igt
+    *  Mesa source code is located on your home directory (~) as ~/mesa
+       and built under the ~/mesa/build directory
+    *  Linux kernel source code is under your home directory (~) as ~/linux
+
+Instructions
+------------
+
+~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm \
+       ~/igt/lib/i915/shaders/clear_kernel/ivb.asm
+~ $ cd ~/igt
+igt $ ./scripts/generate_clear_kernel.sh -g ivb \
+      -m ~/mesa/build/src/intel/tools/i965_asm
+
+~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm \
+    ~/igt/lib/i915/shaders/clear_kernel/hsw.asm
+~ $ cd ~/igt
+igt $ ./scripts/generate_clear_kernel.sh -g hsw \
+      -m ~/mesa/build/src/intel/tools/i965_asm
\ No newline at end of file
diff --git a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm
new file mode 100644 (file)
index 0000000..5fdf384
--- /dev/null
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+/*
+ * Kernel for PAVP buffer clear.
+ *
+ *     1. Clear all 64 GRF registers assigned to the kernel with designated value;
+ *     2. Write 32x16 block of all "0" to render target buffer which indirectly clears
+ *        512 bytes of Render Cache.
+ */
+
+/* Store designated "clear GRF" value */
+mov(1)          f0.1<1>UW       g1.2<0,1,0>UW                   { align1 1N };
+
+/**
+ * Curbe Format
+ *
+ * DW 1.0 - Block Offset to write Render Cache
+ * DW 1.1 [15:0] - Clear Word
+ * DW 1.2 - Delay iterations
+ * DW 1.3 - Enable Instrumentation (only for debug)
+ * DW 1.4 - Rsvd (intended for context ID)
+ * DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
+ * DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
+ * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
+ *
+ * Binding Table
+ *
+ * BTI 0: 2D Surface to help clear L3 (Render/Data Cache)
+ * BTI 1: Wait/Instrumentation Buffer
+ *  Size : (SliceCount * SubSliceCount  * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT)
+ *         Expected to be initialized to 0 by driver/another kernel
+ *  Layout:
+ *          RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS]
+ *          Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N
+ */
+add(1)          g1.2<1>UD       g1.2<0,1,0>UD   0x00000001UD    { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */
+cmp.z.f0.0(1)   null<1>UD       g1.3<0,1,0>UD   0x00000000UD    { align1 1N };
+(+f0.0) jmpi(1) 352D                                            { align1 WE_all 1N };
+
+/**
+ * State Register has info on where this thread is running
+ *     IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ *     HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ */
+mov(8)          g3<1>UD         0x00000000UD                    { align1 1Q };
+shr(1)          g3<1>D          sr0<0,1,0>D     12D             { align1 1N };
+and(1)          g3<1>D          g3<0,1,0>D      1D              { align1 1N }; /* g3 has HSID */
+shr(1)          g3.1<1>D        sr0<0,1,0>D     13D             { align1 1N };
+and(1)          g3.1<1>D        g3.1<0,1,0>D    3D              { align1 1N }; /* g3.1 has sliceID */
+mul(1)          g3.5<1>D        g3.1<0,1,0>D    g1.10<0,1,0>UW  { align1 1N };
+add(1)          g3<1>D          g3<0,1,0>D      g3.5<0,1,0>D    { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */
+shr(1)          g3.2<1>D        sr0<0,1,0>D     8D              { align1 1N };
+and(1)          g3.2<1>D        g3.2<0,1,0>D    15D             { align1 1N }; /* g3.2 = EUID */
+mul(1)          g3.4<1>D        g3<0,1,0>D      16D             { align1 1N };
+add(1)          g3.2<1>D        g3.2<0,1,0>D    g3.4<0,1,0>D    { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address )  in instrumentation surf */
+
+mov(8)          g5<1>UD         0x00000000UD                    { align1 1Q };
+and(1)          g3.3<1>D        sr0<0,1,0>D     7D              { align1 1N };
+mul(1)          g3.3<1>D        g3.3<0,1,0>D    4D              { align1 1N };
+
+mov(8)          g4<1>UD         g0<8,8,1>UD                     { align1 1Q }; /* Initialize message header with g0 */
+mov(1)          g4<1>UD         g3.3<0,1,0>UD                   { align1 1N }; /* Block offset */
+mov(1)          g4.1<1>UD       g3.2<0,1,0>UD                   { align1 1N }; /* Block offset */
+mov(1)          g4.2<1>UD       0x00000003UD                    { align1 1N }; /* Block size (1 row x 4 bytes) */
+and(1)          g4.3<1>UD       g4.3<0,1,0>UW   0xffffffffUD    { align1 1N };
+
+/* Media block read to fetch current value at specified location in instrumentation buffer */
+sendc(8)        g5<1>UD         g4<8,8,1>F      0x02190001
+
+                            render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q };
+add(1)          g5<1>D          g5<0,1,0>D      1D              { align1 1N };
+
+/* Media block write for updated value at specified location in instrumentation buffer */
+sendc(8)        g5<1>UD         g4<8,8,1>F      0x040a8001
+                            render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q };
+
+/* Delay thread for specified parameter */
+add.nz.f0.0(1)  g1.2<1>UD       g1.2<0,1,0>UD   -1D             { align1 1N };
+(+f0.0) jmpi(1) -32D                                            { align1 WE_all 1N };
+
+/* Store designated "clear GRF" value */
+mov(1)          f0.1<1>UW       g1.2<0,1,0>UW                   { align1 1N };
+
+/* Initialize looping parameters */
+mov(1)          a0<1>D          0D                              { align1 1N }; /* Initialize a0.0:w=0 */
+mov(1)          a0.4<1>W        127W                            { align1 1N }; /* Loop count. Each loop contains 16 GRF's */
+
+/* Write 32x16 all "0" block */
+mov(8)          g2<1>UD         g0<8,8,1>UD                     { align1 1Q };
+mov(8)          g127<1>UD       g0<8,8,1>UD                     { align1 1Q };
+mov(2)          g2<1>UD         g1<2,2,1>UW                     { align1 1N };
+mov(1)          g2.2<1>UD       0x000f000fUD                    { align1 1N }; /* Block size (16x16) */
+and(1)          g2.3<1>UD       g2.3<0,1,0>UW   0xffffffefUD    { align1 1N };
+mov(16)         g3<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g4<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g5<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g6<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g7<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g8<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g9<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g10<1>UD        0x00000000UD                    { align1 1H };
+sendc(8)        null<1>UD       g2<8,8,1>F      0x120a8000
+                            render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+add(1)          g2<1>UD         g1<0,1,0>UW     0x0010UW        { align1 1N };
+sendc(8)        null<1>UD       g2<8,8,1>F      0x120a8000
+                            render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+
+/* Now, clear all GRF registers */
+add.nz.f0.0(1)  a0.4<1>W        a0.4<0,1,0>W    -1W             { align1 1N };
+mov(16)         g[a0]<1>UW      f0.1<0,1,0>UW                   { align1 1H };
+add(1)          a0<1>D          a0<0,1,0>D      32D             { align1 1N };
+(+f0.0) jmpi(1) -64D                                            { align1 WE_all 1N };
+
+/* Terminate the thread */
+sendc(8)        null<1>UD       g127<8,8,1>F    0x82000010
+                            thread_spawner MsgDesc: mlen 1 rlen 0           { align1 1Q EOT };
diff --git a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm
new file mode 100644 (file)
index 0000000..97c7ac9
--- /dev/null
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright Â© 2020 Intel Corporation
+ */
+
+/*
+ * Kernel for PAVP buffer clear.
+ *
+ *     1. Clear all 64 GRF registers assigned to the kernel with designated value;
+ *     2. Write 32x16 block of all "0" to render target buffer which indirectly clears
+ *        512 bytes of Render Cache.
+ */
+
+/* Store designated "clear GRF" value */
+mov(1)          f0.1<1>UW       g1.2<0,1,0>UW                   { align1 1N };
+
+/**
+ * Curbe Format
+ *
+ * DW 1.0 - Block Offset to write Render Cache
+ * DW 1.1 [15:0] - Clear Word
+ * DW 1.2 - Delay iterations
+ * DW 1.3 - Enable Instrumentation (only for debug)
+ * DW 1.4 - Rsvd (intended for context ID)
+ * DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
+ * DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
+ * DW 1.7 - Rsvd MBZ (inteded for Total Thread Count)
+ *
+ * Binding Table
+ *
+ * BTI 0: 2D Surface to help clear L3 (Render/Data Cache)
+ * BTI 1: Wait/Instrumentation Buffer
+ *  Size : (SliceCount * SubSliceCount  * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT)
+ *         Expected to be initialized to 0 by driver/another kernel
+ *  Layout :
+ *           RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS]
+ *           Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N
+ */
+add(1)          g1.2<1>UD       g1.2<0,1,0>UD   0x00000001UD    { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */
+cmp.z.f0.0(1)   null<1>UD       g1.3<0,1,0>UD   0x00000000UD    { align1 1N };
+(+f0.0) jmpi(1) 44D                                             { align1 WE_all 1N };
+
+/**
+ * State Register has info on where this thread is running
+ *     IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ *     HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ */
+mov(8)          g3<1>UD         0x00000000UD                    { align1 1Q };
+shr(1)          g3<1>D          sr0<0,1,0>D     12D             { align1 1N };
+and(1)          g3<1>D          g3<0,1,0>D      1D              { align1 1N }; /* g3 has HSID */
+shr(1)          g3.1<1>D        sr0<0,1,0>D     13D             { align1 1N };
+and(1)          g3.1<1>D        g3.1<0,1,0>D    3D              { align1 1N }; /* g3.1 has sliceID */
+mul(1)          g3.5<1>D        g3.1<0,1,0>D    g1.10<0,1,0>UW  { align1 1N };
+add(1)          g3<1>D          g3<0,1,0>D      g3.5<0,1,0>D    { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */
+shr(1)          g3.2<1>D        sr0<0,1,0>D     8D              { align1 1N };
+and(1)          g3.2<1>D        g3.2<0,1,0>D    15D             { align1 1N }; /* g3.2 = EUID */
+mul(1)          g3.4<1>D        g3<0,1,0>D      16D             { align1 1N };
+add(1)          g3.2<1>D        g3.2<0,1,0>D    g3.4<0,1,0>D    { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address )  in instrumentation surf */
+
+mov(8)          g5<1>UD         0x00000000UD                    { align1 1Q };
+and(1)          g3.3<1>D        sr0<0,1,0>D     7D              { align1 1N };
+mul(1)          g3.3<1>D        g3.3<0,1,0>D    4D              { align1 1N };
+
+mov(8)          g4<1>UD         g0<8,8,1>UD                     { align1 1Q }; /* Initialize message header with g0 */
+mov(1)          g4<1>UD         g3.3<0,1,0>UD                   { align1 1N }; /* Block offset */
+mov(1)          g4.1<1>UD       g3.2<0,1,0>UD                   { align1 1N }; /* Block offset */
+mov(1)          g4.2<1>UD       0x00000003UD                    { align1 1N }; /* Block size (1 row x 4 bytes) */
+and(1)          g4.3<1>UD       g4.3<0,1,0>UW   0xffffffffUD    { align1 1N };
+
+/* Media block read to fetch current value at specified location in instrumentation buffer */
+sendc(8)        g5<1>UD         g4<8,8,1>F      0x02190001
+                            render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q };
+add(1)          g5<1>D          g5<0,1,0>D      1D              { align1 1N };
+
+/* Media block write for updated value at specified location in instrumentation buffer */
+sendc(8)        g5<1>UD         g4<8,8,1>F      0x040a8001
+                            render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q };
+/* Delay thread for specified parameter */
+add.nz.f0.0(1)  g1.2<1>UD       g1.2<0,1,0>UD   -1D             { align1 1N };
+(+f0.0) jmpi(1) -4D                                             { align1 WE_all 1N };
+
+/* Store designated "clear GRF" value */
+mov(1)          f0.1<1>UW       g1.2<0,1,0>UW                   { align1 1N };
+
+/* Initialize looping parameters */
+mov(1)          a0<1>D          0D                              { align1 1N }; /* Initialize a0.0:w=0 */
+mov(1)          a0.4<1>W        127W                            { align1 1N }; /* Loop count. Each loop contains 16 GRF's */
+
+/* Write 32x16 all "0" block */
+mov(8)          g2<1>UD         g0<8,8,1>UD                     { align1 1Q };
+mov(8)          g127<1>UD       g0<8,8,1>UD                     { align1 1Q };
+mov(2)          g2<1>UD         g1<2,2,1>UW                     { align1 1N };
+mov(1)          g2.2<1>UD       0x000f000fUD                    { align1 1N }; /* Block size (16x16) */
+and(1)          g2.3<1>UD       g2.3<0,1,0>UW   0xffffffefUD    { align1 1N };
+mov(16)         g3<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g4<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g5<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g6<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g7<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g8<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g9<1>UD         0x00000000UD                    { align1 1H };
+mov(16)         g10<1>UD        0x00000000UD                    { align1 1H };
+sendc(8)        null<1>UD       g2<8,8,1>F      0x120a8000
+                            render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+add(1)          g2<1>UD         g1<0,1,0>UW     0x0010UW        { align1 1N };
+sendc(8)        null<1>UD       g2<8,8,1>F      0x120a8000
+                            render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+
+/* Now, clear all GRF registers */
+add.nz.f0.0(1)  a0.4<1>W        a0.4<0,1,0>W    -1W             { align1 1N };
+mov(16)         g[a0]<1>UW      f0.1<0,1,0>UW                   { align1 1H };
+add(1)          a0<1>D          a0<0,1,0>D      32D             { align1 1N };
+(+f0.0) jmpi(1) -8D                                             { align1 WE_all 1N };
+
+/* Terminante the thread */
+sendc(8)        null<1>UD       g127<8,8,1>F    0x82000010
+                            thread_spawner MsgDesc: mlen 1 rlen 0           { align1 1Q EOT };
index ec47d41..62e6a14 100644 (file)
@@ -66,7 +66,7 @@ static inline int mmio_diff_handler(struct intel_gvt *gvt,
        vreg = vgpu_vreg(param->vgpu, offset);
 
        if (preg != vreg) {
-               node = kmalloc(sizeof(*node), GFP_KERNEL);
+               node = kmalloc(sizeof(*node), GFP_ATOMIC);
                if (!node)
                        return -ENOMEM;
 
index 3e88e3b..fadd2ad 100644 (file)
@@ -1726,13 +1726,13 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
        write_vreg(vgpu, offset, p_data, bytes);
 
-       if (data & _MASKED_BIT_ENABLE(1)) {
+       if (IS_MASKED_BITS_ENABLED(data, 1)) {
                enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
                return 0;
        }
 
        if (IS_COFFEELAKE(vgpu->gvt->gt->i915) &&
-           data & _MASKED_BIT_ENABLE(2)) {
+           IS_MASKED_BITS_ENABLED(data, 2)) {
                enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
                return 0;
        }
@@ -1741,14 +1741,14 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
         * pvinfo, if not, we will treat this guest as non-gvtg-aware
         * guest, and stop emulating its cfg space, mmio, gtt, etc.
         */
-       if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
-                       (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
-                       && !vgpu->pv_notified) {
+       if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) ||
+           IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) &&
+           !vgpu->pv_notified) {
                enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
                return 0;
        }
-       if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
-                       || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
+       if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) ||
+           IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) {
                enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
 
                gvt_dbg_core("EXECLIST %s on ring %s\n",
@@ -1809,7 +1809,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
        write_vreg(vgpu, offset, p_data, bytes);
        data = vgpu_vreg(vgpu, offset);
 
-       if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
+       if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
                data |= RESET_CTL_READY_TO_RESET;
        else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
                data &= ~RESET_CTL_READY_TO_RESET;
@@ -1827,7 +1827,8 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
        (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
        write_vreg(vgpu, offset, p_data, bytes);
 
-       if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8))
+       if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
+           IS_MASKED_BITS_ENABLED(data, 0x8))
                enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
 
        return 0;
@@ -3055,6 +3056,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
        MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
        MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
+       MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS);
 
        MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
        MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
@@ -3131,8 +3133,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
                 NULL, NULL);
 
-       MMIO_D(GAMT_CHKN_BIT_REG, D_KBL);
-       MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL);
+       MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL);
+       MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS);
 
        return 0;
 }
index 970704b..3b25e7f 100644 (file)
@@ -54,8 +54,8 @@ bool is_inhibit_context(struct intel_context *ce);
 
 int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
                                       struct i915_request *req);
-#define IS_RESTORE_INHIBIT(a)  \
-       (_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \
-       ((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)))
+
+#define IS_RESTORE_INHIBIT(a) \
+       IS_MASKED_BITS_ENABLED(a, CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)
 
 #endif
index 5b66e14..b88e033 100644 (file)
 #define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
                ((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
 
+#define IS_MASKED_BITS_ENABLED(_val, _b) \
+               (((_val) & _MASKED_BIT_ENABLE(_b)) == _MASKED_BIT_ENABLE(_b))
+#define IS_MASKED_BITS_DISABLED(_val, _b) \
+               ((_val) & _MASKED_BIT_DISABLE(_b))
+
 #define FORCEWAKE_RENDER_GEN9_REG 0xa278
 #define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
 #define FORCEWAKE_BLITTER_GEN9_REG 0xa188
index bca036a..e7532e7 100644 (file)
@@ -230,7 +230,7 @@ static int per_file_stats(int id, void *ptr, void *data)
        struct file_stats *stats = data;
        struct i915_vma *vma;
 
-       if (!kref_get_unless_zero(&obj->base.refcount))
+       if (IS_ERR_OR_NULL(obj) || !kref_get_unless_zero(&obj->base.refcount))
                return 0;
 
        stats->count++;
index adb9bf3..f79f118 100644 (file)
@@ -410,8 +410,6 @@ struct intel_fbc {
                        int adjusted_x;
                        int adjusted_y;
 
-                       int y;
-
                        u16 pixel_blend_mode;
                } plane;
 
@@ -420,6 +418,8 @@ struct intel_fbc {
                        unsigned int stride;
                        u64 modifier;
                } fb;
+
+               unsigned int fence_y_offset;
                u16 gen9_wa_cfb_stride;
                s8 fence_id;
        } state_cache;
@@ -435,7 +435,6 @@ struct intel_fbc {
                struct {
                        enum pipe pipe;
                        enum i9xx_plane_id i9xx_plane;
-                       unsigned int fence_y_offset;
                } crtc;
 
                struct {
@@ -444,6 +443,7 @@ struct intel_fbc {
                } fb;
 
                int cfb_size;
+               unsigned int fence_y_offset;
                u16 gen9_wa_cfb_stride;
                s8 fence_id;
                bool plane_visible;
index 4dc601d..284cf07 100644 (file)
@@ -3125,6 +3125,7 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
 
        val = I915_READ(GEN11_DE_HPD_IMR);
        val &= ~hotplug_irqs;
+       val |= ~enabled_irqs & hotplug_irqs;
        I915_WRITE(GEN11_DE_HPD_IMR, val);
        POSTING_READ(GEN11_DE_HPD_IMR);
 
index e991a70..962ded9 100644 (file)
@@ -269,12 +269,48 @@ static bool exclusive_mmio_access(const struct drm_i915_private *i915)
        return IS_GEN(i915, 7);
 }
 
+static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
+{
+       struct intel_engine_pmu *pmu = &engine->pmu;
+       bool busy;
+       u32 val;
+
+       val = ENGINE_READ_FW(engine, RING_CTL);
+       if (val == 0) /* powerwell off => engine idle */
+               return;
+
+       if (val & RING_WAIT)
+               add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
+       if (val & RING_WAIT_SEMAPHORE)
+               add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
+
+       /* No need to sample when busy stats are supported. */
+       if (intel_engine_supports_stats(engine))
+               return;
+
+       /*
+        * While waiting on a semaphore or event, MI_MODE reports the
+        * ring as idle. However, previously using the seqno, and with
+        * execlists sampling, we account for the ring waiting as the
+        * engine being busy. Therefore, we record the sample as being
+        * busy if either waiting or !idle.
+        */
+       busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
+       if (!busy) {
+               val = ENGINE_READ_FW(engine, RING_MI_MODE);
+               busy = !(val & MODE_IDLE);
+       }
+       if (busy)
+               add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
+}
+
 static void
 engines_sample(struct intel_gt *gt, unsigned int period_ns)
 {
        struct drm_i915_private *i915 = gt->i915;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
+       unsigned long flags;
 
        if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
                return;
@@ -283,53 +319,17 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
                return;
 
        for_each_engine(engine, gt, id) {
-               struct intel_engine_pmu *pmu = &engine->pmu;
-               spinlock_t *mmio_lock;
-               unsigned long flags;
-               bool busy;
-               u32 val;
-
                if (!intel_engine_pm_get_if_awake(engine))
                        continue;
 
-               mmio_lock = NULL;
-               if (exclusive_mmio_access(i915))
-                       mmio_lock = &engine->uncore->lock;
-
-               if (unlikely(mmio_lock))
-                       spin_lock_irqsave(mmio_lock, flags);
-
-               val = ENGINE_READ_FW(engine, RING_CTL);
-               if (val == 0) /* powerwell off => engine idle */
-                       goto skip;
-
-               if (val & RING_WAIT)
-                       add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
-               if (val & RING_WAIT_SEMAPHORE)
-                       add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
-
-               /* No need to sample when busy stats are supported. */
-               if (intel_engine_supports_stats(engine))
-                       goto skip;
-
-               /*
-                * While waiting on a semaphore or event, MI_MODE reports the
-                * ring as idle. However, previously using the seqno, and with
-                * execlists sampling, we account for the ring waiting as the
-                * engine being busy. Therefore, we record the sample as being
-                * busy if either waiting or !idle.
-                */
-               busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
-               if (!busy) {
-                       val = ENGINE_READ_FW(engine, RING_MI_MODE);
-                       busy = !(val & MODE_IDLE);
+               if (exclusive_mmio_access(i915)) {
+                       spin_lock_irqsave(&engine->uncore->lock, flags);
+                       engine_sample(engine, period_ns);
+                       spin_unlock_irqrestore(&engine->uncore->lock, flags);
+               } else {
+                       engine_sample(engine, period_ns);
                }
-               if (busy)
-                       add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
 
-skip:
-               if (unlikely(mmio_lock))
-                       spin_unlock_irqrestore(mmio_lock, flags);
                intel_engine_pm_put_async(engine);
        }
 }
index 5003a71..8aa7866 100644 (file)
@@ -42,7 +42,7 @@ enum {
  * active request.
  */
 #define I915_PRIORITY_UNPREEMPTABLE INT_MAX
-#define I915_PRIORITY_BARRIER INT_MAX
+#define I915_PRIORITY_BARRIER (I915_PRIORITY_UNPREEMPTABLE - 1)
 
 struct i915_priolist {
        struct list_head requests[I915_PRIORITY_COUNT];
index 7717581..06cd1d2 100644 (file)
@@ -7896,7 +7896,7 @@ enum {
 
 /* GEN7 chicken */
 #define GEN7_COMMON_SLICE_CHICKEN1             _MMIO(0x7010)
-  #define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC    ((1 << 10) | (1 << 26))
+  #define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC    (1 << 10)
   #define GEN9_RHWO_OPTIMIZATION_DISABLE       (1 << 14)
 
 #define COMMON_SLICE_CHICKEN2                                  _MMIO(0x7014)
index fc14ebf..1f9cd33 100644 (file)
@@ -104,6 +104,7 @@ vma_create(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
 {
+       struct i915_vma *pos = ERR_PTR(-E2BIG);
        struct i915_vma *vma;
        struct rb_node *rb, **p;
 
@@ -184,7 +185,6 @@ vma_create(struct drm_i915_gem_object *obj,
        rb = NULL;
        p = &obj->vma.tree.rb_node;
        while (*p) {
-               struct i915_vma *pos;
                long cmp;
 
                rb = *p;
@@ -196,16 +196,12 @@ vma_create(struct drm_i915_gem_object *obj,
                 * and dispose of ours.
                 */
                cmp = i915_vma_compare(pos, vm, view);
-               if (cmp == 0) {
-                       spin_unlock(&obj->vma.lock);
-                       i915_vma_free(vma);
-                       return pos;
-               }
-
                if (cmp < 0)
                        p = &rb->rb_right;
-               else
+               else if (cmp > 0)
                        p = &rb->rb_left;
+               else
+                       goto err_unlock;
        }
        rb_link_node(&vma->obj_node, rb, p);
        rb_insert_color(&vma->obj_node, &obj->vma.tree);
@@ -228,8 +224,9 @@ vma_create(struct drm_i915_gem_object *obj,
 err_unlock:
        spin_unlock(&obj->vma.lock);
 err_vma:
+       i915_vm_put(vm);
        i915_vma_free(vma);
-       return ERR_PTR(-E2BIG);
+       return pos;
 }
 
 static struct i915_vma *
index 696491d..07f663c 100644 (file)
@@ -6830,16 +6830,6 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
        I915_WRITE(ILK_DISPLAY_CHICKEN2,
                   I915_READ(ILK_DISPLAY_CHICKEN2) |
                   ILK_ELPIN_409_SELECT);
-       I915_WRITE(_3D_CHICKEN2,
-                  _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
-                  _3D_CHICKEN2_WM_READ_PIPELINED);
-
-       /* WaDisableRenderCachePipelinedFlush:ilk */
-       I915_WRITE(CACHE_MODE_0,
-                  _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
-
-       /* WaDisable_RenderCache_OperationalFlush:ilk */
-       I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 
        g4x_disable_trickle_feed(dev_priv);
 
@@ -6902,27 +6892,6 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
                   I915_READ(ILK_DISPLAY_CHICKEN2) |
                   ILK_ELPIN_409_SELECT);
 
-       /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
-       I915_WRITE(_3D_CHICKEN,
-                  _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
-
-       /* WaDisable_RenderCache_OperationalFlush:snb */
-       I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
-
-       /*
-        * BSpec recoomends 8x4 when MSAA is used,
-        * however in practice 16x4 seems fastest.
-        *
-        * Note that PS/WM thread counts depend on the WIZ hashing
-        * disable bit, which we don't touch here, but it's good
-        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
-        */
-       I915_WRITE(GEN6_GT_MODE,
-                  _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
-
-       I915_WRITE(CACHE_MODE_0,
-                  _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
-
        I915_WRITE(GEN6_UCGCTL1,
                   I915_READ(GEN6_UCGCTL1) |
                   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
@@ -6945,18 +6914,6 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
                   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
                   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
 
-       /* WaStripsFansDisableFastClipPerformanceFix:snb */
-       I915_WRITE(_3D_CHICKEN3,
-                  _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
-
-       /*
-        * Bspec says:
-        * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
-        * 3DSTATE_SF number of SF output attributes is more than 16."
-        */
-       I915_WRITE(_3D_CHICKEN3,
-                  _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
-
        /*
         * According to the spec the following bits should be
         * set in order to enable memory self-refresh and fbc:
@@ -6986,24 +6943,6 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
        gen6_check_mch_setup(dev_priv);
 }
 
-static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
-{
-       u32 reg = I915_READ(GEN7_FF_THREAD_MODE);
-
-       /*
-        * WaVSThreadDispatchOverride:ivb,vlv
-        *
-        * This actually overrides the dispatch
-        * mode for all thread types.
-        */
-       reg &= ~GEN7_FF_SCHED_MASK;
-       reg |= GEN7_FF_TS_SCHED_HW;
-       reg |= GEN7_FF_VS_SCHED_HW;
-       reg |= GEN7_FF_DS_SCHED_HW;
-
-       I915_WRITE(GEN7_FF_THREAD_MODE, reg);
-}
-
 static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
 {
        /*
@@ -7230,45 +7169,10 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
 
 static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-       /* L3 caching of data atomics doesn't work -- disable it. */
-       I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
-       I915_WRITE(HSW_ROW_CHICKEN3,
-                  _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
-
        /* This is required by WaCatErrorRejectionIssue:hsw */
        I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
-                       I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
-                       GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
-
-       /* WaVSRefCountFullforceMissDisable:hsw */
-       I915_WRITE(GEN7_FF_THREAD_MODE,
-                  I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
-
-       /* WaDisable_RenderCache_OperationalFlush:hsw */
-       I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
-
-       /* enable HiZ Raw Stall Optimization */
-       I915_WRITE(CACHE_MODE_0_GEN7,
-                  _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
-
-       /* WaDisable4x2SubspanOptimization:hsw */
-       I915_WRITE(CACHE_MODE_1,
-                  _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
-
-       /*
-        * BSpec recommends 8x4 when MSAA is used,
-        * however in practice 16x4 seems fastest.
-        *
-        * Note that PS/WM thread counts depend on the WIZ hashing
-        * disable bit, which we don't touch here, but it's good
-        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
-        */
-       I915_WRITE(GEN7_GT_MODE,
-                  _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
-
-       /* WaSampleCChickenBitEnable:hsw */
-       I915_WRITE(HALF_SLICE_CHICKEN3,
-                  _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
+                  I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+                  GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
        /* WaSwitchSolVfFArbitrationPriority:hsw */
        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -7282,32 +7186,11 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
 
        I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
 
-       /* WaDisableEarlyCull:ivb */
-       I915_WRITE(_3D_CHICKEN3,
-                  _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
-
        /* WaDisableBackToBackFlipFix:ivb */
        I915_WRITE(IVB_CHICKEN3,
                   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
                   CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
-       /* WaDisablePSDDualDispatchEnable:ivb */
-       if (IS_IVB_GT1(dev_priv))
-               I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
-                          _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
-
-       /* WaDisable_RenderCache_OperationalFlush:ivb */
-       I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
-
-       /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
-       I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
-                  GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
-
-       /* WaApplyL3ControlAndL3ChickenMode:ivb */
-       I915_WRITE(GEN7_L3CNTLREG1,
-                       GEN7_WA_FOR_GEN7_L3_CONTROL);
-       I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
-                  GEN7_WA_L3_CHICKEN_MODE);
        if (IS_IVB_GT1(dev_priv))
                I915_WRITE(GEN7_ROW_CHICKEN2,
                           _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
@@ -7319,10 +7202,6 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
                           _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
        }
 
-       /* WaForceL3Serialization:ivb */
-       I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
-                  ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
-
        /*
         * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
         * This implements the WaDisableRCZUnitClockGating:ivb workaround.
@@ -7337,29 +7216,6 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
 
        g4x_disable_trickle_feed(dev_priv);
 
-       gen7_setup_fixed_func_scheduler(dev_priv);
-
-       if (0) { /* causes HiZ corruption on ivb:gt1 */
-               /* enable HiZ Raw Stall Optimization */
-               I915_WRITE(CACHE_MODE_0_GEN7,
-                          _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
-       }
-
-       /* WaDisable4x2SubspanOptimization:ivb */
-       I915_WRITE(CACHE_MODE_1,
-                  _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
-
-       /*
-        * BSpec recommends 8x4 when MSAA is used,
-        * however in practice 16x4 seems fastest.
-        *
-        * Note that PS/WM thread counts depend on the WIZ hashing
-        * disable bit, which we don't touch here, but it's good
-        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
-        */
-       I915_WRITE(GEN7_GT_MODE,
-                  _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
-
        snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
        snpcr &= ~GEN6_MBC_SNPCR_MASK;
        snpcr |= GEN6_MBC_SNPCR_MED;
@@ -7373,28 +7229,11 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
 
 static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-       /* WaDisableEarlyCull:vlv */
-       I915_WRITE(_3D_CHICKEN3,
-                  _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
-
        /* WaDisableBackToBackFlipFix:vlv */
        I915_WRITE(IVB_CHICKEN3,
                   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
                   CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
-       /* WaPsdDispatchEnable:vlv */
-       /* WaDisablePSDDualDispatchEnable:vlv */
-       I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
-                  _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
-                                     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
-
-       /* WaDisable_RenderCache_OperationalFlush:vlv */
-       I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
-
-       /* WaForceL3Serialization:vlv */
-       I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
-                  ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
-
        /* WaDisableDopClockGating:vlv */
        I915_WRITE(GEN7_ROW_CHICKEN2,
                   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
@@ -7404,8 +7243,6 @@ static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
                   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
                   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
-       gen7_setup_fixed_func_scheduler(dev_priv);
-
        /*
         * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
         * This implements the WaDisableRCZUnitClockGating:vlv workaround.
@@ -7420,30 +7257,6 @@ static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
                   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
 
        /*
-        * BSpec says this must be set, even though
-        * WaDisable4x2SubspanOptimization isn't listed for VLV.
-        */
-       I915_WRITE(CACHE_MODE_1,
-                  _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
-
-       /*
-        * BSpec recommends 8x4 when MSAA is used,
-        * however in practice 16x4 seems fastest.
-        *
-        * Note that PS/WM thread counts depend on the WIZ hashing
-        * disable bit, which we don't touch here, but it's good
-        * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
-        */
-       I915_WRITE(GEN7_GT_MODE,
-                  _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
-
-       /*
-        * WaIncreaseL3CreditsForVLVB0:vlv
-        * This is the hardware default actually.
-        */
-       I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
-
-       /*
         * WaDisableVLVClockGating_VBIIssue:vlv
         * Disable clock gating on th GCFG unit to prevent a delay
         * in the reporting of vblank events.
@@ -7495,13 +7308,6 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
                dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
        I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
 
-       /* WaDisableRenderCachePipelinedFlush */
-       I915_WRITE(CACHE_MODE_0,
-                  _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
-
-       /* WaDisable_RenderCache_OperationalFlush:g4x */
-       I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
-
        g4x_disable_trickle_feed(dev_priv);
 }
 
@@ -7517,11 +7323,6 @@ static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
        intel_uncore_write(uncore,
                           MI_ARB_STATE,
                           _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
-
-       /* WaDisable_RenderCache_OperationalFlush:gen4 */
-       intel_uncore_write(uncore,
-                          CACHE_MODE_0,
-                          _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 }
 
 static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -7534,9 +7335,6 @@ static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
        I915_WRITE(RENCLK_GATE_D2, 0);
        I915_WRITE(MI_ARB_STATE,
                   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
-
-       /* WaDisable_RenderCache_OperationalFlush:gen4 */
-       I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
 }
 
 static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
index 6a2be7d..6090ce3 100644 (file)
@@ -21,6 +21,7 @@ selftest(fence, i915_sw_fence_mock_selftests)
 selftest(scatterlist, scatterlist_mock_selftests)
 selftest(syncmap, i915_syncmap_mock_selftests)
 selftest(uncore, intel_uncore_mock_selftests)
+selftest(ring, intel_ring_mock_selftests)
 selftest(engine, intel_engine_cs_mock_selftests)
 selftest(timelines, intel_timeline_mock_selftests)
 selftest(requests, i915_request_mock_selftests)
index 04e1d38..08802e5 100644 (file)
@@ -812,7 +812,7 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
        struct drm_crtc *crtc = &pipe->crtc;
        struct drm_plane *plane = &pipe->plane;
        struct drm_device *drm = crtc->dev;
-       struct mcde *mcde = drm->dev_private;
+       struct mcde *mcde = to_mcde(drm);
        const struct drm_display_mode *mode = &cstate->mode;
        struct drm_framebuffer *fb = plane->state->fb;
        u32 format = fb->format->format;
index 84f3e2d..80082d6 100644 (file)
@@ -209,7 +209,6 @@ static int mcde_modeset_init(struct drm_device *drm)
 
        drm_mode_config_reset(drm);
        drm_kms_helper_poll_init(drm);
-       drm_fbdev_generic_setup(drm, 32);
 
        return 0;
 }
@@ -264,6 +263,8 @@ static int mcde_drm_bind(struct device *dev)
        if (ret < 0)
                goto unbind;
 
+       drm_fbdev_generic_setup(drm, 32);
+
        return 0;
 
 unbind:
index c420f5a..aa74aac 100644 (file)
@@ -6,12 +6,12 @@ config DRM_MEDIATEK
        depends on COMMON_CLK
        depends on HAVE_ARM_SMCCC
        depends on OF
+       depends on MTK_MMSYS
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_HELPER
        select DRM_MIPI_DSI
        select DRM_PANEL
        select MEMORY
-       select MTK_MMSYS
        select MTK_SMI
        select VIDEOMODE_HELPERS
        help
index fe46c4b..7cd8f41 100644 (file)
@@ -193,7 +193,6 @@ static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
        int ret;
        int i;
 
-       DRM_DEBUG_DRIVER("%s\n", __func__);
        for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
                ret = clk_prepare_enable(mtk_crtc->ddp_comp[i]->clk);
                if (ret) {
@@ -213,7 +212,6 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
 {
        int i;
 
-       DRM_DEBUG_DRIVER("%s\n", __func__);
        for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
                clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
 }
@@ -258,7 +256,6 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
        int ret;
        int i;
 
-       DRM_DEBUG_DRIVER("%s\n", __func__);
        if (WARN_ON(!crtc->state))
                return -EINVAL;
 
@@ -299,7 +296,6 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
                goto err_mutex_unprepare;
        }
 
-       DRM_DEBUG_DRIVER("mediatek_ddp_ddp_path_setup\n");
        for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
                mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
                                      mtk_crtc->ddp_comp[i]->id,
@@ -349,7 +345,6 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
        struct drm_crtc *crtc = &mtk_crtc->base;
        int i;
 
-       DRM_DEBUG_DRIVER("%s\n", __func__);
        for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
                mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
                if (i == 1)
@@ -831,7 +826,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
        mtk_crtc->cmdq_client =
-                       cmdq_mbox_create(dev, drm_crtc_index(&mtk_crtc->base),
+                       cmdq_mbox_create(mtk_crtc->mmsys_dev,
+                                        drm_crtc_index(&mtk_crtc->base),
                                         2000);
        if (IS_ERR(mtk_crtc->cmdq_client)) {
                dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
index 6bd3694..040a8f3 100644 (file)
@@ -444,7 +444,6 @@ static int mtk_drm_probe(struct platform_device *pdev)
        if (!private)
                return -ENOMEM;
 
-       private->data = of_device_get_match_data(dev);
        private->mmsys_dev = dev->parent;
        if (!private->mmsys_dev) {
                dev_err(dev, "Failed to get MMSYS device\n");
@@ -514,7 +513,8 @@ static int mtk_drm_probe(struct platform_device *pdev)
                                goto err_node;
                        }
 
-                       ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
+                       ret = mtk_ddp_comp_init(dev->parent, node, comp,
+                                               comp_id, NULL);
                        if (ret) {
                                of_node_put(node);
                                goto err_node;
@@ -571,7 +571,6 @@ static int mtk_drm_sys_suspend(struct device *dev)
        int ret;
 
        ret = drm_mode_config_helper_suspend(drm);
-       DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n");
 
        return ret;
 }
@@ -583,7 +582,6 @@ static int mtk_drm_sys_resume(struct device *dev)
        int ret;
 
        ret = drm_mode_config_helper_resume(drm);
-       DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n");
 
        return ret;
 }
index c2bd683..92141a1 100644 (file)
@@ -164,6 +164,16 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
                                                   true, true);
 }
 
+static void mtk_plane_atomic_disable(struct drm_plane *plane,
+                                    struct drm_plane_state *old_state)
+{
+       struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
+
+       state->pending.enable = false;
+       wmb(); /* Make sure the above parameter is set before update */
+       state->pending.dirty = true;
+}
+
 static void mtk_plane_atomic_update(struct drm_plane *plane,
                                    struct drm_plane_state *old_state)
 {
@@ -178,6 +188,11 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
        if (!crtc || WARN_ON(!fb))
                return;
 
+       if (!plane->state->visible) {
+               mtk_plane_atomic_disable(plane, old_state);
+               return;
+       }
+
        gem = fb->obj[0];
        mtk_gem = to_mtk_gem_obj(gem);
        addr = mtk_gem->dma_addr;
@@ -200,16 +215,6 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
        state->pending.dirty = true;
 }
 
-static void mtk_plane_atomic_disable(struct drm_plane *plane,
-                                    struct drm_plane_state *old_state)
-{
-       struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
-
-       state->pending.enable = false;
-       wmb(); /* Make sure the above parameter is set before update */
-       state->pending.dirty = true;
-}
-
 static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
        .prepare_fb = drm_gem_fb_prepare_fb,
        .atomic_check = mtk_plane_atomic_check,
index 270bf22..02ac55c 100644 (file)
@@ -316,10 +316,7 @@ static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
 
 static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi)
 {
-       u32 tmp_reg1;
-
-       tmp_reg1 = readl(dsi->regs + DSI_PHY_LCCON);
-       return ((tmp_reg1 & LC_HS_TX_EN) == 1) ? true : false;
+       return readl(dsi->regs + DSI_PHY_LCCON) & LC_HS_TX_EN;
 }
 
 static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
index 5feb760..1eebe31 100644 (file)
@@ -1630,8 +1630,6 @@ static int mtk_hdmi_audio_startup(struct device *dev, void *data)
 {
        struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
 
-       dev_dbg(dev, "%s\n", __func__);
-
        mtk_hdmi_audio_enable(hdmi);
 
        return 0;
@@ -1641,8 +1639,6 @@ static void mtk_hdmi_audio_shutdown(struct device *dev, void *data)
 {
        struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
 
-       dev_dbg(dev, "%s\n", __func__);
-
        mtk_hdmi_audio_disable(hdmi);
 }
 
@@ -1651,8 +1647,6 @@ mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
 {
        struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
 
-       dev_dbg(dev, "%s(%d)\n", __func__, enable);
-
        if (enable)
                mtk_hdmi_hw_aud_mute(hdmi);
        else
@@ -1665,8 +1659,6 @@ static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
 {
        struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
 
-       dev_dbg(dev, "%s\n", __func__);
-
        memcpy(buf, hdmi->conn.eld, min(sizeof(hdmi->conn.eld), len));
 
        return 0;
@@ -1766,7 +1758,6 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
                goto err_bridge_remove;
        }
 
-       dev_dbg(dev, "mediatek hdmi probe success\n");
        return 0;
 
 err_bridge_remove:
@@ -1789,7 +1780,7 @@ static int mtk_hdmi_suspend(struct device *dev)
        struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
 
        mtk_hdmi_clk_disable_audio(hdmi);
-       dev_dbg(dev, "hdmi suspend success!\n");
+
        return 0;
 }
 
@@ -1804,7 +1795,6 @@ static int mtk_hdmi_resume(struct device *dev)
                return ret;
        }
 
-       dev_dbg(dev, "hdmi resume success!\n");
        return 0;
 }
 #endif
index b55f516..827b937 100644 (file)
 #define RGS_HDMITX_5T1_EDG             (0xf << 4)
 #define RGS_HDMITX_PLUG_TST            BIT(0)
 
-static const u8 PREDIV[3][4] = {
-       {0x0, 0x0, 0x0, 0x0},   /* 27Mhz */
-       {0x1, 0x1, 0x1, 0x1},   /* 74Mhz */
-       {0x1, 0x1, 0x1, 0x1}    /* 148Mhz */
-};
-
-static const u8 TXDIV[3][4] = {
-       {0x3, 0x3, 0x3, 0x2},   /* 27Mhz */
-       {0x2, 0x1, 0x1, 0x1},   /* 74Mhz */
-       {0x1, 0x0, 0x0, 0x0}    /* 148Mhz */
-};
-
-static const u8 FBKSEL[3][4] = {
-       {0x1, 0x1, 0x1, 0x1},   /* 27Mhz */
-       {0x1, 0x0, 0x1, 0x1},   /* 74Mhz */
-       {0x1, 0x0, 0x1, 0x1}    /* 148Mhz */
-};
-
-static const u8 FBKDIV[3][4] = {
-       {19, 24, 29, 19},       /* 27Mhz */
-       {19, 24, 14, 19},       /* 74Mhz */
-       {19, 24, 14, 19}        /* 148Mhz */
-};
-
-static const u8 DIVEN[3][4] = {
-       {0x2, 0x1, 0x1, 0x2},   /* 27Mhz */
-       {0x2, 0x2, 0x2, 0x2},   /* 74Mhz */
-       {0x2, 0x2, 0x2, 0x2}    /* 148Mhz */
-};
-
-static const u8 HTPLLBP[3][4] = {
-       {0xc, 0xc, 0x8, 0xc},   /* 27Mhz */
-       {0xc, 0xf, 0xf, 0xc},   /* 74Mhz */
-       {0xc, 0xf, 0xf, 0xc}    /* 148Mhz */
-};
-
-static const u8 HTPLLBC[3][4] = {
-       {0x2, 0x3, 0x3, 0x2},   /* 27Mhz */
-       {0x2, 0x3, 0x3, 0x2},   /* 74Mhz */
-       {0x2, 0x3, 0x3, 0x2}    /* 148Mhz */
-};
-
-static const u8 HTPLLBR[3][4] = {
-       {0x1, 0x1, 0x0, 0x1},   /* 27Mhz */
-       {0x1, 0x2, 0x2, 0x1},   /* 74Mhz */
-       {0x1, 0x2, 0x2, 0x1}    /* 148Mhz */
-};
-
 static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
 {
        struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
 
-       dev_dbg(hdmi_phy->dev, "%s\n", __func__);
-
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
        mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_MHLCK_EN);
@@ -178,8 +128,6 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
 {
        struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
 
-       dev_dbg(hdmi_phy->dev, "%s\n", __func__);
-
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
        mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
        usleep_range(100, 150);
index 8ea0054..049c4bf 100644 (file)
 #define VIU_OSD_FIFO_DEPTH_VAL(val)      ((val & 0x7f) << 12)
 #define VIU_OSD_WORDS_PER_BURST(words)   (((words & 0x4) >> 1) << 22)
 #define VIU_OSD_FIFO_LIMITS(size)        ((size & 0xf) << 24)
+#define VIU_OSD_BURST_LENGTH_24          (0x0 << 31 | 0x0 << 10)
+#define VIU_OSD_BURST_LENGTH_32          (0x0 << 31 | 0x1 << 10)
+#define VIU_OSD_BURST_LENGTH_48          (0x0 << 31 | 0x2 << 10)
+#define VIU_OSD_BURST_LENGTH_64          (0x0 << 31 | 0x3 << 10)
+#define VIU_OSD_BURST_LENGTH_96          (0x1 << 31 | 0x0 << 10)
+#define VIU_OSD_BURST_LENGTH_128         (0x1 << 31 | 0x1 << 10)
 
 #define VD1_IF0_GEN_REG 0x1a50
 #define VD1_IF0_CANVAS0 0x1a51
index 304f8ff..aede0c6 100644 (file)
@@ -411,13 +411,6 @@ void meson_viu_gxm_disable_osd1_afbc(struct meson_drm *priv)
                            priv->io_base + _REG(VIU_MISC_CTRL1));
 }
 
-static inline uint32_t meson_viu_osd_burst_length_reg(uint32_t length)
-{
-       uint32_t val = (((length & 0x80) % 24) / 12);
-
-       return (((val & 0x3) << 10) | (((val & 0x4) >> 2) << 31));
-}
-
 void meson_viu_init(struct meson_drm *priv)
 {
        uint32_t reg;
@@ -444,9 +437,9 @@ void meson_viu_init(struct meson_drm *priv)
                VIU_OSD_FIFO_LIMITS(2);      /* fifo_lim: 2*16=32 */
 
        if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
-               reg |= meson_viu_osd_burst_length_reg(32);
+               reg |= VIU_OSD_BURST_LENGTH_32;
        else
-               reg |= meson_viu_osd_burst_length_reg(64);
+               reg |= VIU_OSD_BURST_LENGTH_64;
 
        writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
        writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
index 60f6472..6021f8d 100644 (file)
@@ -408,7 +408,7 @@ a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
        struct msm_gem_address_space *aspace;
 
        aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
-               SZ_16M + 0xfff * SZ_64K);
+               0xfff * SZ_64K);
 
        if (IS_ERR(aspace) && !IS_ERR(mmu))
                mmu->funcs->destroy(mmu);
index 096be97..21e77d6 100644 (file)
@@ -1121,7 +1121,7 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
                return -ENODEV;
 
        mmu = msm_iommu_new(gmu->dev, domain);
-       gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x7fffffff);
+       gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
        if (IS_ERR(gmu->aspace)) {
                iommu_domain_free(domain);
                return PTR_ERR(gmu->aspace);
index a1589e0..7768557 100644 (file)
@@ -893,8 +893,8 @@ static const struct adreno_gpu_funcs funcs = {
 #if defined(CONFIG_DRM_MSM_GPU_STATE)
                .gpu_state_get = a6xx_gpu_state_get,
                .gpu_state_put = a6xx_gpu_state_put,
-               .create_address_space = adreno_iommu_create_address_space,
 #endif
+               .create_address_space = adreno_iommu_create_address_space,
        },
        .get_timestamp = a6xx_get_timestamp,
 };
index 89673c7..5db06b5 100644 (file)
@@ -194,7 +194,7 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
        struct msm_gem_address_space *aspace;
 
        aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
-               0xfffffff);
+               0xffffffff - SZ_16M);
 
        if (IS_ERR(aspace) && !IS_ERR(mmu))
                mmu->funcs->destroy(mmu);
index 63976dc..0946a86 100644 (file)
@@ -521,7 +521,7 @@ static struct msm_display_topology dpu_encoder_get_topology(
                        struct dpu_kms *dpu_kms,
                        struct drm_display_mode *mode)
 {
-       struct msm_display_topology topology;
+       struct msm_display_topology topology = {0};
        int i, intf_count = 0;
 
        for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
@@ -537,7 +537,8 @@ static struct msm_display_topology dpu_encoder_get_topology(
         * 1 LM, 1 INTF
         * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
         *
-        * Adding color blocks only to primary interface
+        * Adding color blocks only to primary interface if available in
+        * sufficient number
         */
        if (intf_count == 2)
                topology.num_lm = 2;
@@ -546,8 +547,11 @@ static struct msm_display_topology dpu_encoder_get_topology(
        else
                topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
 
-       if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI)
-               topology.num_dspp = topology.num_lm;
+       if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) {
+               if (dpu_kms->catalog->dspp &&
+                       (dpu_kms->catalog->dspp_count >= topology.num_lm))
+                       topology.num_dspp = topology.num_lm;
+       }
 
        topology.num_enc = 0;
        topology.num_intf = intf_count;
@@ -2136,7 +2140,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
 
        dpu_enc = to_dpu_encoder_virt(enc);
 
-       mutex_init(&dpu_enc->enc_lock);
        ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
        if (ret)
                goto fail;
@@ -2151,7 +2154,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
                                0);
 
 
-       mutex_init(&dpu_enc->rc_lock);
        INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
                        dpu_encoder_off_work);
        dpu_enc->idle_timeout = IDLE_TIMEOUT;
@@ -2183,7 +2185,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
 
        dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
        if (!dpu_enc)
-               return ERR_PTR(ENOMEM);
+               return ERR_PTR(-ENOMEM);
 
        rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
                        drm_enc_mode, NULL);
@@ -2196,6 +2198,8 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
 
        spin_lock_init(&dpu_enc->enc_spinlock);
        dpu_enc->enabled = false;
+       mutex_init(&dpu_enc->enc_lock);
+       mutex_init(&dpu_enc->rc_lock);
 
        return &dpu_enc->base;
 }
index b8615d4..680527e 100644 (file)
@@ -780,7 +780,7 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
 
        mmu = msm_iommu_new(dpu_kms->dev->dev, domain);
        aspace = msm_gem_address_space_create(mmu, "dpu1",
-               0x1000, 0xfffffff);
+               0x1000, 0x100000000 - 0x1000);
 
        if (IS_ERR(aspace)) {
                mmu->funcs->destroy(mmu);
index 0889718..fc6a3f8 100644 (file)
@@ -514,7 +514,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
                        config->iommu);
 
                aspace  = msm_gem_address_space_create(mmu,
-                       "mdp4", 0x1000, 0xffffffff);
+                       "mdp4", 0x1000, 0x100000000 - 0x1000);
 
                if (IS_ERR(aspace)) {
                        if (!IS_ERR(mmu))
index 19ec486..e193865 100644 (file)
@@ -633,7 +633,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
                mmu = msm_iommu_new(iommu_dev, config->platform.iommu);
 
                aspace = msm_gem_address_space_create(mmu, "mdp5",
-                       0x1000, 0xffffffff);
+                       0x1000, 0x100000000 - 0x1000);
 
                if (IS_ERR(aspace)) {
                        if (!IS_ERR(mmu))
index 001fbf5..a1d94be 100644 (file)
@@ -71,8 +71,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
        queue->flags = flags;
 
        if (priv->gpu) {
-               if (prio >= priv->gpu->nr_rings)
+               if (prio >= priv->gpu->nr_rings) {
+                       kfree(queue);
                        return -EINVAL;
+               }
 
                queue->prio = prio;
        }
index d472942..519f998 100644 (file)
@@ -601,6 +601,9 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
                                (0x0100 << nv_crtc->index),
        };
 
+       if (!nv_encoder->audio)
+               return;
+
        nv_encoder->audio = false;
        nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
 
index e5c230d..cc99938 100644 (file)
@@ -550,7 +550,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
                                         DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, *dma_addr))
                        goto out_free_page;
-               if (drm->dmem->migrate.copy_func(drm, page_size(spage),
+               if (drm->dmem->migrate.copy_func(drm, 1,
                        NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
                        goto out_dma_unmap;
        } else {
index ba9f935..6586d9d 100644 (file)
@@ -562,6 +562,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
                .end = notifier->notifier.interval_tree.last + 1,
                .pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
                .hmm_pfns = hmm_pfns,
+               .dev_private_owner = drm->dev,
        };
        struct mm_struct *mm = notifier->notifier.mm;
        int ret;
index c8ab1b5..db7769c 100644 (file)
@@ -118,10 +118,10 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                if (retries)
                        udelay(400);
 
-               /* transaction request, wait up to 1ms for it to complete */
+               /* transaction request, wait up to 2ms for it to complete */
                nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl);
 
-               timeout = 1000;
+               timeout = 2000;
                do {
                        ctrl = nvkm_rd32(device, 0x00e4e4 + base);
                        udelay(1);
index 7ef6089..edb6148 100644 (file)
@@ -118,10 +118,10 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
                if (retries)
                        udelay(400);
 
-               /* transaction request, wait up to 1ms for it to complete */
+               /* transaction request, wait up to 2ms for it to complete */
                nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl);
 
-               timeout = 1000;
+               timeout = 2000;
                do {
                        ctrl = nvkm_rd32(device, 0x00d954 + base);
                        udelay(1);
index b6ecd15..5178f87 100644 (file)
@@ -2495,6 +2495,7 @@ static const struct panel_desc logicpd_type_28 = {
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
        .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
                     DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE,
+       .connector_type = DRM_MODE_CONNECTOR_DPI,
 };
 
 static const struct panel_desc mitsubishi_aa070mc01 = {
@@ -2663,6 +2664,7 @@ static const struct panel_desc newhaven_nhd_43_480272ef_atxl = {
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
        .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
                     DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
+       .connector_type = DRM_MODE_CONNECTOR_DPI,
 };
 
 static const struct display_timing nlt_nl192108ac18_02d_timing = {
index 134aa2b..f434efd 100644 (file)
@@ -5563,6 +5563,7 @@ static int ci_parse_power_table(struct radeon_device *rdev)
        if (!rdev->pm.dpm.ps)
                return -ENOMEM;
        power_state_offset = (u8 *)state_array->states;
+       rdev->pm.dpm.num_ps = 0;
        for (i = 0; i < state_array->ucNumEntries; i++) {
                u8 *idx;
                power_state = (union pplib_power_state *)power_state_offset;
@@ -5572,10 +5573,8 @@ static int ci_parse_power_table(struct radeon_device *rdev)
                if (!rdev->pm.power_state[i].clock_info)
                        return -EINVAL;
                ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
-               if (ps == NULL) {
-                       kfree(rdev->pm.dpm.ps);
+               if (ps == NULL)
                        return -ENOMEM;
-               }
                rdev->pm.dpm.ps[i].ps_priv = ps;
                ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
                                              non_clock_info,
@@ -5597,8 +5596,8 @@ static int ci_parse_power_table(struct radeon_device *rdev)
                        k++;
                }
                power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+               rdev->pm.dpm.num_ps = i + 1;
        }
-       rdev->pm.dpm.num_ps = state_array->ucNumEntries;
 
        /* fill in the vce power states */
        for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
index b57c37d..c7fbb79 100644 (file)
@@ -2127,7 +2127,7 @@ static int ni_init_smc_spll_table(struct radeon_device *rdev)
                if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
                        ret = -EINVAL;
 
-               if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
+               if (fb_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
                        ret = -EINVAL;
 
                if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
index 0919f1f..f65d148 100644 (file)
@@ -31,6 +31,7 @@ config DRM_RCAR_DW_HDMI
 config DRM_RCAR_LVDS
        tristate "R-Car DU LVDS Encoder Support"
        depends on DRM && DRM_BRIDGE && OF
+       select DRM_KMS_HELPER
        select DRM_PANEL
        select OF_FLATTREE
        select OF_OVERLAY
index ce07ddc..557cbe5 100644 (file)
@@ -259,9 +259,8 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force)
        struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
        unsigned long reg;
 
-       if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_HPD_REG, reg,
-                              reg & SUN4I_HDMI_HPD_HIGH,
-                              0, 500000)) {
+       reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG);
+       if (reg & SUN4I_HDMI_HPD_HIGH) {
                cec_phys_addr_invalidate(hdmi->cec_adap);
                return connector_status_disconnected;
        }
index 56cc037..cc4fb91 100644 (file)
@@ -363,6 +363,19 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
        mixer->engine.ops = &sun8i_engine_ops;
        mixer->engine.node = dev->of_node;
 
+       if (of_find_property(dev->of_node, "iommus", NULL)) {
+               /*
+                * This assume we have the same DMA constraints for
+                * all our the mixers in our pipeline. This sounds
+                * bad, but it has always been the case for us, and
+                * DRM doesn't do per-device allocation either, so we
+                * would need to fix DRM first...
+                */
+               ret = of_dma_configure(drm->dev, dev->of_node, true);
+               if (ret)
+                       return ret;
+       }
+
        /*
         * While this function can fail, we shouldn't do anything
         * if this happens. Some early DE2 DT entries don't provide
index 83f31c6..04d6848 100644 (file)
@@ -957,6 +957,7 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
        }
 
        drm_plane_helper_add(&plane->base, &tegra_cursor_plane_helper_funcs);
+       drm_plane_create_zpos_immutable_property(&plane->base, 255);
 
        return &plane->base;
 }
index 8183e61..22a03f7 100644 (file)
@@ -149,7 +149,9 @@ int tegra_display_hub_prepare(struct tegra_display_hub *hub)
        for (i = 0; i < hub->soc->num_wgrps; i++) {
                struct tegra_windowgroup *wgrp = &hub->wgrps[i];
 
-               tegra_windowgroup_enable(wgrp);
+               /* Skip orphaned window group whose parent DC is disabled */
+               if (wgrp->parent)
+                       tegra_windowgroup_enable(wgrp);
        }
 
        return 0;
@@ -166,7 +168,9 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
        for (i = 0; i < hub->soc->num_wgrps; i++) {
                struct tegra_windowgroup *wgrp = &hub->wgrps[i];
 
-               tegra_windowgroup_disable(wgrp);
+               /* Skip orphaned window group whose parent DC is disabled */
+               if (wgrp->parent)
+                       tegra_windowgroup_disable(wgrp);
        }
 }
 
@@ -944,6 +948,15 @@ static int tegra_display_hub_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "failed to register host1x client: %d\n",
                        err);
 
+       err = devm_of_platform_populate(&pdev->dev);
+       if (err < 0)
+               goto unregister;
+
+       return err;
+
+unregister:
+       host1x_client_unregister(&hub->client);
+       pm_runtime_disable(&pdev->dev);
        return err;
 }
 
index f73b81c..0f20e14 100644 (file)
@@ -883,8 +883,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
        if (!fence)
                return 0;
 
-       if (no_wait_gpu)
+       if (no_wait_gpu) {
+               dma_fence_put(fence);
                return -EBUSY;
+       }
 
        dma_resv_add_shared_fence(bo->base.resv, fence);
 
index a43aa72..fa03fab 100644 (file)
@@ -300,8 +300,10 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                        break;
                case -EBUSY:
                case -ERESTARTSYS:
+                       dma_fence_put(moving);
                        return VM_FAULT_NOPAGE;
                default:
+                       dma_fence_put(moving);
                        return VM_FAULT_SIGBUS;
                }
 
index 6a995db..e201f62 100644 (file)
@@ -686,8 +686,17 @@ EXPORT_SYMBOL(host1x_driver_register_full);
  */
 void host1x_driver_unregister(struct host1x_driver *driver)
 {
+       struct host1x *host1x;
+
        driver_unregister(&driver->driver);
 
+       mutex_lock(&devices_lock);
+
+       list_for_each_entry(host1x, &devices, list)
+               host1x_detach_driver(host1x, driver);
+
+       mutex_unlock(&devices_lock);
+
        mutex_lock(&drivers_lock);
        list_del_init(&driver->list);
        mutex_unlock(&drivers_lock);
index d24344e..d0ebb70 100644 (file)
@@ -468,11 +468,18 @@ static int host1x_probe(struct platform_device *pdev)
 
        err = host1x_register(host);
        if (err < 0)
-               goto deinit_intr;
+               goto deinit_debugfs;
+
+       err = devm_of_platform_populate(&pdev->dev);
+       if (err < 0)
+               goto unregister;
 
        return 0;
 
-deinit_intr:
+unregister:
+       host1x_unregister(host);
+deinit_debugfs:
+       host1x_debug_deinit(host);
        host1x_intr_deinit(host);
 deinit_syncpt:
        host1x_syncpt_deinit(host);
index 9147ee9..d69f4ef 100644 (file)
@@ -1368,7 +1368,7 @@ static void hv_kmsg_dump(struct kmsg_dumper *dumper,
         * Write dump contents to the page. No need to synchronize; panic should
         * be single-threaded.
         */
-       kmsg_dump_get_buffer(dumper, true, hv_panic_page, HV_HYP_PAGE_SIZE,
+       kmsg_dump_get_buffer(dumper, false, hv_panic_page, HV_HYP_PAGE_SIZE,
                             &bytes_written);
        if (bytes_written)
                hyperv_report_panic_msg(panic_pa, bytes_written);
index 0db8ef4..a270b97 100644 (file)
@@ -883,7 +883,7 @@ static int acpi_power_meter_add(struct acpi_device *device)
 
        res = setup_attrs(resource);
        if (res)
-               goto exit_free;
+               goto exit_free_capability;
 
        resource->hwmon_dev = hwmon_device_register(&device->dev);
        if (IS_ERR(resource->hwmon_dev)) {
@@ -896,6 +896,8 @@ static int acpi_power_meter_add(struct acpi_device *device)
 
 exit_remove:
        remove_attrs(resource);
+exit_free_capability:
+       free_capabilities(resource);
 exit_free:
        kfree(resource);
 exit:
index 1a9772f..94698ca 100644 (file)
@@ -64,7 +64,7 @@ static const struct pvt_sensor_info pvt_info[] = {
  *     48380,
  * where T = [-48380, 147438] mC and N = [0, 1023].
  */
-static const struct pvt_poly poly_temp_to_N = {
+static const struct pvt_poly __maybe_unused poly_temp_to_N = {
        .total_divider = 10000,
        .terms = {
                {4, 18322, 10000, 10000},
@@ -96,7 +96,7 @@ static const struct pvt_poly poly_N_to_temp = {
  * N = (18658e-3*V - 11572) / 10,
  * V = N * 10^5 / 18658 + 11572 * 10^4 / 18658.
  */
-static const struct pvt_poly poly_volt_to_N = {
+static const struct pvt_poly __maybe_unused poly_volt_to_N = {
        .total_divider = 10,
        .terms = {
                {1, 18658, 1000, 1},
@@ -300,12 +300,12 @@ static irqreturn_t pvt_soft_isr(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
 {
        return 0644;
 }
 
-inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
 {
        return 0444;
 }
@@ -462,12 +462,12 @@ static irqreturn_t pvt_hard_isr(int irq, void *data)
 
 #define pvt_soft_isr NULL
 
-inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
 {
        return 0;
 }
 
-inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
 {
        return 0;
 }
index 743752a..64122eb 100644 (file)
@@ -38,8 +38,9 @@ static const u8 MAX6697_REG_CRIT[] = {
  * Map device tree / platform data register bit map to chip bit map.
  * Applies to alert register and over-temperature register.
  */
-#define MAX6697_MAP_BITS(reg)  ((((reg) & 0x7e) >> 1) | \
+#define MAX6697_ALERT_MAP_BITS(reg)    ((((reg) & 0x7e) >> 1) | \
                                 (((reg) & 0x01) << 6) | ((reg) & 0x80))
+#define MAX6697_OVERT_MAP_BITS(reg) (((reg) >> 1) | (((reg) & 0x01) << 7))
 
 #define MAX6697_REG_STAT(n)            (0x44 + (n))
 
@@ -562,12 +563,12 @@ static int max6697_init_chip(struct max6697_data *data,
                return ret;
 
        ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK,
-                                       MAX6697_MAP_BITS(pdata->alert_mask));
+                               MAX6697_ALERT_MAP_BITS(pdata->alert_mask));
        if (ret < 0)
                return ret;
 
        ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK,
-                               MAX6697_MAP_BITS(pdata->over_temperature_mask));
+                       MAX6697_OVERT_MAP_BITS(pdata->over_temperature_mask));
        if (ret < 0)
                return ret;
 
index a337195..ea516ce 100644 (file)
@@ -71,7 +71,7 @@ config SENSORS_IR35221
          Infineon IR35221 controller.
 
          This driver can also be built as a module. If so, the module will
-         be called ir35521.
+         be called ir35221.
 
 config SENSORS_IR38064
        tristate "Infineon IR38064"
index a420877..2191575 100644 (file)
@@ -1869,7 +1869,7 @@ static int pmbus_add_fan_ctrl(struct i2c_client *client,
        struct pmbus_sensor *sensor;
 
        sensor = pmbus_add_sensor(data, "fan", "target", index, page,
-                                 PMBUS_VIRT_FAN_TARGET_1 + id, 0xff, PSC_FAN,
+                                 0xff, PMBUS_VIRT_FAN_TARGET_1 + id, PSC_FAN,
                                  false, false, true);
 
        if (!sensor)
@@ -1880,14 +1880,14 @@ static int pmbus_add_fan_ctrl(struct i2c_client *client,
                return 0;
 
        sensor = pmbus_add_sensor(data, "pwm", NULL, index, page,
-                                 PMBUS_VIRT_PWM_1 + id, 0xff, PSC_PWM,
+                                 0xff, PMBUS_VIRT_PWM_1 + id, PSC_PWM,
                                  false, false, true);
 
        if (!sensor)
                return -ENOMEM;
 
        sensor = pmbus_add_sensor(data, "pwm", "enable", index, page,
-                                 PMBUS_VIRT_PWM_ENABLE_1 + id, 0xff, PSC_PWM,
+                                 0xff, PMBUS_VIRT_PWM_ENABLE_1 + id, PSC_PWM,
                                  true, false, false);
 
        if (!sensor)
@@ -1929,7 +1929,7 @@ static int pmbus_add_fan_attributes(struct i2c_client *client,
                                continue;
 
                        if (pmbus_add_sensor(data, "fan", "input", index,
-                                            page, pmbus_fan_registers[f], 0xff,
+                                            page, 0xff, pmbus_fan_registers[f],
                                             PSC_FAN, true, true, true) == NULL)
                                return -ENOMEM;
 
index 4f932a4..603b4a9 100644 (file)
@@ -34,7 +34,7 @@ struct stp_policy_node {
        unsigned int            first_channel;
        unsigned int            last_channel;
        /* this is the one that's exposed to the attributes */
-       unsigned char           priv[0];
+       unsigned char           priv[];
 };
 
 void *stp_policy_node_priv(struct stp_policy_node *pn)
index 3569439..a9be49f 100644 (file)
@@ -23,7 +23,7 @@ void *stp_policy_node_priv(struct stp_policy_node *pn);
 
 struct stp_master {
        unsigned int    nr_free;
-       unsigned long   chan_map[0];
+       unsigned long   chan_map[];
 };
 
 struct stm_device {
@@ -42,7 +42,7 @@ struct stm_device {
        const struct config_item_type           *pdrv_node_type;
        /* master allocation */
        spinlock_t              mc_lock;
-       struct stp_master       *masters[0];
+       struct stp_master       *masters[];
 };
 
 #define to_stm_device(_d)                              \
index ef39c83..bae1dc0 100644 (file)
@@ -113,11 +113,18 @@ config I2C_STUB
 
 config I2C_SLAVE
        bool "I2C slave support"
+       help
+         This enables Linux to act as an I2C slave device. Note that your I2C
+         bus master driver also needs to support this functionality. Please
+         read Documentation/i2c/slave-interface.rst for further details.
 
 if I2C_SLAVE
 
 config I2C_SLAVE_EEPROM
        tristate "I2C eeprom slave driver"
+       help
+         This backend makes Linux behave like an I2C EEPROM. Please read
+         Documentation/i2c/slave-eeprom-backend.rst for further details.
 
 endif
 
index 7f10312..3889787 100644 (file)
@@ -314,7 +314,8 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
                        DEB2("BUS ERROR - SDA Stuck low\n");
                        pca_reset(adap);
                        goto out;
-               case 0x90: /* Bus error - SCL stuck low */
+               case 0x78: /* Bus error - SCL stuck low (PCA9665) */
+               case 0x90: /* Bus error - SCL stuck low (PCA9564) */
                        DEB2("BUS ERROR - SCL Stuck low\n");
                        pca_reset(adap);
                        goto out;
index e3a8640..3c19aad 100644 (file)
@@ -286,10 +286,8 @@ int i2c_dw_acpi_configure(struct device *device)
 }
 EXPORT_SYMBOL_GPL(i2c_dw_acpi_configure);
 
-void i2c_dw_acpi_adjust_bus_speed(struct device *device)
+static u32 i2c_dw_acpi_round_bus_speed(struct device *device)
 {
-       struct dw_i2c_dev *dev = dev_get_drvdata(device);
-       struct i2c_timings *t = &dev->timings;
        u32 acpi_speed;
        int i;
 
@@ -300,9 +298,22 @@ void i2c_dw_acpi_adjust_bus_speed(struct device *device)
         */
        for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) {
                if (acpi_speed >= supported_speeds[i])
-                       break;
+                       return supported_speeds[i];
        }
-       acpi_speed = i < ARRAY_SIZE(supported_speeds) ? supported_speeds[i] : 0;
+
+       return 0;
+}
+
+#else  /* CONFIG_ACPI */
+
+static inline u32 i2c_dw_acpi_round_bus_speed(struct device *device) { return 0; }
+
+#endif /* CONFIG_ACPI */
+
+void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev)
+{
+       u32 acpi_speed = i2c_dw_acpi_round_bus_speed(dev->dev);
+       struct i2c_timings *t = &dev->timings;
 
        /*
         * Find bus speed from the "clock-frequency" device property, ACPI
@@ -315,9 +326,7 @@ void i2c_dw_acpi_adjust_bus_speed(struct device *device)
        else
                t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
 }
-EXPORT_SYMBOL_GPL(i2c_dw_acpi_adjust_bus_speed);
-
-#endif /* CONFIG_ACPI */
+EXPORT_SYMBOL_GPL(i2c_dw_adjust_bus_speed);
 
 u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
 {
index 556673a..eb5ef4d 100644 (file)
@@ -361,11 +361,10 @@ static inline int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev) { return 0;
 #endif
 
 int i2c_dw_validate_speed(struct dw_i2c_dev *dev);
+void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev);
 
 #if IS_ENABLED(CONFIG_ACPI)
 int i2c_dw_acpi_configure(struct device *device);
-void i2c_dw_acpi_adjust_bus_speed(struct device *device);
 #else
 static inline int i2c_dw_acpi_configure(struct device *device) { return -ENODEV; }
-static inline void i2c_dw_acpi_adjust_bus_speed(struct device *device) {}
 #endif
index 947c096..8522134 100644 (file)
@@ -240,7 +240,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
                }
        }
 
-       i2c_dw_acpi_adjust_bus_speed(&pdev->dev);
+       i2c_dw_adjust_bus_speed(dev);
 
        if (has_acpi_companion(&pdev->dev))
                i2c_dw_acpi_configure(&pdev->dev);
index 0de4e30..a71bc58 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/clk-provider.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/i2c.h>
@@ -191,6 +192,17 @@ static int dw_i2c_plat_request_regs(struct dw_i2c_dev *dev)
        return ret;
 }
 
+static const struct dmi_system_id dw_i2c_hwmon_class_dmi[] = {
+       {
+               .ident = "Qtechnology QT5222",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Qtechnology"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "QT5222"),
+               },
+       },
+       { } /* terminate list */
+};
+
 static int dw_i2c_plat_probe(struct platform_device *pdev)
 {
        struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -228,7 +240,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
        else
                i2c_parse_fw_timings(&pdev->dev, t, false);
 
-       i2c_dw_acpi_adjust_bus_speed(&pdev->dev);
+       i2c_dw_adjust_bus_speed(dev);
 
        if (pdev->dev.of_node)
                dw_i2c_of_configure(pdev);
@@ -267,7 +279,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
 
        adap = &dev->adapter;
        adap->owner = THIS_MODULE;
-       adap->class = I2C_CLASS_DEPRECATED;
+       adap->class = dmi_check_system(dw_i2c_hwmon_class_dmi) ?
+                                       I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED;
        ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
        adap->dev.of_node = pdev->dev.of_node;
        adap->nr = -1;
index bb810de..73f1396 100644 (file)
@@ -180,6 +180,7 @@ static const struct pci_device_id pch_pcidev_id[] = {
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, },
        {0,}
 };
+MODULE_DEVICE_TABLE(pci, pch_pcidev_id);
 
 static irqreturn_t pch_i2c_handler(int irq, void *pData);
 
index e0c2569..977d6f5 100644 (file)
@@ -98,7 +98,7 @@
 #define I2C_STAT_DAT_REQ       BIT(25)
 #define I2C_STAT_CMD_COMP      BIT(24)
 #define I2C_STAT_STOP_ERR      BIT(23)
-#define I2C_STAT_MAX_PORT      GENMASK(19, 16)
+#define I2C_STAT_MAX_PORT      GENMASK(22, 16)
 #define I2C_STAT_ANY_INT       BIT(15)
 #define I2C_STAT_SCL_IN                BIT(11)
 #define I2C_STAT_SDA_IN                BIT(10)
index 2fd717d..71d7bae 100644 (file)
@@ -337,9 +337,9 @@ static int mlxcpld_i2c_wait_for_tc(struct mlxcpld_i2c_priv *priv)
                if (priv->smbus_block && (val & MLXCPLD_I2C_SMBUS_BLK_BIT)) {
                        mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_NUM_DAT_REG,
                                              &datalen, 1);
-                       if (unlikely(datalen > (I2C_SMBUS_BLOCK_MAX + 1))) {
+                       if (unlikely(datalen > I2C_SMBUS_BLOCK_MAX)) {
                                dev_err(priv->dev, "Incorrect smbus block read message len\n");
-                               return -E2BIG;
+                               return -EPROTO;
                        }
                } else {
                        datalen = priv->xfer.data_len;
index d1f278f..26f03a1 100644 (file)
@@ -816,31 +816,6 @@ out_err_silent:
 EXPORT_SYMBOL_GPL(i2c_new_client_device);
 
 /**
- * i2c_new_device - instantiate an i2c device
- * @adap: the adapter managing the device
- * @info: describes one I2C device; bus_num is ignored
- * Context: can sleep
- *
- * This deprecated function has the same functionality as
- * @i2c_new_client_device, it just returns NULL instead of an ERR_PTR in case of
- * an error for compatibility with current I2C API. It will be removed once all
- * users are converted.
- *
- * This returns the new i2c client, which may be saved for later use with
- * i2c_unregister_device(); or NULL to indicate an error.
- */
-struct i2c_client *
-i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
-{
-       struct i2c_client *ret;
-
-       ret = i2c_new_client_device(adap, info);
-       return IS_ERR(ret) ? NULL : ret;
-}
-EXPORT_SYMBOL_GPL(i2c_new_device);
-
-
-/**
  * i2c_unregister_device - reverse effect of i2c_new_*_device()
  * @client: value returned from i2c_new_*_device()
  * Context: can sleep
index b34d2ff..f5c9787 100644 (file)
@@ -4,7 +4,7 @@
  *
  * This file contains the SMBus functions which are always included in the I2C
  * core because they can be emulated via I2C. SMBus specific extensions
- * (e.g. smbalert) are handled in a seperate i2c-smbus module.
+ * (e.g. smbalert) are handled in a separate i2c-smbus module.
  *
  * All SMBus-related things are written by Frodo Looijaard <frodol@dds.nl>
  * SMBus 2.0 support by Mark Studebaker <mdsxyz123@yahoo.com> and
@@ -495,6 +495,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
                        break;
                case I2C_SMBUS_BLOCK_DATA:
                case I2C_SMBUS_BLOCK_PROC_CALL:
+                       if (msg[1].buf[0] > I2C_SMBUS_BLOCK_MAX) {
+                               dev_err(&adapter->dev,
+                                       "Invalid block size returned: %d\n",
+                                       msg[1].buf[0]);
+                               status = -EPROTO;
+                               goto cleanup;
+                       }
                        for (i = 0; i < msg[1].buf[0] + 1; i++)
                                data->block[i] = msg[1].buf[i];
                        break;
index 9ce787e..0d13772 100644 (file)
@@ -918,6 +918,7 @@ static void cm_free_work(struct cm_work *work)
 
 static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
                                 struct cm_work *work)
+       __releases(&cm_id_priv->lock)
 {
        bool immediate;
 
index 3d7cc9f..c30cf53 100644 (file)
@@ -1624,6 +1624,8 @@ static struct rdma_id_private *cma_find_listener(
 {
        struct rdma_id_private *id_priv, *id_priv_dev;
 
+       lockdep_assert_held(&lock);
+
        if (!bind_list)
                return ERR_PTR(-EINVAL);
 
@@ -1670,6 +1672,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
                }
        }
 
+       mutex_lock(&lock);
        /*
         * Net namespace might be getting deleted while route lookup,
         * cm_id lookup is in progress. Therefore, perform netdevice
@@ -1711,6 +1714,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
        id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
 err:
        rcu_read_unlock();
+       mutex_unlock(&lock);
        if (IS_ERR(id_priv) && *net_dev) {
                dev_put(*net_dev);
                *net_dev = NULL;
@@ -2492,6 +2496,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
        struct net *net = id_priv->id.route.addr.dev_addr.net;
        int ret;
 
+       lockdep_assert_held(&lock);
+
        if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
                return;
 
@@ -3342,6 +3348,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list,
        u64 sid, mask;
        __be16 port;
 
+       lockdep_assert_held(&lock);
+
        addr = cma_src_addr(id_priv);
        port = htons(bind_list->port);
 
@@ -3370,6 +3378,8 @@ static int cma_alloc_port(enum rdma_ucm_port_space ps,
        struct rdma_bind_list *bind_list;
        int ret;
 
+       lockdep_assert_held(&lock);
+
        bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
        if (!bind_list)
                return -ENOMEM;
@@ -3396,6 +3406,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
        struct sockaddr  *saddr = cma_src_addr(id_priv);
        __be16 dport = cma_port(daddr);
 
+       lockdep_assert_held(&lock);
+
        hlist_for_each_entry(cur_id, &bind_list->owners, node) {
                struct sockaddr  *cur_daddr = cma_dst_addr(cur_id);
                struct sockaddr  *cur_saddr = cma_src_addr(cur_id);
@@ -3435,6 +3447,8 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
        unsigned int rover;
        struct net *net = id_priv->id.route.addr.dev_addr.net;
 
+       lockdep_assert_held(&lock);
+
        inet_get_local_port_range(net, &low, &high);
        remaining = (high - low) + 1;
        rover = prandom_u32() % remaining + low;
@@ -3482,6 +3496,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
        struct rdma_id_private *cur_id;
        struct sockaddr *addr, *cur_addr;
 
+       lockdep_assert_held(&lock);
+
        addr = cma_src_addr(id_priv);
        hlist_for_each_entry(cur_id, &bind_list->owners, node) {
                if (id_priv == cur_id)
@@ -3512,6 +3528,8 @@ static int cma_use_port(enum rdma_ucm_port_space ps,
        unsigned short snum;
        int ret;
 
+       lockdep_assert_held(&lock);
+
        snum = ntohs(cma_port(cma_src_addr(id_priv)));
        if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
                return -EACCES;
index 2257d7f..738d1fa 100644 (file)
@@ -202,7 +202,7 @@ static int __rdma_counter_unbind_qp(struct ib_qp *qp)
        return ret;
 }
 
-static void counter_history_stat_update(const struct rdma_counter *counter)
+static void counter_history_stat_update(struct rdma_counter *counter)
 {
        struct ib_device *dev = counter->device;
        struct rdma_port_counter *port_counter;
@@ -212,6 +212,8 @@ static void counter_history_stat_update(const struct rdma_counter *counter)
        if (!port_counter->hstats)
                return;
 
+       rdma_counter_query_stats(counter);
+
        for (i = 0; i < counter->stats->num_counters; i++)
                port_counter->hstats->value[i] += counter->stats->value[i];
 }
index 186e0d6..a09f8e3 100644 (file)
@@ -509,10 +509,10 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
        xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
 
        flush_workqueue(port_priv->wq);
-       ib_cancel_rmpp_recvs(mad_agent_priv);
 
        deref_mad_agent(mad_agent_priv);
        wait_for_completion(&mad_agent_priv->comp);
+       ib_cancel_rmpp_recvs(mad_agent_priv);
 
        ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
 
@@ -2718,6 +2718,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
                                                 DMA_FROM_DEVICE);
                if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
                                                  sg_list.addr))) {
+                       kfree(mad_priv);
                        ret = -ENOMEM;
                        break;
                }
index 38de494..3027cd2 100644 (file)
@@ -470,40 +470,46 @@ static struct ib_uobject *
 alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
                       struct uverbs_attr_bundle *attrs)
 {
-       const struct uverbs_obj_fd_type *fd_type =
-               container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
+       const struct uverbs_obj_fd_type *fd_type;
        int new_fd;
-       struct ib_uobject *uobj;
+       struct ib_uobject *uobj, *ret;
        struct file *filp;
 
+       uobj = alloc_uobj(attrs, obj);
+       if (IS_ERR(uobj))
+               return uobj;
+
+       fd_type =
+               container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
        if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
-                   fd_type->fops->release != &uverbs_async_event_release))
-               return ERR_PTR(-EINVAL);
+                   fd_type->fops->release != &uverbs_async_event_release)) {
+               ret = ERR_PTR(-EINVAL);
+               goto err_fd;
+       }
 
        new_fd = get_unused_fd_flags(O_CLOEXEC);
-       if (new_fd < 0)
-               return ERR_PTR(new_fd);
-
-       uobj = alloc_uobj(attrs, obj);
-       if (IS_ERR(uobj))
+       if (new_fd < 0) {
+               ret = ERR_PTR(new_fd);
                goto err_fd;
+       }
 
        /* Note that uverbs_uobject_fd_release() is called during abort */
        filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
                                  fd_type->flags);
        if (IS_ERR(filp)) {
-               uverbs_uobject_put(uobj);
-               uobj = ERR_CAST(filp);
-               goto err_fd;
+               ret = ERR_CAST(filp);
+               goto err_getfile;
        }
        uobj->object = filp;
 
        uobj->id = new_fd;
        return uobj;
 
-err_fd:
+err_getfile:
        put_unused_fd(new_fd);
-       return uobj;
+err_fd:
+       uverbs_uobject_put(uobj);
+       return ret;
 }
 
 struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
index a2ed09a..8c930bf 100644 (file)
@@ -829,13 +829,20 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
        return len;
 }
 
-static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
+static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
 {
        struct sk_buff *skb = NULL;
        struct nlmsghdr *nlh;
        void *data;
        struct ib_sa_mad *mad;
        int len;
+       unsigned long flags;
+       unsigned long delay;
+       gfp_t gfp_flag;
+       int ret;
+
+       INIT_LIST_HEAD(&query->list);
+       query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
 
        mad = query->mad_buf->mad;
        len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
@@ -860,36 +867,25 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
        /* Repair the nlmsg header length */
        nlmsg_end(skb, nlh);
 
-       return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
-}
+       gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
+               GFP_NOWAIT;
 
-static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
-{
-       unsigned long flags;
-       unsigned long delay;
-       int ret;
+       spin_lock_irqsave(&ib_nl_request_lock, flags);
+       ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);
 
-       INIT_LIST_HEAD(&query->list);
-       query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
+       if (ret)
+               goto out;
 
-       /* Put the request on the list first.*/
-       spin_lock_irqsave(&ib_nl_request_lock, flags);
+       /* Put the request on the list.*/
        delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
        query->timeout = delay + jiffies;
        list_add_tail(&query->list, &ib_nl_request_list);
        /* Start the timeout if this is the only request */
        if (ib_nl_request_list.next == &query->list)
                queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
-       spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 
-       ret = ib_nl_send_msg(query, gfp_mask);
-       if (ret) {
-               ret = -EIO;
-               /* Remove the request */
-               spin_lock_irqsave(&ib_nl_request_lock, flags);
-               list_del(&query->list);
-               spin_unlock_irqrestore(&ib_nl_request_lock, flags);
-       }
+out:
+       spin_unlock_irqrestore(&ib_nl_request_lock, flags);
 
        return ret;
 }
index 08313f7..7dd0824 100644 (file)
@@ -212,6 +212,7 @@ int efa_query_device(struct ib_device *ibdev,
        props->max_send_sge = dev_attr->max_sq_sge;
        props->max_recv_sge = dev_attr->max_rq_sge;
        props->max_sge_rd = dev_attr->max_wr_rdma_sge;
+       props->max_pkeys = 1;
 
        if (udata && udata->outlen) {
                resp.max_sq_sge = dev_attr->max_sq_sge;
index 4633a0c..2ced236 100644 (file)
@@ -985,15 +985,10 @@ static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf,
 static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target)
 {
        struct hfi1_pportdata *ppd;
-       int ret;
 
        ppd = private2ppd(fp);
 
-       ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
-       if (ret) /* failed - release the module */
-               module_put(THIS_MODULE);
-
-       return ret;
+       return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
 }
 
 static int i2c1_debugfs_open(struct inode *in, struct file *fp)
@@ -1013,7 +1008,6 @@ static int __i2c_debugfs_release(struct inode *in, struct file *fp, u32 target)
        ppd = private2ppd(fp);
 
        release_chip_resource(ppd->dd, i2c_target(target));
-       module_put(THIS_MODULE);
 
        return 0;
 }
@@ -1031,18 +1025,10 @@ static int i2c2_debugfs_release(struct inode *in, struct file *fp)
 static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target)
 {
        struct hfi1_pportdata *ppd;
-       int ret;
-
-       if (!try_module_get(THIS_MODULE))
-               return -ENODEV;
 
        ppd = private2ppd(fp);
 
-       ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
-       if (ret) /* failed - release the module */
-               module_put(THIS_MODULE);
-
-       return ret;
+       return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
 }
 
 static int qsfp1_debugfs_open(struct inode *in, struct file *fp)
@@ -1062,7 +1048,6 @@ static int __qsfp_debugfs_release(struct inode *in, struct file *fp, u32 target)
        ppd = private2ppd(fp);
 
        release_chip_resource(ppd->dd, i2c_target(target));
-       module_put(THIS_MODULE);
 
        return 0;
 }
index 5eed436..cb7ad12 100644 (file)
@@ -831,6 +831,29 @@ wq_error:
 }
 
 /**
+ * destroy_workqueues - destroy per port workqueues
+ * @dd: the hfi1_ib device
+ */
+static void destroy_workqueues(struct hfi1_devdata *dd)
+{
+       int pidx;
+       struct hfi1_pportdata *ppd;
+
+       for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+               ppd = dd->pport + pidx;
+
+               if (ppd->hfi1_wq) {
+                       destroy_workqueue(ppd->hfi1_wq);
+                       ppd->hfi1_wq = NULL;
+               }
+               if (ppd->link_wq) {
+                       destroy_workqueue(ppd->link_wq);
+                       ppd->link_wq = NULL;
+               }
+       }
+}
+
+/**
  * enable_general_intr() - Enable the IRQs that will be handled by the
  * general interrupt handler.
  * @dd: valid devdata
@@ -1103,15 +1126,10 @@ static void shutdown_device(struct hfi1_devdata *dd)
                 * We can't count on interrupts since we are stopping.
                 */
                hfi1_quiet_serdes(ppd);
-
-               if (ppd->hfi1_wq) {
-                       destroy_workqueue(ppd->hfi1_wq);
-                       ppd->hfi1_wq = NULL;
-               }
-               if (ppd->link_wq) {
-                       destroy_workqueue(ppd->link_wq);
-                       ppd->link_wq = NULL;
-               }
+               if (ppd->hfi1_wq)
+                       flush_workqueue(ppd->hfi1_wq);
+               if (ppd->link_wq)
+                       flush_workqueue(ppd->link_wq);
        }
        sdma_exit(dd);
 }
@@ -1756,6 +1774,7 @@ static void remove_one(struct pci_dev *pdev)
         * clear dma engines, etc.
         */
        shutdown_device(dd);
+       destroy_workqueues(dd);
 
        stop_timers(dd);
 
index 07847cb..d580aa1 100644 (file)
@@ -399,7 +399,7 @@ static inline void iowait_get_priority(struct iowait *w)
  * @wait_head: the wait queue
  *
  * This function is called to insert an iowait struct into a
- * wait queue after a resource (eg, sdma decriptor or pio
+ * wait queue after a resource (eg, sdma descriptor or pio
  * buffer) is run out.
  */
 static inline void iowait_queue(bool pkts_sent, struct iowait *w,
index 185c9b0..b8c9d0a 100644 (file)
@@ -67,6 +67,9 @@ struct hfi1_ipoib_circ_buf {
  * @sde: sdma engine
  * @tx_list: tx request list
  * @sent_txreqs: count of txreqs posted to sdma
+ * @stops: count of stops of queue
+ * @ring_full: ring has been filled
+ * @no_desc: descriptor shortage seen
  * @flow: tracks when list needs to be flushed for a flow change
  * @q_idx: ipoib Tx queue index
  * @pkts_sent: indicator packets have been sent from this queue
@@ -80,6 +83,9 @@ struct hfi1_ipoib_txq {
        struct sdma_engine *sde;
        struct list_head tx_list;
        u64 sent_txreqs;
+       atomic_t stops;
+       atomic_t ring_full;
+       atomic_t no_desc;
        union hfi1_ipoib_flow flow;
        u8 q_idx;
        bool pkts_sent;
index 883cb9d..9df292b 100644 (file)
@@ -55,23 +55,48 @@ static u64 hfi1_ipoib_txreqs(const u64 sent, const u64 completed)
        return sent - completed;
 }
 
-static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
+static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
 {
-       if (unlikely(hfi1_ipoib_txreqs(++txq->sent_txreqs,
-                                      atomic64_read(&txq->complete_txreqs)) >=
-           min_t(unsigned int, txq->priv->netdev->tx_queue_len,
-                 txq->tx_ring.max_items - 1)))
+       return hfi1_ipoib_txreqs(txq->sent_txreqs,
+                                atomic64_read(&txq->complete_txreqs));
+}
+
+static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
+{
+       if (atomic_inc_return(&txq->stops) == 1)
                netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
 }
 
+static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
+{
+       if (atomic_dec_and_test(&txq->stops))
+               netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
+}
+
+static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq)
+{
+       return min_t(uint, txq->priv->netdev->tx_queue_len,
+                    txq->tx_ring.max_items - 1);
+}
+
+static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq)
+{
+       return min_t(uint, txq->priv->netdev->tx_queue_len,
+                    txq->tx_ring.max_items) >> 1;
+}
+
+static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
+{
+       ++txq->sent_txreqs;
+       if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
+           !atomic_xchg(&txq->ring_full, 1))
+               hfi1_ipoib_stop_txq(txq);
+}
+
 static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
 {
        struct net_device *dev = txq->priv->netdev;
 
-       /* If the queue is already running just return */
-       if (likely(!__netif_subqueue_stopped(dev, txq->q_idx)))
-               return;
-
        /* If shutting down just return as queue state is irrelevant */
        if (unlikely(dev->reg_state != NETREG_REGISTERED))
                return;
@@ -86,11 +111,9 @@ static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
         * Use the minimum of the current tx_queue_len or the rings max txreqs
         * to protect against ring overflow.
         */
-       if (hfi1_ipoib_txreqs(txq->sent_txreqs,
-                             atomic64_read(&txq->complete_txreqs))
-           < min_t(unsigned int, dev->tx_queue_len,
-                   txq->tx_ring.max_items) >> 1)
-               netif_wake_subqueue(dev, txq->q_idx);
+       if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
+           atomic_xchg(&txq->ring_full, 0))
+               hfi1_ipoib_wake_txq(txq);
 }
 
 static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
@@ -364,11 +387,12 @@ static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
        if (unlikely(!tx))
                return ERR_PTR(-ENOMEM);
 
-       /* so that we can test if the sdma decriptors are there */
+       /* so that we can test if the sdma descriptors are there */
        tx->txreq.num_desc = 0;
        tx->priv = priv;
        tx->txq = txp->txq;
        tx->skb = skb;
+       INIT_LIST_HEAD(&tx->txreq.list);
 
        hfi1_ipoib_build_ib_tx_headers(tx, txp);
 
@@ -469,6 +493,7 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
 
        ret = hfi1_ipoib_submit_tx(txq, tx);
        if (likely(!ret)) {
+tx_ok:
                trace_sdma_output_ibhdr(tx->priv->dd,
                                        &tx->sdma_hdr.hdr,
                                        ib_is_sc5(txp->flow.sc5));
@@ -478,20 +503,8 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
 
        txq->pkts_sent = false;
 
-       if (ret == -EBUSY) {
-               list_add_tail(&tx->txreq.list, &txq->tx_list);
-
-               trace_sdma_output_ibhdr(tx->priv->dd,
-                                       &tx->sdma_hdr.hdr,
-                                       ib_is_sc5(txp->flow.sc5));
-               hfi1_ipoib_check_queue_depth(txq);
-               return NETDEV_TX_OK;
-       }
-
-       if (ret == -ECOMM) {
-               hfi1_ipoib_check_queue_depth(txq);
-               return NETDEV_TX_OK;
-       }
+       if (ret == -EBUSY || ret == -ECOMM)
+               goto tx_ok;
 
        sdma_txclean(priv->dd, &tx->txreq);
        dev_kfree_skb_any(skb);
@@ -509,9 +522,17 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
        struct ipoib_txreq *tx;
 
        /* Has the flow change ? */
-       if (txq->flow.as_int != txp->flow.as_int)
-               (void)hfi1_ipoib_flush_tx_list(dev, txq);
-
+       if (txq->flow.as_int != txp->flow.as_int) {
+               int ret;
+
+               ret = hfi1_ipoib_flush_tx_list(dev, txq);
+               if (unlikely(ret)) {
+                       if (ret == -EBUSY)
+                               ++dev->stats.tx_dropped;
+                       dev_kfree_skb_any(skb);
+                       return NETDEV_TX_OK;
+               }
+       }
        tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
        if (IS_ERR(tx)) {
                int ret = PTR_ERR(tx);
@@ -610,10 +631,14 @@ static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
                        return -EAGAIN;
                }
 
-               netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
-
-               if (list_empty(&txq->wait.list))
+               if (list_empty(&txreq->list))
+                       /* came from non-list submit */
+                       list_add_tail(&txreq->list, &txq->tx_list);
+               if (list_empty(&txq->wait.list)) {
+                       if (!atomic_xchg(&txq->no_desc, 1))
+                               hfi1_ipoib_stop_txq(txq);
                        iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
+               }
 
                write_sequnlock(&sde->waitlock);
                return -EBUSY;
@@ -648,9 +673,9 @@ static void hfi1_ipoib_flush_txq(struct work_struct *work)
        struct net_device *dev = txq->priv->netdev;
 
        if (likely(dev->reg_state == NETREG_REGISTERED) &&
-           likely(__netif_subqueue_stopped(dev, txq->q_idx)) &&
            likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
-               netif_wake_subqueue(dev, txq->q_idx);
+               if (atomic_xchg(&txq->no_desc, 0))
+                       hfi1_ipoib_wake_txq(txq);
 }
 
 int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
@@ -704,6 +729,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
                txq->sde = NULL;
                INIT_LIST_HEAD(&txq->tx_list);
                atomic64_set(&txq->complete_txreqs, 0);
+               atomic_set(&txq->stops, 0);
+               atomic_set(&txq->ring_full, 0);
+               atomic_set(&txq->no_desc, 0);
                txq->q_idx = i;
                txq->flow.tx_queue = 0xff;
                txq->flow.sc5 = 0xff;
@@ -769,7 +797,7 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
                atomic64_inc(complete_txreqs);
        }
 
-       if (hfi1_ipoib_txreqs(txq->sent_txreqs, atomic64_read(complete_txreqs)))
+       if (hfi1_ipoib_used(txq))
                dd_dev_warn(txq->priv->dd,
                            "txq %d not empty found %llu requests\n",
                            txq->q_idx,
index 63688e8..6d263c9 100644 (file)
@@ -373,7 +373,7 @@ void hfi1_netdev_free(struct hfi1_devdata *dd)
 {
        if (dd->dummy_netdev) {
                dd_dev_info(dd, "hfi1 netdev freed\n");
-               free_netdev(dd->dummy_netdev);
+               kfree(dd->dummy_netdev);
                dd->dummy_netdev = NULL;
        }
 }
index 0c2ae9f..be62284 100644 (file)
@@ -195,7 +195,7 @@ static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
 {
        /* Constraining 10KB packets to 8KB packets */
        if (mtu == (enum ib_mtu)OPA_MTU_10240)
-               mtu = OPA_MTU_8192;
+               mtu = (enum ib_mtu)OPA_MTU_8192;
        return opa_mtu_enum_to_int((enum opa_mtu)mtu);
 }
 
@@ -367,7 +367,10 @@ bool _hfi1_schedule_send(struct rvt_qp *qp)
        struct hfi1_ibport *ibp =
                to_iport(qp->ibqp.device, qp->port_num);
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-       struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+       struct hfi1_devdata *dd = ppd->dd;
+
+       if (dd->flags & HFI1_SHUTDOWN)
+               return true;
 
        return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
                               priv->s_sde ?
index 243b4ba..facff13 100644 (file)
@@ -5406,7 +5406,10 @@ static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
        struct hfi1_ibport *ibp =
                to_iport(qp->ibqp.device, qp->port_num);
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
-       struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+       struct hfi1_devdata *dd = ppd->dd;
+
+       if ((dd->flags & HFI1_SHUTDOWN))
+               return true;
 
        return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq,
                                   priv->s_sde ?
index bfa6e08..d2d526c 100644 (file)
@@ -91,7 +91,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
        tx->mr = NULL;
        tx->sde = priv->s_sde;
        tx->psc = priv->s_sendcontext;
-       /* so that we can test if the sdma decriptors are there */
+       /* so that we can test if the sdma descriptors are there */
        tx->txreq.num_desc = 0;
        /* Set the header type */
        tx->phdr.hdr.hdr_type = priv->hdr_type;
index a77fa67..479fa55 100644 (file)
@@ -898,13 +898,14 @@ struct hns_roce_hw {
        int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
        void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
                        enum ib_mtu mtu);
-       int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
-                         unsigned long mtpt_idx);
+       int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
+                         struct hns_roce_mr *mr, unsigned long mtpt_idx);
        int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
                                struct hns_roce_mr *mr, int flags, u32 pdn,
                                int mr_access_flags, u64 iova, u64 size,
                                void *mb_buf);
-       int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
+       int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
+                              struct hns_roce_mr *mr);
        int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
        void (*write_cqc)(struct hns_roce_dev *hr_dev,
                          struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
index d02207c..cf39f56 100644 (file)
@@ -1756,10 +1756,10 @@ static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
                   val);
 }
 
-static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf,
+                                 struct hns_roce_mr *mr,
                                  unsigned long mtpt_idx)
 {
-       struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
        u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 };
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_v1_mpt_entry *mpt_entry;
index c597d72..dd01a51 100644 (file)
@@ -910,7 +910,7 @@ static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
        instance_stage = handle->rinfo.instance_state;
        reset_stage = handle->rinfo.reset_state;
        reset_cnt = ops->ae_dev_reset_cnt(handle);
-       hw_resetting = ops->get_hw_reset_stat(handle);
+       hw_resetting = ops->get_cmdq_stat(handle);
        sw_resetting = ops->ae_dev_resetting(handle);
 
        if (reset_cnt != hr_dev->reset_cnt)
@@ -2529,10 +2529,10 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
        return hns_roce_cmq_send(hr_dev, &desc, 1);
 }
 
-static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
+static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
+                       struct hns_roce_v2_mpt_entry *mpt_entry,
                        struct hns_roce_mr *mr)
 {
-       struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
        u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
        struct ib_device *ibdev = &hr_dev->ib_dev;
        dma_addr_t pbl_ba;
@@ -2571,7 +2571,8 @@ static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
        return 0;
 }
 
-static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
+                                 void *mb_buf, struct hns_roce_mr *mr,
                                  unsigned long mtpt_idx)
 {
        struct hns_roce_v2_mpt_entry *mpt_entry;
@@ -2620,7 +2621,7 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
        if (mr->type == MR_TYPE_DMA)
                return 0;
 
-       ret = set_mtpt_pbl(mpt_entry, mr);
+       ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
 
        return ret;
 }
@@ -2666,15 +2667,15 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
                mr->iova = iova;
                mr->size = size;
 
-               ret = set_mtpt_pbl(mpt_entry, mr);
+               ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
        }
 
        return ret;
 }
 
-static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
+static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
+                                      void *mb_buf, struct hns_roce_mr *mr)
 {
-       struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_v2_mpt_entry *mpt_entry;
        dma_addr_t pbl_ba = 0;
index 4c0bbb1..0e71ebe 100644 (file)
@@ -180,9 +180,10 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
        }
 
        if (mr->type != MR_TYPE_FRMR)
-               ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
+               ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr,
+                                            mtpt_idx);
        else
-               ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
+               ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
        if (ret) {
                dev_err(dev, "Write mtpt fail!\n");
                goto err_page;
index 8942f82..34da9eb 100644 (file)
@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-ccflags-y :=  -I $(srctree)/drivers/net/ethernet/intel/i40e
 
 obj-$(CONFIG_INFINIBAND_I40IW) += i40iw.o
 
index 49d9263..25747b8 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/crc32c.h>
+#include <linux/net/intel/i40e_client.h>
 #include <rdma/ib_smi.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_pack.h>
@@ -57,7 +58,6 @@
 #include "i40iw_d.h"
 #include "i40iw_hmc.h"
 
-#include <i40e_client.h>
 #include "i40iw_type.h"
 #include "i40iw_p.h"
 #include <rdma/i40iw-abi.h>
index 343a8b8..6f99ed0 100644 (file)
@@ -511,7 +511,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
                                           mdev_port_num);
        if (err)
                goto out;
-       ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
+       ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
        eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
 
        props->active_width     = IB_WIDTH_4X;
index 81bf6b9..e050ead 100644 (file)
@@ -1862,7 +1862,7 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        if (!in)
                return -ENOMEM;
 
-       if (MLX5_CAP_GEN(mdev, ece_support))
+       if (MLX5_CAP_GEN(mdev, ece_support) && ucmd)
                MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
        qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 
@@ -2341,18 +2341,18 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        unsigned long flags;
        int err;
 
-       if (qp->ibqp.rwq_ind_tbl) {
+       if (qp->is_rss) {
                destroy_rss_raw_qp_tir(dev, qp);
                return;
        }
 
-       base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+       base = (qp->type == IB_QPT_RAW_PACKET ||
                qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
-              &qp->raw_packet_qp.rq.base :
-              &qp->trans_qp.base;
+                      &qp->raw_packet_qp.rq.base :
+                      &qp->trans_qp.base;
 
        if (qp->state != IB_QPS_RESET) {
-               if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
+               if (qp->type != IB_QPT_RAW_PACKET &&
                    !(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
                        err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0,
                                                  NULL, &base->mqp, NULL);
@@ -2368,8 +2368,8 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                                     base->mqp.qpn);
        }
 
-       get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
-               &send_cq, &recv_cq);
+       get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq,
+               &recv_cq);
 
        spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
        mlx5_ib_lock_cqs(send_cq, recv_cq);
@@ -2391,7 +2391,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        mlx5_ib_unlock_cqs(send_cq, recv_cq);
        spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
 
-       if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+       if (qp->type == IB_QPT_RAW_PACKET ||
            qp->flags & IB_QP_CREATE_SOURCE_QPN) {
                destroy_raw_packet_qp(dev, qp);
        } else {
@@ -2668,6 +2668,13 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl)
                return (create_flags) ? -EINVAL : 0;
 
+       process_create_flag(dev, &create_flags, IB_QP_CREATE_NETIF_QP,
+                           mlx5_get_flow_namespace(dev->mdev,
+                                                   MLX5_FLOW_NAMESPACE_BYPASS),
+                           qp);
+       process_create_flag(dev, &create_flags,
+                           IB_QP_CREATE_INTEGRITY_EN,
+                           MLX5_CAP_GEN(mdev, sho), qp);
        process_create_flag(dev, &create_flags,
                            IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
                            MLX5_CAP_GEN(mdev, block_lb_mc), qp);
@@ -2873,7 +2880,6 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
 static int check_ucmd_data(struct mlx5_ib_dev *dev,
                           struct mlx5_create_qp_params *params)
 {
-       struct ib_qp_init_attr *attr = params->attr;
        struct ib_udata *udata = params->udata;
        size_t size, last;
        int ret;
@@ -2885,14 +2891,7 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev,
                 */
                last = sizeof(struct mlx5_ib_create_qp_rss);
        else
-               /* IB_QPT_RAW_PACKET doesn't have ECE data */
-               switch (attr->qp_type) {
-               case IB_QPT_RAW_PACKET:
-                       last = offsetof(struct mlx5_ib_create_qp, ece_options);
-                       break;
-               default:
-                       last = offsetof(struct mlx5_ib_create_qp, reserved);
-               }
+               last = offsetof(struct mlx5_ib_create_qp, reserved);
 
        if (udata->inlen <= last)
                return 0;
@@ -2907,7 +2906,7 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev,
        if (!ret)
                mlx5_ib_dbg(
                        dev,
-                       "udata is not cleared, inlen = %lu, ucmd = %lu, last = %lu, size = %lu\n",
+                       "udata is not cleared, inlen = %zu, ucmd = %zu, last = %zu, size = %zu\n",
                        udata->inlen, params->ucmd_size, last, size);
        return ret ? 0 : -EINVAL;
 }
@@ -3002,10 +3001,19 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
        return &qp->ibqp;
 
 destroy_qp:
-       if (qp->type == MLX5_IB_QPT_DCT)
+       if (qp->type == MLX5_IB_QPT_DCT) {
                mlx5_ib_destroy_dct(qp);
-       else
+       } else {
+               /*
+                * These lines below are temp solution till QP allocation
+                * will be moved to be under IB/core responsiblity.
+                */
+               qp->ibqp.send_cq = attr->send_cq;
+               qp->ibqp.recv_cq = attr->recv_cq;
+               qp->ibqp.pd = pd;
                destroy_qp_common(dev, qp, udata);
+       }
+
        qp = NULL;
 free_qp:
        kfree(qp);
@@ -4162,8 +4170,6 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
                if (udata->outlen < min_resp_len)
                        return -EINVAL;
-               resp.response_length = min_resp_len;
-
                /*
                 * If we don't have enough space for the ECE options,
                 * simply indicate it with resp.response_length.
@@ -4384,8 +4390,7 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
                                MLX5_GET(ads, path, src_addr_index),
                                MLX5_GET(ads, path, hop_limit),
                                MLX5_GET(ads, path, tclass));
-               memcpy(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip),
-                      MLX5_FLD_SZ_BYTES(ads, rgid_rip));
+               rdma_ah_set_dgid_raw(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip));
        }
 }
 
index c19d91d..7c3968e 100644 (file)
@@ -346,6 +346,9 @@ static int get_ece_from_mbox(void *out, u16 opcode)
        int ece = 0;
 
        switch (opcode) {
+       case MLX5_CMD_OP_INIT2INIT_QP:
+               ece = MLX5_GET(init2init_qp_out, out, ece);
+               break;
        case MLX5_CMD_OP_INIT2RTR_QP:
                ece = MLX5_GET(init2rtr_qp_out, out, ece);
                break;
@@ -355,6 +358,9 @@ static int get_ece_from_mbox(void *out, u16 opcode)
        case MLX5_CMD_OP_RTS2RTS_QP:
                ece = MLX5_GET(rts2rts_qp_out, out, ece);
                break;
+       case MLX5_CMD_OP_RST2INIT_QP:
+               ece = MLX5_GET(rst2init_qp_out, out, ece);
+               break;
        default:
                break;
        }
@@ -406,6 +412,7 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc, uid);
+               MLX5_SET(rst2init_qp_in, mbox->in, ece, ece);
                break;
        case MLX5_CMD_OP_INIT2RTR_QP:
                if (MBOX_ALLOC(mbox, init2rtr_qp))
@@ -439,6 +446,7 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
                        return -ENOMEM;
                MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
                                  opt_param_mask, qpc, uid);
+               MLX5_SET(init2init_qp_in, mbox->in, ece, ece);
                break;
        default:
                return -EINVAL;
index ccaedfd..b1de8d6 100644 (file)
@@ -346,9 +346,14 @@ static void qedr_free_resources(struct qedr_dev *dev)
 
 static int qedr_alloc_resources(struct qedr_dev *dev)
 {
+       struct qed_chain_init_params params = {
+               .mode           = QED_CHAIN_MODE_PBL,
+               .intended_use   = QED_CHAIN_USE_TO_CONSUME,
+               .cnt_type       = QED_CHAIN_CNT_TYPE_U16,
+               .elem_size      = sizeof(struct regpair *),
+       };
        struct qedr_cnq *cnq;
        __le16 *cons_pi;
-       u16 n_entries;
        int i, rc;
 
        dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
@@ -382,7 +387,9 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
        dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);
 
        /* Allocate CNQ PBLs */
-       n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
+       params.num_elems = min_t(u32, QED_RDMA_MAX_CNQ_SIZE,
+                                QEDR_ROCE_MAX_CNQ_SIZE);
+
        for (i = 0; i < dev->num_cnq; i++) {
                cnq = &dev->cnq_array[i];
 
@@ -391,13 +398,8 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
                if (rc)
                        goto err3;
 
-               rc = dev->ops->common->chain_alloc(dev->cdev,
-                                                  QED_CHAIN_USE_TO_CONSUME,
-                                                  QED_CHAIN_MODE_PBL,
-                                                  QED_CHAIN_CNT_TYPE_U16,
-                                                  n_entries,
-                                                  sizeof(struct regpair *),
-                                                  &cnq->pbl, NULL);
+               rc = dev->ops->common->chain_alloc(dev->cdev, &cnq->pbl,
+                                                  &params);
                if (rc)
                        goto err4;
 
index 792eecd..97fc7dd 100644 (file)
@@ -150,8 +150,17 @@ qedr_iw_issue_event(void *context,
        if (params->cm_info) {
                event.ird = params->cm_info->ird;
                event.ord = params->cm_info->ord;
-               event.private_data_len = params->cm_info->private_data_len;
-               event.private_data = (void *)params->cm_info->private_data;
+               /* Only connect_request and reply have valid private data
+                * the rest of the events this may be left overs from
+                * connection establishment. CONNECT_REQUEST is issued via
+                * qedr_iw_mpa_request
+                */
+               if (event_type == IW_CM_EVENT_CONNECT_REPLY) {
+                       event.private_data_len =
+                               params->cm_info->private_data_len;
+                       event.private_data =
+                               (void *)params->cm_info->private_data;
+               }
        }
 
        if (ep->cm_id)
index 9b9e802..49b8a43 100644 (file)
@@ -891,6 +891,12 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                udata, struct qedr_ucontext, ibucontext);
        struct qed_rdma_destroy_cq_out_params destroy_oparams;
        struct qed_rdma_destroy_cq_in_params destroy_iparams;
+       struct qed_chain_init_params chain_params = {
+               .mode           = QED_CHAIN_MODE_PBL,
+               .intended_use   = QED_CHAIN_USE_TO_CONSUME,
+               .cnt_type       = QED_CHAIN_CNT_TYPE_U32,
+               .elem_size      = sizeof(union rdma_cqe),
+       };
        struct qedr_dev *dev = get_qedr_dev(ibdev);
        struct qed_rdma_create_cq_in_params params;
        struct qedr_create_cq_ureq ureq = {};
@@ -917,6 +923,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 
        chain_entries = qedr_align_cq_entries(entries);
        chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
+       chain_params.num_elems = chain_entries;
 
        /* calc db offset. user will add DPI base, kernel will add db addr */
        db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
@@ -951,13 +958,8 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
        } else {
                cq->cq_type = QEDR_CQ_TYPE_KERNEL;
 
-               rc = dev->ops->common->chain_alloc(dev->cdev,
-                                                  QED_CHAIN_USE_TO_CONSUME,
-                                                  QED_CHAIN_MODE_PBL,
-                                                  QED_CHAIN_CNT_TYPE_U32,
-                                                  chain_entries,
-                                                  sizeof(union rdma_cqe),
-                                                  &cq->pbl, NULL);
+               rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl,
+                                                  &chain_params);
                if (rc)
                        goto err0;
 
@@ -1446,6 +1448,12 @@ static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
                                        struct ib_srq_init_attr *init_attr)
 {
        struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
+       struct qed_chain_init_params params = {
+               .mode           = QED_CHAIN_MODE_PBL,
+               .intended_use   = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+               .cnt_type       = QED_CHAIN_CNT_TYPE_U32,
+               .elem_size      = QEDR_SRQ_WQE_ELEM_SIZE,
+       };
        dma_addr_t phy_prod_pair_addr;
        u32 num_elems;
        void *va;
@@ -1464,13 +1472,9 @@ static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
        hw_srq->virt_prod_pair_addr = va;
 
        num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
-       rc = dev->ops->common->chain_alloc(dev->cdev,
-                                          QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-                                          QED_CHAIN_MODE_PBL,
-                                          QED_CHAIN_CNT_TYPE_U32,
-                                          num_elems,
-                                          QEDR_SRQ_WQE_ELEM_SIZE,
-                                          &hw_srq->pbl, NULL);
+       params.num_elems = num_elems;
+
+       rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, &params);
        if (rc)
                goto err0;
 
@@ -1901,29 +1905,28 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
                           u32 n_sq_elems, u32 n_rq_elems)
 {
        struct qed_rdma_create_qp_out_params out_params;
+       struct qed_chain_init_params params = {
+               .mode           = QED_CHAIN_MODE_PBL,
+               .cnt_type       = QED_CHAIN_CNT_TYPE_U32,
+       };
        int rc;
 
-       rc = dev->ops->common->chain_alloc(dev->cdev,
-                                          QED_CHAIN_USE_TO_PRODUCE,
-                                          QED_CHAIN_MODE_PBL,
-                                          QED_CHAIN_CNT_TYPE_U32,
-                                          n_sq_elems,
-                                          QEDR_SQE_ELEMENT_SIZE,
-                                          &qp->sq.pbl, NULL);
+       params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
+       params.num_elems = n_sq_elems;
+       params.elem_size = QEDR_SQE_ELEMENT_SIZE;
 
+       rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
        if (rc)
                return rc;
 
        in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
        in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
 
-       rc = dev->ops->common->chain_alloc(dev->cdev,
-                                          QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-                                          QED_CHAIN_MODE_PBL,
-                                          QED_CHAIN_CNT_TYPE_U32,
-                                          n_rq_elems,
-                                          QEDR_RQE_ELEMENT_SIZE,
-                                          &qp->rq.pbl, NULL);
+       params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
+       params.elem_size = n_rq_elems;
+       params.elem_size = QEDR_RQE_ELEMENT_SIZE;
+
+       rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
        if (rc)
                return rc;
 
@@ -1949,14 +1952,19 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
                            u32 n_sq_elems, u32 n_rq_elems)
 {
        struct qed_rdma_create_qp_out_params out_params;
-       struct qed_chain_ext_pbl ext_pbl;
+       struct qed_chain_init_params params = {
+               .mode           = QED_CHAIN_MODE_PBL,
+               .cnt_type       = QED_CHAIN_CNT_TYPE_U32,
+       };
        int rc;
 
        in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
                                                     QEDR_SQE_ELEMENT_SIZE,
+                                                    QED_CHAIN_PAGE_SIZE,
                                                     QED_CHAIN_MODE_PBL);
        in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
                                                     QEDR_RQE_ELEMENT_SIZE,
+                                                    QED_CHAIN_PAGE_SIZE,
                                                     QED_CHAIN_MODE_PBL);
 
        qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
@@ -1966,31 +1974,24 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
                return -EINVAL;
 
        /* Now we allocate the chain */
-       ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
-       ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
 
-       rc = dev->ops->common->chain_alloc(dev->cdev,
-                                          QED_CHAIN_USE_TO_PRODUCE,
-                                          QED_CHAIN_MODE_PBL,
-                                          QED_CHAIN_CNT_TYPE_U32,
-                                          n_sq_elems,
-                                          QEDR_SQE_ELEMENT_SIZE,
-                                          &qp->sq.pbl, &ext_pbl);
+       params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
+       params.num_elems = n_sq_elems;
+       params.elem_size = QEDR_SQE_ELEMENT_SIZE;
+       params.ext_pbl_virt = out_params.sq_pbl_virt;
+       params.ext_pbl_phys = out_params.sq_pbl_phys;
 
+       rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
        if (rc)
                goto err;
 
-       ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
-       ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
-
-       rc = dev->ops->common->chain_alloc(dev->cdev,
-                                          QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-                                          QED_CHAIN_MODE_PBL,
-                                          QED_CHAIN_CNT_TYPE_U32,
-                                          n_rq_elems,
-                                          QEDR_RQE_ELEMENT_SIZE,
-                                          &qp->rq.pbl, &ext_pbl);
+       params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
+       params.num_elems = n_rq_elems;
+       params.elem_size = QEDR_RQE_ELEMENT_SIZE;
+       params.ext_pbl_virt = out_params.rq_pbl_virt;
+       params.ext_pbl_phys = out_params.rq_pbl_phys;
 
+       rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
        if (rc)
                goto err;
 
index 511b728..7db35dd 100644 (file)
@@ -1204,7 +1204,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
                err = alloc_ud_wq_attr(qp, rdi->dparms.node);
                if (err) {
                        ret = (ERR_PTR(err));
-                       goto bail_driver_priv;
+                       goto bail_rq_rvt;
                }
 
                if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
@@ -1314,9 +1314,11 @@ bail_qpn:
        rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
 
 bail_rq_wq:
-       rvt_free_rq(&qp->r_rq);
        free_ud_wq_attr(qp);
 
+bail_rq_rvt:
+       rvt_free_rq(&qp->r_rq);
+
 bail_driver_priv:
        rdi->driver_f.qp_priv_free(rdi, qp);
 
index a0b8cc6..ed60c9e 100644 (file)
@@ -67,12 +67,13 @@ static int siw_device_register(struct siw_device *sdev, const char *name)
        static int dev_id = 1;
        int rv;
 
+       sdev->vendor_part_id = dev_id++;
+
        rv = ib_register_device(base_dev, name);
        if (rv) {
                pr_warn("siw: device registration error %d\n", rv);
                return rv;
        }
-       sdev->vendor_part_id = dev_id++;
 
        siw_dbg(base_dev, "HWaddr=%pM\n", sdev->netdev->dev_addr);
 
index 6505202..7271d70 100644 (file)
@@ -139,7 +139,8 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
                        break;
 
                bytes = min(bytes, len);
-               if (siw_rx_kva(srx, (void *)buf_addr, bytes) == bytes) {
+               if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) ==
+                   bytes) {
                        copied += bytes;
                        offset += bytes;
                        len -= bytes;
index 6542523..13eacf6 100644 (file)
@@ -1021,7 +1021,7 @@ static int __init hp_sdc_register(void)
        hp_sdc.base_io   = (unsigned long) 0xf0428000;
        hp_sdc.data_io   = (unsigned long) hp_sdc.base_io + 1;
        hp_sdc.status_io = (unsigned long) hp_sdc.base_io + 3;
-       if (!probe_kernel_read(&i, (unsigned char *)hp_sdc.data_io, 1))
+       if (!copy_from_kernel_nofault(&i, (unsigned char *)hp_sdc.data_io, 1))
                hp_sdc.dev = (void *)1;
        hp_sdc.dev_err   = hp_sdc_init();
 #endif
index b510f67..6dc49ed 100644 (file)
@@ -211,7 +211,7 @@ config INTEL_IOMMU_DEBUGFS
 
 config INTEL_IOMMU_SVM
        bool "Support for Shared Virtual Memory with Intel IOMMU"
-       depends on INTEL_IOMMU && X86
+       depends on INTEL_IOMMU && X86_64
        select PCI_PASID
        select PCI_PRI
        select MMU_NOTIFIER
index cc46dff..683b812 100644 (file)
@@ -898,7 +898,8 @@ int __init detect_intel_iommu(void)
        if (!ret)
                ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
                                           &validate_drhd_cb);
-       if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
+       if (!ret && !no_iommu && !iommu_detected &&
+           (!dmar_disabled || dmar_platform_optin())) {
                iommu_detected = 1;
                /* Make sure ACS will be enabled */
                pci_request_acs();
index 9129663..d759e72 100644 (file)
@@ -612,6 +612,12 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
        return g_iommus[iommu_id];
 }
 
+static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
+{
+       return sm_supported(iommu) ?
+                       ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
+}
+
 static void domain_update_iommu_coherency(struct dmar_domain *domain)
 {
        struct dmar_drhd_unit *drhd;
@@ -623,7 +629,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
 
        for_each_domain_iommu(i, domain) {
                found = true;
-               if (!ecap_coherent(g_iommus[i]->ecap)) {
+               if (!iommu_paging_structure_coherency(g_iommus[i])) {
                        domain->iommu_coherency = 0;
                        break;
                }
@@ -634,7 +640,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
        /* No hardware attached; use lowest common denominator */
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
-               if (!ecap_coherent(iommu->ecap)) {
+               if (!iommu_paging_structure_coherency(iommu)) {
                        domain->iommu_coherency = 0;
                        break;
                }
@@ -921,7 +927,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
                        domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
                        pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
                        if (domain_use_first_level(domain))
-                               pteval |= DMA_FL_PTE_XD;
+                               pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
                        if (cmpxchg64(&pte->val, 0ULL, pteval))
                                /* Someone else set it while we were thinking; use theirs. */
                                free_pgtable_page(tmp_page);
@@ -1951,7 +1957,6 @@ static inline void
 context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
 {
        context->hi |= pasid & ((1 << 20) - 1);
-       context->hi |= (1 << 20);
 }
 
 /*
@@ -2095,7 +2100,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 
        context_set_fault_enable(context);
        context_set_present(context);
-       domain_flush_cache(domain, context, sizeof(*context));
+       if (!ecap_coherent(iommu->ecap))
+               clflush_cache_range(context, sizeof(*context));
 
        /*
         * It's a non-present to present mapping. If hardware doesn't cache
@@ -2243,7 +2249,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 
        attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
        if (domain_use_first_level(domain))
-               attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD;
+               attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
 
        if (!sg) {
                sg_res = nr_pages;
@@ -2695,7 +2701,9 @@ static int __init si_domain_init(int hw)
                                    end >> agaw_to_width(si_domain->agaw)))
                                continue;
 
-                       ret = iommu_domain_identity_map(si_domain, start, end);
+                       ret = iommu_domain_identity_map(si_domain,
+                                       mm_to_dma_pfn(start >> PAGE_SHIFT),
+                                       mm_to_dma_pfn(end >> PAGE_SHIFT));
                        if (ret)
                                return ret;
                }
@@ -6021,6 +6029,23 @@ intel_iommu_domain_set_attr(struct iommu_domain *domain,
        return ret;
 }
 
+/*
+ * Check that the device does not live on an external facing PCI port that is
+ * marked as untrusted. Such devices should not be able to apply quirks and
+ * thus not be able to bypass the IOMMU restrictions.
+ */
+static bool risky_device(struct pci_dev *pdev)
+{
+       if (pdev->untrusted) {
+               pci_info(pdev,
+                        "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n",
+                        pdev->vendor, pdev->device);
+               pci_info(pdev, "Please check with your BIOS/Platform vendor about this\n");
+               return true;
+       }
+       return false;
+}
+
 const struct iommu_ops intel_iommu_ops = {
        .capable                = intel_iommu_capable,
        .domain_alloc           = intel_iommu_domain_alloc,
@@ -6060,6 +6085,9 @@ const struct iommu_ops intel_iommu_ops = {
 
 static void quirk_iommu_igfx(struct pci_dev *dev)
 {
+       if (risky_device(dev))
+               return;
+
        pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
        dmar_map_gfx = 0;
 }
@@ -6101,6 +6129,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
 
 static void quirk_iommu_rwbf(struct pci_dev *dev)
 {
+       if (risky_device(dev))
+               return;
+
        /*
         * Mobile 4 Series Chipset neglects to set RWBF capability,
         * but needs it. Same seems to hold for the desktop versions.
@@ -6131,6 +6162,9 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
 {
        unsigned short ggc;
 
+       if (risky_device(dev))
+               return;
+
        if (pci_read_config_word(dev, GGC, &ggc))
                return;
 
@@ -6164,6 +6198,12 @@ static void __init check_tylersburg_isoch(void)
        pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
        if (!pdev)
                return;
+
+       if (risky_device(pdev)) {
+               pci_dev_put(pdev);
+               return;
+       }
+
        pci_dev_put(pdev);
 
        /* System Management Registers. Might be hidden, in which case
@@ -6173,6 +6213,11 @@ static void __init check_tylersburg_isoch(void)
        if (!pdev)
                return;
 
+       if (risky_device(pdev)) {
+               pci_dev_put(pdev);
+               return;
+       }
+
        if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
                pci_dev_put(pdev);
                return;
index 29fead2..216b3b8 100644 (file)
@@ -563,7 +563,7 @@ config LOONGSON_PCH_PIC
          Support for the Loongson PCH PIC Controller.
 
 config LOONGSON_PCH_MSI
-       bool "Loongson PCH PIC Controller"
+       bool "Loongson PCH MSI Controller"
        depends on MACH_LOONGSON64 || COMPILE_TEST
        depends on PCI
        default MACH_LOONGSON64
index cd685f5..beac4ca 100644 (file)
@@ -3797,10 +3797,10 @@ static void its_wait_vpt_parse_complete(void)
        if (!gic_rdists->has_vpend_valid_dirty)
                return;
 
-       WARN_ON_ONCE(readq_relaxed_poll_timeout(vlpi_base + GICR_VPENDBASER,
-                                               val,
-                                               !(val & GICR_VPENDBASER_Dirty),
-                                               10, 500));
+       WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER,
+                                                      val,
+                                                      !(val & GICR_VPENDBASER_Dirty),
+                                                      10, 500));
 }
 
 static void its_vpe_schedule(struct its_vpe *vpe)
@@ -4054,16 +4054,24 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
        u64 val;
 
        if (info->req_db) {
+               unsigned long flags;
+
                /*
                 * vPE is going to block: make the vPE non-resident with
                 * PendingLast clear and DB set. The GIC guarantees that if
                 * we read-back PendingLast clear, then a doorbell will be
                 * delivered when an interrupt comes.
+                *
+                * Note the locking to deal with the concurrent update of
+                * pending_last from the doorbell interrupt handler that can
+                * run concurrently.
                 */
+               raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
                val = its_clear_vpend_valid(vlpi_base,
                                            GICR_VPENDBASER_PendingLast,
                                            GICR_VPENDBASER_4_1_DB);
                vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
+               raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
        } else {
                /*
                 * We're not blocking, so just make the vPE non-resident
index 00de05a..c17fabd 100644 (file)
@@ -329,10 +329,8 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
 {
-       void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
-       unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
-       u32 val, mask, bit;
-       unsigned long flags;
+       void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
+       unsigned int cpu;
 
        if (!force)
                cpu = cpumask_any_and(mask_val, cpu_online_mask);
@@ -342,13 +340,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
        if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
                return -EINVAL;
 
-       gic_lock_irqsave(flags);
-       mask = 0xff << shift;
-       bit = gic_cpu_map[cpu] << shift;
-       val = readl_relaxed(reg) & ~mask;
-       writel_relaxed(val | bit, reg);
-       gic_unlock_irqrestore(flags);
-
+       writeb_relaxed(gic_cpu_map[cpu], reg);
        irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
        return IRQ_SET_MASK_OK_DONE;
index a6f97fa..8017f6d 100644 (file)
@@ -99,7 +99,7 @@ static int __init riscv_intc_init(struct device_node *node,
 
        hartid = riscv_of_parent_hartid(node);
        if (hartid < 0) {
-               pr_warn("unable to fine hart id for %pOF\n", node);
+               pr_warn("unable to find hart id for %pOF\n", node);
                return 0;
        }
 
index cc408ad..fdb43a6 100644 (file)
@@ -5,7 +5,7 @@ config ISDN_CAPI
          This provides CAPI (the Common ISDN Application Programming
          Interface) Version 2.0, a standard making it easy for programs to
          access ISDN hardware in a device independent way. (For details see
-         <http://www.capi.org/>.)  CAPI supports making and accepting voice
+         <https://www.capi.org/>.)  CAPI supports making and accepting voice
          and data connections, controlling call options and protocols,
          as well as ISDN supplementary services like call forwarding or
          three-party conferences (if supported by the specific hardware
index abdf787..904a4f4 100644 (file)
@@ -158,7 +158,8 @@ release_io_hfcpci(struct hfc_pci *hc)
        /* disable memory mapped ports + busmaster */
        pci_write_config_word(hc->pdev, PCI_COMMAND, 0);
        del_timer(&hc->hw.timer);
-       pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos, hc->hw.dmahandle);
+       dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos,
+                         hc->hw.dmahandle);
        iounmap(hc->hw.pci_io);
 }
 
@@ -2004,8 +2005,9 @@ setup_hw(struct hfc_pci *hc)
        }
        /* Allocate memory for FIFOS */
        /* the memory needs to be on a 32k boundary within the first 4G */
-       pci_set_dma_mask(hc->pdev, 0xFFFF8000);
-       buffer = pci_alloc_consistent(hc->pdev, 0x8000, &hc->hw.dmahandle);
+       dma_set_mask(&hc->pdev->dev, 0xFFFF8000);
+       buffer = dma_alloc_coherent(&hc->pdev->dev, 0x8000, &hc->hw.dmahandle,
+                                   GFP_KERNEL);
        /* We silently assume the address is okay if nonzero */
        if (!buffer) {
                printk(KERN_WARNING
@@ -2018,8 +2020,8 @@ setup_hw(struct hfc_pci *hc)
        if (unlikely(!hc->hw.pci_io)) {
                printk(KERN_WARNING
                       "HFC-PCI: Error in ioremap for PCI!\n");
-               pci_free_consistent(hc->pdev, 0x8000, hc->hw.fifos,
-                                   hc->hw.dmahandle);
+               dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos,
+                                 hc->hw.dmahandle);
                return 1;
        }
 
index 621364b..4274906 100644 (file)
@@ -261,8 +261,7 @@ hfcsusb_ph_info(struct hfcsusb *hw)
                phi->bch[i].Flags = hw->bch[i].Flags;
        }
        _queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY,
-                   sizeof(struct ph_info_dch) + dch->dev.nrbchan *
-                   sizeof(struct ph_info_ch), phi, GFP_ATOMIC);
+                   struct_size(phi, bch, dch->dev.nrbchan), phi, GFP_ATOMIC);
        kfree(phi);
 }
 
index 6aae97e..ee925b5 100644 (file)
@@ -297,8 +297,8 @@ inittiger(struct tiger_hw *card)
 {
        int i;
 
-       card->dma_p = pci_alloc_consistent(card->pdev, NJ_DMA_SIZE,
-                                          &card->dma);
+       card->dma_p = dma_alloc_coherent(&card->pdev->dev, NJ_DMA_SIZE,
+                                        &card->dma, GFP_ATOMIC);
        if (!card->dma_p) {
                pr_info("%s: No DMA memory\n", card->name);
                return -ENOMEM;
@@ -965,8 +965,8 @@ nj_release(struct tiger_hw *card)
                kfree(card->bc[i].hrbuf);
        }
        if (card->dma_p)
-               pci_free_consistent(card->pdev, NJ_DMA_SIZE,
-                                   card->dma_p, card->dma);
+               dma_free_coherent(&card->pdev->dev, NJ_DMA_SIZE, card->dma_p,
+                                 card->dma);
        write_lock_irqsave(&card_lock, flags);
        list_del(&card->list);
        write_unlock_irqrestore(&card_lock, flags);
index dff4132..a660673 100644 (file)
@@ -401,20 +401,20 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 }
 
 static int data_sock_setsockopt(struct socket *sock, int level, int optname,
-                               char __user *optval, unsigned int len)
+                               sockptr_t optval, unsigned int len)
 {
        struct sock *sk = sock->sk;
        int err = 0, opt = 0;
 
        if (*debug & DEBUG_SOCKET)
-               printk(KERN_DEBUG "%s(%p, %d, %x, %p, %d)\n", __func__, sock,
-                      level, optname, optval, len);
+               printk(KERN_DEBUG "%s(%p, %d, %x, optval, %d)\n", __func__, sock,
+                      level, optname, len);
 
        lock_sock(sk);
 
        switch (optname) {
        case MISDN_TIME_STAMP:
-               if (get_user(opt, (int __user *)optval)) {
+               if (copy_from_sockptr(&opt, optval, sizeof(int))) {
                        err = -EFAULT;
                        break;
                }
@@ -738,8 +738,6 @@ static const struct proto_ops base_sock_ops = {
        .recvmsg        = sock_no_recvmsg,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
-       .setsockopt     = sock_no_setsockopt,
-       .getsockopt     = sock_no_getsockopt,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
index 39de94e..6548a60 100644 (file)
@@ -1389,7 +1389,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
                        if (__set_blocks(n1, n1->keys + n2->keys,
                                         block_bytes(b->c)) >
                            btree_blocks(new_nodes[i]))
-                               goto out_nocoalesce;
+                               goto out_unlock_nocoalesce;
 
                        keys = n2->keys;
                        /* Take the key of the node we're getting rid of */
@@ -1418,7 +1418,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
 
                if (__bch_keylist_realloc(&keylist,
                                          bkey_u64s(&new_nodes[i]->key)))
-                       goto out_nocoalesce;
+                       goto out_unlock_nocoalesce;
 
                bch_btree_node_write(new_nodes[i], &cl);
                bch_keylist_add(&keylist, &new_nodes[i]->key);
@@ -1464,6 +1464,10 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
        /* Invalidated our iterator */
        return -EINTR;
 
+out_unlock_nocoalesce:
+       for (i = 0; i < nodes; i++)
+               mutex_unlock(&new_nodes[i]->write_lock);
+
 out_nocoalesce:
        closure_sync(&cl);
 
index f9975c2..2014016 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/genhd.h>
 #include <linux/idr.h>
 #include <linux/kthread.h>
+#include <linux/workqueue.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/reboot.h>
@@ -819,7 +820,8 @@ static void bcache_device_free(struct bcache_device *d)
 }
 
 static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
-                             sector_t sectors, make_request_fn make_request_fn)
+                             sector_t sectors, make_request_fn make_request_fn,
+                             struct block_device *cached_bdev)
 {
        struct request_queue *q;
        const size_t max_stripes = min_t(size_t, INT_MAX,
@@ -885,6 +887,20 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
        q->limits.io_min                = block_size;
        q->limits.logical_block_size    = block_size;
        q->limits.physical_block_size   = block_size;
+
+       if (q->limits.logical_block_size > PAGE_SIZE && cached_bdev) {
+               /*
+                * This should only happen with BCACHE_SB_VERSION_BDEV.
+                * Block/page size is checked for BCACHE_SB_VERSION_CDEV.
+                */
+               pr_info("%s: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n",
+                       d->disk->disk_name, q->limits.logical_block_size,
+                       PAGE_SIZE, bdev_logical_block_size(cached_bdev));
+
+               /* This also adjusts physical block size/min io size if needed */
+               blk_queue_logical_block_size(q, bdev_logical_block_size(cached_bdev));
+       }
+
        blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue);
        blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue);
@@ -1340,7 +1356,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
 
        ret = bcache_device_init(&dc->disk, block_size,
                         dc->bdev->bd_part->nr_sects - dc->sb.data_offset,
-                        cached_dev_make_request);
+                        cached_dev_make_request, dc->bdev);
        if (ret)
                return ret;
 
@@ -1453,7 +1469,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
        kobject_init(&d->kobj, &bch_flash_dev_ktype);
 
        if (bcache_device_init(d, block_bytes(c), u->sectors,
-                       flash_dev_make_request))
+                       flash_dev_make_request, NULL))
                goto err;
 
        bcache_device_attach(d, c, u - c->uuids);
@@ -2364,7 +2380,7 @@ static bool bch_is_open(struct block_device *bdev)
 }
 
 struct async_reg_args {
-       struct work_struct reg_work;
+       struct delayed_work reg_work;
        char *path;
        struct cache_sb *sb;
        struct cache_sb_disk *sb_disk;
@@ -2375,7 +2391,7 @@ static void register_bdev_worker(struct work_struct *work)
 {
        int fail = false;
        struct async_reg_args *args =
-               container_of(work, struct async_reg_args, reg_work);
+               container_of(work, struct async_reg_args, reg_work.work);
        struct cached_dev *dc;
 
        dc = kzalloc(sizeof(*dc), GFP_KERNEL);
@@ -2405,7 +2421,7 @@ static void register_cache_worker(struct work_struct *work)
 {
        int fail = false;
        struct async_reg_args *args =
-               container_of(work, struct async_reg_args, reg_work);
+               container_of(work, struct async_reg_args, reg_work.work);
        struct cache *ca;
 
        ca = kzalloc(sizeof(*ca), GFP_KERNEL);
@@ -2433,11 +2449,12 @@ out:
 static void register_device_aync(struct async_reg_args *args)
 {
        if (SB_IS_BDEV(args->sb))
-               INIT_WORK(&args->reg_work, register_bdev_worker);
+               INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker);
        else
-               INIT_WORK(&args->reg_work, register_cache_worker);
+               INIT_DELAYED_WORK(&args->reg_work, register_cache_worker);
 
-       queue_work(system_wq, &args->reg_work);
+       /* 10 jiffies is enough for a delay */
+       queue_delayed_work(system_wq, &args->reg_work, 10);
 }
 
 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
index ac83f50..489935d 100644 (file)
@@ -1471,7 +1471,7 @@ static void retrieve_deps(struct dm_table *table,
        /*
         * Check we have enough space.
         */
-       needed = sizeof(*deps) + (sizeof(*deps->dev) * count);
+       needed = struct_size(deps, dev, count);
        if (len < needed) {
                param->flags |= DM_BUFFER_FULL_FLAG;
                return;
index f60c025..85e0daa 100644 (file)
@@ -146,10 +146,6 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
  */
 static void rq_completed(struct mapped_device *md)
 {
-       /* nudge anyone waiting on suspend queue */
-       if (unlikely(wq_has_sleeper(&md->wait)))
-               wake_up(&md->wait);
-
        /*
         * dm_put() must be at the end of this function. See the comment above
         */
index 74f3c50..5358894 100644 (file)
@@ -282,6 +282,8 @@ static int persistent_memory_claim(struct dm_writecache *wc)
                        while (daa-- && i < p) {
                                pages[i++] = pfn_t_to_page(pfn);
                                pfn.val++;
+                               if (!(i & 15))
+                                       cond_resched();
                        }
                } while (i < p);
                wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
@@ -849,10 +851,14 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_
 
                if (likely(!e->write_in_progress)) {
                        if (!discarded_something) {
-                               writecache_wait_for_ios(wc, READ);
-                               writecache_wait_for_ios(wc, WRITE);
+                               if (!WC_MODE_PMEM(wc)) {
+                                       writecache_wait_for_ios(wc, READ);
+                                       writecache_wait_for_ios(wc, WRITE);
+                               }
                                discarded_something = true;
                        }
+                       if (!writecache_entry_is_committed(wc, e))
+                               wc->uncommitted_blocks--;
                        writecache_free_entry(wc, e);
                }
 
@@ -2260,6 +2266,12 @@ invalid_optional:
        }
 
        if (WC_MODE_PMEM(wc)) {
+               if (!dax_synchronous(wc->ssd_dev->dax_dev)) {
+                       r = -EOPNOTSUPP;
+                       ti->error = "Asynchronous persistent memory not supported as pmem cache";
+                       goto bad;
+               }
+
                r = persistent_memory_claim(wc);
                if (r) {
                        ti->error = "Unable to map persistent memory for cache";
index 130b5a6..b298fef 100644 (file)
@@ -1078,7 +1078,8 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
        nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1)
                >> zmd->zone_nr_blocks_shift;
        if (!nr_meta_zones ||
-           nr_meta_zones >= zmd->nr_rnd_zones) {
+           (zmd->nr_devs <= 1 && nr_meta_zones >= zmd->nr_rnd_zones) ||
+           (zmd->nr_devs > 1 && nr_meta_zones >= zmd->nr_cache_zones)) {
                dmz_dev_err(dev, "Invalid number of metadata blocks");
                return -ENXIO;
        }
@@ -1949,7 +1950,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd,
                                                    unsigned int idx, bool idle)
 {
        struct dm_zone *dzone = NULL;
-       struct dm_zone *zone, *last = NULL;
+       struct dm_zone *zone, *maxw_z = NULL;
        struct list_head *zone_list;
 
        /* If we have cache zones select from the cache zone list */
@@ -1961,18 +1962,37 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd,
        } else
                zone_list = &zmd->dev[idx].map_rnd_list;
 
+       /*
+        * Find the buffer zone with the heaviest weight or the first (oldest)
+        * data zone that can be reclaimed.
+        */
        list_for_each_entry(zone, zone_list, link) {
                if (dmz_is_buf(zone)) {
                        dzone = zone->bzone;
-                       if (dzone->dev->dev_idx != idx)
-                               continue;
-                       if (!last) {
-                               last = dzone;
+                       if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
                                continue;
-                       }
-                       if (last->weight < dzone->weight)
+                       if (!maxw_z || maxw_z->weight < dzone->weight)
+                               maxw_z = dzone;
+               } else {
+                       dzone = zone;
+                       if (dmz_lock_zone_reclaim(dzone))
+                               return dzone;
+               }
+       }
+
+       if (maxw_z && dmz_lock_zone_reclaim(maxw_z))
+               return maxw_z;
+
+       /*
+        * If we come here, none of the zones inspected could be locked for
+        * reclaim. Try again, being more aggressive, that is, find the
+        * first zone that can be reclaimed regardless of its weight.
+        */
+       list_for_each_entry(zone, zone_list, link) {
+               if (dmz_is_buf(zone)) {
+                       dzone = zone->bzone;
+                       if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
                                continue;
-                       dzone = last;
                } else
                        dzone = zone;
                if (dmz_lock_zone_reclaim(dzone))
@@ -2006,7 +2026,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd,
 struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
                                         unsigned int dev_idx, bool idle)
 {
-       struct dm_zone *zone;
+       struct dm_zone *zone = NULL;
 
        /*
         * Search for a zone candidate to reclaim: 2 cases are possible.
@@ -2019,7 +2039,7 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
        dmz_lock_map(zmd);
        if (list_empty(&zmd->reserved_seq_zones_list))
                zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx);
-       else
+       if (!zone)
                zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle);
        dmz_unlock_map(zmd);
 
@@ -2197,8 +2217,15 @@ struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned int dev_idx,
 {
        struct list_head *list;
        struct dm_zone *zone;
-       int i = 0;
+       int i;
+
+       /* Schedule reclaim to ensure free zones are available */
+       if (!(flags & DMZ_ALLOC_RECLAIM)) {
+               for (i = 0; i < zmd->nr_devs; i++)
+                       dmz_schedule_reclaim(zmd->dev[i].reclaim);
+       }
 
+       i = 0;
 again:
        if (flags & DMZ_ALLOC_CACHE)
                list = &zmd->unmap_cache_list;
index 2261b4d..9c0ecc9 100644 (file)
@@ -377,6 +377,7 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc)
                        dmz_metadata_label(zmd), zrc->dev_idx);
                return -EBUSY;
        }
+       rzone = dzone;
 
        start = jiffies;
        if (dmz_is_cache(dzone) || dmz_is_rnd(dzone)) {
@@ -391,8 +392,6 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc)
                         */
                        ret = dmz_reclaim_rnd_data(zrc, dzone);
                }
-               rzone = dzone;
-
        } else {
                struct dm_zone *bzone = dzone->bzone;
                sector_t chunk_block = 0;
@@ -415,7 +414,6 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc)
                         * be later reclaimed.
                         */
                        ret = dmz_reclaim_seq_data(zrc, dzone);
-                       rzone = dzone;
                }
        }
 out:
@@ -458,6 +456,8 @@ static unsigned int dmz_reclaim_percentage(struct dmz_reclaim *zrc)
                nr_zones = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
                nr_unmap = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
        }
+       if (nr_unmap <= 1)
+               return 0;
        return nr_unmap * 100 / nr_zones;
 }
 
@@ -503,7 +503,7 @@ static void dmz_reclaim_work(struct work_struct *work)
 {
        struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
        struct dmz_metadata *zmd = zrc->metadata;
-       unsigned int p_unmap, nr_unmap_rnd = 0, nr_rnd = 0;
+       unsigned int p_unmap;
        int ret;
 
        if (dmz_dev_is_dying(zmd))
@@ -529,9 +529,6 @@ static void dmz_reclaim_work(struct work_struct *work)
                zrc->kc_throttle.throttle = min(75U, 100U - p_unmap / 2);
        }
 
-       nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
-       nr_rnd = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
-
        DMDEBUG("(%s/%u): Reclaim (%u): %s, %u%% free zones (%u/%u cache %u/%u random)",
                dmz_metadata_label(zmd), zrc->dev_idx,
                zrc->kc_throttle.throttle,
index a907a94..42aa513 100644 (file)
@@ -400,15 +400,7 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
                dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
        struct dmz_metadata *zmd = dmz->metadata;
        struct dm_zone *zone;
-       int i, ret;
-
-       /*
-        * Write may trigger a zone allocation. So make sure the
-        * allocation can succeed.
-        */
-       if (bio_op(bio) == REQ_OP_WRITE)
-               for (i = 0; i < dmz->nr_ddevs; i++)
-                       dmz_schedule_reclaim(dmz->dev[i].reclaim);
+       int ret;
 
        dmz_lock_metadata(zmd);
 
@@ -890,7 +882,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        }
 
        /* Set target (no write same support) */
-       ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata) << 9;
+       ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata);
        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
        ti->num_write_zeroes_bios = 1;
index 109e81f..52449af 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/sched/mm.h>
 #include <linux/sched/signal.h>
 #include <linux/blkpg.h>
 #include <linux/bio.h>
@@ -654,28 +655,6 @@ static void free_tio(struct dm_target_io *tio)
        bio_put(&tio->clone);
 }
 
-static bool md_in_flight_bios(struct mapped_device *md)
-{
-       int cpu;
-       struct hd_struct *part = &dm_disk(md)->part0;
-       long sum = 0;
-
-       for_each_possible_cpu(cpu) {
-               sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
-               sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
-       }
-
-       return sum != 0;
-}
-
-static bool md_in_flight(struct mapped_device *md)
-{
-       if (queue_is_mq(md->queue))
-               return blk_mq_queue_inflight(md->queue);
-       else
-               return md_in_flight_bios(md);
-}
-
 u64 dm_start_time_ns_from_clone(struct bio *bio)
 {
        struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
@@ -1009,6 +988,7 @@ static void clone_endio(struct bio *bio)
        struct dm_io *io = tio->io;
        struct mapped_device *md = tio->io->md;
        dm_endio_fn endio = tio->ti->type->end_io;
+       struct bio *orig_bio = io->orig_bio;
 
        if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
                if (bio_op(bio) == REQ_OP_DISCARD &&
@@ -1022,6 +1002,18 @@ static void clone_endio(struct bio *bio)
                        disable_write_zeroes(md);
        }
 
+       /*
+        * For zone-append bios get offset in zone of the written
+        * sector and add that to the original bio sector pos.
+        */
+       if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) {
+               sector_t written_sector = bio->bi_iter.bi_sector;
+               struct request_queue *q = orig_bio->bi_disk->queue;
+               u64 mask = (u64)blk_queue_zone_sectors(q) - 1;
+
+               orig_bio->bi_iter.bi_sector += written_sector & mask;
+       }
+
        if (endio) {
                int r = endio(tio->ti, bio, &error);
                switch (r) {
@@ -1452,9 +1444,6 @@ static int __send_empty_flush(struct clone_info *ci)
        BUG_ON(bio_has_data(ci->bio));
        while ((ti = dm_table_get_target(ci->map, target_nr++)))
                __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
-
-       bio_disassociate_blkg(ci->bio);
-
        return 0;
 }
 
@@ -1642,6 +1631,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
                ci.bio = &flush_bio;
                ci.sector_count = 0;
                error = __send_empty_flush(&ci);
+               bio_uninit(ci.bio);
                /* dec_pending submits any data associated with flush */
        } else if (op_is_zone_mgmt(bio_op(bio))) {
                ci.bio = bio;
@@ -1716,6 +1706,7 @@ static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
                ci.bio = &flush_bio;
                ci.sector_count = 0;
                error = __send_empty_flush(&ci);
+               bio_uninit(ci.bio);
                /* dec_pending submits any data associated with flush */
        } else {
                struct dm_target_io *tio;
@@ -2457,15 +2448,29 @@ void dm_put(struct mapped_device *md)
 }
 EXPORT_SYMBOL_GPL(dm_put);
 
-static int dm_wait_for_completion(struct mapped_device *md, long task_state)
+static bool md_in_flight_bios(struct mapped_device *md)
+{
+       int cpu;
+       struct hd_struct *part = &dm_disk(md)->part0;
+       long sum = 0;
+
+       for_each_possible_cpu(cpu) {
+               sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
+               sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
+       }
+
+       return sum != 0;
+}
+
+static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state)
 {
        int r = 0;
        DEFINE_WAIT(wait);
 
-       while (1) {
+       while (true) {
                prepare_to_wait(&md->wait, &wait, task_state);
 
-               if (!md_in_flight(md))
+               if (!md_in_flight_bios(md))
                        break;
 
                if (signal_pending_state(task_state, current)) {
@@ -2480,6 +2485,28 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state)
        return r;
 }
 
+static int dm_wait_for_completion(struct mapped_device *md, long task_state)
+{
+       int r = 0;
+
+       if (!queue_is_mq(md->queue))
+               return dm_wait_for_bios_completion(md, task_state);
+
+       while (true) {
+               if (!blk_mq_queue_inflight(md->queue))
+                       break;
+
+               if (signal_pending_state(task_state, current)) {
+                       r = -EINTR;
+                       break;
+               }
+
+               msleep(5);
+       }
+
+       return r;
+}
+
 /*
  * Process the deferred bios
  */
@@ -2913,17 +2940,25 @@ EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
                       unsigned cookie)
 {
+       int r;
+       unsigned noio_flag;
        char udev_cookie[DM_COOKIE_LENGTH];
        char *envp[] = { udev_cookie, NULL };
 
+       noio_flag = memalloc_noio_save();
+
        if (!cookie)
-               return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+               r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
        else {
                snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
                         DM_COOKIE_ENV_VAR_NAME, cookie);
-               return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
-                                         action, envp);
+               r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
+                                      action, envp);
        }
+
+       memalloc_noio_restore(noio_flag);
+
+       return r;
 }
 
 uint32_t dm_next_uevent_seq(struct mapped_device *md)
index a4ee6b8..b91e472 100644 (file)
@@ -39,8 +39,6 @@
  *     Troy Laramy <t-laramy@ti.com>
  */
 
-#include <asm/cacheflush.h>
-
 #include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/delay.h>
index 10c214b..1ac9aef 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
-#include <asm/cacheflush.h>
 
 #include <media/v4l2-dev.h>
 #include <media/v4l2-ioctl.h>
index 3362962..b02a3c7 100644 (file)
@@ -193,7 +193,7 @@ struct pwc_raw_frame {
                                   decompressor) */
        __u8   cmd[4];          /* the four byte of the command (in case of
                                   nala, only the first 3 bytes is filled) */
-       __u8   rawframe[0];     /* frame_size = H / 4 * vbandlength */
+       __u8   rawframe[];      /* frame_size = H / 4 * vbandlength */
 } __packed;
 
 /* intermediate buffers with raw data from the USB cam */
index 68aea22..5216487 100644 (file)
@@ -1324,13 +1324,13 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
                        return 0; /* fw doesn't need any host buffers */
 
                /* spin till we get enough memory */
-               while(host_page_buffer_sz > 0) {
-
-                       if((ioc->HostPageBuffer = pci_alloc_consistent(
-                           ioc->pcidev,
-                           host_page_buffer_sz,
-                           &ioc->HostPageBuffer_dma)) != NULL) {
-
+               while (host_page_buffer_sz > 0) {
+                       ioc->HostPageBuffer =
+                               dma_alloc_coherent(&ioc->pcidev->dev,
+                                               host_page_buffer_sz,
+                                               &ioc->HostPageBuffer_dma,
+                                               GFP_KERNEL);
+                       if (ioc->HostPageBuffer) {
                                dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
                                    "host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
                                    ioc->name, ioc->HostPageBuffer,
@@ -2741,8 +2741,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
                sz = ioc->alloc_sz;
                dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "free  @ %p, sz=%d bytes\n",
                    ioc->name, ioc->alloc, ioc->alloc_sz));
-               pci_free_consistent(ioc->pcidev, sz,
-                               ioc->alloc, ioc->alloc_dma);
+               dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc,
+                               ioc->alloc_dma);
                ioc->reply_frames = NULL;
                ioc->req_frames = NULL;
                ioc->alloc = NULL;
@@ -2751,8 +2751,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
 
        if (ioc->sense_buf_pool != NULL) {
                sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
-               pci_free_consistent(ioc->pcidev, sz,
-                               ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
+               dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool,
+                               ioc->sense_buf_pool_dma);
                ioc->sense_buf_pool = NULL;
                ioc->alloc_total -= sz;
        }
@@ -2802,7 +2802,7 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
                        "HostPageBuffer free  @ %p, sz=%d bytes\n",
                        ioc->name, ioc->HostPageBuffer,
                        ioc->HostPageBuffer_sz));
-               pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
+               dma_free_coherent(&ioc->pcidev->dev, ioc->HostPageBuffer_sz,
                    ioc->HostPageBuffer, ioc->HostPageBuffer_dma);
                ioc->HostPageBuffer = NULL;
                ioc->HostPageBuffer_sz = 0;
@@ -4497,7 +4497,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
                                ioc->name, sz, sz, num_chain));
 
                total_size += sz;
-               mem = pci_alloc_consistent(ioc->pcidev, total_size, &alloc_dma);
+               mem = dma_alloc_coherent(&ioc->pcidev->dev, total_size,
+                               &alloc_dma, GFP_KERNEL);
                if (mem == NULL) {
                        printk(MYIOC_s_ERR_FMT "Unable to allocate Reply, Request, Chain Buffers!\n",
                                ioc->name);
@@ -4574,8 +4575,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
                spin_unlock_irqrestore(&ioc->FreeQlock, flags);
 
                sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
-               ioc->sense_buf_pool =
-                       pci_alloc_consistent(ioc->pcidev, sz, &ioc->sense_buf_pool_dma);
+               ioc->sense_buf_pool = dma_alloc_coherent(&ioc->pcidev->dev, sz,
+                               &ioc->sense_buf_pool_dma, GFP_KERNEL);
                if (ioc->sense_buf_pool == NULL) {
                        printk(MYIOC_s_ERR_FMT "Unable to allocate Sense Buffers!\n",
                                ioc->name);
@@ -4613,18 +4614,16 @@ out_fail:
 
        if (ioc->alloc != NULL) {
                sz = ioc->alloc_sz;
-               pci_free_consistent(ioc->pcidev,
-                               sz,
-                               ioc->alloc, ioc->alloc_dma);
+               dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc,
+                               ioc->alloc_dma);
                ioc->reply_frames = NULL;
                ioc->req_frames = NULL;
                ioc->alloc_total -= sz;
        }
        if (ioc->sense_buf_pool != NULL) {
                sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
-               pci_free_consistent(ioc->pcidev,
-                               sz,
-                               ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
+               dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool,
+                               ioc->sense_buf_pool_dma);
                ioc->sense_buf_pool = NULL;
        }
 
index f0737c5..1491561 100644 (file)
@@ -118,8 +118,6 @@ int                 mptscsih_suspend(struct pci_dev *pdev, pm_message_t state);
 int            mptscsih_resume(struct pci_dev *pdev);
 #endif
 
-#define SNS_LEN(scp)   SCSI_SENSE_BUFFERSIZE
-
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*
@@ -2422,7 +2420,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
                /* Copy the sense received into the scsi command block. */
                req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
                sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC));
-               memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc));
+               memcpy(sc->sense_buffer, sense_data, MPT_SENSE_BUFFER_ALLOC);
 
                /* Log SMART data (asc = 0x5D, non-IM case only) if required.
                 */
index db8cdf5..e9cacc2 100644 (file)
@@ -412,6 +412,7 @@ MODULE_DEVICE_TABLE(of, mt6360_pmu_of_id);
 
 static struct i2c_driver mt6360_pmu_driver = {
        .driver = {
+               .name = "mt6360_pmu",
                .pm = &mt6360_pmu_pm_ops,
                .of_match_table = of_match_ptr(mt6360_pmu_of_id),
        },
index f82974a..b0f62cb 100644 (file)
@@ -62,6 +62,12 @@ static void hl_fence_release(struct dma_fence *fence)
                container_of(fence, struct hl_cs_compl, base_fence);
        struct hl_device *hdev = hl_cs_cmpl->hdev;
 
+       /* EBUSY means the CS was never submitted and hence we don't have
+        * an attached hw_sob object that we should handle here
+        */
+       if (fence->error == -EBUSY)
+               goto free;
+
        if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
                        (hl_cs_cmpl->type == CS_TYPE_WAIT)) {
 
@@ -92,6 +98,7 @@ static void hl_fence_release(struct dma_fence *fence)
                kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);
        }
 
+free:
        kfree_rcu(hl_cs_cmpl, base_fence.rcu);
 }
 
@@ -328,10 +335,16 @@ static void cs_do_release(struct kref *ref)
 
        hl_ctx_put(cs->ctx);
 
+       /* We need to mark an error for not submitted because in that case
+        * the dma fence release flow is different. Mainly, we don't need
+        * to handle hw_sob for signal/wait
+        */
        if (cs->timedout)
                dma_fence_set_error(cs->fence, -ETIMEDOUT);
        else if (cs->aborted)
                dma_fence_set_error(cs->fence, -EIO);
+       else if (!cs->submitted)
+               dma_fence_set_error(cs->fence, -EBUSY);
 
        dma_fence_signal(cs->fence);
        dma_fence_put(cs->fence);
index 3c8dcdf..fc4372c 100644 (file)
@@ -480,7 +480,7 @@ out:
        return 0;
 }
 
-static ssize_t mmu_write(struct file *file, const char __user *buf,
+static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf,
                size_t count, loff_t *f_pos)
 {
        struct seq_file *s = file->private_data;
@@ -1125,7 +1125,7 @@ static const struct hl_info_list hl_debugfs_list[] = {
        {"command_submission_jobs", command_submission_jobs_show, NULL},
        {"userptr", userptr_show, NULL},
        {"vm", vm_show, NULL},
-       {"mmu", mmu_show, mmu_write},
+       {"mmu", mmu_show, mmu_asid_va_write},
        {"engines", engines_show, NULL}
 };
 
index 61f88e9..834470d 100644 (file)
@@ -96,7 +96,7 @@
 
 #define GAUDI_NUM_OF_QM_ARB_ERR_CAUSE  3
 
-#define GAUDI_ARB_WDT_TIMEOUT          0x400000
+#define GAUDI_ARB_WDT_TIMEOUT          0x1000000
 
 static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
                "gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
@@ -1893,6 +1893,8 @@ static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id,
        WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off, so_base_ws_lo);
        WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off, so_base_ws_hi);
 
+       WREG32(mmDMA0_QM_CP_BARRIER_CFG_0 + q_off, 0x100);
+
        /* The following configuration is needed only once per QMAN */
        if (qman_id == 0) {
                /* Configure RAZWI IRQ */
@@ -2725,6 +2727,12 @@ static int gaudi_mmu_init(struct hl_device *hdev)
        WREG32(mmSTLB_HOP_CONFIGURATION,
                        hdev->mmu_huge_page_opt ? 0x30440 : 0x40440);
 
+       /*
+        * The H/W expects the first PI after init to be 1. After wraparound
+        * we'll write 0.
+        */
+       gaudi->mmu_cache_inv_pi = 1;
+
        gaudi->hw_cap_initialized |= HW_CAP_MMU;
 
        return 0;
@@ -3790,6 +3798,25 @@ static int gaudi_validate_dma_pkt_no_mmu(struct hl_device *hdev,
                                                src_in_host);
 }
 
+static int gaudi_validate_load_and_exe_pkt(struct hl_device *hdev,
+                                       struct hl_cs_parser *parser,
+                                       struct packet_load_and_exe *user_pkt)
+{
+       u32 cfg;
+
+       cfg = le32_to_cpu(user_pkt->cfg);
+
+       if (cfg & GAUDI_PKT_LOAD_AND_EXE_CFG_DST_MASK) {
+               dev_err(hdev->dev,
+                       "User not allowed to use Load and Execute\n");
+               return -EPERM;
+       }
+
+       parser->patched_cb_size += sizeof(struct packet_load_and_exe);
+
+       return 0;
+}
+
 static int gaudi_validate_cb(struct hl_device *hdev,
                        struct hl_cs_parser *parser, bool is_mmu)
 {
@@ -3838,6 +3865,11 @@ static int gaudi_validate_cb(struct hl_device *hdev,
                        rc = -EPERM;
                        break;
 
+               case PACKET_LOAD_AND_EXE:
+                       rc = gaudi_validate_load_and_exe_pkt(hdev, parser,
+                               (struct packet_load_and_exe *) user_pkt);
+                       break;
+
                case PACKET_LIN_DMA:
                        parser->contains_dma_pkt = true;
                        if (is_mmu)
@@ -3855,7 +3887,6 @@ static int gaudi_validate_cb(struct hl_device *hdev,
                case PACKET_FENCE:
                case PACKET_NOP:
                case PACKET_ARB_POINT:
-               case PACKET_LOAD_AND_EXE:
                        parser->patched_cb_size += pkt_size;
                        break;
 
@@ -5994,6 +6025,8 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
        mutex_lock(&hdev->mmu_cache_lock);
 
        /* L0 & L1 invalidation */
+       WREG32(mmSTLB_INV_PS, 3);
+       WREG32(mmSTLB_CACHE_INV, gaudi->mmu_cache_inv_pi++);
        WREG32(mmSTLB_INV_PS, 2);
 
        rc = hl_poll_timeout(
index a46530d..41a8d9b 100644 (file)
@@ -229,6 +229,8 @@ struct gaudi_internal_qman_info {
  * @multi_msi_mode: whether we are working in multi MSI single MSI mode.
  *                  Multi MSI is possible only with IOMMU enabled.
  * @ext_queue_idx: helper index for external queues initialization.
+ * @mmu_cache_inv_pi: PI for MMU cache invalidation flow. The H/W expects an
+ *                    8-bit value so use u8.
  */
 struct gaudi_device {
        int (*armcp_info_get)(struct hl_device *hdev);
@@ -248,6 +250,7 @@ struct gaudi_device {
        u32                             hw_cap_initialized;
        u8                              multi_msi_mode;
        u8                              ext_queue_idx;
+       u8                              mmu_cache_inv_pi;
 };
 
 void gaudi_init_security(struct hl_device *hdev);
index 9a5800b..0f0cd06 100644 (file)
@@ -197,6 +197,9 @@ struct packet_wait {
        __le32 ctl;
 };
 
+#define GAUDI_PKT_LOAD_AND_EXE_CFG_DST_SHIFT   0
+#define GAUDI_PKT_LOAD_AND_EXE_CFG_DST_MASK    0x00000001
+
 struct packet_load_and_exe {
        __le32 cfg;
        __le32 ctl;
index bccd341..d5d2af4 100644 (file)
@@ -828,7 +828,7 @@ static void run_plant_and_detach_test(int is_early)
        char before[BREAK_INSTR_SIZE];
        char after[BREAK_INSTR_SIZE];
 
-       probe_kernel_read(before, (char *)kgdbts_break_test,
+       copy_from_kernel_nofault(before, (char *)kgdbts_break_test,
          BREAK_INSTR_SIZE);
        init_simple_test();
        ts.tst = plant_and_detach_test;
@@ -836,8 +836,8 @@ static void run_plant_and_detach_test(int is_early)
        /* Activate test with initial breakpoint */
        if (!is_early)
                kgdb_breakpoint();
-       probe_kernel_read(after, (char *)kgdbts_break_test,
-         BREAK_INSTR_SIZE);
+       copy_from_kernel_nofault(after, (char *)kgdbts_break_test,
+                       BREAK_INSTR_SIZE);
        if (memcmp(before, after, BREAK_INSTR_SIZE)) {
                printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
                panic("kgdb memory corruption");
index 9392934..7becfc7 100644 (file)
@@ -94,6 +94,7 @@
 #define MEI_DEV_ID_JSP_N      0x4DE0  /* Jasper Lake Point N */
 
 #define MEI_DEV_ID_TGP_LP     0xA0E0  /* Tiger Lake Point LP */
+#define MEI_DEV_ID_TGP_H      0x43E0  /* Tiger Lake Point H */
 
 #define MEI_DEV_ID_MCC        0x4B70  /* Mule Creek Canyon (EHL) */
 #define MEI_DEV_ID_MCC_4      0x4B75  /* Mule Creek Canyon 4 (EHL) */
 #  define PCI_CFG_HFS_1_D0I3_MSK     0x80000000
 #define PCI_CFG_HFS_2         0x48
 #define PCI_CFG_HFS_3         0x60
+#  define PCI_CFG_HFS_3_FW_SKU_MSK   0x00000070
+#  define PCI_CFG_HFS_3_FW_SKU_SPS   0x00000060
 #define PCI_CFG_HFS_4         0x64
 #define PCI_CFG_HFS_5         0x68
 #define PCI_CFG_HFS_6         0x6C
index f620442..7649710 100644 (file)
@@ -1366,7 +1366,7 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev)
 #define MEI_CFG_FW_NM                           \
        .quirk_probe = mei_me_fw_type_nm
 
-static bool mei_me_fw_type_sps(struct pci_dev *pdev)
+static bool mei_me_fw_type_sps_4(struct pci_dev *pdev)
 {
        u32 reg;
        unsigned int devfn;
@@ -1382,7 +1382,36 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
        return (reg & 0xf0000) == 0xf0000;
 }
 
-#define MEI_CFG_FW_SPS                           \
+#define MEI_CFG_FW_SPS_4                          \
+       .quirk_probe = mei_me_fw_type_sps_4
+
+/**
+ * mei_me_fw_sku_sps() - check for sps sku
+ *
+ * Read ME FW Status register to check for SPS Firmware.
+ * The SPS FW is only signaled in pci function 0
+ *
+ * @pdev: pci device
+ *
+ * Return: true in case of SPS firmware
+ */
+static bool mei_me_fw_type_sps(struct pci_dev *pdev)
+{
+       u32 reg;
+       u32 fw_type;
+       unsigned int devfn;
+
+       devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
+       pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_3, &reg);
+       trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg);
+       fw_type = (reg & PCI_CFG_HFS_3_FW_SKU_MSK);
+
+       dev_dbg(&pdev->dev, "fw type is %d\n", fw_type);
+
+       return fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
+}
+
+#define MEI_CFG_FW_SPS                          \
        .quirk_probe = mei_me_fw_type_sps
 
 #define MEI_CFG_FW_VER_SUPP                     \
@@ -1452,10 +1481,17 @@ static const struct mei_cfg mei_me_pch8_cfg = {
 };
 
 /* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
-static const struct mei_cfg mei_me_pch8_sps_cfg = {
+static const struct mei_cfg mei_me_pch8_sps_4_cfg = {
        MEI_CFG_PCH8_HFS,
        MEI_CFG_FW_VER_SUPP,
-       MEI_CFG_FW_SPS,
+       MEI_CFG_FW_SPS_4,
+};
+
+/* LBG with quirk for SPS (4.0) Firmware exclusion */
+static const struct mei_cfg mei_me_pch12_sps_4_cfg = {
+       MEI_CFG_PCH8_HFS,
+       MEI_CFG_FW_VER_SUPP,
+       MEI_CFG_FW_SPS_4,
 };
 
 /* Cannon Lake and newer devices */
@@ -1465,10 +1501,20 @@ static const struct mei_cfg mei_me_pch12_cfg = {
        MEI_CFG_DMA_128,
 };
 
-/* LBG with quirk for SPS Firmware exclusion */
+/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion */
 static const struct mei_cfg mei_me_pch12_sps_cfg = {
        MEI_CFG_PCH8_HFS,
        MEI_CFG_FW_VER_SUPP,
+       MEI_CFG_DMA_128,
+       MEI_CFG_FW_SPS,
+};
+
+/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion
+ * w/o DMA support
+ */
+static const struct mei_cfg mei_me_pch12_nodma_sps_cfg = {
+       MEI_CFG_PCH8_HFS,
+       MEI_CFG_FW_VER_SUPP,
        MEI_CFG_FW_SPS,
 };
 
@@ -1480,6 +1526,15 @@ static const struct mei_cfg mei_me_pch15_cfg = {
        MEI_CFG_TRC,
 };
 
+/* Tiger Lake with quirk for SPS 5.0 and newer Firmware exclusion */
+static const struct mei_cfg mei_me_pch15_sps_cfg = {
+       MEI_CFG_PCH8_HFS,
+       MEI_CFG_FW_VER_SUPP,
+       MEI_CFG_DMA_128,
+       MEI_CFG_TRC,
+       MEI_CFG_FW_SPS,
+};
+
 /*
  * mei_cfg_list - A list of platform platform specific configurations.
  * Note: has to be synchronized with  enum mei_cfg_idx.
@@ -1492,10 +1547,13 @@ static const struct mei_cfg *const mei_cfg_list[] = {
        [MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
        [MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
        [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
-       [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
+       [MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg,
        [MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
+       [MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg,
        [MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
+       [MEI_ME_PCH12_SPS_NODMA_CFG] = &mei_me_pch12_nodma_sps_cfg,
        [MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
+       [MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg,
 };
 
 const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
index b6b94e2..6a89736 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2012-2020, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
  */
 
@@ -76,14 +76,20 @@ struct mei_me_hw {
  *                         with quirk for Node Manager exclusion.
  * @MEI_ME_PCH8_CFG:       Platform Controller Hub Gen8 and newer
  *                         client platforms.
- * @MEI_ME_PCH8_SPS_CFG:   Platform Controller Hub Gen8 and newer
+ * @MEI_ME_PCH8_SPS_4_CFG: Platform Controller Hub Gen8 and newer
  *                         servers platforms with quirk for
  *                         SPS firmware exclusion.
  * @MEI_ME_PCH12_CFG:      Platform Controller Hub Gen12 and newer
- * @MEI_ME_PCH12_SPS_CFG:  Platform Controller Hub Gen12 and newer
+ * @MEI_ME_PCH12_SPS_4_CFG:Platform Controller Hub Gen12 up to 4.0
+ *                         servers platforms with quirk for
+ *                         SPS firmware exclusion.
+ * @MEI_ME_PCH12_SPS_CFG:  Platform Controller Hub Gen12 5.0 and newer
  *                         servers platforms with quirk for
  *                         SPS firmware exclusion.
  * @MEI_ME_PCH15_CFG:      Platform Controller Hub Gen15 and newer
+ * @MEI_ME_PCH15_SPS_CFG:  Platform Controller Hub Gen15 and newer
+ *                         servers platforms with quirk for
+ *                         SPS firmware exclusion.
  * @MEI_ME_NUM_CFG:        Upper Sentinel.
  */
 enum mei_cfg_idx {
@@ -94,10 +100,13 @@ enum mei_cfg_idx {
        MEI_ME_PCH7_CFG,
        MEI_ME_PCH_CPT_PBG_CFG,
        MEI_ME_PCH8_CFG,
-       MEI_ME_PCH8_SPS_CFG,
+       MEI_ME_PCH8_SPS_4_CFG,
        MEI_ME_PCH12_CFG,
+       MEI_ME_PCH12_SPS_4_CFG,
        MEI_ME_PCH12_SPS_CFG,
+       MEI_ME_PCH12_SPS_NODMA_CFG,
        MEI_ME_PCH15_CFG,
+       MEI_ME_PCH15_SPS_CFG,
        MEI_ME_NUM_CFG,
 };
 
index 71f795b..2a3f2fd 100644 (file)
@@ -59,18 +59,18 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_4_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_4_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_4_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, MEI_ME_PCH8_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_4_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_4_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_4_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
@@ -84,8 +84,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH8_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_SPS_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH12_SPS_NODMA_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
@@ -96,6 +96,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_JSP_N, MEI_ME_PCH15_CFG)},
 
index 7eb38d7..08a3b1c 100644 (file)
@@ -1146,9 +1146,11 @@ static int meson_mmc_probe(struct platform_device *pdev)
 
        mmc->caps |= MMC_CAP_CMD23;
        if (host->dram_access_quirk) {
+               /* Limit segments to 1 due to low available sram memory */
+               mmc->max_segs = 1;
                /* Limit to the available sram memory */
-               mmc->max_segs = SD_EMMC_SRAM_DATA_BUF_LEN / mmc->max_blk_size;
-               mmc->max_blk_count = mmc->max_segs;
+               mmc->max_blk_count = SD_EMMC_SRAM_DATA_BUF_LEN /
+                                    mmc->max_blk_size;
        } else {
                mmc->max_blk_count = CMD_CFG_LENGTH_MASK;
                mmc->max_segs = SD_EMMC_DESC_BUF_LEN /
index 5e20c09..df43f42 100644 (file)
@@ -689,7 +689,7 @@ MODULE_DEVICE_TABLE(of, owl_mmc_of_match);
 static struct platform_driver owl_mmc_driver = {
        .driver = {
                .name   = "owl_mmc",
-               .of_match_table = of_match_ptr(owl_mmc_of_match),
+               .of_match_table = owl_mmc_of_match,
        },
        .probe          = owl_mmc_probe,
        .remove         = owl_mmc_remove,
index b277dd7..c0d58e9 100644 (file)
@@ -618,8 +618,9 @@ static int msm_init_cm_dll(struct sdhci_host *host)
        config &= ~CORE_CLK_PWRSAVE;
        writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
 
-       config = msm_host->dll_config;
-       writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
+       if (msm_host->dll_config)
+               writel_relaxed(msm_host->dll_config,
+                               host->ioaddr + msm_offset->core_dll_config);
 
        if (msm_host->use_14lpp_dll_reset) {
                config = readl_relaxed(host->ioaddr +
index 76d832a..7d93056 100644 (file)
@@ -1273,8 +1273,8 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
                return -EROFS;
        if (!len)
                return 0;
-       if (!mtd->oops_panic_write)
-               mtd->oops_panic_write = true;
+       if (!master->oops_panic_write)
+               master->oops_panic_write = true;
 
        return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
                                    retlen, buf);
index 0a5cb77..f5a53aa 100644 (file)
@@ -1761,7 +1761,7 @@ static void ns_switch_state(struct nandsim *ns)
 
                NS_DBG("switch_state: operation is unknown, try to find it\n");
 
-               if (!ns_find_operation(ns, 0))
+               if (ns_find_operation(ns, 0))
                        return;
 
                if ((ns->state & ACTION_MASK) &&
index 94bfba9..2925547 100644 (file)
@@ -224,7 +224,7 @@ static int xway_nand_remove(struct platform_device *pdev)
        struct nand_chip *chip = &data->chip;
        int ret;
 
-       ret = mtd_device_unregister(mtd);
+       ret = mtd_device_unregister(nand_to_mtd(chip));
        WARN_ON(ret);
        nand_cleanup(chip);
 
index 9a49c4c..1368d1d 100644 (file)
@@ -460,7 +460,7 @@ config NET_SB1000
 
          At present this driver only compiles as a module, so say M here if
          you have this card. The module will be called sb1000. Then read
-         <file:Documentation/networking/device_drivers/sb1000.rst> for
+         <file:Documentation/networking/device_drivers/cable/sb1000.rst> for
          information on how to use this module, as it needs special ppp
          scripts for establishing a connection. Further documentation
          and the necessary scripts can be found at:
@@ -495,6 +495,7 @@ config XEN_NETDEV_FRONTEND
        tristate "Xen network device frontend driver"
        depends on XEN
        select XEN_XENBUS_FRONTEND
+       select PAGE_POOL
        default y
        help
          This driver provides support for Xen paravirtual network
index 09f94d4..d4f22a2 100644 (file)
@@ -59,7 +59,8 @@ config COPS
          package. This driver is experimental, which means that it may not
          work. This driver will only work if you choose "AppleTalk DDP"
          networking support, above.
-         Please read the file <file:Documentation/networking/cops.rst>.
+         Please read the file
+         <file:Documentation/networking/device_drivers/appletalk/cops.rst>.
 
 config COPS_DAYNA
        bool "Dayna firmware support"
index efd1a1d..44eb2b1 100644 (file)
@@ -125,7 +125,6 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                bareudp->dev->stats.rx_dropped++;
                goto drop;
        }
-
        tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
        if (!tun_dst) {
                bareudp->dev->stats.rx_dropped++;
@@ -552,6 +551,8 @@ static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
 static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
                        struct netlink_ext_ack *extack)
 {
+       memset(conf, 0, sizeof(*conf));
+
        if (!data[IFLA_BAREUDP_PORT]) {
                NL_SET_ERR_MSG(extack, "port not specified");
                return -EINVAL;
@@ -570,6 +571,9 @@ static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
        if (data[IFLA_BAREUDP_SRCPORT_MIN])
                conf->sport_min =  nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);
 
+       if (data[IFLA_BAREUDP_MULTIPROTO_MODE])
+               conf->multi_proto_mode = true;
+
        return 0;
 }
 
@@ -607,6 +611,7 @@ static int bareudp_configure(struct net *net, struct net_device *dev,
        bareudp->ethertype = conf->ethertype;
        bareudp->sport_min = conf->sport_min;
        bareudp->multi_proto_mode = conf->multi_proto_mode;
+
        err = register_netdevice(dev);
        if (err)
                return err;
index 004919a..e2d491c 100644 (file)
@@ -79,6 +79,7 @@
 #include <net/pkt_sched.h>
 #include <linux/rculist.h>
 #include <net/flow_dissector.h>
+#include <net/xfrm.h>
 #include <net/bonding.h>
 #include <net/bond_3ad.h>
 #include <net/bond_alb.h>
@@ -278,8 +279,6 @@ const char *bond_mode_name(int mode)
        return names[mode];
 }
 
-/*---------------------------------- VLAN -----------------------------------*/
-
 /**
  * bond_dev_queue_xmit - Prepare skb for xmit.
  *
@@ -302,6 +301,8 @@ netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
        return dev_queue_xmit(skb);
 }
 
+/*---------------------------------- VLAN -----------------------------------*/
+
 /* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
  * We don't protect the slave list iteration with a lock because:
  * a. This operation is performed in IOCTL context,
@@ -372,6 +373,98 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
        return 0;
 }
 
+/*---------------------------------- XFRM -----------------------------------*/
+
+#ifdef CONFIG_XFRM_OFFLOAD
+/**
+ * bond_ipsec_add_sa - program device with a security association
+ * @xs: pointer to transformer state struct
+ **/
+static int bond_ipsec_add_sa(struct xfrm_state *xs)
+{
+       struct net_device *bond_dev = xs->xso.dev;
+       struct bonding *bond;
+       struct slave *slave;
+
+       if (!bond_dev)
+               return -EINVAL;
+
+       bond = netdev_priv(bond_dev);
+       slave = rcu_dereference(bond->curr_active_slave);
+       xs->xso.real_dev = slave->dev;
+       bond->xs = xs;
+
+       if (!(slave->dev->xfrmdev_ops
+             && slave->dev->xfrmdev_ops->xdo_dev_state_add)) {
+               slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n");
+               return -EINVAL;
+       }
+
+       return slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
+}
+
+/**
+ * bond_ipsec_del_sa - clear out this specific SA
+ * @xs: pointer to transformer state struct
+ **/
+static void bond_ipsec_del_sa(struct xfrm_state *xs)
+{
+       struct net_device *bond_dev = xs->xso.dev;
+       struct bonding *bond;
+       struct slave *slave;
+
+       if (!bond_dev)
+               return;
+
+       bond = netdev_priv(bond_dev);
+       slave = rcu_dereference(bond->curr_active_slave);
+
+       if (!slave)
+               return;
+
+       xs->xso.real_dev = slave->dev;
+
+       if (!(slave->dev->xfrmdev_ops
+             && slave->dev->xfrmdev_ops->xdo_dev_state_delete)) {
+               slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
+               return;
+       }
+
+       slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
+}
+
+/**
+ * bond_ipsec_offload_ok - can this packet use the xfrm hw offload
+ * @skb: current data packet
+ * @xs: pointer to transformer state struct
+ **/
+static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
+{
+       struct net_device *bond_dev = xs->xso.dev;
+       struct bonding *bond = netdev_priv(bond_dev);
+       struct slave *curr_active = rcu_dereference(bond->curr_active_slave);
+       struct net_device *slave_dev = curr_active->dev;
+
+       if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
+               return true;
+
+       if (!(slave_dev->xfrmdev_ops
+             && slave_dev->xfrmdev_ops->xdo_dev_offload_ok)) {
+               slave_warn(bond_dev, slave_dev, "%s: no slave xdo_dev_offload_ok\n", __func__);
+               return false;
+       }
+
+       xs->xso.real_dev = slave_dev;
+       return slave_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
+}
+
+static const struct xfrmdev_ops bond_xfrmdev_ops = {
+       .xdo_dev_state_add = bond_ipsec_add_sa,
+       .xdo_dev_state_delete = bond_ipsec_del_sa,
+       .xdo_dev_offload_ok = bond_ipsec_offload_ok,
+};
+#endif /* CONFIG_XFRM_OFFLOAD */
+
 /*------------------------------- Link status -------------------------------*/
 
 /* Set the carrier state for the master according to the state of its
@@ -878,6 +971,11 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
        if (old_active == new_active)
                return;
 
+#ifdef CONFIG_XFRM_OFFLOAD
+       if (old_active && bond->xs)
+               bond_ipsec_del_sa(bond->xs);
+#endif /* CONFIG_XFRM_OFFLOAD */
+
        if (new_active) {
                new_active->last_link_up = jiffies;
 
@@ -950,6 +1048,13 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
                }
        }
 
+#ifdef CONFIG_XFRM_OFFLOAD
+       if (new_active && bond->xs) {
+               xfrm_dev_state_flush(dev_net(bond->dev), bond->dev, true);
+               bond_ipsec_add_sa(bond->xs);
+       }
+#endif /* CONFIG_XFRM_OFFLOAD */
+
        /* resend IGMP joins since active slave has changed or
         * all were sent on curr_active_slave.
         * resend only if bond is brought up with the affected
@@ -1130,12 +1235,16 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
 #define BOND_MPLS_FEATURES     (NETIF_F_HW_CSUM | NETIF_F_SG | \
                                 NETIF_F_ALL_TSO)
 
+
 static void bond_compute_features(struct bonding *bond)
 {
        unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
                                        IFF_XMIT_DST_RELEASE_PERM;
        netdev_features_t vlan_features = BOND_VLAN_FEATURES;
        netdev_features_t enc_features  = BOND_ENC_FEATURES;
+#ifdef CONFIG_XFRM_OFFLOAD
+       netdev_features_t xfrm_features  = BOND_XFRM_FEATURES;
+#endif /* CONFIG_XFRM_OFFLOAD */
        netdev_features_t mpls_features  = BOND_MPLS_FEATURES;
        struct net_device *bond_dev = bond->dev;
        struct list_head *iter;
@@ -1157,6 +1266,12 @@ static void bond_compute_features(struct bonding *bond)
                                                         slave->dev->hw_enc_features,
                                                         BOND_ENC_FEATURES);
 
+#ifdef CONFIG_XFRM_OFFLOAD
+               xfrm_features = netdev_increment_features(xfrm_features,
+                                                         slave->dev->hw_enc_features,
+                                                         BOND_XFRM_FEATURES);
+#endif /* CONFIG_XFRM_OFFLOAD */
+
                mpls_features = netdev_increment_features(mpls_features,
                                                          slave->dev->mpls_features,
                                                          BOND_MPLS_FEATURES);
@@ -1176,6 +1291,9 @@ done:
                                    NETIF_F_HW_VLAN_CTAG_TX |
                                    NETIF_F_HW_VLAN_STAG_TX |
                                    NETIF_F_GSO_UDP_L4;
+#ifdef CONFIG_XFRM_OFFLOAD
+       bond_dev->hw_enc_features |= xfrm_features;
+#endif /* CONFIG_XFRM_OFFLOAD */
        bond_dev->mpls_features = mpls_features;
        bond_dev->gso_max_segs = gso_max_segs;
        netif_set_gso_max_size(bond_dev, gso_max_size);
@@ -1464,6 +1582,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
                slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n");
        }
 
+       if (slave_dev->features & NETIF_F_HW_ESP)
+               slave_dbg(bond_dev, slave_dev, "is esp-hw-offload capable\n");
+
        /* Old ifenslave binaries are no longer supported.  These can
         * be identified with moderate accuracy by the state of the slave:
         * the current ifenslave will set the interface down prior to
@@ -4540,6 +4661,12 @@ void bond_setup(struct net_device *bond_dev)
        bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
        bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
 
+#ifdef CONFIG_XFRM_OFFLOAD
+       /* set up xfrm device ops (only supported in active-backup right now) */
+       bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
+       bond->xs = NULL;
+#endif /* CONFIG_XFRM_OFFLOAD */
+
        /* don't acquire bond device's netif_tx_lock when transmitting */
        bond_dev->features |= NETIF_F_LLTX;
 
@@ -4558,8 +4685,16 @@ void bond_setup(struct net_device *bond_dev)
                                NETIF_F_HW_VLAN_CTAG_FILTER;
 
        bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
+#ifdef CONFIG_XFRM_OFFLOAD
+       bond_dev->hw_features |= BOND_XFRM_FEATURES;
+#endif /* CONFIG_XFRM_OFFLOAD */
        bond_dev->features |= bond_dev->hw_features;
        bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
+#ifdef CONFIG_XFRM_OFFLOAD
+       /* Disable XFRM features if this isn't an active-backup config */
+       if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
+               bond_dev->features &= ~BOND_XFRM_FEATURES;
+#endif /* CONFIG_XFRM_OFFLOAD */
 }
 
 /* Destroy a bonding device.
index ddb3916..9abfaae 100644 (file)
@@ -767,6 +767,14 @@ static int bond_option_mode_set(struct bonding *bond,
        if (newval->value == BOND_MODE_ALB)
                bond->params.tlb_dynamic_lb = 1;
 
+#ifdef CONFIG_XFRM_OFFLOAD
+       if (newval->value == BOND_MODE_ACTIVEBACKUP)
+               bond->dev->wanted_features |= BOND_XFRM_FEATURES;
+       else
+               bond->dev->wanted_features &= ~BOND_XFRM_FEATURES;
+       netdev_change_features(bond->dev);
+#endif /* CONFIG_XFRM_OFFLOAD */
+
        /* don't cache arp_validate between modes */
        bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
        bond->params.mode = newval->value;
index bbb2575..4a33ec4 100644 (file)
@@ -1006,7 +1006,7 @@ static void cfhsi_aggregation_tout(struct timer_list *t)
        cfhsi_start_tx(cfhsi);
 }
 
-static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct cfhsi *cfhsi = NULL;
        int start_xfer = 0;
@@ -1072,7 +1072,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
                spin_unlock_bh(&cfhsi->lock);
                if (aggregate_ready)
                        cfhsi_start_tx(cfhsi);
-               return 0;
+               return NETDEV_TX_OK;
        }
 
        /* Delete inactivity timer if started. */
@@ -1102,7 +1102,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
                        queue_work(cfhsi->wq, &cfhsi->wake_up_work);
        }
 
-       return 0;
+       return NETDEV_TX_OK;
 }
 
 static const struct net_device_ops cfhsi_netdevops;
index d737ceb..bcc14c5 100644 (file)
@@ -266,7 +266,7 @@ error:
        return tty_wr;
 }
 
-static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t caif_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct ser_device *ser;
 
index 63f2548..7d58996 100644 (file)
@@ -488,7 +488,7 @@ static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
        complete(&cfspi->comp);
 }
 
-static int cfspi_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t cfspi_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct cfspi *cfspi = NULL;
        unsigned long flags;
@@ -514,7 +514,7 @@ static int cfspi_xmit(struct sk_buff *skb, struct net_device *dev)
                cfspi->cfdev.flowctrl(cfspi->ndev, 0);
        }
 
-       return 0;
+       return NETDEV_TX_OK;
 }
 
 int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
index eb42682..80ea2e9 100644 (file)
@@ -519,7 +519,7 @@ err:
 }
 
 /* Put the CAIF packet on the virtio ring and kick the receiver */
-static int cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev)
 {
        struct cfv_info *cfv = netdev_priv(netdev);
        struct buf_info *buf_info;
index d08a3d5..6ad83a8 100644 (file)
@@ -146,7 +146,7 @@ struct pciefd_rx_dma {
        __le32 irq_status;
        __le32 sys_time_low;
        __le32 sys_time_high;
-       struct pucan_rx_msg msg[0];
+       struct pucan_rx_msg msg[];
 } __packed __aligned(4);
 
 /* Tx Link record */
@@ -194,7 +194,7 @@ struct pciefd_board {
        struct pci_dev *pci_dev;
        int can_count;
        spinlock_t cmd_lock;            /* 64-bits cmds must be atomic */
-       struct pciefd_can *can[0];      /* array of network devices */
+       struct pciefd_can *can[];       /* array of network devices */
 };
 
 /* supported device ids. */
index d0024cb..468b3c4 100644 (file)
@@ -70,6 +70,7 @@ config NET_DSA_QCA8K
 config NET_DSA_REALTEK_SMI
        tristate "Realtek SMI Ethernet switch family support"
        depends on NET_DSA
+       select NET_DSA_TAG_RTL4_A
        select FIXED_PHY
        select IRQ_DOMAIN
        select REALTEK_PHY
index 1df0584..6500179 100644 (file)
@@ -1037,7 +1037,8 @@ static void b53_force_link(struct b53_device *dev, int port, int link)
 }
 
 static void b53_force_port_config(struct b53_device *dev, int port,
-                                 int speed, int duplex, int pause)
+                                 int speed, int duplex,
+                                 bool tx_pause, bool rx_pause)
 {
        u8 reg, val, off;
 
@@ -1075,9 +1076,9 @@ static void b53_force_port_config(struct b53_device *dev, int port,
                return;
        }
 
-       if (pause & MLO_PAUSE_RX)
+       if (rx_pause)
                reg |= PORT_OVERRIDE_RX_FLOW;
-       if (pause & MLO_PAUSE_TX)
+       if (tx_pause)
                reg |= PORT_OVERRIDE_TX_FLOW;
 
        b53_write8(dev, B53_CTRL_PAGE, off, reg);
@@ -1089,22 +1090,24 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
        struct b53_device *dev = ds->priv;
        struct ethtool_eee *p = &dev->ports[port].eee;
        u8 rgmii_ctrl = 0, reg = 0, off;
-       int pause = 0;
+       bool tx_pause = false;
+       bool rx_pause = false;
 
        if (!phy_is_pseudo_fixed_link(phydev))
                return;
 
        /* Enable flow control on BCM5301x's CPU port */
        if (is5301x(dev) && port == dev->cpu_port)
-               pause = MLO_PAUSE_TXRX_MASK;
+               tx_pause = rx_pause = true;
 
        if (phydev->pause) {
                if (phydev->asym_pause)
-                       pause |= MLO_PAUSE_TX;
-               pause |= MLO_PAUSE_RX;
+                       tx_pause = true;
+               rx_pause = true;
        }
 
-       b53_force_port_config(dev, port, phydev->speed, phydev->duplex, pause);
+       b53_force_port_config(dev, port, phydev->speed, phydev->duplex,
+                             tx_pause, rx_pause);
        b53_force_link(dev, port, phydev->link);
 
        if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
@@ -1166,7 +1169,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
        } else if (is5301x(dev)) {
                if (port != dev->cpu_port) {
                        b53_force_port_config(dev, dev->cpu_port, 2000,
-                                             DUPLEX_FULL, MLO_PAUSE_TXRX_MASK);
+                                             DUPLEX_FULL, true, true);
                        b53_force_link(dev, dev->cpu_port, 1);
                }
        }
@@ -1251,15 +1254,9 @@ void b53_phylink_mac_config(struct dsa_switch *ds, int port,
 {
        struct b53_device *dev = ds->priv;
 
-       if (mode == MLO_AN_PHY)
+       if (mode == MLO_AN_PHY || mode == MLO_AN_FIXED)
                return;
 
-       if (mode == MLO_AN_FIXED) {
-               b53_force_port_config(dev, port, state->speed,
-                                     state->duplex, state->pause);
-               return;
-       }
-
        if ((phy_interface_mode_is_8023z(state->interface) ||
             state->interface == PHY_INTERFACE_MODE_SGMII) &&
             dev->ops->serdes_config)
@@ -1309,6 +1306,8 @@ void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
                return;
 
        if (mode == MLO_AN_FIXED) {
+               b53_force_port_config(dev, port, speed, duplex,
+                                     tx_pause, rx_pause);
                b53_force_link(dev, port, true);
                return;
        }
index f89f530..7abec8d 100644 (file)
@@ -145,42 +145,52 @@ static int b53_spi_read8(struct b53_device *dev, u8 page, u8 reg, u8 *val)
 
 static int b53_spi_read16(struct b53_device *dev, u8 page, u8 reg, u16 *val)
 {
-       int ret = b53_spi_read(dev, page, reg, (u8 *)val, 2);
+       __le16 value;
+       int ret;
+
+       ret = b53_spi_read(dev, page, reg, (u8 *)&value, 2);
 
        if (!ret)
-               *val = le16_to_cpu(*val);
+               *val = le16_to_cpu(value);
 
        return ret;
 }
 
 static int b53_spi_read32(struct b53_device *dev, u8 page, u8 reg, u32 *val)
 {
-       int ret = b53_spi_read(dev, page, reg, (u8 *)val, 4);
+       __le32 value;
+       int ret;
+
+       ret = b53_spi_read(dev, page, reg, (u8 *)&value, 4);
 
        if (!ret)
-               *val = le32_to_cpu(*val);
+               *val = le32_to_cpu(value);
 
        return ret;
 }
 
 static int b53_spi_read48(struct b53_device *dev, u8 page, u8 reg, u64 *val)
 {
+       __le64 value;
        int ret;
 
        *val = 0;
-       ret = b53_spi_read(dev, page, reg, (u8 *)val, 6);
+       ret = b53_spi_read(dev, page, reg, (u8 *)&value, 6);
        if (!ret)
-               *val = le64_to_cpu(*val);
+               *val = le64_to_cpu(value);
 
        return ret;
 }
 
 static int b53_spi_read64(struct b53_device *dev, u8 page, u8 reg, u64 *val)
 {
-       int ret = b53_spi_read(dev, page, reg, (u8 *)val, 8);
+       __le64 value;
+       int ret;
+
+       ret = b53_spi_read(dev, page, reg, (u8 *)&value, 8);
 
        if (!ret)
-               *val = le64_to_cpu(*val);
+               *val = le64_to_cpu(value);
 
        return ret;
 }
index c7ac63f..bafddb3 100644 (file)
@@ -558,16 +558,11 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
 {
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
        u32 id_mode_dis = 0, port_mode;
-       u32 reg, offset;
+       u32 reg;
 
        if (port == core_readl(priv, CORE_IMP0_PRT_ID))
                return;
 
-       if (priv->type == BCM7445_DEVICE_ID)
-               offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
-       else
-               offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
-
        switch (state->interface) {
        case PHY_INTERFACE_MODE_RGMII:
                id_mode_dis = 1;
@@ -582,8 +577,8 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
                port_mode = EXT_REVMII;
                break;
        default:
-               /* all other PHYs: internal and MoCA */
-               goto force_link;
+               /* Nothing required for all other PHYs: internal and MoCA */
+               return;
        }
 
        /* Clear id_mode_dis bit, and the existing port mode, let
@@ -592,38 +587,12 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
        reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
        reg &= ~ID_MODE_DIS;
        reg &= ~(PORT_MODE_MASK << PORT_MODE_SHIFT);
-       reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
 
        reg |= port_mode;
        if (id_mode_dis)
                reg |= ID_MODE_DIS;
 
-       if (state->pause & MLO_PAUSE_TXRX_MASK) {
-               if (state->pause & MLO_PAUSE_TX)
-                       reg |= TX_PAUSE_EN;
-               reg |= RX_PAUSE_EN;
-       }
-
        reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
-
-force_link:
-       /* Force link settings detected from the PHY */
-       reg = SW_OVERRIDE;
-       switch (state->speed) {
-       case SPEED_1000:
-               reg |= SPDSTS_1000 << SPEED_SHIFT;
-               break;
-       case SPEED_100:
-               reg |= SPDSTS_100 << SPEED_SHIFT;
-               break;
-       }
-
-       if (state->link)
-               reg |= LINK_STS;
-       if (state->duplex == DUPLEX_FULL)
-               reg |= DUPLX_MODE;
-
-       core_writel(priv, reg, offset);
 }
 
 static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
@@ -650,6 +619,20 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
                                     unsigned int mode,
                                     phy_interface_t interface)
 {
+       struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+       u32 reg, offset;
+
+       if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
+               if (priv->type == BCM7445_DEVICE_ID)
+                       offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
+               else
+                       offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
+
+               reg = core_readl(priv, offset);
+               reg &= ~LINK_STS;
+               core_writel(priv, reg, offset);
+       }
+
        bcm_sf2_sw_mac_link_set(ds, port, interface, false);
 }
 
@@ -662,9 +645,47 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
 {
        struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
        struct ethtool_eee *p = &priv->dev->ports[port].eee;
+       u32 reg, offset;
 
        bcm_sf2_sw_mac_link_set(ds, port, interface, true);
 
+       if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
+               if (priv->type == BCM7445_DEVICE_ID)
+                       offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
+               else
+                       offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
+
+               if (interface == PHY_INTERFACE_MODE_RGMII ||
+                   interface == PHY_INTERFACE_MODE_RGMII_TXID ||
+                   interface == PHY_INTERFACE_MODE_MII ||
+                   interface == PHY_INTERFACE_MODE_REVMII) {
+                       reg = reg_readl(priv, REG_RGMII_CNTRL_P(port));
+                       reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
+
+                       if (tx_pause)
+                               reg |= TX_PAUSE_EN;
+                       if (rx_pause)
+                               reg |= RX_PAUSE_EN;
+
+                       reg_writel(priv, reg, REG_RGMII_CNTRL_P(port));
+               }
+
+               reg = SW_OVERRIDE | LINK_STS;
+               switch (speed) {
+               case SPEED_1000:
+                       reg |= SPDSTS_1000 << SPEED_SHIFT;
+                       break;
+               case SPEED_100:
+                       reg |= SPDSTS_100 << SPEED_SHIFT;
+                       break;
+               }
+
+               if (duplex == DUPLEX_FULL)
+                       reg |= DUPLX_MODE;
+
+               core_writel(priv, reg, offset);
+       }
+
        if (mode == MLO_AN_PHY && phydev)
                p->eee_enabled = b53_eee_init(ds, port, phydev);
 }
@@ -1147,6 +1168,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
        set_bit(0, priv->cfp.used);
        set_bit(0, priv->cfp.unique);
 
+       /* Balance of_node_put() done by of_find_node_by_name() */
+       of_node_get(dn);
        ports = of_find_node_by_name(dn, "ports");
        if (ports) {
                bcm_sf2_identify_ports(priv, ports);
index f707edc..d82cee5 100644 (file)
@@ -128,12 +128,12 @@ static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
        return count;
 }
 
-static inline u32 udf_upper_bits(unsigned int num_udf)
+static inline u32 udf_upper_bits(int num_udf)
 {
        return GENMASK(num_udf - 1, 0) >> (UDFS_PER_SLICE - 1);
 }
 
-static inline u32 udf_lower_bits(unsigned int num_udf)
+static inline u32 udf_lower_bits(int num_udf)
 {
        return (u8)GENMASK(num_udf - 1, 0);
 }
@@ -348,8 +348,8 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
                                     unsigned int queue_num,
                                     struct ethtool_rx_flow_spec *fs)
 {
+       __be16 vlan_tci = 0, vlan_m_tci = htons(0xffff);
        struct ethtool_rx_flow_spec_input input = {};
-       __be16 vlan_tci = 0 , vlan_m_tci = 0xffff;
        const struct cfp_udf_layout *layout;
        unsigned int slice_num, rule_index;
        struct ethtool_rx_flow_rule *flow;
@@ -629,8 +629,8 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
                                     unsigned int queue_num,
                                     struct ethtool_rx_flow_spec *fs)
 {
+       __be16 vlan_tci = 0, vlan_m_tci = htons(0xffff);
        struct ethtool_rx_flow_spec_input input = {};
-       __be16 vlan_tci = 0, vlan_m_tci = 0xffff;
        unsigned int slice_num, rule_index[2];
        const struct cfp_udf_layout *layout;
        struct ethtool_rx_flow_rule *flow;
index 400207c..f8bc85a 100644 (file)
@@ -280,13 +280,11 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
        struct dsa_loop_pdata *pdata = mdiodev->dev.platform_data;
        struct dsa_loop_priv *ps;
        struct dsa_switch *ds;
+       int ret;
 
        if (!pdata)
                return -ENODEV;
 
-       dev_info(&mdiodev->dev, "%s: 0x%0x\n",
-                pdata->name, pdata->enabled_ports);
-
        ds = devm_kzalloc(&mdiodev->dev, sizeof(*ds), GFP_KERNEL);
        if (!ds)
                return -ENOMEM;
@@ -311,7 +309,12 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
 
        dev_set_drvdata(&mdiodev->dev, ds);
 
-       return dsa_register_switch(ds);
+       ret = dsa_register_switch(ds);
+       if (!ret)
+               dev_info(&mdiodev->dev, "%s: 0x%0x\n",
+                        pdata->name, pdata->enabled_ports);
+
+       return ret;
 }
 
 static void dsa_loop_drv_remove(struct mdio_device *mdiodev)
index cc17a44..aa1142d 100644 (file)
@@ -1042,7 +1042,7 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port,
                                struct phy_device *phydev)
 {
        struct lan9303 *chip = ds->priv;
-       int ctl, res;
+       int ctl;
 
        if (!phy_is_pseudo_fixed_link(phydev))
                return;
@@ -1063,15 +1063,14 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port,
        else
                ctl &= ~BMCR_FULLDPLX;
 
-       res =  lan9303_phy_write(ds, port, MII_BMCR, ctl);
+       lan9303_phy_write(ds, port, MII_BMCR, ctl);
 
        if (port == chip->phy_addr_base) {
                /* Virtual Phy: Remove Turbo 200Mbit mode */
                lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &ctl);
 
                ctl &= ~LAN9303_VIRT_SPECIAL_TURBO;
-               res =  regmap_write(chip->regmap,
-                                   LAN9303_VIRT_SPECIAL_CTRL, ctl);
+               regmap_write(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, ctl);
        }
 }
 
index 47d65b7..8f1d15e 100644 (file)
@@ -731,15 +731,6 @@ static void ksz8795_port_stp_state_set(struct dsa_switch *ds, int port,
 
        ksz_pwrite8(dev, port, P_STP_CTRL, data);
        p->stp_state = state;
-       if (data & PORT_RX_ENABLE)
-               dev->rx_ports |= BIT(port);
-       else
-               dev->rx_ports &= ~BIT(port);
-       if (data & PORT_TX_ENABLE)
-               dev->tx_ports |= BIT(port);
-       else
-               dev->tx_ports &= ~BIT(port);
-
        /* Port membership may share register with STP state. */
        if (member >= 0 && member != p->member)
                ksz8795_cfg_port_member(dev, port, (u8)member);
@@ -976,15 +967,8 @@ static void ksz8795_port_setup(struct ksz_device *dev, int port, bool cpu_port)
                p->phydev.duplex = 1;
 
                member = dev->port_mask;
-               dev->on_ports = dev->host_mask;
-               dev->live_ports = dev->host_mask;
        } else {
                member = dev->host_mask | p->vid_member;
-               dev->on_ports |= BIT(port);
-
-               /* Link was detected before port is enabled. */
-               if (p->phydev.link)
-                       dev->live_ports |= BIT(port);
        }
        ksz8795_cfg_port_member(dev, port, member);
 }
@@ -1111,9 +1095,8 @@ static const struct dsa_switch_ops ksz8795_switch_ops = {
        .setup                  = ksz8795_setup,
        .phy_read               = ksz_phy_read16,
        .phy_write              = ksz_phy_write16,
-       .adjust_link            = ksz_adjust_link,
+       .phylink_mac_link_down  = ksz_mac_link_down,
        .port_enable            = ksz_enable_port,
-       .port_disable           = ksz_disable_port,
        .get_strings            = ksz8795_get_strings,
        .get_ethtool_stats      = ksz_get_ethtool_stats,
        .get_sset_count         = ksz_sset_count,
@@ -1268,6 +1251,9 @@ static int ksz8795_switch_init(struct ksz_device *dev)
                        return -ENOMEM;
        }
 
+       /* set the real number of ports */
+       dev->ds->num_ports = dev->port_cnt;
+
        return 0;
 }
 
index 9a51b8a..687d442 100644 (file)
@@ -452,15 +452,6 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
        ksz_pwrite8(dev, port, P_STP_CTRL, data);
        p->stp_state = state;
        mutex_lock(&dev->dev_mutex);
-       if (data & PORT_RX_ENABLE)
-               dev->rx_ports |= (1 << port);
-       else
-               dev->rx_ports &= ~(1 << port);
-       if (data & PORT_TX_ENABLE)
-               dev->tx_ports |= (1 << port);
-       else
-               dev->tx_ports &= ~(1 << port);
-
        /* Port membership may share register with STP state. */
        if (member >= 0 && member != p->member)
                ksz9477_cfg_port_member(dev, port, (u8)member);
@@ -1268,18 +1259,10 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
                p->phydev.duplex = 1;
        }
        mutex_lock(&dev->dev_mutex);
-       if (cpu_port) {
+       if (cpu_port)
                member = dev->port_mask;
-               dev->on_ports = dev->host_mask;
-               dev->live_ports = dev->host_mask;
-       } else {
+       else
                member = dev->host_mask | p->vid_member;
-               dev->on_ports |= (1 << port);
-
-               /* Link was detected before port is enabled. */
-               if (p->phydev.link)
-                       dev->live_ports |= (1 << port);
-       }
        mutex_unlock(&dev->dev_mutex);
        ksz9477_cfg_port_member(dev, port, member);
 
@@ -1399,9 +1382,8 @@ static const struct dsa_switch_ops ksz9477_switch_ops = {
        .setup                  = ksz9477_setup,
        .phy_read               = ksz9477_phy_read16,
        .phy_write              = ksz9477_phy_write16,
-       .adjust_link            = ksz_adjust_link,
+       .phylink_mac_link_down  = ksz_mac_link_down,
        .port_enable            = ksz_enable_port,
-       .port_disable           = ksz_disable_port,
        .get_strings            = ksz9477_get_strings,
        .get_ethtool_stats      = ksz_get_ethtool_stats,
        .get_sset_count         = ksz_sset_count,
@@ -1588,6 +1570,9 @@ static int ksz9477_switch_init(struct ksz_device *dev)
                        return -ENOMEM;
        }
 
+       /* set the real number of ports */
+       dev->ds->num_ports = dev->port_cnt;
+
        return 0;
 }
 
index 7d050fa..7951f52 100644 (file)
@@ -79,6 +79,7 @@ MODULE_DEVICE_TABLE(i2c, ksz9477_i2c_id);
 static const struct of_device_id ksz9477_dt_ids[] = {
        { .compatible = "microchip,ksz9477" },
        { .compatible = "microchip,ksz9897" },
+       { .compatible = "microchip,ksz9893" },
        { .compatible = "microchip,ksz9567" },
        {},
 };
index fd1d667..74f2216 100644 (file)
@@ -135,26 +135,17 @@ int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
 }
 EXPORT_SYMBOL_GPL(ksz_phy_write16);
 
-void ksz_adjust_link(struct dsa_switch *ds, int port,
-                    struct phy_device *phydev)
+void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+                      phy_interface_t interface)
 {
        struct ksz_device *dev = ds->priv;
        struct ksz_port *p = &dev->ports[port];
 
        /* Read all MIB counters when the link is going down. */
-       if (!phydev->link) {
-               p->read = true;
-               schedule_delayed_work(&dev->mib_read, 0);
-       }
-       mutex_lock(&dev->dev_mutex);
-       if (!phydev->link)
-               dev->live_ports &= ~(1 << port);
-       else
-               /* Remember which port is connected and active. */
-               dev->live_ports |= (1 << port) & dev->on_ports;
-       mutex_unlock(&dev->dev_mutex);
+       p->read = true;
+       schedule_delayed_work(&dev->mib_read, 0);
 }
-EXPORT_SYMBOL_GPL(ksz_adjust_link);
+EXPORT_SYMBOL_GPL(ksz_mac_link_down);
 
 int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
 {
@@ -369,22 +360,6 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
 }
 EXPORT_SYMBOL_GPL(ksz_enable_port);
 
-void ksz_disable_port(struct dsa_switch *ds, int port)
-{
-       struct ksz_device *dev = ds->priv;
-
-       if (!dsa_is_user_port(ds, port))
-               return;
-
-       dev->on_ports &= ~(1 << port);
-       dev->live_ports &= ~(1 << port);
-
-       /* port_stp_state_set() will be called after to disable the port so
-        * there is no need to do anything.
-        */
-}
-EXPORT_SYMBOL_GPL(ksz_disable_port);
-
 struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
 {
        struct dsa_switch *ds;
index f2c9bb6..f7d92c1 100644 (file)
@@ -84,10 +84,6 @@ struct ksz_device {
        unsigned long mib_read_interval;
        u16 br_member;
        u16 member;
-       u16 live_ports;
-       u16 on_ports;                   /* ports enabled by DSA */
-       u16 rx_ports;
-       u16 tx_ports;
        u16 mirror_rx;
        u16 mirror_tx;
        u32 features;                   /* chip specific features */
@@ -159,8 +155,8 @@ void ksz_init_mib_timer(struct ksz_device *dev);
 
 int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg);
 int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val);
-void ksz_adjust_link(struct dsa_switch *ds, int port,
-                    struct phy_device *phydev);
+void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+                      phy_interface_t interface);
 int ksz_sset_count(struct dsa_switch *ds, int port, int sset);
 void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf);
 int ksz_port_bridge_join(struct dsa_switch *ds, int port,
@@ -179,7 +175,6 @@ void ksz_port_mdb_add(struct dsa_switch *ds, int port,
 int ksz_port_mdb_del(struct dsa_switch *ds, int port,
                     const struct switchdev_obj_port_mdb *mdb);
 int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
-void ksz_disable_port(struct dsa_switch *ds, int port);
 
 /* Common register access functions */
 
index 7627ea6..b8a3e8c 100644 (file)
@@ -1751,7 +1751,7 @@ static int mv88e6xxx_policy_insert(struct mv88e6xxx_chip *chip, int port,
        }
 
        if ((fs->flow_type & FLOW_EXT) && fs->m_ext.vlan_tci) {
-               if (fs->m_ext.vlan_tci != 0xffff)
+               if (fs->m_ext.vlan_tci != htons(0xffff))
                        return -EOPNOTSUPP;
                vid = be16_to_cpu(fs->h_ext.vlan_tci) & VLAN_VID_MASK;
        }
@@ -2693,6 +2693,35 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
        return mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_DEFAULT_VLAN, 0);
 }
 
+static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port)
+{
+       struct mv88e6xxx_chip *chip = ds->priv;
+
+       if (chip->info->ops->port_set_jumbo_size)
+               return 10240;
+       else if (chip->info->ops->set_max_frame_size)
+               return 1632;
+       return 1522;
+}
+
+static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+       struct mv88e6xxx_chip *chip = ds->priv;
+       int ret = 0;
+
+       mv88e6xxx_reg_lock(chip);
+       if (chip->info->ops->port_set_jumbo_size)
+               ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
+       else if (chip->info->ops->set_max_frame_size)
+               ret = chip->info->ops->set_max_frame_size(chip, new_mtu);
+       else
+               if (new_mtu > 1522)
+                       ret = -EINVAL;
+       mv88e6xxx_reg_unlock(chip);
+
+       return ret;
+}
+
 static int mv88e6xxx_port_enable(struct dsa_switch *ds, int port,
                                 struct phy_device *phydev)
 {
@@ -3425,6 +3454,7 @@ static const struct mv88e6xxx_ops mv88e6085_ops = {
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
        .phylink_validate = mv88e6185_phylink_validate,
+       .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
 };
 
 static const struct mv88e6xxx_ops mv88e6095_ops = {
@@ -3453,6 +3483,7 @@ static const struct mv88e6xxx_ops mv88e6095_ops = {
        .vtu_getnext = mv88e6185_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
        .phylink_validate = mv88e6185_phylink_validate,
+       .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
 };
 
 static const struct mv88e6xxx_ops mv88e6097_ops = {
@@ -3469,7 +3500,6 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
-       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_egress_rate_limiting = mv88e6095_port_egress_rate_limiting,
        .port_pause_limit = mv88e6097_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
@@ -3491,6 +3521,7 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
        .phylink_validate = mv88e6185_phylink_validate,
+       .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
 };
 
 static const struct mv88e6xxx_ops mv88e6123_ops = {
@@ -3525,6 +3556,7 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
        .vtu_getnext = mv88e6352_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
        .phylink_validate = mv88e6185_phylink_validate,
+       .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
 };
 
 static const struct mv88e6xxx_ops mv88e6131_ops = {
@@ -3914,6 +3946,7 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
        .vtu_getnext = mv88e6185_g1_vtu_getnext,
        .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
        .phylink_validate = mv88e6185_phylink_validate,
+       .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
 };
 
 static const struct mv88e6xxx_ops mv88e6190_ops = {
@@ -3934,6 +3967,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
+       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_pause_limit = mv88e6390_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
@@ -3992,6 +4026,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
        .port_set_frame_mode = mv88e6351_port_set_frame_mode,
        .port_set_egress_floods = mv88e6352_port_set_egress_floods,
        .port_set_ether_type = mv88e6351_port_set_ether_type,
+       .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
        .port_pause_limit = mv88e6390_port_pause_limit,
        .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
        .port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
@@ -5525,6 +5560,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
        .get_sset_count         = mv88e6xxx_get_sset_count,
        .port_enable            = mv88e6xxx_port_enable,
        .port_disable           = mv88e6xxx_port_disable,
+       .port_max_mtu           = mv88e6xxx_get_max_mtu,
+       .port_change_mtu        = mv88e6xxx_change_mtu,
        .get_mac_eee            = mv88e6xxx_get_mac_eee,
        .set_mac_eee            = mv88e6xxx_set_mac_eee,
        .get_eeprom_len         = mv88e6xxx_get_eeprom_len,
index e5430cf..2d70eac 100644 (file)
@@ -167,7 +167,7 @@ struct mv88e6xxx_irq {
        u16 masked;
        struct irq_chip chip;
        struct irq_domain *domain;
-       unsigned int nirqs;
+       int nirqs;
 };
 
 /* state flags for mv88e6xxx_port_hwtstamp::state */
@@ -552,6 +552,9 @@ struct mv88e6xxx_ops {
        void (*phylink_validate)(struct mv88e6xxx_chip *chip, int port,
                                 unsigned long *mask,
                                 struct phylink_link_state *state);
+
+       /* Max Frame Size */
+       int (*set_max_frame_size)(struct mv88e6xxx_chip *chip, int mtu);
 };
 
 struct mv88e6xxx_irq_ops {
@@ -654,7 +657,7 @@ static inline unsigned int mv88e6xxx_num_ports(struct mv88e6xxx_chip *chip)
 
 static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip)
 {
-       return GENMASK(mv88e6xxx_num_ports(chip) - 1, 0);
+       return GENMASK((s32)mv88e6xxx_num_ports(chip) - 1, 0);
 }
 
 static inline unsigned int mv88e6xxx_num_gpio(struct mv88e6xxx_chip *chip)
index ca3a7a7..f62aa83 100644 (file)
@@ -196,6 +196,23 @@ int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip)
        return mv88e6185_g1_wait_ppu_disabled(chip);
 }
 
+int mv88e6185_g1_set_max_frame_size(struct mv88e6xxx_chip *chip, int mtu)
+{
+       u16 val;
+       int err;
+
+       err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
+       if (err)
+               return err;
+
+       val &= ~MV88E6185_G1_CTL1_MAX_FRAME_1632;
+
+       if (mtu > 1518)
+               val |= MV88E6185_G1_CTL1_MAX_FRAME_1632;
+
+       return mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
+}
+
 /* Offset 0x10: IP-PRI Mapping Register 0
  * Offset 0x11: IP-PRI Mapping Register 1
  * Offset 0x12: IP-PRI Mapping Register 2
index 5324c6f..1e3546f 100644 (file)
@@ -282,6 +282,8 @@ int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
 int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
 int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
 
+int mv88e6185_g1_set_max_frame_size(struct mv88e6xxx_chip *chip, int mtu);
+
 int mv88e6xxx_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port);
 int mv88e6320_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port);
 int mv88e6390_g1_stats_snapshot(struct mv88e6xxx_chip *chip, int port);
index 8fd4830..75b227d 100644 (file)
@@ -876,19 +876,18 @@ static int mv88e6390_watchdog_setup(struct mv88e6xxx_chip *chip)
 
 static int mv88e6390_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
 {
-       int err;
        u16 reg;
 
        mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
                           MV88E6390_G2_WDOG_CTL_PTR_EVENT);
-       err = mv88e6xxx_g2_read(chip, MV88E6390_G2_WDOG_CTL, &reg);
+       mv88e6xxx_g2_read(chip, MV88E6390_G2_WDOG_CTL, &reg);
 
        dev_info(chip->dev, "Watchdog event: 0x%04x",
                 reg & MV88E6390_G2_WDOG_CTL_DATA_MASK);
 
        mv88e6xxx_g2_write(chip, MV88E6390_G2_WDOG_CTL,
                           MV88E6390_G2_WDOG_CTL_PTR_HISTORY);
-       err = mv88e6xxx_g2_read(chip, MV88E6390_G2_WDOG_CTL, &reg);
+       mv88e6xxx_g2_read(chip, MV88E6390_G2_WDOG_CTL, &reg);
 
        dev_info(chip->dev, "Watchdog history: 0x%04x",
                 reg & MV88E6390_G2_WDOG_CTL_DATA_MASK);
index 33b7b95..7c2c674 100644 (file)
@@ -44,7 +44,8 @@ static int mv88e6xxx_g2_scratch_write(struct mv88e6xxx_chip *chip, int reg,
 /**
  * mv88e6xxx_g2_scratch_gpio_get_bit - get a bit
  * @chip: chip private data
- * @nr: bit index
+ * @base_reg: base of scratch bits
+ * @offset: index of bit within the register
  * @set: is bit set?
  */
 static int mv88e6xxx_g2_scratch_get_bit(struct mv88e6xxx_chip *chip,
@@ -68,8 +69,9 @@ static int mv88e6xxx_g2_scratch_get_bit(struct mv88e6xxx_chip *chip,
 /**
  * mv88e6xxx_g2_scratch_gpio_set_bit - set (or clear) a bit
  * @chip: chip private data
- * @nr: bit index
- * @set: set if true, clear if false
+ * @base_reg: base of scratch bits
+ * @offset: index of bit within the register
+ * @set: should this bit be set?
  *
  * Helper function for dealing with the direction and data registers.
  */
@@ -165,6 +167,7 @@ static int mv88e6352_g2_scratch_gpio_get_dir(struct mv88e6xxx_chip *chip,
  * mv88e6352_g2_scratch_gpio_set_dir - set direction of gpio pin
  * @chip: chip private data
  * @pin: gpio index
+ * @input: should the gpio be an input, or an output?
  */
 static int mv88e6352_g2_scratch_gpio_set_dir(struct mv88e6xxx_chip *chip,
                                             unsigned int pin, bool input)
index a5b7cca..f121619 100644 (file)
@@ -4,11 +4,16 @@ config NET_DSA_MSCC_FELIX
        depends on NET_DSA && PCI
        depends on NET_VENDOR_MICROSEMI
        depends on NET_VENDOR_FREESCALE
-       select MSCC_OCELOT_SWITCH
+       depends on HAS_IOMEM
+       select MSCC_OCELOT_SWITCH_LIB
        select NET_DSA_TAG_OCELOT
        select FSL_ENETC_MDIO
        help
-         This driver supports the VSC9959 network switch, which is a member of
-         the Vitesse / Microsemi / Microchip Ocelot family of switching cores.
-         It is embedded as a PCIe function of the NXP LS1028A ENETC integrated
-         endpoint.
+         This driver supports network switches from the Vitesse /
+         Microsemi / Microchip Ocelot family of switching cores that are
+         connected to their host CPU via Ethernet.
+         The following switches are supported:
+         - VSC9959 (Felix): embedded as a PCIe function of the NXP LS1028A
+           ENETC integrated endpoint.
+         - VSC9953 (Seville): embedded as a platform device on the
+           NXP T1040 SoC.
index 37ad403..ec57a5a 100644 (file)
@@ -3,4 +3,5 @@ obj-$(CONFIG_NET_DSA_MSCC_FELIX) += mscc_felix.o
 
 mscc_felix-objs := \
        felix.o \
-       felix_vsc9959.o
+       felix_vsc9959.o \
+       seville_vsc9953.o
index 6664898..c69d959 100644 (file)
@@ -1,5 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright 2019 NXP Semiconductors
+ *
+ * This is an umbrella module for all network switches that are
+ * register-compatible with Ocelot and that perform I/O to their host CPU
+ * through an NPI (Node Processor Interface) Ethernet port.
  */
 #include <uapi/linux/if_bridge.h>
 #include <soc/mscc/ocelot_vcap.h>
@@ -9,6 +13,7 @@
 #include <soc/mscc/ocelot_ana.h>
 #include <soc/mscc/ocelot_ptp.h>
 #include <soc/mscc/ocelot.h>
+#include <linux/platform_device.h>
 #include <linux/packing.h>
 #include <linux/module.h>
 #include <linux/of_net.h>
@@ -59,6 +64,29 @@ static int felix_fdb_del(struct dsa_switch *ds, int port,
        return ocelot_fdb_del(ocelot, port, addr, vid);
 }
 
+/* This callback needs to be present */
+static int felix_mdb_prepare(struct dsa_switch *ds, int port,
+                            const struct switchdev_obj_port_mdb *mdb)
+{
+       return 0;
+}
+
+static void felix_mdb_add(struct dsa_switch *ds, int port,
+                         const struct switchdev_obj_port_mdb *mdb)
+{
+       struct ocelot *ocelot = ds->priv;
+
+       ocelot_port_mdb_add(ocelot, port, mdb);
+}
+
+static int felix_mdb_del(struct dsa_switch *ds, int port,
+                        const struct switchdev_obj_port_mdb *mdb)
+{
+       struct ocelot *ocelot = ds->priv;
+
+       return ocelot_port_mdb_del(ocelot, port, mdb);
+}
+
 static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
                                       u8 state)
 {
@@ -162,35 +190,10 @@ static void felix_phylink_validate(struct dsa_switch *ds, int port,
                                   struct phylink_link_state *state)
 {
        struct ocelot *ocelot = ds->priv;
-       struct ocelot_port *ocelot_port = ocelot->ports[port];
-       __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
-       if (state->interface != PHY_INTERFACE_MODE_NA &&
-           state->interface != ocelot_port->phy_mode) {
-               bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
-               return;
-       }
-
-       /* No half-duplex. */
-       phylink_set_port_modes(mask);
-       phylink_set(mask, Autoneg);
-       phylink_set(mask, Pause);
-       phylink_set(mask, Asym_Pause);
-       phylink_set(mask, 10baseT_Full);
-       phylink_set(mask, 100baseT_Full);
-       phylink_set(mask, 1000baseT_Full);
-
-       if (state->interface == PHY_INTERFACE_MODE_INTERNAL ||
-           state->interface == PHY_INTERFACE_MODE_2500BASEX ||
-           state->interface == PHY_INTERFACE_MODE_USXGMII) {
-               phylink_set(mask, 2500baseT_Full);
-               phylink_set(mask, 2500baseX_Full);
-       }
+       struct felix *felix = ocelot_to_felix(ocelot);
 
-       bitmap_and(supported, supported, mask,
-                  __ETHTOOL_LINK_MODE_MASK_NBITS);
-       bitmap_and(state->advertising, state->advertising, mask,
-                  __ETHTOOL_LINK_MODE_MASK_NBITS);
+       if (felix->info->phylink_validate)
+               felix->info->phylink_validate(ocelot, port, supported, state);
 }
 
 static int felix_phylink_mac_pcs_get_state(struct dsa_switch *ds, int port,
@@ -210,50 +213,10 @@ static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
                                     const struct phylink_link_state *state)
 {
        struct ocelot *ocelot = ds->priv;
-       struct ocelot_port *ocelot_port = ocelot->ports[port];
        struct felix *felix = ocelot_to_felix(ocelot);
-       u32 mac_fc_cfg;
-
-       /* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and
-        * PORT_RST bits in CLOCK_CFG
-        */
-       ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(state->speed),
-                          DEV_CLOCK_CFG);
-
-       /* Flow control. Link speed is only used here to evaluate the time
-        * specification in incoming pause frames.
-        */
-       mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(state->speed);
 
-       /* handle Rx pause in all cases, with 2500base-X this is used for rate
-        * adaptation.
-        */
-       mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA;
-
-       if (state->pause & MLO_PAUSE_TX)
-               mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA |
-                             SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
-                             SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
-                             SYS_MAC_FC_CFG_ZERO_PAUSE_ENA;
-       ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port);
-
-       ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
-
-       if (felix->info->pcs_init)
-               felix->info->pcs_init(ocelot, port, link_an_mode, state);
-
-       if (felix->info->port_sched_speed_set)
-               felix->info->port_sched_speed_set(ocelot, port,
-                                                 state->speed);
-}
-
-static void felix_phylink_mac_an_restart(struct dsa_switch *ds, int port)
-{
-       struct ocelot *ocelot = ds->priv;
-       struct felix *felix = ocelot_to_felix(ocelot);
-
-       if (felix->info->pcs_an_restart)
-               felix->info->pcs_an_restart(ocelot, port);
+       if (felix->info->pcs_config)
+               felix->info->pcs_config(ocelot, port, link_an_mode, state);
 }
 
 static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
@@ -264,8 +227,7 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
        struct ocelot_port *ocelot_port = ocelot->ports[port];
 
        ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG);
-       ocelot_rmw_rix(ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA,
-                      QSYS_SWITCH_PORT_MODE, port);
+       ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);
 }
 
 static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -277,8 +239,58 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
 {
        struct ocelot *ocelot = ds->priv;
        struct ocelot_port *ocelot_port = ocelot->ports[port];
+       struct felix *felix = ocelot_to_felix(ocelot);
+       u32 mac_fc_cfg;
+
+       /* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and
+        * PORT_RST bits in DEV_CLOCK_CFG. Note that the way this system is
+        * integrated is that the MAC speed is fixed and it's the PCS who is
+        * performing the rate adaptation, so we have to write "1000Mbps" into
+        * the LINK_SPEED field of DEV_CLOCK_CFG (which is also its default
+        * value).
+        */
+       ocelot_port_writel(ocelot_port,
+                          DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000),
+                          DEV_CLOCK_CFG);
+
+       switch (speed) {
+       case SPEED_10:
+               mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(3);
+               break;
+       case SPEED_100:
+               mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(2);
+               break;
+       case SPEED_1000:
+       case SPEED_2500:
+               mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(1);
+               break;
+       default:
+               dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n",
+                       port, speed);
+               return;
+       }
+
+       /* handle Rx pause in all cases, with 2500base-X this is used for rate
+        * adaptation.
+        */
+       mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA;
+
+       if (tx_pause)
+               mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA |
+                             SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
+                             SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
+                             SYS_MAC_FC_CFG_ZERO_PAUSE_ENA;
 
-       /* Enable MAC module */
+       /* Flow control. Link speed is only used here to evaluate the time
+        * specification in incoming pause frames.
+        */
+       ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port);
+
+       ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
+
+       /* Undo the effects of felix_phylink_mac_link_down:
+        * enable MAC module
+        */
        ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
                           DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
 
@@ -291,10 +303,15 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
                         ANA_PORT_PORT_CFG, port);
 
        /* Core: Enable port for frame transfer */
-       ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
-                        QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
-                        QSYS_SWITCH_PORT_MODE_PORT_ENA,
-                        QSYS_SWITCH_PORT_MODE, port);
+       ocelot_fields_write(ocelot, port,
+                           QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
+
+       if (felix->info->pcs_link_up)
+               felix->info->pcs_link_up(ocelot, port, link_an_mode, interface,
+                                        speed, duplex);
+
+       if (felix->info->port_sched_speed_set)
+               felix->info->port_sched_speed_set(ocelot, port, speed);
 }
 
 static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
@@ -417,7 +434,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
 {
        struct ocelot *ocelot = &felix->ocelot;
        phy_interface_t *port_phy_modes;
-       resource_size_t switch_base;
        struct resource res;
        int port, i, err;
 
@@ -448,9 +464,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
                return err;
        }
 
-       switch_base = pci_resource_start(felix->pdev,
-                                        felix->info->switch_pci_bar);
-
        for (i = 0; i < TARGET_MAX; i++) {
                struct regmap *target;
 
@@ -459,8 +472,8 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
 
                memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
                res.flags = IORESOURCE_MEM;
-               res.start += switch_base;
-               res.end += switch_base;
+               res.start += felix->switch_base;
+               res.end += felix->switch_base;
 
                target = ocelot_regmap_init(ocelot, &res);
                if (IS_ERR(target)) {
@@ -482,7 +495,8 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
 
        for (port = 0; port < num_phys_ports; port++) {
                struct ocelot_port *ocelot_port;
-               void __iomem *port_regs;
+               struct regmap *target;
+               u8 *template;
 
                ocelot_port = devm_kzalloc(ocelot->dev,
                                           sizeof(struct ocelot_port),
@@ -496,21 +510,34 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
 
                memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
                res.flags = IORESOURCE_MEM;
-               res.start += switch_base;
-               res.end += switch_base;
+               res.start += felix->switch_base;
+               res.end += felix->switch_base;
+
+               target = ocelot_regmap_init(ocelot, &res);
+               if (IS_ERR(target)) {
+                       dev_err(ocelot->dev,
+                               "Failed to map memory space for port %d\n",
+                               port);
+                       kfree(port_phy_modes);
+                       return PTR_ERR(target);
+               }
 
-               port_regs = devm_ioremap_resource(ocelot->dev, &res);
-               if (IS_ERR(port_regs)) {
+               template = devm_kzalloc(ocelot->dev, OCELOT_TAG_LEN,
+                                       GFP_KERNEL);
+               if (!template) {
                        dev_err(ocelot->dev,
-                               "failed to map registers for port %d\n", port);
+                               "Failed to allocate memory for DSA tag\n");
                        kfree(port_phy_modes);
-                       return PTR_ERR(port_regs);
+                       return -ENOMEM;
                }
 
                ocelot_port->phy_mode = port_phy_modes[port];
                ocelot_port->ocelot = ocelot;
-               ocelot_port->regs = port_regs;
+               ocelot_port->target = target;
+               ocelot_port->xmit_template = template;
                ocelot->ports[port] = ocelot_port;
+
+               felix->info->xmit_template_populate(ocelot, port);
        }
 
        kfree(port_phy_modes);
@@ -723,9 +750,7 @@ static int felix_port_policer_add(struct dsa_switch *ds, int port,
        struct ocelot *ocelot = ds->priv;
        struct ocelot_policer pol = {
                .rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8,
-               .burst = div_u64(policer->rate_bytes_per_sec *
-                                PSCHED_NS2TICKS(policer->burst),
-                                PSCHED_TICKS_PER_SEC),
+               .burst = policer->burst,
        };
 
        return ocelot_port_policer_add(ocelot, port, &pol);
@@ -751,7 +776,7 @@ static int felix_port_setup_tc(struct dsa_switch *ds, int port,
                return -EOPNOTSUPP;
 }
 
-static const struct dsa_switch_ops felix_switch_ops = {
+const struct dsa_switch_ops felix_switch_ops = {
        .get_tag_protocol       = felix_get_tag_protocol,
        .setup                  = felix_setup,
        .teardown               = felix_teardown,
@@ -763,7 +788,6 @@ static const struct dsa_switch_ops felix_switch_ops = {
        .phylink_validate       = felix_phylink_validate,
        .phylink_mac_link_state = felix_phylink_mac_pcs_get_state,
        .phylink_mac_config     = felix_phylink_mac_config,
-       .phylink_mac_an_restart = felix_phylink_mac_an_restart,
        .phylink_mac_link_down  = felix_phylink_mac_link_down,
        .phylink_mac_link_up    = felix_phylink_mac_link_up,
        .port_enable            = felix_port_enable,
@@ -771,6 +795,9 @@ static const struct dsa_switch_ops felix_switch_ops = {
        .port_fdb_dump          = felix_fdb_dump,
        .port_fdb_add           = felix_fdb_add,
        .port_fdb_del           = felix_fdb_del,
+       .port_mdb_prepare       = felix_mdb_prepare,
+       .port_mdb_add           = felix_mdb_add,
+       .port_mdb_del           = felix_mdb_del,
        .port_bridge_join       = felix_bridge_join,
        .port_bridge_leave      = felix_bridge_leave,
        .port_stp_state_set     = felix_bridge_stp_state_set,
@@ -792,149 +819,28 @@ static const struct dsa_switch_ops felix_switch_ops = {
        .port_setup_tc          = felix_port_setup_tc,
 };
 
-static struct felix_info *felix_instance_tbl[] = {
-       [FELIX_INSTANCE_VSC9959] = &felix_info_vsc9959,
-};
-
-static irqreturn_t felix_irq_handler(int irq, void *data)
-{
-       struct ocelot *ocelot = (struct ocelot *)data;
-
-       /* The INTB interrupt is used for both PTP TX timestamp interrupt
-        * and preemption status change interrupt on each port.
-        *
-        * - Get txtstamp if have
-        * - TODO: handle preemption. Without handling it, driver may get
-        *   interrupt storm.
-        */
-
-       ocelot_get_txtstamp(ocelot);
-
-       return IRQ_HANDLED;
-}
-
-static int felix_pci_probe(struct pci_dev *pdev,
-                          const struct pci_device_id *id)
+static int __init felix_init(void)
 {
-       enum felix_instance instance = id->driver_data;
-       struct dsa_switch *ds;
-       struct ocelot *ocelot;
-       struct felix *felix;
        int err;
 
-       if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
-               dev_info(&pdev->dev, "device is disabled, skipping\n");
-               return -ENODEV;
-       }
-
-       err = pci_enable_device(pdev);
-       if (err) {
-               dev_err(&pdev->dev, "device enable failed\n");
-               goto err_pci_enable;
-       }
-
-       /* set up for high or low dma */
-       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-       if (err) {
-               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-               if (err) {
-                       dev_err(&pdev->dev,
-                               "DMA configuration failed: 0x%x\n", err);
-                       goto err_dma;
-               }
-       }
-
-       felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
-       if (!felix) {
-               err = -ENOMEM;
-               dev_err(&pdev->dev, "Failed to allocate driver memory\n");
-               goto err_alloc_felix;
-       }
-
-       pci_set_drvdata(pdev, felix);
-       ocelot = &felix->ocelot;
-       ocelot->dev = &pdev->dev;
-       felix->pdev = pdev;
-       felix->info = felix_instance_tbl[instance];
-
-       pci_set_master(pdev);
-
-       err = devm_request_threaded_irq(&pdev->dev, pdev->irq, NULL,
-                                       &felix_irq_handler, IRQF_ONESHOT,
-                                       "felix-intb", ocelot);
-       if (err) {
-               dev_err(&pdev->dev, "Failed to request irq\n");
-               goto err_alloc_irq;
-       }
-
-       ocelot->ptp = 1;
-
-       ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
-       if (!ds) {
-               err = -ENOMEM;
-               dev_err(&pdev->dev, "Failed to allocate DSA switch\n");
-               goto err_alloc_ds;
-       }
-
-       ds->dev = &pdev->dev;
-       ds->num_ports = felix->info->num_ports;
-       ds->num_tx_queues = felix->info->num_tx_queues;
-       ds->ops = &felix_switch_ops;
-       ds->priv = ocelot;
-       felix->ds = ds;
+       err = pci_register_driver(&felix_vsc9959_pci_driver);
+       if (err)
+               return err;
 
-       err = dsa_register_switch(ds);
-       if (err) {
-               dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
-               goto err_register_ds;
-       }
+       err = platform_driver_register(&seville_vsc9953_driver);
+       if (err)
+               return err;
 
        return 0;
-
-err_register_ds:
-       kfree(ds);
-err_alloc_ds:
-err_alloc_irq:
-err_alloc_felix:
-       kfree(felix);
-err_dma:
-       pci_disable_device(pdev);
-err_pci_enable:
-       return err;
 }
+module_init(felix_init);
 
-static void felix_pci_remove(struct pci_dev *pdev)
+static void __exit felix_exit(void)
 {
-       struct felix *felix;
-
-       felix = pci_get_drvdata(pdev);
-
-       dsa_unregister_switch(felix->ds);
-
-       kfree(felix->ds);
-       kfree(felix);
-
-       pci_disable_device(pdev);
+       pci_unregister_driver(&felix_vsc9959_pci_driver);
+       platform_driver_unregister(&seville_vsc9953_driver);
 }
-
-static struct pci_device_id felix_ids[] = {
-       {
-               /* NXP LS1028A */
-               PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0xEEF0),
-               .driver_data = FELIX_INSTANCE_VSC9959,
-       },
-       { 0, }
-};
-MODULE_DEVICE_TABLE(pci, felix_ids);
-
-static struct pci_driver felix_pci_driver = {
-       .name           = KBUILD_MODNAME,
-       .id_table       = felix_ids,
-       .probe          = felix_pci_probe,
-       .remove         = felix_pci_remove,
-};
-
-module_pci_driver(felix_pci_driver);
+module_exit(felix_exit);
 
 MODULE_DESCRIPTION("Felix Switch driver");
 MODULE_LICENSE("GPL v2");
index a891736..98f1462 100644 (file)
@@ -28,34 +28,51 @@ struct felix_info {
        int                             imdio_pci_bar;
        int     (*mdio_bus_alloc)(struct ocelot *ocelot);
        void    (*mdio_bus_free)(struct ocelot *ocelot);
-       void    (*pcs_init)(struct ocelot *ocelot, int port,
-                           unsigned int link_an_mode,
-                           const struct phylink_link_state *state);
-       void    (*pcs_an_restart)(struct ocelot *ocelot, int port);
+       void    (*pcs_config)(struct ocelot *ocelot, int port,
+                             unsigned int link_an_mode,
+                             const struct phylink_link_state *state);
+       void    (*pcs_link_up)(struct ocelot *ocelot, int port,
+                              unsigned int link_an_mode,
+                              phy_interface_t interface,
+                              int speed, int duplex);
        void    (*pcs_link_state)(struct ocelot *ocelot, int port,
                                  struct phylink_link_state *state);
+       void    (*phylink_validate)(struct ocelot *ocelot, int port,
+                                   unsigned long *supported,
+                                   struct phylink_link_state *state);
        int     (*prevalidate_phy_mode)(struct ocelot *ocelot, int port,
                                        phy_interface_t phy_mode);
        int     (*port_setup_tc)(struct dsa_switch *ds, int port,
                                 enum tc_setup_type type, void *type_data);
        void    (*port_sched_speed_set)(struct ocelot *ocelot, int port,
                                        u32 speed);
+       void    (*xmit_template_populate)(struct ocelot *ocelot, int port);
 };
 
-extern struct felix_info               felix_info_vsc9959;
-
-enum felix_instance {
-       FELIX_INSTANCE_VSC9959          = 0,
-};
+extern const struct dsa_switch_ops felix_switch_ops;
+extern struct pci_driver felix_vsc9959_pci_driver;
+extern struct platform_driver seville_vsc9953_driver;
 
 /* DSA glue / front-end for struct ocelot */
 struct felix {
        struct dsa_switch               *ds;
-       struct pci_dev                  *pdev;
-       struct felix_info               *info;
+       const struct felix_info         *info;
        struct ocelot                   ocelot;
        struct mii_bus                  *imdio;
        struct phy_device               **pcs;
+       resource_size_t                 switch_base;
+       resource_size_t                 imdio_base;
 };
 
+void vsc9959_pcs_link_state(struct ocelot *ocelot, int port,
+                           struct phylink_link_state *state);
+void vsc9959_pcs_config(struct ocelot *ocelot, int port,
+                       unsigned int link_an_mode,
+                       const struct phylink_link_state *state);
+void vsc9959_pcs_link_up(struct ocelot *ocelot, int port,
+                        unsigned int link_an_mode,
+                        phy_interface_t interface,
+                        int speed, int duplex);
+void vsc9959_mdio_bus_free(struct ocelot *ocelot);
+
 #endif
index 1dd9e34..9b720c8 100644 (file)
@@ -8,37 +8,18 @@
 #include <soc/mscc/ocelot_ptp.h>
 #include <soc/mscc/ocelot_sys.h>
 #include <soc/mscc/ocelot.h>
+#include <linux/packing.h>
 #include <net/pkt_sched.h>
 #include <linux/iopoll.h>
+#include <linux/mdio.h>
 #include <linux/pci.h>
 #include "felix.h"
 
 #define VSC9959_VCAP_IS2_CNT           1024
 #define VSC9959_VCAP_IS2_ENTRY_WIDTH   376
 #define VSC9959_VCAP_PORT_CNT          6
-
-/* TODO: should find a better place for these */
-#define USXGMII_BMCR_RESET             BIT(15)
-#define USXGMII_BMCR_AN_EN             BIT(12)
-#define USXGMII_BMCR_RST_AN            BIT(9)
-#define USXGMII_BMSR_LNKS(status)      (((status) & GENMASK(2, 2)) >> 2)
-#define USXGMII_BMSR_AN_CMPL(status)   (((status) & GENMASK(5, 5)) >> 5)
-#define USXGMII_ADVERTISE_LNKS(x)      (((x) << 15) & BIT(15))
-#define USXGMII_ADVERTISE_FDX          BIT(12)
-#define USXGMII_ADVERTISE_SPEED(x)     (((x) << 9) & GENMASK(11, 9))
-#define USXGMII_LPA_LNKS(lpa)          ((lpa) >> 15)
-#define USXGMII_LPA_DUPLEX(lpa)                (((lpa) & GENMASK(12, 12)) >> 12)
-#define USXGMII_LPA_SPEED(lpa)         (((lpa) & GENMASK(11, 9)) >> 9)
-
 #define VSC9959_TAS_GCL_ENTRY_MAX      63
 
-enum usxgmii_speed {
-       USXGMII_SPEED_10        = 0,
-       USXGMII_SPEED_100       = 1,
-       USXGMII_SPEED_1000      = 2,
-       USXGMII_SPEED_2500      = 4,
-};
-
 static const u32 vsc9959_ana_regmap[] = {
        REG(ANA_ADVLEARN,                       0x0089a0),
        REG(ANA_VLANMASK,                       0x0089a4),
@@ -329,7 +310,49 @@ static const u32 vsc9959_gcb_regmap[] = {
        REG(GCB_SOFT_RST,                       0x000004),
 };
 
-static const u32 *vsc9959_regmap[] = {
+static const u32 vsc9959_dev_gmii_regmap[] = {
+       REG(DEV_CLOCK_CFG,                      0x0),
+       REG(DEV_PORT_MISC,                      0x4),
+       REG(DEV_EVENTS,                         0x8),
+       REG(DEV_EEE_CFG,                        0xc),
+       REG(DEV_RX_PATH_DELAY,                  0x10),
+       REG(DEV_TX_PATH_DELAY,                  0x14),
+       REG(DEV_PTP_PREDICT_CFG,                0x18),
+       REG(DEV_MAC_ENA_CFG,                    0x1c),
+       REG(DEV_MAC_MODE_CFG,                   0x20),
+       REG(DEV_MAC_MAXLEN_CFG,                 0x24),
+       REG(DEV_MAC_TAGS_CFG,                   0x28),
+       REG(DEV_MAC_ADV_CHK_CFG,                0x2c),
+       REG(DEV_MAC_IFG_CFG,                    0x30),
+       REG(DEV_MAC_HDX_CFG,                    0x34),
+       REG(DEV_MAC_DBG_CFG,                    0x38),
+       REG(DEV_MAC_FC_MAC_LOW_CFG,             0x3c),
+       REG(DEV_MAC_FC_MAC_HIGH_CFG,            0x40),
+       REG(DEV_MAC_STICKY,                     0x44),
+       REG_RESERVED(PCS1G_CFG),
+       REG_RESERVED(PCS1G_MODE_CFG),
+       REG_RESERVED(PCS1G_SD_CFG),
+       REG_RESERVED(PCS1G_ANEG_CFG),
+       REG_RESERVED(PCS1G_ANEG_NP_CFG),
+       REG_RESERVED(PCS1G_LB_CFG),
+       REG_RESERVED(PCS1G_DBG_CFG),
+       REG_RESERVED(PCS1G_CDET_CFG),
+       REG_RESERVED(PCS1G_ANEG_STATUS),
+       REG_RESERVED(PCS1G_ANEG_NP_STATUS),
+       REG_RESERVED(PCS1G_LINK_STATUS),
+       REG_RESERVED(PCS1G_LINK_DOWN_CNT),
+       REG_RESERVED(PCS1G_STICKY),
+       REG_RESERVED(PCS1G_DEBUG_STATUS),
+       REG_RESERVED(PCS1G_LPI_CFG),
+       REG_RESERVED(PCS1G_LPI_WAKE_ERROR_CNT),
+       REG_RESERVED(PCS1G_LPI_STATUS),
+       REG_RESERVED(PCS1G_TSTPAT_MODE_CFG),
+       REG_RESERVED(PCS1G_TSTPAT_STATUS),
+       REG_RESERVED(DEV_PCS_FX100_CFG),
+       REG_RESERVED(DEV_PCS_FX100_STATUS),
+};
+
+static const u32 *vsc9959_regmap[TARGET_MAX] = {
        [ANA]   = vsc9959_ana_regmap,
        [QS]    = vsc9959_qs_regmap,
        [QSYS]  = vsc9959_qsys_regmap,
@@ -338,10 +361,11 @@ static const u32 *vsc9959_regmap[] = {
        [S2]    = vsc9959_s2_regmap,
        [PTP]   = vsc9959_ptp_regmap,
        [GCB]   = vsc9959_gcb_regmap,
+       [DEV_GMII] = vsc9959_dev_gmii_regmap,
 };
 
 /* Addresses are relative to the PCI device's base address */
-static const struct resource vsc9959_target_io_res[] = {
+static const struct resource vsc9959_target_io_res[TARGET_MAX] = {
        [ANA] = {
                .start  = 0x0280000,
                .end    = 0x028ffff,
@@ -426,7 +450,7 @@ static const struct resource vsc9959_imdio_res = {
        .name           = "imdio",
 };
 
-static const struct reg_field vsc9959_regfields[] = {
+static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
        [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6),
        [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 5),
        [ANA_ANEVENTS_FLOOD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 30, 30),
@@ -460,6 +484,20 @@ static const struct reg_field vsc9959_regfields[] = {
        [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 10),
        [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 0, 0),
        [GCB_SOFT_RST_SWC_RST] = REG_FIELD(GCB_SOFT_RST, 0, 0),
+       /* Replicated per number of ports (7), register size 4 per port */
+       [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 7, 4),
+       [QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 11, 13, 7, 4),
+       [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 7, 4),
+       [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 7, 4),
+       [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 7, 4),
+       [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 7, 4),
+       [SYS_PORT_MODE_DATA_WO_TS] = REG_FIELD_ID(SYS_PORT_MODE, 5, 6, 7, 4),
+       [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 7, 4),
+       [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 7, 4),
+       [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 7, 4),
+       [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 10, 18, 7, 4),
+       [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 9, 7, 4),
+       [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 7, 4),
 };
 
 static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
@@ -557,7 +595,7 @@ static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
        { .offset = 0x111,      .name = "drop_green_prio_7", },
 };
 
-struct vcap_field vsc9959_vcap_is2_keys[] = {
+static struct vcap_field vsc9959_vcap_is2_keys[] = {
        /* Common: 41 bits */
        [VCAP_IS2_TYPE]                         = {  0,   4},
        [VCAP_IS2_HK_FIRST]                     = {  4,   1},
@@ -637,7 +675,7 @@ struct vcap_field vsc9959_vcap_is2_keys[] = {
        [VCAP_IS2_HK_OAM_IS_Y1731]              = {182,   1},
 };
 
-struct vcap_field vsc9959_vcap_is2_actions[] = {
+static struct vcap_field vsc9959_vcap_is2_actions[] = {
        [VCAP_IS2_ACT_HIT_ME_ONCE]              = {  0,  1},
        [VCAP_IS2_ACT_CPU_COPY_ENA]             = {  1,  1},
        [VCAP_IS2_ACT_CPU_QU_NUM]               = {  2,  3},
@@ -728,20 +766,74 @@ static int vsc9959_reset(struct ocelot *ocelot)
        return 0;
 }
 
-static void vsc9959_pcs_an_restart_sgmii(struct phy_device *pcs)
+/* We enable SGMII AN only when the PHY has managed = "in-band-status" in the
+ * device tree. If we are in MLO_AN_PHY mode, we program directly state->speed
+ * into the PCS, which is retrieved out-of-band over MDIO. This also has the
+ * benefit of working with SGMII fixed-links, like downstream switches, where
+ * both link partners attempt to operate as AN slaves and therefore AN never
+ * completes.  But it also has the disadvantage that some PHY chips don't pass
+ * traffic if SGMII AN is enabled but not completed (acknowledged by us), so
+ * setting MLO_AN_INBAND is actually required for those.
+ */
+static void vsc9959_pcs_config_sgmii(struct phy_device *pcs,
+                                    unsigned int link_an_mode,
+                                    const struct phylink_link_state *state)
 {
-       phy_set_bits(pcs, MII_BMCR, BMCR_ANRESTART);
+       int bmsr, bmcr;
+
+       /* Some PHYs like VSC8234 don't like it when AN restarts on
+        * their system  side and they restart line side AN too, going
+        * into an endless link up/down loop.  Don't restart PCS AN if
+        * link is up already.
+        * We do check that AN is enabled just in case this is the 1st
+        * call, PCS detects a carrier but AN is disabled from power on
+        * or by boot loader.
+        */
+       bmcr = phy_read(pcs, MII_BMCR);
+       if (bmcr < 0)
+               return;
+
+       bmsr = phy_read(pcs, MII_BMSR);
+       if (bmsr < 0)
+               return;
+
+       if ((bmcr & BMCR_ANENABLE) && (bmsr & BMSR_LSTATUS))
+               return;
+
+       /* SGMII spec requires tx_config_Reg[15:0] to be exactly 0x4001
+        * for the MAC PCS in order to acknowledge the AN.
+        */
+       phy_write(pcs, MII_ADVERTISE, ADVERTISE_SGMII |
+                                     ADVERTISE_LPACK);
+
+       phy_write(pcs, ENETC_PCS_IF_MODE,
+                 ENETC_PCS_IF_MODE_SGMII_EN |
+                 ENETC_PCS_IF_MODE_USE_SGMII_AN);
+
+       /* Adjust link timer for SGMII */
+       phy_write(pcs, ENETC_PCS_LINK_TIMER1,
+                 ENETC_PCS_LINK_TIMER1_VAL);
+       phy_write(pcs, ENETC_PCS_LINK_TIMER2,
+                 ENETC_PCS_LINK_TIMER2_VAL);
+
+       phy_set_bits(pcs, MII_BMCR, BMCR_ANENABLE);
 }
 
-static void vsc9959_pcs_an_restart_usxgmii(struct phy_device *pcs)
+static void vsc9959_pcs_config_usxgmii(struct phy_device *pcs,
+                                      unsigned int link_an_mode,
+                                      const struct phylink_link_state *state)
 {
-       phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_BMCR,
-                     USXGMII_BMCR_RESET |
-                     USXGMII_BMCR_AN_EN |
-                     USXGMII_BMCR_RST_AN);
+       /* Configure device ability for the USXGMII Replicator */
+       phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_ADVERTISE,
+                     MDIO_USXGMII_2500FULL |
+                     MDIO_USXGMII_LINK |
+                     ADVERTISE_SGMII |
+                     ADVERTISE_LPACK);
 }
 
-static void vsc9959_pcs_an_restart(struct ocelot *ocelot, int port)
+void vsc9959_pcs_config(struct ocelot *ocelot, int port,
+                       unsigned int link_an_mode,
+                       const struct phylink_link_state *state)
 {
        struct felix *felix = ocelot_to_felix(ocelot);
        struct phy_device *pcs = felix->pcs[port];
@@ -749,107 +841,76 @@ static void vsc9959_pcs_an_restart(struct ocelot *ocelot, int port)
        if (!pcs)
                return;
 
+       /* The PCS does not implement the BMSR register fully, so capability
+        * detection via genphy_read_abilities does not work. Since we can get
+        * the PHY config word from the LPA register though, there is still
+        * value in using the generic phy_resolve_aneg_linkmode function. So
+        * populate the supported and advertising link modes manually here.
+        */
+       linkmode_set_bit_array(phy_basic_ports_array,
+                              ARRAY_SIZE(phy_basic_ports_array),
+                              pcs->supported);
+       linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, pcs->supported);
+       linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, pcs->supported);
+       linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, pcs->supported);
+       linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, pcs->supported);
+       linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, pcs->supported);
+       linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, pcs->supported);
+       if (pcs->interface == PHY_INTERFACE_MODE_2500BASEX ||
+           pcs->interface == PHY_INTERFACE_MODE_USXGMII)
+               linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+                                pcs->supported);
+       if (pcs->interface != PHY_INTERFACE_MODE_2500BASEX)
+               linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+                                pcs->supported);
+       phy_advertise_supported(pcs);
+
+       if (!phylink_autoneg_inband(link_an_mode))
+               return;
+
        switch (pcs->interface) {
        case PHY_INTERFACE_MODE_SGMII:
        case PHY_INTERFACE_MODE_QSGMII:
-               vsc9959_pcs_an_restart_sgmii(pcs);
+               vsc9959_pcs_config_sgmii(pcs, link_an_mode, state);
+               break;
+       case PHY_INTERFACE_MODE_2500BASEX:
+               phydev_err(pcs, "AN not supported on 3.125GHz SerDes lane\n");
                break;
        case PHY_INTERFACE_MODE_USXGMII:
-               vsc9959_pcs_an_restart_usxgmii(pcs);
+               vsc9959_pcs_config_usxgmii(pcs, link_an_mode, state);
                break;
        default:
-               dev_err(ocelot->dev, "Invalid PCS interface type %s\n",
+               dev_err(ocelot->dev, "Unsupported link mode %s\n",
                        phy_modes(pcs->interface));
-               break;
        }
 }
 
-/* We enable SGMII AN only when the PHY has managed = "in-band-status" in the
- * device tree. If we are in MLO_AN_PHY mode, we program directly state->speed
- * into the PCS, which is retrieved out-of-band over MDIO. This also has the
- * benefit of working with SGMII fixed-links, like downstream switches, where
- * both link partners attempt to operate as AN slaves and therefore AN never
- * completes.  But it also has the disadvantage that some PHY chips don't pass
- * traffic if SGMII AN is enabled but not completed (acknowledged by us), so
- * setting MLO_AN_INBAND is actually required for those.
- */
-static void vsc9959_pcs_init_sgmii(struct phy_device *pcs,
-                                  unsigned int link_an_mode,
-                                  const struct phylink_link_state *state)
+static void vsc9959_pcs_link_up_sgmii(struct phy_device *pcs,
+                                     unsigned int link_an_mode,
+                                     int speed, int duplex)
 {
-       if (link_an_mode == MLO_AN_INBAND) {
-               int bmsr, bmcr;
-
-               /* Some PHYs like VSC8234 don't like it when AN restarts on
-                * their system  side and they restart line side AN too, going
-                * into an endless link up/down loop.  Don't restart PCS AN if
-                * link is up already.
-                * We do check that AN is enabled just in case this is the 1st
-                * call, PCS detects a carrier but AN is disabled from power on
-                * or by boot loader.
-                */
-               bmcr = phy_read(pcs, MII_BMCR);
-               if (bmcr < 0)
-                       return;
-
-               bmsr = phy_read(pcs, MII_BMSR);
-               if (bmsr < 0)
-                       return;
-
-               if ((bmcr & BMCR_ANENABLE) && (bmsr & BMSR_LSTATUS))
-                       return;
-
-               /* SGMII spec requires tx_config_Reg[15:0] to be exactly 0x4001
-                * for the MAC PCS in order to acknowledge the AN.
-                */
-               phy_write(pcs, MII_ADVERTISE, ADVERTISE_SGMII |
-                                             ADVERTISE_LPACK);
-
-               phy_write(pcs, ENETC_PCS_IF_MODE,
-                         ENETC_PCS_IF_MODE_SGMII_EN |
-                         ENETC_PCS_IF_MODE_USE_SGMII_AN);
-
-               /* Adjust link timer for SGMII */
-               phy_write(pcs, ENETC_PCS_LINK_TIMER1,
-                         ENETC_PCS_LINK_TIMER1_VAL);
-               phy_write(pcs, ENETC_PCS_LINK_TIMER2,
-                         ENETC_PCS_LINK_TIMER2_VAL);
-
-               phy_write(pcs, MII_BMCR, BMCR_ANRESTART | BMCR_ANENABLE);
-       } else {
-               int speed;
-
-               if (state->duplex == DUPLEX_HALF) {
-                       phydev_err(pcs, "Half duplex not supported\n");
-                       return;
-               }
-               switch (state->speed) {
-               case SPEED_1000:
-                       speed = ENETC_PCS_SPEED_1000;
-                       break;
-               case SPEED_100:
-                       speed = ENETC_PCS_SPEED_100;
-                       break;
-               case SPEED_10:
-                       speed = ENETC_PCS_SPEED_10;
-                       break;
-               case SPEED_UNKNOWN:
-                       /* Silently don't do anything */
-                       return;
-               default:
-                       phydev_err(pcs, "Invalid PCS speed %d\n", state->speed);
-                       return;
-               }
-
-               phy_write(pcs, ENETC_PCS_IF_MODE,
-                         ENETC_PCS_IF_MODE_SGMII_EN |
-                         ENETC_PCS_IF_MODE_SGMII_SPEED(speed));
+       u16 if_mode = ENETC_PCS_IF_MODE_SGMII_EN;
 
-               /* Yes, not a mistake: speed is given by IF_MODE. */
-               phy_write(pcs, MII_BMCR, BMCR_RESET |
-                                        BMCR_SPEED1000 |
-                                        BMCR_FULLDPLX);
+       switch (speed) {
+       case SPEED_1000:
+               if_mode |= ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_1000);
+               break;
+       case SPEED_100:
+               if_mode |= ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_100);
+               break;
+       case SPEED_10:
+               if_mode |= ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_10);
+               break;
+       default:
+               phydev_err(pcs, "Invalid PCS speed %d\n", speed);
+               return;
        }
+
+       if (duplex == DUPLEX_HALF)
+               if_mode |= ENETC_PCS_IF_MODE_DUPLEX_HALF;
+
+       phy_write(pcs, ENETC_PCS_IF_MODE, if_mode);
+       phy_clear_bits(pcs, MII_BMCR, BMCR_ANENABLE);
 }
 
 /* 2500Base-X is SerDes protocol 7 on Felix and 6 on ENETC. It is a SerDes lane
@@ -869,45 +930,24 @@ static void vsc9959_pcs_init_sgmii(struct phy_device *pcs,
  * lower link speed on line side, the system-side interface remains fixed at
  * 2500 Mbps and we do rate adaptation through pause frames.
  */
-static void vsc9959_pcs_init_2500basex(struct phy_device *pcs,
-                                      unsigned int link_an_mode,
-                                      const struct phylink_link_state *state)
+static void vsc9959_pcs_link_up_2500basex(struct phy_device *pcs,
+                                         unsigned int link_an_mode,
+                                         int speed, int duplex)
 {
-       if (link_an_mode == MLO_AN_INBAND) {
-               phydev_err(pcs, "AN not supported on 3.125GHz SerDes lane\n");
-               return;
-       }
-
-       phy_write(pcs, ENETC_PCS_IF_MODE,
-                 ENETC_PCS_IF_MODE_SGMII_EN |
-                 ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_2500));
-
-       phy_write(pcs, MII_BMCR, BMCR_SPEED1000 |
-                                BMCR_FULLDPLX |
-                                BMCR_RESET);
-}
+       u16 if_mode = ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_2500) |
+                     ENETC_PCS_IF_MODE_SGMII_EN;
 
-static void vsc9959_pcs_init_usxgmii(struct phy_device *pcs,
-                                    unsigned int link_an_mode,
-                                    const struct phylink_link_state *state)
-{
-       if (link_an_mode != MLO_AN_INBAND) {
-               phydev_err(pcs, "USXGMII only supports in-band AN for now\n");
-               return;
-       }
+       if (duplex == DUPLEX_HALF)
+               if_mode |= ENETC_PCS_IF_MODE_DUPLEX_HALF;
 
-       /* Configure device ability for the USXGMII Replicator */
-       phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_ADVERTISE,
-                     USXGMII_ADVERTISE_SPEED(USXGMII_SPEED_2500) |
-                     USXGMII_ADVERTISE_LNKS(1) |
-                     ADVERTISE_SGMII |
-                     ADVERTISE_LPACK |
-                     USXGMII_ADVERTISE_FDX);
+       phy_write(pcs, ENETC_PCS_IF_MODE, if_mode);
+       phy_clear_bits(pcs, MII_BMCR, BMCR_ANENABLE);
 }
 
-static void vsc9959_pcs_init(struct ocelot *ocelot, int port,
-                            unsigned int link_an_mode,
-                            const struct phylink_link_state *state)
+void vsc9959_pcs_link_up(struct ocelot *ocelot, int port,
+                        unsigned int link_an_mode,
+                        phy_interface_t interface,
+                        int speed, int duplex)
 {
        struct felix *felix = ocelot_to_felix(ocelot);
        struct phy_device *pcs = felix->pcs[port];
@@ -915,37 +955,20 @@ static void vsc9959_pcs_init(struct ocelot *ocelot, int port,
        if (!pcs)
                return;
 
-       /* The PCS does not implement the BMSR register fully, so capability
-        * detection via genphy_read_abilities does not work. Since we can get
-        * the PHY config word from the LPA register though, there is still
-        * value in using the generic phy_resolve_aneg_linkmode function. So
-        * populate the supported and advertising link modes manually here.
-        */
-       linkmode_set_bit_array(phy_basic_ports_array,
-                              ARRAY_SIZE(phy_basic_ports_array),
-                              pcs->supported);
-       linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, pcs->supported);
-       linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, pcs->supported);
-       linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, pcs->supported);
-       if (pcs->interface == PHY_INTERFACE_MODE_2500BASEX ||
-           pcs->interface == PHY_INTERFACE_MODE_USXGMII)
-               linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
-                                pcs->supported);
-       if (pcs->interface != PHY_INTERFACE_MODE_2500BASEX)
-               linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
-                                pcs->supported);
-       phy_advertise_supported(pcs);
+       if (phylink_autoneg_inband(link_an_mode))
+               return;
 
-       switch (pcs->interface) {
+       switch (interface) {
        case PHY_INTERFACE_MODE_SGMII:
        case PHY_INTERFACE_MODE_QSGMII:
-               vsc9959_pcs_init_sgmii(pcs, link_an_mode, state);
+               vsc9959_pcs_link_up_sgmii(pcs, link_an_mode, speed, duplex);
                break;
        case PHY_INTERFACE_MODE_2500BASEX:
-               vsc9959_pcs_init_2500basex(pcs, link_an_mode, state);
+               vsc9959_pcs_link_up_2500basex(pcs, link_an_mode, speed,
+                                             duplex);
                break;
        case PHY_INTERFACE_MODE_USXGMII:
-               vsc9959_pcs_init_usxgmii(pcs, link_an_mode, state);
+               phydev_err(pcs, "USXGMII only supports in-band AN for now\n");
                break;
        default:
                dev_err(ocelot->dev, "Unsupported link mode %s\n",
@@ -1019,8 +1042,8 @@ static void vsc9959_pcs_link_state_usxgmii(struct phy_device *pcs,
                return;
 
        pcs->autoneg = true;
-       pcs->autoneg_complete = USXGMII_BMSR_AN_CMPL(status);
-       pcs->link = USXGMII_BMSR_LNKS(status);
+       pcs->autoneg_complete = !!(status & BMSR_ANEGCOMPLETE);
+       pcs->link = !!(status & BMSR_LSTATUS);
 
        if (!pcs->link || !pcs->autoneg_complete)
                return;
@@ -1029,31 +1052,31 @@ static void vsc9959_pcs_link_state_usxgmii(struct phy_device *pcs,
        if (lpa < 0)
                return;
 
-       switch (USXGMII_LPA_SPEED(lpa)) {
-       case USXGMII_SPEED_10:
+       switch (lpa & MDIO_USXGMII_SPD_MASK) {
+       case MDIO_USXGMII_10:
                pcs->speed = SPEED_10;
                break;
-       case USXGMII_SPEED_100:
+       case MDIO_USXGMII_100:
                pcs->speed = SPEED_100;
                break;
-       case USXGMII_SPEED_1000:
+       case MDIO_USXGMII_1000:
                pcs->speed = SPEED_1000;
                break;
-       case USXGMII_SPEED_2500:
+       case MDIO_USXGMII_2500:
                pcs->speed = SPEED_2500;
                break;
        default:
                break;
        }
 
-       if (USXGMII_LPA_DUPLEX(lpa))
+       if (lpa & MDIO_USXGMII_FULL_DUPLEX)
                pcs->duplex = DUPLEX_FULL;
        else
                pcs->duplex = DUPLEX_HALF;
 }
 
-static void vsc9959_pcs_link_state(struct ocelot *ocelot, int port,
-                                  struct phylink_link_state *state)
+void vsc9959_pcs_link_state(struct ocelot *ocelot, int port,
+                           struct phylink_link_state *state)
 {
        struct felix *felix = ocelot_to_felix(ocelot);
        struct phy_device *pcs = felix->pcs[port];
@@ -1084,6 +1107,43 @@ static void vsc9959_pcs_link_state(struct ocelot *ocelot, int port,
        vsc9959_pcs_link_state_resolve(pcs, state);
 }
 
+static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,
+                                    unsigned long *supported,
+                                    struct phylink_link_state *state)
+{
+       struct ocelot_port *ocelot_port = ocelot->ports[port];
+       __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+       if (state->interface != PHY_INTERFACE_MODE_NA &&
+           state->interface != ocelot_port->phy_mode) {
+               bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+               return;
+       }
+
+       phylink_set_port_modes(mask);
+       phylink_set(mask, Autoneg);
+       phylink_set(mask, Pause);
+       phylink_set(mask, Asym_Pause);
+       phylink_set(mask, 10baseT_Half);
+       phylink_set(mask, 10baseT_Full);
+       phylink_set(mask, 100baseT_Half);
+       phylink_set(mask, 100baseT_Full);
+       phylink_set(mask, 1000baseT_Half);
+       phylink_set(mask, 1000baseT_Full);
+
+       if (state->interface == PHY_INTERFACE_MODE_INTERNAL ||
+           state->interface == PHY_INTERFACE_MODE_2500BASEX ||
+           state->interface == PHY_INTERFACE_MODE_USXGMII) {
+               phylink_set(mask, 2500baseT_Full);
+               phylink_set(mask, 2500baseX_Full);
+       }
+
+       bitmap_and(supported, supported, mask,
+                  __ETHTOOL_LINK_MODE_MASK_NBITS);
+       bitmap_and(state->advertising, state->advertising, mask,
+                  __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
 static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port,
                                        phy_interface_t phy_mode)
 {
@@ -1105,8 +1165,21 @@ static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port,
        }
 }
 
+/* Watermark encode
+ * Bit 8:   Unit; 0:1, 1:16
+ * Bit 7-0: Value to be multiplied with unit
+ */
+static u16 vsc9959_wm_enc(u16 value)
+{
+       if (value >= BIT(8))
+               return BIT(8) | (value / 16);
+
+       return value;
+}
+
 static const struct ocelot_ops vsc9959_ops = {
        .reset                  = vsc9959_reset,
+       .wm_enc                 = vsc9959_wm_enc,
 };
 
 static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
@@ -1114,7 +1187,6 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
        struct felix *felix = ocelot_to_felix(ocelot);
        struct enetc_mdio_priv *mdio_priv;
        struct device *dev = ocelot->dev;
-       resource_size_t imdio_base;
        void __iomem *imdio_regs;
        struct resource res;
        struct enetc_hw *hw;
@@ -1130,13 +1202,10 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
                return -ENOMEM;
        }
 
-       imdio_base = pci_resource_start(felix->pdev,
-                                       felix->info->imdio_pci_bar);
-
        memcpy(&res, felix->info->imdio_res, sizeof(res));
        res.flags = IORESOURCE_MEM;
-       res.start += imdio_base;
-       res.end += imdio_base;
+       res.start += felix->imdio_base;
+       res.end += felix->imdio_base;
 
        imdio_regs = devm_ioremap_resource(dev, &res);
        if (IS_ERR(imdio_regs)) {
@@ -1196,7 +1265,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
        return 0;
 }
 
-static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
+void vsc9959_mdio_bus_free(struct ocelot *ocelot)
 {
        struct felix *felix = ocelot_to_felix(ocelot);
        int port;
@@ -1392,7 +1461,25 @@ static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
        }
 }
 
-struct felix_info felix_info_vsc9959 = {
+static void vsc9959_xmit_template_populate(struct ocelot *ocelot, int port)
+{
+       struct ocelot_port *ocelot_port = ocelot->ports[port];
+       u8 *template = ocelot_port->xmit_template;
+       u64 bypass, dest, src;
+
+       /* Set the source port as the CPU port module and not the
+        * NPI port
+        */
+       src = ocelot->num_phys_ports;
+       dest = BIT(port);
+       bypass = true;
+
+       packing(template, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0);
+       packing(template, &dest,    68,  56, OCELOT_TAG_LEN, PACK, 0);
+       packing(template, &src,     46,  43, OCELOT_TAG_LEN, PACK, 0);
+}
+
+static const struct felix_info felix_info_vsc9959 = {
        .target_io_res          = vsc9959_target_io_res,
        .port_io_res            = vsc9959_port_io_res,
        .imdio_res              = &vsc9959_imdio_res,
@@ -1412,10 +1499,151 @@ struct felix_info felix_info_vsc9959 = {
        .imdio_pci_bar          = 0,
        .mdio_bus_alloc         = vsc9959_mdio_bus_alloc,
        .mdio_bus_free          = vsc9959_mdio_bus_free,
-       .pcs_init               = vsc9959_pcs_init,
-       .pcs_an_restart         = vsc9959_pcs_an_restart,
+       .pcs_config             = vsc9959_pcs_config,
+       .pcs_link_up            = vsc9959_pcs_link_up,
        .pcs_link_state         = vsc9959_pcs_link_state,
+       .phylink_validate       = vsc9959_phylink_validate,
        .prevalidate_phy_mode   = vsc9959_prevalidate_phy_mode,
        .port_setup_tc          = vsc9959_port_setup_tc,
        .port_sched_speed_set   = vsc9959_sched_speed_set,
+       .xmit_template_populate = vsc9959_xmit_template_populate,
+};
+
+static irqreturn_t felix_irq_handler(int irq, void *data)
+{
+       struct ocelot *ocelot = (struct ocelot *)data;
+
+       /* The INTB interrupt is used for both PTP TX timestamp interrupt
+        * and preemption status change interrupt on each port.
+        *
+        * - Get txtstamp if have
+        * - TODO: handle preemption. Without handling it, driver may get
+        *   interrupt storm.
+        */
+
+       ocelot_get_txtstamp(ocelot);
+
+       return IRQ_HANDLED;
+}
+
+static int felix_pci_probe(struct pci_dev *pdev,
+                          const struct pci_device_id *id)
+{
+       struct dsa_switch *ds;
+       struct ocelot *ocelot;
+       struct felix *felix;
+       int err;
+
+       if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
+               dev_info(&pdev->dev, "device is disabled, skipping\n");
+               return -ENODEV;
+       }
+
+       err = pci_enable_device(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "device enable failed\n");
+               goto err_pci_enable;
+       }
+
+       /* set up for high or low dma */
+       err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (err) {
+               err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+               if (err) {
+                       dev_err(&pdev->dev,
+                               "DMA configuration failed: 0x%x\n", err);
+                       goto err_dma;
+               }
+       }
+
+       felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
+       if (!felix) {
+               err = -ENOMEM;
+               dev_err(&pdev->dev, "Failed to allocate driver memory\n");
+               goto err_alloc_felix;
+       }
+
+       pci_set_drvdata(pdev, felix);
+       ocelot = &felix->ocelot;
+       ocelot->dev = &pdev->dev;
+       felix->info = &felix_info_vsc9959;
+       felix->switch_base = pci_resource_start(pdev,
+                                               felix->info->switch_pci_bar);
+       felix->imdio_base = pci_resource_start(pdev,
+                                              felix->info->imdio_pci_bar);
+
+       pci_set_master(pdev);
+
+       err = devm_request_threaded_irq(&pdev->dev, pdev->irq, NULL,
+                                       &felix_irq_handler, IRQF_ONESHOT,
+                                       "felix-intb", ocelot);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to request irq\n");
+               goto err_alloc_irq;
+       }
+
+       ocelot->ptp = 1;
+
+       ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
+       if (!ds) {
+               err = -ENOMEM;
+               dev_err(&pdev->dev, "Failed to allocate DSA switch\n");
+               goto err_alloc_ds;
+       }
+
+       ds->dev = &pdev->dev;
+       ds->num_ports = felix->info->num_ports;
+       ds->num_tx_queues = felix->info->num_tx_queues;
+       ds->ops = &felix_switch_ops;
+       ds->priv = ocelot;
+       felix->ds = ds;
+
+       err = dsa_register_switch(ds);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
+               goto err_register_ds;
+       }
+
+       return 0;
+
+err_register_ds:
+       kfree(ds);
+err_alloc_ds:
+err_alloc_irq:
+err_alloc_felix:
+       kfree(felix);
+err_dma:
+       pci_disable_device(pdev);
+err_pci_enable:
+       return err;
+}
+
+static void felix_pci_remove(struct pci_dev *pdev)
+{
+       struct felix *felix;
+
+       felix = pci_get_drvdata(pdev);
+
+       dsa_unregister_switch(felix->ds);
+
+       kfree(felix->ds);
+       kfree(felix);
+
+       pci_disable_device(pdev);
+}
+
+static struct pci_device_id felix_ids[] = {
+       {
+               /* NXP LS1028A */
+               PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0xEEF0),
+       },
+       { 0, }
+};
+MODULE_DEVICE_TABLE(pci, felix_ids);
+
+struct pci_driver felix_vsc9959_pci_driver = {
+       .name           = "mscc_felix",
+       .id_table       = felix_ids,
+       .probe          = felix_pci_probe,
+       .remove         = felix_pci_remove,
 };
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
new file mode 100644 (file)
index 0000000..625b189
--- /dev/null
@@ -0,0 +1,1104 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Distributed Switch Architecture VSC9953 driver
+ * Copyright (C) 2020, Maxim Kochetkov <fido_max@inbox.ru>
+ */
+#include <linux/types.h>
+#include <soc/mscc/ocelot_vcap.h>
+#include <soc/mscc/ocelot_sys.h>
+#include <soc/mscc/ocelot.h>
+#include <linux/of_platform.h>
+#include <linux/packing.h>
+#include <linux/iopoll.h>
+#include "felix.h"
+
+#define VSC9953_VCAP_IS2_CNT                   1024
+#define VSC9953_VCAP_IS2_ENTRY_WIDTH           376
+#define VSC9953_VCAP_PORT_CNT                  10
+
+#define MSCC_MIIM_REG_STATUS                   0x0
+#define                MSCC_MIIM_STATUS_STAT_BUSY      BIT(3)
+#define MSCC_MIIM_REG_CMD                      0x8
+#define                MSCC_MIIM_CMD_OPR_WRITE         BIT(1)
+#define                MSCC_MIIM_CMD_OPR_READ          BIT(2)
+#define                MSCC_MIIM_CMD_WRDATA_SHIFT      4
+#define                MSCC_MIIM_CMD_REGAD_SHIFT       20
+#define                MSCC_MIIM_CMD_PHYAD_SHIFT       25
+#define                MSCC_MIIM_CMD_VLD               BIT(31)
+#define MSCC_MIIM_REG_DATA                     0xC
+#define                MSCC_MIIM_DATA_ERROR            (BIT(16) | BIT(17))
+
+#define MSCC_PHY_REG_PHY_CFG           0x0
+#define                PHY_CFG_PHY_ENA         (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+#define                PHY_CFG_PHY_COMMON_RESET BIT(4)
+#define                PHY_CFG_PHY_RESET       (BIT(5) | BIT(6) | BIT(7) | BIT(8))
+#define MSCC_PHY_REG_PHY_STATUS                0x4
+
+static const u32 vsc9953_ana_regmap[] = {
+       REG(ANA_ADVLEARN,                       0x00b500),
+       REG(ANA_VLANMASK,                       0x00b504),
+       REG_RESERVED(ANA_PORT_B_DOMAIN),
+       REG(ANA_ANAGEFIL,                       0x00b50c),
+       REG(ANA_ANEVENTS,                       0x00b510),
+       REG(ANA_STORMLIMIT_BURST,               0x00b514),
+       REG(ANA_STORMLIMIT_CFG,                 0x00b518),
+       REG(ANA_ISOLATED_PORTS,                 0x00b528),
+       REG(ANA_COMMUNITY_PORTS,                0x00b52c),
+       REG(ANA_AUTOAGE,                        0x00b530),
+       REG(ANA_MACTOPTIONS,                    0x00b534),
+       REG(ANA_LEARNDISC,                      0x00b538),
+       REG(ANA_AGENCTRL,                       0x00b53c),
+       REG(ANA_MIRRORPORTS,                    0x00b540),
+       REG(ANA_EMIRRORPORTS,                   0x00b544),
+       REG(ANA_FLOODING,                       0x00b548),
+       REG(ANA_FLOODING_IPMC,                  0x00b54c),
+       REG(ANA_SFLOW_CFG,                      0x00b550),
+       REG(ANA_PORT_MODE,                      0x00b57c),
+       REG_RESERVED(ANA_CUT_THRU_CFG),
+       REG(ANA_PGID_PGID,                      0x00b600),
+       REG(ANA_TABLES_ANMOVED,                 0x00b4ac),
+       REG(ANA_TABLES_MACHDATA,                0x00b4b0),
+       REG(ANA_TABLES_MACLDATA,                0x00b4b4),
+       REG_RESERVED(ANA_TABLES_STREAMDATA),
+       REG(ANA_TABLES_MACACCESS,               0x00b4b8),
+       REG(ANA_TABLES_MACTINDX,                0x00b4bc),
+       REG(ANA_TABLES_VLANACCESS,              0x00b4c0),
+       REG(ANA_TABLES_VLANTIDX,                0x00b4c4),
+       REG_RESERVED(ANA_TABLES_ISDXACCESS),
+       REG_RESERVED(ANA_TABLES_ISDXTIDX),
+       REG(ANA_TABLES_ENTRYLIM,                0x00b480),
+       REG_RESERVED(ANA_TABLES_PTP_ID_HIGH),
+       REG_RESERVED(ANA_TABLES_PTP_ID_LOW),
+       REG_RESERVED(ANA_TABLES_STREAMACCESS),
+       REG_RESERVED(ANA_TABLES_STREAMTIDX),
+       REG_RESERVED(ANA_TABLES_SEQ_HISTORY),
+       REG_RESERVED(ANA_TABLES_SEQ_MASK),
+       REG_RESERVED(ANA_TABLES_SFID_MASK),
+       REG_RESERVED(ANA_TABLES_SFIDACCESS),
+       REG_RESERVED(ANA_TABLES_SFIDTIDX),
+       REG_RESERVED(ANA_MSTI_STATE),
+       REG_RESERVED(ANA_OAM_UPM_LM_CNT),
+       REG_RESERVED(ANA_SG_ACCESS_CTRL),
+       REG_RESERVED(ANA_SG_CONFIG_REG_1),
+       REG_RESERVED(ANA_SG_CONFIG_REG_2),
+       REG_RESERVED(ANA_SG_CONFIG_REG_3),
+       REG_RESERVED(ANA_SG_CONFIG_REG_4),
+       REG_RESERVED(ANA_SG_CONFIG_REG_5),
+       REG_RESERVED(ANA_SG_GCL_GS_CONFIG),
+       REG_RESERVED(ANA_SG_GCL_TI_CONFIG),
+       REG_RESERVED(ANA_SG_STATUS_REG_1),
+       REG_RESERVED(ANA_SG_STATUS_REG_2),
+       REG_RESERVED(ANA_SG_STATUS_REG_3),
+       REG(ANA_PORT_VLAN_CFG,                  0x000000),
+       REG(ANA_PORT_DROP_CFG,                  0x000004),
+       REG(ANA_PORT_QOS_CFG,                   0x000008),
+       REG(ANA_PORT_VCAP_CFG,                  0x00000c),
+       REG(ANA_PORT_VCAP_S1_KEY_CFG,           0x000010),
+       REG(ANA_PORT_VCAP_S2_CFG,               0x00001c),
+       REG(ANA_PORT_PCP_DEI_MAP,               0x000020),
+       REG(ANA_PORT_CPU_FWD_CFG,               0x000060),
+       REG(ANA_PORT_CPU_FWD_BPDU_CFG,          0x000064),
+       REG(ANA_PORT_CPU_FWD_GARP_CFG,          0x000068),
+       REG(ANA_PORT_CPU_FWD_CCM_CFG,           0x00006c),
+       REG(ANA_PORT_PORT_CFG,                  0x000070),
+       REG(ANA_PORT_POL_CFG,                   0x000074),
+       REG_RESERVED(ANA_PORT_PTP_CFG),
+       REG_RESERVED(ANA_PORT_PTP_DLY1_CFG),
+       REG_RESERVED(ANA_PORT_PTP_DLY2_CFG),
+       REG_RESERVED(ANA_PORT_SFID_CFG),
+       REG(ANA_PFC_PFC_CFG,                    0x00c000),
+       REG_RESERVED(ANA_PFC_PFC_TIMER),
+       REG_RESERVED(ANA_IPT_OAM_MEP_CFG),
+       REG_RESERVED(ANA_IPT_IPT),
+       REG_RESERVED(ANA_PPT_PPT),
+       REG_RESERVED(ANA_FID_MAP_FID_MAP),
+       REG(ANA_AGGR_CFG,                       0x00c600),
+       REG(ANA_CPUQ_CFG,                       0x00c604),
+       REG_RESERVED(ANA_CPUQ_CFG2),
+       REG(ANA_CPUQ_8021_CFG,                  0x00c60c),
+       REG(ANA_DSCP_CFG,                       0x00c64c),
+       REG(ANA_DSCP_REWR_CFG,                  0x00c74c),
+       REG(ANA_VCAP_RNG_TYPE_CFG,              0x00c78c),
+       REG(ANA_VCAP_RNG_VAL_CFG,               0x00c7ac),
+       REG_RESERVED(ANA_VRAP_CFG),
+       REG_RESERVED(ANA_VRAP_HDR_DATA),
+       REG_RESERVED(ANA_VRAP_HDR_MASK),
+       REG(ANA_DISCARD_CFG,                    0x00c7d8),
+       REG(ANA_FID_CFG,                        0x00c7dc),
+       REG(ANA_POL_PIR_CFG,                    0x00a000),
+       REG(ANA_POL_CIR_CFG,                    0x00a004),
+       REG(ANA_POL_MODE_CFG,                   0x00a008),
+       REG(ANA_POL_PIR_STATE,                  0x00a00c),
+       REG(ANA_POL_CIR_STATE,                  0x00a010),
+       REG_RESERVED(ANA_POL_STATE),
+       REG(ANA_POL_FLOWC,                      0x00c280),
+       REG(ANA_POL_HYST,                       0x00c2ec),
+       REG_RESERVED(ANA_POL_MISC_CFG),
+};
+
+static const u32 vsc9953_qs_regmap[] = {
+       REG(QS_XTR_GRP_CFG,                     0x000000),
+       REG(QS_XTR_RD,                          0x000008),
+       REG(QS_XTR_FRM_PRUNING,                 0x000010),
+       REG(QS_XTR_FLUSH,                       0x000018),
+       REG(QS_XTR_DATA_PRESENT,                0x00001c),
+       REG(QS_XTR_CFG,                         0x000020),
+       REG(QS_INJ_GRP_CFG,                     0x000024),
+       REG(QS_INJ_WR,                          0x00002c),
+       REG(QS_INJ_CTRL,                        0x000034),
+       REG(QS_INJ_STATUS,                      0x00003c),
+       REG(QS_INJ_ERR,                         0x000040),
+       REG_RESERVED(QS_INH_DBG),
+};
+
+static const u32 vsc9953_s2_regmap[] = {
+       REG(S2_CORE_UPDATE_CTRL,                0x000000),
+       REG(S2_CORE_MV_CFG,                     0x000004),
+       REG(S2_CACHE_ENTRY_DAT,                 0x000008),
+       REG(S2_CACHE_MASK_DAT,                  0x000108),
+       REG(S2_CACHE_ACTION_DAT,                0x000208),
+       REG(S2_CACHE_CNT_DAT,                   0x000308),
+       REG(S2_CACHE_TG_DAT,                    0x000388),
+};
+
+static const u32 vsc9953_qsys_regmap[] = {
+       REG(QSYS_PORT_MODE,                     0x003600),
+       REG(QSYS_SWITCH_PORT_MODE,              0x003630),
+       REG(QSYS_STAT_CNT_CFG,                  0x00365c),
+       REG(QSYS_EEE_CFG,                       0x003660),
+       REG(QSYS_EEE_THRES,                     0x003688),
+       REG(QSYS_IGR_NO_SHARING,                0x00368c),
+       REG(QSYS_EGR_NO_SHARING,                0x003690),
+       REG(QSYS_SW_STATUS,                     0x003694),
+       REG(QSYS_EXT_CPU_CFG,                   0x0036c0),
+       REG_RESERVED(QSYS_PAD_CFG),
+       REG(QSYS_CPU_GROUP_MAP,                 0x0036c8),
+       REG_RESERVED(QSYS_QMAP),
+       REG_RESERVED(QSYS_ISDX_SGRP),
+       REG_RESERVED(QSYS_TIMED_FRAME_ENTRY),
+       REG_RESERVED(QSYS_TFRM_MISC),
+       REG_RESERVED(QSYS_TFRM_PORT_DLY),
+       REG_RESERVED(QSYS_TFRM_TIMER_CFG_1),
+       REG_RESERVED(QSYS_TFRM_TIMER_CFG_2),
+       REG_RESERVED(QSYS_TFRM_TIMER_CFG_3),
+       REG_RESERVED(QSYS_TFRM_TIMER_CFG_4),
+       REG_RESERVED(QSYS_TFRM_TIMER_CFG_5),
+       REG_RESERVED(QSYS_TFRM_TIMER_CFG_6),
+       REG_RESERVED(QSYS_TFRM_TIMER_CFG_7),
+       REG_RESERVED(QSYS_TFRM_TIMER_CFG_8),
+       REG(QSYS_RED_PROFILE,                   0x003724),
+       REG(QSYS_RES_QOS_MODE,                  0x003764),
+       REG(QSYS_RES_CFG,                       0x004000),
+       REG(QSYS_RES_STAT,                      0x004004),
+       REG(QSYS_EGR_DROP_MODE,                 0x003768),
+       REG(QSYS_EQ_CTRL,                       0x00376c),
+       REG_RESERVED(QSYS_EVENTS_CORE),
+       REG_RESERVED(QSYS_QMAXSDU_CFG_0),
+       REG_RESERVED(QSYS_QMAXSDU_CFG_1),
+       REG_RESERVED(QSYS_QMAXSDU_CFG_2),
+       REG_RESERVED(QSYS_QMAXSDU_CFG_3),
+       REG_RESERVED(QSYS_QMAXSDU_CFG_4),
+       REG_RESERVED(QSYS_QMAXSDU_CFG_5),
+       REG_RESERVED(QSYS_QMAXSDU_CFG_6),
+       REG_RESERVED(QSYS_QMAXSDU_CFG_7),
+       REG_RESERVED(QSYS_PREEMPTION_CFG),
+       REG(QSYS_CIR_CFG,                       0x000000),
+       REG_RESERVED(QSYS_EIR_CFG),
+       REG(QSYS_SE_CFG,                        0x000008),
+       REG(QSYS_SE_DWRR_CFG,                   0x00000c),
+       REG_RESERVED(QSYS_SE_CONNECT),
+       REG_RESERVED(QSYS_SE_DLB_SENSE),
+       REG(QSYS_CIR_STATE,                     0x000044),
+       REG_RESERVED(QSYS_EIR_STATE),
+       REG_RESERVED(QSYS_SE_STATE),
+       REG(QSYS_HSCH_MISC_CFG,                 0x003774),
+       REG_RESERVED(QSYS_TAG_CONFIG),
+       REG_RESERVED(QSYS_TAS_PARAM_CFG_CTRL),
+       REG_RESERVED(QSYS_PORT_MAX_SDU),
+       REG_RESERVED(QSYS_PARAM_CFG_REG_1),
+       REG_RESERVED(QSYS_PARAM_CFG_REG_2),
+       REG_RESERVED(QSYS_PARAM_CFG_REG_3),
+       REG_RESERVED(QSYS_PARAM_CFG_REG_4),
+       REG_RESERVED(QSYS_PARAM_CFG_REG_5),
+       REG_RESERVED(QSYS_GCL_CFG_REG_1),
+       REG_RESERVED(QSYS_GCL_CFG_REG_2),
+       REG_RESERVED(QSYS_PARAM_STATUS_REG_1),
+       REG_RESERVED(QSYS_PARAM_STATUS_REG_2),
+       REG_RESERVED(QSYS_PARAM_STATUS_REG_3),
+       REG_RESERVED(QSYS_PARAM_STATUS_REG_4),
+       REG_RESERVED(QSYS_PARAM_STATUS_REG_5),
+       REG_RESERVED(QSYS_PARAM_STATUS_REG_6),
+       REG_RESERVED(QSYS_PARAM_STATUS_REG_7),
+       REG_RESERVED(QSYS_PARAM_STATUS_REG_8),
+       REG_RESERVED(QSYS_PARAM_STATUS_REG_9),
+       REG_RESERVED(QSYS_GCL_STATUS_REG_1),
+       REG_RESERVED(QSYS_GCL_STATUS_REG_2),
+};
+
+static const u32 vsc9953_rew_regmap[] = {
+       REG(REW_PORT_VLAN_CFG,                  0x000000),
+       REG(REW_TAG_CFG,                        0x000004),
+       REG(REW_PORT_CFG,                       0x000008),
+       REG(REW_DSCP_CFG,                       0x00000c),
+       REG(REW_PCP_DEI_QOS_MAP_CFG,            0x000010),
+       REG_RESERVED(REW_PTP_CFG),
+       REG_RESERVED(REW_PTP_DLY1_CFG),
+       REG_RESERVED(REW_RED_TAG_CFG),
+       REG(REW_DSCP_REMAP_DP1_CFG,             0x000610),
+       REG(REW_DSCP_REMAP_CFG,                 0x000710),
+       REG_RESERVED(REW_STAT_CFG),
+       REG_RESERVED(REW_REW_STICKY),
+       REG_RESERVED(REW_PPT),
+};
+
+static const u32 vsc9953_sys_regmap[] = {
+       REG(SYS_COUNT_RX_OCTETS,                0x000000),
+       REG(SYS_COUNT_RX_MULTICAST,             0x000008),
+       REG(SYS_COUNT_RX_SHORTS,                0x000010),
+       REG(SYS_COUNT_RX_FRAGMENTS,             0x000014),
+       REG(SYS_COUNT_RX_JABBERS,               0x000018),
+       REG(SYS_COUNT_RX_64,                    0x000024),
+       REG(SYS_COUNT_RX_65_127,                0x000028),
+       REG(SYS_COUNT_RX_128_255,               0x00002c),
+       REG(SYS_COUNT_RX_256_1023,              0x000030),
+       REG(SYS_COUNT_RX_1024_1526,             0x000034),
+       REG(SYS_COUNT_RX_1527_MAX,              0x000038),
+       REG(SYS_COUNT_RX_LONGS,                 0x000048),
+       REG(SYS_COUNT_TX_OCTETS,                0x000100),
+       REG(SYS_COUNT_TX_COLLISION,             0x000110),
+       REG(SYS_COUNT_TX_DROPS,                 0x000114),
+       REG(SYS_COUNT_TX_64,                    0x00011c),
+       REG(SYS_COUNT_TX_65_127,                0x000120),
+       REG(SYS_COUNT_TX_128_511,               0x000124),
+       REG(SYS_COUNT_TX_512_1023,              0x000128),
+       REG(SYS_COUNT_TX_1024_1526,             0x00012c),
+       REG(SYS_COUNT_TX_1527_MAX,              0x000130),
+       REG(SYS_COUNT_TX_AGING,                 0x000178),
+       REG(SYS_RESET_CFG,                      0x000318),
+       REG_RESERVED(SYS_SR_ETYPE_CFG),
+       REG(SYS_VLAN_ETYPE_CFG,                 0x000320),
+       REG(SYS_PORT_MODE,                      0x000324),
+       REG(SYS_FRONT_PORT_MODE,                0x000354),
+       REG(SYS_FRM_AGING,                      0x00037c),
+       REG(SYS_STAT_CFG,                       0x000380),
+       REG_RESERVED(SYS_SW_STATUS),
+       REG_RESERVED(SYS_MISC_CFG),
+       REG_RESERVED(SYS_REW_MAC_HIGH_CFG),
+       REG_RESERVED(SYS_REW_MAC_LOW_CFG),
+       REG_RESERVED(SYS_TIMESTAMP_OFFSET),
+       REG(SYS_PAUSE_CFG,                      0x00044c),
+       REG(SYS_PAUSE_TOT_CFG,                  0x000478),
+       REG(SYS_ATOP,                           0x00047c),
+       REG(SYS_ATOP_TOT_CFG,                   0x0004a8),
+       REG(SYS_MAC_FC_CFG,                     0x0004ac),
+       REG(SYS_MMGT,                           0x0004d4),
+       REG_RESERVED(SYS_MMGT_FAST),
+       REG_RESERVED(SYS_EVENTS_DIF),
+       REG_RESERVED(SYS_EVENTS_CORE),
+       REG_RESERVED(SYS_CNT),
+       REG_RESERVED(SYS_PTP_STATUS),
+       REG_RESERVED(SYS_PTP_TXSTAMP),
+       REG_RESERVED(SYS_PTP_NXT),
+       REG_RESERVED(SYS_PTP_CFG),
+       REG_RESERVED(SYS_RAM_INIT),
+       REG_RESERVED(SYS_CM_ADDR),
+       REG_RESERVED(SYS_CM_DATA_WR),
+       REG_RESERVED(SYS_CM_DATA_RD),
+       REG_RESERVED(SYS_CM_OP),
+       REG_RESERVED(SYS_CM_DATA),
+};
+
+static const u32 vsc9953_gcb_regmap[] = {
+       REG(GCB_SOFT_RST,                       0x000008),
+       REG(GCB_MIIM_MII_STATUS,                0x0000ac),
+       REG(GCB_MIIM_MII_CMD,                   0x0000b4),
+       REG(GCB_MIIM_MII_DATA,                  0x0000b8),
+};
+
+static const u32 vsc9953_dev_gmii_regmap[] = {
+       REG(DEV_CLOCK_CFG,                      0x0),
+       REG(DEV_PORT_MISC,                      0x4),
+       REG_RESERVED(DEV_EVENTS),
+       REG(DEV_EEE_CFG,                        0xc),
+       REG_RESERVED(DEV_RX_PATH_DELAY),
+       REG_RESERVED(DEV_TX_PATH_DELAY),
+       REG_RESERVED(DEV_PTP_PREDICT_CFG),
+       REG(DEV_MAC_ENA_CFG,                    0x10),
+       REG(DEV_MAC_MODE_CFG,                   0x14),
+       REG(DEV_MAC_MAXLEN_CFG,                 0x18),
+       REG(DEV_MAC_TAGS_CFG,                   0x1c),
+       REG(DEV_MAC_ADV_CHK_CFG,                0x20),
+       REG(DEV_MAC_IFG_CFG,                    0x24),
+       REG(DEV_MAC_HDX_CFG,                    0x28),
+       REG_RESERVED(DEV_MAC_DBG_CFG),
+       REG(DEV_MAC_FC_MAC_LOW_CFG,             0x30),
+       REG(DEV_MAC_FC_MAC_HIGH_CFG,            0x34),
+       REG(DEV_MAC_STICKY,                     0x38),
+       REG_RESERVED(PCS1G_CFG),
+       REG_RESERVED(PCS1G_MODE_CFG),
+       REG_RESERVED(PCS1G_SD_CFG),
+       REG_RESERVED(PCS1G_ANEG_CFG),
+       REG_RESERVED(PCS1G_ANEG_NP_CFG),
+       REG_RESERVED(PCS1G_LB_CFG),
+       REG_RESERVED(PCS1G_DBG_CFG),
+       REG_RESERVED(PCS1G_CDET_CFG),
+       REG_RESERVED(PCS1G_ANEG_STATUS),
+       REG_RESERVED(PCS1G_ANEG_NP_STATUS),
+       REG_RESERVED(PCS1G_LINK_STATUS),
+       REG_RESERVED(PCS1G_LINK_DOWN_CNT),
+       REG_RESERVED(PCS1G_STICKY),
+       REG_RESERVED(PCS1G_DEBUG_STATUS),
+       REG_RESERVED(PCS1G_LPI_CFG),
+       REG_RESERVED(PCS1G_LPI_WAKE_ERROR_CNT),
+       REG_RESERVED(PCS1G_LPI_STATUS),
+       REG_RESERVED(PCS1G_TSTPAT_MODE_CFG),
+       REG_RESERVED(PCS1G_TSTPAT_STATUS),
+       REG_RESERVED(DEV_PCS_FX100_CFG),
+       REG_RESERVED(DEV_PCS_FX100_STATUS),
+};
+
+static const u32 *vsc9953_regmap[TARGET_MAX] = {
+       [ANA]           = vsc9953_ana_regmap,
+       [QS]            = vsc9953_qs_regmap,
+       [QSYS]          = vsc9953_qsys_regmap,
+       [REW]           = vsc9953_rew_regmap,
+       [SYS]           = vsc9953_sys_regmap,
+       [S2]            = vsc9953_s2_regmap,
+       [GCB]           = vsc9953_gcb_regmap,
+       [DEV_GMII]      = vsc9953_dev_gmii_regmap,
+};
+
+/* Addresses are relative to the device's base address */
+static const struct resource vsc9953_target_io_res[TARGET_MAX] = {
+       [ANA] = {
+               .start  = 0x0280000,
+               .end    = 0x028ffff,
+               .name   = "ana",
+       },
+       [QS] = {
+               .start  = 0x0080000,
+               .end    = 0x00800ff,
+               .name   = "qs",
+       },
+       [QSYS] = {
+               .start  = 0x0200000,
+               .end    = 0x021ffff,
+               .name   = "qsys",
+       },
+       [REW] = {
+               .start  = 0x0030000,
+               .end    = 0x003ffff,
+               .name   = "rew",
+       },
+       [SYS] = {
+               .start  = 0x0010000,
+               .end    = 0x001ffff,
+               .name   = "sys",
+       },
+       [S2] = {
+               .start  = 0x0060000,
+               .end    = 0x00603ff,
+               .name   = "s2",
+       },
+       [PTP] = {
+               .start  = 0x0090000,
+               .end    = 0x00900cb,
+               .name   = "ptp",
+       },
+       [GCB] = {
+               .start  = 0x0070000,
+               .end    = 0x00701ff,
+               .name   = "devcpu_gcb",
+       },
+};
+
+static const struct resource vsc9953_port_io_res[] = {
+       {
+               .start  = 0x0100000,
+               .end    = 0x010ffff,
+               .name   = "port0",
+       },
+       {
+               .start  = 0x0110000,
+               .end    = 0x011ffff,
+               .name   = "port1",
+       },
+       {
+               .start  = 0x0120000,
+               .end    = 0x012ffff,
+               .name   = "port2",
+       },
+       {
+               .start  = 0x0130000,
+               .end    = 0x013ffff,
+               .name   = "port3",
+       },
+       {
+               .start  = 0x0140000,
+               .end    = 0x014ffff,
+               .name   = "port4",
+       },
+       {
+               .start  = 0x0150000,
+               .end    = 0x015ffff,
+               .name   = "port5",
+       },
+       {
+               .start  = 0x0160000,
+               .end    = 0x016ffff,
+               .name   = "port6",
+       },
+       {
+               .start  = 0x0170000,
+               .end    = 0x017ffff,
+               .name   = "port7",
+       },
+       {
+               .start  = 0x0180000,
+               .end    = 0x018ffff,
+               .name   = "port8",
+       },
+       {
+               .start  = 0x0190000,
+               .end    = 0x019ffff,
+               .name   = "port9",
+       },
+};
+
+/* Regmap field descriptors for the VSC9953.
+ * Single-instance fields use REG_FIELD(); per-port fields use
+ * REG_FIELD_ID() and are replicated for all 11 ports with a stride of
+ * 4 bytes per port (see the comment before the QSYS fields below).
+ */
+static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
+       [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 10, 10),
+       [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 9),
+       [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24),
+       [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22),
+       [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21),
+       [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20),
+       [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19),
+       [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18),
+       [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17),
+       [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16),
+       [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15),
+       [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13),
+       [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12),
+       [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11),
+       [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10),
+       [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9),
+       [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8),
+       [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7),
+       [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6),
+       [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5),
+       [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4),
+       [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3),
+       [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2),
+       [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1),
+       [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0),
+       [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 16, 16),
+       [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 11, 12),
+       [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 10),
+       [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 7, 7),
+       [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 6, 6),
+       [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 5, 5),
+       [GCB_SOFT_RST_SWC_RST] = REG_FIELD(GCB_SOFT_RST, 0, 0),
+       [GCB_MIIM_MII_STATUS_PENDING] = REG_FIELD(GCB_MIIM_MII_STATUS, 2, 2),
+       [GCB_MIIM_MII_STATUS_BUSY] = REG_FIELD(GCB_MIIM_MII_STATUS, 3, 3),
+       /* Replicated per number of ports (11), register size 4 per port */
+       [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 13, 13, 11, 4),
+       [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 11, 4),
+       [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 11, 4),
+       [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 11, 4),
+       [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 11, 4),
+       [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 4, 5, 11, 4),
+       [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 2, 3, 11, 4),
+       [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 11, 4),
+       [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 11, 20, 11, 4),
+       [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 10, 11, 4),
+       [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 11, 4),
+};
+
+/* ethtool statistics layout: .offset is the counter's index within the
+ * switch's statistics block (note the gaps between the rx/tx/drop groups
+ * at 0x40 and 0x80), .name is the string exposed to ethtool -S.
+ */
+static const struct ocelot_stat_layout vsc9953_stats_layout[] = {
+       { .offset = 0x00,       .name = "rx_octets", },
+       { .offset = 0x01,       .name = "rx_unicast", },
+       { .offset = 0x02,       .name = "rx_multicast", },
+       { .offset = 0x03,       .name = "rx_broadcast", },
+       { .offset = 0x04,       .name = "rx_shorts", },
+       { .offset = 0x05,       .name = "rx_fragments", },
+       { .offset = 0x06,       .name = "rx_jabbers", },
+       { .offset = 0x07,       .name = "rx_crc_align_errs", },
+       { .offset = 0x08,       .name = "rx_sym_errs", },
+       { .offset = 0x09,       .name = "rx_frames_below_65_octets", },
+       { .offset = 0x0A,       .name = "rx_frames_65_to_127_octets", },
+       { .offset = 0x0B,       .name = "rx_frames_128_to_255_octets", },
+       { .offset = 0x0C,       .name = "rx_frames_256_to_511_octets", },
+       { .offset = 0x0D,       .name = "rx_frames_512_to_1023_octets", },
+       { .offset = 0x0E,       .name = "rx_frames_1024_to_1526_octets", },
+       { .offset = 0x0F,       .name = "rx_frames_over_1526_octets", },
+       { .offset = 0x10,       .name = "rx_pause", },
+       { .offset = 0x11,       .name = "rx_control", },
+       { .offset = 0x12,       .name = "rx_longs", },
+       { .offset = 0x13,       .name = "rx_classified_drops", },
+       { .offset = 0x14,       .name = "rx_red_prio_0", },
+       { .offset = 0x15,       .name = "rx_red_prio_1", },
+       { .offset = 0x16,       .name = "rx_red_prio_2", },
+       { .offset = 0x17,       .name = "rx_red_prio_3", },
+       { .offset = 0x18,       .name = "rx_red_prio_4", },
+       { .offset = 0x19,       .name = "rx_red_prio_5", },
+       { .offset = 0x1A,       .name = "rx_red_prio_6", },
+       { .offset = 0x1B,       .name = "rx_red_prio_7", },
+       { .offset = 0x1C,       .name = "rx_yellow_prio_0", },
+       { .offset = 0x1D,       .name = "rx_yellow_prio_1", },
+       { .offset = 0x1E,       .name = "rx_yellow_prio_2", },
+       { .offset = 0x1F,       .name = "rx_yellow_prio_3", },
+       { .offset = 0x20,       .name = "rx_yellow_prio_4", },
+       { .offset = 0x21,       .name = "rx_yellow_prio_5", },
+       { .offset = 0x22,       .name = "rx_yellow_prio_6", },
+       { .offset = 0x23,       .name = "rx_yellow_prio_7", },
+       { .offset = 0x24,       .name = "rx_green_prio_0", },
+       { .offset = 0x25,       .name = "rx_green_prio_1", },
+       { .offset = 0x26,       .name = "rx_green_prio_2", },
+       { .offset = 0x27,       .name = "rx_green_prio_3", },
+       { .offset = 0x28,       .name = "rx_green_prio_4", },
+       { .offset = 0x29,       .name = "rx_green_prio_5", },
+       { .offset = 0x2A,       .name = "rx_green_prio_6", },
+       { .offset = 0x2B,       .name = "rx_green_prio_7", },
+       { .offset = 0x40,       .name = "tx_octets", },
+       { .offset = 0x41,       .name = "tx_unicast", },
+       { .offset = 0x42,       .name = "tx_multicast", },
+       { .offset = 0x43,       .name = "tx_broadcast", },
+       { .offset = 0x44,       .name = "tx_collision", },
+       { .offset = 0x45,       .name = "tx_drops", },
+       { .offset = 0x46,       .name = "tx_pause", },
+       { .offset = 0x47,       .name = "tx_frames_below_65_octets", },
+       { .offset = 0x48,       .name = "tx_frames_65_to_127_octets", },
+       { .offset = 0x49,       .name = "tx_frames_128_255_octets", },
+       { .offset = 0x4A,       .name = "tx_frames_256_511_octets", },
+       { .offset = 0x4B,       .name = "tx_frames_512_1023_octets", },
+       { .offset = 0x4C,       .name = "tx_frames_1024_1526_octets", },
+       { .offset = 0x4D,       .name = "tx_frames_over_1526_octets", },
+       { .offset = 0x4E,       .name = "tx_yellow_prio_0", },
+       { .offset = 0x4F,       .name = "tx_yellow_prio_1", },
+       { .offset = 0x50,       .name = "tx_yellow_prio_2", },
+       { .offset = 0x51,       .name = "tx_yellow_prio_3", },
+       { .offset = 0x52,       .name = "tx_yellow_prio_4", },
+       { .offset = 0x53,       .name = "tx_yellow_prio_5", },
+       { .offset = 0x54,       .name = "tx_yellow_prio_6", },
+       { .offset = 0x55,       .name = "tx_yellow_prio_7", },
+       { .offset = 0x56,       .name = "tx_green_prio_0", },
+       { .offset = 0x57,       .name = "tx_green_prio_1", },
+       { .offset = 0x58,       .name = "tx_green_prio_2", },
+       { .offset = 0x59,       .name = "tx_green_prio_3", },
+       { .offset = 0x5A,       .name = "tx_green_prio_4", },
+       { .offset = 0x5B,       .name = "tx_green_prio_5", },
+       { .offset = 0x5C,       .name = "tx_green_prio_6", },
+       { .offset = 0x5D,       .name = "tx_green_prio_7", },
+       { .offset = 0x5E,       .name = "tx_aged", },
+       { .offset = 0x80,       .name = "drop_local", },
+       { .offset = 0x81,       .name = "drop_tail", },
+       { .offset = 0x82,       .name = "drop_yellow_prio_0", },
+       { .offset = 0x83,       .name = "drop_yellow_prio_1", },
+       { .offset = 0x84,       .name = "drop_yellow_prio_2", },
+       { .offset = 0x85,       .name = "drop_yellow_prio_3", },
+       { .offset = 0x86,       .name = "drop_yellow_prio_4", },
+       { .offset = 0x87,       .name = "drop_yellow_prio_5", },
+       { .offset = 0x88,       .name = "drop_yellow_prio_6", },
+       { .offset = 0x89,       .name = "drop_yellow_prio_7", },
+       { .offset = 0x8A,       .name = "drop_green_prio_0", },
+       { .offset = 0x8B,       .name = "drop_green_prio_1", },
+       { .offset = 0x8C,       .name = "drop_green_prio_2", },
+       { .offset = 0x8D,       .name = "drop_green_prio_3", },
+       { .offset = 0x8E,       .name = "drop_green_prio_4", },
+       { .offset = 0x8F,       .name = "drop_green_prio_5", },
+       { .offset = 0x90,       .name = "drop_green_prio_6", },
+       { .offset = 0x91,       .name = "drop_green_prio_7", },
+};
+
+/* Bit layout of the IS2 VCAP (TCAM) key: each entry is the {offset, width}
+ * in bits of one key field.  Field meaning past the common header depends
+ * on the TYPE value (bits 0-3), hence the overlapping offsets between the
+ * MAC_*, ARP, IP4 and IP6 sections below.
+ */
+static struct vcap_field vsc9953_vcap_is2_keys[] = {
+       /* Common: 41 bits */
+       [VCAP_IS2_TYPE]                         = {  0,   4},
+       [VCAP_IS2_HK_FIRST]                     = {  4,   1},
+       [VCAP_IS2_HK_PAG]                       = {  5,   8},
+       [VCAP_IS2_HK_IGR_PORT_MASK]             = { 13,  11},
+       [VCAP_IS2_HK_RSV2]                      = { 24,   1},
+       [VCAP_IS2_HK_HOST_MATCH]                = { 25,   1},
+       [VCAP_IS2_HK_L2_MC]                     = { 26,   1},
+       [VCAP_IS2_HK_L2_BC]                     = { 27,   1},
+       [VCAP_IS2_HK_VLAN_TAGGED]               = { 28,   1},
+       [VCAP_IS2_HK_VID]                       = { 29,  12},
+       [VCAP_IS2_HK_DEI]                       = { 41,   1},
+       [VCAP_IS2_HK_PCP]                       = { 42,   3},
+       /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
+       [VCAP_IS2_HK_L2_DMAC]                   = { 45,  48},
+       [VCAP_IS2_HK_L2_SMAC]                   = { 93,  48},
+       /* MAC_ETYPE (TYPE=000) */
+       [VCAP_IS2_HK_MAC_ETYPE_ETYPE]           = {141,  16},
+       [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0]     = {157,  16},
+       [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1]     = {173,   8},
+       [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2]     = {181,   3},
+       /* MAC_LLC (TYPE=001) */
+       [VCAP_IS2_HK_MAC_LLC_L2_LLC]            = {141,  40},
+       /* MAC_SNAP (TYPE=010) */
+       [VCAP_IS2_HK_MAC_SNAP_L2_SNAP]          = {141,  40},
+       /* MAC_ARP (TYPE=011) */
+       [VCAP_IS2_HK_MAC_ARP_SMAC]              = { 45,  48},
+       [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK]     = { 93,   1},
+       [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK]    = { 94,   1},
+       [VCAP_IS2_HK_MAC_ARP_LEN_OK]            = { 95,   1},
+       [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH]      = { 96,   1},
+       [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH]      = { 97,   1},
+       [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN]    = { 98,   1},
+       [VCAP_IS2_HK_MAC_ARP_OPCODE]            = { 99,   2},
+       [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP]        = {101,  32},
+       [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP]        = {133,  32},
+       [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP]        = {165,   1},
+       /* IP4_TCP_UDP / IP4_OTHER common */
+       [VCAP_IS2_HK_IP4]                       = { 45,   1},
+       [VCAP_IS2_HK_L3_FRAGMENT]               = { 46,   1},
+       [VCAP_IS2_HK_L3_FRAG_OFS_GT0]           = { 47,   1},
+       [VCAP_IS2_HK_L3_OPTIONS]                = { 48,   1},
+       [VCAP_IS2_HK_IP4_L3_TTL_GT0]            = { 49,   1},
+       [VCAP_IS2_HK_L3_TOS]                    = { 50,   8},
+       [VCAP_IS2_HK_L3_IP4_DIP]                = { 58,  32},
+       [VCAP_IS2_HK_L3_IP4_SIP]                = { 90,  32},
+       [VCAP_IS2_HK_DIP_EQ_SIP]                = {122,   1},
+       /* IP4_TCP_UDP (TYPE=100) */
+       [VCAP_IS2_HK_TCP]                       = {123,   1},
+       [VCAP_IS2_HK_L4_SPORT]                  = {124,  16},
+       [VCAP_IS2_HK_L4_DPORT]                  = {140,  16},
+       [VCAP_IS2_HK_L4_RNG]                    = {156,   8},
+       [VCAP_IS2_HK_L4_SPORT_EQ_DPORT]         = {164,   1},
+       [VCAP_IS2_HK_L4_SEQUENCE_EQ0]           = {165,   1},
+       [VCAP_IS2_HK_L4_URG]                    = {166,   1},
+       [VCAP_IS2_HK_L4_ACK]                    = {167,   1},
+       [VCAP_IS2_HK_L4_PSH]                    = {168,   1},
+       [VCAP_IS2_HK_L4_RST]                    = {169,   1},
+       [VCAP_IS2_HK_L4_SYN]                    = {170,   1},
+       [VCAP_IS2_HK_L4_FIN]                    = {171,   1},
+       /* IP4_OTHER (TYPE=101) */
+       [VCAP_IS2_HK_IP4_L3_PROTO]              = {123,   8},
+       [VCAP_IS2_HK_L3_PAYLOAD]                = {131,  56},
+       /* IP6_STD (TYPE=110) */
+       [VCAP_IS2_HK_IP6_L3_TTL_GT0]            = { 45,   1},
+       [VCAP_IS2_HK_L3_IP6_SIP]                = { 46, 128},
+       [VCAP_IS2_HK_IP6_L3_PROTO]              = {174,   8},
+};
+
+/* Bit layout of the IS2 VCAP action word: {offset, width} in bits of each
+ * action field.
+ */
+static struct vcap_field vsc9953_vcap_is2_actions[] = {
+       [VCAP_IS2_ACT_HIT_ME_ONCE]              = {  0,  1},
+       [VCAP_IS2_ACT_CPU_COPY_ENA]             = {  1,  1},
+       [VCAP_IS2_ACT_CPU_QU_NUM]               = {  2,  3},
+       [VCAP_IS2_ACT_MASK_MODE]                = {  5,  2},
+       [VCAP_IS2_ACT_MIRROR_ENA]               = {  7,  1},
+       [VCAP_IS2_ACT_LRN_DIS]                  = {  8,  1},
+       [VCAP_IS2_ACT_POLICE_ENA]               = {  9,  1},
+       [VCAP_IS2_ACT_POLICE_IDX]               = { 10,  8},
+       [VCAP_IS2_ACT_POLICE_VCAP_ONLY]         = { 21,  1},
+       [VCAP_IS2_ACT_PORT_MASK]                = { 22, 10},
+       [VCAP_IS2_ACT_ACL_ID]                   = { 44,  6},
+       [VCAP_IS2_ACT_HIT_CNT]                  = { 50, 32},
+};
+
+/* Geometry of the IS2 VCAP on this switch: entry/action counts and widths
+ * consumed by the common ocelot VCAP code.  The per-type .action_table
+ * widths must agree with vsc9953_vcap_is2_actions above (NORMAL actions
+ * end at the 44-bit ACL_ID boundary).
+ */
+static const struct vcap_props vsc9953_vcap_props[] = {
+       [VCAP_IS2] = {
+               .tg_width = 2,
+               .sw_count = 4,
+               .entry_count = VSC9953_VCAP_IS2_CNT,
+               .entry_width = VSC9953_VCAP_IS2_ENTRY_WIDTH,
+               /* NOTE(review): extra actions beyond entry_count are
+                * presumably the per-port default actions plus 2 - confirm
+                * against the VSC9953 datasheet.
+                */
+               .action_count = VSC9953_VCAP_IS2_CNT +
+                               VSC9953_VCAP_PORT_CNT + 2,
+               .action_width = 101,
+               .action_type_width = 1,
+               .action_table = {
+                       [IS2_ACTION_TYPE_NORMAL] = {
+                               .width = 44,
+                               .count = 2
+                       },
+                       [IS2_ACTION_TYPE_SMAC_SIP] = {
+                               .width = 6,
+                               .count = 4
+                       },
+               },
+               .counter_words = 4,
+               .counter_width = 32,
+       },
+};
+
+/* Polling intervals and timeouts, in microseconds, for the
+ * readx_poll_timeout() calls below.
+ */
+#define VSC9953_INIT_TIMEOUT                   50000
+#define VSC9953_GCB_RST_SLEEP                  100
+#define VSC9953_SYS_RAMINIT_SLEEP              80
+/* NOTE(review): "VCS9953" looks like a typo for "VSC9953"; renaming it
+ * would require updating every user, so it is only flagged here.
+ */
+#define VCS9953_MII_TIMEOUT                    10000
+
+/* Accessor for readx_poll_timeout(): returns the GCB soft-reset status
+ * bit (non-zero while the switch core reset is still in progress).
+ */
+static int vsc9953_gcb_soft_rst_status(struct ocelot *ocelot)
+{
+       int val;
+
+       ocelot_field_read(ocelot, GCB_SOFT_RST_SWC_RST, &val);
+
+       return val;
+}
+
+/* Accessor for readx_poll_timeout(): returns the SYS memory-init bit
+ * (non-zero while the switch RAM initialization is still running).
+ */
+static int vsc9953_sys_ram_init_status(struct ocelot *ocelot)
+{
+       int val;
+
+       ocelot_field_read(ocelot, SYS_RESET_CFG_MEM_INIT, &val);
+
+       return val;
+}
+
+/* Accessor for readx_poll_timeout(): non-zero while a MIIM (internal
+ * MDIO) command is still pending in the controller.
+ */
+static int vsc9953_gcb_miim_pending_status(struct ocelot *ocelot)
+{
+       int val;
+
+       ocelot_field_read(ocelot, GCB_MIIM_MII_STATUS_PENDING, &val);
+
+       return val;
+}
+
+/* Accessor for readx_poll_timeout(): non-zero while the MIIM controller
+ * is busy executing a command (used to wait for read data validity).
+ */
+static int vsc9953_gcb_miim_busy_status(struct ocelot *ocelot)
+{
+       int val;
+
+       ocelot_field_read(ocelot, GCB_MIIM_MII_STATUS_BUSY, &val);
+
+       return val;
+}
+
+/* mii_bus .write callback for the internal MDIO bus.
+ * Waits for the MIIM controller to become idle, then issues a write
+ * command.  Note it does not wait for completion afterwards; the next
+ * access starts with the same pending-idle wait.
+ * Returns 0 on success or a negative errno on timeout.
+ */
+static int vsc9953_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
+                             u16 value)
+{
+       struct ocelot *ocelot = bus->priv;
+       int err, cmd, val;
+
+       /* Wait while MIIM controller becomes idle */
+       err = readx_poll_timeout(vsc9953_gcb_miim_pending_status, ocelot,
+                                val, !val, 10, VCS9953_MII_TIMEOUT);
+       if (err) {
+               dev_err(ocelot->dev, "MDIO write: pending timeout\n");
+               goto out;
+       }
+
+       cmd = MSCC_MIIM_CMD_VLD | (phy_id << MSCC_MIIM_CMD_PHYAD_SHIFT) |
+             (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) |
+             (value << MSCC_MIIM_CMD_WRDATA_SHIFT) |
+             MSCC_MIIM_CMD_OPR_WRITE;
+
+       ocelot_write(ocelot, cmd, GCB_MIIM_MII_CMD);
+
+out:
+       return err;
+}
+
+/* mii_bus .read callback for the internal MDIO bus.
+ * Waits for the controller to go idle, issues a read command, waits for
+ * it to complete, then returns the low 16 bits of the data register.
+ * Returns the (non-negative) register value on success, or a negative
+ * errno on timeout - the standard mii_bus read convention.
+ */
+static int vsc9953_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+       struct ocelot *ocelot = bus->priv;
+       int err, cmd, val;
+
+       /* Wait until MIIM controller becomes idle */
+       err = readx_poll_timeout(vsc9953_gcb_miim_pending_status, ocelot,
+                                val, !val, 10, VCS9953_MII_TIMEOUT);
+       if (err) {
+               dev_err(ocelot->dev, "MDIO read: pending timeout\n");
+               goto out;
+       }
+
+       /* Write the MIIM COMMAND register */
+       cmd = MSCC_MIIM_CMD_VLD | (phy_id << MSCC_MIIM_CMD_PHYAD_SHIFT) |
+             (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | MSCC_MIIM_CMD_OPR_READ;
+
+       ocelot_write(ocelot, cmd, GCB_MIIM_MII_CMD);
+
+       /* Wait while read operation via the MIIM controller is in progress */
+       err = readx_poll_timeout(vsc9953_gcb_miim_busy_status, ocelot,
+                                val, !val, 10, VCS9953_MII_TIMEOUT);
+       if (err) {
+               dev_err(ocelot->dev, "MDIO read: busy timeout\n");
+               goto out;
+       }
+
+       val = ocelot_read(ocelot, GCB_MIIM_MII_DATA);
+
+       /* Mask to the 16-bit MDIO data; reuse err as the return value */
+       err = val & 0xFFFF;
+out:
+       return err;
+}
+
+/* ocelot_ops .reset implementation: soft-reset the switch core, run the
+ * internal RAM initialization, then enable the core.
+ * Returns 0 on success or a negative errno on timeout.
+ */
+static int vsc9953_reset(struct ocelot *ocelot)
+{
+       int val, err;
+
+       /* soft-reset the switch core */
+       ocelot_field_write(ocelot, GCB_SOFT_RST_SWC_RST, 1);
+
+       err = readx_poll_timeout(vsc9953_gcb_soft_rst_status, ocelot, val, !val,
+                                VSC9953_GCB_RST_SLEEP, VSC9953_INIT_TIMEOUT);
+       if (err) {
+               dev_err(ocelot->dev, "timeout: switch core reset\n");
+               return err;
+       }
+
+       /* initialize switch mem ~40us */
+       ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_INIT, 1);
+       ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_ENA, 1);
+
+       err = readx_poll_timeout(vsc9953_sys_ram_init_status, ocelot, val, !val,
+                                VSC9953_SYS_RAMINIT_SLEEP,
+                                VSC9953_INIT_TIMEOUT);
+       if (err) {
+               dev_err(ocelot->dev, "timeout: switch sram init\n");
+               return err;
+       }
+
+       /* enable switch core */
+       /* NOTE(review): MEM_ENA was already set before the RAM-init poll;
+        * this second write appears redundant unless the bit self-clears -
+        * confirm against the datasheet.
+        */
+       ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_ENA, 1);
+       ocelot_field_write(ocelot, SYS_RESET_CFG_CORE_ENA, 1);
+
+       return 0;
+}
+
+/* phylink validate callback: restrict @supported and @state->advertising
+ * to the link modes this port can do.  All ports support 10/100/1000;
+ * 2500Base-T/X is only offered on ports using the internal PHY mode.
+ * An interface type that doesn't match the port's configured phy_mode
+ * clears the whole supported mask (nothing usable).
+ */
+static void vsc9953_phylink_validate(struct ocelot *ocelot, int port,
+                                    unsigned long *supported,
+                                    struct phylink_link_state *state)
+{
+       struct ocelot_port *ocelot_port = ocelot->ports[port];
+       __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+       if (state->interface != PHY_INTERFACE_MODE_NA &&
+           state->interface != ocelot_port->phy_mode) {
+               bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+               return;
+       }
+
+       phylink_set_port_modes(mask);
+       phylink_set(mask, Autoneg);
+       phylink_set(mask, Pause);
+       phylink_set(mask, Asym_Pause);
+       phylink_set(mask, 10baseT_Full);
+       phylink_set(mask, 10baseT_Half);
+       phylink_set(mask, 100baseT_Full);
+       phylink_set(mask, 100baseT_Half);
+       phylink_set(mask, 1000baseT_Full);
+
+       if (state->interface == PHY_INTERFACE_MODE_INTERNAL) {
+               phylink_set(mask, 2500baseT_Full);
+               phylink_set(mask, 2500baseX_Full);
+       }
+
+       bitmap_and(supported, supported, mask,
+                  __ETHTOOL_LINK_MODE_MASK_NBITS);
+       bitmap_and(state->advertising, state->advertising, mask,
+                  __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+/* Check whether @phy_mode is valid for @port before it is applied.
+ * Ports 8 and 9 are the internal to-CPU ports and only accept
+ * PHY_INTERFACE_MODE_INTERNAL; the other ports accept SGMII/QSGMII.
+ * Returns 0 if the combination is valid, -ENOTSUPP otherwise.
+ */
+static int vsc9953_prevalidate_phy_mode(struct ocelot *ocelot, int port,
+                                       phy_interface_t phy_mode)
+{
+       switch (phy_mode) {
+       case PHY_INTERFACE_MODE_INTERNAL:
+               if (port != 8 && port != 9)
+                       return -ENOTSUPP;
+               return 0;
+       case PHY_INTERFACE_MODE_SGMII:
+       case PHY_INTERFACE_MODE_QSGMII:
+               /* Not supported on internal to-CPU ports */
+               if (port == 8 || port == 9)
+                       return -ENOTSUPP;
+               return 0;
+       default:
+               return -ENOTSUPP;
+       }
+}
+
+/* Watermark encode
+ * Bit 9:   Unit; 0:1, 1:16
+ * Bit 8-0: Value to be multiplied with unit
+ *
+ * NOTE(review): values >= BIT(9) * 16 cannot be represented and would
+ * overflow into bit 9; callers are assumed to stay below that - confirm.
+ */
+static u16 vsc9953_wm_enc(u16 value)
+{
+       if (value >= BIT(9))
+               return BIT(9) | (value / 16);
+
+       return value;
+}
+
+/* Switch-level operations handed to the common ocelot code. */
+static const struct ocelot_ops vsc9953_ops = {
+       .reset                  = vsc9953_reset,
+       .wm_enc                 = vsc9953_wm_enc,
+};
+
+/* Allocate and register the internal MDIO bus and probe the per-port PCS
+ * devices on it.  Ports in internal mode have no PCS and are skipped;
+ * a missing PCS on other ports is tolerated (the port is left without
+ * one).  Returns 0 on success or a negative errno.
+ */
+static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
+{
+       struct felix *felix = ocelot_to_felix(ocelot);
+       struct device *dev = ocelot->dev;
+       struct mii_bus *bus;
+       int port;
+       int rc;
+
+       felix->pcs = devm_kcalloc(dev, felix->info->num_ports,
+                                 sizeof(struct phy_device *),
+                                 GFP_KERNEL);
+       if (!felix->pcs) {
+               dev_err(dev, "failed to allocate array for PCS PHYs\n");
+               return -ENOMEM;
+       }
+
+       bus = devm_mdiobus_alloc(dev);
+       if (!bus)
+               return -ENOMEM;
+
+       bus->name = "VSC9953 internal MDIO bus";
+       bus->read = vsc9953_mdio_read;
+       bus->write = vsc9953_mdio_write;
+       bus->parent = dev;
+       bus->priv = ocelot;
+       snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
+
+       /* Needed in order to initialize the bus mutex lock */
+       rc = mdiobus_register(bus);
+       if (rc < 0) {
+               dev_err(dev, "failed to register MDIO bus\n");
+               return rc;
+       }
+
+       felix->imdio = bus;
+
+       for (port = 0; port < felix->info->num_ports; port++) {
+               struct ocelot_port *ocelot_port = ocelot->ports[port];
+               struct phy_device *pcs;
+               /* NOTE(review): PCS devices presumably sit at internal MDIO
+                * addresses 4..13 (port + 4) - confirm against the datasheet.
+                */
+               int addr = port + 4;
+
+               if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL)
+                       continue;
+
+               pcs = get_phy_device(felix->imdio, addr, false);
+               if (IS_ERR(pcs))
+                       continue;
+
+               pcs->interface = ocelot_port->phy_mode;
+               felix->pcs[port] = pcs;
+
+               dev_info(dev, "Found PCS at internal MDIO address %d\n", addr);
+       }
+
+       return 0;
+}
+
+/* Pre-build the injection header template for @port: packs the BYPASS
+ * flag, the destination port mask and the source port into the fixed bit
+ * positions of the OCELOT_TAG_LEN-byte header used on transmit.
+ */
+static void vsc9953_xmit_template_populate(struct ocelot *ocelot, int port)
+{
+       struct ocelot_port *ocelot_port = ocelot->ports[port];
+       u8 *template = ocelot_port->xmit_template;
+       u64 bypass, dest, src;
+
+       /* Set the source port as the CPU port module and not the
+        * NPI port
+        */
+       src = ocelot->num_phys_ports;
+       dest = BIT(port);
+       bypass = true;
+
+       packing(template, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0);
+       packing(template, &dest,    67,  57, OCELOT_TAG_LEN, PACK, 0);
+       packing(template, &src,     46,  43, OCELOT_TAG_LEN, PACK, 0);
+}
+
+/* Per-SoC description consumed by the common felix driver code.
+ * Note the PCS callbacks are shared with the VSC9959 (vsc9959_pcs_*),
+ * while everything VSC9953-specific is provided locally.
+ */
+static const struct felix_info seville_info_vsc9953 = {
+       .target_io_res          = vsc9953_target_io_res,
+       .port_io_res            = vsc9953_port_io_res,
+       .regfields              = vsc9953_regfields,
+       .map                    = vsc9953_regmap,
+       .ops                    = &vsc9953_ops,
+       .stats_layout           = vsc9953_stats_layout,
+       .num_stats              = ARRAY_SIZE(vsc9953_stats_layout),
+       .vcap_is2_keys          = vsc9953_vcap_is2_keys,
+       .vcap_is2_actions       = vsc9953_vcap_is2_actions,
+       .vcap                   = vsc9953_vcap_props,
+       .shared_queue_sz        = 128 * 1024,
+       .num_mact_rows          = 2048,
+       .num_ports              = 10,
+       .mdio_bus_alloc         = vsc9953_mdio_bus_alloc,
+       .mdio_bus_free          = vsc9959_mdio_bus_free,
+       .pcs_config             = vsc9959_pcs_config,
+       .pcs_link_up            = vsc9959_pcs_link_up,
+       .pcs_link_state         = vsc9959_pcs_link_state,
+       .phylink_validate       = vsc9953_phylink_validate,
+       .prevalidate_phy_mode   = vsc9953_prevalidate_phy_mode,
+       .xmit_template_populate = vsc9953_xmit_template_populate,
+};
+
+/* Platform probe: allocate the felix/ocelot state, record the switch MMIO
+ * base from the first memory resource, and register the DSA switch.
+ * Returns 0 on success or a negative errno; goto-based cleanup unwinds
+ * the allocations on failure.
+ */
+static int seville_probe(struct platform_device *pdev)
+{
+       struct dsa_switch *ds;
+       struct ocelot *ocelot;
+       struct resource *res;
+       struct felix *felix;
+       int err;
+
+       felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
+       if (!felix) {
+               err = -ENOMEM;
+               dev_err(&pdev->dev, "Failed to allocate driver memory\n");
+               goto err_alloc_felix;
+       }
+
+       platform_set_drvdata(pdev, felix);
+
+       ocelot = &felix->ocelot;
+       ocelot->dev = &pdev->dev;
+       felix->info = &seville_info_vsc9953;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               /* Fix: res was dereferenced unchecked; a DT node without a
+                * "reg" property would cause a NULL pointer dereference.
+                */
+               err = -EINVAL;
+               dev_err(&pdev->dev, "Invalid resource\n");
+               goto err_alloc_felix;
+       }
+       felix->switch_base = res->start;
+
+       ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
+       if (!ds) {
+               err = -ENOMEM;
+               dev_err(&pdev->dev, "Failed to allocate DSA switch\n");
+               goto err_alloc_ds;
+       }
+
+       ds->dev = &pdev->dev;
+       ds->num_ports = felix->info->num_ports;
+       ds->ops = &felix_switch_ops;
+       ds->priv = ocelot;
+       felix->ds = ds;
+
+       err = dsa_register_switch(ds);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
+               goto err_register_ds;
+       }
+
+       return 0;
+
+err_register_ds:
+       kfree(ds);
+err_alloc_ds:
+err_alloc_felix:
+       kfree(felix);
+       return err;
+}
+
+/* Platform remove: unregister the DSA switch and free the memory that
+ * seville_probe() allocated with plain kzalloc().
+ */
+static int seville_remove(struct platform_device *pdev)
+{
+       struct felix *felix;
+
+       felix = platform_get_drvdata(pdev);
+
+       dsa_unregister_switch(felix->ds);
+
+       kfree(felix->ds);
+       kfree(felix);
+
+       return 0;
+}
+
+/* Device-tree match table; also exported for module autoloading. */
+static const struct of_device_id seville_of_match[] = {
+       { .compatible = "mscc,vsc9953-switch" },
+       { },
+};
+MODULE_DEVICE_TABLE(of, seville_of_match);
+
+/* Platform driver definition.  Deliberately non-static: presumably
+ * registered/unregistered from another compilation unit rather than via
+ * module_platform_driver() here - confirm against the module init code.
+ */
+struct platform_driver seville_vsc9953_driver = {
+       .probe          = seville_probe,
+       .remove         = seville_remove,
+       .driver = {
+               .name           = "mscc_seville",
+               .of_match_table = of_match_ptr(seville_of_match),
+       },
+};
index 7c86056..e24a990 100644 (file)
@@ -97,8 +97,7 @@
        (AR9331_SW_PORT_STATUS_TXMAC | AR9331_SW_PORT_STATUS_RXMAC)
 
 #define AR9331_SW_PORT_STATUS_LINK_MASK \
-       (AR9331_SW_PORT_STATUS_LINK_EN | AR9331_SW_PORT_STATUS_FLOW_LINK_EN | \
-        AR9331_SW_PORT_STATUS_DUPLEX_MODE | \
+       (AR9331_SW_PORT_STATUS_DUPLEX_MODE | \
         AR9331_SW_PORT_STATUS_RX_FLOW_EN | AR9331_SW_PORT_STATUS_TX_FLOW_EN | \
         AR9331_SW_PORT_STATUS_SPEED_M)
 
@@ -410,33 +409,10 @@ static void ar9331_sw_phylink_mac_config(struct dsa_switch *ds, int port,
        struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
        struct regmap *regmap = priv->regmap;
        int ret;
-       u32 val;
-
-       switch (state->speed) {
-       case SPEED_1000:
-               val = AR9331_SW_PORT_STATUS_SPEED_1000;
-               break;
-       case SPEED_100:
-               val = AR9331_SW_PORT_STATUS_SPEED_100;
-               break;
-       case SPEED_10:
-               val = AR9331_SW_PORT_STATUS_SPEED_10;
-               break;
-       default:
-               return;
-       }
-
-       if (state->duplex)
-               val |= AR9331_SW_PORT_STATUS_DUPLEX_MODE;
-
-       if (state->pause & MLO_PAUSE_TX)
-               val |= AR9331_SW_PORT_STATUS_TX_FLOW_EN;
-
-       if (state->pause & MLO_PAUSE_RX)
-               val |= AR9331_SW_PORT_STATUS_RX_FLOW_EN;
 
        ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
-                                AR9331_SW_PORT_STATUS_LINK_MASK, val);
+                                AR9331_SW_PORT_STATUS_LINK_EN |
+                                AR9331_SW_PORT_STATUS_FLOW_LINK_EN, 0);
        if (ret)
                dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
 }
@@ -464,11 +440,37 @@ static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
 {
        struct ar9331_sw_priv *priv = (struct ar9331_sw_priv *)ds->priv;
        struct regmap *regmap = priv->regmap;
+       u32 val;
        int ret;
 
+       val = AR9331_SW_PORT_STATUS_MAC_MASK;
+       switch (speed) {
+       case SPEED_1000:
+               val |= AR9331_SW_PORT_STATUS_SPEED_1000;
+               break;
+       case SPEED_100:
+               val |= AR9331_SW_PORT_STATUS_SPEED_100;
+               break;
+       case SPEED_10:
+               val |= AR9331_SW_PORT_STATUS_SPEED_10;
+               break;
+       default:
+               return;
+       }
+
+       if (duplex)
+               val |= AR9331_SW_PORT_STATUS_DUPLEX_MODE;
+
+       if (tx_pause)
+               val |= AR9331_SW_PORT_STATUS_TX_FLOW_EN;
+
+       if (rx_pause)
+               val |= AR9331_SW_PORT_STATUS_RX_FLOW_EN;
+
        ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
-                                AR9331_SW_PORT_STATUS_MAC_MASK,
-                                AR9331_SW_PORT_STATUS_MAC_MASK);
+                                AR9331_SW_PORT_STATUS_MAC_MASK |
+                                AR9331_SW_PORT_STATUS_LINK_MASK,
+                                val);
        if (ret)
                dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
 }
index d2b5ab4..a5566de 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/of_platform.h>
 #include <linux/if_bridge.h>
 #include <linux/mdio.h>
+#include <linux/phylink.h>
 #include <linux/gpio/consumer.h>
 #include <linux/etherdevice.h>
 
@@ -418,55 +419,6 @@ qca8k_mib_init(struct qca8k_priv *priv)
        mutex_unlock(&priv->reg_mutex);
 }
 
-static int
-qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode)
-{
-       u32 reg, val;
-
-       switch (port) {
-       case 0:
-               reg = QCA8K_REG_PORT0_PAD_CTRL;
-               break;
-       case 6:
-               reg = QCA8K_REG_PORT6_PAD_CTRL;
-               break;
-       default:
-               pr_err("Can't set PAD_CTRL on port %d\n", port);
-               return -EINVAL;
-       }
-
-       /* Configure a port to be directly connected to an external
-        * PHY or MAC.
-        */
-       switch (mode) {
-       case PHY_INTERFACE_MODE_RGMII:
-               /* RGMII mode means no delay so don't enable the delay */
-               val = QCA8K_PORT_PAD_RGMII_EN;
-               qca8k_write(priv, reg, val);
-               break;
-       case PHY_INTERFACE_MODE_RGMII_ID:
-               /* RGMII_ID needs internal delay. This is enabled through
-                * PORT5_PAD_CTRL for all ports, rather than individual port
-                * registers
-                */
-               qca8k_write(priv, reg,
-                           QCA8K_PORT_PAD_RGMII_EN |
-                           QCA8K_PORT_PAD_RGMII_TX_DELAY(QCA8K_MAX_DELAY) |
-                           QCA8K_PORT_PAD_RGMII_RX_DELAY(QCA8K_MAX_DELAY));
-               qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
-                           QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
-               break;
-       case PHY_INTERFACE_MODE_SGMII:
-               qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
-               break;
-       default:
-               pr_err("xMII mode %d not supported\n", mode);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
 static void
 qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
 {
@@ -639,9 +591,7 @@ static int
 qca8k_setup(struct dsa_switch *ds)
 {
        struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-       phy_interface_t phy_mode = PHY_INTERFACE_MODE_NA;
        int ret, i;
-       u32 mask;
 
        /* Make sure that port 0 is the cpu port */
        if (!dsa_is_cpu_port(ds, 0)) {
@@ -661,24 +611,9 @@ qca8k_setup(struct dsa_switch *ds)
        if (ret)
                return ret;
 
-       /* Initialize CPU port pad mode (xMII type, delays...) */
-       ret = of_get_phy_mode(dsa_to_port(ds, QCA8K_CPU_PORT)->dn, &phy_mode);
-       if (ret) {
-               pr_err("Can't find phy-mode for master device\n");
-               return ret;
-       }
-       ret = qca8k_set_pad_ctrl(priv, QCA8K_CPU_PORT, phy_mode);
-       if (ret < 0)
-               return ret;
-
-       /* Enable CPU Port, force it to maximum bandwidth and full-duplex */
-       mask = QCA8K_PORT_STATUS_SPEED_1000 | QCA8K_PORT_STATUS_TXFLOW |
-              QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_DUPLEX;
-       qca8k_write(priv, QCA8K_REG_PORT_STATUS(QCA8K_CPU_PORT), mask);
+       /* Enable CPU Port */
        qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0,
                      QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN);
-       qca8k_port_set_status(priv, QCA8K_CPU_PORT, 1);
-       priv->port_sts[QCA8K_CPU_PORT].enabled = 1;
 
        /* Enable MIB counters */
        qca8k_mib_init(priv);
@@ -693,10 +628,9 @@ qca8k_setup(struct dsa_switch *ds)
                qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
                          QCA8K_PORT_LOOKUP_MEMBER, 0);
 
-       /* Disable MAC by default on all user ports */
+       /* Disable MAC by default on all ports */
        for (i = 1; i < QCA8K_NUM_PORTS; i++)
-               if (dsa_is_user_port(ds, i))
-                       qca8k_port_set_status(priv, i, 0);
+               qca8k_port_set_status(priv, i, 0);
 
        /* Forward all unknown frames to CPU port for Linux processing */
        qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
@@ -713,7 +647,7 @@ qca8k_setup(struct dsa_switch *ds)
                                  QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
                }
 
-               /* Invividual user ports get connected to CPU port only */
+               /* Individual user ports get connected to CPU port only */
                if (dsa_is_user_port(ds, i)) {
                        int shift = 16 * (i % 2);
 
@@ -736,51 +670,265 @@ qca8k_setup(struct dsa_switch *ds)
                }
        }
 
+       /* Setup our port MTUs to match power on defaults */
+       for (i = 0; i < QCA8K_NUM_PORTS; i++)
+               priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN;
+       qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
+
        /* Flush the FDB table */
        qca8k_fdb_flush(priv);
 
+       /* We don't have interrupts for link changes, so we need to poll */
+       ds->pcs_poll = true;
+
        return 0;
 }
 
 static void
-qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy)
+qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+                        const struct phylink_link_state *state)
 {
        struct qca8k_priv *priv = ds->priv;
-       u32 reg;
+       u32 reg, val;
+
+       switch (port) {
+       case 0: /* 1st CPU port */
+               if (state->interface != PHY_INTERFACE_MODE_RGMII &&
+                   state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
+                   state->interface != PHY_INTERFACE_MODE_SGMII)
+                       return;
 
-       /* Force fixed-link setting for CPU port, skip others. */
-       if (!phy_is_pseudo_fixed_link(phy))
+               reg = QCA8K_REG_PORT0_PAD_CTRL;
+               break;
+       case 1:
+       case 2:
+       case 3:
+       case 4:
+       case 5:
+               /* Internal PHY, nothing to do */
                return;
+       case 6: /* 2nd CPU port / external PHY */
+               if (state->interface != PHY_INTERFACE_MODE_RGMII &&
+                   state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
+                   state->interface != PHY_INTERFACE_MODE_SGMII &&
+                   state->interface != PHY_INTERFACE_MODE_1000BASEX)
+                       return;
 
-       /* Set port speed */
-       switch (phy->speed) {
-       case 10:
-               reg = QCA8K_PORT_STATUS_SPEED_10;
+               reg = QCA8K_REG_PORT6_PAD_CTRL;
                break;
-       case 100:
-               reg = QCA8K_PORT_STATUS_SPEED_100;
+       default:
+               dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
+               return;
+       }
+
+       if (port != 6 && phylink_autoneg_inband(mode)) {
+               dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
+                       __func__);
+               return;
+       }
+
+       switch (state->interface) {
+       case PHY_INTERFACE_MODE_RGMII:
+               /* RGMII mode means no delay so don't enable the delay */
+               qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
                break;
-       case 1000:
-               reg = QCA8K_PORT_STATUS_SPEED_1000;
+       case PHY_INTERFACE_MODE_RGMII_ID:
+               /* RGMII_ID needs internal delay. This is enabled through
+                * PORT5_PAD_CTRL for all ports, rather than individual port
+                * registers
+                */
+               qca8k_write(priv, reg,
+                           QCA8K_PORT_PAD_RGMII_EN |
+                           QCA8K_PORT_PAD_RGMII_TX_DELAY(QCA8K_MAX_DELAY) |
+                           QCA8K_PORT_PAD_RGMII_RX_DELAY(QCA8K_MAX_DELAY));
+               qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
+                           QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
+               break;
+       case PHY_INTERFACE_MODE_SGMII:
+       case PHY_INTERFACE_MODE_1000BASEX:
+               /* Enable SGMII on the port */
+               qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
+
+               /* Enable/disable SerDes auto-negotiation as necessary */
+               val = qca8k_read(priv, QCA8K_REG_PWS);
+               if (phylink_autoneg_inband(mode))
+                       val &= ~QCA8K_PWS_SERDES_AEN_DIS;
+               else
+                       val |= QCA8K_PWS_SERDES_AEN_DIS;
+               qca8k_write(priv, QCA8K_REG_PWS, val);
+
+               /* Configure the SGMII parameters */
+               val = qca8k_read(priv, QCA8K_REG_SGMII_CTRL);
+
+               val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
+                       QCA8K_SGMII_EN_TX | QCA8K_SGMII_EN_SD;
+
+               if (dsa_is_cpu_port(ds, port)) {
+                       /* CPU port, we're talking to the CPU MAC, be a PHY */
+                       val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+                       val |= QCA8K_SGMII_MODE_CTRL_PHY;
+               } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
+                       val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+                       val |= QCA8K_SGMII_MODE_CTRL_MAC;
+               } else if (state->interface == PHY_INTERFACE_MODE_1000BASEX) {
+                       val &= ~QCA8K_SGMII_MODE_CTRL_MASK;
+                       val |= QCA8K_SGMII_MODE_CTRL_BASEX;
+               }
+
+               qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
+               break;
+       default:
+               dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
+                       phy_modes(state->interface), port);
+               return;
+       }
+}
+
+static void
+qca8k_phylink_validate(struct dsa_switch *ds, int port,
+                      unsigned long *supported,
+                      struct phylink_link_state *state)
+{
+       __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+       switch (port) {
+       case 0: /* 1st CPU port */
+               if (state->interface != PHY_INTERFACE_MODE_NA &&
+                   state->interface != PHY_INTERFACE_MODE_RGMII &&
+                   state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
+                   state->interface != PHY_INTERFACE_MODE_SGMII)
+                       goto unsupported;
+               break;
+       case 1:
+       case 2:
+       case 3:
+       case 4:
+       case 5:
+               /* Internal PHY */
+               if (state->interface != PHY_INTERFACE_MODE_NA &&
+                   state->interface != PHY_INTERFACE_MODE_GMII)
+                       goto unsupported;
+               break;
+       case 6: /* 2nd CPU port / external PHY */
+               if (state->interface != PHY_INTERFACE_MODE_NA &&
+                   state->interface != PHY_INTERFACE_MODE_RGMII &&
+                   state->interface != PHY_INTERFACE_MODE_RGMII_ID &&
+                   state->interface != PHY_INTERFACE_MODE_SGMII &&
+                   state->interface != PHY_INTERFACE_MODE_1000BASEX)
+                       goto unsupported;
                break;
        default:
-               dev_dbg(priv->dev, "port%d link speed %dMbps not supported.\n",
-                       port, phy->speed);
+unsupported:
+               linkmode_zero(supported);
                return;
        }
 
-       /* Set duplex mode */
-       if (phy->duplex == DUPLEX_FULL)
-               reg |= QCA8K_PORT_STATUS_DUPLEX;
+       phylink_set_port_modes(mask);
+       phylink_set(mask, Autoneg);
+
+       phylink_set(mask, 1000baseT_Full);
+       phylink_set(mask, 10baseT_Half);
+       phylink_set(mask, 10baseT_Full);
+       phylink_set(mask, 100baseT_Half);
+       phylink_set(mask, 100baseT_Full);
+
+       if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+               phylink_set(mask, 1000baseX_Full);
 
-       /* Force flow control */
-       if (dsa_is_cpu_port(ds, port))
-               reg |= QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_TXFLOW;
+       phylink_set(mask, Pause);
+       phylink_set(mask, Asym_Pause);
+
+       linkmode_and(supported, supported, mask);
+       linkmode_and(state->advertising, state->advertising, mask);
+}
+
+static int
+qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port,
+                            struct phylink_link_state *state)
+{
+       struct qca8k_priv *priv = ds->priv;
+       u32 reg;
+
+       reg = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port));
+
+       state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
+       state->an_complete = state->link;
+       state->an_enabled = !!(reg & QCA8K_PORT_STATUS_LINK_AUTO);
+       state->duplex = (reg & QCA8K_PORT_STATUS_DUPLEX) ? DUPLEX_FULL :
+                                                          DUPLEX_HALF;
+
+       switch (reg & QCA8K_PORT_STATUS_SPEED) {
+       case QCA8K_PORT_STATUS_SPEED_10:
+               state->speed = SPEED_10;
+               break;
+       case QCA8K_PORT_STATUS_SPEED_100:
+               state->speed = SPEED_100;
+               break;
+       case QCA8K_PORT_STATUS_SPEED_1000:
+               state->speed = SPEED_1000;
+               break;
+       default:
+               state->speed = SPEED_UNKNOWN;
+               break;
+       }
+
+       state->pause = MLO_PAUSE_NONE;
+       if (reg & QCA8K_PORT_STATUS_RXFLOW)
+               state->pause |= MLO_PAUSE_RX;
+       if (reg & QCA8K_PORT_STATUS_TXFLOW)
+               state->pause |= MLO_PAUSE_TX;
+
+       return 1;
+}
+
+static void
+qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+                           phy_interface_t interface)
+{
+       struct qca8k_priv *priv = ds->priv;
 
-       /* Force link down before changing MAC options */
        qca8k_port_set_status(priv, port, 0);
+}
+
+static void
+qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
+                         phy_interface_t interface, struct phy_device *phydev,
+                         int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+       struct qca8k_priv *priv = ds->priv;
+       u32 reg;
+
+       if (phylink_autoneg_inband(mode)) {
+               reg = QCA8K_PORT_STATUS_LINK_AUTO;
+       } else {
+               switch (speed) {
+               case SPEED_10:
+                       reg = QCA8K_PORT_STATUS_SPEED_10;
+                       break;
+               case SPEED_100:
+                       reg = QCA8K_PORT_STATUS_SPEED_100;
+                       break;
+               case SPEED_1000:
+                       reg = QCA8K_PORT_STATUS_SPEED_1000;
+                       break;
+               default:
+                       reg = QCA8K_PORT_STATUS_LINK_AUTO;
+                       break;
+               }
+
+               if (duplex == DUPLEX_FULL)
+                       reg |= QCA8K_PORT_STATUS_DUPLEX;
+
+               if (rx_pause || dsa_is_cpu_port(ds, port))
+                       reg |= QCA8K_PORT_STATUS_RXFLOW;
+
+               if (tx_pause || dsa_is_cpu_port(ds, port))
+                       reg |= QCA8K_PORT_STATUS_TXFLOW;
+       }
+
+       reg |= QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC;
+
        qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg);
-       qca8k_port_set_status(priv, port, 1);
 }
 
 static void
@@ -937,13 +1085,11 @@ qca8k_port_enable(struct dsa_switch *ds, int port,
 {
        struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
 
-       if (!dsa_is_user_port(ds, port))
-               return 0;
-
        qca8k_port_set_status(priv, port, 1);
        priv->port_sts[port].enabled = 1;
 
-       phy_support_asym_pause(phy);
+       if (dsa_is_user_port(ds, port))
+               phy_support_asym_pause(phy);
 
        return 0;
 }
@@ -958,6 +1104,30 @@ qca8k_port_disable(struct dsa_switch *ds, int port)
 }
 
 static int
+qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+       struct qca8k_priv *priv = ds->priv;
+       int i, mtu = 0;
+
+       priv->port_mtu[port] = new_mtu;
+
+       /* Single MAX_FRAME_SIZE register: program the largest MTU of any port */
+       for (i = 0; i < QCA8K_NUM_PORTS; i++)
+               if (priv->port_mtu[i] > mtu)
+                       mtu = priv->port_mtu[i];
+
+       /* Include L2 header / FCS length */
+       qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN);
+
+       return 0;
+}
+
+static int
+qca8k_port_max_mtu(struct dsa_switch *ds, int port)
+{
+       return QCA8K_MAX_MTU;
+}
+
+static int
 qca8k_port_fdb_insert(struct qca8k_priv *priv, const u8 *addr,
                      u16 port_mask, u16 vid)
 {
@@ -1026,7 +1196,6 @@ qca8k_get_tag_protocol(struct dsa_switch *ds, int port,
 static const struct dsa_switch_ops qca8k_switch_ops = {
        .get_tag_protocol       = qca8k_get_tag_protocol,
        .setup                  = qca8k_setup,
-       .adjust_link            = qca8k_adjust_link,
        .get_strings            = qca8k_get_strings,
        .get_ethtool_stats      = qca8k_get_ethtool_stats,
        .get_sset_count         = qca8k_get_sset_count,
@@ -1034,12 +1203,19 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
        .set_mac_eee            = qca8k_set_mac_eee,
        .port_enable            = qca8k_port_enable,
        .port_disable           = qca8k_port_disable,
+       .port_change_mtu        = qca8k_port_change_mtu,
+       .port_max_mtu           = qca8k_port_max_mtu,
        .port_stp_state_set     = qca8k_port_stp_state_set,
        .port_bridge_join       = qca8k_port_bridge_join,
        .port_bridge_leave      = qca8k_port_bridge_leave,
        .port_fdb_add           = qca8k_port_fdb_add,
        .port_fdb_del           = qca8k_port_fdb_del,
        .port_fdb_dump          = qca8k_port_fdb_dump,
+       .phylink_validate       = qca8k_phylink_validate,
+       .phylink_mac_link_state = qca8k_phylink_mac_link_state,
+       .phylink_mac_config     = qca8k_phylink_mac_config,
+       .phylink_mac_link_down  = qca8k_phylink_mac_link_down,
+       .phylink_mac_link_up    = qca8k_phylink_mac_link_up,
 };
 
 static int
index 42d6ea2..3143939 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/gpio.h>
 
 #define QCA8K_NUM_PORTS                                        7
+#define QCA8K_MAX_MTU                                  9000
 
 #define PHY_ID_QCA8337                                 0x004dd036
 #define QCA8K_ID_QCA8337                               0x13
@@ -36,6 +37,8 @@
 #define   QCA8K_MAX_DELAY                              3
 #define   QCA8K_PORT_PAD_RGMII_RX_DELAY_EN             BIT(24)
 #define   QCA8K_PORT_PAD_SGMII_EN                      BIT(7)
+#define QCA8K_REG_PWS                                  0x010
+#define   QCA8K_PWS_SERDES_AEN_DIS                     BIT(7)
 #define QCA8K_REG_MODULE_EN                            0x030
 #define   QCA8K_MODULE_EN_MIB                          BIT(0)
 #define QCA8K_REG_MIB                                  0x034
@@ -56,6 +59,7 @@
 #define   QCA8K_MDIO_MASTER_MAX_REG                    32
 #define QCA8K_GOL_MAC_ADDR0                            0x60
 #define QCA8K_GOL_MAC_ADDR1                            0x64
+#define QCA8K_MAX_FRAME_SIZE                           0x78
 #define QCA8K_REG_PORT_STATUS(_i)                      (0x07c + (_i) * 4)
 #define   QCA8K_PORT_STATUS_SPEED                      GENMASK(1, 0)
 #define   QCA8K_PORT_STATUS_SPEED_10                   0
@@ -69,6 +73,7 @@
 #define   QCA8K_PORT_STATUS_LINK_UP                    BIT(8)
 #define   QCA8K_PORT_STATUS_LINK_AUTO                  BIT(9)
 #define   QCA8K_PORT_STATUS_LINK_PAUSE                 BIT(10)
+#define   QCA8K_PORT_STATUS_FLOW_AUTO                  BIT(12)
 #define QCA8K_REG_PORT_HDR_CTRL(_i)                    (0x9c + (_i * 4))
 #define   QCA8K_PORT_HDR_CTRL_RX_MASK                  GENMASK(3, 2)
 #define   QCA8K_PORT_HDR_CTRL_RX_S                     2
 #define   QCA8K_PORT_HDR_CTRL_ALL                      2
 #define   QCA8K_PORT_HDR_CTRL_MGMT                     1
 #define   QCA8K_PORT_HDR_CTRL_NONE                     0
+#define QCA8K_REG_SGMII_CTRL                           0x0e0
+#define   QCA8K_SGMII_EN_PLL                           BIT(1)
+#define   QCA8K_SGMII_EN_RX                            BIT(2)
+#define   QCA8K_SGMII_EN_TX                            BIT(3)
+#define   QCA8K_SGMII_EN_SD                            BIT(4)
+#define   QCA8K_SGMII_CLK125M_DELAY                    BIT(7)
+#define   QCA8K_SGMII_MODE_CTRL_MASK                   (BIT(22) | BIT(23))
+#define   QCA8K_SGMII_MODE_CTRL_BASEX                  (0 << 22)
+#define   QCA8K_SGMII_MODE_CTRL_PHY                    (1 << 22)
+#define   QCA8K_SGMII_MODE_CTRL_MAC                    (2 << 22)
 
 /* EEE control registers */
 #define QCA8K_REG_EEE_CTRL                             0x100
@@ -176,6 +191,7 @@ struct qca8k_priv {
        struct device *dev;
        struct dsa_switch_ops ops;
        struct gpio_desc *reset_gpio;
+       unsigned int port_mtu[QCA8K_NUM_PORTS];
 };
 
 struct qca8k_mib_desc {
index ac88cac..993cf3a 100644 (file)
@@ -272,7 +272,7 @@ int rtl8366_init_vlan(struct realtek_smi *smi)
                        /* For the CPU port, make all ports members of this
                         * VLAN.
                         */
-                       mask = GENMASK(smi->num_ports - 1, 0);
+                       mask = GENMASK((int)smi->num_ports - 1, 0);
                else
                        /* For all other ports, enable itself plus the
                         * CPU port.
index fd19775..48f1ff7 100644 (file)
 /* CPU port control reg */
 #define RTL8368RB_CPU_CTRL_REG         0x0061
 #define RTL8368RB_CPU_PORTS_MSK                0x00FF
-/* Enables inserting custom tag length/type 0x8899 */
-#define RTL8368RB_CPU_INSTAG           BIT(15)
+/* Disables inserting custom tag length/type 0x8899 */
+#define RTL8368RB_CPU_NO_TAG           BIT(15)
 
 #define RTL8366RB_SMAR0                        0x0070 /* bits 0..15 */
 #define RTL8366RB_SMAR1                        0x0071 /* bits 16..31 */
@@ -844,16 +844,14 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
        if (ret)
                return ret;
 
-       /* Enable CPU port and enable inserting CPU tag
+       /* Enable CPU port with custom DSA tag 8899.
         *
-        * Disabling RTL8368RB_CPU_INSTAG here will change the behaviour
-        * of the switch totally and it will start talking Realtek RRCP
-        * internally. It is probably possible to experiment with this,
-        * but then the kernel needs to understand and handle RRCP first.
+        * If you set RTL8368RB_CPU_NO_TAG (bit 15) in this register,
+        * the custom tag is turned off.
         */
        ret = regmap_update_bits(smi->map, RTL8368RB_CPU_CTRL_REG,
                                 0xFFFF,
-                                RTL8368RB_CPU_INSTAG | BIT(smi->cpu_port));
+                                BIT(smi->cpu_port));
        if (ret)
                return ret;
 
@@ -967,21 +965,8 @@ static enum dsa_tag_protocol rtl8366_get_tag_protocol(struct dsa_switch *ds,
                                                      int port,
                                                      enum dsa_tag_protocol mp)
 {
-       /* For now, the RTL switches are handled without any custom tags.
-        *
-        * It is possible to turn on "custom tags" by removing the
-        * RTL8368RB_CPU_INSTAG flag when enabling the port but what it
-        * does is unfamiliar to DSA: ethernet frames of type 8899, the Realtek
-        * Remote Control Protocol (RRCP) start to appear on the CPU port of
-        * the device. So this is not the ordinary few extra bytes in the
-        * frame. Instead it appears that the switch starts to talk Realtek
-        * RRCP internally which means a pretty complex RRCP implementation
-        * decoding and responding the RRCP protocol is needed to exploit this.
-        *
-        * The OpenRRCP project (dormant since 2009) have reverse-egineered
-        * parts of the protocol.
-        */
-       return DSA_TAG_PROTO_NONE;
+       /* This switch uses the 4 byte protocol A Realtek DSA tag */
+       return DSA_TAG_PROTO_RTL4_A;
 }
 
 static void rtl8366rb_adjust_link(struct dsa_switch *ds, int port,
index 29ed216..ba70b40 100644 (file)
@@ -262,12 +262,12 @@ int sja1105_static_config_upload(struct sja1105_private *priv);
 int sja1105_inhibit_tx(const struct sja1105_private *priv,
                       unsigned long port_bitmap, bool tx_inhibited);
 
-extern struct sja1105_info sja1105e_info;
-extern struct sja1105_info sja1105t_info;
-extern struct sja1105_info sja1105p_info;
-extern struct sja1105_info sja1105q_info;
-extern struct sja1105_info sja1105r_info;
-extern struct sja1105_info sja1105s_info;
+extern const struct sja1105_info sja1105e_info;
+extern const struct sja1105_info sja1105t_info;
+extern const struct sja1105_info sja1105p_info;
+extern const struct sja1105_info sja1105q_info;
+extern const struct sja1105_info sja1105r_info;
+extern const struct sja1105_info sja1105s_info;
 
 /* From sja1105_clocking.c */
 
index 4471eec..75247f3 100644 (file)
@@ -638,9 +638,7 @@ static size_t sja1105pqrs_cbs_entry_packing(void *buf, void *entry_ptr,
 #define OP_SEARCH      BIT(3)
 
 /* SJA1105E/T: First generation */
-struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
-       [BLK_IDX_SCHEDULE] = {0},
-       [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0},
+const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
        [BLK_IDX_VL_LOOKUP] = {
                .entry_packing = sja1105et_vl_lookup_entry_packing,
                .cmd_packing = sja1105_vl_lookup_cmd_packing,
@@ -649,8 +647,6 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
                .packed_size = SJA1105ET_SJA1105_SIZE_VL_LOOKUP_DYN_CMD,
                .addr = 0x35,
        },
-       [BLK_IDX_VL_POLICING] = {0},
-       [BLK_IDX_VL_FORWARDING] = {0},
        [BLK_IDX_L2_LOOKUP] = {
                .entry_packing = sja1105et_dyn_l2_lookup_entry_packing,
                .cmd_packing = sja1105et_l2_lookup_cmd_packing,
@@ -667,7 +663,6 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
                .packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD,
                .addr = 0x20,
        },
-       [BLK_IDX_L2_POLICING] = {0},
        [BLK_IDX_VLAN_LOOKUP] = {
                .entry_packing = sja1105_vlan_lookup_entry_packing,
                .cmd_packing = sja1105_vlan_lookup_cmd_packing,
@@ -692,9 +687,6 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
                .packed_size = SJA1105ET_SIZE_MAC_CONFIG_DYN_CMD,
                .addr = 0x36,
        },
-       [BLK_IDX_SCHEDULE_PARAMS] = {0},
-       [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0},
-       [BLK_IDX_VL_FORWARDING_PARAMS] = {0},
        [BLK_IDX_L2_LOOKUP_PARAMS] = {
                .entry_packing = sja1105et_l2_lookup_params_entry_packing,
                .cmd_packing = sja1105et_l2_lookup_params_cmd_packing,
@@ -703,8 +695,6 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
                .packed_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
                .addr = 0x38,
        },
-       [BLK_IDX_L2_FORWARDING_PARAMS] = {0},
-       [BLK_IDX_AVB_PARAMS] = {0},
        [BLK_IDX_GENERAL_PARAMS] = {
                .entry_packing = sja1105et_general_params_entry_packing,
                .cmd_packing = sja1105et_general_params_cmd_packing,
@@ -729,13 +719,10 @@ struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
                .packed_size = SJA1105ET_SIZE_CBS_DYN_CMD,
                .addr = 0x2c,
        },
-       [BLK_IDX_XMII_PARAMS] = {0},
 };
 
 /* SJA1105P/Q/R/S: Second generation */
-struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
-       [BLK_IDX_SCHEDULE] = {0},
-       [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0},
+const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
        [BLK_IDX_VL_LOOKUP] = {
                .entry_packing = sja1105_vl_lookup_entry_packing,
                .cmd_packing = sja1105_vl_lookup_cmd_packing,
@@ -744,8 +731,6 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
                .packed_size = SJA1105PQRS_SJA1105_SIZE_VL_LOOKUP_DYN_CMD,
                .addr = 0x47,
        },
-       [BLK_IDX_VL_POLICING] = {0},
-       [BLK_IDX_VL_FORWARDING] = {0},
        [BLK_IDX_L2_LOOKUP] = {
                .entry_packing = sja1105pqrs_dyn_l2_lookup_entry_packing,
                .cmd_packing = sja1105pqrs_l2_lookup_cmd_packing,
@@ -762,7 +747,6 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
                .packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD,
                .addr = 0x24,
        },
-       [BLK_IDX_L2_POLICING] = {0},
        [BLK_IDX_VLAN_LOOKUP] = {
                .entry_packing = sja1105_vlan_lookup_entry_packing,
                .cmd_packing = sja1105_vlan_lookup_cmd_packing,
@@ -787,9 +771,6 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
                .packed_size = SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD,
                .addr = 0x4B,
        },
-       [BLK_IDX_SCHEDULE_PARAMS] = {0},
-       [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0},
-       [BLK_IDX_VL_FORWARDING_PARAMS] = {0},
        [BLK_IDX_L2_LOOKUP_PARAMS] = {
                .entry_packing = sja1105pqrs_l2_lookup_params_entry_packing,
                .cmd_packing = sja1105pqrs_l2_lookup_params_cmd_packing,
@@ -798,7 +779,6 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
                .packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
                .addr = 0x54,
        },
-       [BLK_IDX_L2_FORWARDING_PARAMS] = {0},
        [BLK_IDX_AVB_PARAMS] = {
                .entry_packing = sja1105pqrs_avb_params_entry_packing,
                .cmd_packing = sja1105pqrs_avb_params_cmd_packing,
@@ -831,7 +811,6 @@ struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
                .packed_size = SJA1105PQRS_SIZE_CBS_DYN_CMD,
                .addr = 0x32,
        },
-       [BLK_IDX_XMII_PARAMS] = {0},
 };
 
 /* Provides read access to the settings through the dynamic interface
index 1fc0d13..28d4eb5 100644 (file)
@@ -34,7 +34,7 @@ struct sja1105_mgmt_entry {
        u64 index;
 };
 
-extern struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN];
-extern struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN];
+extern const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN];
+extern const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN];
 
 #endif
index 9ee8968..12e7602 100644 (file)
@@ -31,7 +31,7 @@ static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
                                       struct netlink_ext_ack *extack,
                                       unsigned long cookie, int port,
                                       u64 rate_bytes_per_sec,
-                                      s64 burst)
+                                      u32 burst)
 {
        struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
        struct sja1105_l2_policing_entry *policing;
@@ -79,9 +79,8 @@ static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
 
        policing[rule->bcast_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
                                                          512, 1000000);
-       policing[rule->bcast_pol.sharindx].smax = div_u64(rate_bytes_per_sec *
-                                                         PSCHED_NS2TICKS(burst),
-                                                         PSCHED_TICKS_PER_SEC);
+       policing[rule->bcast_pol.sharindx].smax = burst;
+
        /* TODO: support per-flow MTU */
        policing[rule->bcast_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
                                                    ETH_FCS_LEN;
@@ -103,7 +102,7 @@ static int sja1105_setup_tc_policer(struct sja1105_private *priv,
                                    struct netlink_ext_ack *extack,
                                    unsigned long cookie, int port, int tc,
                                    u64 rate_bytes_per_sec,
-                                   s64 burst)
+                                   u32 burst)
 {
        struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
        struct sja1105_l2_policing_entry *policing;
@@ -152,9 +151,8 @@ static int sja1105_setup_tc_policer(struct sja1105_private *priv,
 
        policing[rule->tc_pol.sharindx].rate = div_u64(rate_bytes_per_sec *
                                                       512, 1000000);
-       policing[rule->tc_pol.sharindx].smax = div_u64(rate_bytes_per_sec *
-                                                      PSCHED_NS2TICKS(burst),
-                                                      PSCHED_TICKS_PER_SEC);
+       policing[rule->tc_pol.sharindx].smax = burst;
+
        /* TODO: support per-flow MTU */
        policing[rule->tc_pol.sharindx].maxlen = VLAN_ETH_FRAME_LEN +
                                                 ETH_FCS_LEN;
@@ -177,7 +175,7 @@ static int sja1105_flower_policer(struct sja1105_private *priv, int port,
                                  unsigned long cookie,
                                  struct sja1105_key *key,
                                  u64 rate_bytes_per_sec,
-                                 s64 burst)
+                                 u32 burst)
 {
        switch (key->type) {
        case SJA1105_KEY_BCAST:
index 789b288..5079e4a 100644 (file)
@@ -3324,9 +3324,7 @@ static int sja1105_port_policer_add(struct dsa_switch *ds, int port,
         */
        policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec,
                                      1000000);
-       policing[port].smax = div_u64(policer->rate_bytes_per_sec *
-                                     PSCHED_NS2TICKS(policer->burst),
-                                     PSCHED_TICKS_PER_SEC);
+       policing[port].smax = policer->burst;
 
        return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
 }
index bc0e47c..1771345 100644 (file)
@@ -891,16 +891,16 @@ void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int port,
 
        mutex_lock(&ptp_data->lock);
 
-       rc = sja1105_ptpclkval_read(priv, &ticks, NULL);
+       rc = sja1105_ptpegr_ts_poll(ds, port, &ts);
        if (rc < 0) {
-               dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
+               dev_err(ds->dev, "timed out polling for tstamp\n");
                kfree_skb(skb);
                goto out;
        }
 
-       rc = sja1105_ptpegr_ts_poll(ds, port, &ts);
+       rc = sja1105_ptpclkval_read(priv, &ticks, NULL);
        if (rc < 0) {
-               dev_err(ds->dev, "timed out polling for tstamp\n");
+               dev_err(ds->dev, "Failed to read PTP clock: %d\n", rc);
                kfree_skb(skb);
                goto out;
        }
index bb52b9c..704dcf1 100644 (file)
@@ -507,7 +507,7 @@ static struct sja1105_regs sja1105pqrs_regs = {
        .ptpsyncts = 0x1F,
 };
 
-struct sja1105_info sja1105e_info = {
+const struct sja1105_info sja1105e_info = {
        .device_id              = SJA1105E_DEVICE_ID,
        .part_no                = SJA1105ET_PART_NO,
        .static_ops             = sja1105e_table_ops,
@@ -523,7 +523,8 @@ struct sja1105_info sja1105e_info = {
        .regs                   = &sja1105et_regs,
        .name                   = "SJA1105E",
 };
-struct sja1105_info sja1105t_info = {
+
+const struct sja1105_info sja1105t_info = {
        .device_id              = SJA1105T_DEVICE_ID,
        .part_no                = SJA1105ET_PART_NO,
        .static_ops             = sja1105t_table_ops,
@@ -539,7 +540,8 @@ struct sja1105_info sja1105t_info = {
        .regs                   = &sja1105et_regs,
        .name                   = "SJA1105T",
 };
-struct sja1105_info sja1105p_info = {
+
+const struct sja1105_info sja1105p_info = {
        .device_id              = SJA1105PR_DEVICE_ID,
        .part_no                = SJA1105P_PART_NO,
        .static_ops             = sja1105p_table_ops,
@@ -556,7 +558,8 @@ struct sja1105_info sja1105p_info = {
        .regs                   = &sja1105pqrs_regs,
        .name                   = "SJA1105P",
 };
-struct sja1105_info sja1105q_info = {
+
+const struct sja1105_info sja1105q_info = {
        .device_id              = SJA1105QS_DEVICE_ID,
        .part_no                = SJA1105Q_PART_NO,
        .static_ops             = sja1105q_table_ops,
@@ -573,7 +576,8 @@ struct sja1105_info sja1105q_info = {
        .regs                   = &sja1105pqrs_regs,
        .name                   = "SJA1105Q",
 };
-struct sja1105_info sja1105r_info = {
+
+const struct sja1105_info sja1105r_info = {
        .device_id              = SJA1105PR_DEVICE_ID,
        .part_no                = SJA1105R_PART_NO,
        .static_ops             = sja1105r_table_ops,
@@ -590,7 +594,8 @@ struct sja1105_info sja1105r_info = {
        .regs                   = &sja1105pqrs_regs,
        .name                   = "SJA1105R",
 };
-struct sja1105_info sja1105s_info = {
+
+const struct sja1105_info sja1105s_info = {
        .device_id              = SJA1105QS_DEVICE_ID,
        .part_no                = SJA1105S_PART_NO,
        .static_ops             = sja1105s_table_ops,
index ff3fe47..139b7b4 100644 (file)
@@ -838,12 +838,7 @@ sja1105_static_config_get_length(const struct sja1105_static_config *config)
 /* Compatibility matrices */
 
 /* SJA1105E: First generation, no TTEthernet */
-struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {
-       [BLK_IDX_SCHEDULE] = {0},
-       [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0},
-       [BLK_IDX_VL_LOOKUP] = {0},
-       [BLK_IDX_VL_POLICING] = {0},
-       [BLK_IDX_VL_FORWARDING] = {0},
+const struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {
        [BLK_IDX_L2_LOOKUP] = {
                .packing = sja1105et_l2_lookup_entry_packing,
                .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
@@ -874,9 +869,6 @@ struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {
                .packed_entry_size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY,
                .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
        },
-       [BLK_IDX_SCHEDULE_PARAMS] = {0},
-       [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0},
-       [BLK_IDX_VL_FORWARDING_PARAMS] = {0},
        [BLK_IDX_L2_LOOKUP_PARAMS] = {
                .packing = sja1105et_l2_lookup_params_entry_packing,
                .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
@@ -916,7 +908,7 @@ struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {
 };
 
 /* SJA1105T: First generation, TTEthernet */
-struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
+const struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
        [BLK_IDX_SCHEDULE] = {
                .packing = sja1105_schedule_entry_packing,
                .unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
@@ -1034,12 +1026,7 @@ struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
 };
 
 /* SJA1105P: Second generation, no TTEthernet, no SGMII */
-struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {
-       [BLK_IDX_SCHEDULE] = {0},
-       [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0},
-       [BLK_IDX_VL_LOOKUP] = {0},
-       [BLK_IDX_VL_POLICING] = {0},
-       [BLK_IDX_VL_FORWARDING] = {0},
+const struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {
        [BLK_IDX_L2_LOOKUP] = {
                .packing = sja1105pqrs_l2_lookup_entry_packing,
                .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
@@ -1070,9 +1057,6 @@ struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {
                .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
                .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
        },
-       [BLK_IDX_SCHEDULE_PARAMS] = {0},
-       [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0},
-       [BLK_IDX_VL_FORWARDING_PARAMS] = {0},
        [BLK_IDX_L2_LOOKUP_PARAMS] = {
                .packing = sja1105pqrs_l2_lookup_params_entry_packing,
                .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
@@ -1112,7 +1096,7 @@ struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {
 };
 
 /* SJA1105Q: Second generation, TTEthernet, no SGMII */
-struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
+const struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
        [BLK_IDX_SCHEDULE] = {
                .packing = sja1105_schedule_entry_packing,
                .unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
@@ -1230,12 +1214,7 @@ struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
 };
 
 /* SJA1105R: Second generation, no TTEthernet, SGMII */
-struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {
-       [BLK_IDX_SCHEDULE] = {0},
-       [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {0},
-       [BLK_IDX_VL_LOOKUP] = {0},
-       [BLK_IDX_VL_POLICING] = {0},
-       [BLK_IDX_VL_FORWARDING] = {0},
+const struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {
        [BLK_IDX_L2_LOOKUP] = {
                .packing = sja1105pqrs_l2_lookup_entry_packing,
                .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
@@ -1266,9 +1245,6 @@ struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {
                .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
                .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
        },
-       [BLK_IDX_SCHEDULE_PARAMS] = {0},
-       [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {0},
-       [BLK_IDX_VL_FORWARDING_PARAMS] = {0},
        [BLK_IDX_L2_LOOKUP_PARAMS] = {
                .packing = sja1105pqrs_l2_lookup_params_entry_packing,
                .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
@@ -1308,7 +1284,7 @@ struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {
 };
 
 /* SJA1105S: Second generation, TTEthernet, SGMII */
-struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = {
+const struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = {
        [BLK_IDX_SCHEDULE] = {
                .packing = sja1105_schedule_entry_packing,
                .unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
index ee0f100..bc76068 100644 (file)
@@ -381,12 +381,12 @@ struct sja1105_static_config {
        struct sja1105_table tables[BLK_IDX_MAX];
 };
 
-extern struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX];
-extern struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX];
-extern struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX];
-extern struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX];
-extern struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX];
-extern struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX];
 
 size_t sja1105_table_header_packing(void *buf, void *hdr, enum packing_op op);
 void
index 3aa1a8b..31d8acf 100644 (file)
@@ -475,8 +475,7 @@ bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
        if (list_empty(&gating_cfg->entries))
                return false;
 
-       dummy = kzalloc(sizeof(struct tc_taprio_sched_entry) * num_entries +
-                       sizeof(struct tc_taprio_qopt_offload), GFP_KERNEL);
+       dummy = kzalloc(struct_size(dummy, entries, num_entries), GFP_KERNEL);
        if (!dummy) {
                NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory");
                return true;
index bdfd6c4..ffc4042 100644 (file)
@@ -7,6 +7,165 @@
 
 #define SJA1105_SIZE_VL_STATUS                 8
 
+/* Insert into the global gate list, sorted by gate action time. */
+static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
+                                    struct sja1105_rule *rule,
+                                    u8 gate_state, s64 entry_time,
+                                    struct netlink_ext_ack *extack)
+{
+       struct sja1105_gate_entry *e;
+       int rc;
+
+       e = kzalloc(sizeof(*e), GFP_KERNEL);
+       if (!e)
+               return -ENOMEM;
+
+       e->rule = rule;
+       e->gate_state = gate_state;
+       e->interval = entry_time;
+
+       if (list_empty(&gating_cfg->entries)) {
+               list_add(&e->list, &gating_cfg->entries);
+       } else {
+               struct sja1105_gate_entry *p;
+
+               list_for_each_entry(p, &gating_cfg->entries, list) {
+                       if (p->interval == e->interval) {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "Gate conflict");
+                               rc = -EBUSY;
+                               goto err;
+                       }
+
+                       if (e->interval < p->interval)
+                               break;
+               }
+               list_add(&e->list, p->list.prev);
+       }
+
+       gating_cfg->num_entries++;
+
+       return 0;
+err:
+       kfree(e);
+       return rc;
+}
+
+/* The gate entries contain absolute times in their e->interval field. Convert
+ * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5").
+ */
+static void
+sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
+                                   u64 cycle_time)
+{
+       struct sja1105_gate_entry *last_e;
+       struct sja1105_gate_entry *e;
+       struct list_head *prev;
+
+       list_for_each_entry(e, &gating_cfg->entries, list) {
+               struct sja1105_gate_entry *p;
+
+               prev = e->list.prev;
+
+               if (prev == &gating_cfg->entries)
+                       continue;
+
+               p = list_entry(prev, struct sja1105_gate_entry, list);
+               p->interval = e->interval - p->interval;
+       }
+       last_e = list_last_entry(&gating_cfg->entries,
+                                struct sja1105_gate_entry, list);
+       last_e->interval = cycle_time - last_e->interval;
+}
+
+static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
+{
+       struct sja1105_gate_entry *e, *n;
+
+       list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
+               list_del(&e->list);
+               kfree(e);
+       }
+}
+
+static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
+                                             struct netlink_ext_ack *extack)
+{
+       struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
+       struct sja1105_rule *rule;
+       s64 max_cycle_time = 0;
+       s64 its_base_time = 0;
+       int i, rc = 0;
+
+       sja1105_free_gating_config(gating_cfg);
+
+       list_for_each_entry(rule, &priv->flow_block.rules, list) {
+               if (rule->type != SJA1105_RULE_VL)
+                       continue;
+               if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+                       continue;
+
+               if (max_cycle_time < rule->vl.cycle_time) {
+                       max_cycle_time = rule->vl.cycle_time;
+                       its_base_time = rule->vl.base_time;
+               }
+       }
+
+       if (!max_cycle_time)
+               return 0;
+
+       dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
+               max_cycle_time, its_base_time);
+
+       gating_cfg->base_time = its_base_time;
+       gating_cfg->cycle_time = max_cycle_time;
+       gating_cfg->num_entries = 0;
+
+       list_for_each_entry(rule, &priv->flow_block.rules, list) {
+               s64 time;
+               s64 rbt;
+
+               if (rule->type != SJA1105_RULE_VL)
+                       continue;
+               if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+                       continue;
+
+               /* Calculate the difference between this gating schedule's
+                * base time, and the base time of the gating schedule with the
+                * longest cycle time. We call it the relative base time (rbt).
+                */
+               rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
+                                      its_base_time);
+               rbt -= its_base_time;
+
+               time = rbt;
+
+               for (i = 0; i < rule->vl.num_entries; i++) {
+                       u8 gate_state = rule->vl.entries[i].gate_state;
+                       s64 entry_time = time;
+
+                       while (entry_time < max_cycle_time) {
+                               rc = sja1105_insert_gate_entry(gating_cfg, rule,
+                                                              gate_state,
+                                                              entry_time,
+                                                              extack);
+                               if (rc)
+                                       goto err;
+
+                               entry_time += rule->vl.cycle_time;
+                       }
+                       time += rule->vl.entries[i].interval;
+               }
+       }
+
+       sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);
+
+       return 0;
+err:
+       sja1105_free_gating_config(gating_cfg);
+       return rc;
+}
+
 /* The switch flow classification core implements TTEthernet, which 'thinks' in
  * terms of Virtual Links (VL), a concept borrowed from ARINC 664 part 7.
  * However it also has one other operating mode (VLLUPFORMAT=0) where it acts
@@ -342,7 +501,9 @@ int sja1105_vl_redirect(struct sja1105_private *priv, int port,
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can only redirect based on DMAC");
                return -EOPNOTSUPP;
-       } else if (key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+       } else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT ||
+                   priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) &&
+                  key->type != SJA1105_KEY_VLAN_AWARE_VL) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can only redirect based on {DMAC, VID, PCP}");
                return -EOPNOTSUPP;
@@ -388,171 +549,19 @@ int sja1105_vl_delete(struct sja1105_private *priv, int port,
                kfree(rule);
        }
 
-       rc = sja1105_init_virtual_links(priv, extack);
+       rc = sja1105_compose_gating_subschedule(priv, extack);
        if (rc)
                return rc;
 
-       return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
-}
-
-/* Insert into the global gate list, sorted by gate action time. */
-static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
-                                    struct sja1105_rule *rule,
-                                    u8 gate_state, s64 entry_time,
-                                    struct netlink_ext_ack *extack)
-{
-       struct sja1105_gate_entry *e;
-       int rc;
-
-       e = kzalloc(sizeof(*e), GFP_KERNEL);
-       if (!e)
-               return -ENOMEM;
-
-       e->rule = rule;
-       e->gate_state = gate_state;
-       e->interval = entry_time;
-
-       if (list_empty(&gating_cfg->entries)) {
-               list_add(&e->list, &gating_cfg->entries);
-       } else {
-               struct sja1105_gate_entry *p;
-
-               list_for_each_entry(p, &gating_cfg->entries, list) {
-                       if (p->interval == e->interval) {
-                               NL_SET_ERR_MSG_MOD(extack,
-                                                  "Gate conflict");
-                               rc = -EBUSY;
-                               goto err;
-                       }
-
-                       if (e->interval < p->interval)
-                               break;
-               }
-               list_add(&e->list, p->list.prev);
-       }
-
-       gating_cfg->num_entries++;
-
-       return 0;
-err:
-       kfree(e);
-       return rc;
-}
-
-/* The gate entries contain absolute times in their e->interval field. Convert
- * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5").
- */
-static void
-sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
-                                   u64 cycle_time)
-{
-       struct sja1105_gate_entry *last_e;
-       struct sja1105_gate_entry *e;
-       struct list_head *prev;
-
-       list_for_each_entry(e, &gating_cfg->entries, list) {
-               struct sja1105_gate_entry *p;
-
-               prev = e->list.prev;
-
-               if (prev == &gating_cfg->entries)
-                       continue;
-
-               p = list_entry(prev, struct sja1105_gate_entry, list);
-               p->interval = e->interval - p->interval;
-       }
-       last_e = list_last_entry(&gating_cfg->entries,
-                                struct sja1105_gate_entry, list);
-       if (last_e->list.prev != &gating_cfg->entries)
-               last_e->interval = cycle_time - last_e->interval;
-}
-
-static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
-{
-       struct sja1105_gate_entry *e, *n;
-
-       list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
-               list_del(&e->list);
-               kfree(e);
-       }
-}
-
-static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
-                                             struct netlink_ext_ack *extack)
-{
-       struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
-       struct sja1105_rule *rule;
-       s64 max_cycle_time = 0;
-       s64 its_base_time = 0;
-       int i, rc = 0;
-
-       list_for_each_entry(rule, &priv->flow_block.rules, list) {
-               if (rule->type != SJA1105_RULE_VL)
-                       continue;
-               if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
-                       continue;
-
-               if (max_cycle_time < rule->vl.cycle_time) {
-                       max_cycle_time = rule->vl.cycle_time;
-                       its_base_time = rule->vl.base_time;
-               }
-       }
-
-       if (!max_cycle_time)
-               return 0;
-
-       dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
-               max_cycle_time, its_base_time);
-
-       sja1105_free_gating_config(gating_cfg);
-
-       gating_cfg->base_time = its_base_time;
-       gating_cfg->cycle_time = max_cycle_time;
-       gating_cfg->num_entries = 0;
-
-       list_for_each_entry(rule, &priv->flow_block.rules, list) {
-               s64 time;
-               s64 rbt;
-
-               if (rule->type != SJA1105_RULE_VL)
-                       continue;
-               if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
-                       continue;
-
-               /* Calculate the difference between this gating schedule's
-                * base time, and the base time of the gating schedule with the
-                * longest cycle time. We call it the relative base time (rbt).
-                */
-               rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
-                                      its_base_time);
-               rbt -= its_base_time;
-
-               time = rbt;
-
-               for (i = 0; i < rule->vl.num_entries; i++) {
-                       u8 gate_state = rule->vl.entries[i].gate_state;
-                       s64 entry_time = time;
-
-                       while (entry_time < max_cycle_time) {
-                               rc = sja1105_insert_gate_entry(gating_cfg, rule,
-                                                              gate_state,
-                                                              entry_time,
-                                                              extack);
-                               if (rc)
-                                       goto err;
-
-                               entry_time += rule->vl.cycle_time;
-                       }
-                       time += rule->vl.entries[i].interval;
-               }
-       }
+       rc = sja1105_init_virtual_links(priv, extack);
+       if (rc)
+               return rc;
 
-       sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);
+       rc = sja1105_init_scheduling(priv);
+       if (rc < 0)
+               return rc;
 
-       return 0;
-err:
-       sja1105_free_gating_config(gating_cfg);
-       return rc;
+       return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
 }
 
 int sja1105_vl_gate(struct sja1105_private *priv, int port,
@@ -588,14 +597,12 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port,
 
        if (priv->vlan_state == SJA1105_VLAN_UNAWARE &&
            key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
-               dev_err(priv->ds->dev, "1: vlan state %d key type %d\n",
-                       priv->vlan_state, key->type);
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can only gate based on DMAC");
                return -EOPNOTSUPP;
-       } else if (key->type != SJA1105_KEY_VLAN_AWARE_VL) {
-               dev_err(priv->ds->dev, "2: vlan state %d key type %d\n",
-                       priv->vlan_state, key->type);
+       } else if ((priv->vlan_state == SJA1105_VLAN_BEST_EFFORT ||
+                   priv->vlan_state == SJA1105_VLAN_FILTERING_FULL) &&
+                  key->type != SJA1105_KEY_VLAN_AWARE_VL) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can only gate based on {DMAC, VID, PCP}");
                return -EOPNOTSUPP;
@@ -771,7 +778,7 @@ int sja1105_vl_stats(struct sja1105_private *priv, int port,
 
        pkts = timingerr + unreleased + lengtherr;
 
-       flow_stats_update(stats, 0, pkts - rule->vl.stats.pkts,
+       flow_stats_update(stats, 0, pkts - rule->vl.stats.pkts, 0,
                          jiffies - rule->vl.stats.lastused,
                          FLOW_ACTION_HW_STATS_IMMEDIATE);
 
index 5e54a57..2a57f33 100644 (file)
@@ -28,7 +28,7 @@
 #define VSC73XX_CMD_PLATFORM_SUBBLOCK_MASK             0xf
 #define VSC73XX_CMD_PLATFORM_REGISTER_SHIFT            2
 
-/**
+/*
  * struct vsc73xx_platform - VSC73xx Platform state container
  */
 struct vsc73xx_platform {
index e73c8fc..81eca4a 100644 (file)
@@ -26,7 +26,7 @@
 #define VSC73XX_CMD_SPI_BLOCK_MASK             0x7
 #define VSC73XX_CMD_SPI_SUBBLOCK_MASK          0xf
 
-/**
+/*
  * struct vsc73xx_spi - VSC73xx SPI state container
  */
 struct vsc73xx_spi {
index 5984b70..741c67e 100644 (file)
@@ -1149,7 +1149,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
 
        print_info = (vortex_debug > 1);
        if (print_info)
-               pr_info("See Documentation/networking/device_drivers/3com/vortex.rst\n");
+               pr_info("See Documentation/networking/device_drivers/ethernet/3com/vortex.rst\n");
 
        pr_info("%s: 3Com %s %s at %p.\n",
               print_name,
@@ -1954,7 +1954,7 @@ vortex_error(struct net_device *dev, int status)
                                   dev->name, tx_status);
                        if (tx_status == 0x82) {
                                pr_err("Probably a duplex mismatch.  See "
-                                               "Documentation/networking/device_drivers/3com/vortex.rst\n");
+                                               "Documentation/networking/device_drivers/ethernet/3com/vortex.rst\n");
                        }
                        dump_tx_ring(dev);
                }
index 074b097..a52a374 100644 (file)
@@ -76,8 +76,8 @@ config VORTEX
          "Hurricane" (3c555/3cSOHO)                           PCI
 
          If you have such a card, say Y here.  More specific information is in
-         <file:Documentation/networking/device_drivers/3com/vortex.rst> and
-         in the comments at the beginning of
+         <file:Documentation/networking/device_drivers/ethernet/3com/vortex.rst>
+         and in the comments at the beginning of
          <file:drivers/net/ethernet/3com/3c59x.c>.
 
          To compile this support as a module, choose M here.
index 5ed33c2..d3b30ba 100644 (file)
@@ -1801,9 +1801,8 @@ typhoon_free_rx_rings(struct typhoon *tp)
 }
 
 static int
-typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
+typhoon_sleep_early(struct typhoon *tp, __le16 events)
 {
-       struct pci_dev *pdev = tp->pdev;
        void __iomem *ioaddr = tp->ioaddr;
        struct cmd_desc xp_cmd;
        int err;
@@ -1832,20 +1831,29 @@ typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
         */
        netif_carrier_off(tp->dev);
 
+       return 0;
+}
+
+static int
+typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
+{
+       int err;
+
+       err = typhoon_sleep_early(tp, events);
+
+       if (err)
+               return err;
+
        pci_enable_wake(tp->pdev, state, 1);
-       pci_disable_device(pdev);
-       return pci_set_power_state(pdev, state);
+       pci_disable_device(tp->pdev);
+       return pci_set_power_state(tp->pdev, state);
 }
 
 static int
 typhoon_wakeup(struct typhoon *tp, int wait_type)
 {
-       struct pci_dev *pdev = tp->pdev;
        void __iomem *ioaddr = tp->ioaddr;
 
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-
        /* Post 2.x.x versions of the Sleep Image require a reset before
         * we can download the Runtime Image. But let's not make users of
         * the old firmware pay for the reset.
@@ -2049,6 +2057,9 @@ typhoon_open(struct net_device *dev)
        if (err)
                goto out;
 
+       pci_set_power_state(tp->pdev, PCI_D0);
+       pci_restore_state(tp->pdev);
+
        err = typhoon_wakeup(tp, WaitSleep);
        if (err < 0) {
                netdev_err(dev, "unable to wakeup device\n");
@@ -2114,11 +2125,10 @@ typhoon_close(struct net_device *dev)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int
-typhoon_resume(struct pci_dev *pdev)
+static int __maybe_unused
+typhoon_resume(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct typhoon *tp = netdev_priv(dev);
 
        /* If we're down, resume when we are upped.
@@ -2144,9 +2154,10 @@ reset:
        return -EBUSY;
 }
 
-static int
-typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused
+typhoon_suspend(struct device *dev_d)
 {
+       struct pci_dev *pdev = to_pci_dev(dev_d);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct typhoon *tp = netdev_priv(dev);
        struct cmd_desc xp_cmd;
@@ -2190,18 +2201,19 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
                goto need_resume;
        }
 
-       if (typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
+       if (typhoon_sleep_early(tp, tp->wol_events) < 0) {
                netdev_err(dev, "unable to put card to sleep\n");
                goto need_resume;
        }
 
+       device_wakeup_enable(dev_d);
+
        return 0;
 
 need_resume:
-       typhoon_resume(pdev);
+       typhoon_resume(dev_d);
        return -EBUSY;
 }
-#endif
 
 static int
 typhoon_test_mmio(struct pci_dev *pdev)
@@ -2533,15 +2545,14 @@ typhoon_remove_one(struct pci_dev *pdev)
        free_netdev(dev);
 }
 
+static SIMPLE_DEV_PM_OPS(typhoon_pm_ops, typhoon_suspend, typhoon_resume);
+
 static struct pci_driver typhoon_driver = {
        .name           = KBUILD_MODNAME,
        .id_table       = typhoon_pci_tbl,
        .probe          = typhoon_init_one,
        .remove         = typhoon_remove_one,
-#ifdef CONFIG_PM
-       .suspend        = typhoon_suspend,
-       .resume         = typhoon_resume,
-#endif
+       .driver.pm      = &typhoon_pm_ops,
 };
 
 static int __init
index 529c728..e522644 100644 (file)
@@ -1,8 +1,10 @@
 /* Generic NS8390 register definitions. */
+
 /* This file is part of Donald Becker's 8390 drivers, and is distributed
-   under the same license. Auto-loading of 8390.o only in v2.2 - Paul G.
-   Some of these names and comments originated from the Crynwr
-   packet drivers, which are distributed under the GPL. */
+ * under the same license. Auto-loading of 8390.o only in v2.2 - Paul G.
+ * Some of these names and comments originated from the Crynwr
+ * packet drivers, which are distributed under the GPL.
+ */
 
 #ifndef _8390_h
 #define _8390_h
@@ -16,9 +18,9 @@
 
 /* The 8390 specific per-packet-header format. */
 struct e8390_pkt_hdr {
-  unsigned char status; /* status */
-  unsigned char next;   /* pointer to next packet. */
-  unsigned short count; /* header + packet length in bytes */
+       unsigned char status; /* status */
+       unsigned char next;   /* pointer to next packet. */
+       unsigned short count; /* header + packet length in bytes */
 };
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -66,18 +68,24 @@ static inline struct net_device *alloc_eip_netdev(void)
 /* You have one of these per-board */
 struct ei_device {
        const char *name;
-       void (*reset_8390)(struct net_device *);
-       void (*get_8390_hdr)(struct net_device *, struct e8390_pkt_hdr *, int);
-       void (*block_output)(struct net_device *, int, const unsigned char *, int);
-       void (*block_input)(struct net_device *, int, struct sk_buff *, int);
+       void (*reset_8390)(struct net_device *dev);
+       void (*get_8390_hdr)(struct net_device *dev,
+                            struct e8390_pkt_hdr *hdr, int ring_page);
+       void (*block_output)(struct net_device *dev, int count,
+                            const unsigned char *buf, int start_page);
+       void (*block_input)(struct net_device *dev, int count,
+                           struct sk_buff *skb, int ring_offset);
        unsigned long rmem_start;
        unsigned long rmem_end;
        void __iomem *mem;
        unsigned char mcfilter[8];
        unsigned open:1;
-       unsigned word16:1;              /* We have the 16-bit (vs 8-bit) version of the card. */
-       unsigned bigendian:1;           /* 16-bit big endian mode. Do NOT */
-                                       /* set this on random 8390 clones! */
+       unsigned word16:1;              /* We have the 16-bit (vs 8-bit)
+                                        * version of the card.
+                                        */
+       unsigned bigendian:1;           /* 16-bit big endian mode. Do NOT
+                                        * set this on random 8390 clones!
+                                        */
        unsigned txing:1;               /* Transmit Active */
        unsigned irqlock:1;             /* 8390's intrs disabled when '1'. */
        unsigned dmaing:1;              /* Remote DMA Active */
@@ -115,12 +123,16 @@ struct ei_device {
 #define E8390_RXCONFIG         (ei_status.rxcr_base | 0x04)
 #define E8390_RXOFF            (ei_status.rxcr_base | 0x20)
 #else
-#define E8390_RXCONFIG         0x4     /* EN0_RXCR: broadcasts, no multicast,errors */
-#define E8390_RXOFF            0x20    /* EN0_RXCR: Accept no packets */
+/* EN0_RXCR: broadcasts, no multicast,errors */
+#define E8390_RXCONFIG         0x4
+/* EN0_RXCR: Accept no packets */
+#define E8390_RXOFF            0x20
 #endif
 
-#define E8390_TXCONFIG         0x00    /* EN0_TXCR: Normal transmit mode */
-#define E8390_TXOFF            0x02    /* EN0_TXCR: Transmitter off */
+/* EN0_TXCR: Normal transmit mode */
+#define E8390_TXCONFIG         0x00
+/* EN0_TXCR: Transmitter off */
+#define E8390_TXOFF            0x02
 
 
 /*  Register accessed at EN_CMD, the 8390 base addr.  */
@@ -134,17 +146,16 @@ struct ei_device {
 #define E8390_PAGE1    0x40    /* using the two high-order bits */
 #define E8390_PAGE2    0x80    /* Page 3 is invalid. */
 
-/*
- *     Only generate indirect loads given a machine that needs them.
- *      - removed AMIGA_PCMCIA from this list, handled as ISA io now
- *     - the _p for generates no delay by default 8390p.c overrides this.
+/* Only generate indirect loads given a machine that needs them.
+ * - removed AMIGA_PCMCIA from this list, handled as ISA io now
+ * - the _p for generates no delay by default 8390p.c overrides this.
  */
 
 #ifndef ei_inb
 #define ei_inb(_p)     inb(_p)
-#define ei_outb(_v,_p) outb(_v,_p)
+#define ei_outb(_v, _p)        outb(_v, _p)
 #define ei_inb_p(_p)   inb(_p)
-#define ei_outb_p(_v,_p) outb(_v,_p)
+#define ei_outb_p(_v, _p) outb(_v, _p)
 #endif
 
 #ifndef EI_SHIFT
@@ -153,9 +164,9 @@ struct ei_device {
 
 #define E8390_CMD      EI_SHIFT(0x00)  /* The command register (for all pages) */
 /* Page 0 register offsets. */
-#define EN0_CLDALO     EI_SHIFT(0x01)  /* Low byte of current local dma addr  RD */
+#define EN0_CLDALO     EI_SHIFT(0x01)  /* Low byte of current local dma addr RD */
 #define EN0_STARTPG    EI_SHIFT(0x01)  /* Starting page of ring bfr WR */
-#define EN0_CLDAHI     EI_SHIFT(0x02)  /* High byte of current local dma addr  RD */
+#define EN0_CLDAHI     EI_SHIFT(0x02)  /* High byte of current local dma addr RD */
 #define EN0_STOPPG     EI_SHIFT(0x02)  /* Ending page +1 of ring bfr WR */
 #define EN0_BOUNDARY   EI_SHIFT(0x03)  /* Boundary page of ring bfr RD WR */
 #define EN0_TSR                EI_SHIFT(0x04)  /* Transmit status reg RD */
index 77d78b4..bc6edb3 100644 (file)
@@ -62,7 +62,10 @@ static int options[MAX_UNITS];
 
 #include "8390.h"
 
-static u32 ne2k_msg_enable;
+static int ne2k_msg_enable;
+
+static const int default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+                                     NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR);
 
 #if defined(__powerpc__)
 #define inl_le(addr)  le32_to_cpu(inl(addr))
@@ -74,7 +77,7 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
 MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
 
-module_param_named(msg_enable, ne2k_msg_enable, uint, 0444);
+module_param_named(msg_enable, ne2k_msg_enable, int, 0444);
 module_param_array(options, int, NULL, 0);
 module_param_array(full_duplex, int, NULL, 0);
 MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
@@ -282,7 +285,7 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
        }
        dev->netdev_ops = &ne2k_netdev_ops;
        ei_local = netdev_priv(dev);
-       ei_local->msg_enable = ne2k_msg_enable;
+       ei_local->msg_enable = netif_msg_init(ne2k_msg_enable, default_msg_level);
 
        SET_NETDEV_DEV(dev, &pdev->dev);
 
@@ -699,30 +702,18 @@ static void ne2k_pci_remove_one(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
-#ifdef CONFIG_PM
-static int ne2k_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused ne2k_pci_suspend(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
 
        netif_device_detach(dev);
-       pci_save_state(pdev);
-       pci_disable_device(pdev);
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
 
        return 0;
 }
 
-static int ne2k_pci_resume(struct pci_dev *pdev)
+static int __maybe_unused ne2k_pci_resume(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
-       int rc;
-
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-
-       rc = pci_enable_device(pdev);
-       if (rc)
-               return rc;
+       struct net_device *dev = dev_get_drvdata(dev_d);
 
        NS8390_init(dev, 1);
        netif_device_attach(dev);
@@ -730,19 +721,14 @@ static int ne2k_pci_resume(struct pci_dev *pdev)
        return 0;
 }
 
-#endif /* CONFIG_PM */
-
+static SIMPLE_DEV_PM_OPS(ne2k_pci_pm_ops, ne2k_pci_suspend, ne2k_pci_resume);
 
 static struct pci_driver ne2k_driver = {
        .name           = DRV_NAME,
        .probe          = ne2k_pci_init_one,
        .remove         = ne2k_pci_remove_one,
        .id_table       = ne2k_pci_tbl,
-#ifdef CONFIG_PM
-       .suspend        = ne2k_pci_suspend,
-       .resume         = ne2k_pci_resume,
-#endif
-
+       .driver.pm      = &ne2k_pci_pm_ops,
 };
 
 
index a64191f..ba0055b 100644 (file)
@@ -1984,28 +1984,21 @@ static int netdev_close(struct net_device *dev)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int starfire_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused starfire_suspend(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
 
        if (netif_running(dev)) {
                netif_device_detach(dev);
                netdev_close(dev);
        }
 
-       pci_save_state(pdev);
-       pci_set_power_state(pdev, pci_choose_state(pdev,state));
-
        return 0;
 }
 
-static int starfire_resume(struct pci_dev *pdev)
+static int __maybe_unused starfire_resume(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
-
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
 
        if (netif_running(dev)) {
                netdev_open(dev);
@@ -2014,8 +2007,6 @@ static int starfire_resume(struct pci_dev *pdev)
 
        return 0;
 }
-#endif /* CONFIG_PM */
-
 
 static void starfire_remove_one(struct pci_dev *pdev)
 {
@@ -2040,15 +2031,13 @@ static void starfire_remove_one(struct pci_dev *pdev)
        free_netdev(dev);                       /* Will also free np!! */
 }
 
+static SIMPLE_DEV_PM_OPS(starfire_pm_ops, starfire_suspend, starfire_resume);
 
 static struct pci_driver starfire_driver = {
        .name           = DRV_NAME,
        .probe          = starfire_init_one,
        .remove         = starfire_remove_one,
-#ifdef CONFIG_PM
-       .suspend        = starfire_suspend,
-       .resume         = starfire_resume,
-#endif /* CONFIG_PM */
+       .driver.pm      = &starfire_pm_ops,
        .id_table       = starfire_pci_tbl,
 };
 
index bf54611..9c5891b 100644 (file)
@@ -8,7 +8,7 @@
  * available in the GRLIB VHDL IP core library.
  *
  * Full documentation of both cores can be found here:
- * http://www.gaisler.com/products/grlib/grip.pdf
+ * https://www.gaisler.com/products/grlib/grip.pdf
  *
  * The Gigabit version supports scatter/gather DMA, any alignment of
  * buffers and checksum offloading.
index 865892c..41f8821 100644 (file)
@@ -950,7 +950,6 @@ static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
        u32 hash2 = 0;
        u32 hash3 = 0;
        u32 hash4 = 0;
-       u32 pm_csr;
 
        /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
         * the multi-cast LIST.  If it is NOT specified, (and "ALL" is not
@@ -984,7 +983,6 @@ static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
        }
 
        /* Write out the new hash to the device */
-       pm_csr = readl(&adapter->regs->global.pm_csr);
        if (!et1310_in_phy_coma(adapter)) {
                writel(hash1, &rxmac->multi_hash1);
                writel(hash2, &rxmac->multi_hash2);
@@ -999,7 +997,6 @@ static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
        u32 uni_pf1;
        u32 uni_pf2;
        u32 uni_pf3;
-       u32 pm_csr;
 
        /* Set up unicast packet filter reg 3 to be the first two octets of
         * the MAC address for both address
@@ -1025,7 +1022,6 @@ static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
                  (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) |
                   adapter->addr[5];
 
-       pm_csr = readl(&adapter->regs->global.pm_csr);
        if (!et1310_in_phy_coma(adapter)) {
                writel(uni_pf1, &rxmac->uni_pf_addr1);
                writel(uni_pf2, &rxmac->uni_pf_addr2);
@@ -3443,12 +3439,9 @@ static irqreturn_t et131x_isr(int irq, void *dev_id)
                 * send a pause packet, otherwise just exit
                 */
                if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) {
-                       u32 pm_csr;
-
                        /* Tell the device to send a pause packet via the back
                         * pressure register (bp req and bp xon/xoff)
                         */
-                       pm_csr = readl(&iomem->global.pm_csr);
                        if (!et1310_in_phy_coma(adapter))
                                writel(3, &iomem->txmac.bp_ctrl);
                }
index 5d192d5..ac86fca 100644 (file)
@@ -642,9 +642,8 @@ static void acenic_remove_one(struct pci_dev *pdev)
 
                        ringp = &ap->skb->rx_std_skbuff[i];
                        mapping = dma_unmap_addr(ringp, mapping);
-                       pci_unmap_page(ap->pdev, mapping,
-                                      ACE_STD_BUFSIZE,
-                                      PCI_DMA_FROMDEVICE);
+                       dma_unmap_page(&ap->pdev->dev, mapping,
+                                      ACE_STD_BUFSIZE, DMA_FROM_DEVICE);
 
                        ap->rx_std_ring[i].size = 0;
                        ap->skb->rx_std_skbuff[i].skb = NULL;
@@ -662,9 +661,9 @@ static void acenic_remove_one(struct pci_dev *pdev)
 
                                ringp = &ap->skb->rx_mini_skbuff[i];
                                mapping = dma_unmap_addr(ringp,mapping);
-                               pci_unmap_page(ap->pdev, mapping,
+                               dma_unmap_page(&ap->pdev->dev, mapping,
                                               ACE_MINI_BUFSIZE,
-                                              PCI_DMA_FROMDEVICE);
+                                              DMA_FROM_DEVICE);
 
                                ap->rx_mini_ring[i].size = 0;
                                ap->skb->rx_mini_skbuff[i].skb = NULL;
@@ -681,9 +680,8 @@ static void acenic_remove_one(struct pci_dev *pdev)
 
                        ringp = &ap->skb->rx_jumbo_skbuff[i];
                        mapping = dma_unmap_addr(ringp, mapping);
-                       pci_unmap_page(ap->pdev, mapping,
-                                      ACE_JUMBO_BUFSIZE,
-                                      PCI_DMA_FROMDEVICE);
+                       dma_unmap_page(&ap->pdev->dev, mapping,
+                                      ACE_JUMBO_BUFSIZE, DMA_FROM_DEVICE);
 
                        ap->rx_jumbo_ring[i].size = 0;
                        ap->skb->rx_jumbo_skbuff[i].skb = NULL;
@@ -713,8 +711,8 @@ static void ace_free_descriptors(struct net_device *dev)
                         RX_JUMBO_RING_ENTRIES +
                         RX_MINI_RING_ENTRIES +
                         RX_RETURN_RING_ENTRIES));
-               pci_free_consistent(ap->pdev, size, ap->rx_std_ring,
-                                   ap->rx_ring_base_dma);
+               dma_free_coherent(&ap->pdev->dev, size, ap->rx_std_ring,
+                                 ap->rx_ring_base_dma);
                ap->rx_std_ring = NULL;
                ap->rx_jumbo_ring = NULL;
                ap->rx_mini_ring = NULL;
@@ -722,31 +720,30 @@ static void ace_free_descriptors(struct net_device *dev)
        }
        if (ap->evt_ring != NULL) {
                size = (sizeof(struct event) * EVT_RING_ENTRIES);
-               pci_free_consistent(ap->pdev, size, ap->evt_ring,
-                                   ap->evt_ring_dma);
+               dma_free_coherent(&ap->pdev->dev, size, ap->evt_ring,
+                                 ap->evt_ring_dma);
                ap->evt_ring = NULL;
        }
        if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) {
                size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
-               pci_free_consistent(ap->pdev, size, ap->tx_ring,
-                                   ap->tx_ring_dma);
+               dma_free_coherent(&ap->pdev->dev, size, ap->tx_ring,
+                                 ap->tx_ring_dma);
        }
        ap->tx_ring = NULL;
 
        if (ap->evt_prd != NULL) {
-               pci_free_consistent(ap->pdev, sizeof(u32),
-                                   (void *)ap->evt_prd, ap->evt_prd_dma);
+               dma_free_coherent(&ap->pdev->dev, sizeof(u32),
+                                 (void *)ap->evt_prd, ap->evt_prd_dma);
                ap->evt_prd = NULL;
        }
        if (ap->rx_ret_prd != NULL) {
-               pci_free_consistent(ap->pdev, sizeof(u32),
-                                   (void *)ap->rx_ret_prd,
-                                   ap->rx_ret_prd_dma);
+               dma_free_coherent(&ap->pdev->dev, sizeof(u32),
+                                 (void *)ap->rx_ret_prd, ap->rx_ret_prd_dma);
                ap->rx_ret_prd = NULL;
        }
        if (ap->tx_csm != NULL) {
-               pci_free_consistent(ap->pdev, sizeof(u32),
-                                   (void *)ap->tx_csm, ap->tx_csm_dma);
+               dma_free_coherent(&ap->pdev->dev, sizeof(u32),
+                                 (void *)ap->tx_csm, ap->tx_csm_dma);
                ap->tx_csm = NULL;
        }
 }
@@ -763,8 +760,8 @@ static int ace_allocate_descriptors(struct net_device *dev)
                 RX_MINI_RING_ENTRIES +
                 RX_RETURN_RING_ENTRIES));
 
-       ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size,
-                                              &ap->rx_ring_base_dma);
+       ap->rx_std_ring = dma_alloc_coherent(&ap->pdev->dev, size,
+                                            &ap->rx_ring_base_dma, GFP_KERNEL);
        if (ap->rx_std_ring == NULL)
                goto fail;
 
@@ -774,7 +771,8 @@ static int ace_allocate_descriptors(struct net_device *dev)
 
        size = (sizeof(struct event) * EVT_RING_ENTRIES);
 
-       ap->evt_ring = pci_alloc_consistent(ap->pdev, size, &ap->evt_ring_dma);
+       ap->evt_ring = dma_alloc_coherent(&ap->pdev->dev, size,
+                                         &ap->evt_ring_dma, GFP_KERNEL);
 
        if (ap->evt_ring == NULL)
                goto fail;
@@ -786,25 +784,25 @@ static int ace_allocate_descriptors(struct net_device *dev)
        if (!ACE_IS_TIGON_I(ap)) {
                size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES);
 
-               ap->tx_ring = pci_alloc_consistent(ap->pdev, size,
-                                                  &ap->tx_ring_dma);
+               ap->tx_ring = dma_alloc_coherent(&ap->pdev->dev, size,
+                                                &ap->tx_ring_dma, GFP_KERNEL);
 
                if (ap->tx_ring == NULL)
                        goto fail;
        }
 
-       ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
-                                          &ap->evt_prd_dma);
+       ap->evt_prd = dma_alloc_coherent(&ap->pdev->dev, sizeof(u32),
+                                        &ap->evt_prd_dma, GFP_KERNEL);
        if (ap->evt_prd == NULL)
                goto fail;
 
-       ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32),
-                                             &ap->rx_ret_prd_dma);
+       ap->rx_ret_prd = dma_alloc_coherent(&ap->pdev->dev, sizeof(u32),
+                                           &ap->rx_ret_prd_dma, GFP_KERNEL);
        if (ap->rx_ret_prd == NULL)
                goto fail;
 
-       ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32),
-                                         &ap->tx_csm_dma);
+       ap->tx_csm = dma_alloc_coherent(&ap->pdev->dev, sizeof(u32),
+                                       &ap->tx_csm_dma, GFP_KERNEL);
        if (ap->tx_csm == NULL)
                goto fail;
 
@@ -830,8 +828,8 @@ static void ace_init_cleanup(struct net_device *dev)
        ace_free_descriptors(dev);
 
        if (ap->info)
-               pci_free_consistent(ap->pdev, sizeof(struct ace_info),
-                                   ap->info, ap->info_dma);
+               dma_free_coherent(&ap->pdev->dev, sizeof(struct ace_info),
+                                 ap->info, ap->info_dma);
        kfree(ap->skb);
        kfree(ap->trace_buf);
 
@@ -1129,9 +1127,9 @@ static int ace_init(struct net_device *dev)
        /*
         * Configure DMA attributes.
         */
-       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                ap->pci_using_dac = 1;
-       } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+       } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
                ap->pci_using_dac = 0;
        } else {
                ecode = -ENODEV;
@@ -1143,8 +1141,8 @@ static int ace_init(struct net_device *dev)
         * and the control blocks for the transmit and receive rings
         * as they need to be setup once and for all.
         */
-       if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info),
-                                         &ap->info_dma))) {
+       if (!(info = dma_alloc_coherent(&ap->pdev->dev, sizeof(struct ace_info),
+                                       &ap->info_dma, GFP_KERNEL))) {
                ecode = -EAGAIN;
                goto init_error;
        }
@@ -1153,7 +1151,7 @@ static int ace_init(struct net_device *dev)
        /*
         * Get the memory for the skb rings.
         */
-       if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
+       if (!(ap->skb = kzalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
                ecode = -EAGAIN;
                goto init_error;
        }
@@ -1174,9 +1172,6 @@ static int ace_init(struct net_device *dev)
        ap->last_mini_rx = 0;
 #endif
 
-       memset(ap->info, 0, sizeof(struct ace_info));
-       memset(ap->skb, 0, sizeof(struct ace_skb));
-
        ecode = ace_load_firmware(dev);
        if (ecode)
                goto init_error;
@@ -1646,10 +1641,10 @@ static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs)
                if (!skb)
                        break;
 
-               mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
+               mapping = dma_map_page(&ap->pdev->dev,
+                                      virt_to_page(skb->data),
                                       offset_in_page(skb->data),
-                                      ACE_STD_BUFSIZE,
-                                      PCI_DMA_FROMDEVICE);
+                                      ACE_STD_BUFSIZE, DMA_FROM_DEVICE);
                ap->skb->rx_std_skbuff[idx].skb = skb;
                dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
                                   mapping, mapping);
@@ -1707,10 +1702,10 @@ static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs)
                if (!skb)
                        break;
 
-               mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
+               mapping = dma_map_page(&ap->pdev->dev,
+                                      virt_to_page(skb->data),
                                       offset_in_page(skb->data),
-                                      ACE_MINI_BUFSIZE,
-                                      PCI_DMA_FROMDEVICE);
+                                      ACE_MINI_BUFSIZE, DMA_FROM_DEVICE);
                ap->skb->rx_mini_skbuff[idx].skb = skb;
                dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
                                   mapping, mapping);
@@ -1763,10 +1758,10 @@ static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs)
                if (!skb)
                        break;
 
-               mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
+               mapping = dma_map_page(&ap->pdev->dev,
+                                      virt_to_page(skb->data),
                                       offset_in_page(skb->data),
-                                      ACE_JUMBO_BUFSIZE,
-                                      PCI_DMA_FROMDEVICE);
+                                      ACE_JUMBO_BUFSIZE, DMA_FROM_DEVICE);
                ap->skb->rx_jumbo_skbuff[idx].skb = skb;
                dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
                                   mapping, mapping);
@@ -1977,10 +1972,8 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
 
                skb = rip->skb;
                rip->skb = NULL;
-               pci_unmap_page(ap->pdev,
-                              dma_unmap_addr(rip, mapping),
-                              mapsize,
-                              PCI_DMA_FROMDEVICE);
+               dma_unmap_page(&ap->pdev->dev, dma_unmap_addr(rip, mapping),
+                              mapsize, DMA_FROM_DEVICE);
                skb_put(skb, retdesc->size);
 
                /*
@@ -2046,9 +2039,10 @@ static inline void ace_tx_int(struct net_device *dev,
                skb = info->skb;
 
                if (dma_unmap_len(info, maplen)) {
-                       pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
+                       dma_unmap_page(&ap->pdev->dev,
+                                      dma_unmap_addr(info, mapping),
                                       dma_unmap_len(info, maplen),
-                                      PCI_DMA_TODEVICE);
+                                      DMA_TO_DEVICE);
                        dma_unmap_len_set(info, maplen, 0);
                }
 
@@ -2337,9 +2331,10 @@ static int ace_close(struct net_device *dev)
                        } else
                                memset(ap->tx_ring + i, 0,
                                       sizeof(struct tx_desc));
-                       pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
+                       dma_unmap_page(&ap->pdev->dev,
+                                      dma_unmap_addr(info, mapping),
                                       dma_unmap_len(info, maplen),
-                                      PCI_DMA_TODEVICE);
+                                      DMA_TO_DEVICE);
                        dma_unmap_len_set(info, maplen, 0);
                }
                if (skb) {
@@ -2369,9 +2364,9 @@ ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
        dma_addr_t mapping;
        struct tx_ring_info *info;
 
-       mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
-                              offset_in_page(skb->data),
-                              skb->len, PCI_DMA_TODEVICE);
+       mapping = dma_map_page(&ap->pdev->dev, virt_to_page(skb->data),
+                              offset_in_page(skb->data), skb->len,
+                              DMA_TO_DEVICE);
 
        info = ap->skb->tx_skbuff + idx;
        info->skb = tail;
index 336742f..b818a16 100644 (file)
@@ -491,6 +491,36 @@ enum ena_admin_llq_stride_ctrl {
        ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY          = 2,
 };
 
+enum ena_admin_accel_mode_feat {
+       ENA_ADMIN_DISABLE_META_CACHING              = 0,
+       ENA_ADMIN_LIMIT_TX_BURST                    = 1,
+};
+
+struct ena_admin_accel_mode_get {
+       /* bit field of enum ena_admin_accel_mode_feat */
+       u16 supported_flags;
+
+       /* maximum burst size between two doorbells. The size is in bytes */
+       u16 max_tx_burst_size;
+};
+
+struct ena_admin_accel_mode_set {
+       /* bit field of enum ena_admin_accel_mode_feat */
+       u16 enabled_flags;
+
+       u16 reserved;
+};
+
+struct ena_admin_accel_mode_req {
+       union {
+               u32 raw[2];
+
+               struct ena_admin_accel_mode_get get;
+
+               struct ena_admin_accel_mode_set set;
+       } u;
+};
+
 struct ena_admin_feature_llq_desc {
        u32 max_llq_num;
 
@@ -536,10 +566,13 @@ struct ena_admin_feature_llq_desc {
        /* the stride control the driver selected to use */
        u16 descriptors_stride_ctrl_enabled;
 
-       /* Maximum size in bytes taken by llq entries in a single tx burst.
-        * Set to 0 when there is no such limit.
+       /* reserved */
+       u32 reserved1;
+
+       /* accelerated low latency queues requirement. driver needs to
+        * support those requirements in order to use accelerated llq
         */
-       u32 max_tx_burst_size;
+       struct ena_admin_accel_mode_req accel_mode;
 };
 
 struct ena_admin_queue_ext_feature_fields {
@@ -816,7 +849,9 @@ struct ena_admin_host_info {
        /* 0 : reserved
         * 1 : rx_offset
         * 2 : interrupt_moderation
-        * 31:3 : reserved
+        * 3 : rx_buf_mirroring
+        * 4 : rss_configurable_function_key
+        * 31:5 : reserved
         */
        u32 driver_supported_features;
 };
@@ -1129,6 +1164,10 @@ struct ena_admin_ena_mmio_req_read_less_resp {
 #define ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK                  BIT(1)
 #define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT      2
 #define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK       BIT(2)
+#define ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_SHIFT          3
+#define ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK           BIT(3)
+#define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_SHIFT 4
+#define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK BIT(4)
 
 /* aenq_common_desc */
 #define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK               BIT(0)
index 432f143..435bf05 100644 (file)
@@ -403,6 +403,8 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
                       0x0, io_sq->llq_info.desc_list_entry_size);
                io_sq->llq_buf_ctrl.descs_left_in_line =
                        io_sq->llq_info.descs_num_before_header;
+               io_sq->disable_meta_caching =
+                       io_sq->llq_info.disable_meta_caching;
 
                if (io_sq->llq_info.max_entries_in_tx_burst > 0)
                        io_sq->entries_in_tx_burst_left =
@@ -626,6 +628,10 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev)
        cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
        cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
 
+       cmd.u.llq.accel_mode.u.set.enabled_flags =
+               BIT(ENA_ADMIN_DISABLE_META_CACHING) |
+               BIT(ENA_ADMIN_LIMIT_TX_BURST);
+
        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
@@ -643,6 +649,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
                                   struct ena_llq_configurations *llq_default_cfg)
 {
        struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+       struct ena_admin_accel_mode_get llq_accel_mode_get;
        u16 supported_feat;
        int rc;
 
@@ -742,9 +749,17 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
                       llq_default_cfg->llq_num_decs_before_header,
                       supported_feat, llq_info->descs_num_before_header);
        }
+       /* Check for accelerated queue supported */
+       llq_accel_mode_get = llq_features->accel_mode.u.get;
+
+       llq_info->disable_meta_caching =
+               !!(llq_accel_mode_get.supported_flags &
+                  BIT(ENA_ADMIN_DISABLE_META_CACHING));
 
-       llq_info->max_entries_in_tx_burst =
-               (u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);
+       if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
+               llq_info->max_entries_in_tx_burst =
+                       llq_accel_mode_get.max_tx_burst_size /
+                       llq_default_cfg->llq_ring_entry_size_value;
 
        rc = ena_com_set_llq(ena_dev);
        if (rc)
index bc187ad..4287d47 100644 (file)
@@ -127,6 +127,7 @@ struct ena_com_llq_info {
        u16 descs_num_before_header;
        u16 descs_per_entry;
        u16 max_entries_in_tx_burst;
+       bool disable_meta_caching;
 };
 
 struct ena_com_io_cq {
@@ -189,6 +190,8 @@ struct ena_com_io_sq {
        enum queue_direction direction;
        enum ena_admin_placement_policy_type mem_queue_type;
 
+       bool disable_meta_caching;
+
        u32 msix_vector;
        struct ena_com_tx_meta cached_tx_meta;
        struct ena_com_llq_info llq_info;
@@ -230,11 +233,11 @@ struct ena_com_admin_sq {
 };
 
 struct ena_com_stats_admin {
-       u32 aborted_cmd;
-       u32 submitted_cmd;
-       u32 completed_cmd;
-       u32 out_of_space;
-       u32 no_completion;
+       u64 aborted_cmd;
+       u64 submitted_cmd;
+       u64 completed_cmd;
+       u64 out_of_space;
+       u64 no_completion;
 };
 
 struct ena_com_admin_queue {
index ec8ea25..ccd4405 100644 (file)
@@ -285,11 +285,10 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
        return count;
 }
 
-static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
-                                                       struct ena_com_tx_ctx *ena_tx_ctx)
+static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
+                              struct ena_com_tx_meta *ena_meta)
 {
        struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
-       struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
 
        meta_desc = get_sq_desc(io_sq);
        memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));
@@ -309,12 +308,13 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
 
        /* Extended meta desc */
        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
-       meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
        meta_desc->len_ctrl |= (io_sq->phase <<
                ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
 
        meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
+       meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+
        meta_desc->word2 |= ena_meta->l3_hdr_len &
                ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
        meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
@@ -325,13 +325,36 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
                ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
                ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
 
-       meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+       return ena_com_sq_update_tail(io_sq);
+}
+
+static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+                                                struct ena_com_tx_ctx *ena_tx_ctx,
+                                                bool *have_meta)
+{
+       struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
 
-       /* Cached the meta desc */
-       memcpy(&io_sq->cached_tx_meta, ena_meta,
-              sizeof(struct ena_com_tx_meta));
+       /* When disable meta caching is set, don't bother to save the meta and
+        * compare it to the stored version, just create the meta
+        */
+       if (io_sq->disable_meta_caching) {
+               if (unlikely(!ena_tx_ctx->meta_valid))
+                       return -EINVAL;
 
-       return ena_com_sq_update_tail(io_sq);
+               *have_meta = true;
+               return ena_com_create_meta(io_sq, ena_meta);
+       }
+
+       if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
+               *have_meta = true;
+               /* Cache the meta desc */
+               memcpy(&io_sq->cached_tx_meta, ena_meta,
+                      sizeof(struct ena_com_tx_meta));
+               return ena_com_create_meta(io_sq, ena_meta);
+       }
+
+       *have_meta = false;
+       return 0;
 }
 
 static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
@@ -402,12 +425,10 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
        if (unlikely(rc))
                return rc;
 
-       have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
-                       ena_tx_ctx);
-       if (have_meta) {
-               rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
-               if (unlikely(rc))
-                       return rc;
+       rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
+       if (unlikely(rc)) {
+               pr_err("failed to create and store tx meta desc\n");
+               return rc;
        }
 
        /* If the caller doesn't want to send packets */
index 8b1afd3..b6592cb 100644 (file)
@@ -157,7 +157,8 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
        llq_info = &io_sq->llq_info;
        num_descs = ena_tx_ctx->num_bufs;
 
-       if (unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
+       if (llq_info->disable_meta_caching ||
+           unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
                ++num_descs;
 
        if (num_descs > llq_info->descs_num_before_header) {
index e340b65..430275b 100644 (file)
@@ -164,13 +164,13 @@ static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
 static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
 {
        const struct ena_stats *ena_stats;
-       u32 *ptr;
+       u64 *ptr;
        int i;
 
        for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
                ena_stats = &ena_stats_ena_com_strings[i];
 
-               ptr = (u32 *)((uintptr_t)&adapter->ena_dev->admin_queue.stats +
+               ptr = (u64 *)((uintptr_t)&adapter->ena_dev->admin_queue.stats +
                        (uintptr_t)ena_stats->stat_offset);
 
                *(*data)++ = *ptr;
index dda4b8f..6478c1e 100644 (file)
@@ -307,7 +307,7 @@ static int ena_xdp_xmit_buff(struct net_device *dev,
                             struct ena_rx_buffer *rx_info)
 {
        struct ena_adapter *adapter = netdev_priv(dev);
-       struct ena_com_tx_ctx ena_tx_ctx = {0};
+       struct ena_com_tx_ctx ena_tx_ctx = {};
        struct ena_tx_buffer *tx_info;
        struct ena_ring *xdp_ring;
        u16 next_to_use, req_id;
@@ -655,6 +655,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter,
                txr->sgl_size = adapter->max_tx_sgl_size;
                txr->smoothed_interval =
                        ena_com_get_nonadaptive_moderation_interval_tx(ena_dev);
+               txr->disable_meta_caching = adapter->disable_meta_caching;
 
                /* Don't init RX queues for xdp queues */
                if (!ENA_IS_XDP_INDEX(adapter, i)) {
@@ -959,8 +960,11 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
                return -ENOMEM;
        }
 
+       /* To enable NIC-side port-mirroring, AKA SPAN port,
+        * we make the buffer readable from the nic as well
+        */
        dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
-                          DMA_FROM_DEVICE);
+                          DMA_BIDIRECTIONAL);
        if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
                u64_stats_update_begin(&rx_ring->syncp);
                rx_ring->rx_stats.dma_mapping_err++;
@@ -993,10 +997,9 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
                return;
        }
 
-       dma_unmap_page(rx_ring->dev,
-                      ena_buf->paddr - rx_ring->rx_headroom,
+       dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom,
                       ENA_PAGE_SIZE,
-                      DMA_FROM_DEVICE);
+                      DMA_BIDIRECTIONAL);
 
        __free_page(page);
        rx_info->page = NULL;
@@ -1431,7 +1434,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
        do {
                dma_unmap_page(rx_ring->dev,
                               dma_unmap_addr(&rx_info->ena_buf, paddr),
-                              ENA_PAGE_SIZE, DMA_FROM_DEVICE);
+                              ENA_PAGE_SIZE, DMA_BIDIRECTIONAL);
 
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
                                rx_info->page_offset, len, ENA_PAGE_SIZE);
@@ -1913,7 +1916,10 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
                /* Update numa and unmask the interrupt only when schedule
                 * from the interrupt context (vs from sk_busy_loop)
                 */
-               if (napi_complete_done(napi, rx_work_done)) {
+               if (napi_complete_done(napi, rx_work_done) &&
+                   READ_ONCE(ena_napi->interrupts_masked)) {
+                       smp_rmb(); /* make sure interrupts_masked is read */
+                       WRITE_ONCE(ena_napi->interrupts_masked, false);
                        /* We apply adaptive moderation on Rx path only.
                         * Tx uses static interrupt moderation.
                         */
@@ -1961,6 +1967,9 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
 
        ena_napi->first_interrupt = true;
 
+       WRITE_ONCE(ena_napi->interrupts_masked, true);
+       smp_wmb(); /* write interrupts_masked before calling napi */
+
        napi_schedule_irqoff(&ena_napi->napi);
 
        return IRQ_HANDLED;
@@ -2190,14 +2199,13 @@ static void ena_del_napi_in_range(struct ena_adapter *adapter,
 static void ena_init_napi_in_range(struct ena_adapter *adapter,
                                   int first_index, int count)
 {
-       struct ena_napi *napi = {0};
        int i;
 
        for (i = first_index; i < first_index + count; i++) {
-               napi = &adapter->ena_napi[i];
+               struct ena_napi *napi = &adapter->ena_napi[i];
 
                netif_napi_add(adapter->netdev,
-                              &adapter->ena_napi[i].napi,
+                              &napi->napi,
                               ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
                               ENA_NAPI_BUDGET);
 
@@ -2776,7 +2784,9 @@ int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
        return dev_was_up ? ena_open(adapter->netdev) : 0;
 }
 
-static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
+static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx,
+                       struct sk_buff *skb,
+                       bool disable_meta_caching)
 {
        u32 mss = skb_shinfo(skb)->gso_size;
        struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
@@ -2820,7 +2830,9 @@ static void ena_tx_csum(struct ena_com_tx_ctx *ena_tx_ctx, struct sk_buff *skb)
                ena_meta->l3_hdr_len = skb_network_header_len(skb);
                ena_meta->l3_hdr_offset = skb_network_offset(skb);
                ena_tx_ctx->meta_valid = 1;
-
+       } else if (disable_meta_caching) {
+               memset(ena_meta, 0, sizeof(*ena_meta));
+               ena_tx_ctx->meta_valid = 1;
        } else {
                ena_tx_ctx->meta_valid = 0;
        }
@@ -3004,7 +3016,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
        ena_tx_ctx.header_len = header_len;
 
        /* set flags and meta data */
-       ena_tx_csum(&ena_tx_ctx, skb);
+       ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching);
 
        rc = ena_xmit_common(dev,
                             tx_ring,
@@ -3118,7 +3130,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
 
        host_info->driver_supported_features =
                ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
-               ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK;
+               ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
+               ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
+               ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
 
        rc = ena_com_set_host_attributes(ena_dev);
        if (rc) {
@@ -3271,10 +3285,71 @@ static int ena_device_validate_params(struct ena_adapter *adapter,
        return 0;
 }
 
+static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
+{
+       llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
+       llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
+       llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
+       llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+       llq_config->llq_ring_entry_size_value = 128;
+}
+
+static int ena_set_queues_placement_policy(struct pci_dev *pdev,
+                                          struct ena_com_dev *ena_dev,
+                                          struct ena_admin_feature_llq_desc *llq,
+                                          struct ena_llq_configurations *llq_default_configurations)
+{
+       int rc;
+       u32 llq_feature_mask;
+
+       llq_feature_mask = 1 << ENA_ADMIN_LLQ;
+       if (!(ena_dev->supported_features & llq_feature_mask)) {
+               dev_err(&pdev->dev,
+                       "LLQ is not supported Fallback to host mode policy.\n");
+               ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+               return 0;
+       }
+
+       rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
+       if (unlikely(rc)) {
+               dev_err(&pdev->dev,
+                       "Failed to configure the device mode.  Fallback to host mode policy.\n");
+               ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+       }
+
+       return 0;
+}
+
+static int ena_map_llq_mem_bar(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
+                              int bars)
+{
+       bool has_mem_bar = !!(bars & BIT(ENA_MEM_BAR));
+
+       if (!has_mem_bar) {
+               if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+                       dev_err(&pdev->dev,
+                               "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
+                       ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+               }
+
+               return 0;
+       }
+
+       ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
+                                          pci_resource_start(pdev, ENA_MEM_BAR),
+                                          pci_resource_len(pdev, ENA_MEM_BAR));
+
+       if (!ena_dev->mem_bar)
+               return -EFAULT;
+
+       return 0;
+}
+
 static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
                           struct ena_com_dev_get_features_ctx *get_feat_ctx,
                           bool *wd_state)
 {
+       struct ena_llq_configurations llq_config;
        struct device *dev = &pdev->dev;
        bool readless_supported;
        u32 aenq_groups;
@@ -3365,6 +3440,15 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
 
        *wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
 
+       set_default_llq_configurations(&llq_config);
+
+       rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
+                                            &llq_config);
+       if (rc) {
+               dev_err(&pdev->dev, "ena device init failed\n");
+               goto err_admin_init;
+       }
+
        return 0;
 
 err_admin_init:
@@ -3871,54 +3955,6 @@ static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
        return max_num_io_queues;
 }
 
-static int ena_set_queues_placement_policy(struct pci_dev *pdev,
-                                          struct ena_com_dev *ena_dev,
-                                          struct ena_admin_feature_llq_desc *llq,
-                                          struct ena_llq_configurations *llq_default_configurations)
-{
-       bool has_mem_bar;
-       int rc;
-       u32 llq_feature_mask;
-
-       llq_feature_mask = 1 << ENA_ADMIN_LLQ;
-       if (!(ena_dev->supported_features & llq_feature_mask)) {
-               dev_err(&pdev->dev,
-                       "LLQ is not supported Fallback to host mode policy.\n");
-               ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
-               return 0;
-       }
-
-       has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
-
-       rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
-       if (unlikely(rc)) {
-               dev_err(&pdev->dev,
-                       "Failed to configure the device mode.  Fallback to host mode policy.\n");
-               ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
-               return 0;
-       }
-
-       /* Nothing to config, exit */
-       if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
-               return 0;
-
-       if (!has_mem_bar) {
-               dev_err(&pdev->dev,
-                       "ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
-               ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
-               return 0;
-       }
-
-       ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
-                                          pci_resource_start(pdev, ENA_MEM_BAR),
-                                          pci_resource_len(pdev, ENA_MEM_BAR));
-
-       if (!ena_dev->mem_bar)
-               return -EFAULT;
-
-       return 0;
-}
-
 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
                                 struct net_device *netdev)
 {
@@ -4034,14 +4070,6 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
        pci_release_selected_regions(pdev, release_bars);
 }
 
-static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
-{
-       llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
-       llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
-       llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
-       llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
-       llq_config->llq_ring_entry_size_value = 128;
-}
 
 static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
 {
@@ -4123,7 +4151,6 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
        struct ena_com_dev_get_features_ctx get_feat_ctx;
-       struct ena_llq_configurations llq_config;
        struct ena_com_dev *ena_dev = NULL;
        struct ena_adapter *adapter;
        struct net_device *netdev;
@@ -4178,13 +4205,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_free_region;
        }
 
-       set_default_llq_configurations(&llq_config);
-
-       rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
-                                            &llq_config);
+       rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
        if (rc) {
-               dev_err(&pdev->dev, "ena device init failed\n");
-               goto err_device_destroy;
+               dev_err(&pdev->dev, "ena llq bar mapping failed\n");
+               goto err_free_ena_dev;
        }
 
        calc_queue_ctx.ena_dev = ena_dev;
@@ -4241,6 +4265,11 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        adapter->xdp_num_queues = 0;
 
        adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK;
+       if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+               adapter->disable_meta_caching =
+                       !!(get_feat_ctx.llq.accel_mode.u.get.supported_flags &
+                          BIT(ENA_ADMIN_DISABLE_META_CACHING));
+
        adapter->wd_state = wd_state;
 
        snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found);
@@ -4420,13 +4449,12 @@ static void ena_shutdown(struct pci_dev *pdev)
        __ena_shutoff(pdev, true);
 }
 
-#ifdef CONFIG_PM
 /* ena_suspend - PM suspend callback
- * @pdev: PCI device information struct
- * @state:power state
+ * @dev_d: Device information struct
  */
-static int ena_suspend(struct pci_dev *pdev,  pm_message_t state)
+static int __maybe_unused ena_suspend(struct device *dev_d)
 {
+       struct pci_dev *pdev = to_pci_dev(dev_d);
        struct ena_adapter *adapter = pci_get_drvdata(pdev);
 
        u64_stats_update_begin(&adapter->syncp);
@@ -4445,12 +4473,11 @@ static int ena_suspend(struct pci_dev *pdev,  pm_message_t state)
 }
 
 /* ena_resume - PM resume callback
- * @pdev: PCI device information struct
- *
+ * @dev_d: Device information struct
  */
-static int ena_resume(struct pci_dev *pdev)
+static int __maybe_unused ena_resume(struct device *dev_d)
 {
-       struct ena_adapter *adapter = pci_get_drvdata(pdev);
+       struct ena_adapter *adapter = dev_get_drvdata(dev_d);
        int rc;
 
        u64_stats_update_begin(&adapter->syncp);
@@ -4462,7 +4489,8 @@ static int ena_resume(struct pci_dev *pdev)
        rtnl_unlock();
        return rc;
 }
-#endif
+
+static SIMPLE_DEV_PM_OPS(ena_pm_ops, ena_suspend, ena_resume);
 
 static struct pci_driver ena_pci_driver = {
        .name           = DRV_MODULE_NAME,
@@ -4470,10 +4498,7 @@ static struct pci_driver ena_pci_driver = {
        .probe          = ena_probe,
        .remove         = ena_remove,
        .shutdown       = ena_shutdown,
-#ifdef CONFIG_PM
-       .suspend    = ena_suspend,
-       .resume     = ena_resume,
-#endif
+       .driver.pm      = &ena_pm_ops,
        .sriov_configure = pci_sriov_configure_simple,
 };
 
index ba030d2..0c85040 100644 (file)
@@ -167,6 +167,7 @@ struct ena_napi {
        struct ena_ring *rx_ring;
        struct ena_ring *xdp_ring;
        bool first_interrupt;
+       bool interrupts_masked;
        u32 qid;
        struct dim dim;
 };
@@ -297,6 +298,7 @@ struct ena_ring {
        u8 tx_max_header_size;
 
        bool first_interrupt;
+       bool disable_meta_caching;
        u16 no_interrupt_event_cnt;
 
        /* cpu for TPH */
@@ -398,6 +400,7 @@ struct ena_adapter {
 
        bool wd_state;
        bool dev_up_before_reset;
+       bool disable_meta_caching;
        unsigned long last_keep_alive_jiffies;
 
        struct u64_stats_sync syncp;
index f80d2a4..426e57e 100644 (file)
 #define PCI_DEV_ID_ENA_LLQ_VF  0xec21
 #endif
 
+#ifndef PCI_DEV_ID_ENA_RESRV0
+#define PCI_DEV_ID_ENA_RESRV0  0x0051
+#endif
+
 #define ENA_PCI_ID_TABLE_ENTRY(devid) \
        {PCI_DEVICE(PCI_VENDOR_ID_AMAZON, devid)},
 
 static const struct pci_device_id ena_pci_tbl[] = {
+       ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_RESRV0)
        ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_PF)
        ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_LLQ_PF)
        ENA_PCI_ID_TABLE_ENTRY(PCI_DEV_ID_ENA_VF)
index 7a1286f..b6c43b5 100644 (file)
@@ -226,7 +226,9 @@ static int amd8111e_free_skbs(struct net_device *dev)
        /* Freeing transmit skbs */
        for(i = 0; i < NUM_TX_BUFFERS; i++){
                if(lp->tx_skbuff[i]){
-                       pci_unmap_single(lp->pci_dev,lp->tx_dma_addr[i],                                        lp->tx_skbuff[i]->len,PCI_DMA_TODEVICE);
+                       dma_unmap_single(&lp->pci_dev->dev,
+                                        lp->tx_dma_addr[i],
+                                        lp->tx_skbuff[i]->len, DMA_TO_DEVICE);
                        dev_kfree_skb (lp->tx_skbuff[i]);
                        lp->tx_skbuff[i] = NULL;
                        lp->tx_dma_addr[i] = 0;
@@ -236,8 +238,9 @@ static int amd8111e_free_skbs(struct net_device *dev)
        for (i = 0; i < NUM_RX_BUFFERS; i++){
                rx_skbuff = lp->rx_skbuff[i];
                if(rx_skbuff != NULL){
-                       pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i],
-                                 lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE);
+                       dma_unmap_single(&lp->pci_dev->dev,
+                                        lp->rx_dma_addr[i],
+                                        lp->rx_buff_len - 2, DMA_FROM_DEVICE);
                        dev_kfree_skb(lp->rx_skbuff[i]);
                        lp->rx_skbuff[i] = NULL;
                        lp->rx_dma_addr[i] = 0;
@@ -287,20 +290,20 @@ static int amd8111e_init_ring(struct net_device *dev)
                amd8111e_free_skbs(dev);
 
        else{
-                /* allocate the tx and rx descriptors */
-               if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
-                       sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
-                       &lp->tx_ring_dma_addr)) == NULL)
-
+               /* allocate the tx and rx descriptors */
+               lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
+                       sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
+                       &lp->tx_ring_dma_addr, GFP_ATOMIC);
+               if (!lp->tx_ring)
                        goto err_no_mem;
 
-               if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
-                       sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
-                       &lp->rx_ring_dma_addr)) == NULL)
-
+               lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
+                       sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
+                       &lp->rx_ring_dma_addr, GFP_ATOMIC);
+               if (!lp->rx_ring)
                        goto err_free_tx_ring;
-
        }
+
        /* Set new receive buff size */
        amd8111e_set_rx_buff_len(dev);
 
@@ -318,8 +321,10 @@ static int amd8111e_init_ring(struct net_device *dev)
        }
         /* Initilaizing receive descriptors */
        for (i = 0; i < NUM_RX_BUFFERS; i++) {
-               lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev,
-                       lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
+               lp->rx_dma_addr[i] = dma_map_single(&lp->pci_dev->dev,
+                                                   lp->rx_skbuff[i]->data,
+                                                   lp->rx_buff_len - 2,
+                                                   DMA_FROM_DEVICE);
 
                lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
                lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
@@ -338,15 +343,15 @@ static int amd8111e_init_ring(struct net_device *dev)
 
 err_free_rx_ring:
 
-       pci_free_consistent(lp->pci_dev,
-               sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring,
-               lp->rx_ring_dma_addr);
+       dma_free_coherent(&lp->pci_dev->dev,
+                         sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
+                         lp->rx_ring, lp->rx_ring_dma_addr);
 
 err_free_tx_ring:
 
-       pci_free_consistent(lp->pci_dev,
-                sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring,
-                lp->tx_ring_dma_addr);
+       dma_free_coherent(&lp->pci_dev->dev,
+                         sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
+                         lp->tx_ring, lp->tx_ring_dma_addr);
 
 err_no_mem:
        return -ENOMEM;
@@ -612,16 +617,16 @@ static void amd8111e_free_ring(struct amd8111e_priv *lp)
 {
        /* Free transmit and receive descriptor rings */
        if(lp->rx_ring){
-               pci_free_consistent(lp->pci_dev,
-                       sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
-                       lp->rx_ring, lp->rx_ring_dma_addr);
+               dma_free_coherent(&lp->pci_dev->dev,
+                                 sizeof(struct amd8111e_rx_dr) * NUM_RX_RING_DR,
+                                 lp->rx_ring, lp->rx_ring_dma_addr);
                lp->rx_ring = NULL;
        }
 
        if(lp->tx_ring){
-               pci_free_consistent(lp->pci_dev,
-                       sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
-                       lp->tx_ring, lp->tx_ring_dma_addr);
+               dma_free_coherent(&lp->pci_dev->dev,
+                                 sizeof(struct amd8111e_tx_dr) * NUM_TX_RING_DR,
+                                 lp->tx_ring, lp->tx_ring_dma_addr);
 
                lp->tx_ring = NULL;
        }
@@ -649,9 +654,10 @@ static int amd8111e_tx(struct net_device *dev)
 
                /* We must free the original skb */
                if (lp->tx_skbuff[tx_index]) {
-                       pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
-                                       lp->tx_skbuff[tx_index]->len,
-                                       PCI_DMA_TODEVICE);
+                       dma_unmap_single(&lp->pci_dev->dev,
+                                        lp->tx_dma_addr[tx_index],
+                                        lp->tx_skbuff[tx_index]->len,
+                                        DMA_TO_DEVICE);
                        dev_consume_skb_irq(lp->tx_skbuff[tx_index]);
                        lp->tx_skbuff[tx_index] = NULL;
                        lp->tx_dma_addr[tx_index] = 0;
@@ -737,14 +743,14 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
 
                skb_reserve(new_skb, 2);
                skb = lp->rx_skbuff[rx_index];
-               pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
-                                lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
+               dma_unmap_single(&lp->pci_dev->dev, lp->rx_dma_addr[rx_index],
+                                lp->rx_buff_len - 2, DMA_FROM_DEVICE);
                skb_put(skb, pkt_len);
                lp->rx_skbuff[rx_index] = new_skb;
-               lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
+               lp->rx_dma_addr[rx_index] = dma_map_single(&lp->pci_dev->dev,
                                                           new_skb->data,
-                                                          lp->rx_buff_len-2,
-                                                          PCI_DMA_FROMDEVICE);
+                                                          lp->rx_buff_len - 2,
+                                                          DMA_FROM_DEVICE);
 
                skb->protocol = eth_type_trans(skb, dev);
 
@@ -1270,7 +1276,8 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
        }
 #endif
        lp->tx_dma_addr[tx_index] =
-           pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+           dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
+                          DMA_TO_DEVICE);
        lp->tx_ring[tx_index].buff_phy_addr =
            cpu_to_le32(lp->tx_dma_addr[tx_index]);
 
@@ -1580,9 +1587,10 @@ static void amd8111e_tx_timeout(struct net_device *dev, unsigned int txqueue)
        if(!err)
                netif_wake_queue(dev);
 }
-static int amd8111e_suspend(struct pci_dev *pci_dev, pm_message_t state)
+
+static int __maybe_unused amd8111e_suspend(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pci_dev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct amd8111e_priv *lp = netdev_priv(dev);
 
        if (!netif_running(dev))
@@ -1609,34 +1617,24 @@ static int amd8111e_suspend(struct pci_dev *pci_dev, pm_message_t state)
                if(lp->options & OPTION_WAKE_PHY_ENABLE)
                        amd8111e_enable_link_change(lp);
 
-               pci_enable_wake(pci_dev, PCI_D3hot, 1);
-               pci_enable_wake(pci_dev, PCI_D3cold, 1);
+               device_set_wakeup_enable(dev_d, 1);
 
        }
        else{
-               pci_enable_wake(pci_dev, PCI_D3hot, 0);
-               pci_enable_wake(pci_dev, PCI_D3cold, 0);
+               device_set_wakeup_enable(dev_d, 0);
        }
 
-       pci_save_state(pci_dev);
-       pci_set_power_state(pci_dev, PCI_D3hot);
-
        return 0;
 }
-static int amd8111e_resume(struct pci_dev *pci_dev)
+
+static int __maybe_unused amd8111e_resume(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pci_dev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct amd8111e_priv *lp = netdev_priv(dev);
 
        if (!netif_running(dev))
                return 0;
 
-       pci_set_power_state(pci_dev, PCI_D0);
-       pci_restore_state(pci_dev);
-
-       pci_enable_wake(pci_dev, PCI_D3hot, 0);
-       pci_enable_wake(pci_dev, PCI_D3cold, 0); /* D3 cold */
-
        netif_device_attach(dev);
 
        spin_lock_irq(&lp->lock);
@@ -1782,7 +1780,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
        }
 
        /* Initialize DMA */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) {
+       if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) < 0) {
                dev_err(&pdev->dev, "DMA not supported\n");
                err = -ENODEV;
                goto err_free_reg;
@@ -1918,13 +1916,14 @@ static const struct pci_device_id amd8111e_pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
 
+static SIMPLE_DEV_PM_OPS(amd8111e_pm_ops, amd8111e_suspend, amd8111e_resume);
+
 static struct pci_driver amd8111e_driver = {
        .name           = MODULE_NAME,
        .id_table       = amd8111e_pci_tbl,
        .probe          = amd8111e_probe_one,
        .remove         = amd8111e_remove_one,
-       .suspend        = amd8111e_suspend,
-       .resume         = amd8111e_resume
+       .driver.pm      = &amd8111e_pm_ops
 };
 
 module_pci_driver(amd8111e_driver);
index 9f6e3cc..75dbd22 100644 (file)
@@ -241,7 +241,6 @@ MODULE_LICENSE("GPL");
  * ps: make sure the used irqs are configured properly in the board
  * specific irq-map
  */
-
 static void au1000_enable_mac(struct net_device *dev, int force_reset)
 {
        unsigned long flags;
@@ -556,7 +555,6 @@ static int au1000_mii_probe(struct net_device *dev)
        return 0;
 }
 
-
 /*
  * Buffer allocation/deallocation routines. The buffer descriptor returned
  * has the virtual and dma address of a buffer suitable for
@@ -647,7 +645,6 @@ au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base)
 /*
  * ethtool operations
  */
-
 static void
 au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
@@ -679,7 +676,6 @@ static const struct ethtool_ops au1000_ethtool_ops = {
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
 };
 
-
 /*
  * Initialize the interface.
  *
@@ -1255,7 +1251,6 @@ static int au1000_probe(struct platform_device *pdev)
                aup->rx_db_inuse[i] = pDB;
        }
 
-       err = -ENODEV;
        for (i = 0; i < NUM_TX_DMA; i++) {
                pDB = au1000_GetFreeDB(aup);
                if (!pDB)
index 07e8211..187b0b9 100644 (file)
@@ -250,7 +250,7 @@ struct pcnet32_access {
 
 /*
  * The first field of pcnet32_private is read by the ethernet device
- * so the structure should be allocated using pci_alloc_consistent().
+ * so the structure should be allocated using dma_alloc_coherent().
  */
 struct pcnet32_private {
        struct pcnet32_init_block *init_block;
@@ -258,7 +258,7 @@ struct pcnet32_private {
        struct pcnet32_rx_head  *rx_ring;
        struct pcnet32_tx_head  *tx_ring;
        dma_addr_t              init_dma_addr;/* DMA address of beginning of the init block,
-                                  returned by pci_alloc_consistent */
+                                  returned by dma_alloc_coherent */
        struct pci_dev          *pci_dev;
        const char              *name;
        /* The saved address of a sent-in-place packet/buffer, for skfree(). */
@@ -485,9 +485,9 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
        pcnet32_purge_tx_ring(dev);
 
        new_tx_ring =
-               pci_zalloc_consistent(lp->pci_dev,
-                                     sizeof(struct pcnet32_tx_head) * entries,
-                                     &new_ring_dma_addr);
+               dma_alloc_coherent(&lp->pci_dev->dev,
+                                  sizeof(struct pcnet32_tx_head) * entries,
+                                  &new_ring_dma_addr, GFP_ATOMIC);
        if (new_tx_ring == NULL)
                return;
 
@@ -501,9 +501,9 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
 
        kfree(lp->tx_skbuff);
        kfree(lp->tx_dma_addr);
-       pci_free_consistent(lp->pci_dev,
-                           sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
-                           lp->tx_ring, lp->tx_ring_dma_addr);
+       dma_free_coherent(&lp->pci_dev->dev,
+                         sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+                         lp->tx_ring, lp->tx_ring_dma_addr);
 
        lp->tx_ring_size = entries;
        lp->tx_mod_mask = lp->tx_ring_size - 1;
@@ -517,10 +517,9 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
 free_new_lists:
        kfree(new_dma_addr_list);
 free_new_tx_ring:
-       pci_free_consistent(lp->pci_dev,
-                           sizeof(struct pcnet32_tx_head) * entries,
-                           new_tx_ring,
-                           new_ring_dma_addr);
+       dma_free_coherent(&lp->pci_dev->dev,
+                         sizeof(struct pcnet32_tx_head) * entries,
+                         new_tx_ring, new_ring_dma_addr);
 }
 
 /*
@@ -545,9 +544,9 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
        unsigned int entries = BIT(size);
 
        new_rx_ring =
-               pci_zalloc_consistent(lp->pci_dev,
-                                     sizeof(struct pcnet32_rx_head) * entries,
-                                     &new_ring_dma_addr);
+               dma_alloc_coherent(&lp->pci_dev->dev,
+                                  sizeof(struct pcnet32_rx_head) * entries,
+                                  &new_ring_dma_addr, GFP_ATOMIC);
        if (new_rx_ring == NULL)
                return;
 
@@ -580,10 +579,9 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
                skb_reserve(rx_skbuff, NET_IP_ALIGN);
 
                new_dma_addr_list[new] =
-                           pci_map_single(lp->pci_dev, rx_skbuff->data,
-                                          PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(lp->pci_dev,
-                                         new_dma_addr_list[new])) {
+                           dma_map_single(&lp->pci_dev->dev, rx_skbuff->data,
+                                          PKT_BUF_SIZE, DMA_FROM_DEVICE);
+               if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new])) {
                        netif_err(lp, drv, dev, "%s dma mapping failed\n",
                                  __func__);
                        dev_kfree_skb(new_skb_list[new]);
@@ -596,22 +594,20 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
        /* and free any unneeded buffers */
        for (; new < lp->rx_ring_size; new++) {
                if (lp->rx_skbuff[new]) {
-                       if (!pci_dma_mapping_error(lp->pci_dev,
-                                                  lp->rx_dma_addr[new]))
-                               pci_unmap_single(lp->pci_dev,
+                       if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[new]))
+                               dma_unmap_single(&lp->pci_dev->dev,
                                                 lp->rx_dma_addr[new],
                                                 PKT_BUF_SIZE,
-                                                PCI_DMA_FROMDEVICE);
+                                                DMA_FROM_DEVICE);
                        dev_kfree_skb(lp->rx_skbuff[new]);
                }
        }
 
        kfree(lp->rx_skbuff);
        kfree(lp->rx_dma_addr);
-       pci_free_consistent(lp->pci_dev,
-                           sizeof(struct pcnet32_rx_head) *
-                           lp->rx_ring_size, lp->rx_ring,
-                           lp->rx_ring_dma_addr);
+       dma_free_coherent(&lp->pci_dev->dev,
+                         sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
+                         lp->rx_ring, lp->rx_ring_dma_addr);
 
        lp->rx_ring_size = entries;
        lp->rx_mod_mask = lp->rx_ring_size - 1;
@@ -625,12 +621,11 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
 free_all_new:
        while (--new >= lp->rx_ring_size) {
                if (new_skb_list[new]) {
-                       if (!pci_dma_mapping_error(lp->pci_dev,
-                                                  new_dma_addr_list[new]))
-                               pci_unmap_single(lp->pci_dev,
+                       if (!dma_mapping_error(&lp->pci_dev->dev, new_dma_addr_list[new]))
+                               dma_unmap_single(&lp->pci_dev->dev,
                                                 new_dma_addr_list[new],
                                                 PKT_BUF_SIZE,
-                                                PCI_DMA_FROMDEVICE);
+                                                DMA_FROM_DEVICE);
                        dev_kfree_skb(new_skb_list[new]);
                }
        }
@@ -638,10 +633,9 @@ free_all_new:
 free_new_lists:
        kfree(new_dma_addr_list);
 free_new_rx_ring:
-       pci_free_consistent(lp->pci_dev,
-                           sizeof(struct pcnet32_rx_head) * entries,
-                           new_rx_ring,
-                           new_ring_dma_addr);
+       dma_free_coherent(&lp->pci_dev->dev,
+                         sizeof(struct pcnet32_rx_head) * entries,
+                         new_rx_ring, new_ring_dma_addr);
 }
 
 static void pcnet32_purge_rx_ring(struct net_device *dev)
@@ -654,12 +648,11 @@ static void pcnet32_purge_rx_ring(struct net_device *dev)
                lp->rx_ring[i].status = 0;      /* CPU owns buffer */
                wmb();          /* Make sure adapter sees owner change */
                if (lp->rx_skbuff[i]) {
-                       if (!pci_dma_mapping_error(lp->pci_dev,
-                                                  lp->rx_dma_addr[i]))
-                               pci_unmap_single(lp->pci_dev,
+                       if (!dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i]))
+                               dma_unmap_single(&lp->pci_dev->dev,
                                                 lp->rx_dma_addr[i],
                                                 PKT_BUF_SIZE,
-                                                PCI_DMA_FROMDEVICE);
+                                                DMA_FROM_DEVICE);
                        dev_kfree_skb_any(lp->rx_skbuff[i]);
                }
                lp->rx_skbuff[i] = NULL;
@@ -1036,9 +1029,9 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
                        *packet++ = i;
 
                lp->tx_dma_addr[x] =
-                       pci_map_single(lp->pci_dev, skb->data, skb->len,
-                                      PCI_DMA_TODEVICE);
-               if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[x])) {
+                       dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
+                                      DMA_TO_DEVICE);
+               if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[x])) {
                        netif_printk(lp, hw, KERN_DEBUG, dev,
                                     "DMA mapping error at line: %d!\n",
                                     __LINE__);
@@ -1226,21 +1219,21 @@ static void pcnet32_rx_entry(struct net_device *dev,
                 */
                if (newskb) {
                        skb_reserve(newskb, NET_IP_ALIGN);
-                       new_dma_addr = pci_map_single(lp->pci_dev,
+                       new_dma_addr = dma_map_single(&lp->pci_dev->dev,
                                                      newskb->data,
                                                      PKT_BUF_SIZE,
-                                                     PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(lp->pci_dev, new_dma_addr)) {
+                                                     DMA_FROM_DEVICE);
+                       if (dma_mapping_error(&lp->pci_dev->dev, new_dma_addr)) {
                                netif_err(lp, rx_err, dev,
                                          "DMA mapping error.\n");
                                dev_kfree_skb(newskb);
                                skb = NULL;
                        } else {
                                skb = lp->rx_skbuff[entry];
-                               pci_unmap_single(lp->pci_dev,
+                               dma_unmap_single(&lp->pci_dev->dev,
                                                 lp->rx_dma_addr[entry],
                                                 PKT_BUF_SIZE,
-                                                PCI_DMA_FROMDEVICE);
+                                                DMA_FROM_DEVICE);
                                skb_put(skb, pkt_len);
                                lp->rx_skbuff[entry] = newskb;
                                lp->rx_dma_addr[entry] = new_dma_addr;
@@ -1259,17 +1252,15 @@ static void pcnet32_rx_entry(struct net_device *dev,
        if (!rx_in_place) {
                skb_reserve(skb, NET_IP_ALIGN);
                skb_put(skb, pkt_len);  /* Make room */
-               pci_dma_sync_single_for_cpu(lp->pci_dev,
-                                           lp->rx_dma_addr[entry],
-                                           pkt_len,
-                                           PCI_DMA_FROMDEVICE);
+               dma_sync_single_for_cpu(&lp->pci_dev->dev,
+                                       lp->rx_dma_addr[entry], pkt_len,
+                                       DMA_FROM_DEVICE);
                skb_copy_to_linear_data(skb,
                                 (unsigned char *)(lp->rx_skbuff[entry]->data),
                                 pkt_len);
-               pci_dma_sync_single_for_device(lp->pci_dev,
-                                              lp->rx_dma_addr[entry],
-                                              pkt_len,
-                                              PCI_DMA_FROMDEVICE);
+               dma_sync_single_for_device(&lp->pci_dev->dev,
+                                          lp->rx_dma_addr[entry], pkt_len,
+                                          DMA_FROM_DEVICE);
        }
        dev->stats.rx_bytes += skb->len;
        skb->protocol = eth_type_trans(skb, dev);
@@ -1358,10 +1349,10 @@ static int pcnet32_tx(struct net_device *dev)
 
                /* We must free the original skb */
                if (lp->tx_skbuff[entry]) {
-                       pci_unmap_single(lp->pci_dev,
+                       dma_unmap_single(&lp->pci_dev->dev,
                                         lp->tx_dma_addr[entry],
-                                        lp->tx_skbuff[entry]->
-                                        len, PCI_DMA_TODEVICE);
+                                        lp->tx_skbuff[entry]->len,
+                                        DMA_TO_DEVICE);
                        dev_kfree_skb_any(lp->tx_skbuff[entry]);
                        lp->tx_skbuff[entry] = NULL;
                        lp->tx_dma_addr[entry] = 0;
@@ -1551,7 +1542,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_disable_dev;
        }
 
-       err = pci_set_dma_mask(pdev, PCNET32_DMA_MASK);
+       err = dma_set_mask(&pdev->dev, PCNET32_DMA_MASK);
        if (err) {
                if (pcnet32_debug & NETIF_MSG_PROBE)
                        pr_err("architecture does not support 32bit PCI busmaster DMA\n");
@@ -1834,12 +1825,13 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 
        dev->base_addr = ioaddr;
        lp = netdev_priv(dev);
-       /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
-       lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block),
-                                             &lp->init_dma_addr);
+       /* dma_alloc_coherent returns page-aligned memory, so we do not have to check the alignment */
+       lp->init_block = dma_alloc_coherent(&pdev->dev,
+                                           sizeof(*lp->init_block),
+                                           &lp->init_dma_addr, GFP_KERNEL);
        if (!lp->init_block) {
                if (pcnet32_debug & NETIF_MSG_PROBE)
-                       pr_err("Consistent memory allocation failed\n");
+                       pr_err("Coherent memory allocation failed\n");
                ret = -ENOMEM;
                goto err_free_netdev;
        }
@@ -1998,8 +1990,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 
 err_free_ring:
        pcnet32_free_ring(dev);
-       pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
-                           lp->init_block, lp->init_dma_addr);
+       dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
+                         lp->init_block, lp->init_dma_addr);
 err_free_netdev:
        free_netdev(dev);
 err_release_region:
@@ -2012,21 +2004,19 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
 {
        struct pcnet32_private *lp = netdev_priv(dev);
 
-       lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
-                                          sizeof(struct pcnet32_tx_head) *
-                                          lp->tx_ring_size,
-                                          &lp->tx_ring_dma_addr);
+       lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
+                                        sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+                                        &lp->tx_ring_dma_addr, GFP_KERNEL);
        if (lp->tx_ring == NULL) {
-               netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
+               netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
                return -ENOMEM;
        }
 
-       lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
-                                          sizeof(struct pcnet32_rx_head) *
-                                          lp->rx_ring_size,
-                                          &lp->rx_ring_dma_addr);
+       lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
+                                        sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
+                                        &lp->rx_ring_dma_addr, GFP_KERNEL);
        if (lp->rx_ring == NULL) {
-               netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
+               netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
                return -ENOMEM;
        }
 
@@ -2070,18 +2060,16 @@ static void pcnet32_free_ring(struct net_device *dev)
        lp->rx_dma_addr = NULL;
 
        if (lp->tx_ring) {
-               pci_free_consistent(lp->pci_dev,
-                                   sizeof(struct pcnet32_tx_head) *
-                                   lp->tx_ring_size, lp->tx_ring,
-                                   lp->tx_ring_dma_addr);
+               dma_free_coherent(&lp->pci_dev->dev,
+                                 sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
+                                 lp->tx_ring, lp->tx_ring_dma_addr);
                lp->tx_ring = NULL;
        }
 
        if (lp->rx_ring) {
-               pci_free_consistent(lp->pci_dev,
-                                   sizeof(struct pcnet32_rx_head) *
-                                   lp->rx_ring_size, lp->rx_ring,
-                                   lp->rx_ring_dma_addr);
+               dma_free_coherent(&lp->pci_dev->dev,
+                                 sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
+                                 lp->rx_ring, lp->rx_ring_dma_addr);
                lp->rx_ring = NULL;
        }
 }
@@ -2342,12 +2330,11 @@ static void pcnet32_purge_tx_ring(struct net_device *dev)
                lp->tx_ring[i].status = 0;      /* CPU owns buffer */
                wmb();          /* Make sure adapter sees owner change */
                if (lp->tx_skbuff[i]) {
-                       if (!pci_dma_mapping_error(lp->pci_dev,
-                                                  lp->tx_dma_addr[i]))
-                               pci_unmap_single(lp->pci_dev,
+                       if (!dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[i]))
+                               dma_unmap_single(&lp->pci_dev->dev,
                                                 lp->tx_dma_addr[i],
                                                 lp->tx_skbuff[i]->len,
-                                                PCI_DMA_TODEVICE);
+                                                DMA_TO_DEVICE);
                        dev_kfree_skb_any(lp->tx_skbuff[i]);
                }
                lp->tx_skbuff[i] = NULL;
@@ -2382,10 +2369,9 @@ static int pcnet32_init_ring(struct net_device *dev)
                rmb();
                if (lp->rx_dma_addr[i] == 0) {
                        lp->rx_dma_addr[i] =
-                           pci_map_single(lp->pci_dev, rx_skbuff->data,
-                                          PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(lp->pci_dev,
-                                                 lp->rx_dma_addr[i])) {
+                           dma_map_single(&lp->pci_dev->dev, rx_skbuff->data,
+                                          PKT_BUF_SIZE, DMA_FROM_DEVICE);
+                       if (dma_mapping_error(&lp->pci_dev->dev, lp->rx_dma_addr[i])) {
                                /* there is not much we can do at this point */
                                netif_err(lp, drv, dev,
                                          "%s pci dma mapping error\n",
@@ -2523,8 +2509,9 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
        lp->tx_ring[entry].misc = 0x00000000;
 
        lp->tx_dma_addr[entry] =
-           pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) {
+           dma_map_single(&lp->pci_dev->dev, skb->data, skb->len,
+                          DMA_TO_DEVICE);
+       if (dma_mapping_error(&lp->pci_dev->dev, lp->tx_dma_addr[entry])) {
                dev_kfree_skb_any(skb);
                dev->stats.tx_dropped++;
                goto drop_packet;
@@ -2913,30 +2900,27 @@ static void pcnet32_watchdog(struct timer_list *t)
        mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT));
 }
 
-static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused pcnet32_pm_suspend(struct device *device_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(device_d);
 
        if (netif_running(dev)) {
                netif_device_detach(dev);
                pcnet32_close(dev);
        }
-       pci_save_state(pdev);
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
        return 0;
 }
 
-static int pcnet32_pm_resume(struct pci_dev *pdev)
+static int __maybe_unused pcnet32_pm_resume(struct device *device_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
-
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
+       struct net_device *dev = dev_get_drvdata(device_d);
 
        if (netif_running(dev)) {
                pcnet32_open(dev);
                netif_device_attach(dev);
        }
+
        return 0;
 }
 
@@ -2950,20 +2934,23 @@ static void pcnet32_remove_one(struct pci_dev *pdev)
                unregister_netdev(dev);
                pcnet32_free_ring(dev);
                release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
-               pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
-                                   lp->init_block, lp->init_dma_addr);
+               dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
+                                 lp->init_block, lp->init_dma_addr);
                free_netdev(dev);
                pci_disable_device(pdev);
        }
 }
 
+static SIMPLE_DEV_PM_OPS(pcnet32_pm_ops, pcnet32_pm_suspend, pcnet32_pm_resume);
+
 static struct pci_driver pcnet32_driver = {
        .name = DRV_NAME,
        .probe = pcnet32_probe_pci,
        .remove = pcnet32_remove_one,
        .id_table = pcnet32_pci_tbl,
-       .suspend = pcnet32_pm_suspend,
-       .resume = pcnet32_pm_resume,
+       .driver = {
+               .pm = &pcnet32_pm_ops,
+       },
 };
 
 /* An additional parameter that may be passed in... */
@@ -3030,8 +3017,8 @@ static void __exit pcnet32_cleanup_module(void)
                unregister_netdev(pcnet32_dev);
                pcnet32_free_ring(pcnet32_dev);
                release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
-               pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
-                                   lp->init_block, lp->init_dma_addr);
+               dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
+                                 lp->init_block, lp->init_dma_addr);
                free_netdev(pcnet32_dev);
                pcnet32_dev = next_dev;
        }
index a87264f..43294a1 100644 (file)
@@ -904,114 +904,40 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
        }
 }
 
-static void xgbe_disable_vxlan_offloads(struct xgbe_prv_data *pdata)
+static int xgbe_vxlan_set_port(struct net_device *netdev, unsigned int table,
+                              unsigned int entry, struct udp_tunnel_info *ti)
 {
-       struct net_device *netdev = pdata->netdev;
-
-       if (!pdata->vxlan_offloads_set)
-               return;
-
-       netdev_info(netdev, "disabling VXLAN offloads\n");
-
-       netdev->hw_enc_features &= ~(NETIF_F_SG |
-                                    NETIF_F_IP_CSUM |
-                                    NETIF_F_IPV6_CSUM |
-                                    NETIF_F_RXCSUM |
-                                    NETIF_F_TSO |
-                                    NETIF_F_TSO6 |
-                                    NETIF_F_GRO |
-                                    NETIF_F_GSO_UDP_TUNNEL |
-                                    NETIF_F_GSO_UDP_TUNNEL_CSUM);
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
 
-       netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL |
-                             NETIF_F_GSO_UDP_TUNNEL_CSUM);
+       pdata->vxlan_port = be16_to_cpu(ti->port);
+       pdata->hw_if.enable_vxlan(pdata);
 
-       pdata->vxlan_offloads_set = 0;
+       return 0;
 }
 
-static void xgbe_disable_vxlan_hw(struct xgbe_prv_data *pdata)
+static int xgbe_vxlan_unset_port(struct net_device *netdev, unsigned int table,
+                                unsigned int entry, struct udp_tunnel_info *ti)
 {
-       if (!pdata->vxlan_port_set)
-               return;
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
 
        pdata->hw_if.disable_vxlan(pdata);
-
-       pdata->vxlan_port_set = 0;
        pdata->vxlan_port = 0;
-}
-
-static void xgbe_disable_vxlan_accel(struct xgbe_prv_data *pdata)
-{
-       xgbe_disable_vxlan_offloads(pdata);
-
-       xgbe_disable_vxlan_hw(pdata);
-}
-
-static void xgbe_enable_vxlan_offloads(struct xgbe_prv_data *pdata)
-{
-       struct net_device *netdev = pdata->netdev;
-
-       if (pdata->vxlan_offloads_set)
-               return;
-
-       netdev_info(netdev, "enabling VXLAN offloads\n");
-
-       netdev->hw_enc_features |= NETIF_F_SG |
-                                  NETIF_F_IP_CSUM |
-                                  NETIF_F_IPV6_CSUM |
-                                  NETIF_F_RXCSUM |
-                                  NETIF_F_TSO |
-                                  NETIF_F_TSO6 |
-                                  NETIF_F_GRO |
-                                  pdata->vxlan_features;
-
-       netdev->features |= pdata->vxlan_features;
-
-       pdata->vxlan_offloads_set = 1;
-}
-
-static void xgbe_enable_vxlan_hw(struct xgbe_prv_data *pdata)
-{
-       struct xgbe_vxlan_data *vdata;
 
-       if (pdata->vxlan_port_set)
-               return;
-
-       if (list_empty(&pdata->vxlan_ports))
-               return;
-
-       vdata = list_first_entry(&pdata->vxlan_ports,
-                                struct xgbe_vxlan_data, list);
-
-       pdata->vxlan_port_set = 1;
-       pdata->vxlan_port = be16_to_cpu(vdata->port);
-
-       pdata->hw_if.enable_vxlan(pdata);
+       return 0;
 }
 
-static void xgbe_enable_vxlan_accel(struct xgbe_prv_data *pdata)
-{
-       /* VXLAN acceleration desired? */
-       if (!pdata->vxlan_features)
-               return;
-
-       /* VXLAN acceleration possible? */
-       if (pdata->vxlan_force_disable)
-               return;
-
-       xgbe_enable_vxlan_hw(pdata);
-
-       xgbe_enable_vxlan_offloads(pdata);
-}
+static const struct udp_tunnel_nic_info xgbe_udp_tunnels = {
+       .set_port       = xgbe_vxlan_set_port,
+       .unset_port     = xgbe_vxlan_unset_port,
+       .flags          = UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+       .tables         = {
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+       },
+};
 
-static void xgbe_reset_vxlan_accel(struct xgbe_prv_data *pdata)
+const struct udp_tunnel_nic_info *xgbe_get_udp_tunnel_info(void)
 {
-       xgbe_disable_vxlan_hw(pdata);
-
-       if (pdata->vxlan_features)
-               xgbe_enable_vxlan_offloads(pdata);
-
-       pdata->vxlan_force_disable = 0;
+       return &xgbe_udp_tunnels;
 }
 
 static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
@@ -1406,7 +1332,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
        hw_if->enable_tx(pdata);
        hw_if->enable_rx(pdata);
 
-       udp_tunnel_get_rx_info(netdev);
+       udp_tunnel_nic_reset_ntf(netdev);
 
        netif_tx_start_all_queues(netdev);
 
@@ -1447,7 +1373,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
        xgbe_stop_timers(pdata);
        flush_workqueue(pdata->dev_workqueue);
 
-       xgbe_reset_vxlan_accel(pdata);
+       xgbe_vxlan_unset_port(netdev, 0, 0, NULL);
 
        hw_if->disable_tx(pdata);
        hw_if->disable_rx(pdata);
@@ -1773,13 +1699,8 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
        return 0;
 }
 
-static bool xgbe_is_vxlan(struct xgbe_prv_data *pdata, struct sk_buff *skb)
+static bool xgbe_is_vxlan(struct sk_buff *skb)
 {
-       struct xgbe_vxlan_data *vdata;
-
-       if (pdata->vxlan_force_disable)
-               return false;
-
        if (!skb->encapsulation)
                return false;
 
@@ -1801,19 +1722,13 @@ static bool xgbe_is_vxlan(struct xgbe_prv_data *pdata, struct sk_buff *skb)
                return false;
        }
 
-       /* See if we have the UDP port in our list */
-       list_for_each_entry(vdata, &pdata->vxlan_ports, list) {
-               if ((skb->protocol == htons(ETH_P_IP)) &&
-                   (vdata->sa_family == AF_INET) &&
-                   (vdata->port == udp_hdr(skb)->dest))
-                       return true;
-               else if ((skb->protocol == htons(ETH_P_IPV6)) &&
-                        (vdata->sa_family == AF_INET6) &&
-                        (vdata->port == udp_hdr(skb)->dest))
-                       return true;
-       }
+       if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+           skb->inner_protocol != htons(ETH_P_TEB) ||
+           (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
+            sizeof(struct udphdr) + sizeof(struct vxlanhdr)))
+               return false;
 
-       return false;
+       return true;
 }
 
 static int xgbe_is_tso(struct sk_buff *skb)
@@ -1864,7 +1779,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
                XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                               CSUM_ENABLE, 1);
 
-       if (xgbe_is_vxlan(pdata, skb))
+       if (xgbe_is_vxlan(skb))
                XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                               VXLAN, 1);
 
@@ -2271,23 +2186,12 @@ static netdev_features_t xgbe_fix_features(struct net_device *netdev,
                                           netdev_features_t features)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       netdev_features_t vxlan_base, vxlan_mask;
+       netdev_features_t vxlan_base;
 
        vxlan_base = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RX_UDP_TUNNEL_PORT;
-       vxlan_mask = vxlan_base | NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
-       pdata->vxlan_features = features & vxlan_mask;
 
-       /* Only fix VXLAN-related features */
-       if (!pdata->vxlan_features)
-               return features;
-
-       /* If VXLAN isn't supported then clear any features:
-        *   This is needed because NETIF_F_RX_UDP_TUNNEL_PORT gets
-        *   automatically set if ndo_udp_tunnel_add is set.
-        */
        if (!pdata->hw_feat.vxn)
-               return features & ~vxlan_mask;
+               return features;
 
        /* VXLAN CSUM requires VXLAN base */
        if ((features & NETIF_F_GSO_UDP_TUNNEL_CSUM) &&
@@ -2318,15 +2222,6 @@ static netdev_features_t xgbe_fix_features(struct net_device *netdev,
                }
        }
 
-       pdata->vxlan_features = features & vxlan_mask;
-
-       /* Adjust UDP Tunnel based on current state */
-       if (pdata->vxlan_force_disable) {
-               netdev_notice(netdev,
-                             "VXLAN acceleration disabled, turning off udp tunnel features\n");
-               features &= ~vxlan_mask;
-       }
-
        return features;
 }
 
@@ -2336,14 +2231,12 @@ static int xgbe_set_features(struct net_device *netdev,
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
-       netdev_features_t udp_tunnel;
        int ret = 0;
 
        rxhash = pdata->netdev_features & NETIF_F_RXHASH;
        rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
        rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
        rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
-       udp_tunnel = pdata->netdev_features & NETIF_F_GSO_UDP_TUNNEL;
 
        if ((features & NETIF_F_RXHASH) && !rxhash)
                ret = hw_if->enable_rss(pdata);
@@ -2367,11 +2260,6 @@ static int xgbe_set_features(struct net_device *netdev,
        else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
                hw_if->disable_rx_vlan_filtering(pdata);
 
-       if ((features & NETIF_F_GSO_UDP_TUNNEL) && !udp_tunnel)
-               xgbe_enable_vxlan_accel(pdata);
-       else if (!(features & NETIF_F_GSO_UDP_TUNNEL) && udp_tunnel)
-               xgbe_disable_vxlan_accel(pdata);
-
        pdata->netdev_features = features;
 
        DBGPR("<--xgbe_set_features\n");
@@ -2379,101 +2267,6 @@ static int xgbe_set_features(struct net_device *netdev,
        return 0;
 }
 
-static void xgbe_udp_tunnel_add(struct net_device *netdev,
-                               struct udp_tunnel_info *ti)
-{
-       struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_vxlan_data *vdata;
-
-       if (!pdata->hw_feat.vxn)
-               return;
-
-       if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-               return;
-
-       pdata->vxlan_port_count++;
-
-       netif_dbg(pdata, drv, netdev,
-                 "adding VXLAN tunnel, family=%hx/port=%hx\n",
-                 ti->sa_family, be16_to_cpu(ti->port));
-
-       if (pdata->vxlan_force_disable)
-               return;
-
-       vdata = kzalloc(sizeof(*vdata), GFP_ATOMIC);
-       if (!vdata) {
-               /* Can no longer properly track VXLAN ports */
-               pdata->vxlan_force_disable = 1;
-               netif_dbg(pdata, drv, netdev,
-                         "internal error, disabling VXLAN accelerations\n");
-
-               xgbe_disable_vxlan_accel(pdata);
-
-               return;
-       }
-       vdata->sa_family = ti->sa_family;
-       vdata->port = ti->port;
-
-       list_add_tail(&vdata->list, &pdata->vxlan_ports);
-
-       /* First port added? */
-       if (pdata->vxlan_port_count == 1) {
-               xgbe_enable_vxlan_accel(pdata);
-
-               return;
-       }
-}
-
-static void xgbe_udp_tunnel_del(struct net_device *netdev,
-                               struct udp_tunnel_info *ti)
-{
-       struct xgbe_prv_data *pdata = netdev_priv(netdev);
-       struct xgbe_vxlan_data *vdata;
-
-       if (!pdata->hw_feat.vxn)
-               return;
-
-       if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-               return;
-
-       netif_dbg(pdata, drv, netdev,
-                 "deleting VXLAN tunnel, family=%hx/port=%hx\n",
-                 ti->sa_family, be16_to_cpu(ti->port));
-
-       /* Don't need safe version since loop terminates with deletion */
-       list_for_each_entry(vdata, &pdata->vxlan_ports, list) {
-               if (vdata->sa_family != ti->sa_family)
-                       continue;
-
-               if (vdata->port != ti->port)
-                       continue;
-
-               list_del(&vdata->list);
-               kfree(vdata);
-
-               break;
-       }
-
-       pdata->vxlan_port_count--;
-       if (!pdata->vxlan_port_count) {
-               xgbe_reset_vxlan_accel(pdata);
-
-               return;
-       }
-
-       if (pdata->vxlan_force_disable)
-               return;
-
-       /* See if VXLAN tunnel id needs to be changed */
-       vdata = list_first_entry(&pdata->vxlan_ports,
-                                struct xgbe_vxlan_data, list);
-       if (pdata->vxlan_port == be16_to_cpu(vdata->port))
-               return;
-
-       pdata->vxlan_port = be16_to_cpu(vdata->port);
-       pdata->hw_if.set_vxlan_id(pdata);
-}
-
 static netdev_features_t xgbe_features_check(struct sk_buff *skb,
                                             struct net_device *netdev,
                                             netdev_features_t features)
@@ -2503,8 +2296,8 @@ static const struct net_device_ops xgbe_netdev_ops = {
        .ndo_setup_tc           = xgbe_setup_tc,
        .ndo_fix_features       = xgbe_fix_features,
        .ndo_set_features       = xgbe_set_features,
-       .ndo_udp_tunnel_add     = xgbe_udp_tunnel_add,
-       .ndo_udp_tunnel_del     = xgbe_udp_tunnel_del,
+       .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+       .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
        .ndo_features_check     = xgbe_features_check,
 };
 
index 2a70714..a218dc6 100644 (file)
@@ -192,7 +192,6 @@ struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
        mutex_init(&pdata->i2c_mutex);
        init_completion(&pdata->i2c_complete);
        init_completion(&pdata->mdio_complete);
-       INIT_LIST_HEAD(&pdata->vxlan_ports);
 
        pdata->msg_enable = netif_msg_init(debug, default_msg_level);
 
@@ -366,17 +365,12 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
                                          NETIF_F_TSO6 |
                                          NETIF_F_GRO |
                                          NETIF_F_GSO_UDP_TUNNEL |
-                                         NETIF_F_GSO_UDP_TUNNEL_CSUM |
-                                         NETIF_F_RX_UDP_TUNNEL_PORT;
+                                         NETIF_F_GSO_UDP_TUNNEL_CSUM;
 
                netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
-                                      NETIF_F_GSO_UDP_TUNNEL_CSUM |
-                                      NETIF_F_RX_UDP_TUNNEL_PORT;
+                                      NETIF_F_GSO_UDP_TUNNEL_CSUM;
 
-               pdata->vxlan_offloads_set = 1;
-               pdata->vxlan_features = NETIF_F_GSO_UDP_TUNNEL |
-                                       NETIF_F_GSO_UDP_TUNNEL_CSUM |
-                                       NETIF_F_RX_UDP_TUNNEL_PORT;
+               netdev->udp_tunnel_nic_info = xgbe_get_udp_tunnel_info();
        }
 
        netdev->vlan_features |= NETIF_F_SG |
index 7b86240..90cb55e 100644 (file)
@@ -421,10 +421,9 @@ static void xgbe_pci_remove(struct pci_dev *pdev)
        xgbe_free_pdata(pdata);
 }
 
-#ifdef CONFIG_PM
-static int xgbe_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused xgbe_pci_suspend(struct device *dev)
 {
-       struct xgbe_prv_data *pdata = pci_get_drvdata(pdev);
+       struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
        struct net_device *netdev = pdata->netdev;
        int ret = 0;
 
@@ -438,9 +437,9 @@ static int xgbe_pci_suspend(struct pci_dev *pdev, pm_message_t state)
        return ret;
 }
 
-static int xgbe_pci_resume(struct pci_dev *pdev)
+static int __maybe_unused xgbe_pci_resume(struct device *dev)
 {
-       struct xgbe_prv_data *pdata = pci_get_drvdata(pdev);
+       struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
        struct net_device *netdev = pdata->netdev;
        int ret = 0;
 
@@ -460,7 +459,6 @@ static int xgbe_pci_resume(struct pci_dev *pdev)
 
        return ret;
 }
-#endif /* CONFIG_PM */
 
 static const struct xgbe_version_data xgbe_v2a = {
        .init_function_ptrs_phy_impl    = xgbe_init_function_ptrs_phy_v2,
@@ -502,15 +500,16 @@ static const struct pci_device_id xgbe_pci_table[] = {
 };
 MODULE_DEVICE_TABLE(pci, xgbe_pci_table);
 
+static SIMPLE_DEV_PM_OPS(xgbe_pci_pm_ops, xgbe_pci_suspend, xgbe_pci_resume);
+
 static struct pci_driver xgbe_driver = {
        .name = XGBE_DRV_NAME,
        .id_table = xgbe_pci_table,
        .probe = xgbe_pci_probe,
        .remove = xgbe_pci_remove,
-#ifdef CONFIG_PM
-       .suspend = xgbe_pci_suspend,
-       .resume = xgbe_pci_resume,
-#endif
+       .driver = {
+               .pm = &xgbe_pci_pm_ops,
+       }
 };
 
 int xgbe_pci_init(void)
index 5897e46..ba8321e 100644 (file)
@@ -1014,12 +1014,6 @@ struct xgbe_version_data {
        unsigned int an_cdr_workaround;
 };
 
-struct xgbe_vxlan_data {
-       struct list_head list;
-       sa_family_t sa_family;
-       __be16 port;
-};
-
 struct xgbe_prv_data {
        struct net_device *netdev;
        struct pci_dev *pcidev;
@@ -1172,13 +1166,7 @@ struct xgbe_prv_data {
        u32 rss_options;
 
        /* VXLAN settings */
-       unsigned int vxlan_port_set;
-       unsigned int vxlan_offloads_set;
-       unsigned int vxlan_force_disable;
-       unsigned int vxlan_port_count;
-       struct list_head vxlan_ports;
        u16 vxlan_port;
-       netdev_features_t vxlan_features;
 
        /* Netdev related settings */
        unsigned char mac_addr[ETH_ALEN];
@@ -1321,6 +1309,7 @@ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
 void xgbe_init_function_ptrs_i2c(struct xgbe_i2c_if *);
 const struct net_device_ops *xgbe_get_netdev_ops(void);
 const struct ethtool_ops *xgbe_get_ethtool_ops(void);
+const struct udp_tunnel_nic_info *xgbe_get_udp_tunnel_info(void);
 
 #ifdef CONFIG_AMD_XGBE_DCB
 const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void);
index 52ad943..23b2d39 100644 (file)
 #define AQ_NIC_RATE_1G         BIT(4)
 #define AQ_NIC_RATE_100M       BIT(5)
 #define AQ_NIC_RATE_10M                BIT(6)
+#define AQ_NIC_RATE_1G_HALF    BIT(7)
+#define AQ_NIC_RATE_100M_HALF  BIT(8)
+#define AQ_NIC_RATE_10M_HALF   BIT(9)
 
-#define AQ_NIC_RATE_EEE_10G    BIT(7)
-#define AQ_NIC_RATE_EEE_5G     BIT(8)
-#define AQ_NIC_RATE_EEE_2G5    BIT(9)
-#define AQ_NIC_RATE_EEE_1G     BIT(10)
-#define AQ_NIC_RATE_EEE_100M   BIT(11)
+#define AQ_NIC_RATE_EEE_10G    BIT(10)
+#define AQ_NIC_RATE_EEE_5G     BIT(11)
+#define AQ_NIC_RATE_EEE_2G5    BIT(12)
+#define AQ_NIC_RATE_EEE_1G     BIT(13)
+#define AQ_NIC_RATE_EEE_100M   BIT(14)
+#define AQ_NIC_RATE_EEE_MSK     (AQ_NIC_RATE_EEE_10G |\
+                                AQ_NIC_RATE_EEE_5G |\
+                                AQ_NIC_RATE_EEE_2G5 |\
+                                AQ_NIC_RATE_EEE_1G |\
+                                AQ_NIC_RATE_EEE_100M)
 
 #endif /* AQ_COMMON_H */
index 6da6509..d3526cd 100644 (file)
@@ -1,5 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (C) 2014-2019 aQuantia Corporation. */
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
 
 /* File aq_drvinfo.c: Definition of common code for firmware info in sys.*/
 
 #include <linux/uaccess.h>
 
 #include "aq_drvinfo.h"
+#include "aq_nic.h"
 
 #if IS_REACHABLE(CONFIG_HWMON)
+static const char * const atl_temp_label[] = {
+       "PHY Temperature",
+       "MAC Temperature",
+};
+
 static int aq_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
                         u32 attr, int channel, long *value)
 {
        struct aq_nic_s *aq_nic = dev_get_drvdata(dev);
+       int err = 0;
        int temp;
-       int err;
 
        if (!aq_nic)
                return -EIO;
 
-       if (type != hwmon_temp)
+       if (type != hwmon_temp || attr != hwmon_temp_input)
                return -EOPNOTSUPP;
 
-       if (!aq_nic->aq_fw_ops->get_phy_temp)
-               return -EOPNOTSUPP;
+       switch (channel) {
+       case 0:
+               if (!aq_nic->aq_fw_ops->get_phy_temp)
+                       return -EOPNOTSUPP;
 
-       switch (attr) {
-       case hwmon_temp_input:
                err = aq_nic->aq_fw_ops->get_phy_temp(aq_nic->aq_hw, &temp);
                *value = temp;
-               return err;
+               break;
+       case 1:
+               if (!aq_nic->aq_fw_ops->get_mac_temp &&
+                   !aq_nic->aq_hw_ops->hw_get_mac_temp)
+                       return -EOPNOTSUPP;
+
+               if (aq_nic->aq_fw_ops->get_mac_temp)
+                       err = aq_nic->aq_fw_ops->get_mac_temp(aq_nic->aq_hw, &temp);
+               else
+                       err = aq_nic->aq_hw_ops->hw_get_mac_temp(aq_nic->aq_hw, &temp);
+               *value = temp;
+               break;
        default:
                return -EOPNOTSUPP;
        }
+
+       return err;
 }
 
 static int aq_hwmon_read_string(struct device *dev,
@@ -49,28 +72,32 @@ static int aq_hwmon_read_string(struct device *dev,
        if (!aq_nic)
                return -EIO;
 
-       if (type != hwmon_temp)
+       if (type != hwmon_temp || attr != hwmon_temp_label)
                return -EOPNOTSUPP;
 
-       if (!aq_nic->aq_fw_ops->get_phy_temp)
+       if (channel < ARRAY_SIZE(atl_temp_label))
+               *str = atl_temp_label[channel];
+       else
                return -EOPNOTSUPP;
 
-       switch (attr) {
-       case hwmon_temp_label:
-               *str = "PHY Temperature";
-               return 0;
-       default:
-               return -EOPNOTSUPP;
-       }
+       return 0;
 }
 
 static umode_t aq_hwmon_is_visible(const void *data,
                                   enum hwmon_sensor_types type,
                                   u32 attr, int channel)
 {
+       const struct aq_nic_s *nic = data;
+
        if (type != hwmon_temp)
                return 0;
 
+       if (channel == 0 && !nic->aq_fw_ops->get_phy_temp)
+               return 0;
+       else if (channel == 1 && !nic->aq_fw_ops->get_mac_temp &&
+                !nic->aq_hw_ops->hw_get_mac_temp)
+               return 0;
+
        switch (attr) {
        case hwmon_temp_input:
        case hwmon_temp_label:
@@ -88,6 +115,7 @@ static const struct hwmon_ops aq_hwmon_ops = {
 
 static u32 aq_hwmon_temp_config[] = {
        HWMON_T_INPUT | HWMON_T_LABEL,
+       HWMON_T_INPUT | HWMON_T_LABEL,
        0,
 };
 
index 23a0487..59113a2 100644 (file)
@@ -1,14 +1,16 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (C) 2014-2017 aQuantia Corporation. */
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
+ */
 
 /* File aq_drvinfo.h: Declaration of common code for firmware info in sys.*/
 
 #ifndef AQ_DRVINFO_H
 #define AQ_DRVINFO_H
 
-#include "aq_nic.h"
-#include "aq_hw.h"
-#include "hw_atl/hw_atl_utils.h"
+struct net_device;
 
 int aq_drvinfo_init(struct net_device *ndev);
 
index 743d3b1..1ab5314 100644 (file)
@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
  */
 
 /* File aq_ethtool.c: Definition of ethertool related functions. */
@@ -88,13 +89,19 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
        "InDroppedDma",
 };
 
-static const char * const aq_ethtool_queue_stat_names[] = {
+static const char * const aq_ethtool_queue_rx_stat_names[] = {
        "%sQueue[%d] InPackets",
-       "%sQueue[%d] OutPackets",
-       "%sQueue[%d] Restarts",
        "%sQueue[%d] InJumboPackets",
        "%sQueue[%d] InLroPackets",
        "%sQueue[%d] InErrors",
+       "%sQueue[%d] AllocFails",
+       "%sQueue[%d] SkbAllocFails",
+       "%sQueue[%d] Polls",
+};
+
+static const char * const aq_ethtool_queue_tx_stat_names[] = {
+       "%sQueue[%d] OutPackets",
+       "%sQueue[%d] Restarts",
 };
 
 #if IS_ENABLED(CONFIG_MACSEC)
@@ -123,21 +130,21 @@ static const char aq_macsec_stat_names[][ETH_GSTRING_LEN] = {
        "MACSec OutUnctrlHitDropRedir",
 };
 
-static const char *aq_macsec_txsc_stat_names[] = {
+static const char * const aq_macsec_txsc_stat_names[] = {
        "MACSecTXSC%d ProtectedPkts",
        "MACSecTXSC%d EncryptedPkts",
        "MACSecTXSC%d ProtectedOctets",
        "MACSecTXSC%d EncryptedOctets",
 };
 
-static const char *aq_macsec_txsa_stat_names[] = {
+static const char * const aq_macsec_txsa_stat_names[] = {
        "MACSecTXSC%dSA%d HitDropRedirect",
        "MACSecTXSC%dSA%d Protected2Pkts",
        "MACSecTXSC%dSA%d ProtectedPkts",
        "MACSecTXSC%dSA%d EncryptedPkts",
 };
 
-static const char *aq_macsec_rxsa_stat_names[] = {
+static const char * const aq_macsec_rxsa_stat_names[] = {
        "MACSecRXSC%dSA%d UntaggedHitPkts",
        "MACSecRXSC%dSA%d CtrlHitDrpRedir",
        "MACSecRXSC%dSA%d NotUsingSa",
@@ -163,11 +170,17 @@ static const char aq_ethtool_priv_flag_names[][ETH_GSTRING_LEN] = {
 
 static u32 aq_ethtool_n_stats(struct net_device *ndev)
 {
+       const int rx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_rx_stat_names);
+       const int tx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_tx_stat_names);
        struct aq_nic_s *nic = netdev_priv(ndev);
        struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(nic);
        u32 n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
-                     ARRAY_SIZE(aq_ethtool_queue_stat_names) * cfg->vecs *
-                       cfg->tcs;
+                     (rx_stat_cnt + tx_stat_cnt) * cfg->vecs * cfg->tcs;
+
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+       n_stats += rx_stat_cnt * aq_ptp_get_ring_cnt(nic, ATL_RING_RX) +
+                  tx_stat_cnt * aq_ptp_get_ring_cnt(nic, ATL_RING_TX);
+#endif
 
 #if IS_ENABLED(CONFIG_MACSEC)
        if (nic->macsec_cfg) {
@@ -191,6 +204,9 @@ static void aq_ethtool_stats(struct net_device *ndev,
 
        memset(data, 0, aq_ethtool_n_stats(ndev) * sizeof(u64));
        data = aq_nic_get_stats(aq_nic, data);
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+       data = aq_ptp_get_stats(aq_nic, data);
+#endif
 #if IS_ENABLED(CONFIG_MACSEC)
        data = aq_macsec_get_stats(aq_nic, data);
 #endif
@@ -236,7 +252,8 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
 
        switch (stringset) {
        case ETH_SS_STATS: {
-               const int stat_cnt = ARRAY_SIZE(aq_ethtool_queue_stat_names);
+               const int rx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_rx_stat_names);
+               const int tx_stat_cnt = ARRAY_SIZE(aq_ethtool_queue_tx_stat_names);
                char tc_string[8];
                int tc;
 
@@ -250,15 +267,51 @@ static void aq_ethtool_get_strings(struct net_device *ndev,
                                snprintf(tc_string, 8, "TC%d ", tc);
 
                        for (i = 0; i < cfg->vecs; i++) {
-                               for (si = 0; si < stat_cnt; si++) {
+                               for (si = 0; si < rx_stat_cnt; si++) {
+                                       snprintf(p, ETH_GSTRING_LEN,
+                                            aq_ethtool_queue_rx_stat_names[si],
+                                            tc_string,
+                                            AQ_NIC_CFG_TCVEC2RING(cfg, tc, i));
+                                       p += ETH_GSTRING_LEN;
+                               }
+                               for (si = 0; si < tx_stat_cnt; si++) {
                                        snprintf(p, ETH_GSTRING_LEN,
-                                            aq_ethtool_queue_stat_names[si],
+                                            aq_ethtool_queue_tx_stat_names[si],
                                             tc_string,
                                             AQ_NIC_CFG_TCVEC2RING(cfg, tc, i));
                                        p += ETH_GSTRING_LEN;
                                }
                        }
                }
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+               if (nic->aq_ptp) {
+                       const int rx_ring_cnt = aq_ptp_get_ring_cnt(nic, ATL_RING_RX);
+                       const int tx_ring_cnt = aq_ptp_get_ring_cnt(nic, ATL_RING_TX);
+                       unsigned int ptp_ring_idx =
+                               aq_ptp_ring_idx(nic->aq_nic_cfg.tc_mode);
+
+                       snprintf(tc_string, 8, "PTP ");
+
+                       for (i = 0; i < max(rx_ring_cnt, tx_ring_cnt); i++) {
+                               for (si = 0; si < rx_stat_cnt; si++) {
+                                       snprintf(p, ETH_GSTRING_LEN,
+                                                aq_ethtool_queue_rx_stat_names[si],
+                                                tc_string,
+                                                i ? PTP_HWST_RING_IDX : ptp_ring_idx);
+                                       p += ETH_GSTRING_LEN;
+                               }
+                               if (i >= tx_ring_cnt)
+                                       continue;
+                               for (si = 0; si < tx_stat_cnt; si++) {
+                                       snprintf(p, ETH_GSTRING_LEN,
+                                                aq_ethtool_queue_tx_stat_names[si],
+                                                tc_string,
+                                                i ? PTP_HWST_RING_IDX : ptp_ring_idx);
+                                       p += ETH_GSTRING_LEN;
+                               }
+                       }
+               }
+#endif
 #if IS_ENABLED(CONFIG_MACSEC)
                if (!nic->macsec_cfg)
                        break;
@@ -606,21 +659,20 @@ static int aq_ethtool_get_ts_info(struct net_device *ndev,
                            BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
                            BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
 
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
        info->phc_index = ptp_clock_index(aq_ptp_get_ptp_clock(aq_nic->aq_ptp));
+#endif
 
        return 0;
 }
 
-static enum hw_atl_fw2x_rate eee_mask_to_ethtool_mask(u32 speed)
+static u32 eee_mask_to_ethtool_mask(u32 speed)
 {
        u32 rate = 0;
 
        if (speed & AQ_NIC_RATE_EEE_10G)
                rate |= SUPPORTED_10000baseT_Full;
 
-       if (speed & AQ_NIC_RATE_EEE_2G5)
-               rate |= SUPPORTED_2500baseX_Full;
-
        if (speed & AQ_NIC_RATE_EEE_1G)
                rate |= SUPPORTED_1000baseT_Full;
 
@@ -656,7 +708,7 @@ static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
        eee->eee_enabled = !!eee->advertised;
 
        eee->tx_lpi_enabled = eee->eee_enabled;
-       if (eee->advertised & eee->lp_advertised)
+       if ((supported_rates & rate) & AQ_NIC_RATE_EEE_MSK)
                eee->eee_active = true;
 
        return 0;
@@ -718,13 +770,12 @@ static void aq_ethtool_get_pauseparam(struct net_device *ndev,
                                      struct ethtool_pauseparam *pause)
 {
        struct aq_nic_s *aq_nic = netdev_priv(ndev);
-       u32 fc = aq_nic->aq_nic_cfg.fc.req;
+       int fc = aq_nic->aq_nic_cfg.fc.req;
 
        pause->autoneg = 0;
 
        pause->rx_pause = !!(fc & AQ_NIC_FC_RX);
        pause->tx_pause = !!(fc & AQ_NIC_FC_TX);
-
 }
 
 static int aq_ethtool_set_pauseparam(struct net_device *ndev,
@@ -838,6 +889,7 @@ static int aq_ethtool_set_priv_flags(struct net_device *ndev, u32 flags)
        struct aq_nic_s *aq_nic = netdev_priv(ndev);
        struct aq_nic_cfg_s *cfg;
        u32 priv_flags;
+       int ret = 0;
 
        cfg = aq_nic_get_cfg(aq_nic);
        priv_flags = cfg->priv_flags;
@@ -859,10 +911,10 @@ static int aq_ethtool_set_priv_flags(struct net_device *ndev, u32 flags)
                        dev_open(ndev, NULL);
                }
        } else if ((priv_flags ^ flags) & AQ_HW_LOOPBACK_MASK) {
-               aq_nic_set_loopback(aq_nic);
+               ret = aq_nic_set_loopback(aq_nic);
        }
 
-       return 0;
+       return ret;
 }
 
 const struct ethtool_ops aq_ethtool_ops = {
index ed5b465..95ee133 100644 (file)
@@ -1,7 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
  */
 
 /* File aq_hw.h: Declaration of abstract interface for NIC hardware specific
@@ -35,6 +36,8 @@ enum aq_tc_mode {
                        (AQ_RX_LAST_LOC_FVLANID - AQ_RX_FIRST_LOC_FVLANID + 1U)
 #define AQ_RX_QUEUE_NOT_ASSIGNED   0xFFU
 
+#define AQ_FRAC_PER_NS 0x100000000LL
+
 /* Used for rate to Mbps conversion */
 #define AQ_MBPS_DIVISOR         125000 /* 1000000 / 8 */
 
@@ -64,11 +67,15 @@ struct aq_hw_caps_s {
        u8 rx_rings;
        bool flow_control;
        bool is_64_dma;
+       bool op64bit;
        u32 priv_data_len;
 };
 
 struct aq_hw_link_status_s {
        unsigned int mbps;
+       bool full_duplex;
+       u32 lp_link_speed_msk;
+       u32 lp_flow_control;
 };
 
 struct aq_stats_s {
@@ -326,6 +333,8 @@ struct aq_hw_ops {
        int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
 
        int (*hw_set_loopback)(struct aq_hw_s *self, u32 mode, bool enable);
+
+       int (*hw_get_mac_temp)(struct aq_hw_s *self, u32 *temp);
 };
 
 struct aq_fw_ops {
@@ -348,6 +357,8 @@ struct aq_fw_ops {
 
        int (*update_stats)(struct aq_hw_s *self);
 
+       int (*get_mac_temp)(struct aq_hw_s *self, int *temp);
+
        int (*get_phy_temp)(struct aq_hw_s *self, int *temp);
 
        u32 (*get_flow_control)(struct aq_hw_s *self, u32 *fcmode);
index 342c517..1921741 100644 (file)
@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
  */
 
 /* File aq_hw_utils.c: Definitions of helper functions used across
@@ -9,6 +10,9 @@
  */
 
 #include "aq_hw_utils.h"
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+
 #include "aq_hw.h"
 #include "aq_nic.h"
 
@@ -37,9 +41,8 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg)
 {
        u32 value = readl(hw->mmio + reg);
 
-       if ((~0U) == value &&
-           (~0U) == readl(hw->mmio +
-                          hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr))
+       if (value == U32_MAX &&
+           readl(hw->mmio + hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr) == U32_MAX)
                aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG);
 
        return value;
@@ -56,13 +59,28 @@ void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value)
  */
 u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg)
 {
-       u64 value = aq_hw_read_reg(hw, reg);
+       u64 value = U64_MAX;
 
-       value |= (u64)aq_hw_read_reg(hw, reg + 4) << 32;
+       if (hw->aq_nic_cfg->aq_hw_caps->op64bit)
+               value = readq(hw->mmio + reg);
+       else
+               value = lo_hi_readq(hw->mmio + reg);
+
+       if (value == U64_MAX &&
+           readl(hw->mmio + hw->aq_nic_cfg->aq_hw_caps->hw_alive_check_addr) == U32_MAX)
+               aq_utils_obj_set(&hw->flags, AQ_HW_FLAG_ERR_UNPLUG);
 
        return value;
 }
 
+void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value)
+{
+       if (hw->aq_nic_cfg->aq_hw_caps->op64bit)
+               writeq(value, hw->mmio + reg);
+       else
+               lo_hi_writeq(value, hw->mmio + reg);
+}
+
 int aq_hw_err_from_flags(struct aq_hw_s *hw)
 {
        int err = 0;
index 32aa5f2..ffa6e40 100644 (file)
@@ -1,7 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
  */
 
 /* File aq_hw_utils.h: Declaration of helper functions used across hardware
@@ -33,6 +34,7 @@ u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift);
 u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
 void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
 u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg);
+void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value);
 int aq_hw_err_from_flags(struct aq_hw_s *hw);
 int aq_hw_num_tcs(struct aq_hw_s *hw);
 int aq_hw_q_per_tc(struct aq_hw_s *hw);
index 8a1da04..8f70a39 100644 (file)
@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
  */
 
 /* File aq_main.c: Main file for aQuantia Linux driver. */
@@ -94,10 +95,11 @@ err_exit:
        return err;
 }
 
-static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
        struct aq_nic_s *aq_nic = netdev_priv(ndev);
 
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
        if (unlikely(aq_utils_obj_test(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP))) {
                /* Hardware adds the Timestamp for PTPv2 802.AS1
                 * and PTPv2 IPv4 UDP.
@@ -114,6 +116,7 @@ static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                    unlikely(eth_hdr(skb)->h_proto == htons(ETH_P_1588)))
                        return aq_ptp_xmit(aq_nic, skb);
        }
+#endif
 
        skb_tx_timestamp(skb);
        return aq_nic_xmit(aq_nic, skb);
@@ -222,6 +225,7 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
        (void)aq_nic_set_multicast_list(aq_nic, ndev);
 }
 
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
 static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic,
                                   struct hwtstamp_config *config)
 {
@@ -256,26 +260,31 @@ static int aq_ndev_config_hwtstamp(struct aq_nic_s *aq_nic,
 
        return aq_ptp_hwtstamp_config_set(aq_nic->aq_ptp, config);
 }
+#endif
 
 static int aq_ndev_hwtstamp_set(struct aq_nic_s *aq_nic, struct ifreq *ifr)
 {
        struct hwtstamp_config config;
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
        int ret_val;
+#endif
 
        if (!aq_nic->aq_ptp)
                return -EOPNOTSUPP;
 
        if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
                return -EFAULT;
-
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
        ret_val = aq_ndev_config_hwtstamp(aq_nic, &config);
        if (ret_val)
                return ret_val;
+#endif
 
        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
               -EFAULT : 0;
 }
 
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
 static int aq_ndev_hwtstamp_get(struct aq_nic_s *aq_nic, struct ifreq *ifr)
 {
        struct hwtstamp_config config;
@@ -287,6 +296,7 @@ static int aq_ndev_hwtstamp_get(struct aq_nic_s *aq_nic, struct ifreq *ifr)
        return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
               -EFAULT : 0;
 }
+#endif
 
 static int aq_ndev_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 {
@@ -296,8 +306,10 @@ static int aq_ndev_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
        case SIOCSHWTSTAMP:
                return aq_ndev_hwtstamp_set(aq_nic, ifr);
 
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
        case SIOCGHWTSTAMP:
                return aq_ndev_hwtstamp_get(aq_nic, ifr);
+#endif
        }
 
        return -EOPNOTSUPP;
index 4435c63..c6b0981 100644 (file)
@@ -371,7 +371,7 @@ void aq_nic_ndev_init(struct aq_nic_s *self)
        self->ndev->features = aq_hw_caps->hw_features;
        self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
                                     NETIF_F_RXHASH | NETIF_F_SG |
-                                    NETIF_F_LRO | NETIF_F_TSO;
+                                    NETIF_F_LRO | NETIF_F_TSO | NETIF_F_TSO6;
        self->ndev->gso_partial_features = NETIF_F_GSO_UDP_L4;
        self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
        self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
@@ -907,13 +907,13 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
                     aq_vec && self->aq_vecs > i;
                     ++i, aq_vec = self->aq_vec[i]) {
                        data += count;
-                       aq_vec_get_sw_stats(aq_vec, tc, data, &count);
+                       count = aq_vec_get_sw_stats(aq_vec, tc, data);
                }
        }
 
        data += count;
 
-err_exit:;
+err_exit:
        return data;
 }
 
@@ -935,12 +935,17 @@ static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
 void aq_nic_get_link_ksettings(struct aq_nic_s *self,
                               struct ethtool_link_ksettings *cmd)
 {
+       u32 lp_link_speed_msk;
+
        if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
                cmd->base.port = PORT_FIBRE;
        else
                cmd->base.port = PORT_TP;
-       /* This driver supports only 10G capable adapters, so DUPLEX_FULL */
-       cmd->base.duplex = DUPLEX_FULL;
+
+       cmd->base.duplex = DUPLEX_UNKNOWN;
+       if (self->link_status.mbps)
+               cmd->base.duplex = self->link_status.full_duplex ?
+                                  DUPLEX_FULL : DUPLEX_HALF;
        cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;
 
        ethtool_link_ksettings_zero_link_mode(cmd, supported);
@@ -961,14 +966,26 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
                ethtool_link_ksettings_add_link_mode(cmd, supported,
                                                     1000baseT_Full);
 
+       if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G_HALF)
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    1000baseT_Half);
+
        if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
                ethtool_link_ksettings_add_link_mode(cmd, supported,
                                                     100baseT_Full);
 
+       if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M_HALF)
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    100baseT_Half);
+
        if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M)
                ethtool_link_ksettings_add_link_mode(cmd, supported,
                                                     10baseT_Full);
 
+       if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10M_HALF)
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    10baseT_Half);
+
        if (self->aq_nic_cfg.aq_hw_caps->flow_control) {
                ethtool_link_ksettings_add_link_mode(cmd, supported,
                                                     Pause);
@@ -988,30 +1005,42 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
        if (self->aq_nic_cfg.is_autoneg)
                ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
 
-       if (self->aq_nic_cfg.link_speed_msk  & AQ_NIC_RATE_10G)
+       if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
                ethtool_link_ksettings_add_link_mode(cmd, advertising,
                                                     10000baseT_Full);
 
-       if (self->aq_nic_cfg.link_speed_msk  & AQ_NIC_RATE_5G)
+       if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
                ethtool_link_ksettings_add_link_mode(cmd, advertising,
                                                     5000baseT_Full);
 
-       if (self->aq_nic_cfg.link_speed_msk  & AQ_NIC_RATE_2G5)
+       if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2G5)
                ethtool_link_ksettings_add_link_mode(cmd, advertising,
                                                     2500baseT_Full);
 
-       if (self->aq_nic_cfg.link_speed_msk  & AQ_NIC_RATE_1G)
+       if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
                ethtool_link_ksettings_add_link_mode(cmd, advertising,
                                                     1000baseT_Full);
 
-       if (self->aq_nic_cfg.link_speed_msk  & AQ_NIC_RATE_100M)
+       if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G_HALF)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    1000baseT_Half);
+
+       if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
                ethtool_link_ksettings_add_link_mode(cmd, advertising,
                                                     100baseT_Full);
 
-       if (self->aq_nic_cfg.link_speed_msk  & AQ_NIC_RATE_10M)
+       if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M_HALF)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    100baseT_Half);
+
+       if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M)
                ethtool_link_ksettings_add_link_mode(cmd, advertising,
                                                     10baseT_Full);
 
+       if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10M_HALF)
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    10baseT_Half);
+
        if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)
                ethtool_link_ksettings_add_link_mode(cmd, advertising,
                                                     Pause);
@@ -1026,32 +1055,84 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
                ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
        else
                ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
+
+       ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
+       lp_link_speed_msk = self->aq_hw->aq_link_status.lp_link_speed_msk;
+
+       if (lp_link_speed_msk & AQ_NIC_RATE_10G)
+               ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+                                                    10000baseT_Full);
+
+       if (lp_link_speed_msk & AQ_NIC_RATE_5G)
+               ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+                                                    5000baseT_Full);
+
+       if (lp_link_speed_msk & AQ_NIC_RATE_2G5)
+               ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+                                                    2500baseT_Full);
+
+       if (lp_link_speed_msk & AQ_NIC_RATE_1G)
+               ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+                                                    1000baseT_Full);
+
+       if (lp_link_speed_msk & AQ_NIC_RATE_1G_HALF)
+               ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+                                                    1000baseT_Half);
+
+       if (lp_link_speed_msk & AQ_NIC_RATE_100M)
+               ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+                                                    100baseT_Full);
+
+       if (lp_link_speed_msk & AQ_NIC_RATE_100M_HALF)
+               ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+                                                    100baseT_Half);
+
+       if (lp_link_speed_msk & AQ_NIC_RATE_10M)
+               ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+                                                    10baseT_Full);
+
+       if (lp_link_speed_msk & AQ_NIC_RATE_10M_HALF)
+               ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+                                                    10baseT_Half);
+
+       if (self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_RX)
+               ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+                                                    Pause);
+       if (!!(self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_TX) ^
+           !!(self->aq_hw->aq_link_status.lp_flow_control & AQ_NIC_FC_RX))
+               ethtool_link_ksettings_add_link_mode(cmd, lp_advertising,
+                                                    Asym_Pause);
 }
 
 int aq_nic_set_link_ksettings(struct aq_nic_s *self,
                              const struct ethtool_link_ksettings *cmd)
 {
-       u32 speed = 0U;
+       int fduplex = (cmd->base.duplex == DUPLEX_FULL);
+       u32 speed = cmd->base.speed;
        u32 rate = 0U;
        int err = 0;
 
+       if (!fduplex && speed > SPEED_1000) {
+               err = -EINVAL;
+               goto err_exit;
+       }
+
        if (cmd->base.autoneg == AUTONEG_ENABLE) {
                rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
                self->aq_nic_cfg.is_autoneg = true;
        } else {
-               speed = cmd->base.speed;
-
                switch (speed) {
                case SPEED_10:
-                       rate = AQ_NIC_RATE_10M;
+                       rate = fduplex ? AQ_NIC_RATE_10M : AQ_NIC_RATE_10M_HALF;
                        break;
 
                case SPEED_100:
-                       rate = AQ_NIC_RATE_100M;
+                       rate = fduplex ? AQ_NIC_RATE_100M
+                                      : AQ_NIC_RATE_100M_HALF;
                        break;
 
                case SPEED_1000:
-                       rate = AQ_NIC_RATE_1G;
+                       rate = fduplex ? AQ_NIC_RATE_1G : AQ_NIC_RATE_1G_HALF;
                        break;
 
                case SPEED_2500:
@@ -1107,7 +1188,7 @@ int aq_nic_set_loopback(struct aq_nic_s *self)
 
        if (!self->aq_hw_ops->hw_set_loopback ||
            !self->aq_fw_ops->set_phyloopback)
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        mutex_lock(&self->fwreq_mutex);
        self->aq_hw_ops->hw_set_loopback(self->aq_hw,
index 2ab0030..317bfc6 100644 (file)
@@ -1,7 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
  */
 
 /* File aq_nic.h: Declaration of common code for NIC. */
@@ -111,7 +112,7 @@ struct aq_hw_rx_fltrs_s {
        u16                   active_filters;
        struct aq_hw_rx_fl2   fl2;
        struct aq_hw_rx_fl3l4 fl3l4;
-       /*filter ether type */
+       /* filter ether type */
        u8 fet_reserved_count;
 };
 
index 41c0f56..5925384 100644 (file)
@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
  */
 
 /* File aq_pci_func.c: Definition of PCI functions. */
@@ -114,7 +115,7 @@ static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
        return 0;
 }
 
-int aq_pci_func_init(struct pci_dev *pdev)
+static int aq_pci_func_init(struct pci_dev *pdev)
 {
        int err;
 
index 77be7ee..3fa5f7a 100644 (file)
@@ -1,7 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
  */
 
 /* File aq_pci_func.h: Declaration of PCI functions. */
@@ -19,7 +20,6 @@ struct aq_board_revision_s {
        const struct aq_hw_caps_s *caps;
 };
 
-int aq_pci_func_init(struct pci_dev *pdev);
 int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
                          char *name, irq_handler_t irq_handler,
                          void *irq_arg, cpumask_t *affinity_mask);
index 599ced2..06de19f 100644 (file)
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/* Aquantia Corporation Network Driver
- * Copyright (C) 2014-2019 Aquantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
  */
 
 /* File aq_ptp.c:
@@ -18,6 +20,8 @@
 #include "aq_phy.h"
 #include "aq_filters.h"
 
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
+
 #define AQ_PTP_TX_TIMEOUT        (HZ *  10)
 
 #define POLL_SYNC_TIMER_MS 15
@@ -77,6 +81,8 @@ struct aq_ptp_s {
 
        bool extts_pin_enabled;
        u64 last_sync1588_ts;
+
+       bool a1_ptp;
 };
 
 struct ptp_tm_offset {
@@ -778,8 +784,10 @@ int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb)
                err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw,
                                                       ring, frags);
                if (err >= 0) {
+                       u64_stats_update_begin(&ring->stats.tx.syncp);
                        ++ring->stats.tx.packets;
                        ring->stats.tx.bytes += skb->len;
+                       u64_stats_update_end(&ring->stats.tx.syncp);
                }
        } else {
                err = NETDEV_TX_BUSY;
@@ -840,7 +848,7 @@ int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
        if (!aq_ptp)
                return 0;
 
-       err = aq_ring_init(&aq_ptp->ptp_tx);
+       err = aq_ring_init(&aq_ptp->ptp_tx, ATL_RING_TX);
        if (err < 0)
                goto err_exit;
        err = aq_nic->aq_hw_ops->hw_ring_tx_init(aq_nic->aq_hw,
@@ -849,7 +857,7 @@ int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
        if (err < 0)
                goto err_exit;
 
-       err = aq_ring_init(&aq_ptp->ptp_rx);
+       err = aq_ring_init(&aq_ptp->ptp_rx, ATL_RING_RX);
        if (err < 0)
                goto err_exit;
        err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
@@ -867,7 +875,7 @@ int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
        if (err < 0)
                goto err_rx_free;
 
-       err = aq_ring_init(&aq_ptp->hwts_rx);
+       err = aq_ring_init(&aq_ptp->hwts_rx, ATL_RING_RX);
        if (err < 0)
                goto err_rx_free;
        err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
@@ -941,21 +949,6 @@ void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic)
        aq_ring_rx_deinit(&aq_ptp->ptp_rx);
 }
 
-#define PTP_8TC_RING_IDX             8
-#define PTP_4TC_RING_IDX            16
-#define PTP_HWST_RING_IDX           31
-
-/* Index must be 8 (8 TCs) or 16 (4 TCs).
- * It depends on Traffic Class mode.
- */
-static unsigned int ptp_ring_idx(const enum aq_tc_mode tc_mode)
-{
-       if (tc_mode == AQ_TC_MODE_8TCS)
-               return PTP_8TC_RING_IDX;
-
-       return PTP_4TC_RING_IDX;
-}
-
 int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
 {
        struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
@@ -967,7 +960,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
        if (!aq_ptp)
                return 0;
 
-       tx_ring_idx = ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
+       tx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
 
        ring = aq_ring_tx_alloc(&aq_ptp->ptp_tx, aq_nic,
                                tx_ring_idx, &aq_nic->aq_nic_cfg);
@@ -976,7 +969,7 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
                goto err_exit;
        }
 
-       rx_ring_idx = ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
+       rx_ring_idx = aq_ptp_ring_idx(aq_nic->aq_nic_cfg.tc_mode);
 
        ring = aq_ring_rx_alloc(&aq_ptp->ptp_rx, aq_nic,
                                rx_ring_idx, &aq_nic->aq_nic_cfg);
@@ -1168,11 +1161,17 @@ static void aq_ptp_poll_sync_work_cb(struct work_struct *w);
 
 int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
 {
+       bool a1_ptp = ATL_HW_IS_CHIP_FEATURE(aq_nic->aq_hw, ATLANTIC);
        struct hw_atl_utils_mbox mbox;
        struct ptp_clock *clock;
        struct aq_ptp_s *aq_ptp;
        int err = 0;
 
+       if (!a1_ptp) {
+               aq_nic->aq_ptp = NULL;
+               return 0;
+       }
+
        if (!aq_nic->aq_hw_ops->hw_get_ptp_ts) {
                aq_nic->aq_ptp = NULL;
                return 0;
@@ -1199,6 +1198,7 @@ int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
        }
 
        aq_ptp->aq_nic = aq_nic;
+       aq_ptp->a1_ptp = a1_ptp;
 
        spin_lock_init(&aq_ptp->ptp_lock);
        spin_lock_init(&aq_ptp->ptp_ring_lock);
@@ -1389,3 +1389,36 @@ static void aq_ptp_poll_sync_work_cb(struct work_struct *w)
                schedule_delayed_work(&aq_ptp->poll_sync, timeout);
        }
 }
+
+int aq_ptp_get_ring_cnt(struct aq_nic_s *aq_nic, const enum atl_ring_type ring_type)
+{
+       if (!aq_nic->aq_ptp)
+               return 0;
+
+       /* Additional RX ring is allocated for PTP HWTS on A1 */
+       return (aq_nic->aq_ptp->a1_ptp && ring_type == ATL_RING_RX) ? 2 : 1;
+}
+
+u64 *aq_ptp_get_stats(struct aq_nic_s *aq_nic, u64 *data)
+{
+       struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
+       unsigned int count = 0U;
+
+       if (!aq_ptp)
+               return data;
+
+       count = aq_ring_fill_stats_data(&aq_ptp->ptp_rx, data);
+       data += count;
+       count = aq_ring_fill_stats_data(&aq_ptp->ptp_tx, data);
+       data += count;
+
+       if (aq_ptp->a1_ptp) {
+               /* Only Receive ring for HWTS */
+               count = aq_ring_fill_stats_data(&aq_ptp->hwts_rx, data);
+               data += count;
+       }
+
+       return data;
+}
+
+#endif
index 2319064..28ccb7c 100644 (file)
@@ -1,6 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/* Aquantia Corporation Network Driver
- * Copyright (C) 2014-2019 Aquantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
  */
 
 /* File aq_ptp.h: Declaration of PTP functions.
 
 #include <linux/net_tstamp.h>
 
+#include "aq_ring.h"
+
+#define PTP_8TC_RING_IDX             8
+#define PTP_4TC_RING_IDX            16
+#define PTP_HWST_RING_IDX           31
+
+/* Index must be 8 (8 TCs) or 16 (4 TCs).
+ * It depends on Traffic Class mode.
+ */
+static inline unsigned int aq_ptp_ring_idx(const enum aq_tc_mode tc_mode)
+{
+       if (tc_mode == AQ_TC_MODE_8TCS)
+               return PTP_8TC_RING_IDX;
+
+       return PTP_4TC_RING_IDX;
+}
+
 #if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
 
 /* Common functions */
@@ -55,6 +74,10 @@ struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp);
 
 int aq_ptp_link_change(struct aq_nic_s *aq_nic);
 
+/* PTP ring statistics */
+int aq_ptp_get_ring_cnt(struct aq_nic_s *aq_nic, const enum atl_ring_type ring_type);
+u64 *aq_ptp_get_stats(struct aq_nic_s *aq_nic, u64 *data);
+
 #else
 
 static inline int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
index 68fdb39..4f91365 100644 (file)
@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
  */
 
 /* File aq_ring.c: Definition of functions for Rx/Tx rings. */
@@ -69,24 +70,35 @@ static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
                        rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
                        if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
                                (PAGE_SIZE << order)) {
+                               u64_stats_update_begin(&self->stats.rx.syncp);
                                self->stats.rx.pg_flips++;
+                               u64_stats_update_end(&self->stats.rx.syncp);
                        } else {
                                /* Buffer exhausted. We have other users and
                                 * should release this page and realloc
                                 */
                                aq_free_rxpage(&rxbuf->rxdata,
                                               aq_nic_get_dev(self->aq_nic));
+                               u64_stats_update_begin(&self->stats.rx.syncp);
                                self->stats.rx.pg_losts++;
+                               u64_stats_update_end(&self->stats.rx.syncp);
                        }
                } else {
                        rxbuf->rxdata.pg_off = 0;
+                       u64_stats_update_begin(&self->stats.rx.syncp);
                        self->stats.rx.pg_reuses++;
+                       u64_stats_update_end(&self->stats.rx.syncp);
                }
        }
 
        if (!rxbuf->rxdata.page) {
                ret = aq_get_rxpage(&rxbuf->rxdata, order,
                                    aq_nic_get_dev(self->aq_nic));
+               if (ret) {
+                       u64_stats_update_begin(&self->stats.rx.syncp);
+                       self->stats.rx.alloc_fails++;
+                       u64_stats_update_end(&self->stats.rx.syncp);
+               }
                return ret;
        }
 
@@ -205,11 +217,17 @@ aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic,
        return self;
 }
 
-int aq_ring_init(struct aq_ring_s *self)
+int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type)
 {
        self->hw_head = 0;
        self->sw_head = 0;
        self->sw_tail = 0;
+       self->ring_type = ring_type;
+
+       if (self->ring_type == ATL_RING_RX)
+               u64_stats_init(&self->stats.rx.syncp);
+       else
+               u64_stats_init(&self->stats.tx.syncp);
 
        return 0;
 }
@@ -237,7 +255,9 @@ void aq_ring_queue_wake(struct aq_ring_s *ring)
                                                      ring->idx))) {
                netif_wake_subqueue(ndev,
                                    AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx));
+               u64_stats_update_begin(&ring->stats.tx.syncp);
                ring->stats.tx.queue_restarts++;
+               u64_stats_update_end(&ring->stats.tx.syncp);
        }
 }
 
@@ -279,8 +299,10 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
                }
 
                if (unlikely(buff->is_eop)) {
-                       ++self->stats.rx.packets;
+                       u64_stats_update_begin(&self->stats.tx.syncp);
+                       ++self->stats.tx.packets;
                        self->stats.tx.bytes += buff->skb->len;
+                       u64_stats_update_end(&self->stats.tx.syncp);
 
                        dev_kfree_skb_any(buff->skb);
                }
@@ -300,7 +322,9 @@ static void aq_rx_checksum(struct aq_ring_s *self,
                return;
 
        if (unlikely(buff->is_cso_err)) {
+               u64_stats_update_begin(&self->stats.rx.syncp);
                ++self->stats.rx.errors;
+               u64_stats_update_end(&self->stats.rx.syncp);
                skb->ip_summed = CHECKSUM_NONE;
                return;
        }
@@ -370,13 +394,17 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                                        buff_->is_cleaned = true;
                                } while (!buff_->is_eop);
 
+                               u64_stats_update_begin(&self->stats.rx.syncp);
                                ++self->stats.rx.errors;
+                               u64_stats_update_end(&self->stats.rx.syncp);
                                continue;
                        }
                }
 
                if (buff->is_error) {
+                       u64_stats_update_begin(&self->stats.rx.syncp);
                        ++self->stats.rx.errors;
+                       u64_stats_update_end(&self->stats.rx.syncp);
                        continue;
                }
 
@@ -391,6 +419,9 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                        skb = build_skb(aq_buf_vaddr(&buff->rxdata),
                                        AQ_CFG_RX_FRAME_MAX);
                        if (unlikely(!skb)) {
+                               u64_stats_update_begin(&self->stats.rx.syncp);
+                               self->stats.rx.skb_alloc_fails++;
+                               u64_stats_update_end(&self->stats.rx.syncp);
                                err = -ENOMEM;
                                goto err_exit;
                        }
@@ -404,6 +435,9 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                } else {
                        skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
                        if (unlikely(!skb)) {
+                               u64_stats_update_begin(&self->stats.rx.syncp);
+                               self->stats.rx.skb_alloc_fails++;
+                               u64_stats_update_end(&self->stats.rx.syncp);
                                err = -ENOMEM;
                                goto err_exit;
                        }
@@ -477,8 +511,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                                                : AQ_NIC_RING2QMAP(self->aq_nic,
                                                                   self->idx));
 
+               u64_stats_update_begin(&self->stats.rx.syncp);
                ++self->stats.rx.packets;
                self->stats.rx.bytes += skb->len;
+               u64_stats_update_end(&self->stats.rx.syncp);
 
                napi_gro_receive(napi, skb);
        }
@@ -489,6 +525,7 @@ err_exit:
 
 void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
 {
+#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK)
        while (self->sw_head != self->hw_head) {
                u64 ns;
 
@@ -500,6 +537,7 @@ void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
 
                self->sw_head = aq_ring_next_dx(self, self->sw_head);
        }
+#endif
 }
 
 int aq_ring_rx_fill(struct aq_ring_s *self)
@@ -535,7 +573,7 @@ err_exit:
 void aq_ring_rx_deinit(struct aq_ring_s *self)
 {
        if (!self)
-               goto err_exit;
+               return;
 
        for (; self->sw_head != self->sw_tail;
                self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
@@ -543,14 +581,12 @@ void aq_ring_rx_deinit(struct aq_ring_s *self)
 
                aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
        }
-
-err_exit:;
 }
 
 void aq_ring_free(struct aq_ring_s *self)
 {
        if (!self)
-               goto err_exit;
+               return;
 
        kfree(self->buff_ring);
 
@@ -558,6 +594,35 @@ void aq_ring_free(struct aq_ring_s *self)
                dma_free_coherent(aq_nic_get_dev(self->aq_nic),
                                  self->size * self->dx_size, self->dx_ring,
                                  self->dx_ring_pa);
+}
+
+unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data)
+{
+       unsigned int count;
+       unsigned int start;
+
+       if (self->ring_type == ATL_RING_RX) {
+               /* This data should mimic aq_ethtool_queue_rx_stat_names structure */
+               do {
+                       count = 0;
+                       start = u64_stats_fetch_begin_irq(&self->stats.rx.syncp);
+                       data[count] = self->stats.rx.packets;
+                       data[++count] = self->stats.rx.jumbo_packets;
+                       data[++count] = self->stats.rx.lro_packets;
+                       data[++count] = self->stats.rx.errors;
+                       data[++count] = self->stats.rx.alloc_fails;
+                       data[++count] = self->stats.rx.skb_alloc_fails;
+                       data[++count] = self->stats.rx.polls;
+               } while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start));
+       } else {
+               /* This data should mimic aq_ethtool_queue_tx_stat_names structure */
+               do {
+                       count = 0;
+                       start = u64_stats_fetch_begin_irq(&self->stats.tx.syncp);
+                       data[count] = self->stats.tx.packets;
+                       data[++count] = self->stats.tx.queue_restarts;
+               } while (u64_stats_fetch_retry_irq(&self->stats.tx.syncp, start));
+       }
 
-err_exit:;
+       return ++count;
 }
index 2c96f20..93659e5 100644 (file)
@@ -1,7 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
  */
 
 /* File aq_ring.h: Declaration of functions for Rx/Tx rings. */
@@ -88,17 +89,22 @@ struct __packed aq_ring_buff_s {
 };
 
 struct aq_ring_stats_rx_s {
+       struct u64_stats_sync syncp;    /* must be first */
        u64 errors;
        u64 packets;
        u64 bytes;
        u64 lro_packets;
        u64 jumbo_packets;
+       u64 alloc_fails;
+       u64 skb_alloc_fails;
+       u64 polls;
        u64 pg_losts;
        u64 pg_flips;
        u64 pg_reuses;
 };
 
 struct aq_ring_stats_tx_s {
+       struct u64_stats_sync syncp;    /* must be first */
        u64 errors;
        u64 packets;
        u64 bytes;
@@ -110,6 +116,11 @@ union aq_ring_stats_s {
        struct aq_ring_stats_tx_s tx;
 };
 
+enum atl_ring_type {
+       ATL_RING_TX,
+       ATL_RING_RX,
+};
+
 struct aq_ring_s {
        struct aq_ring_buff_s *buff_ring;
        u8 *dx_ring;            /* descriptors ring, dma shared mem */
@@ -124,6 +135,7 @@ struct aq_ring_s {
        unsigned int page_order;
        union aq_ring_stats_s stats;
        dma_addr_t dx_ring_pa;
+       enum atl_ring_type ring_type;
 };
 
 struct aq_ring_param_s {
@@ -163,7 +175,7 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
                                   struct aq_nic_s *aq_nic,
                                   unsigned int idx,
                                   struct aq_nic_cfg_s *aq_nic_cfg);
-int aq_ring_init(struct aq_ring_s *self);
+int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type);
 void aq_ring_rx_deinit(struct aq_ring_s *self);
 void aq_ring_free(struct aq_ring_s *self);
 void aq_ring_update_queue_state(struct aq_ring_s *ring);
@@ -181,4 +193,6 @@ struct aq_ring_s *aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
                unsigned int size, unsigned int dx_size);
 void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);
 
+unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data);
+
 #endif /* AQ_RING_H */
index d1d43c8..d281322 100644 (file)
@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
  */
 
 /* File aq_vec.c: Definition of common structure for vector of Rx and Tx rings.
@@ -44,6 +45,9 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
        } else {
                for (i = 0U, ring = self->ring[0];
                        self->tx_rings > i; ++i, ring = self->ring[i]) {
+                       u64_stats_update_begin(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
+                       ring[AQ_VEC_RX_ID].stats.rx.polls++;
+                       u64_stats_update_end(&ring[AQ_VEC_RX_ID].stats.rx.syncp);
                        if (self->aq_hw_ops->hw_ring_tx_head_update) {
                                err = self->aq_hw_ops->hw_ring_tx_head_update(
                                                        self->aq_hw,
@@ -180,7 +184,7 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
 
        for (i = 0U, ring = self->ring[0];
                self->tx_rings > i; ++i, ring = self->ring[i]) {
-               err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
+               err = aq_ring_init(&ring[AQ_VEC_TX_ID], ATL_RING_TX);
                if (err < 0)
                        goto err_exit;
 
@@ -190,7 +194,7 @@ int aq_vec_init(struct aq_vec_s *self, const struct aq_hw_ops *aq_hw_ops,
                if (err < 0)
                        goto err_exit;
 
-               err = aq_ring_init(&ring[AQ_VEC_RX_ID]);
+               err = aq_ring_init(&ring[AQ_VEC_RX_ID], ATL_RING_RX);
                if (err < 0)
                        goto err_exit;
 
@@ -349,59 +353,23 @@ cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
        return &self->aq_ring_param.affinity_mask;
 }
 
-static void aq_vec_add_stats(struct aq_vec_s *self,
-                            const unsigned int tc,
-                            struct aq_ring_stats_rx_s *stats_rx,
-                            struct aq_ring_stats_tx_s *stats_tx)
+bool aq_vec_is_valid_tc(struct aq_vec_s *self, const unsigned int tc)
 {
-       struct aq_ring_s *ring = self->ring[tc];
-
-       if (tc < self->rx_rings) {
-               struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx;
-
-               stats_rx->packets += rx->packets;
-               stats_rx->bytes += rx->bytes;
-               stats_rx->errors += rx->errors;
-               stats_rx->jumbo_packets += rx->jumbo_packets;
-               stats_rx->lro_packets += rx->lro_packets;
-               stats_rx->pg_losts += rx->pg_losts;
-               stats_rx->pg_flips += rx->pg_flips;
-               stats_rx->pg_reuses += rx->pg_reuses;
-       }
-
-       if (tc < self->tx_rings) {
-               struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
-
-               stats_tx->packets += tx->packets;
-               stats_tx->bytes += tx->bytes;
-               stats_tx->errors += tx->errors;
-               stats_tx->queue_restarts += tx->queue_restarts;
-       }
+       return tc < self->rx_rings && tc < self->tx_rings;
 }
 
-int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data,
-                       unsigned int *p_count)
+unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data)
 {
-       struct aq_ring_stats_rx_s stats_rx;
-       struct aq_ring_stats_tx_s stats_tx;
-       unsigned int count = 0U;
-
-       memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
-       memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
-
-       aq_vec_add_stats(self, tc, &stats_rx, &stats_tx);
+       unsigned int count;
 
-       /* This data should mimic aq_ethtool_queue_stat_names structure
-        */
-       data[count] += stats_rx.packets;
-       data[++count] += stats_tx.packets;
-       data[++count] += stats_tx.queue_restarts;
-       data[++count] += stats_rx.jumbo_packets;
-       data[++count] += stats_rx.lro_packets;
-       data[++count] += stats_rx.errors;
+       WARN_ONCE(!aq_vec_is_valid_tc(self, tc),
+                 "Invalid tc %u (#rx=%u, #tx=%u)\n",
+                 tc, self->rx_rings, self->tx_rings);
+       if (!aq_vec_is_valid_tc(self, tc))
+               return 0;
 
-       if (p_count)
-               *p_count = ++count;
+       count = aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_RX_ID], data);
+       count += aq_ring_fill_stats_data(&self->ring[tc][AQ_VEC_TX_ID], data + count);
 
-       return 0;
+       return count;
 }
index 541af85..567f3d4 100644 (file)
@@ -1,7 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
  */
 
 /* File aq_vec.h: Definition of common structures for vector of Rx and Tx rings.
@@ -35,7 +36,7 @@ void aq_vec_ring_free(struct aq_vec_s *self);
 int aq_vec_start(struct aq_vec_s *self);
 void aq_vec_stop(struct aq_vec_s *self);
 cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self);
-int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data,
-                       unsigned int *p_count);
+bool aq_vec_is_valid_tc(struct aq_vec_s *self, const unsigned int tc);
+unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u64 *data);
 
 #endif /* AQ_VEC_H */
index a312864..c38a4b8 100644 (file)
 #include "hw_atl_llh.h"
 #include "hw_atl_a0_internal.h"
 
-#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \
-       .is_64_dma = true,                \
-       .msix_irqs = 4U,                  \
-       .irq_mask = ~0U,                  \
-       .vecs = HW_ATL_A0_RSS_MAX,        \
-       .tcs_max = HW_ATL_A0_TC_MAX,      \
-       .rxd_alignment = 1U,              \
-       .rxd_size = HW_ATL_A0_RXD_SIZE,   \
-       .rxds_max = HW_ATL_A0_MAX_RXD,    \
-       .rxds_min = HW_ATL_A0_MIN_RXD,    \
-       .txd_alignment = 1U,              \
-       .txd_size = HW_ATL_A0_TXD_SIZE,   \
-       .txds_max = HW_ATL_A0_MAX_TXD,    \
-       .txds_min = HW_ATL_A0_MIN_RXD,    \
-       .txhwb_alignment = 4096U,         \
-       .tx_rings = HW_ATL_A0_TX_RINGS,   \
-       .rx_rings = HW_ATL_A0_RX_RINGS,   \
-       .hw_features = NETIF_F_HW_CSUM |  \
-                       NETIF_F_RXHASH |  \
-                       NETIF_F_RXCSUM |  \
-                       NETIF_F_SG |      \
-                       NETIF_F_TSO,      \
-       .hw_priv_flags = IFF_UNICAST_FLT, \
-       .flow_control = true,             \
-       .mtu = HW_ATL_A0_MTU_JUMBO,       \
-       .mac_regs_count = 88,             \
+#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES         \
+       .is_64_dma = true,                           \
+       .op64bit = false,                            \
+       .msix_irqs = 4U,                             \
+       .irq_mask = ~0U,                             \
+       .vecs = HW_ATL_A0_RSS_MAX,                   \
+       .tcs_max = HW_ATL_A0_TC_MAX,                 \
+       .rxd_alignment = 1U,                         \
+       .rxd_size = HW_ATL_A0_RXD_SIZE,              \
+       .rxds_max = HW_ATL_A0_MAX_RXD,               \
+       .rxds_min = HW_ATL_A0_MIN_RXD,               \
+       .txd_alignment = 1U,                         \
+       .txd_size = HW_ATL_A0_TXD_SIZE,              \
+       .txds_max = HW_ATL_A0_MAX_TXD,               \
+       .txds_min = HW_ATL_A0_MIN_RXD,               \
+       .txhwb_alignment = 4096U,                    \
+       .tx_rings = HW_ATL_A0_TX_RINGS,              \
+       .rx_rings = HW_ATL_A0_RX_RINGS,              \
+       .hw_features = NETIF_F_HW_CSUM |             \
+                       NETIF_F_RXHASH |             \
+                       NETIF_F_RXCSUM |             \
+                       NETIF_F_SG |                 \
+                       NETIF_F_TSO |                \
+                       NETIF_F_NTUPLE |             \
+                       NETIF_F_HW_VLAN_CTAG_FILTER, \
+       .hw_priv_flags = IFF_UNICAST_FLT,            \
+       .flow_control = true,                        \
+       .mtu = HW_ATL_A0_MTU_JUMBO,                  \
+       .mac_regs_count = 88,                        \
        .hw_alive_check_addr = 0x10U
 
 const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
@@ -329,6 +332,7 @@ static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
                err = -EINVAL;
                goto err_exit;
        }
+
        h = (mac_addr[0] << 8) | (mac_addr[1]);
        l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
            (mac_addr[4] << 8) | mac_addr[5];
@@ -355,7 +359,6 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
        struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;
        int err = 0;
 
-
        hw_atl_a0_hw_init_tx_path(self);
        hw_atl_a0_hw_init_rx_path(self);
 
@@ -751,6 +754,7 @@ static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
 static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
                                          unsigned int packet_filter)
 {
+       struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        unsigned int i = 0U;
 
        hw_atl_rpfl2promiscuous_mode_en_set(self,
@@ -759,14 +763,13 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
                                         IS_FILTER_ENABLED(IFF_MULTICAST), 0);
        hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
 
-       self->aq_nic_cfg->is_mc_list_enabled =
-                       IS_FILTER_ENABLED(IFF_MULTICAST);
+       cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
 
        for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i)
                hw_atl_rpfl2_uc_flr_en_set(self,
-                                          (self->aq_nic_cfg->is_mc_list_enabled &&
-                                          (i <= self->aq_nic_cfg->mc_list_count)) ?
-                                          1U : 0U, i);
+                                          (cfg->is_mc_list_enabled &&
+                                           (i <= cfg->mc_list_count)) ? 1U : 0U,
+                                          i);
 
        return aq_hw_err_from_flags(self);
 }
@@ -779,19 +782,18 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
                                           [ETH_ALEN],
                                           u32 count)
 {
+       struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        int err = 0;
 
        if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) {
                err = EBADRQC;
                goto err_exit;
        }
-       for (self->aq_nic_cfg->mc_list_count = 0U;
-                       self->aq_nic_cfg->mc_list_count < count;
-                       ++self->aq_nic_cfg->mc_list_count) {
-               u32 i = self->aq_nic_cfg->mc_list_count;
+       for (cfg->mc_list_count = 0U; cfg->mc_list_count < count; ++cfg->mc_list_count) {
+               u32 i = cfg->mc_list_count;
                u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
                u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
-                                       (ar_mac[i][4] << 8) | ar_mac[i][5];
+                       (ar_mac[i][4] << 8) | ar_mac[i][5];
 
                hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);
 
@@ -804,7 +806,7 @@ static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
                                                        HW_ATL_A0_MAC_MIN + i);
 
                hw_atl_rpfl2_uc_flr_en_set(self,
-                                          (self->aq_nic_cfg->is_mc_list_enabled),
+                                          (cfg->is_mc_list_enabled),
                                           HW_ATL_A0_MAC_MIN + i);
        }
 
@@ -885,6 +887,63 @@ static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
        return aq_hw_err_from_flags(self);
 }
 
+static int hw_atl_a0_hw_fl3l4_clear(struct aq_hw_s *self,
+                                   struct aq_rx_filter_l3l4 *data)
+{
+       u8 location = data->location;
+
+       if (!data->is_ipv6) {
+               hw_atl_rpfl3l4_cmd_clear(self, location);
+               hw_atl_rpf_l4_spd_set(self, 0U, location);
+               hw_atl_rpf_l4_dpd_set(self, 0U, location);
+               hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
+               hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
+       } else {
+               int i;
+
+               for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
+                       hw_atl_rpfl3l4_cmd_clear(self, location + i);
+                       hw_atl_rpf_l4_spd_set(self, 0U, location + i);
+                       hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
+               }
+               hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
+               hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
+       }
+
+       return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_fl3l4_set(struct aq_hw_s *self,
+                                 struct aq_rx_filter_l3l4 *data)
+{
+       u8 location = data->location;
+
+       hw_atl_a0_hw_fl3l4_clear(self, data);
+
+       if (data->cmd) {
+               if (!data->is_ipv6) {
+                       hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
+                                                         location,
+                                                         data->ip_dst[0]);
+                       hw_atl_rpfl3l4_ipv4_src_addr_set(self,
+                                                        location,
+                                                        data->ip_src[0]);
+               } else {
+                       hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
+                                                         location,
+                                                         data->ip_dst);
+                       hw_atl_rpfl3l4_ipv6_src_addr_set(self,
+                                                        location,
+                                                        data->ip_src);
+               }
+       }
+       hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
+       hw_atl_rpf_l4_spd_set(self, data->p_src, location);
+       hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);
+
+       return aq_hw_err_from_flags(self);
+}
+
 const struct aq_hw_ops hw_atl_ops_a0 = {
        .hw_soft_reset        = hw_atl_utils_soft_reset,
        .hw_prepare           = hw_atl_utils_initfw,
@@ -911,6 +970,7 @@ const struct aq_hw_ops hw_atl_ops_a0 = {
        .hw_ring_rx_init             = hw_atl_a0_hw_ring_rx_init,
        .hw_ring_tx_init             = hw_atl_a0_hw_ring_tx_init,
        .hw_packet_filter_set        = hw_atl_a0_hw_packet_filter_set,
+       .hw_filter_l3l4_set          = hw_atl_a0_hw_fl3l4_set,
        .hw_multicast_list_set       = hw_atl_a0_hw_multicast_list_set,
        .hw_interrupt_moderation_set = hw_atl_a0_hw_interrupt_moderation_set,
        .hw_rss_set                  = hw_atl_a0_hw_rss_set,
index 14d79f7..34626ee 100644 (file)
@@ -20,6 +20,7 @@
 
 #define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \
        .is_64_dma = true,                \
+       .op64bit = false,                 \
        .msix_irqs = 8U,                  \
        .irq_mask = ~0U,                  \
        .vecs = HW_ATL_B0_RSS_MAX,        \
@@ -40,6 +41,7 @@
                        NETIF_F_RXHASH |  \
                        NETIF_F_SG |      \
                        NETIF_F_TSO |     \
+                       NETIF_F_TSO6 |    \
                        NETIF_F_LRO |     \
                        NETIF_F_NTUPLE |  \
                        NETIF_F_HW_VLAN_CTAG_FILTER | \
@@ -54,8 +56,6 @@
        .mac_regs_count = 88,             \
        .hw_alive_check_addr = 0x10U
 
-#define FRAC_PER_NS 0x100000000LL
-
 const struct aq_hw_caps_s hw_atl_b0_caps_aqc100 = {
        DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_FIBRE,
@@ -108,7 +108,7 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
        return err;
 }
 
-static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
+int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
 {
        hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
 
@@ -1233,7 +1233,7 @@ static void hw_atl_b0_adj_params_get(u64 freq, s64 adj, u32 *ns, u32 *fns)
        if (base_ns != nsi * NSEC_PER_SEC) {
                s64 divisor = div64_s64((s64)NSEC_PER_SEC * NSEC_PER_SEC,
                                        base_ns - nsi * NSEC_PER_SEC);
-               nsi_frac = div64_s64(FRAC_PER_NS * NSEC_PER_SEC, divisor);
+               nsi_frac = div64_s64(AQ_FRAC_PER_NS * NSEC_PER_SEC, divisor);
        }
 
        *ns = (u32)nsi;
@@ -1246,23 +1246,23 @@ hw_atl_b0_mac_adj_param_calc(struct hw_fw_request_ptp_adj_freq *ptp_adj_freq,
 {
        s64 adj_fns_val;
        s64 fns_in_sec_phy = phyfreq * (ptp_adj_freq->fns_phy +
-                                       FRAC_PER_NS * ptp_adj_freq->ns_phy);
+                                       AQ_FRAC_PER_NS * ptp_adj_freq->ns_phy);
        s64 fns_in_sec_mac = macfreq * (ptp_adj_freq->fns_mac +
-                                       FRAC_PER_NS * ptp_adj_freq->ns_mac);
-       s64 fault_in_sec_phy = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_phy;
-       s64 fault_in_sec_mac = FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_mac;
+                                       AQ_FRAC_PER_NS * ptp_adj_freq->ns_mac);
+       s64 fault_in_sec_phy = AQ_FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_phy;
+       s64 fault_in_sec_mac = AQ_FRAC_PER_NS * NSEC_PER_SEC - fns_in_sec_mac;
        /* MAC MCP counter freq is macfreq / 4 */
        s64 diff_in_mcp_overflow = (fault_in_sec_mac - fault_in_sec_phy) *
-                                  4 * FRAC_PER_NS;
+                                  4 * AQ_FRAC_PER_NS;
 
        diff_in_mcp_overflow = div64_s64(diff_in_mcp_overflow,
                                         AQ_HW_MAC_COUNTER_HZ);
-       adj_fns_val = (ptp_adj_freq->fns_mac + FRAC_PER_NS *
+       adj_fns_val = (ptp_adj_freq->fns_mac + AQ_FRAC_PER_NS *
                       ptp_adj_freq->ns_mac) + diff_in_mcp_overflow;
 
-       ptp_adj_freq->mac_ns_adj = div64_s64(adj_fns_val, FRAC_PER_NS);
+       ptp_adj_freq->mac_ns_adj = div64_s64(adj_fns_val, AQ_FRAC_PER_NS);
        ptp_adj_freq->mac_fns_adj = adj_fns_val - ptp_adj_freq->mac_ns_adj *
-                                   FRAC_PER_NS;
+                                   AQ_FRAC_PER_NS;
 }
 
 static int hw_atl_b0_adj_sys_clock(struct aq_hw_s *self, s64 delta)
@@ -1556,7 +1556,7 @@ static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
        return aq_hw_err_from_flags(self);
 }
 
-static int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable)
+int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable)
 {
        switch (mode) {
        case AQ_HW_LOOPBACK_DMA_SYS:
@@ -1581,6 +1581,48 @@ static int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable)
        return 0;
 }
 
+static u32 hw_atl_b0_ts_ready_and_latch_high_get(struct aq_hw_s *self)
+{
+       if (hw_atl_ts_ready_get(self) && hw_atl_ts_ready_latch_high_get(self))
+               return 1;
+
+       return 0;
+}
+
+static int hw_atl_b0_get_mac_temp(struct aq_hw_s *self, u32 *temp)
+{
+       bool ts_disabled;
+       int err;
+       u32 val;
+       u32 ts;
+
+       ts_disabled = (hw_atl_ts_power_down_get(self) == 1U);
+
+       if (ts_disabled) {
+               // Set AFE Temperature Sensor to on (off by default)
+               hw_atl_ts_power_down_set(self, 0U);
+
+               // Reset internal capacitors, biasing, and counters
+               hw_atl_ts_reset_set(self, 1);
+               hw_atl_ts_reset_set(self, 0);
+       }
+
+       err = readx_poll_timeout_atomic(hw_atl_b0_ts_ready_and_latch_high_get,
+                                       self, val, val == 1, 10000U, 500000U);
+       if (err)
+               return err;
+
+       ts = hw_atl_ts_data_get(self);
+       *temp = ts * ts * 16 / 100000 + 60 * ts - 83410;
+
+       if (ts_disabled) {
+               // Set AFE Temperature Sensor back to off
+               hw_atl_ts_power_down_set(self, 1U);
+       }
+
+       return 0;
+}
+
 const struct aq_hw_ops hw_atl_ops_b0 = {
        .hw_soft_reset        = hw_atl_utils_soft_reset,
        .hw_prepare           = hw_atl_utils_initfw,
@@ -1637,4 +1679,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
        .hw_set_offload          = hw_atl_b0_hw_offload_set,
        .hw_set_loopback         = hw_atl_b0_set_loopback,
        .hw_set_fc               = hw_atl_b0_set_fc,
+
+       .hw_get_mac_temp         = hw_atl_b0_get_mac_temp,
 };
index 30f468f..66d1589 100644 (file)
@@ -62,6 +62,9 @@ void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self);
 
 int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr);
 
+int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc);
+int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable);
+
 int hw_atl_b0_hw_start(struct aq_hw_s *self);
 
 int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask);
index 3c8e804..7b67bdd 100644 (file)
@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
  */
 
 /* File hw_atl_llh.c: Definitions of bitfield and register access functions for
 #include "hw_atl_llh_internal.h"
 #include "../aq_hw_utils.h"
 
+void hw_atl_ts_reset_set(struct aq_hw_s *aq_hw, u32 val)
+{
+       aq_hw_write_reg_bit(aq_hw, HW_ATL_TS_RESET_ADR,
+                           HW_ATL_TS_RESET_MSK,
+                           HW_ATL_TS_RESET_SHIFT,
+                           val);
+}
+
+void hw_atl_ts_power_down_set(struct aq_hw_s *aq_hw, u32 val)
+{
+       aq_hw_write_reg_bit(aq_hw, HW_ATL_TS_POWER_DOWN_ADR,
+                           HW_ATL_TS_POWER_DOWN_MSK,
+                           HW_ATL_TS_POWER_DOWN_SHIFT,
+                           val);
+}
+
+u32 hw_atl_ts_power_down_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg_bit(aq_hw, HW_ATL_TS_POWER_DOWN_ADR,
+                                 HW_ATL_TS_POWER_DOWN_MSK,
+                                 HW_ATL_TS_POWER_DOWN_SHIFT);
+}
+
+u32 hw_atl_ts_ready_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg_bit(aq_hw, HW_ATL_TS_READY_ADR,
+                                 HW_ATL_TS_READY_MSK,
+                                 HW_ATL_TS_READY_SHIFT);
+}
+
+u32 hw_atl_ts_ready_latch_high_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg_bit(aq_hw, HW_ATL_TS_READY_LATCH_HIGH_ADR,
+                                 HW_ATL_TS_READY_LATCH_HIGH_MSK,
+                                 HW_ATL_TS_READY_LATCH_HIGH_SHIFT);
+}
+
+u32 hw_atl_ts_data_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg_bit(aq_hw, HW_ATL_TS_DATA_OUT_ADR,
+                                 HW_ATL_TS_DATA_OUT_MSK,
+                                 HW_ATL_TS_DATA_OUT_SHIFT);
+}
+
 /* global */
 void hw_atl_reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
                                u32 semaphore)
@@ -1700,7 +1745,7 @@ void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
        for (i = 0; i < 4; ++i)
                aq_hw_write_reg(aq_hw,
                                HW_ATL_RPF_L3_SRCA_ADR(location + i),
-                               ipv6_src[i]);
+                               ipv6_src[3 - i]);
 }
 
 void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
@@ -1711,7 +1756,7 @@ void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
        for (i = 0; i < 4; ++i)
                aq_hw_write_reg(aq_hw,
                                HW_ATL_RPF_L3_DSTA_ADR(location + i),
-                               ipv6_dest[i]);
+                               ipv6_dest[3 - i]);
 }
 
 u32 hw_atl_sem_ram_get(struct aq_hw_s *self)
@@ -1724,6 +1769,16 @@ u32 hw_atl_sem_mdio_get(struct aq_hw_s *self)
        return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_MDIO);
 }
 
+u32 hw_atl_sem_reset1_get(struct aq_hw_s *self)
+{
+       return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RESET1);
+}
+
+u32 hw_atl_sem_reset2_get(struct aq_hw_s *self)
+{
+       return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RESET2);
+}
+
 u32 hw_atl_scrpad_get(struct aq_hw_s *aq_hw, u32 scratch_scp)
 {
        return aq_hw_read_reg(aq_hw,
index 61a6f70..58f5ee0 100644 (file)
@@ -1,7 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
  */
 
 /* File hw_atl_llh.h: Declarations of bitfield and register access functions for
 
 struct aq_hw_s;
 
+/* set temperature sense reset */
+void hw_atl_ts_reset_set(struct aq_hw_s *aq_hw, u32 val);
+
+/* set temperature sense power down */
+void hw_atl_ts_power_down_set(struct aq_hw_s *aq_hw, u32 val);
+
+/* get temperature sense power down */
+u32 hw_atl_ts_power_down_get(struct aq_hw_s *aq_hw);
+
+/* get temperature sense ready */
+u32 hw_atl_ts_ready_get(struct aq_hw_s *aq_hw);
+
+/* get temperature sense ready latch high */
+u32 hw_atl_ts_ready_latch_high_get(struct aq_hw_s *aq_hw);
+
+/* get temperature sense data */
+u32 hw_atl_ts_data_get(struct aq_hw_s *aq_hw);
+
 /* global */
 
 /* set global microprocessor semaphore */
@@ -838,6 +857,9 @@ u32 hw_atl_sem_ram_get(struct aq_hw_s *self);
 /* get global microprocessor mdio semaphore */
 u32 hw_atl_sem_mdio_get(struct aq_hw_s *self);
 
+u32 hw_atl_sem_reset1_get(struct aq_hw_s *self);
+u32 hw_atl_sem_reset2_get(struct aq_hw_s *self);
+
 /* get global microprocessor scratch pad register */
 u32 hw_atl_scrpad_get(struct aq_hw_s *aq_hw, u32 scratch_scp);
 
index 0622079..4a64670 100644 (file)
@@ -1,7 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * aQuantia Corporation Network Driver
- * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2014-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
  */
 
 /* File hw_atl_llh_internal.h: Preprocessor definitions
 #ifndef HW_ATL_LLH_INTERNAL_H
 #define HW_ATL_LLH_INTERNAL_H
 
+/* COM Temperature Sense Reset Bitfield Definitions */
+#define HW_ATL_TS_RESET_ADR 0x00003100
+#define HW_ATL_TS_RESET_MSK 0x00000004
+#define HW_ATL_TS_RESET_SHIFT 2
+#define HW_ATL_TS_RESET_WIDTH 1
+
+/* COM Temperature Sense Power Down Bitfield Definitions */
+#define HW_ATL_TS_POWER_DOWN_ADR 0x00003100
+#define HW_ATL_TS_POWER_DOWN_MSK 0x00000001
+#define HW_ATL_TS_POWER_DOWN_SHIFT 0
+#define HW_ATL_TS_POWER_DOWN_WIDTH 1
+
+/* COM Temperature Sense Ready Bitfield Definitions */
+#define HW_ATL_TS_READY_ADR 0x00003120
+#define HW_ATL_TS_READY_MSK 0x80000000
+#define HW_ATL_TS_READY_SHIFT 31
+#define HW_ATL_TS_READY_WIDTH 1
+
+/*  COM Temperature Sense Ready Latch High Bitfield Definitions */
+#define HW_ATL_TS_READY_LATCH_HIGH_ADR 0x00003120
+#define HW_ATL_TS_READY_LATCH_HIGH_MSK 0x40000000
+#define HW_ATL_TS_READY_LATCH_HIGH_SHIFT 30
+#define HW_ATL_TS_READY_LATCH_HIGH_WIDTH 1
+
+/* COM Temperature Sense Data Out [B:0] Bitfield Definitions */
+#define HW_ATL_TS_DATA_OUT_ADR 0x00003120
+#define HW_ATL_TS_DATA_OUT_MSK 0x00000FFF
+#define HW_ATL_TS_DATA_OUT_SHIFT 0
+#define HW_ATL_TS_DATA_OUT_WIDTH 12
+
 /* global microprocessor semaphore  definitions
  * base address: 0x000003a0
  * parameter: semaphore {s} | stride size 0x4 | range [0, 15]
  */
 
  /* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
-#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053B0 + (filter) * 0x4)
+#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053D0 + (filter) * 0x4)
 /* Bitmask for bitfield l3_da0[1F:0] */
 #define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
 /* Inverted bitmask for bitfield l3_da0[1F:0] */
 /* Default value of bitfield MDIO Address [F:0] */
 #define HW_ATL_MDIO_ADDRESS_DEFAULT 0x0
 
+#define HW_ATL_MIF_RESET_TIMEOUT_ADR 0x00000348
+
 #define HW_ATL_FW_SM_MDIO       0x0U
 #define HW_ATL_FW_SM_RAM        0x2U
+#define HW_ATL_FW_SM_RESET1     0x3U
+#define HW_ATL_FW_SM_RESET2     0x4U
 
 #endif /* HW_ATL_LLH_INTERNAL_H */
index 73c0f41..404cbf6 100644 (file)
@@ -46,6 +46,7 @@
 #define HW_ATL_FW_VER_1X 0x01050006U
 #define HW_ATL_FW_VER_2X 0x02000000U
 #define HW_ATL_FW_VER_3X 0x03000000U
+#define HW_ATL_FW_VER_4X 0x04000000U
 
 #define FORCE_FLASHLESS 0
 
@@ -72,14 +73,13 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
 
        self->fw_ver_actual = hw_atl_utils_get_fw_version(self);
 
-       if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
-                                  self->fw_ver_actual) == 0) {
+       if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, self->fw_ver_actual)) {
                *fw_ops = &aq_fw_1x_ops;
-       } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X,
-                                         self->fw_ver_actual) == 0) {
+       } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X, self->fw_ver_actual)) {
                *fw_ops = &aq_fw_2x_ops;
-       } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X,
-                                         self->fw_ver_actual) == 0) {
+       } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X, self->fw_ver_actual)) {
+               *fw_ops = &aq_fw_2x_ops;
+       } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_4X, self->fw_ver_actual)) {
                *fw_ops = &aq_fw_2x_ops;
        } else {
                aq_pr_err("Bad FW version detected: %x\n",
@@ -217,7 +217,7 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
 
        if (rbl_status == 0xF1A7) {
                aq_pr_err("No FW detected. Dynamic FW load not implemented\n");
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        }
 
        for (k = 0; k < 1000; k++) {
@@ -239,6 +239,7 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
 
 int hw_atl_utils_soft_reset(struct aq_hw_s *self)
 {
+       int ver = hw_atl_utils_get_fw_version(self);
        u32 boot_exit_code = 0;
        u32 val;
        int k;
@@ -259,14 +260,12 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self)
 
        self->rbl_enabled = (boot_exit_code != 0);
 
-       /* FW 1.x may bootup in an invalid POWER state (WOL feature).
-        * We should work around this by forcing its state back to DEINIT
-        */
-       if (!hw_atl_utils_ver_match(HW_ATL_FW_VER_1X,
-                                   aq_hw_read_reg(self,
-                                                  HW_ATL_MPI_FW_VERSION))) {
+       if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, ver)) {
                int err = 0;
 
+               /* FW 1.x may bootup in an invalid POWER state (WOL feature).
+                * We should work around this by forcing its state back to DEINIT
+                */
                hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
                err = readx_poll_timeout_atomic(hw_atl_utils_mpi_get_state,
                                                self, val,
@@ -275,6 +274,27 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self)
                                                10, 10000U);
                if (err)
                        return err;
+       } else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_4X, ver)) {
+               u64 sem_timeout = aq_hw_read_reg(self, HW_ATL_MIF_RESET_TIMEOUT_ADR);
+
+               /* Acquire 2 semaphores before issuing reset for FW 4.x */
+               if (sem_timeout > 3000)
+                       sem_timeout = 3000;
+               sem_timeout = sem_timeout * 1000;
+
+               if (sem_timeout != 0) {
+                       int err;
+
+                       err = readx_poll_timeout_atomic(hw_atl_sem_reset1_get, self, val,
+                                                       val == 1U, 1U, sem_timeout);
+                       if (err)
+                               aq_pr_err("reset sema1 timeout");
+
+                       err = readx_poll_timeout_atomic(hw_atl_sem_reset2_get, self, val,
+                                                       val == 1U, 1U, sem_timeout);
+                       if (err)
+                               aq_pr_err("reset sema2 timeout");
+               }
        }
 
        if (self->rbl_enabled)
@@ -434,20 +454,20 @@ int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p,
                                             p, cnt, MCP_AREA_SETTINGS);
 }
 
-int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
+bool hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
 {
        const u32 dw_major_mask = 0xff000000U;
        const u32 dw_minor_mask = 0x00ffffffU;
-       int err = 0;
+       bool ver_match;
 
-       err = (dw_major_mask & (ver_expected ^ ver_actual)) ? -EOPNOTSUPP : 0;
-       if (err < 0)
+       ver_match = (dw_major_mask & (ver_expected ^ ver_actual)) ? false : true;
+       if (!ver_match)
                goto err_exit;
-       err = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ?
-               -EOPNOTSUPP : 0;
+       ver_match = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ?
+               false : true;
 
 err_exit:
-       return err;
+       return ver_match;
 }
 
 static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
@@ -704,6 +724,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
                        return -EBUSY;
                }
        }
+       link_status->full_duplex = true;
 
        return 0;
 }
@@ -1045,6 +1066,7 @@ const struct aq_fw_ops aq_fw_1x_ops = {
        .set_state = hw_atl_utils_mpi_set_state,
        .update_link_status = hw_atl_utils_mpi_get_link_status,
        .update_stats = hw_atl_utils_update_stats,
+       .get_mac_temp = NULL,
        .get_phy_temp = NULL,
        .set_power = aq_fw1x_set_power,
        .set_eee_rate = NULL,
index 0b4b54f..f5901f8 100644 (file)
@@ -635,7 +635,7 @@ int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size);
 int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
                             struct hw_atl_utils_fw_rpc **rpc);
 
-int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
+bool hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
 
 extern const struct aq_fw_ops aq_fw_1x_ops;
 extern const struct aq_fw_ops aq_fw_2x_ops;
index eeedd8c..93c06df 100644 (file)
@@ -274,6 +274,7 @@ static int aq_fw2x_update_link_status(struct aq_hw_s *self)
        } else {
                link_status->mbps = 0;
        }
+       link_status->full_duplex = true;
 
        return 0;
 }
@@ -352,7 +353,7 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
        /* Convert PHY temperature from 1/256 degree Celsius
         * to 1/1000 degree Celsius.
         */
-       *temp = (temp_res & 0xFFFF) * 1000 / 256;
+       *temp = (int16_t)(temp_res & 0xFFFF) * 1000 / 256;
 
        return 0;
 }
@@ -680,6 +681,7 @@ const struct aq_fw_ops aq_fw_2x_ops = {
        .set_state          = aq_fw2x_set_state,
        .update_link_status = aq_fw2x_update_link_status,
        .update_stats       = aq_fw2x_update_stats,
+       .get_mac_temp       = NULL,
        .get_phy_temp       = aq_fw2x_get_phy_temp,
        .set_power          = aq_fw2x_set_power,
        .set_eee_rate       = aq_fw2x_set_eee_rate,
index 8df9d4e..92f6404 100644 (file)
@@ -21,6 +21,7 @@ static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location,
 
 #define DEFAULT_BOARD_BASIC_CAPABILITIES \
        .is_64_dma = true,                \
+       .op64bit = true,                  \
        .msix_irqs = 8U,                  \
        .irq_mask = ~0U,                  \
        .vecs = HW_ATL2_RSS_MAX,          \
@@ -64,8 +65,11 @@ const struct aq_hw_caps_s hw_atl2_caps_aqc113 = {
                          AQ_NIC_RATE_5G  |
                          AQ_NIC_RATE_2G5 |
                          AQ_NIC_RATE_1G  |
+                         AQ_NIC_RATE_1G_HALF   |
                          AQ_NIC_RATE_100M      |
-                         AQ_NIC_RATE_10M,
+                         AQ_NIC_RATE_100M_HALF |
+                         AQ_NIC_RATE_10M       |
+                         AQ_NIC_RATE_10M_HALF,
 };
 
 static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self)
@@ -178,6 +182,8 @@ static int hw_atl2_hw_qos_set(struct aq_hw_s *self)
 
                threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U;
                hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc);
+
+               hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc);
        }
 
        /* QoS 802.1p priority -> TC mapping */
@@ -838,4 +844,6 @@ const struct aq_hw_ops hw_atl2_ops = {
        .hw_get_hw_stats             = hw_atl2_utils_get_hw_stats,
        .hw_get_fw_version           = hw_atl2_utils_get_fw_version,
        .hw_set_offload              = hw_atl_b0_hw_offload_set,
+       .hw_set_loopback             = hw_atl_b0_set_loopback,
+       .hw_set_fc                   = hw_atl_b0_set_fc,
 };
index f376678..0fe6257 100644 (file)
@@ -36,8 +36,7 @@ int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
 
        self->fw_ver_actual = hw_atl2_utils_get_fw_version(self);
 
-       if (hw_atl_utils_ver_match(HW_ATL2_FW_VER_1X,
-                                  self->fw_ver_actual) == 0) {
+       if (hw_atl_utils_ver_match(HW_ATL2_FW_VER_1X, self->fw_ver_actual)) {
                *fw_ops = &aq_a2_fw_ops;
        } else {
                aq_pr_err("Bad FW version detected: %x, but continue\n",
index 0ffc33b..85628ac 100644 (file)
@@ -7,6 +7,7 @@
 
 #include "aq_hw.h"
 #include "aq_hw_utils.h"
+#include "aq_nic.h"
 #include "hw_atl/hw_atl_llh.h"
 #include "hw_atl2_utils.h"
 #include "hw_atl2_llh.h"
 #define AQ_A2_FW_READ_TRY_MAX 1000
 
 #define hw_atl2_shared_buffer_write(HW, ITEM, VARIABLE) \
+{\
+       BUILD_BUG_ON_MSG((offsetof(struct fw_interface_in, ITEM) % \
+                        sizeof(u32)) != 0,\
+                        "Unaligned write " # ITEM);\
+       BUILD_BUG_ON_MSG((sizeof(VARIABLE) %  sizeof(u32)) != 0,\
+                        "Unaligned write length " # ITEM);\
        hw_atl2_mif_shared_buf_write(HW,\
                (offsetof(struct fw_interface_in, ITEM) / sizeof(u32)),\
-               (u32 *)&(VARIABLE), sizeof(VARIABLE) / sizeof(u32))
+               (u32 *)&(VARIABLE), sizeof(VARIABLE) / sizeof(u32));\
+}
 
 #define hw_atl2_shared_buffer_get(HW, ITEM, VARIABLE) \
+{\
+       BUILD_BUG_ON_MSG((offsetof(struct fw_interface_in, ITEM) % \
+                        sizeof(u32)) != 0,\
+                        "Unaligned get " # ITEM);\
+       BUILD_BUG_ON_MSG((sizeof(VARIABLE) %  sizeof(u32)) != 0,\
+                        "Unaligned get length " # ITEM);\
        hw_atl2_mif_shared_buf_get(HW, \
                (offsetof(struct fw_interface_in, ITEM) / sizeof(u32)),\
                (u32 *)&(VARIABLE), \
-               sizeof(VARIABLE) / sizeof(u32))
+               sizeof(VARIABLE) / sizeof(u32));\
+}
 
 /* This should never be used on non atomic fields,
  * treat any > u32 read as non atomic.
@@ -32,7 +47,9 @@
 {\
        BUILD_BUG_ON_MSG((offsetof(struct fw_interface_out, ITEM) % \
                         sizeof(u32)) != 0,\
-                        "Non aligned read " # ITEM);\
+                        "Unaligned read " # ITEM);\
+       BUILD_BUG_ON_MSG((sizeof(VARIABLE) %  sizeof(u32)) != 0,\
+                        "Unaligned read length " # ITEM);\
        BUILD_BUG_ON_MSG(sizeof(VARIABLE) > sizeof(u32),\
                         "Non atomic read " # ITEM);\
        hw_atl2_mif_shared_buf_read(HW, \
 }
 
 #define hw_atl2_shared_buffer_read_safe(HW, ITEM, DATA) \
+({\
+       BUILD_BUG_ON_MSG((offsetof(struct fw_interface_out, ITEM) % \
+                        sizeof(u32)) != 0,\
+                        "Unaligned read_safe " # ITEM);\
+       BUILD_BUG_ON_MSG((sizeof(((struct fw_interface_out *)0)->ITEM) % \
+                        sizeof(u32)) != 0,\
+                        "Unaligned read_safe length " # ITEM);\
        hw_atl2_shared_buffer_read_block((HW), \
                (offsetof(struct fw_interface_out, ITEM) / sizeof(u32)),\
                sizeof(((struct fw_interface_out *)0)->ITEM) / sizeof(u32),\
-               (DATA))
+               (DATA));\
+})
 
 static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self,
                                            u32 offset, u32 dwords, void *data)
@@ -135,6 +160,67 @@ static void a2_link_speed_mask2fw(u32 speed,
        link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G);
        link_options->rate_100M = !!(speed & AQ_NIC_RATE_100M);
        link_options->rate_10M = !!(speed & AQ_NIC_RATE_10M);
+
+       link_options->rate_1G_hd = !!(speed & AQ_NIC_RATE_1G_HALF);
+       link_options->rate_100M_hd = !!(speed & AQ_NIC_RATE_100M_HALF);
+       link_options->rate_10M_hd = !!(speed & AQ_NIC_RATE_10M_HALF);
+}
+
+static u32 a2_fw_dev_to_eee_mask(struct device_link_caps_s *device_link_caps)
+{
+       u32 rate = 0;
+
+       if (device_link_caps->eee_10G)
+               rate |= AQ_NIC_RATE_EEE_10G;
+       if (device_link_caps->eee_5G)
+               rate |= AQ_NIC_RATE_EEE_5G;
+       if (device_link_caps->eee_2P5G)
+               rate |= AQ_NIC_RATE_EEE_2G5;
+       if (device_link_caps->eee_1G)
+               rate |= AQ_NIC_RATE_EEE_1G;
+       if (device_link_caps->eee_100M)
+               rate |= AQ_NIC_RATE_EEE_100M;
+
+       return rate;
+}
+
+static u32 a2_fw_lkp_to_mask(struct lkp_link_caps_s *lkp_link_caps)
+{
+       u32 rate = 0;
+
+       if (lkp_link_caps->rate_10G)
+               rate |= AQ_NIC_RATE_10G;
+       if (lkp_link_caps->rate_5G)
+               rate |= AQ_NIC_RATE_5G;
+       if (lkp_link_caps->rate_N5G)
+               rate |= AQ_NIC_RATE_5GSR;
+       if (lkp_link_caps->rate_2P5G)
+               rate |= AQ_NIC_RATE_2G5;
+       if (lkp_link_caps->rate_1G)
+               rate |= AQ_NIC_RATE_1G;
+       if (lkp_link_caps->rate_1G_hd)
+               rate |= AQ_NIC_RATE_1G_HALF;
+       if (lkp_link_caps->rate_100M)
+               rate |= AQ_NIC_RATE_100M;
+       if (lkp_link_caps->rate_100M_hd)
+               rate |= AQ_NIC_RATE_100M_HALF;
+       if (lkp_link_caps->rate_10M)
+               rate |= AQ_NIC_RATE_10M;
+       if (lkp_link_caps->rate_10M_hd)
+               rate |= AQ_NIC_RATE_10M_HALF;
+
+       if (lkp_link_caps->eee_10G)
+               rate |= AQ_NIC_RATE_EEE_10G;
+       if (lkp_link_caps->eee_5G)
+               rate |= AQ_NIC_RATE_EEE_5G;
+       if (lkp_link_caps->eee_2P5G)
+               rate |= AQ_NIC_RATE_EEE_2G5;
+       if (lkp_link_caps->eee_1G)
+               rate |= AQ_NIC_RATE_EEE_1G;
+       if (lkp_link_caps->eee_100M)
+               rate |= AQ_NIC_RATE_EEE_100M;
+
+       return rate;
 }
 
 static int aq_a2_fw_set_link_speed(struct aq_hw_s *self, u32 speed)
@@ -149,6 +235,26 @@ static int aq_a2_fw_set_link_speed(struct aq_hw_s *self, u32 speed)
        return hw_atl2_shared_buffer_finish_ack(self);
 }
 
+static void aq_a2_fw_set_mpi_flow_control(struct aq_hw_s *self,
+                                         struct link_options_s *link_options)
+{
+       u32 flow_control = self->aq_nic_cfg->fc.req;
+
+       link_options->pause_rx = !!(flow_control & AQ_NIC_FC_RX);
+       link_options->pause_tx = !!(flow_control & AQ_NIC_FC_TX);
+}
+
+static void aq_a2_fw_upd_eee_rate_bits(struct aq_hw_s *self,
+                                      struct link_options_s *link_options,
+                                      u32 eee_speeds)
+{
+       link_options->eee_10G =  !!(eee_speeds & AQ_NIC_RATE_EEE_10G);
+       link_options->eee_5G = !!(eee_speeds & AQ_NIC_RATE_EEE_5G);
+       link_options->eee_2P5G = !!(eee_speeds & AQ_NIC_RATE_EEE_2G5);
+       link_options->eee_1G = !!(eee_speeds & AQ_NIC_RATE_EEE_1G);
+       link_options->eee_100M = !!(eee_speeds & AQ_NIC_RATE_EEE_100M);
+}
+
 static int aq_a2_fw_set_state(struct aq_hw_s *self,
                              enum hal_atl_utils_fw_state_e state)
 {
@@ -159,6 +265,9 @@ static int aq_a2_fw_set_state(struct aq_hw_s *self,
        switch (state) {
        case MPI_INIT:
                link_options.link_up = 1U;
+               aq_a2_fw_upd_eee_rate_bits(self, &link_options,
+                                          self->aq_nic_cfg->eee_speeds);
+               aq_a2_fw_set_mpi_flow_control(self, &link_options);
                break;
        case MPI_DEINIT:
                link_options.link_up = 0U;
@@ -176,6 +285,7 @@ static int aq_a2_fw_set_state(struct aq_hw_s *self,
 
 static int aq_a2_fw_update_link_status(struct aq_hw_s *self)
 {
+       struct lkp_link_caps_s lkp_link_caps;
        struct link_status_s link_status;
 
        hw_atl2_shared_buffer_read(self, link_status, link_status);
@@ -202,6 +312,15 @@ static int aq_a2_fw_update_link_status(struct aq_hw_s *self)
        default:
                self->aq_link_status.mbps = 0;
        }
+       self->aq_link_status.full_duplex = link_status.duplex;
+
+       hw_atl2_shared_buffer_read(self, lkp_link_caps, lkp_link_caps);
+
+       self->aq_link_status.lp_link_speed_msk =
+                                a2_fw_lkp_to_mask(&lkp_link_caps);
+       self->aq_link_status.lp_flow_control =
+                               ((lkp_link_caps.pause_rx) ? AQ_NIC_FC_RX : 0) |
+                               ((lkp_link_caps.pause_tx) ? AQ_NIC_FC_TX : 0);
 
        return 0;
 }
@@ -260,6 +379,53 @@ static int aq_a2_fw_update_stats(struct aq_hw_s *self)
        return 0;
 }
 
+static int aq_a2_fw_get_phy_temp(struct aq_hw_s *self, int *temp)
+{
+       struct phy_health_monitor_s phy_health_monitor;
+
+       hw_atl2_shared_buffer_read_safe(self, phy_health_monitor,
+                                       &phy_health_monitor);
+
+       *temp = (int8_t)phy_health_monitor.phy_temperature * 1000;
+       return 0;
+}
+
+static int aq_a2_fw_get_mac_temp(struct aq_hw_s *self, int *temp)
+{
+       /* There's only one temperature sensor on A2, use it for
+        * both MAC and PHY.
+        */
+       return aq_a2_fw_get_phy_temp(self, temp);
+}
+
+static int aq_a2_fw_set_eee_rate(struct aq_hw_s *self, u32 speed)
+{
+       struct link_options_s link_options;
+
+       hw_atl2_shared_buffer_get(self, link_options, link_options);
+
+       aq_a2_fw_upd_eee_rate_bits(self, &link_options, speed);
+
+       hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+       return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static int aq_a2_fw_get_eee_rate(struct aq_hw_s *self, u32 *rate,
+                                u32 *supported_rates)
+{
+       struct device_link_caps_s device_link_caps;
+       struct lkp_link_caps_s lkp_link_caps;
+
+       hw_atl2_shared_buffer_read(self, device_link_caps, device_link_caps);
+       hw_atl2_shared_buffer_read(self, lkp_link_caps, lkp_link_caps);
+
+       *supported_rates = a2_fw_dev_to_eee_mask(&device_link_caps);
+       *rate = a2_fw_lkp_to_mask(&lkp_link_caps);
+
+       return 0;
+}
+
 static int aq_a2_fw_renegotiate(struct aq_hw_s *self)
 {
        struct link_options_s link_options;
@@ -280,6 +446,52 @@ static int aq_a2_fw_renegotiate(struct aq_hw_s *self)
        return err;
 }
 
+static int aq_a2_fw_set_flow_control(struct aq_hw_s *self)
+{
+       struct link_options_s link_options;
+
+       hw_atl2_shared_buffer_get(self, link_options, link_options);
+
+       aq_a2_fw_set_mpi_flow_control(self, &link_options);
+
+       hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+       return hw_atl2_shared_buffer_finish_ack(self);
+}
+
+static u32 aq_a2_fw_get_flow_control(struct aq_hw_s *self, u32 *fcmode)
+{
+       struct link_status_s link_status;
+
+       hw_atl2_shared_buffer_read(self, link_status, link_status);
+
+       *fcmode = ((link_status.pause_rx) ? AQ_NIC_FC_RX : 0) |
+                 ((link_status.pause_tx) ? AQ_NIC_FC_TX : 0);
+       return 0;
+}
+
+static int aq_a2_fw_set_phyloopback(struct aq_hw_s *self, u32 mode, bool enable)
+{
+       struct link_options_s link_options;
+
+       hw_atl2_shared_buffer_get(self, link_options, link_options);
+
+       switch (mode) {
+       case AQ_HW_LOOPBACK_PHYINT_SYS:
+               link_options.internal_loopback = enable;
+               break;
+       case AQ_HW_LOOPBACK_PHYEXT_SYS:
+               link_options.external_loopback = enable;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+       return hw_atl2_shared_buffer_finish_ack(self);
+}
+
 u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self)
 {
        struct version_s version;
@@ -317,4 +529,11 @@ const struct aq_fw_ops aq_a2_fw_ops = {
        .set_state          = aq_a2_fw_set_state,
        .update_link_status = aq_a2_fw_update_link_status,
        .update_stats       = aq_a2_fw_update_stats,
+       .get_mac_temp       = aq_a2_fw_get_mac_temp,
+       .get_phy_temp       = aq_a2_fw_get_phy_temp,
+       .set_eee_rate       = aq_a2_fw_set_eee_rate,
+       .get_eee_rate       = aq_a2_fw_get_eee_rate,
+       .set_flow_control   = aq_a2_fw_set_flow_control,
+       .get_flow_control   = aq_a2_fw_get_flow_control,
+       .set_phyloopback    = aq_a2_fw_set_phyloopback,
 };
index 38cd968..b56a9e2 100644 (file)
@@ -673,7 +673,7 @@ static struct net_device_stats *arc_emac_stats(struct net_device *ndev)
  *
  * This function is invoked from upper layers to initiate transmission.
  */
-static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 {
        struct arc_emac_priv *priv = netdev_priv(ndev);
        unsigned int len, *txbd_curr = &priv->txbd_curr;
index b9b4edb..9b7f1af 100644 (file)
@@ -1249,8 +1249,12 @@ out_disable_adv_intr:
 
 static void __alx_stop(struct alx_priv *alx)
 {
-       alx_halt(alx);
        alx_free_irq(alx);
+
+       cancel_work_sync(&alx->link_check_wk);
+       cancel_work_sync(&alx->reset_wk);
+
+       alx_halt(alx);
        alx_free_rings(alx);
        alx_free_napis(alx);
 }
@@ -1855,9 +1859,6 @@ static void alx_remove(struct pci_dev *pdev)
        struct alx_priv *alx = pci_get_drvdata(pdev);
        struct alx_hw *hw = &alx->hw;
 
-       cancel_work_sync(&alx->link_check_wk);
-       cancel_work_sync(&alx->reset_wk);
-
        /* restore permanent mac address */
        alx_set_macaddr(hw, hw->perm_addr);
 
index bc273e0..5b20185 100644 (file)
@@ -384,7 +384,7 @@ static void nb8800_tx_dma_start_irq(struct net_device *dev)
        spin_unlock(&priv->tx_lock);
 }
 
-static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct nb8800_priv *priv = netdev_priv(dev);
        struct nb8800_tx_desc *txd;
index b25356e..dfed9ad 100644 (file)
@@ -160,13 +160,26 @@ static void bcm_sysport_set_tx_csum(struct net_device *dev,
        /* Hardware transmit checksum requires us to enable the Transmit status
         * block prepended to the packet contents
         */
-       priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+       priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                                   NETIF_F_HW_VLAN_CTAG_TX));
        reg = tdma_readl(priv, TDMA_CONTROL);
        if (priv->tsb_en)
                reg |= tdma_control_bit(priv, TSB_EN);
        else
                reg &= ~tdma_control_bit(priv, TSB_EN);
+       /* Indicating that software inserts Broadcom tags is needed for the TX
+        * checksum to be computed correctly when using VLAN HW acceleration,
+        * else it has no effect, so it can always be turned on.
+        */
+       if (netdev_uses_dsa(dev))
+               reg |= tdma_control_bit(priv, SW_BRCM_TAG);
+       else
+               reg &= ~tdma_control_bit(priv, SW_BRCM_TAG);
        tdma_writel(priv, reg, TDMA_CONTROL);
+
+       /* Default TPID is ETH_P_8021AD, change to ETH_P_8021Q */
+       if (wanted & NETIF_F_HW_VLAN_CTAG_TX)
+               tdma_writel(priv, ETH_P_8021Q, TDMA_TPID);
 }
 
 static int bcm_sysport_set_features(struct net_device *dev,
@@ -1236,6 +1249,11 @@ static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
        /* Zero-out TSB by default */
        memset(tsb, 0, sizeof(*tsb));
 
+       if (skb_vlan_tag_present(skb)) {
+               tsb->pcp_dei_vid = skb_vlan_tag_get_prio(skb) & PCP_DEI_MASK;
+               tsb->pcp_dei_vid |= (u32)skb_vlan_tag_get_id(skb) << VID_SHIFT;
+       }
+
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                ip_ver = skb->protocol;
                switch (ip_ver) {
@@ -1251,6 +1269,9 @@ static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
 
                /* Get the checksum offset and the L4 (transport) offset */
                csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
+               /* Account for the HW inserted VLAN tag */
+               if (skb_vlan_tag_present(skb))
+                       csum_start += VLAN_HLEN;
                csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
                csum_info |= (csum_start << L4_PTR_SHIFT);
 
@@ -1330,6 +1351,8 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
                       DESC_STATUS_SHIFT;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
+       if (skb_vlan_tag_present(skb))
+               len_status |= (TX_STATUS_VLAN_VID_TSB << DESC_STATUS_SHIFT);
 
        ring->curr_desc++;
        if (ring->curr_desc == ring->size)
@@ -1503,7 +1526,13 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
                reg |= RING_IGNORE_STATUS;
        }
        tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
-       tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
+       reg = 0;
+       /* Adjust the packet size calculations if SYSTEMPORT is responsible
+        * for HW insertion of VLAN tags
+        */
+       if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)
+               reg = VLAN_HLEN << RING_PKT_SIZE_ADJ_SHIFT;
+       tdma_writel(priv, reg, TDMA_DESC_RING_PCP_DEI_VID(index));
 
        /* Enable ACB algorithm 2 */
        reg = tdma_readl(priv, TDMA_CONTROL);
@@ -2523,7 +2552,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
        netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
 
        dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
-                        NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+                        NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+                        NETIF_F_HW_VLAN_CTAG_TX;
        dev->hw_features |= dev->features;
        dev->vlan_features |= dev->features;
 
index 9fdfaa2..2523cfc 100644 (file)
@@ -5,5 +5,5 @@
 
 obj-$(CONFIG_BNX2X) += bnx2x.o
 
-bnx2x-y := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
+bnx2x-y := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o bnx2x_self_test.o
 bnx2x-$(CONFIG_BNX2X_SRIOV) += bnx2x_vfpf.o bnx2x_sriov.o
index 4f5b2b8..d049948 100644 (file)
@@ -1287,7 +1287,6 @@ enum sp_rtnl_flag {
        BNX2X_SP_RTNL_HYPERVISOR_VLAN,
        BNX2X_SP_RTNL_TX_STOP,
        BNX2X_SP_RTNL_GET_DRV_VERSION,
-       BNX2X_SP_RTNL_CHANGE_UDP_PORT,
        BNX2X_SP_RTNL_UPDATE_SVID,
 };
 
@@ -1343,11 +1342,6 @@ enum bnx2x_udp_port_type {
        BNX2X_UDP_PORT_MAX,
 };
 
-struct bnx2x_udp_tunnel {
-       u16 dst_port;
-       u8 count;
-};
-
 struct bnx2x {
        /* Fields used in the tx and intr/napi performance paths
         * are grouped together in the beginning of the structure
@@ -1855,7 +1849,7 @@ struct bnx2x {
        bool accept_any_vlan;
 
        /* Vxlan/Geneve related information */
-       struct bnx2x_udp_tunnel udp_tunnel_ports[BNX2X_UDP_PORT_MAX];
+       u16 udp_tunnel_ports[BNX2X_UDP_PORT_MAX];
 };
 
 /* Tx queues may be less or equal to Rx queues */
@@ -1979,6 +1973,9 @@ struct bnx2x_func_init_params {
 
 #define skip_queue(bp, idx)    (NO_FCOE(bp) && IS_FCOE_IDX(idx))
 
+/*self test*/
+int bnx2x_idle_chk(struct bnx2x *bp);
+
 /**
  * bnx2x_set_mac_one - configure a single MAC address
  *
@@ -2430,13 +2427,6 @@ int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err);
 #define HC_SEG_ACCESS_ATTN             4
 #define HC_SEG_ACCESS_NORM             0   /*Driver decision 0-1*/
 
-static const u32 dmae_reg_go_c[] = {
-       DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
-       DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
-       DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
-       DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
-};
-
 void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev);
 void bnx2x_notify_link_changed(struct bnx2x *bp);
 
index ee9e929..e3d92e4 100644 (file)
@@ -4988,8 +4988,9 @@ void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue)
        bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
 }
 
-int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused bnx2x_suspend(struct device *dev_d)
 {
+       struct pci_dev *pdev = to_pci_dev(dev_d);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;
 
@@ -5001,8 +5002,6 @@ int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
 
        rtnl_lock();
 
-       pci_save_state(pdev);
-
        if (!netif_running(dev)) {
                rtnl_unlock();
                return 0;
@@ -5012,15 +5011,14 @@ int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
 
        bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
 
-       bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
-
        rtnl_unlock();
 
        return 0;
 }
 
-int bnx2x_resume(struct pci_dev *pdev)
+static int __maybe_unused bnx2x_resume(struct device *dev_d)
 {
+       struct pci_dev *pdev = to_pci_dev(dev_d);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct bnx2x *bp;
        int rc;
@@ -5038,14 +5036,11 @@ int bnx2x_resume(struct pci_dev *pdev)
 
        rtnl_lock();
 
-       pci_restore_state(pdev);
-
        if (!netif_running(dev)) {
                rtnl_unlock();
                return 0;
        }
 
-       bnx2x_set_power_state(bp, PCI_D0);
        netif_device_attach(dev);
 
        rc = bnx2x_nic_load(bp, LOAD_OPEN);
@@ -5055,6 +5050,8 @@ int bnx2x_resume(struct pci_dev *pdev)
        return rc;
 }
 
+SIMPLE_DEV_PM_OPS(bnx2x_pm_ops, bnx2x_suspend, bnx2x_resume);
+
 void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
                              u32 cid)
 {
index 6f1352d..7e4c93b 100644 (file)
@@ -541,9 +541,7 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p);
 /* NAPI poll Tx part */
 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
 
-/* suspend/resume callbacks */
-int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
-int bnx2x_resume(struct pci_dev *pdev);
+extern const struct dev_pm_ops bnx2x_pm_ops;
 
 /* Release IRQ vectors */
 void bnx2x_free_irq(struct bnx2x *bp);
@@ -962,12 +960,12 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
                start_params->network_cos_mode = STATIC_COS;
        else /* CHIP_IS_E1X */
                start_params->network_cos_mode = FW_WRR;
-       if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) {
-               port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].dst_port;
+       if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]) {
+               port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
                start_params->vxlan_dst_port = port;
        }
-       if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) {
-               port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].dst_port;
+       if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]) {
+               port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
                start_params->geneve_dst_port = port;
        }
 
index db5107e..7f24d26 100644 (file)
@@ -276,6 +276,13 @@ static const struct pci_device_id bnx2x_pci_tbl[] = {
 
 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
 
+const u32 dmae_reg_go_c[] = {
+       DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
+       DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
+       DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
+       DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
+};
+
 /* Global resources for unloading a previously loaded device */
 #define BNX2X_PREV_WAIT_NEEDED 1
 static DEFINE_SEMAPHORE(bnx2x_prev_sem);
@@ -1169,9 +1176,18 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
        }
 #endif
        if (IS_PF(bp)) {
+               int tmp_msg_en = bp->msg_enable;
+
                bnx2x_fw_dump(bp);
+               bp->msg_enable |= NETIF_MSG_HW;
+               BNX2X_ERR("Idle check (1st round) ----------\n");
+               bnx2x_idle_chk(bp);
+               BNX2X_ERR("Idle check (2nd round) ----------\n");
+               bnx2x_idle_chk(bp);
+               bp->msg_enable = tmp_msg_en;
                bnx2x_mc_assert(bp);
        }
+
        BNX2X_ERR("end crash dump -----------------\n");
 }
 
@@ -10136,7 +10152,6 @@ static int bnx2x_udp_port_update(struct bnx2x *bp)
 {
        struct bnx2x_func_switch_update_params *switch_update_params;
        struct bnx2x_func_state_params func_params = {NULL};
-       struct bnx2x_udp_tunnel *udp_tunnel;
        u16 vxlan_port = 0, geneve_port = 0;
        int rc;
 
@@ -10153,15 +10168,13 @@ static int bnx2x_udp_port_update(struct bnx2x *bp)
        __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
                  &switch_update_params->changes);
 
-       if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) {
-               udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
-               geneve_port = udp_tunnel->dst_port;
+       if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]) {
+               geneve_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
                switch_update_params->geneve_dst_port = geneve_port;
        }
 
-       if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) {
-               udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
-               vxlan_port = udp_tunnel->dst_port;
+       if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]) {
+               vxlan_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
                switch_update_params->vxlan_dst_port = vxlan_port;
        }
 
@@ -10181,94 +10194,27 @@ static int bnx2x_udp_port_update(struct bnx2x *bp)
        return rc;
 }
 
-static void __bnx2x_add_udp_port(struct bnx2x *bp, u16 port,
-                                enum bnx2x_udp_port_type type)
-{
-       struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
-
-       if (!netif_running(bp->dev) || !IS_PF(bp) || CHIP_IS_E1x(bp))
-               return;
-
-       if (udp_port->count && udp_port->dst_port == port) {
-               udp_port->count++;
-               return;
-       }
-
-       if (udp_port->count) {
-               DP(BNX2X_MSG_SP,
-                  "UDP tunnel [%d] -  destination port limit reached\n",
-                  type);
-               return;
-       }
-
-       udp_port->dst_port = port;
-       udp_port->count = 1;
-       bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
-}
-
-static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port,
-                                enum bnx2x_udp_port_type type)
-{
-       struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
-
-       if (!IS_PF(bp) || CHIP_IS_E1x(bp))
-               return;
-
-       if (!udp_port->count || udp_port->dst_port != port) {
-               DP(BNX2X_MSG_SP, "Invalid UDP tunnel [%d] port\n",
-                  type);
-               return;
-       }
-
-       /* Remove reference, and make certain it's no longer in use */
-       udp_port->count--;
-       if (udp_port->count)
-               return;
-       udp_port->dst_port = 0;
-
-       if (netif_running(bp->dev))
-               bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
-       else
-               DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n",
-                  type, port);
-}
-
-static void bnx2x_udp_tunnel_add(struct net_device *netdev,
-                                struct udp_tunnel_info *ti)
+static int bnx2x_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
 {
        struct bnx2x *bp = netdev_priv(netdev);
-       u16 t_port = ntohs(ti->port);
+       struct udp_tunnel_info ti;
 
-       switch (ti->type) {
-       case UDP_TUNNEL_TYPE_VXLAN:
-               __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
-               break;
-       case UDP_TUNNEL_TYPE_GENEVE:
-               __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
-               break;
-       default:
-               break;
-       }
-}
-
-static void bnx2x_udp_tunnel_del(struct net_device *netdev,
-                                struct udp_tunnel_info *ti)
-{
-       struct bnx2x *bp = netdev_priv(netdev);
-       u16 t_port = ntohs(ti->port);
+       udp_tunnel_nic_get_port(netdev, table, 0, &ti);
+       bp->udp_tunnel_ports[table] = be16_to_cpu(ti.port);
 
-       switch (ti->type) {
-       case UDP_TUNNEL_TYPE_VXLAN:
-               __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
-               break;
-       case UDP_TUNNEL_TYPE_GENEVE:
-               __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
-               break;
-       default:
-               break;
-       }
+       return bnx2x_udp_port_update(bp);
 }
 
+static const struct udp_tunnel_nic_info bnx2x_udp_tunnels = {
+       .sync_table     = bnx2x_udp_tunnel_sync,
+       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+                         UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+       .tables         = {
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+       },
+};
+
 static int bnx2x_close(struct net_device *dev);
 
 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
@@ -10391,24 +10337,6 @@ sp_rtnl_not_reset:
        if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
                bnx2x_handle_update_svid_cmd(bp);
 
-       if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
-                              &bp->sp_rtnl_state)) {
-               if (bnx2x_udp_port_update(bp)) {
-                       /* On error, forget configuration */
-                       memset(bp->udp_tunnel_ports, 0,
-                              sizeof(struct bnx2x_udp_tunnel) *
-                              BNX2X_UDP_PORT_MAX);
-               } else {
-                       /* Since we don't store additional port information,
-                        * if no ports are configured for any feature ask for
-                        * information about currently configured ports.
-                        */
-                       if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count &&
-                           !bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count)
-                               udp_tunnel_get_rx_info(bp->dev);
-               }
-       }
-
        /* work which needs rtnl lock not-taken (as it takes the lock itself and
         * can be called from other contexts as well)
         */
@@ -12604,9 +12532,6 @@ static int bnx2x_open(struct net_device *dev)
        if (rc)
                return rc;
 
-       if (IS_PF(bp))
-               udp_tunnel_get_rx_info(dev);
-
        return 0;
 }
 
@@ -13146,8 +13071,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_get_phys_port_id   = bnx2x_get_phys_port_id,
        .ndo_set_vf_link_state  = bnx2x_set_vf_link_state,
        .ndo_features_check     = bnx2x_features_check,
-       .ndo_udp_tunnel_add     = bnx2x_udp_tunnel_add,
-       .ndo_udp_tunnel_del     = bnx2x_udp_tunnel_del,
+       .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+       .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
 };
 
 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
@@ -13342,6 +13267,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
 
                dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM |
                                            NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+               if (IS_PF(bp))
+                       dev->udp_tunnel_nic_info = &bnx2x_udp_tunnels;
        }
 
        dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -14462,8 +14390,7 @@ static struct pci_driver bnx2x_pci_driver = {
        .id_table    = bnx2x_pci_tbl,
        .probe       = bnx2x_init_one,
        .remove      = bnx2x_remove_one,
-       .suspend     = bnx2x_suspend,
-       .resume      = bnx2x_resume,
+       .driver.pm   = &bnx2x_pm_ops,
        .err_handler = &bnx2x_err_handler,
 #ifdef CONFIG_BNX2X_SRIOV
        .sriov_configure = bnx2x_sriov_configure,
index a43dea2..bfc0e45 100644 (file)
@@ -7639,6 +7639,82 @@ Theotherbitsarereservedandshouldbezero*/
        (0x80 | ((_type)&0xf << 3) | ((CDU_CRC8(_cid, _region, _type)) & 0x7))
 #define CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(_val) ((_val) & ~0x80)
 
+/* IdleChk registers */
+#define PXP_REG_HST_VF_DISABLED_ERROR_VALID                     0x1030bc
+#define PXP_REG_HST_VF_DISABLED_ERROR_DATA                      0x1030b8
+#define PXP_REG_HST_PER_VIOLATION_VALID                                 0x1030e0
+#define PXP_REG_HST_INCORRECT_ACCESS_VALID                      0x1030cc
+#define PXP2_REG_RD_CPL_ERR_DETAILS                             0x120778
+#define PXP2_REG_RD_CPL_ERR_DETAILS2                            0x12077c
+#define PXP2_REG_RQ_GARB                                        0x120748
+#define PBF_REG_DISABLE_NEW_TASK_PROC_Q0                        0x15c1bc
+#define PBF_REG_DISABLE_NEW_TASK_PROC_Q1                        0x15c1c0
+#define PBF_REG_DISABLE_NEW_TASK_PROC_Q2                        0x15c1c4
+#define PBF_REG_DISABLE_NEW_TASK_PROC_Q3                        0x15c1c8
+#define PBF_REG_DISABLE_NEW_TASK_PROC_Q4                        0x15c1cc
+#define PBF_REG_DISABLE_NEW_TASK_PROC_Q5                        0x15c1d0
+#define PBF_REG_CREDIT_Q2                                       0x140344
+#define PBF_REG_CREDIT_Q3                                       0x140348
+#define PBF_REG_CREDIT_Q4                                       0x14034c
+#define PBF_REG_CREDIT_Q5                                       0x140350
+#define PBF_REG_INIT_CRD_Q2                                     0x15c238
+#define PBF_REG_INIT_CRD_Q3                                     0x15c23c
+#define PBF_REG_INIT_CRD_Q4                                     0x15c240
+#define PBF_REG_INIT_CRD_Q5                                     0x15c244
+#define PBF_REG_TASK_CNT_Q0                                     0x140374
+#define PBF_REG_TASK_CNT_Q1                                     0x140378
+#define PBF_REG_TASK_CNT_Q2                                     0x14037c
+#define PBF_REG_TASK_CNT_Q3                                     0x140380
+#define PBF_REG_TASK_CNT_Q4                                     0x140384
+#define PBF_REG_TASK_CNT_Q5                                     0x140388
+#define PBF_REG_TASK_CNT_LB_Q                                   0x140370
+#define QM_REG_BYTECRD0                                                 0x16e6fc
+#define QM_REG_BYTECRD1                                                 0x16e700
+#define QM_REG_BYTECRD2                                                 0x16e704
+#define QM_REG_BYTECRD3                                                 0x16e7ac
+#define QM_REG_BYTECRD4                                                 0x16e7b0
+#define QM_REG_BYTECRD5                                                 0x16e7b4
+#define QM_REG_BYTECRD6                                                 0x16e7b8
+#define QM_REG_BYTECRDCMDQ_0                                    0x16e6e8
+#define QM_REG_BYTECRDERRREG                                    0x16e708
+#define MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID                      0xa714
+#define QM_REG_VOQCREDIT_2                                      0x1682d8
+#define QM_REG_VOQCREDIT_3                                      0x1682dc
+#define QM_REG_VOQCREDIT_5                                      0x1682e4
+#define QM_REG_VOQCREDIT_6                                      0x1682e8
+#define QM_REG_VOQINITCREDIT_3                                  0x16806c
+#define QM_REG_VOQINITCREDIT_6                                  0x168078
+#define QM_REG_FWVOQ0TOHWVOQ                                    0x16e7bc
+#define QM_REG_FWVOQ1TOHWVOQ                                    0x16e7c0
+#define QM_REG_FWVOQ2TOHWVOQ                                    0x16e7c4
+#define QM_REG_FWVOQ3TOHWVOQ                                    0x16e7c8
+#define QM_REG_FWVOQ4TOHWVOQ                                    0x16e7cc
+#define QM_REG_FWVOQ5TOHWVOQ                                    0x16e7d0
+#define QM_REG_FWVOQ6TOHWVOQ                                    0x16e7d4
+#define QM_REG_FWVOQ7TOHWVOQ                                    0x16e7d8
+#define NIG_REG_INGRESS_EOP_PORT0_EMPTY                                 0x104ec
+#define NIG_REG_INGRESS_EOP_PORT1_EMPTY                                 0x104f8
+#define NIG_REG_INGRESS_RMP0_DSCR_EMPTY                                 0x10530
+#define NIG_REG_INGRESS_RMP1_DSCR_EMPTY                                 0x10538
+#define NIG_REG_INGRESS_LB_PBF_DELAY_EMPTY                      0x10508
+#define NIG_REG_EGRESS_MNG0_FIFO_EMPTY                          0x10460
+#define NIG_REG_EGRESS_MNG1_FIFO_EMPTY                          0x10474
+#define NIG_REG_EGRESS_DEBUG_FIFO_EMPTY                                 0x10418
+#define NIG_REG_EGRESS_DELAY0_EMPTY                             0x10420
+#define NIG_REG_EGRESS_DELAY1_EMPTY                             0x10428
+#define NIG_REG_LLH0_FIFO_EMPTY                                         0x10548
+#define NIG_REG_LLH1_FIFO_EMPTY                                         0x10558
+#define NIG_REG_P0_TX_MNG_HOST_FIFO_EMPTY                       0x182a8
+#define NIG_REG_P0_TLLH_FIFO_EMPTY                              0x18308
+#define NIG_REG_P0_HBUF_DSCR_EMPTY                              0x18318
+#define NIG_REG_P1_HBUF_DSCR_EMPTY                              0x18348
+#define NIG_REG_P0_RX_MACFIFO_EMPTY                             0x18570
+#define NIG_REG_P0_TX_MACFIFO_EMPTY                             0x18578
+#define NIG_REG_EGRESS_DELAY2_EMPTY                             0x1862c
+#define NIG_REG_EGRESS_DELAY3_EMPTY                             0x18630
+#define NIG_REG_EGRESS_DELAY4_EMPTY                             0x18634
+#define NIG_REG_EGRESS_DELAY5_EMPTY                             0x18638
+
 /******************************************************************************
  * Description:
  *        Calculates crc 8 on a word value: polynomial 0-1-2-8
@@ -7697,6 +7773,4 @@ static inline u8 calc_crc8(u32 data, u8 crc)
 
        return crc_res;
 }
-
-
 #endif /* BNX2X_REG_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_self_test.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_self_test.c
new file mode 100644 (file)
index 0000000..3f8bdad
--- /dev/null
@@ -0,0 +1,3183 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include "bnx2x.h"
+
+#define NA 0xCD
+
+#define IDLE_CHK_E1                    0x01
+#define IDLE_CHK_E1H                   0x02
+#define IDLE_CHK_E2                    0x04
+#define IDLE_CHK_E3A0                  0x08
+#define IDLE_CHK_E3B0                  0x10
+
+#define IDLE_CHK_ERROR                 1
+#define IDLE_CHK_ERROR_NO_TRAFFIC      2
+#define IDLE_CHK_WARNING               3
+
+#define MAX_FAIL_MSG 256
+
+/* statistics and error reporting */
+static int idle_chk_errors, idle_chk_warnings;
+
+/* masks for all chip types */
+static int is_e1, is_e1h, is_e2, is_e3a0, is_e3b0;
+
+/* struct for the argument list for a predicate in the self test databasei */
+struct st_pred_args {
+       u32 val1; /* value read from first register */
+       u32 val2; /* value read from second register, if applicable */
+       u32 imm1; /* 1st value in predicate condition, left-to-right */
+       u32 imm2; /* 2nd value in predicate condition, left-to-right */
+       u32 imm3; /* 3rd value in predicate condition, left-to-right */
+       u32 imm4; /* 4th value in predicate condition, left-to-right */
+};
+
+/* struct representing self test record - a single test */
+struct st_record {
+       u8 chip_mask;
+       u8 macro;
+       u32 reg1;
+       u32 reg2;
+       u16 loop;
+       u16 incr;
+       int (*bnx2x_predicate)(struct st_pred_args *pred_args);
+       u32 reg3;
+       u8 severity;
+       char *fail_msg;
+       struct st_pred_args pred_args;
+};
+
+/* predicates for self test */
+static int peq(struct st_pred_args *args)
+{
+       return (args->val1 == args->imm1);
+}
+
+static int pneq(struct st_pred_args *args)
+{
+       return (args->val1 != args->imm1);
+}
+
+static int pand_neq(struct st_pred_args *args)
+{
+       return ((args->val1 & args->imm1) != args->imm2);
+}
+
+static int pand_neq_x2(struct st_pred_args *args)
+{
+       return (((args->val1 & args->imm1) != args->imm2) &&
+               ((args->val1 & args->imm3) != args->imm4));
+}
+
+static int pneq_err(struct st_pred_args *args)
+{
+       return ((args->val1 != args->imm1) && (idle_chk_errors > args->imm2));
+}
+
+static int pgt(struct st_pred_args *args)
+{
+       return (args->val1 > args->imm1);
+}
+
+static int pneq_r2(struct st_pred_args *args)
+{
+       return (args->val1 != args->val2);
+}
+
+static int plt_sub_r2(struct st_pred_args *args)
+{
+       return (args->val1 < (args->val2 - args->imm1));
+}
+
+static int pne_sub_r2(struct st_pred_args *args)
+{
+       return (args->val1 != (args->val2 - args->imm1));
+}
+
+static int prsh_and_neq(struct st_pred_args *args)
+{
+       return (((args->val1 >> args->imm1) & args->imm2) != args->imm3);
+}
+
+static int peq_neq_r2(struct st_pred_args *args)
+{
+       return ((args->val1 == args->imm1) && (args->val2 != args->imm2));
+}
+
+static int peq_neq_neq_r2(struct st_pred_args *args)
+{
+       return ((args->val1 == args->imm1) && (args->val2 != args->imm2) &&
+               (args->val2 != args->imm3));
+}
+
+/* struct holding the database of self test checks (registers and predicates) */
+/* lines start from 2 since line 1 is heading in csv */
+#define ST_DB_LINES 468
+static struct st_record st_database[ST_DB_LINES] = {
+/*line 2*/{(0x3), 1, 0x2114,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "PCIE: ucorr_err_status is not 0",
+       {NA, NA, 0x0FF010, 0, NA, NA} },
+
+/*line 3*/{(0x3), 1, 0x2114,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "PCIE: ucorr_err_status - Unsupported request error",
+       {NA, NA, 0x100000, 0, NA, NA} },
+
+/*line 4*/{(0x3), 1, 0x2120,
+       NA, 1, 0, pand_neq_x2,
+       NA, IDLE_CHK_WARNING,
+       "PCIE: corr_err_status is not 0x2000",
+       {NA, NA, 0x31C1, 0x2000, 0x31C1, 0} },
+
+/*line 5*/{(0x3), 1, 0x2814,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "PCIE: attentions register is not 0x40100",
+       {NA, NA, ~0x40100, 0, NA, NA} },
+
+/*line 6*/{(0x2), 1, 0x281c,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "PCIE: attentions register is not 0x40040100",
+       {NA, NA, ~0x40040100, 0, NA, NA} },
+
+/*line 7*/{(0x2), 1, 0x2820,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "PCIE: attentions register is not 0x40040100",
+       {NA, NA, ~0x40040100, 0, NA, NA} },
+
+/*line 8*/{(0x3), 1, PXP2_REG_PGL_EXP_ROM2,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PXP2: There are outstanding read requests. Not all completios have arrived for read requests on tags that are marked with 0",
+       {NA, NA, 0xffffffff, NA, NA, NA} },
+
+/*line 9*/{(0x3), 2, 0x212c,
+       NA, 4, 4, pneq_err,
+       NA, IDLE_CHK_WARNING,
+       "PCIE: error packet header is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 10*/{(0x1C), 1, 0x2104,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "PCIE: ucorr_err_status is not 0",
+       {NA, NA, 0x0FD010, 0, NA, NA} },
+
+/*line 11*/{(0x1C), 1, 0x2104,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "PCIE: ucorr_err_status - Unsupported request error",
+       {NA, NA, 0x100000, 0, NA, NA} },
+
+/*line 12*/{(0x1C), 1, 0x2104,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "PCIE: ucorr_err_status - Flow Control Protocol Error",
+       {NA, NA, 0x2000, 0, NA, NA} },
+
+/*line 13*/{(0x1C), 1, 0x2110,
+       NA, 1, 0, pand_neq_x2,
+       NA, IDLE_CHK_WARNING,
+       "PCIE: corr_err_status is not 0x2000",
+       {NA, NA, 0x31C1, 0x2000, 0x31C1, 0} },
+
+/*line 14*/{(0x1C), 1, 0x2814,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "PCIE: TTX_BRIDGE_FORWARD_ERR - Received master request while BME was 0",
+       {NA, NA, 0x2000000, 0, NA, NA} },
+
+/*line 15*/{(0x1C), 1, 0x2814,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "PCIE: Func 0 1: attentions register is not 0x2040902",
+       {NA, NA, ~0x2040902, 0, NA, NA} },
+
+/*line 16*/{(0x1C), 1, 0x2854,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "PCIE: Func 2 3 4: attentions register is not 0x10240902",
+       {NA, NA, ~0x10240902, 0, NA, NA} },
+
+/*line 17*/{(0x1C), 1, 0x285c,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "PCIE: Func 5 6 7: attentions register is not 0x10240902",
+       {NA, NA, ~0x10240902, 0, NA, NA} },
+
+/*line 18*/{(0x18), 1, 0x3040,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "PCIE: Overflow in DLP2TLP buffer",
+       {NA, NA, 0x2, 0, NA, NA} },
+
+/*line 19*/{(0x1C), 1, PXP2_REG_PGL_EXP_ROM2,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PXP2: There are outstanding read requests for tags 0-31. Not all completios have arrived for read requests on tags that are marked with 0",
+       {NA, NA, 0xffffffff, NA, NA, NA} },
+
+/*line 20*/{(0x1C), 2, 0x211c,
+       NA, 4, 4, pneq_err,
+       NA, IDLE_CHK_WARNING,
+       "PCIE: error packet header is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 21*/{(0x1C), 1, PGLUE_B_REG_INCORRECT_RCV_DETAILS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "PGLUE_B: Packet received from PCIe not according to the rules",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 22*/{(0x1C), 1, PGLUE_B_REG_WAS_ERROR_VF_31_0,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGLUE_B: was_error for VFs 0-31 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 23*/{(0x1C), 1, PGLUE_B_REG_WAS_ERROR_VF_63_32,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGLUE_B: was_error for VFs 32-63 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 24*/{(0x1C), 1, PGLUE_B_REG_WAS_ERROR_VF_95_64,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGLUE_B: was_error for VFs 64-95 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 25*/{(0x1C), 1, PGLUE_B_REG_WAS_ERROR_VF_127_96,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGLUE_B: was_error for VFs 96-127 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 26*/{(0x1C), 1, PGLUE_B_REG_WAS_ERROR_PF_7_0,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGLUE_B: was_error for PFs 0-7 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 27*/{(0x1C), 1, PGLUE_B_REG_RX_ERR_DETAILS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGLUE_B: Completion received with error. (2:0) - PFID. (3) - VF_VALID. (9:4) - VFID. (11:10) - Error code : 0 - Completion Timeout; 1 - Unsupported Request; 2 - Completer Abort. (12) - valid bit",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 28*/{(0x1C), 1, PGLUE_B_REG_RX_TCPL_ERR_DETAILS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGLUE_B: ATS TCPL received with error. (2:0) - PFID. (3) - VF_VALID. (9:4) - VFID. (11:10) - Error code : 0 - Completion Timeout ; 1 - Unsupported Request; 2 - Completer Abort. (16:12) - OTB Entry ID. (17) - valid bit",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 29*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_WR_ADD_31_0,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGLUE_B: Error in master write. Address(31:0) is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 30*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_WR_ADD_63_32,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGLUE_B: Error in master write. Address(63:32) is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 31*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_WR_DETAILS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGLUE_B: Error in master write. Error details register is not 0. (4:0) VQID. (23:21) - PFID. (24) - VF_VALID. (30:25) - VFID",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 32*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_WR_DETAILS2,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGLUE_B: Error in master write. Error details 2nd register is not 0. (21) - was_error set; (22) - BME cleared; (23) - FID_enable cleared; (24) - VF with parent PF FLR_request or IOV_disable_request",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 33*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_RD_ADD_31_0,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGLUE: Error in master read address(31:0) is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 34*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_RD_ADD_63_32,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGLUE_B: Error in master read address(63:32) is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 35*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_RD_DETAILS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGLUE_B: Error in master read Error details register is not 0. (4:0) VQID. (23:21) - PFID. (24) - VF_VALID. (30:25) - VFID",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 36*/{(0x1C), 1, PGLUE_B_REG_TX_ERR_RD_DETAILS2,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGLUE_B: Error in master read Error details 2nd register is not 0. (21) - was_error set; (22) - BME cleared; (23) - FID_enable cleared; (24) - VF with parent PF FLR_request or IOV_disable_request",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 37*/{(0x1C), 1, PGLUE_B_REG_VF_LENGTH_VIOLATION_DETAILS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGLUE_B: Target VF length violation access",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 38*/{(0x1C), 1, PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGLUE_B: Target VF GRC space access failed permission check",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 39*/{(0x1C), 1, PGLUE_B_REG_TAGS_63_32,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGLUE_B: There are outstanding read requests for tags 32-63. Not all completios have arrived for read requests on tags that are marked with 0",
+       {NA, NA, 0xffffffff, NA, NA, NA} },
+
+/*line 40*/{(0x1C), 3, PXP_REG_HST_VF_DISABLED_ERROR_VALID,
+       PXP_REG_HST_VF_DISABLED_ERROR_DATA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PXP: Access to disabled VF took place",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 41*/{(0x1C), 1, PXP_REG_HST_PER_VIOLATION_VALID,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PXP: Zone A permission violation occurred",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 42*/{(0x1C), 1, PXP_REG_HST_INCORRECT_ACCESS_VALID,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PXP: Incorrect transaction took place",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 43*/{(0x1C), 1, PXP2_REG_RD_CPL_ERR_DETAILS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PXP2: Completion received with error. Error details register is not 0. (15:0) - ECHO. (28:16) - Sub Request length plus start_offset_2_0 minus 1",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 44*/{(0x1C), 1, PXP2_REG_RD_CPL_ERR_DETAILS2,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PXP2: Completion received with error. Error details 2nd register is not 0. (4:0) - VQ ID. (8:5) - client ID. (9) - valid bit",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 45*/{(0x1F), 1, PXP2_REG_RQ_VQ0_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ0 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 46*/{(0x1F), 1, PXP2_REG_RQ_VQ1_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ1 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 47*/{(0x1F), 1, PXP2_REG_RQ_VQ2_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ2 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 48*/{(0x1F), 1, PXP2_REG_RQ_VQ3_ENTRY_CNT,
+       NA, 1, 0, pgt,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ3 is not empty",
+       {NA, NA, 2, NA, NA, NA} },
+
+/*line 49*/{(0x1F), 1, PXP2_REG_RQ_VQ4_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ4 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 50*/{(0x1F), 1, PXP2_REG_RQ_VQ5_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ5 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 51*/{(0x1F), 1, PXP2_REG_RQ_VQ6_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ6 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 52*/{(0x1F), 1, PXP2_REG_RQ_VQ7_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ7 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 53*/{(0x1F), 1, PXP2_REG_RQ_VQ8_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ8 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 54*/{(0x1F), 1, PXP2_REG_RQ_VQ9_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ9 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 55*/{(0x1F), 1, PXP2_REG_RQ_VQ10_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ10 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 56*/{(0x1F), 1, PXP2_REG_RQ_VQ11_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ11 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 57*/{(0x1F), 1, PXP2_REG_RQ_VQ12_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ12 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 58*/{(0x1F), 1, PXP2_REG_RQ_VQ13_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ13 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 59*/{(0x1F), 1, PXP2_REG_RQ_VQ14_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ14 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 60*/{(0x1F), 1, PXP2_REG_RQ_VQ15_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ15 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 61*/{(0x1F), 1, PXP2_REG_RQ_VQ16_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ16 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 62*/{(0x1F), 1, PXP2_REG_RQ_VQ17_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ17 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 63*/{(0x1F), 1, PXP2_REG_RQ_VQ18_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ18 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 64*/{(0x1F), 1, PXP2_REG_RQ_VQ19_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ19 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 65*/{(0x1F), 1, PXP2_REG_RQ_VQ20_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ20 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 66*/{(0x1F), 1, PXP2_REG_RQ_VQ21_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ21 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 67*/{(0x1F), 1, PXP2_REG_RQ_VQ22_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ22 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 68*/{(0x1F), 1, PXP2_REG_RQ_VQ23_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ23 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 69*/{(0x1F), 1, PXP2_REG_RQ_VQ24_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ24 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 70*/{(0x1F), 1, PXP2_REG_RQ_VQ25_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ25 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 71*/{(0x1F), 1, PXP2_REG_RQ_VQ26_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ26 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 72*/{(0x1F), 1, PXP2_REG_RQ_VQ27_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ27 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 73*/{(0x1F), 1, PXP2_REG_RQ_VQ28_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ28 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 74*/{(0x1F), 1, PXP2_REG_RQ_VQ29_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ29 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 75*/{(0x1F), 1, PXP2_REG_RQ_VQ30_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ30 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 76*/{(0x1F), 1, PXP2_REG_RQ_VQ31_ENTRY_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: VQ31 is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 77*/{(0x1F), 1, PXP2_REG_RQ_UFIFO_NUM_OF_ENTRY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: rq_ufifo_num_of_entry is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 78*/{(0x1F), 1, PXP2_REG_RQ_RBC_DONE,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "PXP2: rq_rbc_done is not 1",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 79*/{(0x1F), 1, PXP2_REG_RQ_CFG_DONE,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "PXP2: rq_cfg_done is not 1",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 80*/{(0x3), 1, PXP2_REG_PSWRQ_BW_CREDIT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: rq_read_credit and rq_write_credit are not 3",
+       {NA, NA, 0x1B, NA, NA, NA} },
+
+/*line 81*/{(0x1F), 1, PXP2_REG_RD_START_INIT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "PXP2: rd_start_init is not 1",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 82*/{(0x1F), 1, PXP2_REG_RD_INIT_DONE,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "PXP2: rd_init_done is not 1",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 83*/{(0x1F), 3, PXP2_REG_RD_SR_CNT,
+       PXP2_REG_RD_SR_NUM_CFG, 1, 0, pne_sub_r2,
+       NA, IDLE_CHK_WARNING,
+       "PXP2: rd_sr_cnt is not equal to rd_sr_num_cfg",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 84*/{(0x1F), 3, PXP2_REG_RD_BLK_CNT,
+       PXP2_REG_RD_BLK_NUM_CFG, 1, 0, pneq_r2,
+       NA, IDLE_CHK_WARNING,
+       "PXP2: rd_blk_cnt is not equal to rd_blk_num_cfg",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 85*/{(0x1F), 3, PXP2_REG_RD_SR_CNT,
+       PXP2_REG_RD_SR_NUM_CFG, 1, 0, plt_sub_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: There are more than two unused SRs",
+       {NA, NA, 3, NA, NA, NA} },
+
+/*line 86*/{(0x1F), 3, PXP2_REG_RD_BLK_CNT,
+       PXP2_REG_RD_BLK_NUM_CFG, 1, 0, plt_sub_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: There are more than two unused blocks",
+       {NA, NA, 2, NA, NA, NA} },
+
+/*line 87*/{(0x1F), 1, PXP2_REG_RD_PORT_IS_IDLE_0,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: P0 All delivery ports are not idle",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 88*/{(0x1F), 1, PXP2_REG_RD_PORT_IS_IDLE_1,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: P1 All delivery ports are not idle",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 89*/{(0x1F), 2, PXP2_REG_RD_ALMOST_FULL_0,
+       NA, 11, 4, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: rd_almost_full is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 90*/{(0x1F), 1, PXP2_REG_RD_DISABLE_INPUTS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "PXP2: PSWRD inputs are disabled",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 91*/{(0x1F), 1, PXP2_REG_HST_HEADER_FIFO_STATUS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: HST header FIFO status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 92*/{(0x1F), 1, PXP2_REG_HST_DATA_FIFO_STATUS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: HST data FIFO status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 93*/{(0x3), 1, PXP2_REG_PGL_WRITE_BLOCKED,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "PXP2: pgl_write_blocked is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 94*/{(0x3), 1, PXP2_REG_PGL_READ_BLOCKED,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "PXP2: pgl_read_blocked is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 95*/{(0x1C), 1, PXP2_REG_PGL_WRITE_BLOCKED,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PXP2: pgl_write_blocked is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 96*/{(0x1C), 1, PXP2_REG_PGL_READ_BLOCKED,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PXP2: pgl_read_blocked is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 97*/{(0x1F), 1, PXP2_REG_PGL_TXW_CDTS,
+       NA, 1, 0, prsh_and_neq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PXP2: There is data which is ready",
+       {NA, NA, 17, 1, 0, NA} },
+
+/*line 98*/{(0x1F), 1, PXP_REG_HST_ARB_IS_IDLE,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PXP: HST arbiter is not idle",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 99*/{(0x1F), 1, PXP_REG_HST_CLIENTS_WAITING_TO_ARB,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PXP: HST one of the clients is waiting for delivery",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 100*/{(0x1E), 1, PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PXP: HST Close the gates: Discarding internal writes",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 101*/{(0x1E), 1, PXP_REG_HST_DISCARD_DOORBELLS_STATUS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PXP: HST Close the gates: Discarding doorbells",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 102*/{(0x1C), 1, PXP2_REG_RQ_GARB,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "PXP2: PSWRQ Close the gates is asserted. Check AEU AFTER_INVERT registers for parity errors",
+       {NA, NA, 0x1000, 0, NA, NA} },
+
+/*line 103*/{(0x1F), 1, DMAE_REG_GO_C0,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "DMAE: command 0 go is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 104*/{(0x1F), 1, DMAE_REG_GO_C1,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "DMAE: command 1 go is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 105*/{(0x1F), 1, DMAE_REG_GO_C2,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "DMAE: command 2 go is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 106*/{(0x1F), 1, DMAE_REG_GO_C3,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "DMAE: command 3 go is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 107*/{(0x1F), 1, DMAE_REG_GO_C4,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "DMAE: command 4 go is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 108*/{(0x1F), 1, DMAE_REG_GO_C5,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "DMAE: command 5 go is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 109*/{(0x1F), 1, DMAE_REG_GO_C6,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "DMAE: command 6 go is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 110*/{(0x1F), 1, DMAE_REG_GO_C7,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "DMAE: command 7 go is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 111*/{(0x1F), 1, DMAE_REG_GO_C8,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "DMAE: command 8 go is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 112*/{(0x1F), 1, DMAE_REG_GO_C9,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "DMAE: command 9 go is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 113*/{(0x1F), 1, DMAE_REG_GO_C10,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "DMAE: command 10 go is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 114*/{(0x1F), 1, DMAE_REG_GO_C11,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "DMAE: command 11 go is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 115*/{(0x1F), 1, DMAE_REG_GO_C12,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "DMAE: command 12 go is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 116*/{(0x1F), 1, DMAE_REG_GO_C13,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "DMAE: command 13 go is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 117*/{(0x1F), 1, DMAE_REG_GO_C14,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "DMAE: command 14 go is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 118*/{(0x1F), 1, DMAE_REG_GO_C15,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "DMAE: command 15 go is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 119*/{(0x1F), 1, CFC_REG_ERROR_VECTOR,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "CFC: error vector is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 120*/{(0x1F), 1, CFC_REG_NUM_LCIDS_ARRIVING,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "CFC: number of arriving LCIDs is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 121*/{(0x1F), 1, CFC_REG_NUM_LCIDS_ALLOC,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "CFC: number of alloc LCIDs is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 122*/{(0x1F), 1, CFC_REG_NUM_LCIDS_LEAVING,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "CFC: number of leaving LCIDs is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 123*/{(0x1F), 7, CFC_REG_INFO_RAM,
+       CFC_REG_CID_CAM, (CFC_REG_INFO_RAM_SIZE >> 4), 16, peq_neq_neq_r2,
+       CFC_REG_ACTIVITY_COUNTER, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "CFC: AC is neither 0 nor 2 on connType 0 (ETH)",
+       {NA, NA, 0, 0, 2, NA} },
+
+/*line 124*/{(0x1F), 7, CFC_REG_INFO_RAM,
+       CFC_REG_CID_CAM, (CFC_REG_INFO_RAM_SIZE >> 4), 16, peq_neq_r2,
+       CFC_REG_ACTIVITY_COUNTER, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "CFC: AC is not 0 on connType 1 (TOE)",
+       {NA, NA, 1, 0, NA, NA} },
+
+/*line 125*/{(0x1F), 7, CFC_REG_INFO_RAM,
+       CFC_REG_CID_CAM, (CFC_REG_INFO_RAM_SIZE >> 4), 16, peq_neq_r2,
+       CFC_REG_ACTIVITY_COUNTER, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "CFC: AC is not 0 on connType 3 (iSCSI)",
+       {NA, NA, 3, 0, NA, NA} },
+
+/*line 126*/{(0x1F), 7, CFC_REG_INFO_RAM,
+       CFC_REG_CID_CAM, (CFC_REG_INFO_RAM_SIZE >> 4), 16, peq_neq_r2,
+       CFC_REG_ACTIVITY_COUNTER, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "CFC: AC is not 0 on connType 4 (FCoE)",
+       {NA, NA, 4, 0, NA, NA} },
+
+/*line 127*/{(0x1F), 2, QM_REG_QTASKCTR_0,
+       NA, 64, 4, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: Queue is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 128*/{(0xF), 3, QM_REG_VOQCREDIT_0,
+       QM_REG_VOQINITCREDIT_0, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: VOQ_0, VOQ credit is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 129*/{(0xF), 3, QM_REG_VOQCREDIT_1,
+       QM_REG_VOQINITCREDIT_1, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: VOQ_1, VOQ credit is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 130*/{(0xF), 3, QM_REG_VOQCREDIT_4,
+       QM_REG_VOQINITCREDIT_4, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: VOQ_4, VOQ credit is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 131*/{(0x3), 3, QM_REG_PORT0BYTECRD,
+       QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: P0 Byte credit is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 132*/{(0x3), 3, QM_REG_PORT1BYTECRD,
+       QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: P1 Byte credit is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 133*/{(0x1F), 1, CCM_REG_CAM_OCCUP,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "CCM: XX protection CAM is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 134*/{(0x1F), 1, TCM_REG_CAM_OCCUP,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "TCM: XX protection CAM is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 135*/{(0x1F), 1, UCM_REG_CAM_OCCUP,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "UCM: XX protection CAM is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 136*/{(0x1F), 1, XCM_REG_CAM_OCCUP,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "XCM: XX protection CAM is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 137*/{(0x1F), 1, BRB1_REG_NUM_OF_FULL_BLOCKS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "BRB1: BRB is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 138*/{(0x1F), 1, CSEM_REG_SLEEP_THREADS_VALID,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "CSEM: There are sleeping threads",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 139*/{(0x1F), 1, TSEM_REG_SLEEP_THREADS_VALID,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "TSEM: There are sleeping threads",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 140*/{(0x1F), 1, USEM_REG_SLEEP_THREADS_VALID,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "USEM: There are sleeping threads",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 141*/{(0x1F), 1, XSEM_REG_SLEEP_THREADS_VALID,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "XSEM: There are sleeping threads",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 142*/{(0x1F), 1, CSEM_REG_SLOW_EXT_STORE_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "CSEM: External store FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 143*/{(0x1F), 1, TSEM_REG_SLOW_EXT_STORE_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "TSEM: External store FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 144*/{(0x1F), 1, USEM_REG_SLOW_EXT_STORE_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "USEM: External store FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 145*/{(0x1F), 1, XSEM_REG_SLOW_EXT_STORE_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "XSEM: External store FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 146*/{(0x1F), 1, CSDM_REG_SYNC_PARSER_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "CSDM: Parser serial FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 147*/{(0x1F), 1, TSDM_REG_SYNC_PARSER_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "TSDM: Parser serial FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 148*/{(0x1F), 1, USDM_REG_SYNC_PARSER_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "USDM: Parser serial FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 149*/{(0x1F), 1, XSDM_REG_SYNC_PARSER_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "XSDM: Parser serial FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 150*/{(0x1F), 1, CSDM_REG_SYNC_SYNC_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "CSDM: Parser SYNC serial FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 151*/{(0x1F), 1, TSDM_REG_SYNC_SYNC_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "TSDM: Parser SYNC serial FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 152*/{(0x1F), 1, USDM_REG_SYNC_SYNC_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "USDM: Parser SYNC serial FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 153*/{(0x1F), 1, XSDM_REG_SYNC_SYNC_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "XSDM: Parser SYNC serial FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 154*/{(0x1F), 1, CSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "CSDM: pxp_ctrl rd_data fifo is not empty in sdm_dma_rsp block",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 155*/{(0x1F), 1, TSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "TSDM: pxp_ctrl rd_data fifo is not empty in sdm_dma_rsp block",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 156*/{(0x1F), 1, USDM_REG_RSP_PXP_CTRL_RDATA_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "USDM: pxp_ctrl rd_data fifo is not empty in sdm_dma_rsp block",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 157*/{(0x1F), 1, XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "XSDM: pxp_ctrl rd_data fifo is not empty in sdm_dma_rsp block",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 158*/{(0x1F), 1, DORQ_REG_DQ_FILL_LVLF,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "DORQ: DORQ queue is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 159*/{(0x1F), 1, CFC_REG_CFC_INT_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "CFC: Interrupt status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 160*/{(0x1F), 1, CDU_REG_CDU_INT_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "CDU: Interrupt status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 161*/{(0x1F), 1, CCM_REG_CCM_INT_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "CCM: Interrupt status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 162*/{(0x1F), 1, TCM_REG_TCM_INT_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "TCM: Interrupt status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 163*/{(0x1F), 1, UCM_REG_UCM_INT_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "UCM: Interrupt status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 164*/{(0x1F), 1, XCM_REG_XCM_INT_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "XCM: Interrupt status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 165*/{(0xF), 1, PBF_REG_PBF_INT_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "PBF: Interrupt status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 166*/{(0x1F), 1, TM_REG_TM_INT_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "TIMERS: Interrupt status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 167*/{(0x1F), 1, DORQ_REG_DORQ_INT_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "DORQ: Interrupt status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 168*/{(0x1F), 1, SRC_REG_SRC_INT_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "SRCH: Interrupt status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 169*/{(0x1F), 1, PRS_REG_PRS_INT_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "PRS: Interrupt status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 170*/{(0x1F), 1, BRB1_REG_BRB1_INT_STS,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "BRB1: Interrupt status is not 0",
+       {NA, NA, ~0xFC00, 0, NA, NA} },
+
+/*line 171*/{(0x1F), 1, GRCBASE_XPB + PB_REG_PB_INT_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "XPB: Interrupt status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 172*/{(0x1F), 1, GRCBASE_UPB + PB_REG_PB_INT_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "UPB: Interrupt status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 173*/{(0x1), 1, PXP2_REG_PXP2_INT_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PXP2: Interrupt status 0 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 174*/{(0x1E), 1, PXP2_REG_PXP2_INT_STS_0,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PXP2: Interrupt status 0 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 175*/{(0x1E), 1, PXP2_REG_PXP2_INT_STS_1,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PXP2: Interrupt status 1 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 176*/{(0x1F), 1, QM_REG_QM_INT_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "QM: Interrupt status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 177*/{(0x1F), 1, PXP_REG_PXP_INT_STS_0,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PXP: P0 Interrupt status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 178*/{(0x1F), 1, PXP_REG_PXP_INT_STS_1,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PXP: P1 Interrupt status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 179*/{(0x1C), 1, PGLUE_B_REG_PGLUE_B_INT_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGLUE_B: Interrupt status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 180*/{(0x1F), 1, DORQ_REG_RSPA_CRD_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "DORQ: Credit to XCM is not full",
+       {NA, NA, 2, NA, NA, NA} },
+
+/*line 181*/{(0x1F), 1, DORQ_REG_RSPB_CRD_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "DORQ: Credit to UCM is not full",
+       {NA, NA, 2, NA, NA, NA} },
+
+/*line 182*/{(0x3), 1, QM_REG_VOQCRDERRREG,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "QM: Credit error register is not 0 (byte or credit overflow/underflow)",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 183*/{(0x1F), 1, DORQ_REG_DQ_FULL_ST,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "DORQ: DORQ queue is full",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 184*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "AEU: P0 AFTER_INVERT_1 is not 0",
+       {NA, NA, ~0xCFFC, 0, NA, NA} },
+
+/*line 185*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "AEU: P0 AFTER_INVERT_2 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 186*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "AEU: P0 AFTER_INVERT_3 is not 0",
+       {NA, NA, ~0xFFFF0000, 0, NA, NA} },
+
+/*line 187*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "AEU: P0 AFTER_INVERT_4 is not 0",
+       {NA, NA, ~0x801FFFFF, 0, NA, NA} },
+
+/*line 188*/{(0x3), 1, MISC_REG_AEU_AFTER_INVERT_1_FUNC_1,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "AEU: P1 AFTER_INVERT_1 is not 0",
+       {NA, NA, ~0xCFFC, 0, NA, NA} },
+
+/*line 189*/{(0x3), 1, MISC_REG_AEU_AFTER_INVERT_2_FUNC_1,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "AEU: P1 AFTER_INVERT_2 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 190*/{(0x3), 1, MISC_REG_AEU_AFTER_INVERT_3_FUNC_1,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "AEU: P1 AFTER_INVERT_3 is not 0",
+       {NA, NA, ~0xFFFF0000, 0, NA, NA} },
+
+/*line 191*/{(0x3), 1, MISC_REG_AEU_AFTER_INVERT_4_FUNC_1,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "AEU: P1 AFTER_INVERT_4 is not 0",
+       {NA, NA, ~0x801FFFFF, 0, NA, NA} },
+
+/*line 192*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_1_MCP,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "AEU: MCP AFTER_INVERT_1 is not 0",
+       {NA, NA, ~0xCFFC, 0, NA, NA} },
+
+/*line 193*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_2_MCP,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "AEU: MCP AFTER_INVERT_2 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 194*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_3_MCP,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "AEU: MCP AFTER_INVERT_3 is not 0",
+       {NA, NA, ~0xFFFF0000, 0, NA, NA} },
+
+/*line 195*/{(0x1F), 1, MISC_REG_AEU_AFTER_INVERT_4_MCP,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "AEU: MCP AFTER_INVERT_4 is not 0",
+       {NA, NA, ~0x801FFFFF, 0, NA, NA} },
+
+/*line 196*/{(0xF), 5, PBF_REG_P0_CREDIT,
+       PBF_REG_P0_INIT_CRD, 1, 0, pneq_r2,
+       PBF_REG_DISABLE_NEW_TASK_PROC_P0, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PBF: P0 credit is not equal to init_crd",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 197*/{(0xF), 5, PBF_REG_P1_CREDIT,
+       PBF_REG_P1_INIT_CRD, 1, 0, pneq_r2,
+       PBF_REG_DISABLE_NEW_TASK_PROC_P1, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PBF: P1 credit is not equal to init_crd",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 198*/{(0xF), 3, PBF_REG_P4_CREDIT,
+       PBF_REG_P4_INIT_CRD, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PBF: P4 credit is not equal to init_crd",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 199*/{(0x10), 5, PBF_REG_CREDIT_Q0,
+       PBF_REG_INIT_CRD_Q0, 1, 0, pneq_r2,
+       PBF_REG_DISABLE_NEW_TASK_PROC_Q0, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PBF: Q0 credit is not equal to init_crd",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 200*/{(0x10), 5, PBF_REG_CREDIT_Q1,
+       PBF_REG_INIT_CRD_Q1, 1, 0, pneq_r2,
+       PBF_REG_DISABLE_NEW_TASK_PROC_Q1, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PBF: Q1 credit is not equal to init_crd",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 201*/{(0x10), 5, PBF_REG_CREDIT_Q2,
+       PBF_REG_INIT_CRD_Q2, 1, 0, pneq_r2,
+       PBF_REG_DISABLE_NEW_TASK_PROC_Q2, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PBF: Q2 credit is not equal to init_crd",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 202*/{(0x10), 5, PBF_REG_CREDIT_Q3,
+       PBF_REG_INIT_CRD_Q3, 1, 0, pneq_r2,
+       PBF_REG_DISABLE_NEW_TASK_PROC_Q3, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PBF: Q3 credit is not equal to init_crd",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 203*/{(0x10), 5, PBF_REG_CREDIT_Q4,
+       PBF_REG_INIT_CRD_Q4, 1, 0, pneq_r2,
+       PBF_REG_DISABLE_NEW_TASK_PROC_Q4, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PBF: Q4 credit is not equal to init_crd",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 204*/{(0x10), 5, PBF_REG_CREDIT_Q5,
+       PBF_REG_INIT_CRD_Q5, 1, 0, pneq_r2,
+       PBF_REG_DISABLE_NEW_TASK_PROC_Q5, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PBF: Q5 credit is not equal to init_crd",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 205*/{(0x10), 3, PBF_REG_CREDIT_LB_Q,
+       PBF_REG_INIT_CRD_LB_Q, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PBF: LB Q credit is not equal to init_crd",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 206*/{(0xF), 1, PBF_REG_P0_TASK_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PBF: P0 task_cnt is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 207*/{(0xF), 1, PBF_REG_P1_TASK_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PBF: P1 task_cnt is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 208*/{(0xF), 1, PBF_REG_P4_TASK_CNT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PBF: P4 task_cnt is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 209*/{(0x10), 1, PBF_REG_TASK_CNT_Q0,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PBF: Q0 task_cnt is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 210*/{(0x10), 1, PBF_REG_TASK_CNT_Q1,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PBF: Q1 task_cnt is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 211*/{(0x10), 1, PBF_REG_TASK_CNT_Q2,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PBF: Q2 task_cnt is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 212*/{(0x10), 1, PBF_REG_TASK_CNT_Q3,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PBF: Q3 task_cnt is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 213*/{(0x10), 1, PBF_REG_TASK_CNT_Q4,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PBF: Q4 task_cnt is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 214*/{(0x10), 1, PBF_REG_TASK_CNT_Q5,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PBF: Q5 task_cnt is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 215*/{(0x10), 1, PBF_REG_TASK_CNT_LB_Q,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PBF: LB Q task_cnt is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 216*/{(0x1F), 1, XCM_REG_CFC_INIT_CRD,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "XCM: CFC_INIT_CRD is not 1",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 217*/{(0x1F), 1, UCM_REG_CFC_INIT_CRD,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "UCM: CFC_INIT_CRD is not 1",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 218*/{(0x1F), 1, TCM_REG_CFC_INIT_CRD,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "TCM: CFC_INIT_CRD is not 1",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 219*/{(0x1F), 1, CCM_REG_CFC_INIT_CRD,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "CCM: CFC_INIT_CRD is not 1",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 220*/{(0x1F), 1, XCM_REG_XQM_INIT_CRD,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "XCM: XQM_INIT_CRD is not 32",
+       {NA, NA, 32, NA, NA, NA} },
+
+/*line 221*/{(0x1F), 1, UCM_REG_UQM_INIT_CRD,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "UCM: UQM_INIT_CRD is not 32",
+       {NA, NA, 32, NA, NA, NA} },
+
+/*line 222*/{(0x1F), 1, TCM_REG_TQM_INIT_CRD,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "TCM: TQM_INIT_CRD is not 32",
+       {NA, NA, 32, NA, NA, NA} },
+
+/*line 223*/{(0x1F), 1, CCM_REG_CQM_INIT_CRD,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "CCM: CQM_INIT_CRD is not 32",
+       {NA, NA, 32, NA, NA, NA} },
+
+/*line 224*/{(0x1F), 1, XCM_REG_TM_INIT_CRD,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "XCM: TM_INIT_CRD is not 4",
+       {NA, NA, 4, NA, NA, NA} },
+
+/*line 225*/{(0x1F), 1, UCM_REG_TM_INIT_CRD,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "UCM: TM_INIT_CRD is not 4",
+       {NA, NA, 4, NA, NA, NA} },
+
+/*line 226*/{(0x1F), 1, XCM_REG_FIC0_INIT_CRD,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "XCM: FIC0_INIT_CRD is not 64",
+       {NA, NA, 64, NA, NA, NA} },
+
+/*line 227*/{(0x1F), 1, UCM_REG_FIC0_INIT_CRD,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "UCM: FIC0_INIT_CRD is not 64",
+       {NA, NA, 64, NA, NA, NA} },
+
+/*line 228*/{(0x1F), 1, TCM_REG_FIC0_INIT_CRD,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "TCM: FIC0_INIT_CRD is not 64",
+       {NA, NA, 64, NA, NA, NA} },
+
+/*line 229*/{(0x1F), 1, CCM_REG_FIC0_INIT_CRD,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "CCM: FIC0_INIT_CRD is not 64",
+       {NA, NA, 64, NA, NA, NA} },
+
+/*line 230*/{(0x1F), 1, XCM_REG_FIC1_INIT_CRD,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "XCM: FIC1_INIT_CRD is not 64",
+       {NA, NA, 64, NA, NA, NA} },
+
+/*line 231*/{(0x1F), 1, UCM_REG_FIC1_INIT_CRD,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "UCM: FIC1_INIT_CRD is not 64",
+       {NA, NA, 64, NA, NA, NA} },
+
+/*line 232*/{(0x1F), 1, TCM_REG_FIC1_INIT_CRD,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "TCM: FIC1_INIT_CRD is not 64",
+       {NA, NA, 64, NA, NA, NA} },
+
+/*line 233*/{(0x1F), 1, CCM_REG_FIC1_INIT_CRD,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "CCM: FIC1_INIT_CRD is not 64",
+       {NA, NA, 64, NA, NA, NA} },
+
+/*line 234*/{(0x1), 1, XCM_REG_XX_FREE,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "XCM: XX_FREE differs from expected 31",
+       {NA, NA, 31, NA, NA, NA} },
+
+/*line 235*/{(0x1E), 1, XCM_REG_XX_FREE,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "XCM: XX_FREE differs from expected 32",
+       {NA, NA, 32, NA, NA, NA} },
+
+/*line 236*/{(0x1F), 1, UCM_REG_XX_FREE,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "UCM: XX_FREE differs from expected 27",
+       {NA, NA, 27, NA, NA, NA} },
+
+/*line 237*/{(0x7), 1, TCM_REG_XX_FREE,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "TCM: XX_FREE differs from expected 32",
+       {NA, NA, 32, NA, NA, NA} },
+
+/*line 238*/{(0x18), 1, TCM_REG_XX_FREE,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "TCM: XX_FREE differs from expected 29",
+       {NA, NA, 29, NA, NA, NA} },
+
+/*line 239*/{(0x1F), 1, CCM_REG_XX_FREE,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "CCM: XX_FREE differs from expected 24",
+       {NA, NA, 24, NA, NA, NA} },
+
+/*line 240*/{(0x1F), 1, XSEM_REG_FAST_MEMORY + 0x18000,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "XSEM: FOC0 credit less than initial credit",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 241*/{(0x1F), 1, XSEM_REG_FAST_MEMORY + 0x18040,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "XSEM: FOC1 credit less than initial credit",
+       {NA, NA, 24, NA, NA, NA} },
+
+/*line 242*/{(0x1F), 1, XSEM_REG_FAST_MEMORY + 0x18080,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "XSEM: FOC2 credit less than initial credit",
+       {NA, NA, 12, NA, NA, NA} },
+
+/*line 243*/{(0x1F), 1, USEM_REG_FAST_MEMORY + 0x18000,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "USEM: FOC0 credit less than initial credit",
+       {NA, NA, 26, NA, NA, NA} },
+
+/*line 244*/{(0x1F), 1, USEM_REG_FAST_MEMORY + 0x18040,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "USEM: FOC1 credit less than initial credit",
+       {NA, NA, 78, NA, NA, NA} },
+
+/*line 245*/{(0x1F), 1, USEM_REG_FAST_MEMORY + 0x18080,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "USEM: FOC2 credit less than initial credit",
+       {NA, NA, 16, NA, NA, NA} },
+
+/*line 246*/{(0x1F), 1, USEM_REG_FAST_MEMORY + 0x180C0,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "USEM: FOC3 credit less than initial credit",
+       {NA, NA, 32, NA, NA, NA} },
+
+/*line 247*/{(0x1F), 1, TSEM_REG_FAST_MEMORY + 0x18000,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "TSEM: FOC0 credit less than initial credit",
+       {NA, NA, 52, NA, NA, NA} },
+
+/*line 248*/{(0x1F), 1, TSEM_REG_FAST_MEMORY + 0x18040,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "TSEM: FOC1 credit less than initial credit",
+       {NA, NA, 24, NA, NA, NA} },
+
+/*line 249*/{(0x1F), 1, TSEM_REG_FAST_MEMORY + 0x18080,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "TSEM: FOC2 credit less than initial credit",
+       {NA, NA, 12, NA, NA, NA} },
+
+/*line 250*/{(0x1F), 1, TSEM_REG_FAST_MEMORY + 0x180C0,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "TSEM: FOC3 credit less than initial credit",
+       {NA, NA, 32, NA, NA, NA} },
+
+/*line 251*/{(0x1F), 1, CSEM_REG_FAST_MEMORY + 0x18000,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "CSEM: FOC0 credit less than initial credit",
+       {NA, NA, 16, NA, NA, NA} },
+
+/*line 252*/{(0x1F), 1, CSEM_REG_FAST_MEMORY + 0x18040,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "CSEM: FOC1 credit less than initial credit",
+       {NA, NA, 18, NA, NA, NA} },
+
+/*line 253*/{(0x1F), 1, CSEM_REG_FAST_MEMORY + 0x18080,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "CSEM: FOC2 credit less than initial credit",
+       {NA, NA, 48, NA, NA, NA} },
+
+/*line 254*/{(0x1F), 1, CSEM_REG_FAST_MEMORY + 0x180C0,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "CSEM: FOC3 credit less than initial credit",
+       {NA, NA, 14, NA, NA, NA} },
+
+/*line 255*/{(0x1F), 1, PRS_REG_TSDM_CURRENT_CREDIT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PRS: TSDM current credit is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 256*/{(0x1F), 1, PRS_REG_TCM_CURRENT_CREDIT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PRS: TCM current credit is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 257*/{(0x1F), 1, PRS_REG_CFC_LD_CURRENT_CREDIT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PRS: CFC_LD current credit is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 258*/{(0x1F), 1, PRS_REG_CFC_SEARCH_CURRENT_CREDIT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PRS: CFC_SEARCH current credit is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 259*/{(0x1F), 1, PRS_REG_SRC_CURRENT_CREDIT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PRS: SRCH current credit is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 260*/{(0x1F), 1, PRS_REG_PENDING_BRB_PRS_RQ,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PRS: PENDING_BRB_PRS_RQ is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 261*/{(0x1F), 2, PRS_REG_PENDING_BRB_CAC0_RQ,
+       NA, 5, 4, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PRS: PENDING_BRB_CAC_RQ is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 262*/{(0x1F), 1, PRS_REG_SERIAL_NUM_STATUS_LSB,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PRS: SERIAL_NUM_STATUS_LSB is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 263*/{(0x1F), 1, PRS_REG_SERIAL_NUM_STATUS_MSB,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "PRS: SERIAL_NUM_STATUS_MSB is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 264*/{(0x1F), 1, CDU_REG_ERROR_DATA,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "CDU: ERROR_DATA is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 265*/{(0x1F), 1, CCM_REG_STORM_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "CCM: STORM declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 266*/{(0x1F), 1, CCM_REG_CSDM_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "CCM: CSDM declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 267*/{(0x1F), 1, CCM_REG_TSEM_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "CCM: TSEM declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 268*/{(0x1F), 1, CCM_REG_XSEM_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "CCM: XSEM declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 269*/{(0x1F), 1, CCM_REG_USEM_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "CCM: USEM declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 270*/{(0x1F), 1, CCM_REG_PBF_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "CCM: PBF declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 271*/{(0x1F), 1, TCM_REG_STORM_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "TCM: STORM declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 272*/{(0x1F), 1, TCM_REG_TSDM_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "TCM: TSDM declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 273*/{(0x1F), 1, TCM_REG_PRS_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "TCM: PRS declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 274*/{(0x1F), 1, TCM_REG_PBF_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "TCM: PBF declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 275*/{(0x1F), 1, TCM_REG_USEM_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "TCM: USEM declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 276*/{(0x1F), 1, TCM_REG_CSEM_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "TCM: CSEM declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 277*/{(0x1F), 1, UCM_REG_STORM_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "UCM: STORM declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 278*/{(0x1F), 1, UCM_REG_USDM_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "UCM: USDM declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 279*/{(0x1F), 1, UCM_REG_TSEM_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "UCM: TSEM declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 280*/{(0x1F), 1, UCM_REG_CSEM_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "UCM: CSEM declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 281*/{(0x1F), 1, UCM_REG_XSEM_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "UCM: XSEM declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 282*/{(0x1F), 1, UCM_REG_DORQ_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "UCM: DORQ declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 283*/{(0x1F), 1, XCM_REG_STORM_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "XCM: STORM declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 284*/{(0x1F), 1, XCM_REG_XSDM_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "XCM: XSDM declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 285*/{(0x1F), 1, XCM_REG_TSEM_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "XCM: TSEM declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 286*/{(0x1F), 1, XCM_REG_CSEM_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "XCM: CSEM declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 287*/{(0x1F), 1, XCM_REG_USEM_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "XCM: USEM declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 288*/{(0x1F), 1, XCM_REG_DORQ_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "XCM: DORQ declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 289*/{(0x1F), 1, XCM_REG_PBF_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "XCM: PBF declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 290*/{(0x1F), 1, XCM_REG_NIG0_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "XCM: NIG0 declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 291*/{(0x1F), 1, XCM_REG_NIG1_LENGTH_MIS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "XCM: NIG1 declared message length unequal to actual",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 292*/{(0x1F), 1, QM_REG_XQM_WRC_FIFOLVL,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: XQM wrc_fifolvl is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 293*/{(0x1F), 1, QM_REG_UQM_WRC_FIFOLVL,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: UQM wrc_fifolvl is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 294*/{(0x1F), 1, QM_REG_TQM_WRC_FIFOLVL,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: TQM wrc_fifolvl is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 295*/{(0x1F), 1, QM_REG_CQM_WRC_FIFOLVL,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: CQM wrc_fifolvl is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 296*/{(0x1F), 1, QM_REG_QSTATUS_LOW,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: QSTATUS_LOW is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 297*/{(0x1F), 1, QM_REG_QSTATUS_HIGH,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: QSTATUS_HIGH is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 298*/{(0x1F), 1, QM_REG_PAUSESTATE0,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: PAUSESTATE0 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 299*/{(0x1F), 1, QM_REG_PAUSESTATE1,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: PAUSESTATE1 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 300*/{(0x1F), 1, QM_REG_OVFQNUM,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "QM: OVFQNUM is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 301*/{(0x1F), 1, QM_REG_OVFERROR,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "QM: OVFERROR is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 302*/{(0x1F), 6, QM_REG_PTRTBL,
+       NA, 64, 8, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: read and write variables not equal",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 303*/{(0x1F), 1, BRB1_REG_BRB1_PRTY_STS,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "BRB1: parity status is not 0",
+       {NA, NA, ~0x8, 0, NA, NA} },
+
+/*line 304*/{(0x1F), 1, CDU_REG_CDU_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "CDU: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 305*/{(0x1F), 1, CFC_REG_CFC_PRTY_STS,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "CFC: parity status is not 0",
+       {NA, NA, ~0x2, 0, NA, NA} },
+
+/*line 306*/{(0x1F), 1, CSDM_REG_CSDM_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "CSDM: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 307*/{(0x3), 1, DBG_REG_DBG_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "DBG: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 308*/{(0x1F), 1, DMAE_REG_DMAE_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "DMAE: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 309*/{(0x1F), 1, DORQ_REG_DORQ_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "DORQ: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 310*/{(0x1), 1, TCM_REG_TCM_PRTY_STS,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "TCM: parity status is not 0",
+       {NA, NA, ~0x3ffc0, 0, NA, NA} },
+
+/*line 311*/{(0x1E), 1, TCM_REG_TCM_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "TCM: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 312*/{(0x1), 1, CCM_REG_CCM_PRTY_STS,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "CCM: parity status is not 0",
+       {NA, NA, ~0x3ffc0, 0, NA, NA} },
+
+/*line 313*/{(0x1E), 1, CCM_REG_CCM_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "CCM: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 314*/{(0x1), 1, UCM_REG_UCM_PRTY_STS,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "UCM: parity status is not 0",
+       {NA, NA, ~0x3ffc0, 0, NA, NA} },
+
+/*line 315*/{(0x1E), 1, UCM_REG_UCM_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "UCM: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 316*/{(0x1), 1, XCM_REG_XCM_PRTY_STS,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "XCM: parity status is not 0",
+       {NA, NA, ~0x3ffc0, 0, NA, NA} },
+
+/*line 317*/{(0x1E), 1, XCM_REG_XCM_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "XCM: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 318*/{(0x1), 1, HC_REG_HC_PRTY_STS,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "HC: parity status is not 0",
+       {NA, NA, ~0x1, 0, NA, NA} },
+
+/*line 319*/{(0x1), 1, MISC_REG_MISC_PRTY_STS,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "MISC: parity status is not 0",
+       {NA, NA, ~0x1, 0, NA, NA} },
+
+/*line 320*/{(0x1F), 1, PRS_REG_PRS_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PRS: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 321*/{(0x1F), 1, PXP_REG_PXP_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PXP: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 322*/{(0x1F), 1, QM_REG_QM_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "QM: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 323*/{(0x1), 1, SRC_REG_SRC_PRTY_STS,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "SRCH: parity status is not 0",
+       {NA, NA, ~0x4, 0, NA, NA} },
+
+/*line 324*/{(0x1F), 1, TSDM_REG_TSDM_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "TSDM: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 325*/{(0x1F), 1, USDM_REG_USDM_PRTY_STS,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "USDM: parity status is not 0",
+       {NA, NA, ~0x20, 0, NA, NA} },
+
+/*line 326*/{(0x1F), 1, XSDM_REG_XSDM_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "XSDM: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 327*/{(0x1F), 1, GRCBASE_XPB + PB_REG_PB_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "XPB: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 328*/{(0x1F), 1, GRCBASE_UPB + PB_REG_PB_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "UPB: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 329*/{(0x1F), 1, CSEM_REG_CSEM_PRTY_STS_0,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "CSEM: parity status 0 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 330*/{(0x1), 1, PXP2_REG_PXP2_PRTY_STS_0,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "PXP2: parity status 0 is not 0",
+       {NA, NA, ~0xfff40020, 0, NA, NA} },
+
+/*line 331*/{(0x1E), 1, PXP2_REG_PXP2_PRTY_STS_0,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "PXP2: parity status 0 is not 0",
+       {NA, NA, ~0x20, 0, NA, NA} },
+
+/*line 332*/{(0x1F), 1, TSEM_REG_TSEM_PRTY_STS_0,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "TSEM: parity status 0 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 333*/{(0x1F), 1, USEM_REG_USEM_PRTY_STS_0,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "USEM: parity status 0 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 334*/{(0x1F), 1, XSEM_REG_XSEM_PRTY_STS_0,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "XSEM: parity status 0 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 335*/{(0x1F), 1, CSEM_REG_CSEM_PRTY_STS_1,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "CSEM: parity status 1 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 336*/{(0x1), 1, PXP2_REG_PXP2_PRTY_STS_1,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "PXP2: parity status 1 is not 0",
+       {NA, NA, ~0x20, 0, NA, NA} },
+
+/*line 337*/{(0x1E), 1, PXP2_REG_PXP2_PRTY_STS_1,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PXP2: parity status 1 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 338*/{(0x1F), 1, TSEM_REG_TSEM_PRTY_STS_1,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "TSEM: parity status 1 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 339*/{(0x1F), 1, USEM_REG_USEM_PRTY_STS_1,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "USEM: parity status 1 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 340*/{(0x1F), 1, XSEM_REG_XSEM_PRTY_STS_1,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "XSEM: parity status 1 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 341*/{(0x1C), 1, PGLUE_B_REG_PGLUE_B_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGLUE_B: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 342*/{(0x2), 2, QM_REG_QTASKCTR_EXT_A_0,
+       NA, 64, 4, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: Q_EXT_A (upper 64 queues), Queue is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 343*/{(0x2), 1, QM_REG_QSTATUS_LOW_EXT_A,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "QM: QSTATUS_LOW_EXT_A is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 344*/{(0x2), 1, QM_REG_QSTATUS_HIGH_EXT_A,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "QM: QSTATUS_HIGH_EXT_A is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 345*/{(0x1E), 1, QM_REG_PAUSESTATE2,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: PAUSESTATE2 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 346*/{(0x1E), 1, QM_REG_PAUSESTATE3,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: PAUSESTATE3 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 347*/{(0x2), 1, QM_REG_PAUSESTATE4,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "QM: PAUSESTATE4 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 348*/{(0x2), 1, QM_REG_PAUSESTATE5,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "QM: PAUSESTATE5 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 349*/{(0x2), 1, QM_REG_PAUSESTATE6,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "QM: PAUSESTATE6 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 350*/{(0x2), 1, QM_REG_PAUSESTATE7,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "QM: PAUSESTATE7 is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 351*/{(0x2), 6, QM_REG_PTRTBL_EXT_A,
+       NA, 64, 8, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: read and write variables not equal in ext table",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 352*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_OCCURRED,
+       NA, NA, NA, pneq,
+       NA, IDLE_CHK_ERROR,
+       "MISC: system kill occurred;",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 353*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_STATUS_0,
+       NA, NA, NA, pneq,
+       NA, IDLE_CHK_ERROR,
+       "MISC: system kill occurred; status_0 register",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 354*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_STATUS_1,
+       NA, NA, NA, pneq,
+       NA, IDLE_CHK_ERROR,
+       "MISC: system kill occurred; status_1 register",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 355*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_STATUS_2,
+       NA, NA, NA, pneq,
+       NA, IDLE_CHK_ERROR,
+       "MISC: system kill occurred; status_2 register",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 356*/{(0x1E), 1, MISC_REG_AEU_SYS_KILL_STATUS_3,
+       NA, NA, NA, pneq,
+       NA, IDLE_CHK_ERROR,
+       "MISC: system kill occurred; status_3 register",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 357*/{(0x1E), 1, MISC_REG_PCIE_HOT_RESET,
+       NA, NA, NA, pneq,
+       NA, IDLE_CHK_WARNING,
+       "MISC: pcie_rst_b was asserted without perst assertion",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 358*/{(0x1F), 1, NIG_REG_NIG_INT_STS_0,
+       NA, NA, NA, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "NIG: interrupt 0 is active",
+       {NA, NA, ~0x300, 0, NA, NA} },
+
+/*line 359*/{(0x1F), 1, NIG_REG_NIG_INT_STS_0,
+       NA, NA, NA, peq,
+       NA, IDLE_CHK_WARNING,
+       "NIG: Access to BMAC while not active. If tested on FPGA, ignore this warning",
+       {NA, NA, 0x300, NA, NA, NA} },
+
+/*line 360*/{(0x1F), 1, NIG_REG_NIG_INT_STS_1,
+       NA, NA, NA, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "NIG: interrupt 1 is active",
+       {NA, NA, 0x783FF03, 0, NA, NA} },
+
+/*line 361*/{(0x1F), 1, NIG_REG_NIG_INT_STS_1,
+       NA, NA, NA, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "NIG: port cos was paused too long",
+       {NA, NA, ~0x783FF0F, 0, NA, NA} },
+
+/*line 362*/{(0x1F), 1, NIG_REG_NIG_INT_STS_1,
+       NA, NA, NA, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "NIG: Got packets w/o Outer-VLAN in MF mode",
+       {NA, NA, 0xC, 0, NA, NA} },
+
+/*line 363*/{(0x2), 1, NIG_REG_NIG_PRTY_STS,
+       NA, NA, NA, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "NIG: parity interrupt is active",
+       {NA, NA, ~0xFFC00000, 0, NA, NA} },
+
+/*line 364*/{(0x1C), 1, NIG_REG_NIG_PRTY_STS_0,
+       NA, NA, NA, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "NIG: parity 0 interrupt is active",
+       {NA, NA, ~0xFFC00000, 0, NA, NA} },
+
+/*line 365*/{(0x4), 1, NIG_REG_NIG_PRTY_STS_1,
+       NA, NA, NA, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "NIG: parity 1 interrupt is active",
+       {NA, NA, 0xff, 0, NA, NA} },
+
+/*line 366*/{(0x18), 1, NIG_REG_NIG_PRTY_STS_1,
+       NA, NA, NA, pneq,
+       NA, IDLE_CHK_ERROR,
+       "NIG: parity 1 interrupt is active",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 367*/{(0x1F), 1, TSEM_REG_TSEM_INT_STS_0,
+       NA, NA, NA, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "TSEM: interrupt 0 is active",
+       {NA, NA, ~0x10000000, 0, NA, NA} },
+
+/*line 368*/{(0x1F), 1, TSEM_REG_TSEM_INT_STS_0,
+       NA, NA, NA, peq,
+       NA, IDLE_CHK_WARNING,
+       "TSEM: interrupt 0 is active",
+       {NA, NA, 0x10000000, NA, NA, NA} },
+
+/*line 369*/{(0x1F), 1, TSEM_REG_TSEM_INT_STS_1,
+       NA, NA, NA, pneq,
+       NA, IDLE_CHK_ERROR,
+       "TSEM: interrupt 1 is active",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 370*/{(0x1F), 1, CSEM_REG_CSEM_INT_STS_0,
+       NA, NA, NA, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "CSEM: interrupt 0 is active",
+       {NA, NA, ~0x10000000, 0, NA, NA} },
+
+/*line 371*/{(0x1F), 1, CSEM_REG_CSEM_INT_STS_0,
+       NA, NA, NA, peq,
+       NA, IDLE_CHK_WARNING,
+       "CSEM: interrupt 0 is active",
+       {NA, NA, 0x10000000, NA, NA, NA} },
+
+/*line 372*/{(0x1F), 1, CSEM_REG_CSEM_INT_STS_1,
+       NA, NA, NA, pneq,
+       NA, IDLE_CHK_ERROR,
+       "CSEM: interrupt 1 is active",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 373*/{(0x1F), 1, USEM_REG_USEM_INT_STS_0,
+       NA, NA, NA, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "USEM: interrupt 0 is active",
+       {NA, NA, ~0x10000000, 0, NA, NA} },
+
+/*line 374*/{(0x1F), 1, USEM_REG_USEM_INT_STS_0,
+       NA, NA, NA, peq,
+       NA, IDLE_CHK_WARNING,
+       "USEM: interrupt 0 is active",
+       {NA, NA, 0x10000000, NA, NA, NA} },
+
+/*line 375*/{(0x1F), 1, USEM_REG_USEM_INT_STS_1,
+       NA, NA, NA, pneq,
+       NA, IDLE_CHK_ERROR,
+       "USEM: interrupt 1 is active",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 376*/{(0x1F), 1, XSEM_REG_XSEM_INT_STS_0,
+       NA, NA, NA, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "XSEM: interrupt 0 is active",
+       {NA, NA, ~0x10000000, 0, NA, NA} },
+
+/*line 377*/{(0x1F), 1, XSEM_REG_XSEM_INT_STS_0,
+       NA, NA, NA, peq,
+       NA, IDLE_CHK_WARNING,
+       "XSEM: interrupt 0 is active",
+       {NA, NA, 0x10000000, NA, NA, NA} },
+
+/*line 378*/{(0x1F), 1, XSEM_REG_XSEM_INT_STS_1,
+       NA, NA, NA, pneq,
+       NA, IDLE_CHK_ERROR,
+       "XSEM: interrupt 1 is active",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 379*/{(0x1F), 1, TSDM_REG_TSDM_INT_STS_0,
+       NA, NA, NA, pneq,
+       NA, IDLE_CHK_ERROR,
+       "TSDM: interrupt 0 is active",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 380*/{(0x1F), 1, TSDM_REG_TSDM_INT_STS_1,
+       NA, NA, NA, pneq,
+       NA, IDLE_CHK_ERROR,
+       "TSDM: interrupt 0 is active",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 381*/{(0x1F), 1, CSDM_REG_CSDM_INT_STS_0,
+       NA, NA, NA, pneq,
+       NA, IDLE_CHK_ERROR,
+       "CSDM: interrupt 0 is active",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 382*/{(0x1F), 1, CSDM_REG_CSDM_INT_STS_1,
+       NA, NA, NA, pneq,
+       NA, IDLE_CHK_ERROR,
+       "CSDM: interrupt 0 is active",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 383*/{(0x1F), 1, USDM_REG_USDM_INT_STS_0,
+       NA, NA, NA, pneq,
+       NA, IDLE_CHK_ERROR,
+       "USDM: interrupt 0 is active",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 384*/{(0x1F), 1, USDM_REG_USDM_INT_STS_1,
+       NA, NA, NA, pneq,
+       NA, IDLE_CHK_ERROR,
+       "USDM: interrupt 0 is active",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 385*/{(0x1F), 1, XSDM_REG_XSDM_INT_STS_0,
+       NA, NA, NA, pneq,
+       NA, IDLE_CHK_ERROR,
+       "XSDM: interrupt 0 is active",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 386*/{(0x1F), 1, XSDM_REG_XSDM_INT_STS_1,
+       NA, NA, NA, pneq,
+       NA, IDLE_CHK_ERROR,
+       "XSDM: interrupt 0 is active",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 387*/{(0x2), 1, HC_REG_HC_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "HC: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 388*/{(0x1E), 1, MISC_REG_MISC_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "MISC: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 389*/{(0x1E), 1, SRC_REG_SRC_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "SRCH: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 390*/{(0xC), 3, QM_REG_BYTECRD0,
+       QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: Byte credit 0 is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 391*/{(0xC), 3, QM_REG_BYTECRD1,
+       QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: Byte credit 1 is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 392*/{(0xC), 3, QM_REG_BYTECRD2,
+       QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: Byte credit 2 is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 393*/{(0x1C), 1, QM_REG_VOQCRDERRREG,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "QM: VOQ credit error register is not 0 (VOQ credit overflow/underflow)",
+       {NA, NA, 0xFFFF, 0, NA, NA} },
+
+/*line 394*/{(0x1C), 1, QM_REG_BYTECRDERRREG,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "QM: Byte credit error register is not 0 (Byte credit overflow/underflow)",
+       {NA, NA, 0xFFF, 0, NA, NA} },
+
+/*line 395*/{(0x1C), 1, PGLUE_B_REG_FLR_REQUEST_VF_31_0,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGL: FLR request is set for VF addresses 31-0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 396*/{(0x1C), 1, PGLUE_B_REG_FLR_REQUEST_VF_63_32,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGL: FLR request is set for VF addresses 63-32",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 397*/{(0x1C), 1, PGLUE_B_REG_FLR_REQUEST_VF_95_64,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGL: FLR request is set for VF addresses 95-64",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 398*/{(0x1C), 1, PGLUE_B_REG_FLR_REQUEST_VF_127_96,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGL: FLR request is set for VF addresses 127-96",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 399*/{(0x1C), 1, PGLUE_B_REG_FLR_REQUEST_PF_7_0,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGL: FLR request is set for PF addresses 7-0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 400*/{(0x1C), 1, PGLUE_B_REG_SR_IOV_DISABLED_REQUEST,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGL: SR-IOV disable request is set",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 401*/{(0x1C), 1, PGLUE_B_REG_CFG_SPACE_A_REQUEST,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGL: Cfg-Space A request is set",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 402*/{(0x1C), 1, PGLUE_B_REG_CFG_SPACE_B_REQUEST,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "PGL: Cfg-Space B request is set",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 403*/{(0x1C), 1, IGU_REG_ERROR_HANDLING_DATA_VALID,
+       NA, NA, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "IGU: some unauthorized commands arrived to the IGU. Use igu_dump_fifo utility for more details",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 404*/{(0x1C), 1, IGU_REG_ATTN_WRITE_DONE_PENDING,
+       NA, NA, NA, pneq,
+       NA, IDLE_CHK_WARNING,
+       "IGU attention message write done pending is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 405*/{(0x1C), 1, IGU_REG_WRITE_DONE_PENDING,
+       NA, 5, 4, pneq,
+       NA, IDLE_CHK_WARNING,
+       "IGU MSI/MSIX message write done pending is not empty",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 406*/{(0x1C), 1, IGU_REG_IGU_PRTY_STS,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "IGU: parity status is not 0",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 407*/{(0x1E), 3, MISC_REG_GRC_TIMEOUT_ATTN,
+       MISC_REG_AEU_AFTER_INVERT_4_FUNC_0, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "MISC_REG_GRC_TIMEOUT_ATTN: GRC timeout attention parameters (FUNC_0)",
+       {NA, NA, 0x4000000, 0, NA, NA} },
+
+/*line 408*/{(0x1C), 3, MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID,
+       MISC_REG_AEU_AFTER_INVERT_4_FUNC_0, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID: GRC timeout attention FID (FUNC_0)",
+       {NA, NA, 0x4000000, 0, NA, NA} },
+
+/*line 409*/{(0x1E), 3, MISC_REG_GRC_TIMEOUT_ATTN,
+       MISC_REG_AEU_AFTER_INVERT_4_FUNC_1, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "MISC_REG_GRC_TIMEOUT_ATTN: GRC timeout attention parameters (FUNC_1)",
+       {NA, NA, 0x4000000, 0, NA, NA} },
+
+/*line 410*/{(0x1C), 3, MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID,
+       MISC_REG_AEU_AFTER_INVERT_4_FUNC_1, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID: GRC timeout attention FID (FUNC_1)",
+       {NA, NA, 0x4000000, 0, NA, NA} },
+
+/*line 411*/{(0x1E), 3, MISC_REG_GRC_TIMEOUT_ATTN,
+       MISC_REG_AEU_AFTER_INVERT_4_MCP, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "MISC_REG_GRC_TIMEOUT_ATTN: GRC timeout attention parameters (MCP)",
+       {NA, NA, 0x4000000, 0, NA, NA} },
+
+/*line 412*/{(0x1C), 3, MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID,
+       MISC_REG_AEU_AFTER_INVERT_4_MCP, 1, 0, pand_neq,
+       NA, IDLE_CHK_ERROR,
+       "MISC_REG_GRC_TIMEOUT_ATTN_FULL_FID: GRC timeout attention FID (MCP)",
+       {NA, NA, 0x4000000, 0, NA, NA} },
+
+/*line 413*/{(0x1C), 1, IGU_REG_SILENT_DROP,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "Some messages were not executed in the IGU",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 414*/{(0x1C), 1, PXP2_REG_PSWRQ_BW_CREDIT,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR,
+       "PXP2: rq_read_credit and rq_write_credit are not 5",
+       {NA, NA, 0x2D, NA, NA, NA} },
+
+/*line 415*/{(0x1C), 1, IGU_REG_SB_CTRL_FSM,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "IGU: block is not in idle. SB_CTRL_FSM should be zero in idle state",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 416*/{(0x1C), 1, IGU_REG_INT_HANDLE_FSM,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "IGU: block is not in idle. INT_HANDLE_FSM should be zero in idle state",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 417*/{(0x1C), 1, IGU_REG_ATTN_FSM,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "IGU: block is not in idle. SB_ATTN_FSMshould be zeroor two in idle state",
+       {NA, NA, ~0x2, 0, NA, NA} },
+
+/*line 418*/{(0x1C), 1, IGU_REG_CTRL_FSM,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "IGU: block is not in idle. SB_CTRL_FSM should be zero in idle state",
+       {NA, NA, ~0x1, 0, NA, NA} },
+
+/*line 419*/{(0x1C), 1, IGU_REG_PXP_ARB_FSM,
+       NA, 1, 0, pand_neq,
+       NA, IDLE_CHK_WARNING,
+       "IGU: block is not in idle. SB_ARB_FSM should be zero in idle state",
+       {NA, NA, ~0x1, 0, NA, NA} },
+
+/*line 420*/{(0x1C), 1, IGU_REG_PENDING_BITS_STATUS,
+       NA, 5, 4, pneq,
+       NA, IDLE_CHK_WARNING,
+       "IGU: block is not in idle. There are pending write done",
+       {NA, NA, 0, NA, NA, NA} },
+
+/*line 421*/{(0x10), 3, QM_REG_VOQCREDIT_0,
+       QM_REG_VOQINITCREDIT_0, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: VOQ_0, VOQ credit is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 422*/{(0x10), 3, QM_REG_VOQCREDIT_1,
+       QM_REG_VOQINITCREDIT_1, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: VOQ_1, VOQ credit is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 423*/{(0x10), 3, QM_REG_VOQCREDIT_2,
+       QM_REG_VOQINITCREDIT_2, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: VOQ_2, VOQ credit is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 424*/{(0x10), 3, QM_REG_VOQCREDIT_3,
+       QM_REG_VOQINITCREDIT_3, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: VOQ_3, VOQ credit is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 425*/{(0x10), 3, QM_REG_VOQCREDIT_4,
+       QM_REG_VOQINITCREDIT_4, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: VOQ_4, VOQ credit is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 426*/{(0x10), 3, QM_REG_VOQCREDIT_5,
+       QM_REG_VOQINITCREDIT_5, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: VOQ_5, VOQ credit is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 427*/{(0x10), 3, QM_REG_VOQCREDIT_6,
+       QM_REG_VOQINITCREDIT_6, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: VOQ_6 (LB VOQ), VOQ credit is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 428*/{(0x10), 3, QM_REG_BYTECRD0,
+       QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: Byte credit 0 is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 429*/{(0x10), 3, QM_REG_BYTECRD1,
+       QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: Byte credit 1 is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 430*/{(0x10), 3, QM_REG_BYTECRD2,
+       QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: Byte credit 2 is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 431*/{(0x10), 3, QM_REG_BYTECRD3,
+       QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: Byte credit 3 is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 432*/{(0x10), 3, QM_REG_BYTECRD4,
+       QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: Byte credit 4 is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 433*/{(0x10), 3, QM_REG_BYTECRD5,
+       QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: Byte credit 5 is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 434*/{(0x10), 3, QM_REG_BYTECRD6,
+       QM_REG_BYTECRDINITVAL, 1, 0, pneq_r2,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "QM: Byte credit 6 is not equal to initial credit",
+       {NA, NA, NA, NA, NA, NA} },
+
+/*line 435*/{(0x10), 1, QM_REG_FWVOQ0TOHWVOQ,
+       NA, 1, 0, peq,
+       NA, IDLE_CHK_ERROR,
+       "QM: FwVoq0 is mapped to HwVoq7 (non-TX HwVoq)",
+       {NA, NA, 0x7, NA, NA, NA} },
+
+/*line 436*/{(0x10), 1, QM_REG_FWVOQ1TOHWVOQ,
+       NA, 1, 0, peq,
+       NA, IDLE_CHK_ERROR,
+       "QM: FwVoq1 is mapped to HwVoq7 (non-TX HwVoq)",
+       {NA, NA, 0x7, NA, NA, NA} },
+
+/*line 437*/{(0x10), 1, QM_REG_FWVOQ2TOHWVOQ,
+       NA, 1, 0, peq,
+       NA, IDLE_CHK_ERROR,
+       "QM: FwVoq2 is mapped to HwVoq7 (non-TX HwVoq)",
+       {NA, NA, 0x7, NA, NA, NA} },
+
+/*line 438*/{(0x10), 1, QM_REG_FWVOQ3TOHWVOQ,
+       NA, 1, 0, peq,
+       NA, IDLE_CHK_ERROR,
+       "QM: FwVoq3 is mapped to HwVoq7 (non-TX HwVoq)",
+       {NA, NA, 0x7, NA, NA, NA} },
+
+/*line 439*/{(0x10), 1, QM_REG_FWVOQ4TOHWVOQ,
+       NA, 1, 0, peq,
+       NA, IDLE_CHK_ERROR,
+       "QM: FwVoq4 is mapped to HwVoq7 (non-TX HwVoq)",
+       {NA, NA, 0x7, NA, NA, NA} },
+
+/*line 440*/{(0x10), 1, QM_REG_FWVOQ5TOHWVOQ,
+       NA, 1, 0, peq,
+       NA, IDLE_CHK_ERROR,
+       "QM: FwVoq5 is mapped to HwVoq7 (non-TX HwVoq)",
+       {NA, NA, 0x7, NA, NA, NA} },
+
+/*line 441*/{(0x10), 1, QM_REG_FWVOQ6TOHWVOQ,
+       NA, 1, 0, peq,
+       NA, IDLE_CHK_ERROR,
+       "QM: FwVoq6 is mapped to HwVoq7 (non-TX HwVoq)",
+       {NA, NA, 0x7, NA, NA, NA} },
+
+/*line 442*/{(0x10), 1, QM_REG_FWVOQ7TOHWVOQ,
+       NA, 1, 0, peq,
+       NA, IDLE_CHK_ERROR,
+       "QM: FwVoq7 is mapped to HwVoq7 (non-TX HwVoq)",
+       {NA, NA, 0x7, NA, NA, NA} },
+
+/*line 443*/{(0x1F), 1, NIG_REG_INGRESS_EOP_PORT0_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "NIG: Port 0 EOP FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 444*/{(0x1F), 1, NIG_REG_INGRESS_EOP_PORT1_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "NIG: Port 1 EOP FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 445*/{(0x1F), 1, NIG_REG_INGRESS_EOP_LB_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "NIG: LB EOP FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 446*/{(0x1F), 1, NIG_REG_INGRESS_RMP0_DSCR_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "NIG: Port 0 RX MCP descriptor FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 447*/{(0x1F), 1, NIG_REG_INGRESS_RMP1_DSCR_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "NIG: Port 1 RX MCP descriptor FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 448*/{(0x1F), 1, NIG_REG_INGRESS_LB_PBF_DELAY_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "NIG: PBF LB FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 449*/{(0x1F), 1, NIG_REG_EGRESS_MNG0_FIFO_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "NIG: Port 0 TX MCP FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 450*/{(0x1F), 1, NIG_REG_EGRESS_MNG1_FIFO_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "NIG: Port 1 TX MCP FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 451*/{(0x1F), 1, NIG_REG_EGRESS_DEBUG_FIFO_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "NIG: Debug FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 452*/{(0x1F), 1, NIG_REG_EGRESS_DELAY0_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "NIG: PBF IF0 FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 453*/{(0x1F), 1, NIG_REG_EGRESS_DELAY1_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "NIG: PBF IF1 FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 454*/{(0x1F), 1, NIG_REG_LLH0_FIFO_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "NIG: Port 0 RX LLH FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 455*/{(0x1F), 1, NIG_REG_LLH1_FIFO_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "NIG: Port 1 RX LLH FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 456*/{(0x1C), 1, NIG_REG_P0_TX_MNG_HOST_FIFO_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "NIG: Port 0 TX MCP FIFO for traffic going to the host is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 457*/{(0x1C), 1, NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "NIG: Port 1 TX MCP FIFO for traffic going to the host is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 458*/{(0x1C), 1, NIG_REG_P0_TLLH_FIFO_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "NIG: Port 0 TX LLH FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 459*/{(0x1C), 1, NIG_REG_P1_TLLH_FIFO_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "NIG: Port 1 TX LLH FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 460*/{(0x1C), 1, NIG_REG_P0_HBUF_DSCR_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "NIG: Port 0 RX MCP descriptor FIFO for traffic from the host is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 461*/{(0x1C), 1, NIG_REG_P1_HBUF_DSCR_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_WARNING,
+       "NIG: Port 1 RX MCP descriptor FIFO for traffic from the host is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 462*/{(0x18), 1, NIG_REG_P0_RX_MACFIFO_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "NIG: Port 0 RX MAC interface FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 463*/{(0x18), 1, NIG_REG_P1_RX_MACFIFO_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "NIG: Port 1 RX MAC interface FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 464*/{(0x18), 1, NIG_REG_P0_TX_MACFIFO_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "NIG: Port 0 TX MAC interface FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 465*/{(0x18), 1, NIG_REG_P1_TX_MACFIFO_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "NIG: Port 1 TX MAC interface FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 466*/{(0x10), 1, NIG_REG_EGRESS_DELAY2_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "NIG: PBF IF2 FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 467*/{(0x10), 1, NIG_REG_EGRESS_DELAY3_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "NIG: PBF IF3 FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 468*/{(0x10), 1, NIG_REG_EGRESS_DELAY4_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "NIG: PBF IF4 FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+
+/*line 469*/{(0x10), 1, NIG_REG_EGRESS_DELAY5_EMPTY,
+       NA, 1, 0, pneq,
+       NA, IDLE_CHK_ERROR_NO_TRAFFIC,
+       "NIG: PBF IF5 FIFO is not empty",
+       {NA, NA, 1, NA, NA, NA} },
+};
+
+/* handle self test fails according to severity and type */
+static void bnx2x_self_test_log(struct bnx2x *bp, u8 severity, char *message)
+{
+       switch (severity) {
+       case IDLE_CHK_ERROR:
+               BNX2X_ERR("ERROR %s", message);
+               idle_chk_errors++;
+               break;
+       case IDLE_CHK_ERROR_NO_TRAFFIC:
+               DP(NETIF_MSG_HW, "INFO %s", message);
+               break;
+       case IDLE_CHK_WARNING:
+               DP(NETIF_MSG_HW, "WARNING %s", message);
+               idle_chk_warnings++;
+               break;
+       }
+}
+
+/* specific test for QM rd/wr pointers and rd/wr banks */
+static void bnx2x_idle_chk6(struct bnx2x *bp,
+                           struct st_record *rec, char *message)
+{
+       u32 rd_ptr, wr_ptr, rd_bank, wr_bank;
+       int i;
+
+       for (i = 0; i < rec->loop; i++) {
+               /* read regs */
+               rec->pred_args.val1 =
+                       REG_RD(bp, rec->reg1 + i * rec->incr);
+               rec->pred_args.val2 =
+                       REG_RD(bp, rec->reg1 + i * rec->incr + 4);
+
+               /* calc read and write pointers */
+               rd_ptr = ((rec->pred_args.val1 & 0x3FFFFFC0) >> 6);
+               wr_ptr = ((((rec->pred_args.val1 & 0xC0000000) >> 30) & 0x3) |
+                       ((rec->pred_args.val2 & 0x3FFFFF) << 2));
+
+               /* perform pointer test */
+               if (rd_ptr != wr_ptr) {
+                       snprintf(message, MAX_FAIL_MSG,
+                                "QM: PTRTBL entry %d- rd_ptr is not equal to wr_ptr. Values are 0x%x and 0x%x\n",
+                                i, rd_ptr, wr_ptr);
+                       bnx2x_self_test_log(bp, rec->severity, message);
+               }
+
+               /* calculate read and write banks */
+               rd_bank = ((rec->pred_args.val1 & 0x30) >> 4);
+               wr_bank = (rec->pred_args.val1 & 0x03);
+
+               /* perform bank test */
+               if (rd_bank != wr_bank) {
+                       snprintf(message, MAX_FAIL_MSG,
+                                "QM: PTRTBL entry %d - rd_bank is not equal to wr_bank. Values are 0x%x 0x%x\n",
+                                i, rd_bank, wr_bank);
+                       bnx2x_self_test_log(bp, rec->severity, message);
+               }
+       }
+}
+
+/* specific test for cfc info ram and cid cam */
+static void bnx2x_idle_chk7(struct bnx2x *bp,
+                           struct st_record *rec, char *message)
+{
+       int i;
+
+       /* iterate through lcids */
+       for (i = 0; i < rec->loop; i++) {
+               /* make sure cam entry is valid (bit 0) */
+               if ((REG_RD(bp, (rec->reg2 + i * 4)) & 0x1) != 0x1)
+                       continue;
+
+               /* get connection type (multiple reads due to widebus) */
+               REG_RD(bp, (rec->reg1 + i * rec->incr));
+               REG_RD(bp, (rec->reg1 + i * rec->incr + 4));
+               rec->pred_args.val1 =
+                       REG_RD(bp, (rec->reg1 + i * rec->incr + 8));
+               REG_RD(bp, (rec->reg1 + i * rec->incr + 12));
+
+               /* obtain connection type */
+               if (is_e1 || is_e1h) {
+                       /* E1 E1H (bits 4..7) */
+                       rec->pred_args.val1 &= 0x78;
+                       rec->pred_args.val1 >>= 3;
+               } else {
+                       /* E2 E3A0 E3B0 (bits 26..29) */
+                       rec->pred_args.val1 &= 0x1E000000;
+                       rec->pred_args.val1 >>= 25;
+               }
+
+               /* get activity counter value */
+               rec->pred_args.val2 = REG_RD(bp, rec->reg3 + i * 4);
+
+               /* validate ac value is legal for con_type at idle state */
+               if (rec->bnx2x_predicate(&rec->pred_args)) {
+                       snprintf(message, MAX_FAIL_MSG,
+                                "%s. Values are 0x%x 0x%x\n", rec->fail_msg,
+                                rec->pred_args.val1, rec->pred_args.val2);
+                       bnx2x_self_test_log(bp, rec->severity, message);
+               }
+       }
+}
+
+/* self test procedure
+ * scan auto-generated database
+ * for each line:
+ * 1.  compare chip mask
+ * 2.  determine type (according to macro number)
+ * 3.  read registers
+ * 4.  call predicate
+ * 5.  collate results and statistics
+ */
+int bnx2x_idle_chk(struct bnx2x *bp)
+{
+       u16 i;                          /* loop counter */
+       u16 st_ind;                     /* self test database access index */
+       struct st_record rec;           /* current record variable */
+       char message[MAX_FAIL_MSG];     /* message to log */
+
+       /*init stats*/
+       idle_chk_errors = 0;
+       idle_chk_warnings = 0;
+
+       /*create masks for all chip types*/
+       is_e1   = CHIP_IS_E1(bp);
+       is_e1h  = CHIP_IS_E1H(bp);
+       is_e2   = CHIP_IS_E2(bp);
+       is_e3a0 = CHIP_IS_E3A0(bp);
+       is_e3b0 = CHIP_IS_E3B0(bp);
+
+       /*database main loop*/
+       for (st_ind = 0; st_ind < ST_DB_LINES; st_ind++) {
+               rec = st_database[st_ind];
+
+               /*check if test applies to chip*/
+               if (!((rec.chip_mask & IDLE_CHK_E1) && is_e1) &&
+                   !((rec.chip_mask & IDLE_CHK_E1H) && is_e1h) &&
+                   !((rec.chip_mask & IDLE_CHK_E2) && is_e2) &&
+                   !((rec.chip_mask & IDLE_CHK_E3A0) && is_e3a0) &&
+                   !((rec.chip_mask & IDLE_CHK_E3B0) && is_e3b0))
+                       continue;
+
+               /* identify macro */
+               switch (rec.macro) {
+               case 1:
+                       /* read single reg and call predicate */
+                       rec.pred_args.val1 = REG_RD(bp, rec.reg1);
+                       DP(BNX2X_MSG_IDLE, "mac1 add %x\n", rec.reg1);
+                       if (rec.bnx2x_predicate(&rec.pred_args)) {
+                               snprintf(message, sizeof(message),
+                                        "%s.Value is 0x%x\n", rec.fail_msg,
+                                        rec.pred_args.val1);
+                               bnx2x_self_test_log(bp, rec.severity, message);
+                       }
+                       break;
+               case 2:
+                       /* read repeatedly starting from reg1 and call
+                        * predicate after each read
+                        */
+                       for (i = 0; i < rec.loop; i++) {
+                               rec.pred_args.val1 =
+                                       REG_RD(bp, rec.reg1 + i * rec.incr);
+                               DP(BNX2X_MSG_IDLE, "mac2 add %x\n", rec.reg1);
+                               if (rec.bnx2x_predicate(&rec.pred_args)) {
+                                       snprintf(message, sizeof(message),
+                                                "%s. Value is 0x%x in loop %d\n",
+                                                rec.fail_msg,
+                                                rec.pred_args.val1, i);
+                                       bnx2x_self_test_log(bp, rec.severity,
+                                                           message);
+                               }
+                       }
+                       break;
+               case 3:
+                       /* read two regs and call predicate */
+                       rec.pred_args.val1 = REG_RD(bp, rec.reg1);
+                       rec.pred_args.val2 = REG_RD(bp, rec.reg2);
+                       DP(BNX2X_MSG_IDLE, "mac3 add1 %x add2 %x\n",
+                          rec.reg1, rec.reg2);
+                       if (rec.bnx2x_predicate(&rec.pred_args)) {
+                               snprintf(message, sizeof(message),
+                                        "%s. Values are 0x%x 0x%x\n",
+                                        rec.fail_msg, rec.pred_args.val1,
+                                        rec.pred_args.val2);
+                               bnx2x_self_test_log(bp, rec.severity, message);
+                       }
+                       break;
+               case 4:
+                       /*unused to-date*/
+                       for (i = 0; i < rec.loop; i++) {
+                               rec.pred_args.val1 =
+                                       REG_RD(bp, rec.reg1 + i * rec.incr);
+                               rec.pred_args.val2 =
+                                       (REG_RD(bp,
+                                               rec.reg2 + i * rec.incr)) >> 1;
+                               if (rec.bnx2x_predicate(&rec.pred_args)) {
+                                       snprintf(message, sizeof(message),
+                                                "%s. Values are 0x%x 0x%x in loop %d\n",
+                                                rec.fail_msg,
+                                                rec.pred_args.val1,
+                                                rec.pred_args.val2, i);
+                                       bnx2x_self_test_log(bp, rec.severity,
+                                                           message);
+                               }
+                       }
+                       break;
+               case 5:
+                       /* compare two regs, pending
+                        * the value of a condition reg
+                        */
+                       rec.pred_args.val1 = REG_RD(bp, rec.reg1);
+                       rec.pred_args.val2 = REG_RD(bp, rec.reg2);
+                       DP(BNX2X_MSG_IDLE, "mac3 add1 %x add2 %x add3 %x\n",
+                          rec.reg1, rec.reg2, rec.reg3);
+                       if (REG_RD(bp, rec.reg3) != 0) {
+                               if (rec.bnx2x_predicate(&rec.pred_args)) {
+                                       snprintf(message, sizeof(message),
+                                                "%s. Values are 0x%x 0x%x\n",
+                                                rec.fail_msg,
+                                                rec.pred_args.val1,
+                                                rec.pred_args.val2);
+                                       bnx2x_self_test_log(bp, rec.severity,
+                                                           message);
+                               }
+                       }
+                       break;
+               case 6:
+                       /* compare read and write pointers
+                        * and read and write banks in QM
+                        */
+                       bnx2x_idle_chk6(bp, &rec, message);
+                       break;
+               case 7:
+                       /* compare cfc info cam with cid cam */
+                       bnx2x_idle_chk7(bp, &rec, message);
+                       break;
+               default:
+                       DP(BNX2X_MSG_IDLE,
+                          "unknown macro in self test data base. macro %d line %d",
+                          rec.macro, st_ind);
+               }
+       }
+
+       /* abort if interface is not running */
+       if (!netif_running(bp->dev))
+               return idle_chk_errors;
+
+       /* return value according to statistics */
+       if (idle_chk_errors == 0) {
+               DP(BNX2X_MSG_IDLE,
+                  "completed successfully (logged %d warnings)\n",
+                  idle_chk_warnings);
+       } else {
+               BNX2X_ERR("failed (with %d errors, %d warnings)\n",
+                         idle_chk_errors, idle_chk_warnings);
+       }
+       return idle_chk_errors;
+}
index 7e0919a..0b193ed 100644 (file)
@@ -23,6 +23,8 @@
 #include "bnx2x_cmn.h"
 #include "bnx2x_sriov.h"
 
+extern const u32 dmae_reg_go_c[];
+
 /* Statistics */
 
 /*
index c62589c..a7e5ebe 100644 (file)
@@ -1614,7 +1614,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
                skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
 
        if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
-           (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+           (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
                u16 vlan_proto = tpa_info->metadata >>
                        RX_CMP_FLAGS2_METADATA_TPID_SFT;
                u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
@@ -1832,7 +1832,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 
        if ((rxcmp1->rx_cmp_flags2 &
             cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
-           (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+           (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
                u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
                u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
                u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
@@ -3538,7 +3538,7 @@ static void bnxt_free_vnic_attributes(struct bnxt *bp)
                }
 
                if (vnic->rss_table) {
-                       dma_free_coherent(&pdev->dev, PAGE_SIZE,
+                       dma_free_coherent(&pdev->dev, vnic->rss_table_size,
                                          vnic->rss_table,
                                          vnic->rss_table_dma_addr);
                        vnic->rss_table = NULL;
@@ -3603,7 +3603,13 @@ vnic_skip_grps:
                        continue;
 
                /* Allocate rss table and hash key */
-               vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+               size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
+               if (bp->flags & BNXT_FLAG_CHIP_P5)
+                       size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
+
+               vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
+               vnic->rss_table = dma_alloc_coherent(&pdev->dev,
+                                                    vnic->rss_table_size,
                                                     &vnic->rss_table_dma_addr,
                                                     GFP_KERNEL);
                if (!vnic->rss_table) {
@@ -3611,8 +3617,6 @@ vnic_skip_grps:
                        goto out;
                }
 
-               size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
-
                vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
                vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
        }
@@ -4505,10 +4509,12 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
 
        switch (tunnel_type) {
        case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
-               req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
+               req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
+               bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
                break;
        case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
-               req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
+               req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
+               bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
                break;
        default:
                break;
@@ -4543,10 +4549,11 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
 
        switch (tunnel_type) {
        case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
-               bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
+               bp->vxlan_fw_dst_port_id =
+                       le16_to_cpu(resp->tunnel_dst_port_id);
                break;
        case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
-               bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
+               bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
                break;
        default:
                break;
@@ -4826,9 +4833,112 @@ static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
        }
 }
 
+static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
+{
+       int entries;
+
+       if (bp->flags & BNXT_FLAG_CHIP_P5)
+               entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
+       else
+               entries = HW_HASH_INDEX_SIZE;
+
+       bp->rss_indir_tbl_entries = entries;
+       bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
+                                         GFP_KERNEL);
+       if (!bp->rss_indir_tbl)
+               return -ENOMEM;
+       return 0;
+}
+
+static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
+{
+       u16 max_rings, max_entries, pad, i;
+
+       if (!bp->rx_nr_rings)
+               return;
+
+       if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+               max_rings = bp->rx_nr_rings - 1;
+       else
+               max_rings = bp->rx_nr_rings;
+
+       max_entries = bnxt_get_rxfh_indir_size(bp->dev);
+
+       for (i = 0; i < max_entries; i++)
+               bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
+
+       pad = bp->rss_indir_tbl_entries - max_entries;
+       if (pad)
+               memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
+}
+
+static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
+{
+       u16 i, tbl_size, max_ring = 0;
+
+       if (!bp->rss_indir_tbl)
+               return 0;
+
+       tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+       for (i = 0; i < tbl_size; i++)
+               max_ring = max(max_ring, bp->rss_indir_tbl[i]);
+       return max_ring;
+}
+
+int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
+{
+       if (bp->flags & BNXT_FLAG_CHIP_P5)
+               return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+       if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+               return 2;
+       return 1;
+}
+
+static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+       bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
+       u16 i, j;
+
+       /* Fill the RSS indirection table with ring group ids */
+       for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
+               if (!no_rss)
+                       j = bp->rss_indir_tbl[i];
+               vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
+       }
+}
+
+static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
+                                     struct bnxt_vnic_info *vnic)
+{
+       __le16 *ring_tbl = vnic->rss_table;
+       struct bnxt_rx_ring_info *rxr;
+       u16 tbl_size, i;
+
+       tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+
+       for (i = 0; i < tbl_size; i++) {
+               u16 ring_id, j;
+
+               j = bp->rss_indir_tbl[i];
+               rxr = &bp->rx_ring[j];
+
+               ring_id = rxr->rx_ring_struct.fw_ring_id;
+               *ring_tbl++ = cpu_to_le16(ring_id);
+               ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+               *ring_tbl++ = cpu_to_le16(ring_id);
+       }
+}
+
+static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+       if (bp->flags & BNXT_FLAG_CHIP_P5)
+               __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
+       else
+               __bnxt_fill_hw_rss_tbl(bp, vnic);
+}
+
 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
 {
-       u32 i, j, max_rings;
        struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
        struct hwrm_vnic_rss_cfg_input req = {0};
 
@@ -4838,24 +4948,9 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
        if (set_rss) {
+               bnxt_fill_hw_rss_tbl(bp, vnic);
                req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
                req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
-               if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
-                       if (BNXT_CHIP_TYPE_NITRO_A0(bp))
-                               max_rings = bp->rx_nr_rings - 1;
-                       else
-                               max_rings = bp->rx_nr_rings;
-               } else {
-                       max_rings = 1;
-               }
-
-               /* Fill the RSS indirection table with ring group ids */
-               for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
-                       if (j == max_rings)
-                               j = 0;
-                       vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
-               }
-
                req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
                req.hash_key_tbl_addr =
                        cpu_to_le64(vnic->rss_hash_key_dma_addr);
@@ -4867,9 +4962,9 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
 {
        struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
-       u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
-       struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
        struct hwrm_vnic_rss_cfg_input req = {0};
+       dma_addr_t ring_tbl_map;
+       u32 i, nr_ctxs;
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
        req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
@@ -4877,31 +4972,18 @@ static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
                hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
                return 0;
        }
+       bnxt_fill_hw_rss_tbl(bp, vnic);
        req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
        req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
-       req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
        req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
-       nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
-       for (i = 0, k = 0; i < nr_ctxs; i++) {
-               __le16 *ring_tbl = vnic->rss_table;
+       ring_tbl_map = vnic->rss_table_dma_addr;
+       nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
+       for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
                int rc;
 
+               req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
                req.ring_table_pair_index = i;
                req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
-               for (j = 0; j < 64; j++) {
-                       u16 ring_id;
-
-                       ring_id = rxr->rx_ring_struct.fw_ring_id;
-                       *ring_tbl++ = cpu_to_le16(ring_id);
-                       ring_id = bnxt_cp_ring_for_rx(bp, rxr);
-                       *ring_tbl++ = cpu_to_le16(ring_id);
-                       rxr++;
-                       k++;
-                       if (k == max_rings) {
-                               k = 0;
-                               rxr = &bp->rx_ring[0];
-                       }
-               }
                rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
                if (rc)
                        return rc;
@@ -5139,6 +5221,14 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
                if (flags &
                    VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
                        bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
+
+               /* Older P5 fw before EXT_HW_STATS support did not set
+                * VLAN_STRIP_CAP properly.
+                */
+               if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
+                   ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+                    !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
+                       bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
                bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
                if (bp->max_tpa_v2)
                        bp->hw_ring_stats_size =
@@ -5992,6 +6082,21 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
                rx = rx_rings << 1;
        cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
        bp->tx_nr_rings = tx;
+
+       /* If we cannot reserve all the RX rings, reset the RSS map only
+        * if absolutely necessary
+        */
+       if (rx_rings != bp->rx_nr_rings) {
+               netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
+                           rx_rings, bp->rx_nr_rings);
+               if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
+                   (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
+                    bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
+                    bnxt_get_max_rss_ring(bp) >= rx_rings)) {
+                       netdev_warn(bp->dev, "RSS table entries reverting to default\n");
+                       bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
+               }
+       }
        bp->rx_nr_rings = rx_rings;
        bp->cp_nr_rings = cp;
 
@@ -6292,6 +6397,7 @@ int bnxt_hwrm_set_coal(struct bnxt *bp)
 
 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
 {
+       struct hwrm_stat_ctx_clr_stats_input req0 = {0};
        struct hwrm_stat_ctx_free_input req = {0};
        int i;
 
@@ -6301,6 +6407,7 @@ static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
        if (BNXT_CHIP_TYPE_NITRO_A0(bp))
                return;
 
+       bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1);
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
 
        mutex_lock(&bp->hwrm_cmd_lock);
@@ -6310,7 +6417,11 @@ static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
 
                if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
                        req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
-
+                       if (BNXT_FW_MAJ(bp) <= 20) {
+                               req0.stat_ctx_id = req.stat_ctx_id;
+                               _hwrm_send_message(bp, &req0, sizeof(req0),
+                                                  HWRM_CMD_TIMEOUT);
+                       }
                        _hwrm_send_message(bp, &req, sizeof(req),
                                           HWRM_CMD_TIMEOUT);
 
@@ -6949,7 +7060,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
        struct hwrm_func_qcaps_input req = {0};
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
-       u32 flags;
+       u32 flags, flags_ext;
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
        req.fid = cpu_to_le16(0xffff);
@@ -6974,9 +7085,16 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
                bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
        if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
                bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
+       if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
+               bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
+
+       flags_ext = le32_to_cpu(resp->flags_ext);
+       if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
+               bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
 
        bp->tx_push_thresh = 0;
-       if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
+       if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
+           BNXT_FW_MAJ(bp) > 217)
                bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
 
        hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
@@ -7240,8 +7358,9 @@ static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
 static int bnxt_hwrm_ver_get(struct bnxt *bp)
 {
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
+       u16 fw_maj, fw_min, fw_bld, fw_rsv;
        u32 dev_caps_cfg, hwrm_ver;
-       int rc;
+       int rc, len;
 
        bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
        mutex_lock(&bp->hwrm_cmd_lock);
@@ -7273,9 +7392,22 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
                         resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
                         resp->hwrm_intf_upd_8b);
 
-       snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
-                resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
-                resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
+       fw_maj = le16_to_cpu(resp->hwrm_fw_major);
+       if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
+               fw_min = le16_to_cpu(resp->hwrm_fw_minor);
+               fw_bld = le16_to_cpu(resp->hwrm_fw_build);
+               fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
+               len = FW_VER_STR_LEN;
+       } else {
+               fw_maj = resp->hwrm_fw_maj_8b;
+               fw_min = resp->hwrm_fw_min_8b;
+               fw_bld = resp->hwrm_fw_bld_8b;
+               fw_rsv = resp->hwrm_fw_rsvd_8b;
+               len = BC_HWRM_STR_LEN;
+       }
+       bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
+       snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
+                fw_rsv);
 
        if (strlen(resp->active_pkg_name)) {
                int fw_ver_len = strlen(bp->fw_ver_str);
@@ -7449,16 +7581,12 @@ static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
 
 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
 {
-       if (bp->vxlan_port_cnt) {
+       if (bp->vxlan_fw_dst_port_id != INVALID_HW_RING_ID)
                bnxt_hwrm_tunnel_dst_port_free(
                        bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
-       }
-       bp->vxlan_port_cnt = 0;
-       if (bp->nge_port_cnt) {
+       if (bp->nge_fw_dst_port_id != INVALID_HW_RING_ID)
                bnxt_hwrm_tunnel_dst_port_free(
                        bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
-       }
-       bp->nge_port_cnt = 0;
 }
 
 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
@@ -7613,7 +7741,7 @@ static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
 {
        int rc, i, nr_ctxs;
 
-       nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
+       nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
        for (i = 0; i < nr_ctxs; i++) {
                rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
                if (rc) {
@@ -8175,6 +8303,9 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
                        rc = bnxt_init_int_mode(bp);
                bnxt_ulp_irq_restart(bp, rc);
        }
+       if (!netif_is_rxfh_configured(bp->dev))
+               bnxt_set_dflt_rss_indir_tbl(bp);
+
        if (rc) {
                netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
                return rc;
@@ -9173,7 +9304,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
        }
 
        if (irq_re_init)
-               udp_tunnel_get_rx_info(bp->dev);
+               udp_tunnel_nic_reset_ntf(bp->dev);
 
        set_bit(BNXT_STATE_OPEN, &bp->state);
        bnxt_enable_int(bp);
@@ -9814,24 +9945,16 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
        /* Both CTAG and STAG VLAN accelaration on the RX side have to be
         * turned on or off together.
         */
-       vlan_features = features & (NETIF_F_HW_VLAN_CTAG_RX |
-                                   NETIF_F_HW_VLAN_STAG_RX);
-       if (vlan_features != (NETIF_F_HW_VLAN_CTAG_RX |
-                             NETIF_F_HW_VLAN_STAG_RX)) {
-               if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
-                       features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
-                                     NETIF_F_HW_VLAN_STAG_RX);
+       vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
+       if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
+               if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
+                       features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
                else if (vlan_features)
-                       features |= NETIF_F_HW_VLAN_CTAG_RX |
-                                   NETIF_F_HW_VLAN_STAG_RX;
+                       features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
        }
 #ifdef CONFIG_BNXT_SRIOV
-       if (BNXT_VF(bp)) {
-               if (bp->vf.vlan) {
-                       features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
-                                     NETIF_F_HW_VLAN_STAG_RX);
-               }
-       }
+       if (BNXT_VF(bp) && bp->vf.vlan)
+               features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
 #endif
        return features;
 }
@@ -9854,7 +9977,7 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
        if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
                flags &= ~BNXT_FLAG_TPA;
 
-       if (features & NETIF_F_HW_VLAN_CTAG_RX)
+       if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
                flags |= BNXT_FLAG_STRIP_VLAN;
 
        if (features & NETIF_F_NTUPLE)
@@ -10037,7 +10160,7 @@ static void bnxt_timer(struct timer_list *t)
        struct bnxt *bp = from_timer(bp, t, timer);
        struct net_device *dev = bp->dev;
 
-       if (!netif_running(dev))
+       if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
                return;
 
        if (atomic_read(&bp->intr_sem) != 0)
@@ -10332,24 +10455,6 @@ static void bnxt_sp_task(struct work_struct *work)
                bnxt_cfg_ntp_filters(bp);
        if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
                bnxt_hwrm_exec_fwd_req(bp);
-       if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
-               bnxt_hwrm_tunnel_dst_port_alloc(
-                       bp, bp->vxlan_port,
-                       TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
-       }
-       if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
-               bnxt_hwrm_tunnel_dst_port_free(
-                       bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
-       }
-       if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
-               bnxt_hwrm_tunnel_dst_port_alloc(
-                       bp, bp->nge_port,
-                       TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
-       }
-       if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
-               bnxt_hwrm_tunnel_dst_port_free(
-                       bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
-       }
        if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
                bnxt_hwrm_port_qstats(bp);
                bnxt_hwrm_port_qstats_ext(bp);
@@ -10946,6 +11051,9 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
        timer_setup(&bp->timer, bnxt_timer, 0);
        bp->current_interval = BNXT_TIMER_INTERVAL;
 
+       bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
+       bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
+
        clear_bit(BNXT_STATE_OPEN, &bp->state);
        return 0;
 
@@ -11273,84 +11381,33 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
 
 #endif /* CONFIG_RFS_ACCEL */
 
-static void bnxt_udp_tunnel_add(struct net_device *dev,
-                               struct udp_tunnel_info *ti)
+static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
 {
-       struct bnxt *bp = netdev_priv(dev);
-
-       if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
-               return;
-
-       if (!netif_running(dev))
-               return;
-
-       switch (ti->type) {
-       case UDP_TUNNEL_TYPE_VXLAN:
-               if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
-                       return;
+       struct bnxt *bp = netdev_priv(netdev);
+       struct udp_tunnel_info ti;
+       unsigned int cmd;
 
-               bp->vxlan_port_cnt++;
-               if (bp->vxlan_port_cnt == 1) {
-                       bp->vxlan_port = ti->port;
-                       set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
-                       bnxt_queue_sp_work(bp);
-               }
-               break;
-       case UDP_TUNNEL_TYPE_GENEVE:
-               if (bp->nge_port_cnt && bp->nge_port != ti->port)
-                       return;
+       udp_tunnel_nic_get_port(netdev, table, 0, &ti);
+       if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
+               cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
+       else
+               cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
 
-               bp->nge_port_cnt++;
-               if (bp->nge_port_cnt == 1) {
-                       bp->nge_port = ti->port;
-                       set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
-               }
-               break;
-       default:
-               return;
-       }
+       if (ti.port)
+               return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
 
-       bnxt_queue_sp_work(bp);
+       return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
 }
 
-static void bnxt_udp_tunnel_del(struct net_device *dev,
-                               struct udp_tunnel_info *ti)
-{
-       struct bnxt *bp = netdev_priv(dev);
-
-       if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
-               return;
-
-       if (!netif_running(dev))
-               return;
-
-       switch (ti->type) {
-       case UDP_TUNNEL_TYPE_VXLAN:
-               if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
-                       return;
-               bp->vxlan_port_cnt--;
-
-               if (bp->vxlan_port_cnt != 0)
-                       return;
-
-               set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
-               break;
-       case UDP_TUNNEL_TYPE_GENEVE:
-               if (!bp->nge_port_cnt || bp->nge_port != ti->port)
-                       return;
-               bp->nge_port_cnt--;
-
-               if (bp->nge_port_cnt != 0)
-                       return;
-
-               set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
-               break;
-       default:
-               return;
-       }
-
-       bnxt_queue_sp_work(bp);
-}
+static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
+       .sync_table     = bnxt_udp_tunnel_sync,
+       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+                         UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+       .tables         = {
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+       },
+};
 
 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                               struct net_device *dev, u32 filter_mask,
@@ -11448,8 +11505,8 @@ static const struct net_device_ops bnxt_netdev_ops = {
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
 #endif
-       .ndo_udp_tunnel_add     = bnxt_udp_tunnel_add,
-       .ndo_udp_tunnel_del     = bnxt_udp_tunnel_del,
+       .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+       .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
        .ndo_bpf                = bnxt_xdp,
        .ndo_xdp_xmit           = bnxt_xdp_xmit,
        .ndo_bridge_getlink     = bnxt_bridge_getlink,
@@ -11489,6 +11546,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
        bnxt_free_ctx_mem(bp);
        kfree(bp->ctx);
        bp->ctx = NULL;
+       kfree(bp->rss_indir_tbl);
+       bp->rss_indir_tbl = NULL;
        bnxt_free_port_stats(bp);
        free_netdev(dev);
 }
@@ -11892,7 +11951,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev->ethtool_ops = &bnxt_ethtool_ops;
        pci_set_drvdata(pdev, dev);
 
-       bnxt_vpd_read_info(bp);
+       if (BNXT_PF(bp))
+               bnxt_vpd_read_info(bp);
 
        rc = bnxt_alloc_hwrm_resources(bp);
        if (rc)
@@ -11936,11 +11996,15 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                        NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
                        NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
                        NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
+       dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
+
        dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
                                    NETIF_F_GSO_GRE_CSUM;
        dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
-       dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
-                           NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
+       if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
+               dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
+       if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
+               dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
        if (BNXT_SUPPORTS_TPA(bp))
                dev->hw_features |= NETIF_F_GRO_HW;
        dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
@@ -11996,7 +12060,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        bnxt_fw_init_one_p3(bp);
 
-       if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
+       if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
                bp->flags |= BNXT_FLAG_STRIP_VLAN;
 
        rc = bnxt_init_int_mode(bp);
@@ -12008,6 +12072,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         */
        bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
 
+       rc = bnxt_alloc_rss_indir_tbl(bp);
+       if (rc)
+               goto init_err_pci_clean;
+       bnxt_set_dflt_rss_indir_tbl(bp);
+
        if (BNXT_PF(bp)) {
                if (!bnxt_pf_wq) {
                        bnxt_pf_wq =
@@ -12017,7 +12086,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                                goto init_err_pci_clean;
                        }
                }
-               bnxt_init_tc(bp);
+               rc = bnxt_init_tc(bp);
+               if (rc)
+                       netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
+                                  rc);
        }
 
        bnxt_dl_register(bp);
@@ -12052,6 +12124,8 @@ init_err_pci_clean:
        bnxt_free_ctx_mem(bp);
        kfree(bp->ctx);
        bp->ctx = NULL;
+       kfree(bp->rss_indir_tbl);
+       bp->rss_indir_tbl = NULL;
 
 init_err_free:
        free_netdev(dev);
@@ -12133,19 +12207,9 @@ static int bnxt_resume(struct device *device)
                goto resume_exit;
        }
 
-       if (bnxt_hwrm_queue_qportcfg(bp)) {
-               rc = -ENODEV;
+       rc = bnxt_hwrm_func_qcaps(bp);
+       if (rc)
                goto resume_exit;
-       }
-
-       if (bp->hwrm_spec_code >= 0x10803) {
-               if (bnxt_alloc_ctx_mem(bp)) {
-                       rc = -ENODEV;
-                       goto resume_exit;
-               }
-       }
-       if (BNXT_NEW_RM(bp))
-               bnxt_hwrm_func_resc_qcaps(bp, false);
 
        if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
                rc = -ENODEV;
@@ -12161,6 +12225,8 @@ static int bnxt_resume(struct device *device)
 
 resume_exit:
        bnxt_ulp_start(bp, rc);
+       if (!rc)
+               bnxt_reenable_sriov(bp);
        rtnl_unlock();
        return rc;
 }
@@ -12204,6 +12270,9 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
                bnxt_close(netdev);
 
        pci_disable_device(pdev);
+       bnxt_free_ctx_mem(bp);
+       kfree(bp->ctx);
+       bp->ctx = NULL;
        rtnl_unlock();
 
        /* Request a slot slot reset. */
@@ -12237,12 +12306,16 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
                pci_set_master(pdev);
 
                err = bnxt_hwrm_func_reset(bp);
-               if (!err && netif_running(netdev))
-                       err = bnxt_open(netdev);
-
-               if (!err)
-                       result = PCI_ERS_RESULT_RECOVERED;
+               if (!err) {
+                       err = bnxt_hwrm_func_qcaps(bp);
+                       if (!err && netif_running(netdev))
+                               err = bnxt_open(netdev);
+               }
                bnxt_ulp_start(bp, err);
+               if (!err) {
+                       bnxt_reenable_sriov(bp);
+                       result = PCI_ERS_RESULT_RECOVERED;
+               }
        }
 
        if (result != PCI_ERS_RESULT_RECOVERED) {
index 9e173d7..2acd7f9 100644 (file)
@@ -1017,6 +1017,15 @@ struct bnxt_vnic_info {
        __le16          *rss_table;
        dma_addr_t      rss_hash_key_dma_addr;
        u64             *rss_hash_key;
+       int             rss_table_size;
+#define BNXT_RSS_TABLE_ENTRIES_P5      64
+#define BNXT_RSS_TABLE_SIZE_P5         (BNXT_RSS_TABLE_ENTRIES_P5 * 4)
+#define BNXT_RSS_TABLE_MAX_TBL_P5      8
+#define BNXT_MAX_RSS_TABLE_SIZE_P5                             \
+       (BNXT_RSS_TABLE_SIZE_P5 * BNXT_RSS_TABLE_MAX_TBL_P5)
+#define BNXT_MAX_RSS_TABLE_ENTRIES_P5                          \
+       (BNXT_RSS_TABLE_ENTRIES_P5 * BNXT_RSS_TABLE_MAX_TBL_P5)
+
        u32             rx_mask;
 
        u8              *mc_list;
@@ -1648,6 +1657,8 @@ struct bnxt {
        struct bnxt_ring_grp_info       *grp_info;
        struct bnxt_vnic_info   *vnic_info;
        int                     nr_vnics;
+       u16                     *rss_indir_tbl;
+       u16                     rss_indir_tbl_entries;
        u32                     rss_hash_cfg;
 
        u16                     max_mtu;
@@ -1705,6 +1716,9 @@ struct bnxt {
        #define BNXT_FW_CAP_ERR_RECOVER_RELOAD          0x00100000
        #define BNXT_FW_CAP_HOT_RESET                   0x00200000
        #define BNXT_FW_CAP_SHARED_PORT_CFG             0x00400000
+       #define BNXT_FW_CAP_VLAN_RX_STRIP               0x01000000
+       #define BNXT_FW_CAP_VLAN_TX_INSERT              0x02000000
+       #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED      0x04000000
 
 #define BNXT_NEW_RM(bp)                ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
        u32                     hwrm_spec_code;
@@ -1746,12 +1760,13 @@ struct bnxt {
 #define PHY_VER_STR_LEN         (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
        char                    fw_ver_str[FW_VER_STR_LEN];
        char                    hwrm_ver_supp[FW_VER_STR_LEN];
-       __be16                  vxlan_port;
-       u8                      vxlan_port_cnt;
-       __le16                  vxlan_fw_dst_port_id;
-       __be16                  nge_port;
-       u8                      nge_port_cnt;
-       __le16                  nge_fw_dst_port_id;
+       u64                     fw_ver_code;
+#define BNXT_FW_VER_CODE(maj, min, bld, rsv)                   \
+       ((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
+#define BNXT_FW_MAJ(bp)                ((bp)->fw_ver_code >> 48)
+
+       u16                     vxlan_fw_dst_port_id;
+       u16                     nge_fw_dst_port_id;
        u8                      port_partition_type;
        u8                      port_count;
        u16                     br_mode;
@@ -1771,16 +1786,12 @@ struct bnxt {
 #define BNXT_RX_NTP_FLTR_SP_EVENT      1
 #define BNXT_LINK_CHNG_SP_EVENT                2
 #define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT        3
-#define BNXT_VXLAN_ADD_PORT_SP_EVENT   4
-#define BNXT_VXLAN_DEL_PORT_SP_EVENT   5
 #define BNXT_RESET_TASK_SP_EVENT       6
 #define BNXT_RST_RING_SP_EVENT         7
 #define BNXT_HWRM_PF_UNLOAD_SP_EVENT   8
 #define BNXT_PERIODIC_STATS_SP_EVENT   9
 #define BNXT_HWRM_PORT_MODULE_SP_EVENT 10
 #define BNXT_RESET_TASK_SILENT_SP_EVENT        11
-#define BNXT_GENEVE_ADD_PORT_SP_EVENT  12
-#define BNXT_GENEVE_DEL_PORT_SP_EVENT  13
 #define BNXT_LINK_SPEED_CHNG_SP_EVENT  14
 #define BNXT_FLOW_STATS_SP_EVENT       15
 #define BNXT_UPDATE_PHY_SP_EVENT       16
@@ -1890,6 +1901,11 @@ struct bnxt {
 #define BNXT_PCIE_STATS_OFFSET(counter)                        \
        (offsetof(struct pcie_ctx_hw_stats, counter) / 8)
 
+#define BNXT_HW_FEATURE_VLAN_ALL_RX                            \
+       (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)
+#define BNXT_HW_FEATURE_VLAN_ALL_TX                            \
+       (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX)
+
 #define I2C_DEV_ADDR_A0                                0xa0
 #define I2C_DEV_ADDR_A2                                0xa2
 #define SFF_DIAG_SUPPORT_OFFSET                        0x5c
@@ -2023,6 +2039,7 @@ int hwrm_send_message(struct bnxt *, void *, u32, int);
 int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
                            int bmap_size, bool async_only);
+int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
 int bnxt_nq_rings_in_use(struct bnxt *bp);
index a812beb..3a85419 100644 (file)
@@ -411,6 +411,12 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
                        return rc;
        }
 
+       if (strlen(bp->board_serialno)) {
+               rc = devlink_info_board_serial_number_put(req, bp->board_serialno);
+               if (rc)
+                       return rc;
+       }
+
        sprintf(buf, "%X", bp->chip_num);
        rc = devlink_info_version_fixed_put(req,
                        DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
@@ -685,6 +691,7 @@ static void bnxt_dl_params_unregister(struct bnxt *bp)
 
 int bnxt_dl_register(struct bnxt *bp)
 {
+       struct devlink_port_attrs attrs = {};
        struct devlink *dl;
        int rc;
 
@@ -713,9 +720,11 @@ int bnxt_dl_register(struct bnxt *bp)
        if (!BNXT_PF(bp))
                return 0;
 
-       devlink_port_attrs_set(&bp->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
-                              bp->pf.port_id, false, 0, bp->dsn,
-                              sizeof(bp->dsn));
+       attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+       attrs.phys.port_number = bp->pf.port_id;
+       memcpy(attrs.switch_id.id, bp->dsn, sizeof(bp->dsn));
+       attrs.switch_id.id_len = sizeof(bp->dsn);
+       devlink_port_attrs_set(&bp->dl_port, &attrs);
        rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
        if (rc) {
                netdev_err(bp->dev, "devlink_port_register failed\n");
index 6b88143..538c976 100644 (file)
@@ -926,6 +926,13 @@ static int bnxt_set_channels(struct net_device *dev,
                return rc;
        }
 
+       if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
+           bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
+           (dev->priv_flags & IFF_RXFH_CONFIGURED)) {
+               netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
+               return -EINVAL;
+       }
+
        if (netif_running(dev)) {
                if (BNXT_PF(bp)) {
                        /* TODO CHIMP_FW: Send message to all VF's
@@ -1273,8 +1280,12 @@ static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
        return rc;
 }
 
-static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
+u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
 {
+       struct bnxt *bp = netdev_priv(dev);
+
+       if (bp->flags & BNXT_FLAG_CHIP_P5)
+               return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
        return HW_HASH_INDEX_SIZE;
 }
 
@@ -1288,7 +1299,7 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
 {
        struct bnxt *bp = netdev_priv(dev);
        struct bnxt_vnic_info *vnic;
-       int i = 0;
+       u32 i, tbl_size;
 
        if (hfunc)
                *hfunc = ETH_RSS_HASH_TOP;
@@ -1297,9 +1308,10 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
                return 0;
 
        vnic = &bp->vnic_info[0];
-       if (indir && vnic->rss_table) {
-               for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
-                       indir[i] = le16_to_cpu(vnic->rss_table[i]);
+       if (indir && bp->rss_indir_tbl) {
+               tbl_size = bnxt_get_rxfh_indir_size(dev);
+               for (i = 0; i < tbl_size; i++)
+                       indir[i] = bp->rss_indir_tbl[i];
        }
 
        if (key && vnic->rss_hash_key)
@@ -1308,6 +1320,35 @@ static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
        return 0;
 }
 
+static int bnxt_set_rxfh(struct net_device *dev, const u32 *indir,
+                        const u8 *key, const u8 hfunc)
+{
+       struct bnxt *bp = netdev_priv(dev);
+       int rc = 0;
+
+       if (hfunc && hfunc != ETH_RSS_HASH_TOP)
+               return -EOPNOTSUPP;
+
+       if (key)
+               return -EOPNOTSUPP;
+
+       if (indir) {
+               u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
+
+               for (i = 0; i < tbl_size; i++)
+                       bp->rss_indir_tbl[i] = indir[i];
+               pad = bp->rss_indir_tbl_entries - tbl_size;
+               if (pad)
+                       memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
+       }
+
+       if (netif_running(bp->dev)) {
+               bnxt_close_nic(bp, false, false);
+               rc = bnxt_open_nic(bp, false, false);
+       }
+       return rc;
+}
+
 static void bnxt_get_drvinfo(struct net_device *dev,
                             struct ethtool_drvinfo *info)
 {
@@ -3614,6 +3655,7 @@ const struct ethtool_ops bnxt_ethtool_ops = {
        .get_rxfh_indir_size    = bnxt_get_rxfh_indir_size,
        .get_rxfh_key_size      = bnxt_get_rxfh_key_size,
        .get_rxfh               = bnxt_get_rxfh,
+       .set_rxfh               = bnxt_set_rxfh,
        .flash_device           = bnxt_flash_device,
        .get_eeprom_len         = bnxt_get_eeprom_len,
        .get_eeprom             = bnxt_get_eeprom,
index ce7585f..dddbca1 100644 (file)
@@ -86,6 +86,7 @@ struct hwrm_dbg_cmn_output {
 
 extern const struct ethtool_ops bnxt_ethtool_ops;
 
+u32 bnxt_get_rxfh_indir_size(struct net_device *dev);
 u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
 u32 bnxt_fw_to_ethtool_speed(u16);
 u16 bnxt_get_fw_auto_link_speeds(u32);
index 3a9a51f..392e32c 100644 (file)
@@ -396,6 +396,7 @@ static void bnxt_free_vf_resources(struct bnxt *bp)
                }
        }
 
+       bp->pf.active_vfs = 0;
        kfree(bp->pf.vf);
        bp->pf.vf = NULL;
 }
@@ -835,7 +836,6 @@ void bnxt_sriov_disable(struct bnxt *bp)
 
        bnxt_free_vf_resources(bp);
 
-       bp->pf.active_vfs = 0;
        /* Reclaim all resources for the PF. */
        rtnl_lock();
        bnxt_restore_pf_fw_resources(bp);
index 0eef4f5..5e4429b 100644 (file)
@@ -1638,7 +1638,7 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
        lastused = flow->lastused;
        spin_unlock(&flow->stats_lock);
 
-       flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets,
+       flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets, 0,
                          lastused, FLOW_ACTION_HW_STATS_DELAYED);
        return 0;
 }
@@ -1888,8 +1888,9 @@ static void bnxt_tc_setup_indr_rel(void *cb_priv)
        kfree(priv);
 }
 
-static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
-                                   struct flow_block_offload *f)
+static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct Qdisc *sch, struct bnxt *bp,
+                                   struct flow_block_offload *f, void *data,
+                                   void (*cleanup)(struct flow_block_cb *block_cb))
 {
        struct bnxt_flower_indr_block_cb_priv *cb_priv;
        struct flow_block_cb *block_cb;
@@ -1907,9 +1908,10 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
                cb_priv->bp = bp;
                list_add(&cb_priv->list, &bp->tc_indr_block_list);
 
-               block_cb = flow_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
-                                              cb_priv, cb_priv,
-                                              bnxt_tc_setup_indr_rel);
+               block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
+                                                   cb_priv, cb_priv,
+                                                   bnxt_tc_setup_indr_rel, f,
+                                                   netdev, sch, data, bp, cleanup);
                if (IS_ERR(block_cb)) {
                        list_del(&cb_priv->list);
                        kfree(cb_priv);
@@ -1930,7 +1932,7 @@ static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct bnxt *bp,
                if (!block_cb)
                        return -ENOENT;
 
-               flow_block_cb_remove(block_cb, f);
+               flow_indr_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
                break;
        default:
@@ -1944,15 +1946,17 @@ static bool bnxt_is_netdev_indr_offload(struct net_device *netdev)
        return netif_is_vxlan(netdev);
 }
 
-static int bnxt_tc_setup_indr_cb(struct net_device *netdev, void *cb_priv,
-                                enum tc_setup_type type, void *type_data)
+static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
+                                enum tc_setup_type type, void *type_data,
+                                void *data,
+                                void (*cleanup)(struct flow_block_cb *block_cb))
 {
        if (!bnxt_is_netdev_indr_offload(netdev))
                return -EOPNOTSUPP;
 
        switch (type) {
        case TC_SETUP_BLOCK:
-               return bnxt_tc_setup_indr_block(netdev, cb_priv, type_data);
+               return bnxt_tc_setup_indr_block(netdev, sch, cb_priv, type_data, data, cleanup);
        default:
                break;
        }
@@ -1996,11 +2000,8 @@ int bnxt_init_tc(struct bnxt *bp)
        struct bnxt_tc_info *tc_info;
        int rc;
 
-       if (bp->hwrm_spec_code < 0x10803) {
-               netdev_warn(bp->dev,
-                           "Firmware does not support TC flower offload.\n");
-               return -ENOTSUPP;
-       }
+       if (bp->hwrm_spec_code < 0x10803)
+               return 0;
 
        tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL);
        if (!tc_info)
@@ -2074,7 +2075,7 @@ void bnxt_shutdown_tc(struct bnxt *bp)
                return;
 
        flow_indr_dev_unregister(bnxt_tc_setup_indr_cb, bp,
-                                bnxt_tc_setup_indr_block_cb);
+                                bnxt_tc_setup_indr_rel);
        rhashtable_destroy(&tc_info->flow_table);
        rhashtable_destroy(&tc_info->l2_table);
        rhashtable_destroy(&tc_info->decap_l2_table);
index ff31da0..ee84a26 100644 (file)
@@ -459,17 +459,6 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
                        genet_dma_ring_regs[r]);
 }
 
-static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
-                                          u32 f_index)
-{
-       u32 offset;
-       u32 reg;
-
-       offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
-       reg = bcmgenet_hfb_reg_readl(priv, offset);
-       return !!(reg & (1 << (f_index % 32)));
-}
-
 static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
 {
        u32 offset;
@@ -533,19 +522,6 @@ static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
        bcmgenet_hfb_reg_writel(priv, reg, offset);
 }
 
-static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
-{
-       u32 f_index;
-
-       /* First MAX_NUM_OF_FS_RULES are reserved for Rx NFC filters */
-       for (f_index = MAX_NUM_OF_FS_RULES;
-            f_index < priv->hw_params->hfb_filter_cnt; f_index++)
-               if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
-                       return f_index;
-
-       return -ENOMEM;
-}
-
 static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
 {
        while (size) {
@@ -634,8 +610,9 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
 {
        struct ethtool_rx_flow_spec *fs = &rule->fs;
        int err = 0, offset = 0, f_length = 0;
-       u16 val_16, mask_16;
        u8 val_8, mask_8;
+       __be16 val_16;
+       u16 mask_16;
        size_t size;
        u32 *f_data;
 
@@ -744,59 +721,6 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
        return err;
 }
 
-/* bcmgenet_hfb_add_filter
- *
- * Add new filter to Hardware Filter Block to match and direct Rx traffic to
- * desired Rx queue.
- *
- * f_data is an array of unsigned 32-bit integers where each 32-bit integer
- * provides filter data for 2 bytes (4 nibbles) of Rx frame:
- *
- * bits 31:20 - unused
- * bit  19    - nibble 0 match enable
- * bit  18    - nibble 1 match enable
- * bit  17    - nibble 2 match enable
- * bit  16    - nibble 3 match enable
- * bits 15:12 - nibble 0 data
- * bits 11:8  - nibble 1 data
- * bits 7:4   - nibble 2 data
- * bits 3:0   - nibble 3 data
- *
- * Example:
- * In order to match:
- * - Ethernet frame type = 0x0800 (IP)
- * - IP version field = 4
- * - IP protocol field = 0x11 (UDP)
- *
- * The following filter is needed:
- * u32 hfb_filter_ipv4_udp[] = {
- *   Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- *   Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
- *   Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
- * };
- *
- * To add the filter to HFB and direct the traffic to Rx queue 0, call:
- * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
- *                         ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
- */
-int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
-                           u32 f_length, u32 rx_queue)
-{
-       int f_index;
-
-       f_index = bcmgenet_hfb_find_unused_filter(priv);
-       if (f_index < 0)
-               return -ENOMEM;
-
-       if (f_length > priv->hw_params->hfb_filter_size)
-               return -EINVAL;
-
-       bcmgenet_hfb_set_filter(priv, f_data, f_length, rx_queue, f_index);
-       bcmgenet_hfb_enable_filter(priv, f_index);
-
-       return 0;
-}
-
 /* bcmgenet_hfb_clear
  *
  * Clear Hardware Filter Block and disable all filtering.
@@ -2118,11 +2042,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
                goto out;
        }
 
-       if (skb_padto(skb, ETH_ZLEN)) {
-               ret = NETDEV_TX_OK;
-               goto out;
-       }
-
        /* Retain how many bytes will be sent on the wire, without TSB inserted
         * by transmit checksum offload
         */
@@ -2169,6 +2088,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
                len_stat = (size << DMA_BUFLENGTH_SHIFT) |
                           (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
 
+               /* Note: if we ever change from DMA_TX_APPEND_CRC below we
+                * will need to restore software padding of "runt" packets
+                */
                if (!i) {
                        len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
                        if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -3725,6 +3647,22 @@ static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
        return &dev->stats;
 }
 
+static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
+       if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) ||
+           priv->phy_interface != PHY_INTERFACE_MODE_MOCA)
+               return -EOPNOTSUPP;
+
+       if (new_carrier)
+               netif_carrier_on(dev);
+       else
+               netif_carrier_off(dev);
+
+       return 0;
+}
+
 static const struct net_device_ops bcmgenet_netdev_ops = {
        .ndo_open               = bcmgenet_open,
        .ndo_stop               = bcmgenet_close,
@@ -3738,6 +3676,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
        .ndo_poll_controller    = bcmgenet_poll_controller,
 #endif
        .ndo_get_stats          = bcmgenet_get_stats,
+       .ndo_change_carrier     = bcmgenet_change_carrier,
 };
 
 /* Array of GENET hardware parameters/characteristics */
index 7a3b22b..ebff1fc 100644 (file)
@@ -18168,8 +18168,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
        rtnl_lock();
 
-       /* We probably don't have netdev yet */
-       if (!netdev || !netif_running(netdev))
+       /* Could be second call or maybe we don't have netdev yet */
+       if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
                goto done;
 
        /* We needn't recover from permanent error */
index 6953d05..1000c89 100644 (file)
@@ -2847,7 +2847,7 @@ struct tg3_ocir {
        u32                             port1_flags;
        u32                             port2_flags;
        u32                             port3_flags;
-       u32                             reserved2[1];
+       u32                             reserved2;
 };
 
 
index e17bfc8..49358d4 100644 (file)
@@ -1535,7 +1535,6 @@ static int
 bfa_flash_fifo_flush(void __iomem *pci_bar)
 {
        u32 i;
-       u32 t;
        union bfa_flash_dev_status_reg dev_status;
 
        dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
@@ -1545,7 +1544,7 @@ bfa_flash_fifo_flush(void __iomem *pci_bar)
 
        /* fifo counter in terms of words */
        for (i = 0; i < dev_status.r.fifo_cnt; i++)
-               t = readl(pci_bar + FLI_RDDATA_REG);
+               readl(pci_bar + FLI_RDDATA_REG);
 
        /* Check the device status. It may take some time. */
        for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
index 09c912e..f780d42 100644 (file)
@@ -389,7 +389,7 @@ struct bfi_msgq_mhdr {
        u16     msg_token;
        u16     num_entries;
        u8      enet_id;
-       u8      rsvd[1];
+       u8      rsvd;
 } __packed;
 
 #define bfi_msgq_mhdr_set(_mh, _mc, _mid, _tok, _enet_id) do { \
index ab827fb..4f1b415 100644 (file)
@@ -90,6 +90,7 @@
 #define GEM_SA3T               0x009C /* Specific3 Top */
 #define GEM_SA4B               0x00A0 /* Specific4 Bottom */
 #define GEM_SA4T               0x00A4 /* Specific4 Top */
+#define GEM_WOL                        0x00b8 /* Wake on LAN */
 #define GEM_EFTSH              0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */
 #define GEM_EFRSH              0x00ec /* PTP Event Frame Received Seconds Register 47:32 */
 #define GEM_PEFTSH             0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */
 #define MACB_PDRSFT_SIZE       1
 #define MACB_SRI_OFFSET                26 /* TSU Seconds Register Increment */
 #define MACB_SRI_SIZE          1
+#define GEM_WOL_OFFSET         28 /* Enable wake-on-lan interrupt */
+#define GEM_WOL_SIZE           1
 
 /* Timer increment fields */
 #define MACB_TI_CNS_OFFSET     0
index 5b9d7c6..a6a35e1 100644 (file)
@@ -1467,7 +1467,7 @@ static void macb_hresp_error_task(unsigned long data)
 {
        struct macb *bp = (struct macb *)data;
        struct net_device *dev = bp->dev;
-       struct macb_queue *queue = bp->queues;
+       struct macb_queue *queue;
        unsigned int q;
        u32 ctrl;
 
@@ -1517,6 +1517,64 @@ static void macb_tx_restart(struct macb_queue *queue)
        macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
 }
 
+static irqreturn_t macb_wol_interrupt(int irq, void *dev_id)
+{
+       struct macb_queue *queue = dev_id;
+       struct macb *bp = queue->bp;
+       u32 status;
+
+       status = queue_readl(queue, ISR);
+
+       if (unlikely(!status))
+               return IRQ_NONE;
+
+       spin_lock(&bp->lock);
+
+       if (status & MACB_BIT(WOL)) {
+               queue_writel(queue, IDR, MACB_BIT(WOL));
+               macb_writel(bp, WOL, 0);
+               netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
+                           (unsigned int)(queue - bp->queues),
+                           (unsigned long)status);
+               if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+                       queue_writel(queue, ISR, MACB_BIT(WOL));
+               pm_wakeup_event(&bp->pdev->dev, 0);
+       }
+
+       spin_unlock(&bp->lock);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t gem_wol_interrupt(int irq, void *dev_id)
+{
+       struct macb_queue *queue = dev_id;
+       struct macb *bp = queue->bp;
+       u32 status;
+
+       status = queue_readl(queue, ISR);
+
+       if (unlikely(!status))
+               return IRQ_NONE;
+
+       spin_lock(&bp->lock);
+
+       if (status & GEM_BIT(WOL)) {
+               queue_writel(queue, IDR, GEM_BIT(WOL));
+               gem_writel(bp, WOL, 0);
+               netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
+                           (unsigned int)(queue - bp->queues),
+                           (unsigned long)status);
+               if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+                       queue_writel(queue, ISR, GEM_BIT(WOL));
+               pm_wakeup_event(&bp->pdev->dev, 0);
+       }
+
+       spin_unlock(&bp->lock);
+
+       return IRQ_HANDLED;
+}
+
 static irqreturn_t macb_interrupt(int irq, void *dev_id)
 {
        struct macb_queue *queue = dev_id;
@@ -1933,7 +1991,7 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
        unsigned long flags;
        unsigned int desc_cnt, nr_frags, frag_size, f;
        unsigned int hdrlen;
-       bool is_lso, is_udp = 0;
+       bool is_lso;
        netdev_tx_t ret = NETDEV_TX_OK;
 
        if (macb_clear_csum(skb)) {
@@ -1949,10 +2007,8 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
        is_lso = (skb_shinfo(skb)->gso_size != 0);
 
        if (is_lso) {
-               is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);
-
                /* length of headers */
-               if (is_udp)
+               if (ip_hdr(skb)->protocol == IPPROTO_UDP)
                        /* only queue eth + ip headers separately for UDP */
                        hdrlen = skb_transport_offset(skb);
                else
@@ -2558,22 +2614,23 @@ static int macb_open(struct net_device *dev)
 
        err = macb_phylink_connect(bp);
        if (err)
-               goto napi_exit;
+               goto reset_hw;
 
        netif_tx_start_all_queues(dev);
 
        if (bp->ptp_info)
                bp->ptp_info->ptp_init(dev);
 
-napi_exit:
+       return 0;
+
+reset_hw:
+       macb_reset_hw(bp);
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
                napi_disable(&queue->napi);
+       macb_free_consistent(bp);
 pm_exit:
-       if (err) {
-               pm_runtime_put_sync(&bp->pdev->dev);
-               return err;
-       }
-       return 0;
+       pm_runtime_put_sync(&bp->pdev->dev);
+       return err;
 }
 
 static int macb_close(struct net_device *dev)
@@ -2820,11 +2877,13 @@ static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
        struct macb *bp = netdev_priv(netdev);
 
-       wol->supported = 0;
-       wol->wolopts = 0;
-
-       if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET)
+       if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
                phylink_ethtool_get_wol(bp->phylink, wol);
+               wol->supported |= WAKE_MAGIC;
+
+               if (bp->wol & MACB_WOL_ENABLED)
+                       wol->wolopts |= WAKE_MAGIC;
+       }
 }
 
 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -2832,9 +2891,13 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        struct macb *bp = netdev_priv(netdev);
        int ret;
 
+       /* Pass the order to phylink layer */
        ret = phylink_ethtool_set_wol(bp->phylink, wol);
-       if (!ret)
-               return 0;
+       /* Don't manage WoL on MAC if handled by the PHY
+        * or if there's a failure in talking to the PHY
+        */
+       if (!ret || ret != -EOPNOTSUPP)
+               return ret;
 
        if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
            (wol->wolopts & ~WAKE_MAGIC))
@@ -3311,6 +3374,8 @@ static const struct ethtool_ops macb_ethtool_ops = {
 static const struct ethtool_ops gem_ethtool_ops = {
        .get_regs_len           = macb_get_regs_len,
        .get_regs               = macb_get_regs,
+       .get_wol                = macb_get_wol,
+       .set_wol                = macb_set_wol,
        .get_link               = ethtool_op_get_link,
        .get_ts_info            = macb_get_ts_info,
        .get_ethtool_stats      = gem_get_ethtool_stats,
@@ -3481,8 +3546,6 @@ static void macb_probe_queues(void __iomem *mem,
                              unsigned int *queue_mask,
                              unsigned int *num_queues)
 {
-       unsigned int hw_q;
-
        *queue_mask = 0x1;
        *num_queues = 1;
 
@@ -3496,13 +3559,8 @@ static void macb_probe_queues(void __iomem *mem,
                return;
 
        /* bit 0 is never set but queue 0 always exists */
-       *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
-
-       *queue_mask |= 0x1;
-
-       for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
-               if (*queue_mask & (1 << hw_q))
-                       (*num_queues)++;
+       *queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff;
+       *num_queues = hweight32(*queue_mask);
 }
 
 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
@@ -3761,15 +3819,9 @@ static int macb_init(struct platform_device *pdev)
 
 static struct sifive_fu540_macb_mgmt *mgmt;
 
-/* Initialize and start the Receiver and Transmit subsystems */
-static int at91ether_start(struct net_device *dev)
+static int at91ether_alloc_coherent(struct macb *lp)
 {
-       struct macb *lp = netdev_priv(dev);
        struct macb_queue *q = &lp->queues[0];
-       struct macb_dma_desc *desc;
-       dma_addr_t addr;
-       u32 ctl;
-       int i;
 
        q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
                                         (AT91ETHER_MAX_RX_DESCR *
@@ -3791,6 +3843,43 @@ static int at91ether_start(struct net_device *dev)
                return -ENOMEM;
        }
 
+       return 0;
+}
+
+static void at91ether_free_coherent(struct macb *lp)
+{
+       struct macb_queue *q = &lp->queues[0];
+
+       if (q->rx_ring) {
+               dma_free_coherent(&lp->pdev->dev,
+                                 AT91ETHER_MAX_RX_DESCR *
+                                 macb_dma_desc_get_size(lp),
+                                 q->rx_ring, q->rx_ring_dma);
+               q->rx_ring = NULL;
+       }
+
+       if (q->rx_buffers) {
+               dma_free_coherent(&lp->pdev->dev,
+                                 AT91ETHER_MAX_RX_DESCR *
+                                 AT91ETHER_MAX_RBUFF_SZ,
+                                 q->rx_buffers, q->rx_buffers_dma);
+               q->rx_buffers = NULL;
+       }
+}
+
+/* Initialize and start the Receiver and Transmit subsystems */
+static int at91ether_start(struct macb *lp)
+{
+       struct macb_queue *q = &lp->queues[0];
+       struct macb_dma_desc *desc;
+       dma_addr_t addr;
+       u32 ctl;
+       int i, ret;
+
+       ret = at91ether_alloc_coherent(lp);
+       if (ret)
+               return ret;
+
        addr = q->rx_buffers_dma;
        for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
                desc = macb_rx_desc(q, i);
@@ -3812,9 +3901,39 @@ static int at91ether_start(struct net_device *dev)
        ctl = macb_readl(lp, NCR);
        macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
 
+       /* Enable MAC interrupts */
+       macb_writel(lp, IER, MACB_BIT(RCOMP)    |
+                            MACB_BIT(RXUBR)    |
+                            MACB_BIT(ISR_TUND) |
+                            MACB_BIT(ISR_RLE)  |
+                            MACB_BIT(TCOMP)    |
+                            MACB_BIT(ISR_ROVR) |
+                            MACB_BIT(HRESP));
+
        return 0;
 }
 
+static void at91ether_stop(struct macb *lp)
+{
+       u32 ctl;
+
+       /* Disable MAC interrupts */
+       macb_writel(lp, IDR, MACB_BIT(RCOMP)    |
+                            MACB_BIT(RXUBR)    |
+                            MACB_BIT(ISR_TUND) |
+                            MACB_BIT(ISR_RLE)  |
+                            MACB_BIT(TCOMP)    |
+                            MACB_BIT(ISR_ROVR) |
+                            MACB_BIT(HRESP));
+
+       /* Disable Receiver and Transmitter */
+       ctl = macb_readl(lp, NCR);
+       macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+
+       /* Free resources. */
+       at91ether_free_coherent(lp);
+}
+
 /* Open the ethernet interface */
 static int at91ether_open(struct net_device *dev)
 {
@@ -3834,63 +3953,36 @@ static int at91ether_open(struct net_device *dev)
 
        macb_set_hwaddr(lp);
 
-       ret = at91ether_start(dev);
+       ret = at91ether_start(lp);
        if (ret)
-               return ret;
-
-       /* Enable MAC interrupts */
-       macb_writel(lp, IER, MACB_BIT(RCOMP)    |
-                            MACB_BIT(RXUBR)    |
-                            MACB_BIT(ISR_TUND) |
-                            MACB_BIT(ISR_RLE)  |
-                            MACB_BIT(TCOMP)    |
-                            MACB_BIT(ISR_ROVR) |
-                            MACB_BIT(HRESP));
+               goto pm_exit;
 
        ret = macb_phylink_connect(lp);
        if (ret)
-               return ret;
+               goto stop;
 
        netif_start_queue(dev);
 
        return 0;
+
+stop:
+       at91ether_stop(lp);
+pm_exit:
+       pm_runtime_put_sync(&lp->pdev->dev);
+       return ret;
 }
 
 /* Close the interface */
 static int at91ether_close(struct net_device *dev)
 {
        struct macb *lp = netdev_priv(dev);
-       struct macb_queue *q = &lp->queues[0];
-       u32 ctl;
-
-       /* Disable Receiver and Transmitter */
-       ctl = macb_readl(lp, NCR);
-       macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
-
-       /* Disable MAC interrupts */
-       macb_writel(lp, IDR, MACB_BIT(RCOMP)    |
-                            MACB_BIT(RXUBR)    |
-                            MACB_BIT(ISR_TUND) |
-                            MACB_BIT(ISR_RLE)  |
-                            MACB_BIT(TCOMP)    |
-                            MACB_BIT(ISR_ROVR) |
-                            MACB_BIT(HRESP));
 
        netif_stop_queue(dev);
 
        phylink_stop(lp->phylink);
        phylink_disconnect_phy(lp->phylink);
 
-       dma_free_coherent(&lp->pdev->dev,
-                         AT91ETHER_MAX_RX_DESCR *
-                         macb_dma_desc_get_size(lp),
-                         q->rx_ring, q->rx_ring_dma);
-       q->rx_ring = NULL;
-
-       dma_free_coherent(&lp->pdev->dev,
-                         AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
-                         q->rx_buffers, q->rx_buffers_dma);
-       q->rx_buffers = NULL;
+       at91ether_stop(lp);
 
        return pm_runtime_put(&lp->pdev->dev);
 }
@@ -4387,7 +4479,7 @@ static int macb_probe(struct platform_device *pdev)
        bp->wol = 0;
        if (of_get_property(np, "magic-packet", NULL))
                bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
-       device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
+       device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
 
        spin_lock_init(&bp->lock);
 
@@ -4535,38 +4627,82 @@ static int __maybe_unused macb_suspend(struct device *dev)
        struct macb_queue *queue = bp->queues;
        unsigned long flags;
        unsigned int q;
+       int err;
 
        if (!netif_running(netdev))
                return 0;
 
        if (bp->wol & MACB_WOL_ENABLED) {
-               macb_writel(bp, IER, MACB_BIT(WOL));
-               macb_writel(bp, WOL, MACB_BIT(MAG));
-               enable_irq_wake(bp->queues[0].irq);
-               netif_device_detach(netdev);
-       } else {
-               netif_device_detach(netdev);
+               spin_lock_irqsave(&bp->lock, flags);
+               /* Flush all status bits */
+               macb_writel(bp, TSR, -1);
+               macb_writel(bp, RSR, -1);
                for (q = 0, queue = bp->queues; q < bp->num_queues;
-                    ++q, ++queue)
-                       napi_disable(&queue->napi);
+                    ++q, ++queue) {
+                       /* Disable all interrupts */
+                       queue_writel(queue, IDR, -1);
+                       queue_readl(queue, ISR);
+                       if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+                               queue_writel(queue, ISR, -1);
+               }
+               /* Change interrupt handler and
+                * Enable WoL IRQ on queue 0
+                */
+               devm_free_irq(dev, bp->queues[0].irq, bp->queues);
+               if (macb_is_gem(bp)) {
+                       err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt,
+                                              IRQF_SHARED, netdev->name, bp->queues);
+                       if (err) {
+                               dev_err(dev,
+                                       "Unable to request IRQ %d (error %d)\n",
+                                       bp->queues[0].irq, err);
+                               spin_unlock_irqrestore(&bp->lock, flags);
+                               return err;
+                       }
+                       queue_writel(bp->queues, IER, GEM_BIT(WOL));
+                       gem_writel(bp, WOL, MACB_BIT(MAG));
+               } else {
+                       err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
+                                              IRQF_SHARED, netdev->name, bp->queues);
+                       if (err) {
+                               dev_err(dev,
+                                       "Unable to request IRQ %d (error %d)\n",
+                                       bp->queues[0].irq, err);
+                               spin_unlock_irqrestore(&bp->lock, flags);
+                               return err;
+                       }
+                       queue_writel(bp->queues, IER, MACB_BIT(WOL));
+                       macb_writel(bp, WOL, MACB_BIT(MAG));
+               }
+               spin_unlock_irqrestore(&bp->lock, flags);
+
+               enable_irq_wake(bp->queues[0].irq);
+       }
+
+       netif_device_detach(netdev);
+       for (q = 0, queue = bp->queues; q < bp->num_queues;
+            ++q, ++queue)
+               napi_disable(&queue->napi);
+
+       if (!(bp->wol & MACB_WOL_ENABLED)) {
                rtnl_lock();
                phylink_stop(bp->phylink);
                rtnl_unlock();
                spin_lock_irqsave(&bp->lock, flags);
                macb_reset_hw(bp);
                spin_unlock_irqrestore(&bp->lock, flags);
+       }
 
-               if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
-                       bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);
+       if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
+               bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);
 
-               if (netdev->hw_features & NETIF_F_NTUPLE)
-                       bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
-       }
+       if (netdev->hw_features & NETIF_F_NTUPLE)
+               bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
 
-       netif_carrier_off(netdev);
        if (bp->ptp_info)
                bp->ptp_info->ptp_remove(netdev);
-       pm_runtime_force_suspend(dev);
+       if (!device_may_wakeup(dev))
+               pm_runtime_force_suspend(dev);
 
        return 0;
 }
@@ -4576,37 +4712,71 @@ static int __maybe_unused macb_resume(struct device *dev)
        struct net_device *netdev = dev_get_drvdata(dev);
        struct macb *bp = netdev_priv(netdev);
        struct macb_queue *queue = bp->queues;
+       unsigned long flags;
        unsigned int q;
+       int err;
 
        if (!netif_running(netdev))
                return 0;
 
-       pm_runtime_force_resume(dev);
+       if (!device_may_wakeup(dev))
+               pm_runtime_force_resume(dev);
 
        if (bp->wol & MACB_WOL_ENABLED) {
-               macb_writel(bp, IDR, MACB_BIT(WOL));
-               macb_writel(bp, WOL, 0);
-               disable_irq_wake(bp->queues[0].irq);
-       } else {
-               macb_writel(bp, NCR, MACB_BIT(MPE));
-
-               if (netdev->hw_features & NETIF_F_NTUPLE)
-                       gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
+               spin_lock_irqsave(&bp->lock, flags);
+               /* Disable WoL */
+               if (macb_is_gem(bp)) {
+                       queue_writel(bp->queues, IDR, GEM_BIT(WOL));
+                       gem_writel(bp, WOL, 0);
+               } else {
+                       queue_writel(bp->queues, IDR, MACB_BIT(WOL));
+                       macb_writel(bp, WOL, 0);
+               }
+               /* Clear ISR on queue 0 */
+               queue_readl(bp->queues, ISR);
+               if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+                       queue_writel(bp->queues, ISR, -1);
+               /* Replace interrupt handler on queue 0 */
+               devm_free_irq(dev, bp->queues[0].irq, bp->queues);
+               err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt,
+                                      IRQF_SHARED, netdev->name, bp->queues);
+               if (err) {
+                       dev_err(dev,
+                               "Unable to request IRQ %d (error %d)\n",
+                               bp->queues[0].irq, err);
+                       spin_unlock_irqrestore(&bp->lock, flags);
+                       return err;
+               }
+               spin_unlock_irqrestore(&bp->lock, flags);
 
-               if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
-                       macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);
+               disable_irq_wake(bp->queues[0].irq);
 
-               for (q = 0, queue = bp->queues; q < bp->num_queues;
-                    ++q, ++queue)
-                       napi_enable(&queue->napi);
+               /* Now make sure we disable phy before moving
+                * to common restore path
+                */
                rtnl_lock();
-               phylink_start(bp->phylink);
+               phylink_stop(bp->phylink);
                rtnl_unlock();
        }
 
+       for (q = 0, queue = bp->queues; q < bp->num_queues;
+            ++q, ++queue)
+               napi_enable(&queue->napi);
+
+       if (netdev->hw_features & NETIF_F_NTUPLE)
+               gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
+
+       if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
+               macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);
+
+       macb_writel(bp, NCR, MACB_BIT(MPE));
        macb_init_hw(bp);
        macb_set_rx_mode(netdev);
        macb_restore_features(bp);
+       rtnl_lock();
+       phylink_start(bp->phylink);
+       rtnl_unlock();
+
        netif_device_attach(netdev);
        if (bp->ptp_info)
                bp->ptp_info->ptp_init(netdev);
@@ -4619,7 +4789,7 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev)
        struct net_device *netdev = dev_get_drvdata(dev);
        struct macb *bp = netdev_priv(netdev);
 
-       if (!(device_may_wakeup(&bp->dev->dev))) {
+       if (!(device_may_wakeup(dev))) {
                clk_disable_unprepare(bp->tx_clk);
                clk_disable_unprepare(bp->hclk);
                clk_disable_unprepare(bp->pclk);
@@ -4635,7 +4805,7 @@ static int __maybe_unused macb_runtime_resume(struct device *dev)
        struct net_device *netdev = dev_get_drvdata(dev);
        struct macb *bp = netdev_priv(netdev);
 
-       if (!(device_may_wakeup(&bp->dev->dev))) {
+       if (!(device_may_wakeup(dev))) {
                clk_prepare_enable(bp->pclk);
                clk_prepare_enable(bp->hclk);
                clk_prepare_enable(bp->tx_clk);
index 617b3b7..cd7d033 100644 (file)
@@ -2,7 +2,7 @@
 /**
  * Cadence GEM PCI wrapper.
  *
- * Copyright (C) 2016 Cadence Design Systems - http://www.cadence.com
+ * Copyright (C) 2016 Cadence Design Systems - https://www.cadence.com
  *
  * Authors: Rafal Ozieblo <rafalo@cadence.com>
  *         Bartosz Folta <bfolta@cadence.com>
index 43a3f0d..31ebf3e 100644 (file)
@@ -2,7 +2,7 @@
 /**
  * 1588 PTP support for Cadence GEM device.
  *
- * Copyright (C) 2017 Cadence Design Systems - http://www.cadence.com
+ * Copyright (C) 2017 Cadence Design Systems - https://www.cadence.com
  *
  * Authors: Rafal Ozieblo <rafalo@cadence.com>
  *          Bartosz Folta <bfolta@cadence.com>
index 66d31c0..e73bc21 100644 (file)
@@ -405,27 +405,8 @@ static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
        /* Nothing to be done here. */
 }
 
-#ifdef CONFIG_PM
-/**
- * \brief called when suspending
- * @param pdev Pointer to PCI device
- * @param state state to suspend to
- */
-static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
-                           pm_message_t state __attribute__((unused)))
-{
-       return 0;
-}
-
-/**
- * \brief called when resuming
- * @param pdev Pointer to PCI device
- */
-static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
-{
-       return 0;
-}
-#endif
+#define liquidio_suspend NULL
+#define liquidio_resume NULL
 
 /* For PCI-E Advanced Error Recovery (AER) Interface */
 static const struct pci_error_handlers liquidio_err_handler = {
@@ -451,17 +432,15 @@ static const struct pci_device_id liquidio_pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
 
+static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume);
+
 static struct pci_driver liquidio_pci_driver = {
        .name           = "LiquidIO",
        .id_table       = liquidio_pci_tbl,
        .probe          = liquidio_probe,
        .remove         = liquidio_remove,
        .err_handler    = &liquidio_err_handler,    /* For AER */
-
-#ifdef CONFIG_PM
-       .suspend        = liquidio_suspend,
-       .resume         = liquidio_resume,
-#endif
+       .driver.pm      = &liquidio_pm_ops,
 #ifdef CONFIG_PCI_IOV
        .sriov_configure = liquidio_enable_sriov,
 #endif
@@ -2691,6 +2670,35 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
        return ret;
 }
 
+static int liquidio_udp_tunnel_set_port(struct net_device *netdev,
+                                       unsigned int table, unsigned int entry,
+                                       struct udp_tunnel_info *ti)
+{
+       return liquidio_vxlan_port_command(netdev,
+                                          OCTNET_CMD_VXLAN_PORT_CONFIG,
+                                          htons(ti->port),
+                                          OCTNET_CMD_VXLAN_PORT_ADD);
+}
+
+static int liquidio_udp_tunnel_unset_port(struct net_device *netdev,
+                                         unsigned int table,
+                                         unsigned int entry,
+                                         struct udp_tunnel_info *ti)
+{
+       return liquidio_vxlan_port_command(netdev,
+                                          OCTNET_CMD_VXLAN_PORT_CONFIG,
+                                          htons(ti->port),
+                                          OCTNET_CMD_VXLAN_PORT_DEL);
+}
+
+static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
+       .set_port       = liquidio_udp_tunnel_set_port,
+       .unset_port     = liquidio_udp_tunnel_unset_port,
+       .tables         = {
+               { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+       },
+};
+
 /** \brief Net device fix features
  * @param netdev  pointer to network device
  * @param request features requested
@@ -2779,30 +2787,6 @@ static int liquidio_set_features(struct net_device *netdev,
        return 0;
 }
 
-static void liquidio_add_vxlan_port(struct net_device *netdev,
-                                   struct udp_tunnel_info *ti)
-{
-       if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-               return;
-
-       liquidio_vxlan_port_command(netdev,
-                                   OCTNET_CMD_VXLAN_PORT_CONFIG,
-                                   htons(ti->port),
-                                   OCTNET_CMD_VXLAN_PORT_ADD);
-}
-
-static void liquidio_del_vxlan_port(struct net_device *netdev,
-                                   struct udp_tunnel_info *ti)
-{
-       if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-               return;
-
-       liquidio_vxlan_port_command(netdev,
-                                   OCTNET_CMD_VXLAN_PORT_CONFIG,
-                                   htons(ti->port),
-                                   OCTNET_CMD_VXLAN_PORT_DEL);
-}
-
 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
                                 u8 *mac, bool is_admin_assigned)
 {
@@ -3229,8 +3213,8 @@ static const struct net_device_ops lionetdevops = {
        .ndo_do_ioctl           = liquidio_ioctl,
        .ndo_fix_features       = liquidio_fix_features,
        .ndo_set_features       = liquidio_set_features,
-       .ndo_udp_tunnel_add     = liquidio_add_vxlan_port,
-       .ndo_udp_tunnel_del     = liquidio_del_vxlan_port,
+       .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+       .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
        .ndo_set_vf_mac         = liquidio_set_vf_mac,
        .ndo_set_vf_vlan        = liquidio_set_vf_vlan,
        .ndo_get_vf_config      = liquidio_get_vf_config,
@@ -3585,6 +3569,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
                netdev->hw_enc_features = (lio->enc_dev_capability &
                                           ~NETIF_F_LRO);
 
+               netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;
+
                lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
 
                netdev->vlan_features = lio->dev_capability;
index bbd9bfa..90ef210 100644 (file)
@@ -1767,6 +1767,35 @@ static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
        return ret;
 }
 
+static int liquidio_udp_tunnel_set_port(struct net_device *netdev,
+                                       unsigned int table, unsigned int entry,
+                                       struct udp_tunnel_info *ti)
+{
+       return liquidio_vxlan_port_command(netdev,
+                                          OCTNET_CMD_VXLAN_PORT_CONFIG,
+                                          htons(ti->port),
+                                          OCTNET_CMD_VXLAN_PORT_ADD);
+}
+
+static int liquidio_udp_tunnel_unset_port(struct net_device *netdev,
+                                         unsigned int table,
+                                         unsigned int entry,
+                                         struct udp_tunnel_info *ti)
+{
+       return liquidio_vxlan_port_command(netdev,
+                                          OCTNET_CMD_VXLAN_PORT_CONFIG,
+                                          htons(ti->port),
+                                          OCTNET_CMD_VXLAN_PORT_DEL);
+}
+
+static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
+       .set_port       = liquidio_udp_tunnel_set_port,
+       .unset_port     = liquidio_udp_tunnel_unset_port,
+       .tables         = {
+               { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+       },
+};
+
 /** \brief Net device fix features
  * @param netdev  pointer to network device
  * @param request features requested
@@ -1835,30 +1864,6 @@ static int liquidio_set_features(struct net_device *netdev,
        return 0;
 }
 
-static void liquidio_add_vxlan_port(struct net_device *netdev,
-                                   struct udp_tunnel_info *ti)
-{
-       if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-               return;
-
-       liquidio_vxlan_port_command(netdev,
-                                   OCTNET_CMD_VXLAN_PORT_CONFIG,
-                                   htons(ti->port),
-                                   OCTNET_CMD_VXLAN_PORT_ADD);
-}
-
-static void liquidio_del_vxlan_port(struct net_device *netdev,
-                                   struct udp_tunnel_info *ti)
-{
-       if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-               return;
-
-       liquidio_vxlan_port_command(netdev,
-                                   OCTNET_CMD_VXLAN_PORT_CONFIG,
-                                   htons(ti->port),
-                                   OCTNET_CMD_VXLAN_PORT_DEL);
-}
-
 static const struct net_device_ops lionetdevops = {
        .ndo_open               = liquidio_open,
        .ndo_stop               = liquidio_stop,
@@ -1873,8 +1878,8 @@ static const struct net_device_ops lionetdevops = {
        .ndo_do_ioctl           = liquidio_ioctl,
        .ndo_fix_features       = liquidio_fix_features,
        .ndo_set_features       = liquidio_set_features,
-       .ndo_udp_tunnel_add     = liquidio_add_vxlan_port,
-       .ndo_udp_tunnel_del     = liquidio_del_vxlan_port,
+       .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+       .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
 };
 
 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
@@ -2095,6 +2100,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
 
                netdev->hw_enc_features =
                    (lio->enc_dev_capability & ~NETIF_F_LRO);
+               netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;
+
                netdev->vlan_features = lio->dev_capability;
                /* Add any unchangeable hw features */
                lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
index 50201fc..ebe56bd 100644 (file)
@@ -612,7 +612,7 @@ static inline struct list_head *lio_list_delete_head(struct list_head *root)
 {
        struct list_head *node;
 
-       if (root->prev == root && root->next == root)
+       if (list_empty_careful(root))
                node = NULL;
        else
                node = root->next;
index 6dd65f9..8e59c28 100644 (file)
@@ -95,12 +95,10 @@ int octeon_init_instr_queue(struct octeon_device *oct,
        /* Initialize a list to holds requests that have been posted to Octeon
         * but has yet to be fetched by octeon
         */
-       iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
-                                              numa_node);
+       iq->request_list = vzalloc_node(array_size(num_descs, sizeof(*iq->request_list)),
+                                       numa_node);
        if (!iq->request_list)
-               iq->request_list =
-                       vmalloc(array_size(num_descs,
-                                          sizeof(*iq->request_list)));
+               iq->request_list = vzalloc(array_size(num_descs, sizeof(*iq->request_list)));
        if (!iq->request_list) {
                lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
                dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
@@ -108,8 +106,6 @@ int octeon_init_instr_queue(struct octeon_device *oct,
                return 1;
        }
 
-       memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);
-
        dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %pad count: %d\n",
                iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count);
 
index cbaa192..3e17ce0 100644 (file)
@@ -961,7 +961,7 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
                                PHY_INTERFACE_MODE_MII);
 
        if (!phydev)
-               return -ENODEV;
+               return -EPROBE_DEFER;
 
        return 0;
 }
@@ -1554,12 +1554,8 @@ static struct platform_driver octeon_mgmt_driver = {
        .remove         = octeon_mgmt_remove,
 };
 
-extern void octeon_mdiobus_force_mod_depencency(void);
-
 static int __init octeon_mgmt_mod_init(void)
 {
-       /* Force our mdiobus driver module to be loaded first. */
-       octeon_mdiobus_force_mod_depencency();
        return platform_driver_register(&octeon_mgmt_driver);
 }
 
@@ -1571,6 +1567,7 @@ static void __exit octeon_mgmt_mod_exit(void)
 module_init(octeon_mgmt_mod_init);
 module_exit(octeon_mgmt_mod_exit);
 
+MODULE_SOFTDEP("pre: mdio-cavium");
 MODULE_DESCRIPTION(DRV_DESCRIPTION);
 MODULE_AUTHOR("David Daney");
 MODULE_LICENSE("GPL");
index 069e741..a45223f 100644 (file)
@@ -1489,9 +1489,10 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
        int seg_subdescs = 0, desc_cnt = 0;
        int seg_len, total_len, data_left;
        int hdr_qentry = qentry;
-       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       int hdr_len;
+
+       hdr_len = tso_start(skb, &tso);
 
-       tso_start(skb, &tso);
        total_len = skb->len - hdr_len;
        while (total_len > 0) {
                char *hdr;
index b7f4325..f6f3ef9 100644 (file)
@@ -26,7 +26,7 @@ config CHELSIO_T1
          This driver supports Chelsio gigabit and 10-gigabit
          Ethernet cards. More information about adapter features and
          performance tuning is in
-         <file:Documentation/networking/device_drivers/chelsio/cxgb.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/chelsio/cxgb.rst>.
 
          For general information about Chelsio and our products, visit
          our website at <http://www.chelsio.com>.
index dcab94c..876f90e 100644 (file)
@@ -350,167 +350,6 @@ struct cudbg_qdesc_info {
 
 #define IREG_NUM_ELEM 4
 
-static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
-       {0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */
-       {0x7e40, 0x7e44, 0x040, 10}, /* t6_tp_pio_regs_40_to_49 */
-       {0x7e40, 0x7e44, 0x050, 10}, /* t6_tp_pio_regs_50_to_59 */
-       {0x7e40, 0x7e44, 0x060, 14}, /* t6_tp_pio_regs_60_to_6d */
-       {0x7e40, 0x7e44, 0x06F, 1}, /* t6_tp_pio_regs_6f */
-       {0x7e40, 0x7e44, 0x070, 6}, /* t6_tp_pio_regs_70_to_75 */
-       {0x7e40, 0x7e44, 0x130, 18}, /* t6_tp_pio_regs_130_to_141 */
-       {0x7e40, 0x7e44, 0x145, 19}, /* t6_tp_pio_regs_145_to_157 */
-       {0x7e40, 0x7e44, 0x160, 1}, /* t6_tp_pio_regs_160 */
-       {0x7e40, 0x7e44, 0x230, 25}, /* t6_tp_pio_regs_230_to_248 */
-       {0x7e40, 0x7e44, 0x24a, 3}, /* t6_tp_pio_regs_24c */
-       {0x7e40, 0x7e44, 0x8C0, 1} /* t6_tp_pio_regs_8c0 */
-};
-
-static const u32 t5_tp_pio_array[][IREG_NUM_ELEM] = {
-       {0x7e40, 0x7e44, 0x020, 28}, /* t5_tp_pio_regs_20_to_3b */
-       {0x7e40, 0x7e44, 0x040, 19}, /* t5_tp_pio_regs_40_to_52 */
-       {0x7e40, 0x7e44, 0x054, 2}, /* t5_tp_pio_regs_54_to_55 */
-       {0x7e40, 0x7e44, 0x060, 13}, /* t5_tp_pio_regs_60_to_6c */
-       {0x7e40, 0x7e44, 0x06F, 1}, /* t5_tp_pio_regs_6f */
-       {0x7e40, 0x7e44, 0x120, 4}, /* t5_tp_pio_regs_120_to_123 */
-       {0x7e40, 0x7e44, 0x12b, 2}, /* t5_tp_pio_regs_12b_to_12c */
-       {0x7e40, 0x7e44, 0x12f, 21}, /* t5_tp_pio_regs_12f_to_143 */
-       {0x7e40, 0x7e44, 0x145, 19}, /* t5_tp_pio_regs_145_to_157 */
-       {0x7e40, 0x7e44, 0x230, 25}, /* t5_tp_pio_regs_230_to_248 */
-       {0x7e40, 0x7e44, 0x8C0, 1} /* t5_tp_pio_regs_8c0 */
-};
-
-static const u32 t6_tp_tm_pio_array[][IREG_NUM_ELEM] = {
-       {0x7e18, 0x7e1c, 0x0, 12}
-};
-
-static const u32 t5_tp_tm_pio_array[][IREG_NUM_ELEM] = {
-       {0x7e18, 0x7e1c, 0x0, 12}
-};
-
-static const u32 t6_tp_mib_index_array[6][IREG_NUM_ELEM] = {
-       {0x7e50, 0x7e54, 0x0, 13},
-       {0x7e50, 0x7e54, 0x10, 6},
-       {0x7e50, 0x7e54, 0x18, 21},
-       {0x7e50, 0x7e54, 0x30, 32},
-       {0x7e50, 0x7e54, 0x50, 22},
-       {0x7e50, 0x7e54, 0x68, 12}
-};
-
-static const u32 t5_tp_mib_index_array[9][IREG_NUM_ELEM] = {
-       {0x7e50, 0x7e54, 0x0, 13},
-       {0x7e50, 0x7e54, 0x10, 6},
-       {0x7e50, 0x7e54, 0x18, 8},
-       {0x7e50, 0x7e54, 0x20, 13},
-       {0x7e50, 0x7e54, 0x30, 16},
-       {0x7e50, 0x7e54, 0x40, 16},
-       {0x7e50, 0x7e54, 0x50, 16},
-       {0x7e50, 0x7e54, 0x60, 6},
-       {0x7e50, 0x7e54, 0x68, 4}
-};
-
-static const u32 t5_sge_dbg_index_array[2][IREG_NUM_ELEM] = {
-       {0x10cc, 0x10d0, 0x0, 16},
-       {0x10cc, 0x10d4, 0x0, 16},
-};
-
-static const u32 t6_sge_qbase_index_array[] = {
-       /* 1 addr reg SGE_QBASE_INDEX and 4 data reg SGE_QBASE_MAP[0-3] */
-       0x1250, 0x1240, 0x1244, 0x1248, 0x124c,
-};
-
-static const u32 t5_pcie_pdbg_array[][IREG_NUM_ELEM] = {
-       {0x5a04, 0x5a0c, 0x00, 0x20}, /* t5_pcie_pdbg_regs_00_to_20 */
-       {0x5a04, 0x5a0c, 0x21, 0x20}, /* t5_pcie_pdbg_regs_21_to_40 */
-       {0x5a04, 0x5a0c, 0x41, 0x10}, /* t5_pcie_pdbg_regs_41_to_50 */
-};
-
-static const u32 t5_pcie_cdbg_array[][IREG_NUM_ELEM] = {
-       {0x5a10, 0x5a18, 0x00, 0x20}, /* t5_pcie_cdbg_regs_00_to_20 */
-       {0x5a10, 0x5a18, 0x21, 0x18}, /* t5_pcie_cdbg_regs_21_to_37 */
-};
-
-static const u32 t5_pm_rx_array[][IREG_NUM_ELEM] = {
-       {0x8FD0, 0x8FD4, 0x10000, 0x20}, /* t5_pm_rx_regs_10000_to_10020 */
-       {0x8FD0, 0x8FD4, 0x10021, 0x0D}, /* t5_pm_rx_regs_10021_to_1002c */
-};
-
-static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = {
-       {0x8FF0, 0x8FF4, 0x10000, 0x20}, /* t5_pm_tx_regs_10000_to_10020 */
-       {0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */
-};
-
 #define CUDBG_NUM_PCIE_CONFIG_REGS 0x61
 
-static const u32 t5_pcie_config_array[][2] = {
-       {0x0, 0x34},
-       {0x3c, 0x40},
-       {0x50, 0x64},
-       {0x70, 0x80},
-       {0x94, 0xa0},
-       {0xb0, 0xb8},
-       {0xd0, 0xd4},
-       {0x100, 0x128},
-       {0x140, 0x148},
-       {0x150, 0x164},
-       {0x170, 0x178},
-       {0x180, 0x194},
-       {0x1a0, 0x1b8},
-       {0x1c0, 0x208},
-};
-
-static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = {
-       {0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */
-       {0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */
-       {0x78f8, 0x78fc, 0xa800, 20} /* t6_ma_regs_a800_to_a813 */
-};
-
-static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = {
-       {0x78f8, 0x78fc, 0xe400, 17}, /* t6_ma_regs_e400_to_e600 */
-       {0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */
-};
-
-static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
-       {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
-       {0x7b50, 0x7b54, 0x2080, 0x1d, 0}, /* up_cim_2080_to_20fc */
-       {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
-       {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
-       {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
-       {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
-       {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
-       {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
-       {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
-       {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
-       {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
-       {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
-       {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
-       {0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */
-       {0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */
-       {0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */
-       {0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */
-       {0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */
-       {0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */
-       {0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */
-       {0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */
-       {0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */
-};
-
-static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
-       {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
-       {0x7b50, 0x7b54, 0x2080, 0x19, 0}, /* up_cim_2080_to_20ec */
-       {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
-       {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
-       {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
-       {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
-       {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
-       {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
-       {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
-       {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
-       {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
-       {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
-       {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
-};
-
-static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
-       {0x51320, 0x51324, 0xa000, 32} /* t6_hma_regs_a000_to_a01f */
-};
 #endif /* __CUDBG_ENTITY_H__ */
index fc38130..c84719e 100644 (file)
@@ -70,7 +70,8 @@ enum cudbg_dbg_entity_type {
        CUDBG_HMA_INDIRECT = 67,
        CUDBG_HMA = 68,
        CUDBG_QDESC = 70,
-       CUDBG_MAX_ENTITY = 71,
+       CUDBG_FLASH = 71,
+       CUDBG_MAX_ENTITY = 72,
 };
 
 struct cudbg_init {
index 7b9cd69..75474f8 100644 (file)
 #include "cudbg_lib.h"
 #include "cudbg_zlib.h"
 
+static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
+       {0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */
+       {0x7e40, 0x7e44, 0x040, 10}, /* t6_tp_pio_regs_40_to_49 */
+       {0x7e40, 0x7e44, 0x050, 10}, /* t6_tp_pio_regs_50_to_59 */
+       {0x7e40, 0x7e44, 0x060, 14}, /* t6_tp_pio_regs_60_to_6d */
+       {0x7e40, 0x7e44, 0x06F, 1}, /* t6_tp_pio_regs_6f */
+       {0x7e40, 0x7e44, 0x070, 6}, /* t6_tp_pio_regs_70_to_75 */
+       {0x7e40, 0x7e44, 0x130, 18}, /* t6_tp_pio_regs_130_to_141 */
+       {0x7e40, 0x7e44, 0x145, 19}, /* t6_tp_pio_regs_145_to_157 */
+       {0x7e40, 0x7e44, 0x160, 1}, /* t6_tp_pio_regs_160 */
+       {0x7e40, 0x7e44, 0x230, 25}, /* t6_tp_pio_regs_230_to_248 */
+       {0x7e40, 0x7e44, 0x24a, 3}, /* t6_tp_pio_regs_24c */
+       {0x7e40, 0x7e44, 0x8C0, 1} /* t6_tp_pio_regs_8c0 */
+};
+
+static const u32 t5_tp_pio_array[][IREG_NUM_ELEM] = {
+       {0x7e40, 0x7e44, 0x020, 28}, /* t5_tp_pio_regs_20_to_3b */
+       {0x7e40, 0x7e44, 0x040, 19}, /* t5_tp_pio_regs_40_to_52 */
+       {0x7e40, 0x7e44, 0x054, 2}, /* t5_tp_pio_regs_54_to_55 */
+       {0x7e40, 0x7e44, 0x060, 13}, /* t5_tp_pio_regs_60_to_6c */
+       {0x7e40, 0x7e44, 0x06F, 1}, /* t5_tp_pio_regs_6f */
+       {0x7e40, 0x7e44, 0x120, 4}, /* t5_tp_pio_regs_120_to_123 */
+       {0x7e40, 0x7e44, 0x12b, 2}, /* t5_tp_pio_regs_12b_to_12c */
+       {0x7e40, 0x7e44, 0x12f, 21}, /* t5_tp_pio_regs_12f_to_143 */
+       {0x7e40, 0x7e44, 0x145, 19}, /* t5_tp_pio_regs_145_to_157 */
+       {0x7e40, 0x7e44, 0x230, 25}, /* t5_tp_pio_regs_230_to_248 */
+       {0x7e40, 0x7e44, 0x8C0, 1} /* t5_tp_pio_regs_8c0 */
+};
+
+static const u32 t6_tp_tm_pio_array[][IREG_NUM_ELEM] = {
+       {0x7e18, 0x7e1c, 0x0, 12}
+};
+
+static const u32 t5_tp_tm_pio_array[][IREG_NUM_ELEM] = {
+       {0x7e18, 0x7e1c, 0x0, 12}
+};
+
+static const u32 t6_tp_mib_index_array[6][IREG_NUM_ELEM] = {
+       {0x7e50, 0x7e54, 0x0, 13},
+       {0x7e50, 0x7e54, 0x10, 6},
+       {0x7e50, 0x7e54, 0x18, 21},
+       {0x7e50, 0x7e54, 0x30, 32},
+       {0x7e50, 0x7e54, 0x50, 22},
+       {0x7e50, 0x7e54, 0x68, 12}
+};
+
+static const u32 t5_tp_mib_index_array[9][IREG_NUM_ELEM] = {
+       {0x7e50, 0x7e54, 0x0, 13},
+       {0x7e50, 0x7e54, 0x10, 6},
+       {0x7e50, 0x7e54, 0x18, 8},
+       {0x7e50, 0x7e54, 0x20, 13},
+       {0x7e50, 0x7e54, 0x30, 16},
+       {0x7e50, 0x7e54, 0x40, 16},
+       {0x7e50, 0x7e54, 0x50, 16},
+       {0x7e50, 0x7e54, 0x60, 6},
+       {0x7e50, 0x7e54, 0x68, 4}
+};
+
+static const u32 t5_sge_dbg_index_array[2][IREG_NUM_ELEM] = {
+       {0x10cc, 0x10d0, 0x0, 16},
+       {0x10cc, 0x10d4, 0x0, 16},
+};
+
+static const u32 t6_sge_qbase_index_array[] = {
+       /* 1 addr reg SGE_QBASE_INDEX and 4 data reg SGE_QBASE_MAP[0-3] */
+       0x1250, 0x1240, 0x1244, 0x1248, 0x124c,
+};
+
+static const u32 t5_pcie_pdbg_array[][IREG_NUM_ELEM] = {
+       {0x5a04, 0x5a0c, 0x00, 0x20}, /* t5_pcie_pdbg_regs_00_to_20 */
+       {0x5a04, 0x5a0c, 0x21, 0x20}, /* t5_pcie_pdbg_regs_21_to_40 */
+       {0x5a04, 0x5a0c, 0x41, 0x10}, /* t5_pcie_pdbg_regs_41_to_50 */
+};
+
+static const u32 t5_pcie_cdbg_array[][IREG_NUM_ELEM] = {
+       {0x5a10, 0x5a18, 0x00, 0x20}, /* t5_pcie_cdbg_regs_00_to_20 */
+       {0x5a10, 0x5a18, 0x21, 0x18}, /* t5_pcie_cdbg_regs_21_to_37 */
+};
+
+static const u32 t5_pm_rx_array[][IREG_NUM_ELEM] = {
+       {0x8FD0, 0x8FD4, 0x10000, 0x20}, /* t5_pm_rx_regs_10000_to_10020 */
+       {0x8FD0, 0x8FD4, 0x10021, 0x0D}, /* t5_pm_rx_regs_10021_to_1002c */
+};
+
+static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = {
+       {0x8FF0, 0x8FF4, 0x10000, 0x20}, /* t5_pm_tx_regs_10000_to_10020 */
+       {0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */
+};
+
+static const u32 t5_pcie_config_array[][2] = {
+       {0x0, 0x34},
+       {0x3c, 0x40},
+       {0x50, 0x64},
+       {0x70, 0x80},
+       {0x94, 0xa0},
+       {0xb0, 0xb8},
+       {0xd0, 0xd4},
+       {0x100, 0x128},
+       {0x140, 0x148},
+       {0x150, 0x164},
+       {0x170, 0x178},
+       {0x180, 0x194},
+       {0x1a0, 0x1b8},
+       {0x1c0, 0x208},
+};
+
+static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = {
+       {0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */
+       {0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */
+       {0x78f8, 0x78fc, 0xa800, 20} /* t6_ma_regs_a800_to_a813 */
+};
+
+static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = {
+       {0x78f8, 0x78fc, 0xe400, 17}, /* t6_ma_regs_e400_to_e600 */
+       {0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */
+};
+
+static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+       {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
+       {0x7b50, 0x7b54, 0x2080, 0x1d, 0}, /* up_cim_2080_to_20fc */
+       {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
+       {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
+       {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
+       {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
+       {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
+       {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
+       {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
+       {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
+       {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
+       {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
+       {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
+       {0x7b50, 0x7b54, 0x4900, 0x4, 0x4}, /* up_cim_4900_to_4c60 */
+       {0x7b50, 0x7b54, 0x4904, 0x4, 0x4}, /* up_cim_4904_to_4c64 */
+       {0x7b50, 0x7b54, 0x4908, 0x4, 0x4}, /* up_cim_4908_to_4c68 */
+       {0x7b50, 0x7b54, 0x4910, 0x4, 0x4}, /* up_cim_4910_to_4c70 */
+       {0x7b50, 0x7b54, 0x4914, 0x4, 0x4}, /* up_cim_4914_to_4c74 */
+       {0x7b50, 0x7b54, 0x4920, 0x10, 0x10}, /* up_cim_4920_to_4a10 */
+       {0x7b50, 0x7b54, 0x4924, 0x10, 0x10}, /* up_cim_4924_to_4a14 */
+       {0x7b50, 0x7b54, 0x4928, 0x10, 0x10}, /* up_cim_4928_to_4a18 */
+       {0x7b50, 0x7b54, 0x492c, 0x10, 0x10}, /* up_cim_492c_to_4a1c */
+};
+
+static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+       {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
+       {0x7b50, 0x7b54, 0x2080, 0x19, 0}, /* up_cim_2080_to_20ec */
+       {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
+       {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
+       {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
+       {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
+       {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
+       {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
+       {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
+       {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
+       {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
+       {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
+       {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
+};
+
+static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
+       {0x51320, 0x51324, 0xa000, 32} /* t6_hma_regs_a000_to_a01f */
+};
+
+u32 cudbg_get_entity_length(struct adapter *adap, u32 entity)
+{
+       struct cudbg_tcam tcam_region = { 0 };
+       u32 value, n = 0, len = 0;
+
+       switch (entity) {
+       case CUDBG_REG_DUMP:
+               switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+               case CHELSIO_T4:
+                       len = T4_REGMAP_SIZE;
+                       break;
+               case CHELSIO_T5:
+               case CHELSIO_T6:
+                       len = T5_REGMAP_SIZE;
+                       break;
+               default:
+                       break;
+               }
+               break;
+       case CUDBG_DEV_LOG:
+               len = adap->params.devlog.size;
+               break;
+       case CUDBG_CIM_LA:
+               if (is_t6(adap->params.chip)) {
+                       len = adap->params.cim_la_size / 10 + 1;
+                       len *= 10 * sizeof(u32);
+               } else {
+                       len = adap->params.cim_la_size / 8;
+                       len *= 8 * sizeof(u32);
+               }
+               len += sizeof(u32); /* for reading CIM LA configuration */
+               break;
+       case CUDBG_CIM_MA_LA:
+               len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
+               break;
+       case CUDBG_CIM_QCFG:
+               len = sizeof(struct cudbg_cim_qcfg);
+               break;
+       case CUDBG_CIM_IBQ_TP0:
+       case CUDBG_CIM_IBQ_TP1:
+       case CUDBG_CIM_IBQ_ULP:
+       case CUDBG_CIM_IBQ_SGE0:
+       case CUDBG_CIM_IBQ_SGE1:
+       case CUDBG_CIM_IBQ_NCSI:
+               len = CIM_IBQ_SIZE * 4 * sizeof(u32);
+               break;
+       case CUDBG_CIM_OBQ_ULP0:
+               len = cudbg_cim_obq_size(adap, 0);
+               break;
+       case CUDBG_CIM_OBQ_ULP1:
+               len = cudbg_cim_obq_size(adap, 1);
+               break;
+       case CUDBG_CIM_OBQ_ULP2:
+               len = cudbg_cim_obq_size(adap, 2);
+               break;
+       case CUDBG_CIM_OBQ_ULP3:
+               len = cudbg_cim_obq_size(adap, 3);
+               break;
+       case CUDBG_CIM_OBQ_SGE:
+               len = cudbg_cim_obq_size(adap, 4);
+               break;
+       case CUDBG_CIM_OBQ_NCSI:
+               len = cudbg_cim_obq_size(adap, 5);
+               break;
+       case CUDBG_CIM_OBQ_RXQ0:
+               len = cudbg_cim_obq_size(adap, 6);
+               break;
+       case CUDBG_CIM_OBQ_RXQ1:
+               len = cudbg_cim_obq_size(adap, 7);
+               break;
+       case CUDBG_EDC0:
+               value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+               if (value & EDRAM0_ENABLE_F) {
+                       value = t4_read_reg(adap, MA_EDRAM0_BAR_A);
+                       len = EDRAM0_SIZE_G(value);
+               }
+               len = cudbg_mbytes_to_bytes(len);
+               break;
+       case CUDBG_EDC1:
+               value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+               if (value & EDRAM1_ENABLE_F) {
+                       value = t4_read_reg(adap, MA_EDRAM1_BAR_A);
+                       len = EDRAM1_SIZE_G(value);
+               }
+               len = cudbg_mbytes_to_bytes(len);
+               break;
+       case CUDBG_MC0:
+               value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+               if (value & EXT_MEM0_ENABLE_F) {
+                       value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+                       len = EXT_MEM0_SIZE_G(value);
+               }
+               len = cudbg_mbytes_to_bytes(len);
+               break;
+       case CUDBG_MC1:
+               value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+               if (value & EXT_MEM1_ENABLE_F) {
+                       value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+                       len = EXT_MEM1_SIZE_G(value);
+               }
+               len = cudbg_mbytes_to_bytes(len);
+               break;
+       case CUDBG_RSS:
+               len = t4_chip_rss_size(adap) * sizeof(u16);
+               break;
+       case CUDBG_RSS_VF_CONF:
+               len = adap->params.arch.vfcount *
+                     sizeof(struct cudbg_rss_vf_conf);
+               break;
+       case CUDBG_PATH_MTU:
+               len = NMTUS * sizeof(u16);
+               break;
+       case CUDBG_PM_STATS:
+               len = sizeof(struct cudbg_pm_stats);
+               break;
+       case CUDBG_HW_SCHED:
+               len = sizeof(struct cudbg_hw_sched);
+               break;
+       case CUDBG_TP_INDIRECT:
+               switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+               case CHELSIO_T5:
+                       n = sizeof(t5_tp_pio_array) +
+                           sizeof(t5_tp_tm_pio_array) +
+                           sizeof(t5_tp_mib_index_array);
+                       break;
+               case CHELSIO_T6:
+                       n = sizeof(t6_tp_pio_array) +
+                           sizeof(t6_tp_tm_pio_array) +
+                           sizeof(t6_tp_mib_index_array);
+                       break;
+               default:
+                       break;
+               }
+               n = n / (IREG_NUM_ELEM * sizeof(u32));
+               len = sizeof(struct ireg_buf) * n;
+               break;
+       case CUDBG_SGE_INDIRECT:
+               len = sizeof(struct ireg_buf) * 2 +
+                     sizeof(struct sge_qbase_reg_field);
+               break;
+       case CUDBG_ULPRX_LA:
+               len = sizeof(struct cudbg_ulprx_la);
+               break;
+       case CUDBG_TP_LA:
+               len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
+               break;
+       case CUDBG_MEMINFO:
+               len = sizeof(struct cudbg_ver_hdr) +
+                     sizeof(struct cudbg_meminfo);
+               break;
+       case CUDBG_CIM_PIF_LA:
+               len = sizeof(struct cudbg_cim_pif_la);
+               len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
+               break;
+       case CUDBG_CLK:
+               len = sizeof(struct cudbg_clk_info);
+               break;
+       case CUDBG_PCIE_INDIRECT:
+               n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
+               len = sizeof(struct ireg_buf) * n * 2;
+               break;
+       case CUDBG_PM_INDIRECT:
+               n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
+               len = sizeof(struct ireg_buf) * n * 2;
+               break;
+       case CUDBG_TID_INFO:
+               len = sizeof(struct cudbg_tid_info_region_rev1);
+               break;
+       case CUDBG_PCIE_CONFIG:
+               len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
+               break;
+       case CUDBG_DUMP_CONTEXT:
+               len = cudbg_dump_context_size(adap);
+               break;
+       case CUDBG_MPS_TCAM:
+               len = sizeof(struct cudbg_mps_tcam) *
+                     adap->params.arch.mps_tcam_size;
+               break;
+       case CUDBG_VPD_DATA:
+               len = sizeof(struct cudbg_vpd_data);
+               break;
+       case CUDBG_LE_TCAM:
+               cudbg_fill_le_tcam_info(adap, &tcam_region);
+               len = sizeof(struct cudbg_tcam) +
+                     sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
+               break;
+       case CUDBG_CCTRL:
+               len = sizeof(u16) * NMTUS * NCCTRL_WIN;
+               break;
+       case CUDBG_MA_INDIRECT:
+               if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
+                       n = sizeof(t6_ma_ireg_array) /
+                           (IREG_NUM_ELEM * sizeof(u32));
+                       len = sizeof(struct ireg_buf) * n * 2;
+               }
+               break;
+       case CUDBG_ULPTX_LA:
+               len = sizeof(struct cudbg_ver_hdr) +
+                     sizeof(struct cudbg_ulptx_la);
+               break;
+       case CUDBG_UP_CIM_INDIRECT:
+               n = 0;
+               if (is_t5(adap->params.chip))
+                       n = sizeof(t5_up_cim_reg_array) /
+                           ((IREG_NUM_ELEM + 1) * sizeof(u32));
+               else if (is_t6(adap->params.chip))
+                       n = sizeof(t6_up_cim_reg_array) /
+                           ((IREG_NUM_ELEM + 1) * sizeof(u32));
+               len = sizeof(struct ireg_buf) * n;
+               break;
+       case CUDBG_PBT_TABLE:
+               len = sizeof(struct cudbg_pbt_tables);
+               break;
+       case CUDBG_MBOX_LOG:
+               len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size;
+               break;
+       case CUDBG_HMA_INDIRECT:
+               if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
+                       n = sizeof(t6_hma_ireg_array) /
+                           (IREG_NUM_ELEM * sizeof(u32));
+                       len = sizeof(struct ireg_buf) * n;
+               }
+               break;
+       case CUDBG_HMA:
+               value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+               if (value & HMA_MUX_F) {
+                       /* In T6, there's no MC1.  So, HMA shares MC1
+                        * address space.
+                        */
+                       value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+                       len = EXT_MEM1_SIZE_G(value);
+               }
+               len = cudbg_mbytes_to_bytes(len);
+               break;
+       case CUDBG_QDESC:
+               cudbg_fill_qdesc_num_and_size(adap, NULL, &len);
+               break;
+       default:
+               break;
+       }
+
+       return len;
+}
+
 static int cudbg_do_compression(struct cudbg_init *pdbg_init,
                                struct cudbg_buffer *pin_buff,
                                struct cudbg_buffer *dbg_buff)
@@ -1975,7 +2381,6 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
        u8 mem_type[CTXT_INGRESS + 1] = { 0 };
        struct cudbg_buffer temp_buff = { 0 };
        struct cudbg_ch_cntxt *buff;
-       u64 *dst_off, *src_off;
        u8 *ctx_buf;
        u8 i, k;
        int rc;
@@ -2044,8 +2449,11 @@ int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
                }
 
                for (j = 0; j < max_ctx_qid; j++) {
+                       __be64 *dst_off;
+                       u64 *src_off;
+
                        src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
-                       dst_off = (u64 *)buff->data;
+                       dst_off = (__be64 *)buff->data;
 
                        /* The data is stored in 64-bit cpu order.  Convert it
                         * to big endian before parsing.
@@ -3156,3 +3564,40 @@ out_free:
 
        return rc;
 }
+
+int cudbg_collect_flash(struct cudbg_init *pdbg_init,
+                       struct cudbg_buffer *dbg_buff,
+                       struct cudbg_error *cudbg_err)
+{
+       struct adapter *padap = pdbg_init->adap;
+       u32 count = padap->params.sf_size, n;
+       struct cudbg_buffer temp_buff = {0};
+       u32 addr, i;
+       int rc;
+
+       addr = FLASH_EXP_ROM_START;
+
+       for (i = 0; i < count; i += SF_PAGE_SIZE) {
+               n = min_t(u32, count - i, SF_PAGE_SIZE);
+
+               rc = cudbg_get_buff(pdbg_init, dbg_buff, n, &temp_buff);
+               if (rc) {
+                       cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
+                       goto out;
+               }
+               rc = t4_read_flash(padap, addr, n, (u32 *)temp_buff.data, 0);
+               if (rc)
+                       goto out;
+
+               addr += (n * 4);
+               rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
+                                                 dbg_buff);
+               if (rc) {
+                       cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
+                       goto out;
+               }
+       }
+
+out:
+       return rc;
+}
index 10ee6ed..d6d6cd2 100644 (file)
@@ -162,7 +162,11 @@ int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
 int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
                        struct cudbg_buffer *dbg_buff,
                        struct cudbg_error *cudbg_err);
+int cudbg_collect_flash(struct cudbg_init *pdbg_init,
+                       struct cudbg_buffer *dbg_buff,
+                       struct cudbg_error *cudbg_err);
 
+u32 cudbg_get_entity_length(struct adapter *adap, u32 entity);
 struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i);
 void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
                              struct cudbg_entity_hdr *entity_hdr);
index cf69c6e..adbc0d0 100644 (file)
@@ -139,6 +139,64 @@ enum cc_fec {
        FEC_BASER_RS  = 1 << 2   /* BaseR/Reed-Solomon */
 };
 
+enum {
+       CXGB4_ETHTOOL_FLASH_FW = 1,
+       CXGB4_ETHTOOL_FLASH_PHY = 2,
+       CXGB4_ETHTOOL_FLASH_BOOT = 3,
+       CXGB4_ETHTOOL_FLASH_BOOTCFG = 4
+};
+
+struct cxgb4_bootcfg_data {
+       __le16 signature;
+       __u8 reserved[2];
+};
+
+struct cxgb4_pcir_data {
+       __le32 signature;       /* Signature. The string "PCIR" */
+       __le16 vendor_id;       /* Vendor Identification */
+       __le16 device_id;       /* Device Identification */
+       __u8 vital_product[2];  /* Pointer to Vital Product Data */
+       __u8 length[2];         /* PCIR Data Structure Length */
+       __u8 revision;          /* PCIR Data Structure Revision */
+       __u8 class_code[3];     /* Class Code */
+       __u8 image_length[2];   /* Image Length. Multiple of 512B */
+       __u8 code_revision[2];  /* Revision Level of Code/Data */
+       __u8 code_type;
+       __u8 indicator;
+       __u8 reserved[2];
+};
+
+/* BIOS boot headers */
+struct cxgb4_pci_exp_rom_header {
+       __le16 signature;       /* ROM Signature. Should be 0xaa55 */
+       __u8 reserved[22];      /* Reserved per processor Architecture data */
+       __le16 pcir_offset;     /* Offset to PCI Data Structure */
+};
+
+/* Legacy PCI Expansion ROM Header */
+struct legacy_pci_rom_hdr {
+       __u8 signature[2];      /* ROM Signature. Should be 0xaa55 */
+       __u8 size512;           /* Current Image Size in units of 512 bytes */
+       __u8 initentry_point[4];
+       __u8 cksum;             /* Checksum computed on the entire Image */
+       __u8 reserved[16];      /* Reserved */
+       __le16 pcir_offset;     /* Offset to PCI Data Struture */
+};
+
+#define CXGB4_HDR_CODE1 0x00
+#define CXGB4_HDR_CODE2 0x03
+#define CXGB4_HDR_INDI 0x80
+
+/* BOOT constants */
+enum {
+       BOOT_CFG_SIG = 0x4243,
+       BOOT_SIZE_INC = 512,
+       BOOT_SIGNATURE = 0xaa55,
+       BOOT_MIN_SIZE = sizeof(struct cxgb4_pci_exp_rom_header),
+       BOOT_MAX_SIZE = 1024 * BOOT_SIZE_INC,
+       PCIR_SIGNATURE = 0x52494350
+};
+
 struct port_stats {
        u64 tx_octets;            /* total # of octets in good frames */
        u64 tx_frames;            /* all good frames */
@@ -474,6 +532,12 @@ static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log,
                FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD))
 #define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)
 
+struct cxgb4_ethtool_lb_test {
+       struct completion completion;
+       int result;
+       int loopback;
+};
+
 struct fw_info {
        u8 chip;
        char *fs_name;
@@ -492,6 +556,11 @@ struct trace_params {
        unsigned char port;
 };
 
+struct cxgb4_fw_data {
+       __be32 signature;
+       __u8 reserved[4];
+};
+
 /* Firmware Port Capabilities types. */
 
 typedef u16 fw_port_cap16_t;   /* 16-bit Port Capabilities integral value */
@@ -616,6 +685,13 @@ struct port_info {
        u8 rx_cchan;
 
        bool tc_block_shared;
+
+       /* Mirror VI information */
+       u16 viid_mirror;
+       u16 nmirrorqsets;
+       u32 vi_mirror_count;
+       struct mutex vi_mirror_mutex; /* Sync access to Mirror VI info */
+       struct cxgb4_ethtool_lb_test ethtool_lb;
 };
 
 struct dentry;
@@ -642,6 +718,13 @@ enum {
        ULP_CRYPTO_KTLS_INLINE  = 1 << 3,
 };
 
+#define CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM 1024
+#define CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE 64
+#define CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC 5
+#define CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT 8
+
+#define CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM 72
+
 struct rx_sw_desc;
 
 struct sge_fl {                     /* SGE free-buffer queue state */
@@ -891,12 +974,15 @@ struct sge {
        struct sge_eohw_txq *eohw_txq;
        struct sge_ofld_rxq *eohw_rxq;
 
+       struct sge_eth_rxq *mirror_rxq[NCHAN];
+
        u16 max_ethqsets;           /* # of available Ethernet queue sets */
        u16 ethqsets;               /* # of active Ethernet queue sets */
        u16 ethtxq_rover;           /* Tx queue to clean up next */
        u16 ofldqsets;              /* # of active ofld queue sets */
        u16 nqs_per_uld;            /* # of Rx queues per ULD */
        u16 eoqsets;                /* # of ETHOFLD queues */
+       u16 mirrorqsets;            /* # of Mirror queues */
 
        u16 timer_val[SGE_NTIMERS];
        u8 counter_val[SGE_NCOUNTERS];
@@ -1003,6 +1089,17 @@ struct mps_entries_ref {
        refcount_t refcnt;
 };
 
+struct cxgb4_ethtool_filter_info {
+       u32 *loc_array; /* Array holding the actual TIDs set to filters */
+       unsigned long *bmap; /* Bitmap for managing filters in use */
+       u32 in_use; /* # of filters in use */
+};
+
+struct cxgb4_ethtool_filter {
+       u32 nentries; /* Adapter wide number of supported filters */
+       struct cxgb4_ethtool_filter_info *port; /* Per port entry */
+};
+
 struct adapter {
        void __iomem *regs;
        void __iomem *bar2;
@@ -1019,9 +1116,7 @@ struct adapter {
 
        int msg_enable;
        __be16 vxlan_port;
-       u8 vxlan_port_cnt;
        __be16 geneve_port;
-       u8 geneve_port_cnt;
 
        struct adapter_params params;
        struct cxgb4_virt_res vres;
@@ -1128,6 +1223,9 @@ struct adapter {
 
        /* TC MATCHALL classifier offload */
        struct cxgb4_tc_matchall *tc_matchall;
+
+       /* Ethtool n-tuple */
+       struct cxgb4_ethtool_filter *ethtool_filters;
 };
 
 /* Support for "sched-class" command to allow a TX Scheduling Class to be
@@ -1504,6 +1602,7 @@ void t4_free_sge_resources(struct adapter *adap);
 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
 irq_handler_t t4_intr_handler(struct adapter *adap);
 netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev);
+int cxgb4_selftest_lb_pkt(struct net_device *netdev);
 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
                     const struct pkt_gl *gl);
 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
@@ -1736,8 +1835,7 @@ int t4_get_pfres(struct adapter *adapter);
 int t4_read_flash(struct adapter *adapter, unsigned int addr,
                  unsigned int nwords, u32 *data, int byte_oriented);
 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
-int t4_load_phy_fw(struct adapter *adap,
-                  int win, spinlock_t *lock,
+int t4_load_phy_fw(struct adapter *adap, int win,
                   int (*phy_fw_version)(const u8 *, size_t),
                   const u8 *phy_fw_data, size_t phy_fw_size);
 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver);
@@ -1781,6 +1879,8 @@ int t4_init_rss_mode(struct adapter *adap, int mbox);
 int t4_init_portinfo(struct port_info *pi, int mbox,
                     int port, int pf, int vf, u8 mac[]);
 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
+int t4_init_port_mirror(struct port_info *pi, u8 mbox, u8 port, u8 pf, u8 vf,
+                       u16 *mirror_viid);
 void t4_fatal_err(struct adapter *adapter);
 unsigned int t4_chip_rss_size(struct adapter *adapter);
 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
@@ -1890,8 +1990,8 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox,
               unsigned int pf, unsigned int vf,
               unsigned int viid);
 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
-               int mtu, int promisc, int all_multi, int bcast, int vlanex,
-               bool sleep_ok);
+                 unsigned int viid_mirror, int mtu, int promisc, int all_multi,
+                 int bcast, int vlanex, bool sleep_ok);
 int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
                         const u8 *addr, const u8 *mask, unsigned int idx,
                         u8 lookup_type, u8 port_id, bool sleep_ok);
@@ -1988,6 +2088,10 @@ void t4_register_netevent_notifier(void);
 int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
              unsigned int devid, unsigned int offset,
              unsigned int len, u8 *buf);
+int t4_load_boot(struct adapter *adap, u8 *boot_data,
+                unsigned int boot_addr, unsigned int size);
+int t4_load_bootcfg(struct adapter *adap,
+                   const u8 *cfg_data, unsigned int size);
 void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
 void free_tx_desc(struct adapter *adap, struct sge_txq *q,
                  unsigned int n, bool unmap);
@@ -2061,6 +2165,8 @@ int cxgb_open(struct net_device *dev);
 int cxgb_close(struct net_device *dev);
 void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
 void cxgb4_quiesce_rx(struct sge_rspq *q);
+int cxgb4_port_mirror_alloc(struct net_device *dev);
+void cxgb4_port_mirror_free(struct net_device *dev);
 #ifdef CONFIG_CHELSIO_TLS_DEVICE
 int cxgb4_set_ktls_feature(struct adapter *adap, bool enable);
 #endif
index e374b41..77648e4 100644 (file)
@@ -66,249 +66,9 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
        { CUDBG_HMA_INDIRECT, cudbg_collect_hma_indirect },
 };
 
-static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
-{
-       struct cudbg_tcam tcam_region = { 0 };
-       u32 value, n = 0, len = 0;
-
-       switch (entity) {
-       case CUDBG_REG_DUMP:
-               switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
-               case CHELSIO_T4:
-                       len = T4_REGMAP_SIZE;
-                       break;
-               case CHELSIO_T5:
-               case CHELSIO_T6:
-                       len = T5_REGMAP_SIZE;
-                       break;
-               default:
-                       break;
-               }
-               break;
-       case CUDBG_DEV_LOG:
-               len = adap->params.devlog.size;
-               break;
-       case CUDBG_CIM_LA:
-               if (is_t6(adap->params.chip)) {
-                       len = adap->params.cim_la_size / 10 + 1;
-                       len *= 10 * sizeof(u32);
-               } else {
-                       len = adap->params.cim_la_size / 8;
-                       len *= 8 * sizeof(u32);
-               }
-               len += sizeof(u32); /* for reading CIM LA configuration */
-               break;
-       case CUDBG_CIM_MA_LA:
-               len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
-               break;
-       case CUDBG_CIM_QCFG:
-               len = sizeof(struct cudbg_cim_qcfg);
-               break;
-       case CUDBG_CIM_IBQ_TP0:
-       case CUDBG_CIM_IBQ_TP1:
-       case CUDBG_CIM_IBQ_ULP:
-       case CUDBG_CIM_IBQ_SGE0:
-       case CUDBG_CIM_IBQ_SGE1:
-       case CUDBG_CIM_IBQ_NCSI:
-               len = CIM_IBQ_SIZE * 4 * sizeof(u32);
-               break;
-       case CUDBG_CIM_OBQ_ULP0:
-               len = cudbg_cim_obq_size(adap, 0);
-               break;
-       case CUDBG_CIM_OBQ_ULP1:
-               len = cudbg_cim_obq_size(adap, 1);
-               break;
-       case CUDBG_CIM_OBQ_ULP2:
-               len = cudbg_cim_obq_size(adap, 2);
-               break;
-       case CUDBG_CIM_OBQ_ULP3:
-               len = cudbg_cim_obq_size(adap, 3);
-               break;
-       case CUDBG_CIM_OBQ_SGE:
-               len = cudbg_cim_obq_size(adap, 4);
-               break;
-       case CUDBG_CIM_OBQ_NCSI:
-               len = cudbg_cim_obq_size(adap, 5);
-               break;
-       case CUDBG_CIM_OBQ_RXQ0:
-               len = cudbg_cim_obq_size(adap, 6);
-               break;
-       case CUDBG_CIM_OBQ_RXQ1:
-               len = cudbg_cim_obq_size(adap, 7);
-               break;
-       case CUDBG_EDC0:
-               value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
-               if (value & EDRAM0_ENABLE_F) {
-                       value = t4_read_reg(adap, MA_EDRAM0_BAR_A);
-                       len = EDRAM0_SIZE_G(value);
-               }
-               len = cudbg_mbytes_to_bytes(len);
-               break;
-       case CUDBG_EDC1:
-               value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
-               if (value & EDRAM1_ENABLE_F) {
-                       value = t4_read_reg(adap, MA_EDRAM1_BAR_A);
-                       len = EDRAM1_SIZE_G(value);
-               }
-               len = cudbg_mbytes_to_bytes(len);
-               break;
-       case CUDBG_MC0:
-               value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
-               if (value & EXT_MEM0_ENABLE_F) {
-                       value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
-                       len = EXT_MEM0_SIZE_G(value);
-               }
-               len = cudbg_mbytes_to_bytes(len);
-               break;
-       case CUDBG_MC1:
-               value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
-               if (value & EXT_MEM1_ENABLE_F) {
-                       value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
-                       len = EXT_MEM1_SIZE_G(value);
-               }
-               len = cudbg_mbytes_to_bytes(len);
-               break;
-       case CUDBG_RSS:
-               len = t4_chip_rss_size(adap) * sizeof(u16);
-               break;
-       case CUDBG_RSS_VF_CONF:
-               len = adap->params.arch.vfcount *
-                     sizeof(struct cudbg_rss_vf_conf);
-               break;
-       case CUDBG_PATH_MTU:
-               len = NMTUS * sizeof(u16);
-               break;
-       case CUDBG_PM_STATS:
-               len = sizeof(struct cudbg_pm_stats);
-               break;
-       case CUDBG_HW_SCHED:
-               len = sizeof(struct cudbg_hw_sched);
-               break;
-       case CUDBG_TP_INDIRECT:
-               switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
-               case CHELSIO_T5:
-                       n = sizeof(t5_tp_pio_array) +
-                           sizeof(t5_tp_tm_pio_array) +
-                           sizeof(t5_tp_mib_index_array);
-                       break;
-               case CHELSIO_T6:
-                       n = sizeof(t6_tp_pio_array) +
-                           sizeof(t6_tp_tm_pio_array) +
-                           sizeof(t6_tp_mib_index_array);
-                       break;
-               default:
-                       break;
-               }
-               n = n / (IREG_NUM_ELEM * sizeof(u32));
-               len = sizeof(struct ireg_buf) * n;
-               break;
-       case CUDBG_SGE_INDIRECT:
-               len = sizeof(struct ireg_buf) * 2 +
-                     sizeof(struct sge_qbase_reg_field);
-               break;
-       case CUDBG_ULPRX_LA:
-               len = sizeof(struct cudbg_ulprx_la);
-               break;
-       case CUDBG_TP_LA:
-               len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
-               break;
-       case CUDBG_MEMINFO:
-               len = sizeof(struct cudbg_ver_hdr) +
-                     sizeof(struct cudbg_meminfo);
-               break;
-       case CUDBG_CIM_PIF_LA:
-               len = sizeof(struct cudbg_cim_pif_la);
-               len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
-               break;
-       case CUDBG_CLK:
-               len = sizeof(struct cudbg_clk_info);
-               break;
-       case CUDBG_PCIE_INDIRECT:
-               n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
-               len = sizeof(struct ireg_buf) * n * 2;
-               break;
-       case CUDBG_PM_INDIRECT:
-               n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
-               len = sizeof(struct ireg_buf) * n * 2;
-               break;
-       case CUDBG_TID_INFO:
-               len = sizeof(struct cudbg_tid_info_region_rev1);
-               break;
-       case CUDBG_PCIE_CONFIG:
-               len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
-               break;
-       case CUDBG_DUMP_CONTEXT:
-               len = cudbg_dump_context_size(adap);
-               break;
-       case CUDBG_MPS_TCAM:
-               len = sizeof(struct cudbg_mps_tcam) *
-                     adap->params.arch.mps_tcam_size;
-               break;
-       case CUDBG_VPD_DATA:
-               len = sizeof(struct cudbg_vpd_data);
-               break;
-       case CUDBG_LE_TCAM:
-               cudbg_fill_le_tcam_info(adap, &tcam_region);
-               len = sizeof(struct cudbg_tcam) +
-                     sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
-               break;
-       case CUDBG_CCTRL:
-               len = sizeof(u16) * NMTUS * NCCTRL_WIN;
-               break;
-       case CUDBG_MA_INDIRECT:
-               if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
-                       n = sizeof(t6_ma_ireg_array) /
-                           (IREG_NUM_ELEM * sizeof(u32));
-                       len = sizeof(struct ireg_buf) * n * 2;
-               }
-               break;
-       case CUDBG_ULPTX_LA:
-               len = sizeof(struct cudbg_ver_hdr) +
-                     sizeof(struct cudbg_ulptx_la);
-               break;
-       case CUDBG_UP_CIM_INDIRECT:
-               n = 0;
-               if (is_t5(adap->params.chip))
-                       n = sizeof(t5_up_cim_reg_array) /
-                           ((IREG_NUM_ELEM + 1) * sizeof(u32));
-               else if (is_t6(adap->params.chip))
-                       n = sizeof(t6_up_cim_reg_array) /
-                           ((IREG_NUM_ELEM + 1) * sizeof(u32));
-               len = sizeof(struct ireg_buf) * n;
-               break;
-       case CUDBG_PBT_TABLE:
-               len = sizeof(struct cudbg_pbt_tables);
-               break;
-       case CUDBG_MBOX_LOG:
-               len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size;
-               break;
-       case CUDBG_HMA_INDIRECT:
-               if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
-                       n = sizeof(t6_hma_ireg_array) /
-                           (IREG_NUM_ELEM * sizeof(u32));
-                       len = sizeof(struct ireg_buf) * n;
-               }
-               break;
-       case CUDBG_HMA:
-               value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
-               if (value & HMA_MUX_F) {
-                       /* In T6, there's no MC1.  So, HMA shares MC1
-                        * address space.
-                        */
-                       value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
-                       len = EXT_MEM1_SIZE_G(value);
-               }
-               len = cudbg_mbytes_to_bytes(len);
-               break;
-       case CUDBG_QDESC:
-               cudbg_fill_qdesc_num_and_size(adap, NULL, &len);
-               break;
-       default:
-               break;
-       }
-
-       return len;
-}
+static const struct cxgb4_collect_entity cxgb4_collect_flash_dump[] = {
+       { CUDBG_FLASH, cudbg_collect_flash },
+};
 
 u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
 {
@@ -319,17 +79,20 @@ u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
        if (flag & CXGB4_ETH_DUMP_HW) {
                for (i = 0; i < ARRAY_SIZE(cxgb4_collect_hw_dump); i++) {
                        entity = cxgb4_collect_hw_dump[i].entity;
-                       len += cxgb4_get_entity_length(adap, entity);
+                       len += cudbg_get_entity_length(adap, entity);
                }
        }
 
        if (flag & CXGB4_ETH_DUMP_MEM) {
                for (i = 0; i < ARRAY_SIZE(cxgb4_collect_mem_dump); i++) {
                        entity = cxgb4_collect_mem_dump[i].entity;
-                       len += cxgb4_get_entity_length(adap, entity);
+                       len += cudbg_get_entity_length(adap, entity);
                }
        }
 
+       if (flag & CXGB4_ETH_DUMP_FLASH)
+               len += adap->params.sf_size;
+
        /* If compression is enabled, a smaller destination buffer is enough */
        wsize = cudbg_get_workspace_size();
        if (wsize && len > CUDBG_DUMP_BUFF_SIZE)
@@ -468,6 +231,13 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
                                           buf,
                                           &total_size);
 
+       if (flag & CXGB4_ETH_DUMP_FLASH)
+               cxgb4_cudbg_collect_entity(&cudbg_init, &dbg_buff,
+                                          cxgb4_collect_flash_dump,
+                                          ARRAY_SIZE(cxgb4_collect_flash_dump),
+                                          buf,
+                                          &total_size);
+
        cudbg_free_compress_buff(&cudbg_init);
        cudbg_hdr->data_len = total_size;
        if (cudbg_init.compress_type != CUDBG_COMPRESSION_NONE)
index 66b805c..c04a49b 100644 (file)
@@ -27,6 +27,7 @@ enum CXGB4_ETHTOOL_DUMP_FLAGS {
        CXGB4_ETH_DUMP_NONE = ETH_FW_DUMP_DISABLE,
        CXGB4_ETH_DUMP_MEM = (1 << 0), /* On-Chip Memory Dumps */
        CXGB4_ETH_DUMP_HW = (1 << 1), /* various FW and HW dumps */
+       CXGB4_ETH_DUMP_FLASH = (1 << 2), /* Dump flash memory */
 };
 
 #define CXGB4_ETH_DUMP_ALL (CXGB4_ETH_DUMP_MEM | CXGB4_ETH_DUMP_HW)
index d3c654b..80c6627 100644 (file)
@@ -136,6 +136,9 @@ static inline __u8 bitswap_1(unsigned char val)
               ((val & 0x02) << 5) |
               ((val & 0x01) << 7);
 }
+
+extern const char * const dcb_ver_array[];
+
 #define CXGB4_DCB_ENABLED true
 
 #else /* !CONFIG_CHELSIO_T4_DCB */
index 8284992..05f33b7 100644 (file)
@@ -2379,7 +2379,6 @@ static const struct file_operations rss_vf_config_debugfs_fops = {
 };
 
 #ifdef CONFIG_CHELSIO_T4_DCB
-extern char *dcb_ver_array[];
 
 /* Data Center Briging information for each port.
  */
@@ -2743,6 +2742,58 @@ do { \
        }
 
        r -= eth_entries;
+       for_each_port(adap, j) {
+               struct port_info *pi = adap2pinfo(adap, j);
+               const struct sge_eth_rxq *rx;
+
+               mutex_lock(&pi->vi_mirror_mutex);
+               if (!pi->vi_mirror_count) {
+                       mutex_unlock(&pi->vi_mirror_mutex);
+                       continue;
+               }
+
+               if (r >= DIV_ROUND_UP(pi->nmirrorqsets, 4)) {
+                       r -= DIV_ROUND_UP(pi->nmirrorqsets, 4);
+                       mutex_unlock(&pi->vi_mirror_mutex);
+                       continue;
+               }
+
+               rx = &s->mirror_rxq[j][r * 4];
+               n = min(4, pi->nmirrorqsets - 4 * r);
+
+               S("QType:", "Mirror-Rxq");
+               S("Interface:",
+                 rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");
+               R("RspQ ID:", rspq.abs_id);
+               R("RspQ size:", rspq.size);
+               R("RspQE size:", rspq.iqe_len);
+               R("RspQ CIDX:", rspq.cidx);
+               R("RspQ Gen:", rspq.gen);
+               S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));
+               S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);
+               R("FL ID:", fl.cntxt_id);
+               R("FL size:", fl.size - 8);
+               R("FL pend:", fl.pend_cred);
+               R("FL avail:", fl.avail);
+               R("FL PIDX:", fl.pidx);
+               R("FL CIDX:", fl.cidx);
+               RL("RxPackets:", stats.pkts);
+               RL("RxCSO:", stats.rx_cso);
+               RL("VLANxtract:", stats.vlan_ex);
+               RL("LROmerged:", stats.lro_merged);
+               RL("LROpackets:", stats.lro_pkts);
+               RL("RxDrops:", stats.rx_drops);
+               RL("RxBadPkts:", stats.bad_rx_pkts);
+               RL("FLAllocErr:", fl.alloc_failed);
+               RL("FLLrgAlcErr:", fl.large_alloc_failed);
+               RL("FLMapErr:", fl.mapping_err);
+               RL("FLLow:", fl.low);
+               RL("FLStarving:", fl.starving);
+
+               mutex_unlock(&pi->vi_mirror_mutex);
+               goto out;
+       }
+
        if (!adap->tc_mqprio)
                goto skip_mqprio;
 
@@ -3099,9 +3150,10 @@ unlock:
        return 0;
 }
 
-static int sge_queue_entries(const struct adapter *adap)
+static int sge_queue_entries(struct adapter *adap)
 {
        int i, tot_uld_entries = 0, eohw_entries = 0, eosw_entries = 0;
+       int mirror_rxq_entries = 0;
 
        if (adap->tc_mqprio) {
                struct cxgb4_tc_port_mqprio *port_mqprio;
@@ -3124,6 +3176,15 @@ static int sge_queue_entries(const struct adapter *adap)
                mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
        }
 
+       for_each_port(adap, i) {
+               struct port_info *pi = adap2pinfo(adap, i);
+
+               mutex_lock(&pi->vi_mirror_mutex);
+               if (pi->vi_mirror_count)
+                       mirror_rxq_entries += DIV_ROUND_UP(pi->nmirrorqsets, 4);
+               mutex_unlock(&pi->vi_mirror_mutex);
+       }
+
        if (!is_uld(adap))
                goto lld_only;
 
@@ -3138,7 +3199,7 @@ static int sge_queue_entries(const struct adapter *adap)
        mutex_unlock(&uld_mutex);
 
 lld_only:
-       return DIV_ROUND_UP(adap->sge.ethqsets, 4) +
+       return DIV_ROUND_UP(adap->sge.ethqsets, 4) + mirror_rxq_entries +
               eohw_entries + eosw_entries + tot_uld_entries +
               DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1;
 }
index 9fd4967..12ef9dd 100644 (file)
@@ -10,6 +10,8 @@
 #include "t4_regs.h"
 #include "t4fw_api.h"
 #include "cxgb4_cudbg.h"
+#include "cxgb4_filter.h"
+#include "cxgb4_tc_flower.h"
 
 #define EEPROM_MAGIC 0x38E2F10C
 
@@ -23,6 +25,23 @@ static void set_msglevel(struct net_device *dev, u32 val)
        netdev2adap(dev)->msg_enable = val;
 }
 
+enum cxgb4_ethtool_tests {
+       CXGB4_ETHTOOL_LB_TEST,
+       CXGB4_ETHTOOL_MAX_TEST,
+};
+
+static const char cxgb4_selftest_strings[CXGB4_ETHTOOL_MAX_TEST][ETH_GSTRING_LEN] = {
+       "Loop back test",
+};
+
+static const char * const flash_region_strings[] = {
+       "All",
+       "Firmware",
+       "PHY Firmware",
+       "Boot",
+       "Boot CFG",
+};
+
 static const char stats_strings[][ETH_GSTRING_LEN] = {
        "tx_octets_ok           ",
        "tx_frames_ok           ",
@@ -156,6 +175,8 @@ static int get_sset_count(struct net_device *dev, int sset)
                       ARRAY_SIZE(loopback_stats_strings);
        case ETH_SS_PRIV_FLAGS:
                return ARRAY_SIZE(cxgb4_priv_flags_strings);
+       case ETH_SS_TEST:
+               return ARRAY_SIZE(cxgb4_selftest_strings);
        default:
                return -EOPNOTSUPP;
        }
@@ -218,6 +239,9 @@ static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
        } else if (stringset == ETH_SS_PRIV_FLAGS) {
                memcpy(data, cxgb4_priv_flags_strings,
                       sizeof(cxgb4_priv_flags_strings));
+       } else if (stringset == ETH_SS_TEST) {
+               memcpy(data, cxgb4_selftest_strings,
+                      sizeof(cxgb4_selftest_strings));
        }
 }
 
@@ -588,7 +612,7 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
 /**
  *     lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware
  *     capabilities
- *     @et_lmm: ethtool Link Mode Mask
+ *     @link_mode_mask: ethtool Link Mode Mask
  *
  *     Translate ethtool Link Mode Mask into a Firmware Port capabilities
  *     value.
@@ -1235,15 +1259,211 @@ out:
        return err;
 }
 
-static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
+static int cxgb4_ethtool_flash_bootcfg(struct net_device *netdev,
+                                      const u8 *data, u32 size)
 {
+       struct adapter *adap = netdev2adap(netdev);
        int ret;
-       const struct firmware *fw;
+
+       ret = t4_load_bootcfg(adap, data, size);
+       if (ret)
+               dev_err(adap->pdev_dev, "Failed to load boot cfg image\n");
+
+       return ret;
+}
+
+static int cxgb4_ethtool_flash_boot(struct net_device *netdev,
+                                   const u8 *bdata, u32 size)
+{
+       struct adapter *adap = netdev2adap(netdev);
+       unsigned int offset;
+       u8 *data;
+       int ret;
+
+       data = kmemdup(bdata, size, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       offset = OFFSET_G(t4_read_reg(adap, PF_REG(0, PCIE_PF_EXPROM_OFST_A)));
+
+       ret = t4_load_boot(adap, data, offset, size);
+       if (ret)
+               dev_err(adap->pdev_dev, "Failed to load boot image\n");
+
+       kfree(data);
+       return ret;
+}
+
+#define CXGB4_PHY_SIG 0x130000ea
+
+static int cxgb4_validate_phy_image(const u8 *data, u32 *size)
+{
+       struct cxgb4_fw_data *header;
+
+       header = (struct cxgb4_fw_data *)data;
+       if (be32_to_cpu(header->signature) != CXGB4_PHY_SIG)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
+                                  const u8 *data, u32 size)
+{
+       struct adapter *adap = netdev2adap(netdev);
+       int ret;
+
+       ret = cxgb4_validate_phy_image(data, NULL);
+       if (ret) {
+               dev_err(adap->pdev_dev, "PHY signature mismatch\n");
+               return ret;
+       }
+
+       spin_lock_bh(&adap->win0_lock);
+       ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
+       spin_unlock_bh(&adap->win0_lock);
+       if (ret)
+               dev_err(adap->pdev_dev, "Failed to load PHY FW\n");
+
+       return ret;
+}
+
+static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
+                                 const u8 *data, u32 size)
+{
        struct adapter *adap = netdev2adap(netdev);
        unsigned int mbox = PCIE_FW_MASTER_M + 1;
-       u32 pcie_fw;
+       int ret;
+
+       /* If the adapter has been fully initialized then we'll go ahead and
+        * try to get the firmware's cooperation in upgrading to the new
+        * firmware image otherwise we'll try to do the entire job from the
+        * host ... and we always "force" the operation in this path.
+        */
+       if (adap->flags & CXGB4_FULL_INIT_DONE)
+               mbox = adap->mbox;
+
+       ret = t4_fw_upgrade(adap, mbox, data, size, 1);
+       if (ret)
+               dev_err(adap->pdev_dev,
+                       "Failed to flash firmware\n");
+
+       return ret;
+}
+
+static int cxgb4_ethtool_flash_region(struct net_device *netdev,
+                                     const u8 *data, u32 size, u32 region)
+{
+       struct adapter *adap = netdev2adap(netdev);
+       int ret;
+
+       switch (region) {
+       case CXGB4_ETHTOOL_FLASH_FW:
+               ret = cxgb4_ethtool_flash_fw(netdev, data, size);
+               break;
+       case CXGB4_ETHTOOL_FLASH_PHY:
+               ret = cxgb4_ethtool_flash_phy(netdev, data, size);
+               break;
+       case CXGB4_ETHTOOL_FLASH_BOOT:
+               ret = cxgb4_ethtool_flash_boot(netdev, data, size);
+               break;
+       case CXGB4_ETHTOOL_FLASH_BOOTCFG:
+               ret = cxgb4_ethtool_flash_bootcfg(netdev, data, size);
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+               break;
+       }
+
+       if (!ret)
+               dev_info(adap->pdev_dev,
+                        "loading %s successful, reload cxgb4 driver\n",
+                        flash_region_strings[region]);
+       return ret;
+}
+
+#define CXGB4_FW_SIG 0x4368656c
+#define CXGB4_FW_SIG_OFFSET 0x160
+
+static int cxgb4_validate_fw_image(const u8 *data, u32 *size)
+{
+       struct cxgb4_fw_data *header;
+
+       header = (struct cxgb4_fw_data *)&data[CXGB4_FW_SIG_OFFSET];
+       if (be32_to_cpu(header->signature) != CXGB4_FW_SIG)
+               return -EINVAL;
+
+       if (size)
+               *size = be16_to_cpu(((struct fw_hdr *)data)->len512) * 512;
+
+       return 0;
+}
+
+static int cxgb4_validate_bootcfg_image(const u8 *data, u32 *size)
+{
+       struct cxgb4_bootcfg_data *header;
+
+       header = (struct cxgb4_bootcfg_data *)data;
+       if (le16_to_cpu(header->signature) != BOOT_CFG_SIG)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int cxgb4_validate_boot_image(const u8 *data, u32 *size)
+{
+       struct cxgb4_pci_exp_rom_header *exp_header;
+       struct cxgb4_pcir_data *pcir_header;
+       struct legacy_pci_rom_hdr *header;
+       const u8 *cur_header = data;
+       u16 pcir_offset;
+
+       exp_header = (struct cxgb4_pci_exp_rom_header *)data;
+
+       if (le16_to_cpu(exp_header->signature) != BOOT_SIGNATURE)
+               return -EINVAL;
+
+       if (size) {
+               do {
+                       header = (struct legacy_pci_rom_hdr *)cur_header;
+                       pcir_offset = le16_to_cpu(header->pcir_offset);
+                       pcir_header = (struct cxgb4_pcir_data *)(cur_header +
+                                     pcir_offset);
+
+                       *size += header->size512 * 512;
+                       cur_header += header->size512 * 512;
+               } while (!(pcir_header->indicator & CXGB4_HDR_INDI));
+       }
+
+       return 0;
+}
+
+static int cxgb4_ethtool_get_flash_region(const u8 *data, u32 *size)
+{
+       if (!cxgb4_validate_fw_image(data, size))
+               return CXGB4_ETHTOOL_FLASH_FW;
+       if (!cxgb4_validate_boot_image(data, size))
+               return CXGB4_ETHTOOL_FLASH_BOOT;
+       if (!cxgb4_validate_phy_image(data, size))
+               return CXGB4_ETHTOOL_FLASH_PHY;
+       if (!cxgb4_validate_bootcfg_image(data, size))
+               return CXGB4_ETHTOOL_FLASH_BOOTCFG;
+
+       return -EOPNOTSUPP;
+}
+
+static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
+{
+       struct adapter *adap = netdev2adap(netdev);
+       const struct firmware *fw;
        unsigned int master;
        u8 master_vld = 0;
+       const u8 *fw_data;
+       size_t fw_size;
+       u32 size = 0;
+       u32 pcie_fw;
+       int region;
+       int ret;
 
        pcie_fw = t4_read_reg(adap, PCIE_FW_A);
        master = PCIE_FW_MASTER_G(pcie_fw);
@@ -1261,19 +1481,32 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
        if (ret < 0)
                return ret;
 
-       /* If the adapter has been fully initialized then we'll go ahead and
-        * try to get the firmware's cooperation in upgrading to the new
-        * firmware image otherwise we'll try to do the entire job from the
-        * host ... and we always "force" the operation in this path.
-        */
-       if (adap->flags & CXGB4_FULL_INIT_DONE)
-               mbox = adap->mbox;
+       fw_data = fw->data;
+       fw_size = fw->size;
+       if (ef->region == ETHTOOL_FLASH_ALL_REGIONS) {
+               while (fw_size > 0) {
+                       size = 0;
+                       region = cxgb4_ethtool_get_flash_region(fw_data, &size);
+                       if (region < 0 || !size) {
+                               ret = region;
+                               goto out_free_fw;
+                       }
+
+                       ret = cxgb4_ethtool_flash_region(netdev, fw_data, size,
+                                                        region);
+                       if (ret)
+                               goto out_free_fw;
 
-       ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
+                       fw_data += size;
+                       fw_size -= size;
+               }
+       } else {
+               ret = cxgb4_ethtool_flash_region(netdev, fw_data, fw_size,
+                                                ef->region);
+       }
+
+out_free_fw:
        release_firmware(fw);
-       if (!ret)
-               dev_info(adap->pdev_dev,
-                        "loaded firmware %s, reload cxgb4 driver\n", ef->data);
        return ret;
 }
 
@@ -1355,10 +1588,120 @@ static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
        return -EPERM;
 }
 
+static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
+                                                  u32 ftid)
+{
+       struct tid_info *t = &adap->tids;
+       struct filter_entry *f;
+
+       if (ftid < t->nhpftids)
+               f = &adap->tids.hpftid_tab[ftid];
+       else if (ftid < t->nftids)
+               f = &adap->tids.ftid_tab[ftid - t->nhpftids];
+       else
+               f = lookup_tid(&adap->tids, ftid);
+
+       return f;
+}
+
+static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
+                                  struct ch_filter_specification *dfs)
+{
+       switch (dfs->val.proto) {
+       case IPPROTO_TCP:
+               if (dfs->type)
+                       fs->flow_type = TCP_V6_FLOW;
+               else
+                       fs->flow_type = TCP_V4_FLOW;
+               break;
+       case IPPROTO_UDP:
+               if (dfs->type)
+                       fs->flow_type = UDP_V6_FLOW;
+               else
+                       fs->flow_type = UDP_V4_FLOW;
+               break;
+       }
+
+       if (dfs->type) {
+               fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->val.fport);
+               fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(dfs->mask.fport);
+               fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->val.lport);
+               fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(dfs->mask.lport);
+               memcpy(&fs->h_u.tcp_ip6_spec.ip6src, &dfs->val.fip[0],
+                      sizeof(fs->h_u.tcp_ip6_spec.ip6src));
+               memcpy(&fs->m_u.tcp_ip6_spec.ip6src, &dfs->mask.fip[0],
+                      sizeof(fs->m_u.tcp_ip6_spec.ip6src));
+               memcpy(&fs->h_u.tcp_ip6_spec.ip6dst, &dfs->val.lip[0],
+                      sizeof(fs->h_u.tcp_ip6_spec.ip6dst));
+               memcpy(&fs->m_u.tcp_ip6_spec.ip6dst, &dfs->mask.lip[0],
+                      sizeof(fs->m_u.tcp_ip6_spec.ip6dst));
+               fs->h_u.tcp_ip6_spec.tclass = dfs->val.tos;
+               fs->m_u.tcp_ip6_spec.tclass = dfs->mask.tos;
+       } else {
+               fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->val.fport);
+               fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(dfs->mask.fport);
+               fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->val.lport);
+               fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(dfs->mask.lport);
+               memcpy(&fs->h_u.tcp_ip4_spec.ip4src, &dfs->val.fip[0],
+                      sizeof(fs->h_u.tcp_ip4_spec.ip4src));
+               memcpy(&fs->m_u.tcp_ip4_spec.ip4src, &dfs->mask.fip[0],
+                      sizeof(fs->m_u.tcp_ip4_spec.ip4src));
+               memcpy(&fs->h_u.tcp_ip4_spec.ip4dst, &dfs->val.lip[0],
+                      sizeof(fs->h_u.tcp_ip4_spec.ip4dst));
+               memcpy(&fs->m_u.tcp_ip4_spec.ip4dst, &dfs->mask.lip[0],
+                      sizeof(fs->m_u.tcp_ip4_spec.ip4dst));
+               fs->h_u.tcp_ip4_spec.tos = dfs->val.tos;
+               fs->m_u.tcp_ip4_spec.tos = dfs->mask.tos;
+       }
+       fs->h_ext.vlan_tci = cpu_to_be16(dfs->val.ivlan);
+       fs->m_ext.vlan_tci = cpu_to_be16(dfs->mask.ivlan);
+       fs->flow_type |= FLOW_EXT;
+
+       if (dfs->action == FILTER_DROP)
+               fs->ring_cookie = RX_CLS_FLOW_DISC;
+       else
+               fs->ring_cookie = dfs->iq;
+}
+
+static int cxgb4_ntuple_get_filter(struct net_device *dev,
+                                  struct ethtool_rxnfc *cmd,
+                                  unsigned int loc)
+{
+       const struct port_info *pi = netdev_priv(dev);
+       struct adapter *adap = netdev2adap(dev);
+       struct filter_entry *f;
+       int ftid;
+
+       if (!(adap->flags & CXGB4_FULL_INIT_DONE))
+               return -EAGAIN;
+
+       /* Check for maximum filter range */
+       if (!adap->ethtool_filters)
+               return -EOPNOTSUPP;
+
+       if (loc >= adap->ethtool_filters->nentries)
+               return -ERANGE;
+
+       if (!test_bit(loc, adap->ethtool_filters->port[pi->port_id].bmap))
+               return -ENOENT;
+
+       ftid = adap->ethtool_filters->port[pi->port_id].loc_array[loc];
+
+       /* Fetch filter_entry */
+       f = cxgb4_get_filter_entry(adap, ftid);
+
+       cxgb4_fill_filter_rule(&cmd->fs, &f->fs);
+
+       return 0;
+}
+
 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
                     u32 *rules)
 {
        const struct port_info *pi = netdev_priv(dev);
+       struct adapter *adap = netdev2adap(dev);
+       unsigned int count = 0, index = 0;
+       int ret = 0;
 
        switch (info->cmd) {
        case ETHTOOL_GRXFH: {
@@ -1414,10 +1757,144 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
        case ETHTOOL_GRXRINGS:
                info->data = pi->nqsets;
                return 0;
+       case ETHTOOL_GRXCLSRLCNT:
+               info->rule_cnt =
+                      adap->ethtool_filters->port[pi->port_id].in_use;
+               return 0;
+       case ETHTOOL_GRXCLSRULE:
+               return cxgb4_ntuple_get_filter(dev, info, info->fs.location);
+       case ETHTOOL_GRXCLSRLALL:
+               info->data = adap->ethtool_filters->nentries;
+               while (count < info->rule_cnt) {
+                       ret = cxgb4_ntuple_get_filter(dev, info, index);
+                       if (!ret)
+                               rules[count++] = index;
+                       index++;
+               }
+               return 0;
        }
+
        return -EOPNOTSUPP;
 }
 
+static int cxgb4_ntuple_del_filter(struct net_device *dev,
+                                  struct ethtool_rxnfc *cmd)
+{
+       struct cxgb4_ethtool_filter_info *filter_info;
+       struct adapter *adapter = netdev2adap(dev);
+       struct port_info *pi = netdev_priv(dev);
+       struct filter_entry *f;
+       u32 filter_id;
+       int ret;
+
+       if (!(adapter->flags & CXGB4_FULL_INIT_DONE))
+               return -EAGAIN;  /* can still change nfilters */
+
+       if (!adapter->ethtool_filters)
+               return -EOPNOTSUPP;
+
+       if (cmd->fs.location >= adapter->ethtool_filters->nentries) {
+               dev_err(adapter->pdev_dev,
+                       "Location must be < %u",
+                       adapter->ethtool_filters->nentries);
+               return -ERANGE;
+       }
+
+       filter_info = &adapter->ethtool_filters->port[pi->port_id];
+
+       if (!test_bit(cmd->fs.location, filter_info->bmap))
+               return -ENOENT;
+
+       filter_id = filter_info->loc_array[cmd->fs.location];
+       f = cxgb4_get_filter_entry(adapter, filter_id);
+
+       ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
+       if (ret)
+               goto err;
+
+       clear_bit(cmd->fs.location, filter_info->bmap);
+       filter_info->in_use--;
+
+err:
+       return ret;
+}
+
+/* Add Ethtool n-tuple filters. */
+static int cxgb4_ntuple_set_filter(struct net_device *netdev,
+                                  struct ethtool_rxnfc *cmd)
+{
+       struct ethtool_rx_flow_spec_input input = {};
+       struct cxgb4_ethtool_filter_info *filter_info;
+       struct adapter *adapter = netdev2adap(netdev);
+       struct port_info *pi = netdev_priv(netdev);
+       struct ch_filter_specification fs;
+       struct ethtool_rx_flow_rule *flow;
+       u32 tid;
+       int ret;
+
+       if (!(adapter->flags & CXGB4_FULL_INIT_DONE))
+               return -EAGAIN;  /* can still change nfilters */
+
+       if (!adapter->ethtool_filters)
+               return -EOPNOTSUPP;
+
+       if (cmd->fs.location >= adapter->ethtool_filters->nentries) {
+               dev_err(adapter->pdev_dev,
+                       "Location must be < %u",
+                       adapter->ethtool_filters->nentries);
+               return -ERANGE;
+       }
+
+       if (test_bit(cmd->fs.location,
+                    adapter->ethtool_filters->port[pi->port_id].bmap))
+               return -EEXIST;
+
+       memset(&fs, 0, sizeof(fs));
+
+       input.fs = &cmd->fs;
+       flow = ethtool_rx_flow_rule_create(&input);
+       if (IS_ERR(flow)) {
+               ret = PTR_ERR(flow);
+               goto exit;
+       }
+
+       fs.hitcnts = 1;
+
+       ret = cxgb4_flow_rule_replace(netdev, flow->rule, cmd->fs.location,
+                                     NULL, &fs, &tid);
+       if (ret)
+               goto free;
+
+       filter_info = &adapter->ethtool_filters->port[pi->port_id];
+
+       filter_info->loc_array[cmd->fs.location] = tid;
+       set_bit(cmd->fs.location, filter_info->bmap);
+       filter_info->in_use++;
+
+free:
+       ethtool_rx_flow_rule_destroy(flow);
+exit:
+       return ret;
+}
+
+static int set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+       int ret = -EOPNOTSUPP;
+
+       switch (cmd->cmd) {
+       case ETHTOOL_SRXCLSRLINS:
+               ret = cxgb4_ntuple_set_filter(dev, cmd);
+               break;
+       case ETHTOOL_SRXCLSRLDEL:
+               ret = cxgb4_ntuple_del_filter(dev, cmd);
+               break;
+       default:
+               break;
+       }
+
+       return ret;
+}
+
 static int set_dump(struct net_device *dev, struct ethtool_dump *eth_dump)
 {
        struct adapter *adapter = netdev2adap(dev);
@@ -1593,6 +2070,43 @@ static int cxgb4_set_priv_flags(struct net_device *netdev, u32 flags)
        return 0;
 }
 
+static void cxgb4_lb_test(struct net_device *netdev, u64 *lb_status)
+{
+       int dev_state = netif_running(netdev);
+
+       if (dev_state) {
+               netif_tx_stop_all_queues(netdev);
+               netif_carrier_off(netdev);
+       }
+
+       *lb_status = cxgb4_selftest_lb_pkt(netdev);
+
+       if (dev_state) {
+               netif_tx_start_all_queues(netdev);
+               netif_carrier_on(netdev);
+       }
+}
+
+static void cxgb4_self_test(struct net_device *netdev,
+                           struct ethtool_test *eth_test, u64 *data)
+{
+       struct port_info *pi = netdev_priv(netdev);
+       struct adapter *adap = pi->adapter;
+
+       memset(data, 0, sizeof(u64) * CXGB4_ETHTOOL_MAX_TEST);
+
+       if (!(adap->flags & CXGB4_FW_OK)) {
+               eth_test->flags |= ETH_TEST_FL_FAILED;
+               return;
+       }
+
+       if (eth_test->flags == ETH_TEST_FL_OFFLINE)
+               cxgb4_lb_test(netdev, &data[CXGB4_ETHTOOL_LB_TEST]);
+
+       if (data[CXGB4_ETHTOOL_LB_TEST])
+               eth_test->flags |= ETH_TEST_FL_FAILED;
+}
+
 static const struct ethtool_ops cxgb_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
                                     ETHTOOL_COALESCE_RX_MAX_FRAMES |
@@ -1623,9 +2137,11 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
        .get_regs_len      = get_regs_len,
        .get_regs          = get_regs,
        .get_rxnfc         = get_rxnfc,
+       .set_rxnfc         = set_rxnfc,
        .get_rxfh_indir_size = get_rss_table_size,
        .get_rxfh          = get_rss_table,
        .set_rxfh          = set_rss_table,
+       .self_test         = cxgb4_self_test,
        .flash_device      = set_flash,
        .get_ts_info       = get_ts_info,
        .set_dump          = set_dump,
@@ -1637,6 +2153,87 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
        .set_priv_flags    = cxgb4_set_priv_flags,
 };
 
+void cxgb4_cleanup_ethtool_filters(struct adapter *adap)
+{
+       struct cxgb4_ethtool_filter_info *eth_filter_info;
+       u8 i;
+
+       if (!adap->ethtool_filters)
+               return;
+
+       eth_filter_info = adap->ethtool_filters->port;
+
+       if (eth_filter_info) {
+               for (i = 0; i < adap->params.nports; i++) {
+                       kvfree(eth_filter_info[i].loc_array);
+                       kfree(eth_filter_info[i].bmap);
+               }
+               kfree(eth_filter_info);
+       }
+
+       kfree(adap->ethtool_filters);
+}
+
+int cxgb4_init_ethtool_filters(struct adapter *adap)
+{
+       struct cxgb4_ethtool_filter_info *eth_filter_info;
+       struct cxgb4_ethtool_filter *eth_filter;
+       struct tid_info *tids = &adap->tids;
+       u32 nentries, i;
+       int ret;
+
+       eth_filter = kzalloc(sizeof(*eth_filter), GFP_KERNEL);
+       if (!eth_filter)
+               return -ENOMEM;
+
+       eth_filter_info = kcalloc(adap->params.nports,
+                                 sizeof(*eth_filter_info),
+                                 GFP_KERNEL);
+       if (!eth_filter_info) {
+               ret = -ENOMEM;
+               goto free_eth_filter;
+       }
+
+       eth_filter->port = eth_filter_info;
+
+       nentries = tids->nhpftids + tids->nftids;
+       if (is_hashfilter(adap))
+               nentries += tids->nhash +
+                           (adap->tids.stid_base - adap->tids.tid_base);
+       eth_filter->nentries = nentries;
+
+       for (i = 0; i < adap->params.nports; i++) {
+               eth_filter->port[i].loc_array = kvzalloc(nentries, GFP_KERNEL);
+               if (!eth_filter->port[i].loc_array) {
+                       ret = -ENOMEM;
+                       goto free_eth_finfo;
+               }
+
+               eth_filter->port[i].bmap = kcalloc(BITS_TO_LONGS(nentries),
+                                                  sizeof(unsigned long),
+                                                  GFP_KERNEL);
+               if (!eth_filter->port[i].bmap) {
+                       ret = -ENOMEM;
+                       goto free_eth_finfo;
+               }
+       }
+
+       adap->ethtool_filters = eth_filter;
+       return 0;
+
+free_eth_finfo:
+       while (i-- > 0) {
+               kfree(eth_filter->port[i].bmap);
+               kvfree(eth_filter->port[i].loc_array);
+       }
+       kfree(eth_filter_info);
+
+free_eth_filter:
+       kfree(eth_filter);
+
+       return ret;
+}
+
 void cxgb4_set_ethtool_ops(struct net_device *netdev)
 {
        netdev->ethtool_ops = &cxgb_ethtool_ops;
index 7965552..650db92 100644 (file)
@@ -165,6 +165,9 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
                           unsigned int tid, bool dip, bool sip, bool dp,
                           bool sp)
 {
+       u8 *nat_lp = (u8 *)&f->fs.nat_lport;
+       u8 *nat_fp = (u8 *)&f->fs.nat_fport;
+
        if (dip) {
                if (f->fs.type) {
                        set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W,
@@ -236,8 +239,9 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
        }
 
        set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
-                     (dp ? f->fs.nat_lport : 0) |
-                     (sp ? f->fs.nat_fport << 16 : 0), 1);
+                     (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) |
+                     (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0),
+                     1);
 }
 
 /* Validate filter spec against configuration done on the card. */
@@ -909,6 +913,9 @@ int set_filter_wr(struct adapter *adapter, int fidx)
        fwr->fpm = htons(f->fs.mask.fport);
 
        if (adapter->params.filter2_wr_support) {
+               u8 *nat_lp = (u8 *)&f->fs.nat_lport;
+               u8 *nat_fp = (u8 *)&f->fs.nat_fport;
+
                fwr->natmode_to_ulp_type =
                        FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ?
                                                 ULP_MODE_TCPDDP :
@@ -916,8 +923,8 @@ int set_filter_wr(struct adapter *adapter, int fidx)
                        FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode);
                memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
                memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
-               fwr->newlport = htons(f->fs.nat_lport);
-               fwr->newfport = htons(f->fs.nat_fport);
+               fwr->newlport = htons(nat_lp[1] | nat_lp[0] << 8);
+               fwr->newfport = htons(nat_fp[1] | nat_fp[0] << 8);
        }
 
        /* Mark the filter as "pending" and ship off the Filter Work Request.
@@ -1105,16 +1112,16 @@ static bool is_addr_all_mask(u8 *ipmask, int family)
                struct in_addr *addr;
 
                addr = (struct in_addr *)ipmask;
-               if (addr->s_addr == 0xffffffff)
+               if (addr->s_addr == htonl(0xffffffff))
                        return true;
        } else if (family == AF_INET6) {
                struct in6_addr *addr6;
 
                addr6 = (struct in6_addr *)ipmask;
-               if (addr6->s6_addr32[0] == 0xffffffff &&
-                   addr6->s6_addr32[1] == 0xffffffff &&
-                   addr6->s6_addr32[2] == 0xffffffff &&
-                   addr6->s6_addr32[3] == 0xffffffff)
+               if (addr6->s6_addr32[0] == htonl(0xffffffff) &&
+                   addr6->s6_addr32[1] == htonl(0xffffffff) &&
+                   addr6->s6_addr32[2] == htonl(0xffffffff) &&
+                   addr6->s6_addr32[3] == htonl(0xffffffff))
                        return true;
        }
        return false;
@@ -1152,6 +1159,11 @@ bool is_filter_exact_match(struct adapter *adap,
        if (!is_hashfilter(adap))
                return false;
 
+       if ((atomic_read(&adap->tids.hash_tids_in_use) +
+            atomic_read(&adap->tids.tids_in_use)) >=
+           (adap->tids.nhash + (adap->tids.stid_base - adap->tids.tid_base)))
+               return false;
+
         /* Keep tunnel VNI match disabled for hash-filters for now */
        if (fs->mask.encap_vld)
                return false;
index b0751c0..807a8da 100644 (file)
@@ -53,4 +53,6 @@ void clear_all_filters(struct adapter *adapter);
 void init_hash_filter(struct adapter *adap);
 bool is_filter_exact_match(struct adapter *adap,
                           struct ch_filter_specification *fs);
+void cxgb4_cleanup_ethtool_filters(struct adapter *adap);
+int cxgb4_init_ethtool_filters(struct adapter *adap);
 #endif /* __CXGB4_FILTER_H */
index 854b171..de078a5 100644 (file)
@@ -435,8 +435,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
        __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
        __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
 
-       return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
-                            (dev->flags & IFF_PROMISC) ? 1 : 0,
+       return t4_set_rxmode(adapter, adapter->mbox, pi->viid, pi->viid_mirror,
+                            mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
                             (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                             sleep_ok);
 }
@@ -449,7 +449,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
  *                or -1
  *     @addr: the new MAC address value
  *     @persist: whether a new MAC allocation should be persistent
- *     @add_smt: if true also add the address to the HW SMT
+ *     @smt_idx: location in which to store the newly-allocated SMT index.
  *
  *     Modifies an MPS filter and sets it to the new MAC address if
  *     @tcam_idx >= 0, or adds the MAC address to a new filter if
@@ -503,15 +503,16 @@ set_hash:
  */
 static int link_start(struct net_device *dev)
 {
-       int ret;
        struct port_info *pi = netdev_priv(dev);
-       unsigned int mb = pi->adapter->pf;
+       unsigned int mb = pi->adapter->mbox;
+       int ret;
 
        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
-       ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
+       ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror,
+                           dev->mtu, -1, -1, -1,
                            !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (ret == 0)
                ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
@@ -822,6 +823,31 @@ static void adap_config_hpfilter(struct adapter *adapter)
                        "HP filter region isn't supported by FW\n");
 }
 
+static int cxgb4_config_rss(const struct port_info *pi, u16 *rss,
+                           u16 rss_size, u16 viid)
+{
+       struct adapter *adap = pi->adapter;
+       int ret;
+
+       ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss,
+                                 rss_size);
+       if (ret)
+               return ret;
+
+       /* If Tunnel All Lookup isn't specified in the global RSS
+        * Configuration, then we need to specify a default Ingress
+        * Queue for any ingress packets which aren't hashed.  We'll
+        * use our first ingress queue ...
+        */
+       return t4_config_vi_rss(adap, adap->mbox, viid,
+                               FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
+                               FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
+                               FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
+                               FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
+                               FW_RSS_VI_CONFIG_CMD_UDPEN_F,
+                               rss[0]);
+}
+
 /**
  *     cxgb4_write_rss - write the RSS table for a given port
  *     @pi: the port
@@ -833,10 +859,10 @@ static void adap_config_hpfilter(struct adapter *adapter)
  */
 int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
 {
-       u16 *rss;
-       int i, err;
        struct adapter *adapter = pi->adapter;
        const struct sge_eth_rxq *rxq;
+       int i, err;
+       u16 *rss;
 
        rxq = &adapter->sge.ethrxq[pi->first_qset];
        rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
@@ -847,21 +873,7 @@ int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = rxq[*queues].rspq.abs_id;
 
-       err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
-                                 pi->rss_size, rss, pi->rss_size);
-       /* If Tunnel All Lookup isn't specified in the global RSS
-        * Configuration, then we need to specify a default Ingress
-        * Queue for any ingress packets which aren't hashed.  We'll
-        * use our first ingress queue ...
-        */
-       if (!err)
-               err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
-                                      FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
-                                      FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
-                                      FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
-                                      FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
-                                      FW_RSS_VI_CONFIG_CMD_UDPEN_F,
-                                      rss[0]);
+       err = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid);
        kfree(rss);
        return err;
 }
@@ -1259,15 +1271,15 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
 
 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
 {
-       const struct port_info *pi = netdev_priv(dev);
        netdev_features_t changed = dev->features ^ features;
+       const struct port_info *pi = netdev_priv(dev);
        int err;
 
        if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
                return 0;
 
-       err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
-                           -1, -1, -1,
+       err = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
+                           pi->viid_mirror, -1, -1, -1, -1,
                            !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (unlikely(err))
                dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
@@ -1285,6 +1297,292 @@ static int setup_debugfs(struct adapter *adap)
        return 0;
 }
 
+static void cxgb4_port_mirror_free_rxq(struct adapter *adap,
+                                      struct sge_eth_rxq *mirror_rxq)
+{
+       if ((adap->flags & CXGB4_FULL_INIT_DONE) &&
+           !(adap->flags & CXGB4_SHUTTING_DOWN))
+               cxgb4_quiesce_rx(&mirror_rxq->rspq);
+
+       if (adap->flags & CXGB4_USING_MSIX) {
+               cxgb4_clear_msix_aff(mirror_rxq->msix->vec,
+                                    mirror_rxq->msix->aff_mask);
+               free_irq(mirror_rxq->msix->vec, &mirror_rxq->rspq);
+               cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
+       }
+
+       free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
+}
+
+static int cxgb4_port_mirror_alloc_queues(struct net_device *dev)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = netdev2adap(dev);
+       struct sge_eth_rxq *mirror_rxq;
+       struct sge *s = &adap->sge;
+       int ret = 0, msix = 0;
+       u16 i, rxqid;
+       u16 *rss;
+
+       if (!pi->vi_mirror_count)
+               return 0;
+
+       if (s->mirror_rxq[pi->port_id])
+               return 0;
+
+       mirror_rxq = kcalloc(pi->nmirrorqsets, sizeof(*mirror_rxq), GFP_KERNEL);
+       if (!mirror_rxq)
+               return -ENOMEM;
+
+       s->mirror_rxq[pi->port_id] = mirror_rxq;
+
+       if (!(adap->flags & CXGB4_USING_MSIX))
+               msix = -((int)adap->sge.intrq.abs_id + 1);
+
+       for (i = 0, rxqid = 0; i < pi->nmirrorqsets; i++, rxqid++) {
+               mirror_rxq = &s->mirror_rxq[pi->port_id][i];
+
+               /* Allocate Mirror Rxqs */
+               if (msix >= 0) {
+                       msix = cxgb4_get_msix_idx_from_bmap(adap);
+                       if (msix < 0) {
+                               ret = msix;
+                               goto out_free_queues;
+                       }
+
+                       mirror_rxq->msix = &adap->msix_info[msix];
+                       snprintf(mirror_rxq->msix->desc,
+                                sizeof(mirror_rxq->msix->desc),
+                                "%s-mirrorrxq%d", dev->name, i);
+               }
+
+               init_rspq(adap, &mirror_rxq->rspq,
+                         CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC,
+                         CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT,
+                         CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM,
+                         CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE);
+
+               mirror_rxq->fl.size = CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM;
+
+               ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false,
+                                      dev, msix, &mirror_rxq->fl,
+                                      t4_ethrx_handler, NULL, 0);
+               if (ret)
+                       goto out_free_msix_idx;
+
+               /* Setup MSI-X vectors for Mirror Rxqs */
+               if (adap->flags & CXGB4_USING_MSIX) {
+                       ret = request_irq(mirror_rxq->msix->vec,
+                                         t4_sge_intr_msix, 0,
+                                         mirror_rxq->msix->desc,
+                                         &mirror_rxq->rspq);
+                       if (ret)
+                               goto out_free_rxq;
+
+                       cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec,
+                                          &mirror_rxq->msix->aff_mask, i);
+               }
+
+               /* Start NAPI for Mirror Rxqs */
+               cxgb4_enable_rx(adap, &mirror_rxq->rspq);
+       }
+
+       /* Setup RSS for Mirror Rxqs */
+       rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
+       if (!rss) {
+               ret = -ENOMEM;
+               goto out_free_queues;
+       }
+
+       mirror_rxq = &s->mirror_rxq[pi->port_id][0];
+       for (i = 0; i < pi->rss_size; i++)
+               rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id;
+
+       ret = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid_mirror);
+       kfree(rss);
+       if (ret)
+               goto out_free_queues;
+
+       return 0;
+
+out_free_rxq:
+       free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
+
+out_free_msix_idx:
+       cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
+
+out_free_queues:
+       while (rxqid-- > 0)
+               cxgb4_port_mirror_free_rxq(adap,
+                                          &s->mirror_rxq[pi->port_id][rxqid]);
+
+       kfree(s->mirror_rxq[pi->port_id]);
+       s->mirror_rxq[pi->port_id] = NULL;
+       return ret;
+}
+
+static void cxgb4_port_mirror_free_queues(struct net_device *dev)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = netdev2adap(dev);
+       struct sge *s = &adap->sge;
+       u16 i;
+
+       if (!pi->vi_mirror_count)
+               return;
+
+       if (!s->mirror_rxq[pi->port_id])
+               return;
+
+       for (i = 0; i < pi->nmirrorqsets; i++)
+               cxgb4_port_mirror_free_rxq(adap,
+                                          &s->mirror_rxq[pi->port_id][i]);
+
+       kfree(s->mirror_rxq[pi->port_id]);
+       s->mirror_rxq[pi->port_id] = NULL;
+}
+
+static int cxgb4_port_mirror_start(struct net_device *dev)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = netdev2adap(dev);
+       int ret, idx = -1;
+
+       if (!pi->vi_mirror_count)
+               return 0;
+
+       /* Mirror VIs can be created dynamically after the stack has
+        * already set up Rx modes like MTU, promisc, allmulti, etc.
+        * on the main VI. So, read what the stack configured on the
+        * main VI and apply the same settings to the mirror VI.
+        */
+       ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror,
+                           dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
+                           (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
+                           !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
+       if (ret) {
+               dev_err(adap->pdev_dev,
+                       "Failed start up Rx mode for Mirror VI 0x%x, ret: %d\n",
+                       pi->viid_mirror, ret);
+               return ret;
+       }
+
+       /* Enable replication bit for the device's MAC address
+        * in MPS TCAM, so that the packets for the main VI are
+        * replicated to mirror VI.
+        */
+       ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx,
+                                   dev->dev_addr, true, NULL);
+       if (ret) {
+               dev_err(adap->pdev_dev,
+                       "Failed updating MAC filter for Mirror VI 0x%x, ret: %d\n",
+                       pi->viid_mirror, ret);
+               return ret;
+       }
+
+       /* Enabling a Virtual Interface can result in an interrupt
+        * during the processing of the VI Enable command and, in some
+        * paths, result in an attempt to issue another command in the
+        * interrupt context. Thus, we disable interrupts during the
+        * course of the VI Enable command ...
+        */
+       local_bh_disable();
+       ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true,
+                                 false);
+       local_bh_enable();
+       if (ret)
+               dev_err(adap->pdev_dev,
+                       "Failed starting Mirror VI 0x%x, ret: %d\n",
+                       pi->viid_mirror, ret);
+
+       return ret;
+}
+
+static void cxgb4_port_mirror_stop(struct net_device *dev)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = netdev2adap(dev);
+
+       if (!pi->vi_mirror_count)
+               return;
+
+       t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false,
+                           false);
+}
+
+int cxgb4_port_mirror_alloc(struct net_device *dev)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = netdev2adap(dev);
+       int ret = 0;
+
+       if (!pi->nmirrorqsets)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&pi->vi_mirror_mutex);
+       if (pi->viid_mirror) {
+               pi->vi_mirror_count++;
+               goto out_unlock;
+       }
+
+       ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0,
+                                 &pi->viid_mirror);
+       if (ret)
+               goto out_unlock;
+
+       pi->vi_mirror_count = 1;
+
+       if (adap->flags & CXGB4_FULL_INIT_DONE) {
+               ret = cxgb4_port_mirror_alloc_queues(dev);
+               if (ret)
+                       goto out_free_vi;
+
+               ret = cxgb4_port_mirror_start(dev);
+               if (ret)
+                       goto out_free_queues;
+       }
+
+       mutex_unlock(&pi->vi_mirror_mutex);
+       return 0;
+
+out_free_queues:
+       cxgb4_port_mirror_free_queues(dev);
+
+out_free_vi:
+       pi->vi_mirror_count = 0;
+       t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
+       pi->viid_mirror = 0;
+
+out_unlock:
+       mutex_unlock(&pi->vi_mirror_mutex);
+       return ret;
+}
+
+void cxgb4_port_mirror_free(struct net_device *dev)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = netdev2adap(dev);
+
+       mutex_lock(&pi->vi_mirror_mutex);
+       if (!pi->viid_mirror)
+               goto out_unlock;
+
+       if (pi->vi_mirror_count > 1) {
+               pi->vi_mirror_count--;
+               goto out_unlock;
+       }
+
+       cxgb4_port_mirror_stop(dev);
+       cxgb4_port_mirror_free_queues(dev);
+
+       pi->vi_mirror_count = 0;
+       t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
+       pi->viid_mirror = 0;
+
+out_unlock:
+       mutex_unlock(&pi->vi_mirror_mutex);
+}
+
 /*
  * upper-layer driver support
  */
@@ -1615,6 +1913,7 @@ static int tid_init(struct tid_info *t)
  *     @stid: the server TID
  *     @sip: local IP address to bind server to
  *     @sport: the server's TCP port
+ *     @vlan: the VLAN header information
  *     @queue: queue to direct messages from this server to
  *
  *     Create an IP server for the given port and address.
@@ -2556,8 +2855,29 @@ int cxgb_open(struct net_device *dev)
                return err;
 
        err = link_start(dev);
-       if (!err)
-               netif_tx_start_all_queues(dev);
+       if (err)
+               return err;
+
+       if (pi->nmirrorqsets) {
+               mutex_lock(&pi->vi_mirror_mutex);
+               err = cxgb4_port_mirror_alloc_queues(dev);
+               if (err)
+                       goto out_unlock;
+
+               err = cxgb4_port_mirror_start(dev);
+               if (err)
+                       goto out_free_queues;
+               mutex_unlock(&pi->vi_mirror_mutex);
+       }
+
+       netif_tx_start_all_queues(dev);
+       return 0;
+
+out_free_queues:
+       cxgb4_port_mirror_free_queues(dev);
+
+out_unlock:
+       mutex_unlock(&pi->vi_mirror_mutex);
        return err;
 }
 
@@ -2575,7 +2895,17 @@ int cxgb_close(struct net_device *dev)
        cxgb4_dcb_reset(dev);
        dcb_tx_queue_prio_enable(dev, false);
 #endif
-       return ret;
+       if (ret)
+               return ret;
+
+       if (pi->nmirrorqsets) {
+               mutex_lock(&pi->vi_mirror_mutex);
+               cxgb4_port_mirror_stop(dev);
+               cxgb4_port_mirror_free_queues(dev);
+               mutex_unlock(&pi->vi_mirror_mutex);
+       }
+
+       return 0;
 }
 
 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
@@ -2609,7 +2939,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
 
        /* Clear out filter specifications */
        memset(&f->fs, 0, sizeof(struct ch_filter_specification));
-       f->fs.val.lport = cpu_to_be16(sport);
+       f->fs.val.lport = be16_to_cpu(sport);
        f->fs.mask.lport  = ~0;
        val = (u8 *)&sip;
        if ((val[0] | val[1] | val[2] | val[3]) != 0) {
@@ -2841,11 +3171,11 @@ static void cxgb_set_rxmode(struct net_device *dev)
 
 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
 {
-       int ret;
        struct port_info *pi = netdev_priv(dev);
+       int ret;
 
-       ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
-                           -1, -1, -1, true);
+       ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
+                           pi->viid_mirror, new_mtu, -1, -1, -1, -1, true);
        if (!ret)
                dev->mtu = new_mtu;
        return ret;
@@ -3402,129 +3732,71 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
        }
 }
 
-static void cxgb_del_udp_tunnel(struct net_device *netdev,
-                               struct udp_tunnel_info *ti)
+static int cxgb_udp_tunnel_unset_port(struct net_device *netdev,
+                                     unsigned int table, unsigned int entry,
+                                     struct udp_tunnel_info *ti)
 {
        struct port_info *pi = netdev_priv(netdev);
        struct adapter *adapter = pi->adapter;
-       unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
        u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
        int ret = 0, i;
 
-       if (chip_ver < CHELSIO_T6)
-               return;
-
        switch (ti->type) {
        case UDP_TUNNEL_TYPE_VXLAN:
-               if (!adapter->vxlan_port_cnt ||
-                   adapter->vxlan_port != ti->port)
-                       return; /* Invalid VxLAN destination port */
-
-               adapter->vxlan_port_cnt--;
-               if (adapter->vxlan_port_cnt)
-                       return;
-
                adapter->vxlan_port = 0;
                t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
                break;
        case UDP_TUNNEL_TYPE_GENEVE:
-               if (!adapter->geneve_port_cnt ||
-                   adapter->geneve_port != ti->port)
-                       return; /* Invalid GENEVE destination port */
-
-               adapter->geneve_port_cnt--;
-               if (adapter->geneve_port_cnt)
-                       return;
-
                adapter->geneve_port = 0;
                t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
                break;
        default:
-               return;
+               return -EINVAL;
        }
 
        /* Matchall mac entries can be deleted only after all tunnel ports
         * are brought down or removed.
         */
        if (!adapter->rawf_cnt)
-               return;
+               return 0;
        for_each_port(adapter, i) {
                pi = adap2pinfo(adapter, i);
                ret = t4_free_raw_mac_filt(adapter, pi->viid,
                                           match_all_mac, match_all_mac,
-                                          adapter->rawf_start +
-                                           pi->port_id,
+                                          adapter->rawf_start + pi->port_id,
                                           1, pi->port_id, false);
                if (ret < 0) {
                        netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
                                    i);
-                       return;
+                       return ret;
                }
        }
+
+       return 0;
 }
 
-static void cxgb_add_udp_tunnel(struct net_device *netdev,
-                               struct udp_tunnel_info *ti)
+static int cxgb_udp_tunnel_set_port(struct net_device *netdev,
+                                   unsigned int table, unsigned int entry,
+                                   struct udp_tunnel_info *ti)
 {
        struct port_info *pi = netdev_priv(netdev);
        struct adapter *adapter = pi->adapter;
-       unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
        u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
        int i, ret;
 
-       if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt)
-               return;
-
        switch (ti->type) {
        case UDP_TUNNEL_TYPE_VXLAN:
-               /* Callback for adding vxlan port can be called with the same
-                * port for both IPv4 and IPv6. We should not disable the
-                * offloading when the same port for both protocols is added
-                * and later one of them is removed.
-                */
-               if (adapter->vxlan_port_cnt &&
-                   adapter->vxlan_port == ti->port) {
-                       adapter->vxlan_port_cnt++;
-                       return;
-               }
-
-               /* We will support only one VxLAN port */
-               if (adapter->vxlan_port_cnt) {
-                       netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
-                                   be16_to_cpu(adapter->vxlan_port),
-                                   be16_to_cpu(ti->port));
-                       return;
-               }
-
                adapter->vxlan_port = ti->port;
-               adapter->vxlan_port_cnt = 1;
-
                t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
                             VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
                break;
        case UDP_TUNNEL_TYPE_GENEVE:
-               if (adapter->geneve_port_cnt &&
-                   adapter->geneve_port == ti->port) {
-                       adapter->geneve_port_cnt++;
-                       return;
-               }
-
-               /* We will support only one GENEVE port */
-               if (adapter->geneve_port_cnt) {
-                       netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
-                                   be16_to_cpu(adapter->geneve_port),
-                                   be16_to_cpu(ti->port));
-                       return;
-               }
-
                adapter->geneve_port = ti->port;
-               adapter->geneve_port_cnt = 1;
-
                t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
                             GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
                break;
        default:
-               return;
+               return -EINVAL;
        }
 
        /* Create a 'match all' mac filter entry for inner mac,
@@ -3539,18 +3811,27 @@ static void cxgb_add_udp_tunnel(struct net_device *netdev,
                ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
                                            match_all_mac,
                                            match_all_mac,
-                                           adapter->rawf_start +
-                                           pi->port_id,
+                                           adapter->rawf_start + pi->port_id,
                                            1, pi->port_id, false);
                if (ret < 0) {
                        netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
                                    be16_to_cpu(ti->port));
-                       cxgb_del_udp_tunnel(netdev, ti);
-                       return;
+                       return ret;
                }
        }
+
+       return 0;
 }
 
+static const struct udp_tunnel_nic_info cxgb_udp_tunnels = {
+       .set_port       = cxgb_udp_tunnel_set_port,
+       .unset_port     = cxgb_udp_tunnel_unset_port,
+       .tables         = {
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+       },
+};
+
 static netdev_features_t cxgb_features_check(struct sk_buff *skb,
                                             struct net_device *dev,
                                             netdev_features_t features)
@@ -3600,8 +3881,8 @@ static const struct net_device_ops cxgb4_netdev_ops = {
 #endif /* CONFIG_CHELSIO_T4_FCOE */
        .ndo_set_tx_maxrate   = cxgb_set_tx_maxrate,
        .ndo_setup_tc         = cxgb_setup_tc,
-       .ndo_udp_tunnel_add   = cxgb_add_udp_tunnel,
-       .ndo_udp_tunnel_del   = cxgb_del_udp_tunnel,
+       .ndo_udp_tunnel_add   = udp_tunnel_nic_add_port,
+       .ndo_udp_tunnel_del   = udp_tunnel_nic_del_port,
        .ndo_features_check   = cxgb_features_check,
        .ndo_fix_features     = cxgb_fix_features,
 };
@@ -4146,9 +4427,10 @@ static int adap_init0_phy(struct adapter *adap)
 
        /* Load PHY Firmware onto adapter.
         */
-       ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
-                            phy_info->phy_fw_version,
+       spin_lock_bh(&adap->win0_lock);
+       ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
                             (u8 *)phyf->data, phyf->size);
+       spin_unlock_bh(&adap->win0_lock);
        if (ret < 0)
                dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
                        -ret);
@@ -5377,10 +5659,10 @@ static inline bool is_x_10g_port(const struct link_config *lc)
 static int cfg_queues(struct adapter *adap)
 {
        u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
-       u32 i, n10g = 0, qidx = 0, n1g = 0;
        u32 ncpus = num_online_cpus();
        u32 niqflint, neq, num_ulds;
        struct sge *s = &adap->sge;
+       u32 i, n10g = 0, qidx = 0;
        u32 q10g = 0, q1g;
 
        /* Reduce memory usage in kdump environment, disable all offload. */
@@ -5426,7 +5708,6 @@ static int cfg_queues(struct adapter *adap)
        if (n10g)
                q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
 
-       n1g = adap->params.nports - n10g;
 #ifdef CONFIG_CHELSIO_T4_DCB
        /* For Data Center Bridging support we need to be able to support up
         * to 8 Traffic Priorities; each of which will be assigned to its
@@ -5444,7 +5725,8 @@ static int cfg_queues(struct adapter *adap)
        else
                q10g = max(8U, q10g);
 
-       while ((q10g * n10g) > (avail_eth_qsets - n1g * q1g))
+       while ((q10g * n10g) >
+              (avail_eth_qsets - (adap->params.nports - n10g) * q1g))
                q10g--;
 
 #else /* !CONFIG_CHELSIO_T4_DCB */
@@ -5502,6 +5784,19 @@ static int cfg_queues(struct adapter *adap)
                avail_qsets -= s->eoqsets;
        }
 
+       /* Mirror queues must follow same scheme as normal Ethernet
+        * Queues, when there are enough queues available. Otherwise,
+        * allocate at least 1 queue per port. If even 1 queue is not
+        * available, then disable mirror queues support.
+        */
+       if (avail_qsets >= s->max_ethqsets)
+               s->mirrorqsets = s->max_ethqsets;
+       else if (avail_qsets >= adap->params.nports)
+               s->mirrorqsets = adap->params.nports;
+       else
+               s->mirrorqsets = 0;
+       avail_qsets -= s->mirrorqsets;
+
        for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
                struct sge_eth_rxq *r = &s->ethrxq[i];
 
@@ -5615,8 +5910,8 @@ void cxgb4_free_msix_idx_in_bmap(struct adapter *adap,
 
 static int enable_msix(struct adapter *adap)
 {
-       u32 eth_need, uld_need = 0, ethofld_need = 0;
-       u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0;
+       u32 eth_need, uld_need = 0, ethofld_need = 0, mirror_need = 0;
+       u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0, mirrorqsets = 0;
        u8 num_uld = 0, nchan = adap->params.nports;
        u32 i, want, need, num_vec;
        struct sge *s = &adap->sge;
@@ -5647,6 +5942,12 @@ static int enable_msix(struct adapter *adap)
                need += ethofld_need;
        }
 
+       if (s->mirrorqsets) {
+               want += s->mirrorqsets;
+               mirror_need = nchan;
+               need += mirror_need;
+       }
+
        want += EXTRA_VECS;
        need += EXTRA_VECS;
 
@@ -5680,8 +5981,10 @@ static int enable_msix(struct adapter *adap)
                adap->params.ethofld = 0;
                s->ofldqsets = 0;
                s->eoqsets = 0;
+               s->mirrorqsets = 0;
                uld_need = 0;
                ethofld_need = 0;
+               mirror_need = 0;
        }
 
        num_vec = allocated;
@@ -5695,6 +5998,8 @@ static int enable_msix(struct adapter *adap)
                        ofldqsets = nchan;
                if (is_ethofld(adap))
                        eoqsets = ethofld_need;
+               if (s->mirrorqsets)
+                       mirrorqsets = mirror_need;
 
                num_vec -= need;
                while (num_vec) {
@@ -5726,12 +6031,25 @@ static int enable_msix(struct adapter *adap)
                                num_vec -= uld_need;
                        }
                }
+
+               if (s->mirrorqsets) {
+                       while (num_vec) {
+                               if (num_vec < mirror_need ||
+                                   mirrorqsets > s->mirrorqsets)
+                                       break;
+
+                               mirrorqsets++;
+                               num_vec -= mirror_need;
+                       }
+               }
        } else {
                ethqsets = s->max_ethqsets;
                if (is_uld(adap))
                        ofldqsets = s->ofldqsets;
                if (is_ethofld(adap))
                        eoqsets = s->eoqsets;
+               if (s->mirrorqsets)
+                       mirrorqsets = s->mirrorqsets;
        }
 
        if (ethqsets < s->max_ethqsets) {
@@ -5747,6 +6065,15 @@ static int enable_msix(struct adapter *adap)
        if (is_ethofld(adap))
                s->eoqsets = eoqsets;
 
+       if (s->mirrorqsets) {
+               s->mirrorqsets = mirrorqsets;
+               for_each_port(adap, i) {
+                       pi = adap2pinfo(adap, i);
+                       pi->nmirrorqsets = s->mirrorqsets / nchan;
+                       mutex_init(&pi->vi_mirror_mutex);
+               }
+       }
+
        /* map for msix */
        ret = alloc_msix_info(adap, allocated);
        if (ret)
@@ -5758,8 +6085,9 @@ static int enable_msix(struct adapter *adap)
        }
 
        dev_info(adap->pdev_dev,
-                "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d\n",
-                allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld);
+                "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d mirrorqsets %d\n",
+                allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld,
+                s->mirrorqsets);
 
        kfree(entries);
        return 0;
@@ -5860,6 +6188,7 @@ static void free_some_resources(struct adapter *adapter)
        cxgb4_cleanup_tc_mqprio(adapter);
        cxgb4_cleanup_tc_flower(adapter);
        cxgb4_cleanup_tc_u32(adapter);
+       cxgb4_cleanup_ethtool_filters(adapter);
        kfree(adapter->sge.egr_map);
        kfree(adapter->sge.ingr_map);
        kfree(adapter->sge.starving_fl);
@@ -6370,7 +6699,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                        NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                        NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
                        NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
-                       NETIF_F_HW_TC;
+                       NETIF_F_HW_TC | NETIF_F_NTUPLE;
 
                if (chip_ver > CHELSIO_T5) {
                        netdev->hw_enc_features |= NETIF_F_IP_CSUM |
@@ -6383,6 +6712,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                        netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
                                               NETIF_F_GSO_UDP_TUNNEL_CSUM |
                                               NETIF_F_HW_TLS_RECORD;
+
+                       if (adapter->rawf_cnt)
+                               netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels;
                }
 
                if (highdma)
@@ -6493,6 +6825,24 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                                 i);
        }
 
+       if (is_offload(adapter) || is_hashfilter(adapter)) {
+               if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
+                       u32 v;
+
+                       v = t4_read_reg(adapter, LE_DB_HASH_CONFIG_A);
+                       if (chip_ver <= CHELSIO_T5) {
+                               adapter->tids.nhash = 1 << HASHTIDSIZE_G(v);
+                               v = t4_read_reg(adapter, LE_DB_TID_HASHBASE_A);
+                               adapter->tids.hash_base = v / 4;
+                       } else {
+                               adapter->tids.nhash = HASHTBLSIZE_G(v) << 3;
+                               v = t4_read_reg(adapter,
+                                               T6_LE_DB_HASH_TID_BASE_A);
+                               adapter->tids.hash_base = v;
+                       }
+               }
+       }
+
        if (tid_init(&adapter->tids) < 0) {
                dev_warn(&pdev->dev, "could not allocate TID table, "
                         "continuing\n");
@@ -6514,22 +6864,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                if (cxgb4_init_tc_matchall(adapter))
                        dev_warn(&pdev->dev,
                                 "could not offload tc matchall, continuing\n");
-       }
-
-       if (is_offload(adapter) || is_hashfilter(adapter)) {
-               if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
-                       u32 hash_base, hash_reg;
-
-                       if (chip_ver <= CHELSIO_T5) {
-                               hash_reg = LE_DB_TID_HASHBASE_A;
-                               hash_base = t4_read_reg(adapter, hash_reg);
-                               adapter->tids.hash_base = hash_base / 4;
-                       } else {
-                               hash_reg = T6_LE_DB_HASH_TID_BASE_A;
-                               hash_base = t4_read_reg(adapter, hash_reg);
-                               adapter->tids.hash_base = hash_base;
-                       }
-               }
+               if (cxgb4_init_ethtool_filters(adapter))
+                       dev_warn(&pdev->dev,
+                                "could not initialize ethtool filters, continuing\n");
        }
 
        /* See what interrupts we'll be using */
index f5bc996..70dbee8 100644 (file)
@@ -194,6 +194,7 @@ int cxgb4_ptp_redirect_rx_packet(struct adapter *adapter, struct port_info *pi)
 }
 
 /**
+ * cxgb4_ptp_adjfreq - Adjust frequency of PHC cycle counter
  * @ptp: ptp clock structure
  * @ppb: Desired frequency change in parts per billion
  *
@@ -229,7 +230,7 @@ static int cxgb4_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 
 /**
  * cxgb4_ptp_fineadjtime - Shift the time of the hardware clock
- * @ptp: ptp clock structure
+ * @adapter: board private structure
  * @delta: Desired change in nanoseconds
  *
  * Adjust the timer by resetting the timecounter structure.
index 4a5fa9e..6251444 100644 (file)
@@ -58,10 +58,6 @@ static struct ch_tc_pedit_fields pedits[] = {
        PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
        PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
        PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
-       PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
-       PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
-       PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
-       PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
 };
 
 static struct ch_tc_flower_entry *allocate_flower_entry(void)
@@ -81,19 +77,9 @@ static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
 }
 
 static void cxgb4_process_flow_match(struct net_device *dev,
-                                    struct flow_cls_offload *cls,
+                                    struct flow_rule *rule,
                                     struct ch_filter_specification *fs)
 {
-       struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
-       u16 addr_type = 0;
-
-       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
-               struct flow_match_control match;
-
-               flow_rule_match_control(rule, &match);
-               addr_type = match.key->addr_type;
-       }
-
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;
                u16 ethtype_key, ethtype_mask;
@@ -116,7 +102,7 @@ static void cxgb4_process_flow_match(struct net_device *dev,
                fs->mask.proto = match.mask->ip_proto;
        }
 
-       if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
                struct flow_match_ipv4_addrs match;
 
                flow_rule_match_ipv4_addrs(rule, &match);
@@ -131,7 +117,7 @@ static void cxgb4_process_flow_match(struct net_device *dev,
                memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
        }
 
-       if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
                struct flow_match_ipv6_addrs match;
 
                flow_rule_match_ipv6_addrs(rule, &match);
@@ -156,14 +142,14 @@ static void cxgb4_process_flow_match(struct net_device *dev,
                struct flow_match_ports match;
 
                flow_rule_match_ports(rule, &match);
-               fs->val.lport = cpu_to_be16(match.key->dst);
-               fs->mask.lport = cpu_to_be16(match.mask->dst);
-               fs->val.fport = cpu_to_be16(match.key->src);
-               fs->mask.fport = cpu_to_be16(match.mask->src);
+               fs->val.lport = be16_to_cpu(match.key->dst);
+               fs->mask.lport = be16_to_cpu(match.mask->dst);
+               fs->val.fport = be16_to_cpu(match.key->src);
+               fs->mask.fport = be16_to_cpu(match.mask->src);
 
                /* also initialize nat_lport/fport to same values */
-               fs->nat_lport = cpu_to_be16(match.key->dst);
-               fs->nat_fport = cpu_to_be16(match.key->src);
+               fs->nat_lport = fs->val.lport;
+               fs->nat_fport = fs->val.fport;
        }
 
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
@@ -224,9 +210,8 @@ static void cxgb4_process_flow_match(struct net_device *dev,
 }
 
 static int cxgb4_validate_flow_match(struct net_device *dev,
-                                    struct flow_cls_offload *cls)
+                                    struct flow_rule *rule)
 {
-       struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        u16 ethtype_mask = 0;
        u16 ethtype_key = 0;
@@ -354,12 +339,9 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
                switch (offset) {
                case PEDIT_TCP_SPORT_DPORT:
                        if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
-                               offload_pedit(fs, cpu_to_be32(val) >> 16,
-                                             cpu_to_be32(mask) >> 16,
-                                             TCP_SPORT);
+                               fs->nat_fport = val;
                        else
-                               offload_pedit(fs, cpu_to_be32(val),
-                                             cpu_to_be32(mask), TCP_DPORT);
+                               fs->nat_lport = val >> 16;
                }
                fs->nat_mode = NAT_MODE_ALL;
                break;
@@ -367,12 +349,9 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
                switch (offset) {
                case PEDIT_UDP_SPORT_DPORT:
                        if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
-                               offload_pedit(fs, cpu_to_be32(val) >> 16,
-                                             cpu_to_be32(mask) >> 16,
-                                             UDP_SPORT);
+                               fs->nat_fport = val;
                        else
-                               offload_pedit(fs, cpu_to_be32(val),
-                                             cpu_to_be32(mask), UDP_DPORT);
+                               fs->nat_lport = val >> 16;
                }
                fs->nat_mode = NAT_MODE_ALL;
        }
@@ -393,6 +372,7 @@ void cxgb4_process_flow_actions(struct net_device *in,
                case FLOW_ACTION_DROP:
                        fs->action = FILTER_DROP;
                        break;
+               case FLOW_ACTION_MIRRED:
                case FLOW_ACTION_REDIRECT: {
                        struct net_device *out = act->dev;
                        struct port_info *pi = netdev_priv(out);
@@ -436,6 +416,11 @@ void cxgb4_process_flow_actions(struct net_device *in,
                        process_pedit_field(fs, val, mask, offset, htype);
                        }
                        break;
+               case FLOW_ACTION_QUEUE:
+                       fs->action = FILTER_PASS;
+                       fs->dirsteer = 1;
+                       fs->iq = act->queue.index;
+                       break;
                default:
                        break;
                }
@@ -545,7 +530,8 @@ static bool valid_pedit_action(struct net_device *dev,
 
 int cxgb4_validate_flow_actions(struct net_device *dev,
                                struct flow_action *actions,
-                               struct netlink_ext_ack *extack)
+                               struct netlink_ext_ack *extack,
+                               u8 matchall_filter)
 {
        struct flow_action_entry *act;
        bool act_redir = false;
@@ -562,11 +548,19 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
                case FLOW_ACTION_DROP:
                        /* Do nothing */
                        break;
+               case FLOW_ACTION_MIRRED:
                case FLOW_ACTION_REDIRECT: {
                        struct adapter *adap = netdev2adap(dev);
                        struct net_device *n_dev, *target_dev;
-                       unsigned int i;
                        bool found = false;
+                       unsigned int i;
+
+                       if (act->id == FLOW_ACTION_MIRRED &&
+                           !matchall_filter) {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "Egress mirror action is only supported for tc-matchall");
+                               return -EOPNOTSUPP;
+                       }
 
                        target_dev = act->dev;
                        for_each_port(adap, i) {
@@ -620,6 +614,9 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
                        act_pedit = true;
                        }
                        break;
+               case FLOW_ACTION_QUEUE:
+                       /* Do nothing. cxgb4_set_filter will validate */
+                       break;
                default:
                        netdev_err(dev, "%s: Unsupported action\n", __func__);
                        return -EOPNOTSUPP;
@@ -693,33 +690,22 @@ out_unlock:
        spin_unlock_bh(&t->ftid_lock);
 }
 
-int cxgb4_tc_flower_replace(struct net_device *dev,
-                           struct flow_cls_offload *cls)
+int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
+                           u32 tc_prio, struct netlink_ext_ack *extack,
+                           struct ch_filter_specification *fs, u32 *tid)
 {
-       struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
-       struct netlink_ext_ack *extack = cls->common.extack;
        struct adapter *adap = netdev2adap(dev);
-       struct ch_tc_flower_entry *ch_flower;
-       struct ch_filter_specification *fs;
        struct filter_ctx ctx;
        u8 inet_family;
        int fidx, ret;
 
-       if (cxgb4_validate_flow_actions(dev, &rule->action, extack))
+       if (cxgb4_validate_flow_actions(dev, &rule->action, extack, 0))
                return -EOPNOTSUPP;
 
-       if (cxgb4_validate_flow_match(dev, cls))
+       if (cxgb4_validate_flow_match(dev, rule))
                return -EOPNOTSUPP;
 
-       ch_flower = allocate_flower_entry();
-       if (!ch_flower) {
-               netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
-               return -ENOMEM;
-       }
-
-       fs = &ch_flower->fs;
-       fs->hitcnts = 1;
-       cxgb4_process_flow_match(dev, cls, fs);
+       cxgb4_process_flow_match(dev, rule, fs);
        cxgb4_process_flow_actions(dev, &rule->action, fs);
 
        fs->hash = is_filter_exact_match(adap, fs);
@@ -730,12 +716,11 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
         * existing rules.
         */
        fidx = cxgb4_get_free_ftid(dev, inet_family, fs->hash,
-                                  cls->common.prio);
+                                  tc_prio);
        if (fidx < 0) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "No free LETCAM index available");
-               ret = -ENOMEM;
-               goto free_entry;
+               return -ENOMEM;
        }
 
        if (fidx < adap->tids.nhpftids) {
@@ -749,42 +734,70 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
        if (fs->hash)
                fidx = 0;
 
-       fs->tc_prio = cls->common.prio;
-       fs->tc_cookie = cls->cookie;
+       fs->tc_prio = tc_prio;
 
        init_completion(&ctx.completion);
        ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
        if (ret) {
                netdev_err(dev, "%s: filter creation err %d\n",
                           __func__, ret);
-               goto free_entry;
+               return ret;
        }
 
        /* Wait for reply */
        ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
-       if (!ret) {
-               ret = -ETIMEDOUT;
-               goto free_entry;
-       }
+       if (!ret)
+               return -ETIMEDOUT;
 
-       ret = ctx.result;
        /* Check if hw returned error for filter creation */
+       if (ctx.result)
+               return ctx.result;
+
+       *tid = ctx.tid;
+
+       if (fs->hash)
+               cxgb4_tc_flower_hash_prio_add(adap, tc_prio);
+
+       return 0;
+}
+
+int cxgb4_tc_flower_replace(struct net_device *dev,
+                           struct flow_cls_offload *cls)
+{
+       struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+       struct netlink_ext_ack *extack = cls->common.extack;
+       struct adapter *adap = netdev2adap(dev);
+       struct ch_tc_flower_entry *ch_flower;
+       struct ch_filter_specification *fs;
+       int ret;
+
+       ch_flower = allocate_flower_entry();
+       if (!ch_flower) {
+               netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
+               return -ENOMEM;
+       }
+
+       fs = &ch_flower->fs;
+       fs->hitcnts = 1;
+       fs->tc_cookie = cls->cookie;
+
+       ret = cxgb4_flow_rule_replace(dev, rule, cls->common.prio, extack, fs,
+                                     &ch_flower->filter_id);
        if (ret)
                goto free_entry;
 
        ch_flower->tc_flower_cookie = cls->cookie;
-       ch_flower->filter_id = ctx.tid;
        ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
                                     adap->flower_ht_params);
        if (ret)
                goto del_filter;
 
-       if (fs->hash)
-               cxgb4_tc_flower_hash_prio_add(adap, cls->common.prio);
-
        return 0;
 
 del_filter:
+       if (fs->hash)
+               cxgb4_tc_flower_hash_prio_del(adap, cls->common.prio);
+
        cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
 
 free_entry:
@@ -792,23 +805,38 @@ free_entry:
        return ret;
 }
 
+int cxgb4_flow_rule_destroy(struct net_device *dev, u32 tc_prio,
+                           struct ch_filter_specification *fs, int tid)
+{
+       struct adapter *adap = netdev2adap(dev);
+       u8 hash;
+       int ret;
+
+       hash = fs->hash;
+
+       ret = cxgb4_del_filter(dev, tid, fs);
+       if (ret)
+               return ret;
+
+       if (hash)
+               cxgb4_tc_flower_hash_prio_del(adap, tc_prio);
+
+       return ret;
+}
+
 int cxgb4_tc_flower_destroy(struct net_device *dev,
                            struct flow_cls_offload *cls)
 {
        struct adapter *adap = netdev2adap(dev);
        struct ch_tc_flower_entry *ch_flower;
-       u32 tc_prio;
-       bool hash;
        int ret;
 
        ch_flower = ch_flower_lookup(adap, cls->cookie);
        if (!ch_flower)
                return -ENOENT;
 
-       hash = ch_flower->fs.hash;
-       tc_prio = ch_flower->fs.tc_prio;
-
-       ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
+       ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
+                                     &ch_flower->fs, ch_flower->filter_id);
        if (ret)
                goto err;
 
@@ -820,9 +848,6 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
        }
        kfree_rcu(ch_flower, rcu);
 
-       if (hash)
-               cxgb4_tc_flower_hash_prio_del(adap, tc_prio);
-
 err:
        return ret;
 }
@@ -902,7 +927,7 @@ int cxgb4_tc_flower_stats(struct net_device *dev,
                if (ofld_stats->prev_packet_count != packets)
                        ofld_stats->last_used = jiffies;
                flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
-                                 packets - ofld_stats->packet_count,
+                                 packets - ofld_stats->packet_count, 0,
                                  ofld_stats->last_used,
                                  FLOW_ACTION_HW_STATS_IMMEDIATE);
 
index 0a30c96..6296e1d 100644 (file)
@@ -113,7 +113,8 @@ void cxgb4_process_flow_actions(struct net_device *in,
                                struct ch_filter_specification *fs);
 int cxgb4_validate_flow_actions(struct net_device *dev,
                                struct flow_action *actions,
-                               struct netlink_ext_ack *extack);
+                               struct netlink_ext_ack *extack,
+                               u8 matchall_filter);
 
 int cxgb4_tc_flower_replace(struct net_device *dev,
                            struct flow_cls_offload *cls);
@@ -121,6 +122,11 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
                            struct flow_cls_offload *cls);
 int cxgb4_tc_flower_stats(struct net_device *dev,
                          struct flow_cls_offload *cls);
+int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
+                           u32 tc_prio, struct netlink_ext_ack *extack,
+                           struct ch_filter_specification *fs, u32 *tid);
+int cxgb4_flow_rule_destroy(struct net_device *dev, u32 tc_prio,
+                           struct ch_filter_specification *fs, int tid);
 
 int cxgb4_init_tc_flower(struct adapter *adap);
 void cxgb4_cleanup_tc_flower(struct adapter *adap);
index c88c47a..e377e50 100644 (file)
@@ -188,6 +188,49 @@ static void cxgb4_matchall_free_tc(struct net_device *dev)
        tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED;
 }
 
+static int cxgb4_matchall_mirror_alloc(struct net_device *dev,
+                                      struct tc_cls_matchall_offload *cls)
+{
+       struct netlink_ext_ack *extack = cls->common.extack;
+       struct cxgb4_tc_port_matchall *tc_port_matchall;
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = netdev2adap(dev);
+       struct flow_action_entry *act;
+       int ret;
+       u32 i;
+
+       tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+       flow_action_for_each(i, act, &cls->rule->action) {
+               if (act->id == FLOW_ACTION_MIRRED) {
+                       ret = cxgb4_port_mirror_alloc(dev);
+                       if (ret) {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "Couldn't allocate mirror");
+                               return ret;
+                       }
+
+                       tc_port_matchall->ingress.viid_mirror = pi->viid_mirror;
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+static void cxgb4_matchall_mirror_free(struct net_device *dev)
+{
+       struct cxgb4_tc_port_matchall *tc_port_matchall;
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = netdev2adap(dev);
+
+       tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
+       if (!tc_port_matchall->ingress.viid_mirror)
+               return;
+
+       cxgb4_port_mirror_free(dev);
+       tc_port_matchall->ingress.viid_mirror = 0;
+}
+
 static int cxgb4_matchall_alloc_filter(struct net_device *dev,
                                       struct tc_cls_matchall_offload *cls)
 {
@@ -211,6 +254,10 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev,
                return -ENOMEM;
        }
 
+       ret = cxgb4_matchall_mirror_alloc(dev, cls);
+       if (ret)
+               return ret;
+
        tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
        fs = &tc_port_matchall->ingress.fs;
        memset(fs, 0, sizeof(*fs));
@@ -229,11 +276,15 @@ static int cxgb4_matchall_alloc_filter(struct net_device *dev,
 
        ret = cxgb4_set_filter(dev, fidx, fs);
        if (ret)
-               return ret;
+               goto out_free;
 
        tc_port_matchall->ingress.tid = fidx;
        tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
        return 0;
+
+out_free:
+       cxgb4_matchall_mirror_free(dev);
+       return ret;
 }
 
 static int cxgb4_matchall_free_filter(struct net_device *dev)
@@ -250,6 +301,8 @@ static int cxgb4_matchall_free_filter(struct net_device *dev)
        if (ret)
                return ret;
 
+       cxgb4_matchall_mirror_free(dev);
+
        tc_port_matchall->ingress.packets = 0;
        tc_port_matchall->ingress.bytes = 0;
        tc_port_matchall->ingress.last_used = 0;
@@ -279,7 +332,7 @@ int cxgb4_tc_matchall_replace(struct net_device *dev,
 
                ret = cxgb4_validate_flow_actions(dev,
                                                  &cls_matchall->rule->action,
-                                                 extack);
+                                                 extack, 1);
                if (ret)
                        return ret;
 
@@ -346,7 +399,7 @@ int cxgb4_tc_matchall_stats(struct net_device *dev,
                flow_stats_update(&cls_matchall->stats,
                                  bytes - tc_port_matchall->ingress.bytes,
                                  packets - tc_port_matchall->ingress.packets,
-                                 tc_port_matchall->ingress.last_used,
+                                 0, tc_port_matchall->ingress.last_used,
                                  FLOW_ACTION_HW_STATS_IMMEDIATE);
 
                tc_port_matchall->ingress.packets = packets;
index ab6b568..e264b6e 100644 (file)
@@ -21,6 +21,7 @@ struct cxgb4_matchall_ingress_entry {
        enum cxgb4_matchall_state state; /* Current MATCHALL offload state */
        u32 tid; /* Index to hardware filter entry */
        struct ch_filter_specification fs; /* Filter entry */
+       u16 viid_mirror; /* Identifier for allocated Mirror VI */
        u64 bytes; /* # of bytes hitting the filter */
        u64 packets; /* # of packets hitting the filter */
        u64 last_used; /* Last updated jiffies time */
index 3f3c11e..dede025 100644 (file)
@@ -48,7 +48,7 @@ static int fill_match_fields(struct adapter *adap,
                             bool next_header)
 {
        unsigned int i, j;
-       u32 val, mask;
+       __be32 val, mask;
        int off, err;
        bool found;
 
@@ -228,7 +228,7 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
                const struct cxgb4_next_header *next;
                bool found = false;
                unsigned int i, j;
-               u32 val, mask;
+               __be32 val, mask;
                int off;
 
                if (t->table[link_uhtid - 1].link_handle) {
@@ -242,10 +242,10 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
 
                /* Try to find matches that allow jumps to next header. */
                for (i = 0; next[i].jump; i++) {
-                       if (next[i].offoff != cls->knode.sel->offoff ||
-                           next[i].shift != cls->knode.sel->offshift ||
-                           next[i].mask != cls->knode.sel->offmask ||
-                           next[i].offset != cls->knode.sel->off)
+                       if (next[i].sel.offoff != cls->knode.sel->offoff ||
+                           next[i].sel.offshift != cls->knode.sel->offshift ||
+                           next[i].sel.offmask != cls->knode.sel->offmask ||
+                           next[i].sel.off != cls->knode.sel->off)
                                continue;
 
                        /* Found a possible candidate.  Find a key that
@@ -257,9 +257,9 @@ int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
                                val = cls->knode.sel->keys[j].val;
                                mask = cls->knode.sel->keys[j].mask;
 
-                               if (next[i].match_off == off &&
-                                   next[i].match_val == val &&
-                                   next[i].match_mask == mask) {
+                               if (next[i].key.off == off &&
+                                   next[i].key.val == val &&
+                                   next[i].key.mask == mask) {
                                        found = true;
                                        break;
                                }
index 125868c..f59dd4b 100644 (file)
 struct cxgb4_match_field {
        int off; /* Offset from the beginning of the header to match */
        /* Fill the value/mask pair in the spec if matched */
-       int (*val)(struct ch_filter_specification *f, u32 val, u32 mask);
+       int (*val)(struct ch_filter_specification *f, __be32 val, __be32 mask);
 };
 
 /* IPv4 match fields */
 static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f,
-                                     u32 val, u32 mask)
+                                     __be32 val, __be32 mask)
 {
        f->val.tos  = (ntohl(val)  >> 16) & 0x000000FF;
        f->mask.tos = (ntohl(mask) >> 16) & 0x000000FF;
@@ -52,7 +52,7 @@ static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f,
-                                      u32 val, u32 mask)
+                                      __be32 val, __be32 mask)
 {
        u32 mask_val;
        u8 frag_val;
@@ -74,7 +74,7 @@ static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f,
-                                       u32 val, u32 mask)
+                                       __be32 val, __be32 mask)
 {
        f->val.proto  = (ntohl(val)  >> 16) & 0x000000FF;
        f->mask.proto = (ntohl(mask) >> 16) & 0x000000FF;
@@ -83,7 +83,7 @@ static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f,
-                                        u32 val, u32 mask)
+                                        __be32 val, __be32 mask)
 {
        memcpy(&f->val.fip[0],  &val,  sizeof(u32));
        memcpy(&f->mask.fip[0], &mask, sizeof(u32));
@@ -92,7 +92,7 @@ static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv4_dst_ip(struct ch_filter_specification *f,
-                                        u32 val, u32 mask)
+                                        __be32 val, __be32 mask)
 {
        memcpy(&f->val.lip[0],  &val,  sizeof(u32));
        memcpy(&f->mask.lip[0], &mask, sizeof(u32));
@@ -111,7 +111,7 @@ static const struct cxgb4_match_field cxgb4_ipv4_fields[] = {
 
 /* IPv6 match fields */
 static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f,
-                                     u32 val, u32 mask)
+                                     __be32 val, __be32 mask)
 {
        f->val.tos  = (ntohl(val)  >> 20) & 0x000000FF;
        f->mask.tos = (ntohl(mask) >> 20) & 0x000000FF;
@@ -120,7 +120,7 @@ static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f,
-                                       u32 val, u32 mask)
+                                       __be32 val, __be32 mask)
 {
        f->val.proto  = (ntohl(val)  >> 8) & 0x000000FF;
        f->mask.proto = (ntohl(mask) >> 8) & 0x000000FF;
@@ -129,7 +129,7 @@ static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.fip[0],  &val,  sizeof(u32));
        memcpy(&f->mask.fip[0], &mask, sizeof(u32));
@@ -138,7 +138,7 @@ static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.fip[4],  &val,  sizeof(u32));
        memcpy(&f->mask.fip[4], &mask, sizeof(u32));
@@ -147,7 +147,7 @@ static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.fip[8],  &val,  sizeof(u32));
        memcpy(&f->mask.fip[8], &mask, sizeof(u32));
@@ -156,7 +156,7 @@ static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.fip[12],  &val,  sizeof(u32));
        memcpy(&f->mask.fip[12], &mask, sizeof(u32));
@@ -165,7 +165,7 @@ static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.lip[0],  &val,  sizeof(u32));
        memcpy(&f->mask.lip[0], &mask, sizeof(u32));
@@ -174,7 +174,7 @@ static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.lip[4],  &val,  sizeof(u32));
        memcpy(&f->mask.lip[4], &mask, sizeof(u32));
@@ -183,7 +183,7 @@ static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.lip[8],  &val,  sizeof(u32));
        memcpy(&f->mask.lip[8], &mask, sizeof(u32));
@@ -192,7 +192,7 @@ static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f,
 }
 
 static inline int cxgb4_fill_ipv6_dst_ip3(struct ch_filter_specification *f,
-                                         u32 val, u32 mask)
+                                         __be32 val, __be32 mask)
 {
        memcpy(&f->val.lip[12],  &val,  sizeof(u32));
        memcpy(&f->mask.lip[12], &mask, sizeof(u32));
@@ -216,7 +216,7 @@ static const struct cxgb4_match_field cxgb4_ipv6_fields[] = {
 
 /* TCP/UDP match */
 static inline int cxgb4_fill_l4_ports(struct ch_filter_specification *f,
-                                     u32 val, u32 mask)
+                                     __be32 val, __be32 mask)
 {
        f->val.fport  = ntohl(val)  >> 16;
        f->mask.fport = ntohl(mask) >> 16;
@@ -237,19 +237,13 @@ static const struct cxgb4_match_field cxgb4_udp_fields[] = {
 };
 
 struct cxgb4_next_header {
-       unsigned int offset; /* Offset to next header */
-       /* offset, shift, and mask added to offset above
+       /* Offset, shift, and mask added to beginning of the header
         * to get to next header.  Useful when using a header
         * field's value to jump to next header such as IHL field
         * in IPv4 header.
         */
-       unsigned int offoff;
-       u32 shift;
-       u32 mask;
-       /* match criteria to make this jump */
-       unsigned int match_off;
-       u32 match_val;
-       u32 match_mask;
+       struct tc_u32_sel sel;
+       struct tc_u32_key key;
        /* location of jump to make */
        const struct cxgb4_match_field *jump;
 };
@@ -258,26 +252,74 @@ struct cxgb4_next_header {
  * IPv4 header.
  */
 static const struct cxgb4_next_header cxgb4_ipv4_jumps[] = {
-       { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
-         .match_off = 8, .match_val = 0x600, .match_mask = 0xFF00,
-         .jump = cxgb4_tcp_fields },
-       { .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
-         .match_off = 8, .match_val = 0x1100, .match_mask = 0xFF00,
-         .jump = cxgb4_udp_fields },
-       { .jump = NULL }
+       {
+               /* TCP Jump */
+               .sel = {
+                       .off = 0,
+                       .offoff = 0,
+                       .offshift = 6,
+                       .offmask = cpu_to_be16(0x0f00),
+               },
+               .key = {
+                       .off = 8,
+                       .val = cpu_to_be32(0x00060000),
+                       .mask = cpu_to_be32(0x00ff0000),
+               },
+               .jump = cxgb4_tcp_fields,
+       },
+       {
+               /* UDP Jump */
+               .sel = {
+                       .off = 0,
+                       .offoff = 0,
+                       .offshift = 6,
+                       .offmask = cpu_to_be16(0x0f00),
+               },
+               .key = {
+                       .off = 8,
+                       .val = cpu_to_be32(0x00110000),
+                       .mask = cpu_to_be32(0x00ff0000),
+               },
+               .jump = cxgb4_udp_fields,
+       },
+       { .jump = NULL },
 };
 
 /* Accept a rule with a jump directly past the 40 Bytes of IPv6 fixed header
  * to get to transport layer header.
  */
 static const struct cxgb4_next_header cxgb4_ipv6_jumps[] = {
-       { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
-         .match_off = 4, .match_val = 0x60000, .match_mask = 0xFF0000,
-         .jump = cxgb4_tcp_fields },
-       { .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
-         .match_off = 4, .match_val = 0x110000, .match_mask = 0xFF0000,
-         .jump = cxgb4_udp_fields },
-       { .jump = NULL }
+       {
+               /* TCP Jump */
+               .sel = {
+                       .off = 40,
+                       .offoff = 0,
+                       .offshift = 0,
+                       .offmask = 0,
+               },
+               .key = {
+                       .off = 4,
+                       .val = cpu_to_be32(0x00000600),
+                       .mask = cpu_to_be32(0x0000ff00),
+               },
+               .jump = cxgb4_tcp_fields,
+       },
+       {
+               /* UDP Jump */
+               .sel = {
+                       .off = 40,
+                       .offoff = 0,
+                       .offshift = 0,
+                       .offmask = 0,
+               },
+               .key = {
+                       .off = 4,
+                       .val = cpu_to_be32(0x00001100),
+                       .mask = cpu_to_be32(0x0000ff00),
+               },
+               .jump = cxgb4_udp_fields,
+       },
+       { .jump = NULL },
 };
 
 struct cxgb4_link {
index dbce99b..a963fd0 100644 (file)
@@ -106,6 +106,8 @@ struct tid_info {
        unsigned long *stid_bmap;
        unsigned int nstids;
        unsigned int stid_base;
+
+       unsigned int nhash;
        unsigned int hash_base;
 
        union aopen_entry *atid_tab;
index 72b37a6..c486412 100644 (file)
@@ -503,40 +503,19 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
 EXPORT_SYMBOL(cxgb4_select_ntuple);
 
 /*
- * Called when address resolution fails for an L2T entry to handle packets
- * on the arpq head.  If a packet specifies a failure handler it is invoked,
- * otherwise the packet is sent to the device.
- */
-static void handle_failed_resolution(struct adapter *adap, struct l2t_entry *e)
-{
-       struct sk_buff *skb;
-
-       while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
-               const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
-
-               spin_unlock(&e->lock);
-               if (cb->arp_err_handler)
-                       cb->arp_err_handler(cb->handle, skb);
-               else
-                       t4_ofld_send(adap, skb);
-               spin_lock(&e->lock);
-       }
-}
-
-/*
  * Called when the host's neighbor layer makes a change to some entry that is
  * loaded into the HW L2 table.
  */
 void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
 {
-       struct l2t_entry *e;
-       struct sk_buff_head *arpq = NULL;
-       struct l2t_data *d = adap->l2t;
        unsigned int addr_len = neigh->tbl->key_len;
        u32 *addr = (u32 *) neigh->primary_key;
-       int ifidx = neigh->dev->ifindex;
-       int hash = addr_hash(d, addr, addr_len, ifidx);
+       int hash, ifidx = neigh->dev->ifindex;
+       struct sk_buff_head *arpq = NULL;
+       struct l2t_data *d = adap->l2t;
+       struct l2t_entry *e;
 
+       hash = addr_hash(d, addr, addr_len, ifidx);
        read_lock_bh(&d->lock);
        for (e = d->l2tab[hash].first; e; e = e->next)
                if (!addreq(e, addr) && e->ifindex == ifidx) {
@@ -569,8 +548,25 @@ void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
                        write_l2e(adap, e, 0);
        }
 
-       if (arpq)
-               handle_failed_resolution(adap, e);
+       if (arpq) {
+               struct sk_buff *skb;
+
+               /* Called when address resolution fails for an L2T
+                * entry to handle packets on the arpq head. If a
+                * packet specifies a failure handler it is invoked,
+                * otherwise the packet is sent to the device.
+                */
+               while ((skb = __skb_dequeue(&e->arpq)) != NULL) {
+                       const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
+
+                       spin_unlock(&e->lock);
+                       if (cb->arp_err_handler)
+                               cb->arp_err_handler(cb->handle, skb);
+                       else
+                               t4_ofld_send(adap, skb);
+                       spin_lock(&e->lock);
+               }
+       }
        spin_unlock_bh(&e->lock);
 }
 
@@ -613,6 +609,7 @@ struct l2t_entry *t4_l2t_alloc_switching(struct adapter *adap, u16 vlan,
 }
 
 /**
+ * cxgb4_l2t_alloc_switching - Allocates an L2T entry for switch filters
  * @dev: net_device pointer
  * @vlan: VLAN Id
  * @port: Associated port
index fde93c5..a1b1446 100644 (file)
@@ -598,7 +598,7 @@ struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
 /**
  * cxgb4_sched_class_free - free a scheduling class
  * @dev: net_device pointer
- * @e: scheduling class
+ * @classid: scheduling class id to free
  *
  * Frees a scheduling class if there are no users.
  */
index 1359158..3f0fdff 100644 (file)
@@ -302,7 +302,7 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
 
 /**
  *     free_tx_desc - reclaims Tx descriptors and their buffers
- *     @adapter: the adapter
+ *     @adap: the adapter
  *     @q: the Tx queue to reclaim descriptors from
  *     @n: the number of descriptors to reclaim
  *     @unmap: whether the buffers should be unmapped for DMA
@@ -722,6 +722,7 @@ static inline unsigned int flits_to_desc(unsigned int n)
 /**
  *     is_eth_imm - can an Ethernet packet be sent as immediate data?
  *     @skb: the packet
+ *     @chip_ver: chip version
  *
  *     Returns whether an Ethernet packet is small enough to fit as
  *     immediate data. Return value corresponds to headroom required.
@@ -749,6 +750,7 @@ static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
 /**
  *     calc_tx_flits - calculate the number of flits for a packet Tx WR
  *     @skb: the packet
+ *     @chip_ver: chip version
  *
  *     Returns the number of flits needed for a Tx WR for the given Ethernet
  *     packet, including the needed WR and CPL headers.
@@ -804,6 +806,7 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
 /**
  *     calc_tx_descs - calculate the number of Tx descriptors for a packet
  *     @skb: the packet
+ *     @chip_ver: chip version
  *
  *     Returns the number of Tx descriptors needed for the given Ethernet
  *     packet, including the needed WR and CPL headers.
@@ -1425,12 +1428,10 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 
        qidx = skb_get_queue_mapping(skb);
        if (ptp_enabled) {
-               spin_lock(&adap->ptp_lock);
                if (!(adap->ptp_tx_skb)) {
                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                        adap->ptp_tx_skb = skb_get(skb);
                } else {
-                       spin_unlock(&adap->ptp_lock);
                        goto out_free;
                }
                q = &adap->sge.ptptxq;
@@ -1444,11 +1445,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 
 #ifdef CONFIG_CHELSIO_T4_FCOE
        ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
-       if (unlikely(ret == -ENOTSUPP)) {
-               if (ptp_enabled)
-                       spin_unlock(&adap->ptp_lock);
+       if (unlikely(ret == -EOPNOTSUPP))
                goto out_free;
-       }
 #endif /* CONFIG_CHELSIO_T4_FCOE */
 
        chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
@@ -1461,8 +1459,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
                dev_err(adap->pdev_dev,
                        "%s: Tx ring %u full while queue awake!\n",
                        dev->name, qidx);
-               if (ptp_enabled)
-                       spin_unlock(&adap->ptp_lock);
                return NETDEV_TX_BUSY;
        }
 
@@ -1481,8 +1477,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
            unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
                memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
                q->mapping_err++;
-               if (ptp_enabled)
-                       spin_unlock(&adap->ptp_lock);
                goto out_free;
        }
 
@@ -1533,8 +1527,7 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
                        if (iph->version == 4) {
                                iph->check = 0;
                                iph->tot_len = 0;
-                               iph->check = (u16)(~ip_fast_csum((u8 *)iph,
-                                                                iph->ihl));
+                               iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl);
                        }
                        if (skb->ip_summed == CHECKSUM_PARTIAL)
                                cntrl = hwcsum(adap->params.chip, skb);
@@ -1630,8 +1623,6 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
        txq_advance(&q->q, ndesc);
 
        cxgb4_ring_tx_db(adap, &q->q, ndesc);
-       if (ptp_enabled)
-               spin_unlock(&adap->ptp_lock);
        return NETDEV_TX_OK;
 
 out_free:
@@ -2377,6 +2368,16 @@ netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (unlikely(qid >= pi->nqsets))
                return cxgb4_ethofld_xmit(skb, dev);
 
+       if (is_ptp_enabled(skb, dev)) {
+               struct adapter *adap = netdev2adap(dev);
+               netdev_tx_t ret;
+
+               spin_lock(&adap->ptp_lock);
+               ret = cxgb4_eth_xmit(skb, dev);
+               spin_unlock(&adap->ptp_lock);
+               return ret;
+       }
+
        return cxgb4_eth_xmit(skb, dev);
 }
 
@@ -2410,9 +2411,9 @@ static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq)
 
 /**
  * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
- * @dev - netdevice
- * @eotid - ETHOFLD tid to bind/unbind
- * @tc - traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
+ * @dev: netdevice
+ * @eotid: ETHOFLD tid to bind/unbind
+ * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
  *
  * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class.
  * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from
@@ -2431,7 +2432,7 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
        struct sk_buff *skb;
        int ret = 0;
 
-       len = sizeof(*flowc) + sizeof(struct fw_flowc_mnemval) * nparams;
+       len = struct_size(flowc, mnemval, nparams);
        len16 = DIV_ROUND_UP(len, 16);
 
        entry = cxgb4_lookup_eotid(&adap->tids, eotid);
@@ -2536,6 +2537,80 @@ static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
        }
 }
 
+#define CXGB4_SELFTEST_LB_STR "CHELSIO_SELFTEST"
+
+int cxgb4_selftest_lb_pkt(struct net_device *netdev)
+{
+       struct port_info *pi = netdev_priv(netdev);
+       struct adapter *adap = pi->adapter;
+       struct cxgb4_ethtool_lb_test *lb;
+       int ret, i = 0, pkt_len, credits;
+       struct fw_eth_tx_pkt_wr *wr;
+       struct cpl_tx_pkt_core *cpl;
+       u32 ctrl0, ndesc, flits;
+       struct sge_eth_txq *q;
+       u8 *sgl;
+
+       pkt_len = ETH_HLEN + sizeof(CXGB4_SELFTEST_LB_STR);
+
+       flits = DIV_ROUND_UP(pkt_len + sizeof(struct cpl_tx_pkt) +
+                            sizeof(*wr), sizeof(__be64));
+       ndesc = flits_to_desc(flits);
+
+       lb = &pi->ethtool_lb;
+       lb->loopback = 1;
+
+       q = &adap->sge.ethtxq[pi->first_qset];
+
+       reclaim_completed_tx(adap, &q->q, -1, true);
+       credits = txq_avail(&q->q) - ndesc;
+       if (unlikely(credits < 0))
+               return -ENOMEM;
+
+       wr = (void *)&q->q.desc[q->q.pidx];
+       memset(wr, 0, sizeof(struct tx_desc));
+
+       wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
+                              FW_WR_IMMDLEN_V(pkt_len +
+                              sizeof(*cpl)));
+       wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)));
+       wr->r3 = cpu_to_be64(0);
+
+       cpl = (void *)(wr + 1);
+       sgl = (u8 *)(cpl + 1);
+
+       ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_PF_V(adap->pf) |
+               TXPKT_INTF_V(pi->tx_chan + 4);
+
+       cpl->ctrl0 = htonl(ctrl0);
+       cpl->pack = htons(0);
+       cpl->len = htons(pkt_len);
+       cpl->ctrl1 = cpu_to_be64(TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F);
+
+       eth_broadcast_addr(sgl);
+       i += ETH_ALEN;
+       ether_addr_copy(&sgl[i], netdev->dev_addr);
+       i += ETH_ALEN;
+
+       snprintf(&sgl[i], sizeof(CXGB4_SELFTEST_LB_STR), "%s",
+                CXGB4_SELFTEST_LB_STR);
+
+       init_completion(&lb->completion);
+       txq_advance(&q->q, ndesc);
+       cxgb4_ring_tx_db(adap, &q->q, ndesc);
+
+       /* wait for the pkt to return */
+       ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
+       if (!ret)
+               ret = -ETIMEDOUT;
+       else
+               ret = lb->result;
+
+       lb->loopback = 0;
+
+       return ret;
+}
+
 /**
  *     ctrl_xmit - send a packet through an SGE control Tx queue
  *     @q: the control queue
@@ -2691,7 +2766,6 @@ static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
 
 /**
  *     txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
- *     @adap: the adapter
  *     @q: the queue to stop
  *
  *     Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
@@ -3286,7 +3360,7 @@ enum {
 
 /**
  *     t4_systim_to_hwstamp - read hardware time stamp
- *     @adap: the adapter
+ *     @adapter: the adapter
  *     @skb: the packet
  *
  *     Read Time Stamp from MPS packet and insert in skb which
@@ -3313,15 +3387,16 @@ static noinline int t4_systim_to_hwstamp(struct adapter *adapter,
 
        hwtstamps = skb_hwtstamps(skb);
        memset(hwtstamps, 0, sizeof(*hwtstamps));
-       hwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*((u64 *)data)));
+       hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data));
 
        return RX_PTP_PKT_SUC;
 }
 
 /**
  *     t4_rx_hststamp - Recv PTP Event Message
- *     @adap: the adapter
+ *     @adapter: the adapter
  *     @rsp: the response queue descriptor holding the RX_PKT message
+ *     @rxq: the response queue holding the RX_PKT message
  *     @skb: the packet
  *
  *     PTP enabled and MPS packet, read HW timestamp
@@ -3345,7 +3420,7 @@ static int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp,
 
 /**
  *      t4_tx_hststamp - Loopback PTP Transmit Event Message
- *      @adap: the adapter
+ *      @adapter: the adapter
  *      @skb: the packet
  *      @dev: the ingress net device
  *
@@ -3412,6 +3487,31 @@ static void t4_tx_completion_handler(struct sge_rspq *rspq,
        t4_sge_eth_txq_egress_update(adapter, txq, -1);
 }
 
+static int cxgb4_validate_lb_pkt(struct port_info *pi, const struct pkt_gl *si)
+{
+       struct adapter *adap = pi->adapter;
+       struct cxgb4_ethtool_lb_test *lb;
+       struct sge *s = &adap->sge;
+       struct net_device *netdev;
+       u8 *data;
+       int i;
+
+       netdev = adap->port[pi->port_id];
+       lb = &pi->ethtool_lb;
+       data = si->va + s->pktshift;
+
+       i = ETH_ALEN;
+       if (!ether_addr_equal(data + i, netdev->dev_addr))
+               return -1;
+
+       i += ETH_ALEN;
+       if (strcmp(&data[i], CXGB4_SELFTEST_LB_STR))
+               lb->result = -EIO;
+
+       complete(&lb->completion);
+       return 0;
+}
+
 /**
  *     t4_ethrx_handler - process an ingress ethernet packet
  *     @q: the response queue that received the packet
@@ -3435,6 +3535,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
        struct port_info *pi;
        int ret = 0;
 
+       pi = netdev_priv(q->netdev);
        /* If we're looking at TX Queue CIDX Update, handle that separately
         * and return.
         */
@@ -3462,6 +3563,12 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
        if (err_vec)
                rxq->stats.bad_rx_pkts++;
 
+       if (unlikely(pi->ethtool_lb.loopback && pkt->iff >= NCHAN)) {
+               ret = cxgb4_validate_lb_pkt(pi, si);
+               if (!ret)
+                       return 0;
+       }
+
        if (((pkt->l2info & htonl(RXF_TCP_F)) ||
             tnl_hdr_len) &&
            (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
@@ -3475,7 +3582,6 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
                rxq->stats.rx_drops++;
                return 0;
        }
-       pi = netdev_priv(q->netdev);
 
        /* Handle PTP Event Rx packet */
        if (unlikely(pi->ptp_enable)) {
index 01c65d1..e617e4a 100644 (file)
@@ -55,7 +55,7 @@ struct smt_data *t4_init_smt(void)
        for (i = 0; i < s->smt_size; ++i) {
                s->smtab[i].idx = i;
                s->smtab[i].state = SMT_STATE_UNUSED;
-               memset(&s->smtab[i].src_mac, 0, ETH_ALEN);
+               eth_zero_addr(s->smtab[i].src_mac);
                spin_lock_init(&s->smtab[i].lock);
                s->smtab[i].refcnt = 0;
        }
@@ -103,6 +103,7 @@ static void t4_smte_free(struct smt_entry *e)
 }
 
 /**
+ * cxgb4_smt_release - Release SMT entry
  * @e: smt entry to release
  *
  * Releases ref count and frees up an smt entry from SMT table
@@ -231,6 +232,7 @@ static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf,
 }
 
 /**
+ * cxgb4_smt_alloc_switching - Allocates an SMT entry for switch filters.
  * @dev: net_device pointer
  * @smac: MAC address to add to SMT
  * Returns pointer to the SMT entry created
index 1c8068c..8a56491 100644 (file)
@@ -3163,7 +3163,7 @@ int t4_get_tp_version(struct adapter *adapter, u32 *vers)
 
 /**
  *     t4_get_exprom_version - return the Expansion ROM version (if any)
- *     @adapter: the adapter
+ *     @adap: the adapter
  *     @vers: where to place the version
  *
  *     Reads the Expansion ROM header from FLASH and returns the version
@@ -3493,7 +3493,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
        drv_fw = &fw_info->fw_hdr;
 
        /* Read the header of the firmware on the card */
-       ret = -t4_read_flash(adap, FLASH_FW_START,
+       ret = t4_read_flash(adap, FLASH_FW_START,
                            sizeof(*card_fw) / sizeof(uint32_t),
                            (uint32_t *)card_fw, 1);
        if (ret == 0) {
@@ -3522,8 +3522,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
                   should_install_fs_fw(adap, card_fw_usable,
                                        be32_to_cpu(fs_fw->fw_ver),
                                        be32_to_cpu(card_fw->fw_ver))) {
-               ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
-                                    fw_size, 0);
+               ret = t4_fw_upgrade(adap, adap->mbox, fw_data,
+                                   fw_size, 0);
                if (ret != 0) {
                        dev_err(adap->pdev_dev,
                                "failed to install firmware: %d\n", ret);
@@ -3554,7 +3554,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
                        FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
                        FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
                        FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
-               ret = EINVAL;
+               ret = -EINVAL;
                goto bye;
        }
 
@@ -3752,7 +3752,6 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
  *     t4_load_phy_fw - download port PHY firmware
  *     @adap: the adapter
  *     @win: the PCI-E Memory Window index to use for t4_memory_rw()
- *     @win_lock: the lock to use to guard the memory copy
  *     @phy_fw_version: function to check PHY firmware versions
  *     @phy_fw_data: the PHY firmware image to write
  *     @phy_fw_size: image size
@@ -3761,9 +3760,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
  *     @phy_fw_version is supplied, then it will be used to determine if
  *     it's necessary to perform the transfer by comparing the version
  *     of any existing adapter PHY firmware with that of the passed in
- *     PHY firmware image.  If @win_lock is non-NULL then it will be used
- *     around the call to t4_memory_rw() which transfers the PHY firmware
- *     to the adapter.
+ *     PHY firmware image.
  *
  *     A negative error number will be returned if an error occurs.  If
  *     version number support is available and there's no need to upgrade
@@ -3775,14 +3772,13 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
  *     contents.  Thus, loading PHY firmware on such adapters must happen
  *     after any FW_RESET_CMDs ...
  */
-int t4_load_phy_fw(struct adapter *adap,
-                  int win, spinlock_t *win_lock,
+int t4_load_phy_fw(struct adapter *adap, int win,
                   int (*phy_fw_version)(const u8 *, size_t),
                   const u8 *phy_fw_data, size_t phy_fw_size)
 {
+       int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
        unsigned long mtype = 0, maddr = 0;
        u32 param, val;
-       int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
        int ret;
 
        /* If we have version number support, then check to see if the adapter
@@ -3822,13 +3818,9 @@ int t4_load_phy_fw(struct adapter *adap,
        /* Copy the supplied PHY Firmware image to the adapter memory location
         * allocated by the adapter firmware.
         */
-       if (win_lock)
-               spin_lock_bh(win_lock);
        ret = t4_memory_rw(adap, win, mtype, maddr,
                           phy_fw_size, (__be32 *)phy_fw_data,
                           T4_MEMORY_WRITE);
-       if (win_lock)
-               spin_unlock_bh(win_lock);
        if (ret)
                return ret;
 
@@ -5310,7 +5302,7 @@ static unsigned int t4_use_ldst(struct adapter *adap)
  * @cmd: TP fw ldst address space type
  * @vals: where the indirect register values are stored/written
  * @nregs: how many indirect registers to read/write
- * @start_idx: index of first indirect register to read/write
+ * @start_index: index of first indirect register to read/write
  * @rw: Read (1) or Write (0)
  * @sleep_ok: if true we may sleep while awaiting command completion
  *
@@ -6115,7 +6107,7 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
 
 /**
  *     compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
- *     @adap: the adapter
+ *     @adapter: the adapter
  *     @pidx: the port index
  *
  *     Computes and returns a bitmap indicating which MPS buffer groups are
@@ -6252,7 +6244,7 @@ static unsigned int t4_get_tp_e2c_map(struct adapter *adapter, int pidx)
 
 /**
  *     t4_get_tp_ch_map - return TP ingress channels associated with a port
- *     @adapter: the adapter
+ *     @adap: the adapter
  *     @pidx: the port index
  *
  *     Returns a bitmap indicating which TP Ingress Channels are associated
@@ -6589,7 +6581,7 @@ int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
  *     @phy_addr: the PHY address
  *     @mmd: the PHY MMD to access (0 for clause 22 PHYs)
  *     @reg: the register to write
- *     @valp: value to write
+ *     @val: value to write
  *
  *     Issues a FW command through the given mailbox to write a PHY register.
  */
@@ -6615,7 +6607,7 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
 
 /**
  *     t4_sge_decode_idma_state - decode the idma state
- *     @adap: the adapter
+ *     @adapter: the adapter
  *     @state: the state idma is stuck in
  */
 void t4_sge_decode_idma_state(struct adapter *adapter, int state)
@@ -6782,7 +6774,7 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
  *      t4_sge_ctxt_flush - flush the SGE context cache
  *      @adap: the adapter
  *      @mbox: mailbox to use for the FW command
- *      @ctx_type: Egress or Ingress
+ *      @ctxt_type: Egress or Ingress
  *
  *      Issues a FW command through the given mailbox to flush the
  *      SGE context cache.
@@ -6809,7 +6801,7 @@ int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
 
 /**
  *     t4_read_sge_dbqtimers - read SGE Doorbell Queue Timer values
- *     @adap - the adapter
+ *     @adap: the adapter
  *     @ndbqtimers: size of the provided SGE Doorbell Queue Timer table
  *     @dbqtimers: SGE Doorbell Queue Timer table
  *
@@ -7092,6 +7084,7 @@ static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
 /**
  *     t4_fw_restart - restart the firmware by taking the uP out of RESET
  *     @adap: the adapter
+ *     @mbox: mailbox to use for the FW command
  *     @reset: if we want to do a RESET to restart things
  *
  *     Restart firmware previously halted by t4_fw_halt().  On successful
@@ -7630,6 +7623,8 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
  *     @nmac: number of MAC addresses needed (1 to 5)
  *     @mac: the MAC addresses of the VI
  *     @rss_size: size of RSS table slice associated with this VI
+ *     @vivld: the destination to store the VI Valid value.
+ *     @vin: the destination to store the VIN value.
  *
  *     Allocates a virtual interface for the given physical port.  If @mac is
  *     not %NULL it contains the MAC addresses of the VI as assigned by FW.
@@ -7716,6 +7711,7 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
  *     @adap: the adapter
  *     @mbox: mailbox to use for the FW command
  *     @viid: the VI id
+ *     @viid_mirror: the mirror VI id
  *     @mtu: the new MTU or -1
  *     @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
  *     @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
@@ -7726,10 +7722,11 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
  *     Sets Rx properties of a virtual interface.
  */
 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
-                 int mtu, int promisc, int all_multi, int bcast, int vlanex,
-                 bool sleep_ok)
+                 unsigned int viid_mirror, int mtu, int promisc, int all_multi,
+                 int bcast, int vlanex, bool sleep_ok)
 {
-       struct fw_vi_rxmode_cmd c;
+       struct fw_vi_rxmode_cmd c, c_mirror;
+       int ret;
 
        /* convert to FW values */
        if (mtu < 0)
@@ -7754,7 +7751,24 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
                            FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
                            FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
                            FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
-       return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
+
+       if (viid_mirror) {
+               memcpy(&c_mirror, &c, sizeof(c_mirror));
+               c_mirror.op_to_viid =
+                       cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
+                                   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+                                   FW_VI_RXMODE_CMD_VIID_V(viid_mirror));
+       }
+
+       ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
+       if (ret)
+               return ret;
+
+       if (viid_mirror)
+               ret = t4_wr_mbox_meat(adap, mbox, &c_mirror, sizeof(c_mirror),
+                                     NULL, sleep_ok);
+
+       return ret;
 }
 
 /**
@@ -7848,7 +7862,7 @@ int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
  *      t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
  *      @adap: the adapter
  *      @viid: the VI id
- *      @mac: the MAC address
+ *      @addr: the MAC address
  *      @mask: the mask
  *      @vni: the VNI id for the tunnel protocol
  *      @vni_mask: mask for the VNI id
@@ -7897,11 +7911,11 @@ int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
  *     t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
  *     @adap: the adapter
  *     @viid: the VI id
- *     @mac: the MAC address
+ *     @addr: the MAC address
  *     @mask: the mask
  *     @idx: index at which to add this entry
- *     @port_id: the port index
  *     @lookup_type: MAC address for inner (1) or outer (0) header
+ *     @port_id: the port index
  *     @sleep_ok: call is allowed to sleep
  *
  *     Adds the mac entry at the specified index using raw mac interface.
@@ -8126,7 +8140,7 @@ int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
  *     @idx: index of existing filter for old value of MAC address, or -1
  *     @addr: the new MAC address value
  *     @persist: whether a new MAC allocation should be persistent
- *     @add_smt: if true also add the address to the HW SMT
+ *     @smt_idx: the destination to store the new SMT index.
  *
  *     Modifies an exact-match filter and sets it to the new MAC address.
  *     Note that in general it is not possible to modify the value of a given
@@ -8448,7 +8462,6 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 
 /**
  *     t4_link_down_rc_str - return a string for a Link Down Reason Code
- *     @adap: the adapter
  *     @link_down_rc: Link Down Reason Code
  *
  *     Returns a string representation of the Link Down Reason Code.
@@ -8472,9 +8485,7 @@ static const char *t4_link_down_rc_str(unsigned char link_down_rc)
        return reason[link_down_rc];
 }
 
-/**
- * Return the highest speed set in the port capabilities, in Mb/s.
- */
+/* Return the highest speed set in the port capabilities, in Mb/s. */
 static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
 {
        #define TEST_SPEED_RETURN(__caps_speed, __speed) \
@@ -9110,7 +9121,6 @@ found:
 /**
  *     t4_prep_adapter - prepare SW and HW for operation
  *     @adapter: the adapter
- *     @reset: if true perform a HW reset
  *
  *     Initialize adapter SW state for the various HW modules, set initial
  *     values for some adapter tunables, take PHYs out of reset, and
@@ -9720,6 +9730,22 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
        return 0;
 }
 
+int t4_init_port_mirror(struct port_info *pi, u8 mbox, u8 port, u8 pf, u8 vf,
+                       u16 *mirror_viid)
+{
+       int ret;
+
+       ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, NULL, NULL,
+                         NULL, NULL);
+       if (ret < 0)
+               return ret;
+
+       if (mirror_viid)
+               *mirror_viid = ret;
+
+       return 0;
+}
+
 /**
  *     t4_read_cimq_cfg - read CIM queue configuration
  *     @adap: the adapter
@@ -10395,6 +10421,7 @@ int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
 /**
  *     t4_i2c_rd - read I2C data from adapter
  *     @adap: the adapter
+ *     @mbox: mailbox to use for the FW command
  *     @port: Port number if per-port device; <0 if not
  *     @devid: per-port device ID or absolute device ID
  *     @offset: byte offset into device I2C space
@@ -10450,7 +10477,7 @@ int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
 
 /**
  *      t4_set_vlan_acl - Set a VLAN id for the specified VF
- *      @adapter: the adapter
+ *      @adap: the adapter
  *      @mbox: mailbox to use for the FW command
  *      @vf: one of the VFs instantiated by the specified PF
  *      @vlan: The vlanid to be set
@@ -10481,3 +10508,280 @@ int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
 
        return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
 }
+
+/**
+ *     modify_device_id - Modifies the device ID of the Boot BIOS image
+ *     @device_id: the device ID to write.
+ *     @boot_data: the boot image to modify.
+ *
+ *     Write the supplied device ID to the boot BIOS image.
+ */
+static void modify_device_id(int device_id, u8 *boot_data)
+{
+       struct cxgb4_pcir_data *pcir_header;
+       struct legacy_pci_rom_hdr *header;
+       u8 *cur_header = boot_data;
+       u16 pcir_offset;
+
+        /* Loop through all chained images and change the device IDs */
+       do {
+               header = (struct legacy_pci_rom_hdr *)cur_header;
+               pcir_offset = le16_to_cpu(header->pcir_offset);
+               pcir_header = (struct cxgb4_pcir_data *)(cur_header +
+                             pcir_offset);
+
+               /**
+                * Only modify the Device ID if code type is Legacy or HP.
+                * 0x00: Okay to modify
+                * 0x01: FCODE. Do not modify
+                * 0x03: Okay to modify
+                * 0x04-0xFF: Do not modify
+                */
+               if (pcir_header->code_type == CXGB4_HDR_CODE1) {
+                       u8 csum = 0;
+                       int i;
+
+                       /**
+                        * Modify Device ID to match current adapter
+                        */
+                       pcir_header->device_id = cpu_to_le16(device_id);
+
+                       /**
+                        * Set checksum temporarily to 0.
+                        * We will recalculate it later.
+                        */
+                       header->cksum = 0x0;
+
+                       /**
+                        * Calculate and update checksum
+                        */
+                       for (i = 0; i < (header->size512 * 512); i++)
+                               csum += cur_header[i];
+
+                       /**
+                        * Invert summed value to create the checksum
+                        * Writing new checksum value directly to the boot data
+                        */
+                       cur_header[7] = -csum;
+
+               } else if (pcir_header->code_type == CXGB4_HDR_CODE2) {
+                       /**
+                        * Modify Device ID to match current adapter
+                        */
+                       pcir_header->device_id = cpu_to_le16(device_id);
+               }
+
+               /**
+                * Move header pointer up to the next image in the ROM.
+                */
+               cur_header += header->size512 * 512;
+       } while (!(pcir_header->indicator & CXGB4_HDR_INDI));
+}
+
+/**
+ *     t4_load_boot - download boot flash
+ *     @adap: the adapter
+ *     @boot_data: the boot image to write
+ *     @boot_addr: offset in flash to write boot_data
+ *     @size: image size
+ *
+ *     Write the supplied boot image to the card's serial flash.
+ *     The boot image has the following sections: a 28-byte header and the
+ *     boot image.
+ */
+int t4_load_boot(struct adapter *adap, u8 *boot_data,
+                unsigned int boot_addr, unsigned int size)
+{
+       unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+       unsigned int boot_sector = (boot_addr * 1024);
+       struct cxgb4_pci_exp_rom_header *header;
+       struct cxgb4_pcir_data *pcir_header;
+       int pcir_offset;
+       unsigned int i;
+       u16 device_id;
+       int ret, addr;
+
+       /**
+        * Make sure the boot image does not encroach on the firmware region
+        */
+       if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
+               dev_err(adap->pdev_dev, "boot image encroaching on firmware region\n");
+               return -EFBIG;
+       }
+
+       /* Get boot header */
+       header = (struct cxgb4_pci_exp_rom_header *)boot_data;
+       pcir_offset = le16_to_cpu(header->pcir_offset);
+       /* PCIR Data Structure */
+       pcir_header = (struct cxgb4_pcir_data *)&boot_data[pcir_offset];
+
+       /**
+        * Perform some primitive sanity testing to avoid accidentally
+        * writing garbage over the boot sectors.  We ought to check for
+        * more but it's not worth it for now ...
+        */
+       if (size < BOOT_MIN_SIZE || size > BOOT_MAX_SIZE) {
+               dev_err(adap->pdev_dev, "boot image too small/large\n");
+               return -EFBIG;
+       }
+
+       if (le16_to_cpu(header->signature) != BOOT_SIGNATURE) {
+               dev_err(adap->pdev_dev, "Boot image missing signature\n");
+               return -EINVAL;
+       }
+
+       /* Check PCI header signature */
+       if (le32_to_cpu(pcir_header->signature) != PCIR_SIGNATURE) {
+               dev_err(adap->pdev_dev, "PCI header missing signature\n");
+               return -EINVAL;
+       }
+
+       /* Check Vendor ID matches Chelsio ID*/
+       if (le16_to_cpu(pcir_header->vendor_id) != PCI_VENDOR_ID_CHELSIO) {
+               dev_err(adap->pdev_dev, "Vendor ID missing signature\n");
+               return -EINVAL;
+       }
+
+       /**
+        * The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
+        * and Boot configuration data sections. These 3 boot sections span
+        * sectors 0 to 7 in flash and live right before the FW image location.
+        */
+       i = DIV_ROUND_UP(size ? size : FLASH_FW_START,  sf_sec_size);
+       ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
+                                    (boot_sector >> 16) + i - 1);
+
+       /**
+        * If size == 0 then we're simply erasing the FLASH sectors associated
+        * with the on-adapter option ROM file
+        */
+       if (ret || size == 0)
+               goto out;
+       /* Retrieve adapter's device ID */
+       pci_read_config_word(adap->pdev, PCI_DEVICE_ID, &device_id);
+       /* Want to deal with PF 0 so I strip off PF 4 indicator */
+       device_id = device_id & 0xf0ff;
+
+        /* Check PCIE Device ID */
+       if (le16_to_cpu(pcir_header->device_id) != device_id) {
+               /**
+                * Change the device ID in the Boot BIOS image to match
+                * the Device ID of the current adapter.
+                */
+               modify_device_id(device_id, boot_data);
+       }
+
+       /**
+        * Skip over the first SF_PAGE_SIZE worth of data and write it after
+        * we finish copying the rest of the boot image. This will ensure
+        * that the BIOS boot header will only be written if the boot image
+        * was written in full.
+        */
+       addr = boot_sector;
+       for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+               addr += SF_PAGE_SIZE;
+               boot_data += SF_PAGE_SIZE;
+               ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data);
+               if (ret)
+                       goto out;
+       }
+
+       ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
+                            (const u8 *)header);
+
+out:
+       if (ret)
+               dev_err(adap->pdev_dev, "boot image load failed, error %d\n",
+                       ret);
+       return ret;
+}
+
+/**
+ *     t4_flash_bootcfg_addr - return the address of the flash
+ *     optionrom configuration
+ *     @adapter: the adapter
+ *
+ *     Return the address within the flash where the OptionROM Configuration
+ *     is stored, or an error if the device FLASH is too small to contain
+ *     a OptionROM Configuration.
+ */
+static int t4_flash_bootcfg_addr(struct adapter *adapter)
+{
+       /**
+        * If the device FLASH isn't large enough to hold a Firmware
+        * Configuration File, return an error.
+        */
+       if (adapter->params.sf_size <
+           FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
+               return -ENOSPC;
+
+       return FLASH_BOOTCFG_START;
+}
+
+int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+{
+       unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+       struct cxgb4_bootcfg_data *header;
+       unsigned int flash_cfg_start_sec;
+       unsigned int addr, npad;
+       int ret, i, n, cfg_addr;
+
+       cfg_addr = t4_flash_bootcfg_addr(adap);
+       if (cfg_addr < 0)
+               return cfg_addr;
+
+       addr = cfg_addr;
+       flash_cfg_start_sec = addr / SF_SEC_SIZE;
+
+       if (size > FLASH_BOOTCFG_MAX_SIZE) {
+               dev_err(adap->pdev_dev, "bootcfg file too large, max is %u bytes\n",
+                       FLASH_BOOTCFG_MAX_SIZE);
+               return -EFBIG;
+       }
+
+       header = (struct cxgb4_bootcfg_data *)cfg_data;
+       if (le16_to_cpu(header->signature) != BOOT_CFG_SIG) {
+               dev_err(adap->pdev_dev, "Wrong bootcfg signature\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,
+                        sf_sec_size);
+       ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
+                                    flash_cfg_start_sec + i - 1);
+
+       /**
+        * If size == 0 then we're simply erasing the FLASH sectors associated
+        * with the on-adapter OptionROM Configuration File.
+        */
+       if (ret || size == 0)
+               goto out;
+
+       /* this will write to the flash up to SF_PAGE_SIZE at a time */
+       for (i = 0; i < size; i += SF_PAGE_SIZE) {
+               n = min_t(u32, size - i, SF_PAGE_SIZE);
+
+               ret = t4_write_flash(adap, addr, n, cfg_data);
+               if (ret)
+                       goto out;
+
+               addr += SF_PAGE_SIZE;
+               cfg_data += SF_PAGE_SIZE;
+       }
+
+       npad = ((size + 4 - 1) & ~3) - size;
+       for (i = 0; i < npad; i++) {
+               u8 data = 0;
+
+               ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data);
+               if (ret)
+                       goto out;
+       }
+
+out:
+       if (ret)
+               dev_err(adap->pdev_dev, "boot config data %s failed %d\n",
+                       (size == 0 ? "clear" : "download"), ret);
+       return ret;
+}
index 4a9fcd6..065c01c 100644 (file)
 #define AIVEC_V(x) ((x) << AIVEC_S)
 
 #define PCIE_PF_CLI_A  0x44
+
+#define PCIE_PF_EXPROM_OFST_A 0x4c
+#define OFFSET_S    10
+#define OFFSET_M    0x3fffU
+#define OFFSET_G(x) (((x) >> OFFSET_S) & OFFSET_M)
+
 #define PCIE_INT_CAUSE_A       0x3004
 
 #define UNXSPLCPLERR_S    29
 #define HASHTIDSIZE_M    0x3fU
 #define HASHTIDSIZE_G(x) (((x) >> HASHTIDSIZE_S) & HASHTIDSIZE_M)
 
+#define HASHTBLSIZE_S    3
+#define HASHTBLSIZE_M    0x1ffffU
+#define HASHTBLSIZE_G(x) (((x) >> HASHTBLSIZE_S) & HASHTBLSIZE_M)
+
 #define LE_DB_HASH_TID_BASE_A 0x19c30
 #define LE_DB_HASH_TBL_BASE_ADDR_A 0x19c30
 #define LE_DB_INT_CAUSE_A 0x19c3c
index 3782e48..f55105a 100644 (file)
@@ -562,7 +562,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *, struct sge_eth_txq *,
                           unsigned int);
 void t4vf_free_sge_resources(struct adapter *);
 
-int t4vf_eth_xmit(struct sk_buff *, struct net_device *);
+netdev_tx_t t4vf_eth_xmit(struct sk_buff *, struct net_device *);
 int t4vf_ethrx_handler(struct sge_rspq *, const __be64 *,
                       const struct pkt_gl *);
 
index cec865a..dbe8ee7 100644 (file)
@@ -260,8 +260,7 @@ static int cxgb4vf_set_addr_hash(struct port_info *pi)
  *     @tcam_idx: TCAM index of existing filter for old value of MAC address,
  *                or -1
  *     @addr: the new MAC address value
- *     @persist: whether a new MAC allocation should be persistent
- *     @add_smt: if true also add the address to the HW SMT
+ *     @persistent: whether a new MAC allocation should be persistent
  *
  *     Modifies an MPS filter and sets it to the new MAC address if
  *     @tcam_idx >= 0, or adds the MAC address to a new filter if
@@ -2917,6 +2916,39 @@ static const struct net_device_ops cxgb4vf_netdev_ops    = {
 #endif
 };
 
+/**
+ *     cxgb4vf_get_port_mask - Get port mask for the VF based on mac
+ *                             address stored on the adapter
+ *     @adapter: The adapter
+ *
+ *     Find the port mask for the VF based on the index of mac
+ *     address stored in the adapter. If no mac address is stored on
+ *     the adapter for the VF, use the port mask received from the
+ *     firmware.
+ */
+static unsigned int cxgb4vf_get_port_mask(struct adapter *adapter)
+{
+       unsigned int naddr = 1, pidx = 0;
+       unsigned int pmask, rmask = 0;
+       u8 mac[ETH_ALEN];
+       int err;
+
+       pmask = adapter->params.vfres.pmask;
+       while (pmask) {
+               if (pmask & 1) {
+                       err = t4vf_get_vf_mac_acl(adapter, pidx, &naddr, mac);
+                       if (!err && !is_zero_ether_addr(mac))
+                               rmask |= (1 << pidx);
+               }
+               pmask >>= 1;
+               pidx++;
+       }
+       if (!rmask)
+               rmask = adapter->params.vfres.pmask;
+
+       return rmask;
+}
+
 /*
  * "Probe" a device: initialize a device and construct all kernel and driver
  * state needed to manage the device.  This routine is called "init_one" in
@@ -2925,13 +2957,12 @@ static const struct net_device_ops cxgb4vf_netdev_ops   = {
 static int cxgb4vf_pci_probe(struct pci_dev *pdev,
                             const struct pci_device_id *ent)
 {
-       int pci_using_dac;
-       int err, pidx;
-       unsigned int pmask;
        struct adapter *adapter;
-       struct port_info *pi;
        struct net_device *netdev;
-       unsigned int pf;
+       struct port_info *pi;
+       unsigned int pmask;
+       int pci_using_dac;
+       int err, pidx;
 
        /*
         * Initialize generic PCI device state.
@@ -3074,8 +3105,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
        /*
         * Allocate our "adapter ports" and stitch everything together.
         */
-       pmask = adapter->params.vfres.pmask;
-       pf = t4vf_get_pf_from_vf(adapter);
+       pmask = cxgb4vf_get_port_mask(adapter);
        for_each_port(adapter, pidx) {
                int port_id, viid;
                u8 mac[ETH_ALEN];
@@ -3158,7 +3188,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
                        goto err_free_dev;
                }
 
-               err = t4vf_get_vf_mac_acl(adapter, pf, &naddr, mac);
+               err = t4vf_get_vf_mac_acl(adapter, port_id, &naddr, mac);
                if (err) {
                        dev_err(&pdev->dev,
                                "unable to determine MAC ACL address, "
index f71c973..95657da 100644 (file)
@@ -1154,7 +1154,7 @@ static inline void txq_advance(struct sge_txq *tq, unsigned int n)
  *
  *     Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
  */
-int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        u32 wr_mid;
        u64 cntrl, *end;
@@ -1692,7 +1692,7 @@ static inline bool is_new_response(const struct rsp_ctrl *rc,
  *     restore_rx_bufs - put back a packet's RX buffers
  *     @gl: the packet gather list
  *     @fl: the SGE Free List
- *     @nfrags: how many fragments in @si
+ *     @frags: how many fragments in @si
  *
  *     Called when we find out that the current packet, @si, can't be
  *     processed right away for some reason.  This is a very rare event and
@@ -2054,7 +2054,7 @@ irq_handler_t t4vf_intr_handler(struct adapter *adapter)
 
 /**
  *     sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
- *     @data: the adapter
+ *     @t: Rx timer
  *
  *     Runs periodically from a timer to perform maintenance of SGE RX queues.
  *
@@ -2113,7 +2113,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
 
 /**
  *     sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
- *     @data: the adapter
+ *     @t: Tx timer
  *
  *     Runs periodically from a timer to perform maintenance of SGE TX queues.
  *
@@ -2405,6 +2405,7 @@ err:
  *     t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
  *     @adapter: the adapter
  *     @txq: pointer to the new txq to be filled in
+ *     @dev: the network device
  *     @devq: the network TX queue associated with the new txq
  *     @iqid: the relative ingress queue ID to which events relating to
  *             the new txq should be directed
index 57cfd10..0377714 100644 (file)
@@ -415,7 +415,7 @@ int t4vf_eth_eq_free(struct adapter *, unsigned int);
 int t4vf_update_port_info(struct port_info *pi);
 int t4vf_handle_fw_rpl(struct adapter *, const __be64 *);
 int t4vf_prep_adapter(struct adapter *);
-int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
+int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int port,
                        unsigned int *naddr, u8 *addr);
 int t4vf_get_vf_vlan_acl(struct adapter *adapter);
 
index 9d49ff2..cd8f9a4 100644 (file)
@@ -389,9 +389,7 @@ static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
        return cc_fec;
 }
 
-/**
- * Return the highest speed set in the port capabilities, in Mb/s.
- */
+/* Return the highest speed set in the port capabilities, in Mb/s. */
 static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
 {
        #define TEST_SPEED_RETURN(__caps_speed, __speed) \
@@ -1467,6 +1465,7 @@ int t4vf_identify_port(struct adapter *adapter, unsigned int viid,
  *     @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
  *     @vlanex: 1 to enable hardware VLAN Tag extraction, 0 to disable it,
  *             -1 no change
+ *     @sleep_ok: call is allowed to sleep
  *
  *     Sets Rx properties of a virtual interface.
  */
@@ -1906,7 +1905,7 @@ static const char *t4vf_link_down_rc_str(unsigned char link_down_rc)
 /**
  *     t4vf_handle_get_port_info - process a FW reply message
  *     @pi: the port info
- *     @rpl: start of the FW message
+ *     @cmd: start of the FW message
  *
  *     Processes a GET_PORT_INFO FW reply message.
  */
@@ -2137,8 +2136,6 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
        return 0;
 }
 
-/**
- */
 int t4vf_prep_adapter(struct adapter *adapter)
 {
        int err;
@@ -2190,14 +2187,14 @@ int t4vf_prep_adapter(struct adapter *adapter)
  *     t4vf_get_vf_mac_acl - Get the MAC address to be set to
  *                           the VI of this VF.
  *     @adapter: The adapter
- *     @pf: The pf associated with vf
+ *     @port: The port associated with vf
  *     @naddr: the number of ACL MAC addresses returned in addr
  *     @addr: Placeholder for MAC addresses
  *
  *     Find the MAC address to be set to the VF's VI. The requested MAC address
  *     is from the host OS via callback in the PF driver.
  */
-int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
+int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int port,
                        unsigned int *naddr, u8 *addr)
 {
        struct fw_acl_mac_cmd cmd;
@@ -2215,7 +2212,7 @@ int t4vf_get_vf_mac_acl(struct adapter *adapter, unsigned int pf,
        if (cmd.nmac < *naddr)
                *naddr = cmd.nmac;
 
-       switch (pf) {
+       switch (port) {
        case 3:
                memcpy(addr, cmd.macaddr3, sizeof(cmd.macaddr3));
                break;
index 7cf4526..d8af9e6 100644 (file)
@@ -24,7 +24,7 @@ config CS89x0
        help
          Support for CS89x0 chipset based Ethernet cards. If you have a
          network (Ethernet) card of this type, say Y and read the file
-         <file:Documentation/networking/device_drivers/cirrus/cs89x0.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/cirrus/cs89x0.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called cs89x0.
index cd5fe4f..6bc7e7b 100644 (file)
@@ -176,50 +176,18 @@ static void enic_unset_affinity_hint(struct enic *enic)
                irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
 }
 
-static void enic_udp_tunnel_add(struct net_device *netdev,
-                               struct udp_tunnel_info *ti)
+static int enic_udp_tunnel_set_port(struct net_device *netdev,
+                                   unsigned int table, unsigned int entry,
+                                   struct udp_tunnel_info *ti)
 {
        struct enic *enic = netdev_priv(netdev);
-       __be16 port = ti->port;
        int err;
 
        spin_lock_bh(&enic->devcmd_lock);
 
-       if (ti->type != UDP_TUNNEL_TYPE_VXLAN) {
-               netdev_info(netdev, "udp_tnl: only vxlan tunnel offload supported");
-               goto error;
-       }
-
-       switch (ti->sa_family) {
-       case AF_INET6:
-               if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)) {
-                       netdev_info(netdev, "vxlan: only IPv4 offload supported");
-                       goto error;
-               }
-               /* Fall through */
-       case AF_INET:
-               break;
-       default:
-               goto error;
-       }
-
-       if (enic->vxlan.vxlan_udp_port_number) {
-               if (ntohs(port) == enic->vxlan.vxlan_udp_port_number)
-                       netdev_warn(netdev, "vxlan: udp port already offloaded");
-               else
-                       netdev_info(netdev, "vxlan: offload supported for only one UDP port");
-
-               goto error;
-       }
-       if ((vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) != 1) &&
-           !(enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ)) {
-               netdev_info(netdev, "vxlan: vxlan offload with multi wq not supported on this adapter");
-               goto error;
-       }
-
        err = vnic_dev_overlay_offload_cfg(enic->vdev,
                                           OVERLAY_CFG_VXLAN_PORT_UPDATE,
-                                          ntohs(port));
+                                          ntohs(ti->port));
        if (err)
                goto error;
 
@@ -228,52 +196,50 @@ static void enic_udp_tunnel_add(struct net_device *netdev,
        if (err)
                goto error;
 
-       enic->vxlan.vxlan_udp_port_number = ntohs(port);
-
-       netdev_info(netdev, "vxlan fw-vers-%d: offload enabled for udp port: %d, sa_family: %d ",
-                   (int)enic->vxlan.patch_level, ntohs(port), ti->sa_family);
-
-       goto unlock;
-
+       enic->vxlan.vxlan_udp_port_number = ntohs(ti->port);
 error:
-       netdev_info(netdev, "failed to offload udp port: %d, sa_family: %d, type: %d",
-                   ntohs(port), ti->sa_family, ti->type);
-unlock:
        spin_unlock_bh(&enic->devcmd_lock);
+
+       return err;
 }
 
-static void enic_udp_tunnel_del(struct net_device *netdev,
-                               struct udp_tunnel_info *ti)
+static int enic_udp_tunnel_unset_port(struct net_device *netdev,
+                                     unsigned int table, unsigned int entry,
+                                     struct udp_tunnel_info *ti)
 {
        struct enic *enic = netdev_priv(netdev);
        int err;
 
        spin_lock_bh(&enic->devcmd_lock);
 
-       if ((ntohs(ti->port) != enic->vxlan.vxlan_udp_port_number) ||
-           ti->type != UDP_TUNNEL_TYPE_VXLAN) {
-               netdev_info(netdev, "udp_tnl: port:%d, sa_family: %d, type: %d not offloaded",
-                           ntohs(ti->port), ti->sa_family, ti->type);
-               goto unlock;
-       }
-
        err = vnic_dev_overlay_offload_ctrl(enic->vdev, OVERLAY_FEATURE_VXLAN,
                                            OVERLAY_OFFLOAD_DISABLE);
-       if (err) {
-               netdev_err(netdev, "vxlan: del offload udp port: %d failed",
-                          ntohs(ti->port));
+       if (err)
                goto unlock;
-       }
 
        enic->vxlan.vxlan_udp_port_number = 0;
 
-       netdev_info(netdev, "vxlan: del offload udp port %d, family %d\n",
-                   ntohs(ti->port), ti->sa_family);
-
 unlock:
        spin_unlock_bh(&enic->devcmd_lock);
+
+       return err;
 }
 
+static const struct udp_tunnel_nic_info enic_udp_tunnels = {
+       .set_port       = enic_udp_tunnel_set_port,
+       .unset_port     = enic_udp_tunnel_unset_port,
+       .tables         = {
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+       },
+}, enic_udp_tunnels_v4 = {
+       .set_port       = enic_udp_tunnel_set_port,
+       .unset_port     = enic_udp_tunnel_unset_port,
+       .flags          = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
+       .tables         = {
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+       },
+};
+
 static netdev_features_t enic_features_check(struct sk_buff *skb,
                                             struct net_device *dev,
                                             netdev_features_t features)
@@ -2526,8 +2492,8 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = enic_rx_flow_steer,
 #endif
-       .ndo_udp_tunnel_add     = enic_udp_tunnel_add,
-       .ndo_udp_tunnel_del     = enic_udp_tunnel_del,
+       .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+       .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
        .ndo_features_check     = enic_features_check,
 };
 
@@ -2552,8 +2518,8 @@ static const struct net_device_ops enic_netdev_ops = {
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = enic_rx_flow_steer,
 #endif
-       .ndo_udp_tunnel_add     = enic_udp_tunnel_add,
-       .ndo_udp_tunnel_del     = enic_udp_tunnel_del,
+       .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+       .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
        .ndo_features_check     = enic_features_check,
 };
 
@@ -2963,6 +2929,13 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                patch_level = fls(patch_level);
                patch_level = patch_level ? patch_level - 1 : 0;
                enic->vxlan.patch_level = patch_level;
+
+               if (vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) == 1 ||
+                   enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ) {
+                       netdev->udp_tunnel_nic_info = &enic_udp_tunnels_v4;
+                       if (enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)
+                               netdev->udp_tunnel_nic_info = &enic_udp_tunnels;
+               }
        }
 
        netdev->features |= netdev->hw_features;
index ac8cb57..aaf9e29 100644 (file)
@@ -7,7 +7,7 @@ config NET_VENDOR_CORTINA
        help
          If you have a network (Ethernet) card belonging to this class, say Y
          and read the Ethernet-HOWTO, available from
-         <http://www.tldp.org/docs.html#howto>.
+         <https://www.tldp.org/docs.html#howto>.
 
 if NET_VENDOR_CORTINA
 
index 8afc594..79dc336 100644 (file)
@@ -114,7 +114,7 @@ config DE4X5
          These include the DE425, DE434, DE435, DE450 and DE500 models.  If
          you have a network card of this type, say Y.  More specific
          information is contained in
-         <file:Documentation/networking/device_drivers/dec/de4x5.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/dec/de4x5.rst>.
 
          To compile this driver as a module, choose M here. The module will
          be called de4x5.
@@ -138,7 +138,7 @@ config DM9102
          This driver is for DM9102(A)/DM9132/DM9801 compatible PCI cards from
          Davicom (<http://www.davicom.com.tw/>).  If you have such a network
          (Ethernet) card, say Y.  Some information is contained in the file
-         <file:Documentation/networking/device_drivers/dec/dmfe.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/dec/dmfe.rst>.
 
          To compile this driver as a module, choose M here. The module will
          be called dmfe.
index 592454f..cb116b5 100644 (file)
@@ -2105,11 +2105,10 @@ static void de_remove_one(struct pci_dev *pdev)
        free_netdev(dev);
 }
 
-#ifdef CONFIG_PM
-
-static int de_suspend (struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused de_suspend(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata (pdev);
+       struct pci_dev *pdev = to_pci_dev(dev_d);
+       struct net_device *dev = pci_get_drvdata(pdev);
        struct de_private *de = netdev_priv(dev);
 
        rtnl_lock();
@@ -2136,7 +2135,6 @@ static int de_suspend (struct pci_dev *pdev, pm_message_t state)
                de_clean_rings(de);
 
                de_adapter_sleep(de);
-               pci_disable_device(pdev);
        } else {
                netif_device_detach(dev);
        }
@@ -2144,21 +2142,17 @@ static int de_suspend (struct pci_dev *pdev, pm_message_t state)
        return 0;
 }
 
-static int de_resume (struct pci_dev *pdev)
+static int __maybe_unused de_resume(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata (pdev);
+       struct pci_dev *pdev = to_pci_dev(dev_d);
+       struct net_device *dev = pci_get_drvdata(pdev);
        struct de_private *de = netdev_priv(dev);
-       int retval = 0;
 
        rtnl_lock();
        if (netif_device_present(dev))
                goto out;
        if (!netif_running(dev))
                goto out_attach;
-       if ((retval = pci_enable_device(pdev))) {
-               netdev_err(dev, "pci_enable_device failed in resume\n");
-               goto out;
-       }
        pci_set_master(pdev);
        de_init_rings(de);
        de_init_hw(de);
@@ -2169,17 +2163,14 @@ out:
        return 0;
 }
 
-#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(de_pm_ops, de_suspend, de_resume);
 
 static struct pci_driver de_driver = {
        .name           = DRV_NAME,
        .id_table       = de_pci_tbl,
        .probe          = de_init_one,
        .remove         = de_remove_one,
-#ifdef CONFIG_PM
-       .suspend        = de_suspend,
-       .resume         = de_resume,
-#endif
+       .driver.pm      = &de_pm_ops,
 };
 
 static int __init de_init (void)
index c1884fc..c3b4abf 100644 (file)
@@ -2081,14 +2081,11 @@ static const struct pci_device_id dmfe_pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
 
-
-#ifdef CONFIG_PM
-static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
+static int __maybe_unused dmfe_suspend(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pci_dev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct dmfe_board_info *db = netdev_priv(dev);
        void __iomem *ioaddr = db->ioaddr;
-       u32 tmp;
 
        /* Disable upper layer interface */
        netif_device_detach(dev);
@@ -2105,63 +2102,35 @@ static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
        dmfe_free_rxbuffer(db);
 
        /* Enable WOL */
-       pci_read_config_dword(pci_dev, 0x40, &tmp);
-       tmp &= ~(DMFE_WOL_LINKCHANGE|DMFE_WOL_MAGICPACKET);
-
-       if (db->wol_mode & WAKE_PHY)
-               tmp |= DMFE_WOL_LINKCHANGE;
-       if (db->wol_mode & WAKE_MAGIC)
-               tmp |= DMFE_WOL_MAGICPACKET;
-
-       pci_write_config_dword(pci_dev, 0x40, tmp);
-
-       pci_enable_wake(pci_dev, PCI_D3hot, 1);
-       pci_enable_wake(pci_dev, PCI_D3cold, 1);
-
-       /* Power down device*/
-       pci_save_state(pci_dev);
-       pci_set_power_state(pci_dev, pci_choose_state (pci_dev, state));
+       device_wakeup_enable(dev_d);
 
        return 0;
 }
 
-static int dmfe_resume(struct pci_dev *pci_dev)
+static int __maybe_unused dmfe_resume(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pci_dev);
-       u32 tmp;
-
-       pci_set_power_state(pci_dev, PCI_D0);
-       pci_restore_state(pci_dev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
 
        /* Re-initialize DM910X board */
        dmfe_init_dm910x(dev);
 
        /* Disable WOL */
-       pci_read_config_dword(pci_dev, 0x40, &tmp);
-
-       tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);
-       pci_write_config_dword(pci_dev, 0x40, tmp);
-
-       pci_enable_wake(pci_dev, PCI_D3hot, 0);
-       pci_enable_wake(pci_dev, PCI_D3cold, 0);
+       device_wakeup_disable(dev_d);
 
        /* Restart upper layer interface */
        netif_device_attach(dev);
 
        return 0;
 }
-#else
-#define dmfe_suspend NULL
-#define dmfe_resume NULL
-#endif
+
+static SIMPLE_DEV_PM_OPS(dmfe_pm_ops, dmfe_suspend, dmfe_resume);
 
 static struct pci_driver dmfe_driver = {
        .name           = "dmfe",
        .id_table       = dmfe_pci_tbl,
        .probe          = dmfe_init_one,
        .remove         = dmfe_remove_one,
-       .suspend        = dmfe_suspend,
-       .resume         = dmfe_resume
+       .driver.pm      = &dmfe_pm_ops,
 };
 
 MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
index 15efc29..9db2352 100644 (file)
@@ -1803,13 +1803,9 @@ static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
        }
 }
 
-#ifdef CONFIG_PM
-
-
-static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused tulip_suspend(struct device *dev_d)
 {
-       pci_power_t pstate;
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct tulip_private *tp = netdev_priv(dev);
 
        if (!dev)
@@ -1825,45 +1821,27 @@ static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
        free_irq(tp->pdev->irq, dev);
 
 save_state:
-       pci_save_state(pdev);
-       pci_disable_device(pdev);
-       pstate = pci_choose_state(pdev, state);
-       if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) {
-               int rc;
-
-               tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
-               rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
-               if (rc)
-                       pr_err("pci_enable_wake failed (%d)\n", rc);
-       }
-       pci_set_power_state(pdev, pstate);
+       tulip_set_wolopts(to_pci_dev(dev_d), tp->wolinfo.wolopts);
+       device_set_wakeup_enable(dev_d, !!tp->wolinfo.wolopts);
 
        return 0;
 }
 
-
-static int tulip_resume(struct pci_dev *pdev)
+static int __maybe_unused tulip_resume(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct pci_dev *pdev = to_pci_dev(dev_d);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct tulip_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->base_addr;
-       int retval;
        unsigned int tmp;
+       int retval = 0;
 
        if (!dev)
                return -EINVAL;
 
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-
        if (!netif_running(dev))
                return 0;
 
-       if ((retval = pci_enable_device(pdev))) {
-               pr_err("pci_enable_device failed in resume\n");
-               return retval;
-       }
-
        retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
                             dev->name, dev);
        if (retval) {
@@ -1872,8 +1850,7 @@ static int tulip_resume(struct pci_dev *pdev)
        }
 
        if (tp->flags & COMET_PM) {
-               pci_enable_wake(pdev, PCI_D3hot, 0);
-               pci_enable_wake(pdev, PCI_D3cold, 0);
+               device_set_wakeup_enable(dev_d, 0);
 
                /* Clear the PMES flag */
                tmp = ioread32(ioaddr + CSR20);
@@ -1891,9 +1868,6 @@ static int tulip_resume(struct pci_dev *pdev)
        return 0;
 }
 
-#endif /* CONFIG_PM */
-
-
 static void tulip_remove_one(struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata (pdev);
@@ -1937,15 +1911,14 @@ static void poll_tulip (struct net_device *dev)
 }
 #endif
 
+static SIMPLE_DEV_PM_OPS(tulip_pm_ops, tulip_suspend, tulip_resume);
+
 static struct pci_driver tulip_driver = {
        .name           = DRV_NAME,
        .id_table       = tulip_pci_tbl,
        .probe          = tulip_init_one,
        .remove         = tulip_remove_one,
-#ifdef CONFIG_PM
-       .suspend        = tulip_suspend,
-       .resume         = tulip_resume,
-#endif /* CONFIG_PM */
+       .driver.pm      = &tulip_pm_ops,
 };
 
 
index f726436..f942399 100644 (file)
@@ -1163,65 +1163,41 @@ static void uli526x_dynamic_reset(struct net_device *dev)
        netif_wake_queue(dev);
 }
 
-
-#ifdef CONFIG_PM
-
 /*
  *     Suspend the interface.
  */
 
-static int uli526x_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused uli526x_suspend(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
-       pci_power_t power_state;
-       int err;
+       struct net_device *dev = dev_get_drvdata(dev_d);
 
        ULI526X_DBUG(0, "uli526x_suspend", 0);
 
-       pci_save_state(pdev);
-
        if (!netif_running(dev))
                return 0;
 
        netif_device_detach(dev);
        uli526x_reset_prepare(dev);
 
-       power_state = pci_choose_state(pdev, state);
-       pci_enable_wake(pdev, power_state, 0);
-       err = pci_set_power_state(pdev, power_state);
-       if (err) {
-               netif_device_attach(dev);
-               /* Re-initialize ULI526X board */
-               uli526x_init(dev);
-               /* Restart upper layer interface */
-               netif_wake_queue(dev);
-       }
+       device_set_wakeup_enable(dev_d, 0);
 
-       return err;
+       return 0;
 }
 
 /*
  *     Resume the interface.
  */
 
-static int uli526x_resume(struct pci_dev *pdev)
+static int __maybe_unused uli526x_resume(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
-       int err;
+       struct net_device *dev = dev_get_drvdata(dev_d);
 
        ULI526X_DBUG(0, "uli526x_resume", 0);
 
-       pci_restore_state(pdev);
 
        if (!netif_running(dev))
                return 0;
 
-       err = pci_set_power_state(pdev, PCI_D0);
-       if (err) {
-               netdev_warn(dev, "Could not put device into D0\n");
-               return err;
-       }
-
        netif_device_attach(dev);
        /* Re-initialize ULI526X board */
        uli526x_init(dev);
@@ -1231,14 +1207,6 @@ static int uli526x_resume(struct pci_dev *pdev)
        return 0;
 }
 
-#else /* !CONFIG_PM */
-
-#define uli526x_suspend        NULL
-#define uli526x_resume NULL
-
-#endif /* !CONFIG_PM */
-
-
 /*
  *     free all allocated rx buffer
  */
@@ -1761,14 +1729,14 @@ static const struct pci_device_id uli526x_pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, uli526x_pci_tbl);
 
+static SIMPLE_DEV_PM_OPS(uli526x_pm_ops, uli526x_suspend, uli526x_resume);
 
 static struct pci_driver uli526x_driver = {
        .name           = "uli526x",
        .id_table       = uli526x_pci_tbl,
        .probe          = uli526x_init_one,
        .remove         = uli526x_remove_one,
-       .suspend        = uli526x_suspend,
-       .resume         = uli526x_resume,
+       .driver.pm      = &uli526x_pm_ops,
 };
 
 MODULE_AUTHOR("Peer Chen, peer.chen@uli.com.tw");
index 4d5e4fa..5dcc66f 100644 (file)
@@ -1530,8 +1530,6 @@ static void w840_remove1(struct pci_dev *pdev)
        }
 }
 
-#ifdef CONFIG_PM
-
 /*
  * suspend/resume synchronization:
  * - open, close, do_ioctl:
@@ -1555,9 +1553,9 @@ static void w840_remove1(struct pci_dev *pdev)
  * Detach must occur under spin_unlock_irq(), interrupts from a detached
  * device would cause an irq storm.
  */
-static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused w840_suspend(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata (pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base_addr;
 
@@ -1590,21 +1588,15 @@ static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
        return 0;
 }
 
-static int w840_resume (struct pci_dev *pdev)
+static int __maybe_unused w840_resume(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata (pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct netdev_private *np = netdev_priv(dev);
-       int retval = 0;
 
        rtnl_lock();
        if (netif_device_present(dev))
                goto out; /* device not suspended */
        if (netif_running(dev)) {
-               if ((retval = pci_enable_device(pdev))) {
-                       dev_err(&dev->dev,
-                               "pci_enable_device failed in resume\n");
-                       goto out;
-               }
                spin_lock_irq(&np->lock);
                iowrite32(1, np->base_addr+PCIBusCfg);
                ioread32(np->base_addr+PCIBusCfg);
@@ -1622,19 +1614,17 @@ static int w840_resume (struct pci_dev *pdev)
        }
 out:
        rtnl_unlock();
-       return retval;
+       return 0;
 }
-#endif
+
+static SIMPLE_DEV_PM_OPS(w840_pm_ops, w840_suspend, w840_resume);
 
 static struct pci_driver w840_driver = {
        .name           = DRV_NAME,
        .id_table       = w840_pci_tbl,
        .probe          = w840_probe1,
        .remove         = w840_remove1,
-#ifdef CONFIG_PM
-       .suspend        = w840_suspend,
-       .resume         = w840_resume,
-#endif
+       .driver.pm      = &w840_pm_ops,
 };
 
 static int __init w840_init(void)
index 5143722..be6d8a9 100644 (file)
@@ -1863,13 +1863,5 @@ static struct pci_driver rio_driver = {
 };
 
 module_pci_driver(rio_driver);
-/*
-
-Compile command:
-
-gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c
-
-Read Documentation/networking/device_drivers/dlink/dl2k.rst for details.
-
-*/
 
+/* Read Documentation/networking/device_drivers/ethernet/dlink/dl2k.rst. */
index dc566fc..b3f8597 100644 (file)
@@ -18,7 +18,7 @@
        http://www.scyld.com/network/sundance.html
        [link no longer provides useful info -jgarzik]
        Archives of the mailing list are still available at
-       http://www.beowulf.org/pipermail/netdrivers/
+       https://www.beowulf.org/pipermail/netdrivers/
 
 */
 
@@ -1928,11 +1928,9 @@ static void sundance_remove1(struct pci_dev *pdev)
        }
 }
 
-#ifdef CONFIG_PM
-
-static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
+static int __maybe_unused sundance_suspend(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pci_dev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct netdev_private *np = netdev_priv(dev);
        void __iomem *ioaddr = np->base;
 
@@ -1942,30 +1940,24 @@ static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state)
        netdev_close(dev);
        netif_device_detach(dev);
 
-       pci_save_state(pci_dev);
        if (np->wol_enabled) {
                iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
                iowrite16(RxEnable, ioaddr + MACCtrl1);
        }
-       pci_enable_wake(pci_dev, pci_choose_state(pci_dev, state),
-                       np->wol_enabled);
-       pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
+
+       device_set_wakeup_enable(dev_d, np->wol_enabled);
 
        return 0;
 }
 
-static int sundance_resume(struct pci_dev *pci_dev)
+static int __maybe_unused sundance_resume(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pci_dev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        int err = 0;
 
        if (!netif_running(dev))
                return 0;
 
-       pci_set_power_state(pci_dev, PCI_D0);
-       pci_restore_state(pci_dev);
-       pci_enable_wake(pci_dev, PCI_D0, 0);
-
        err = netdev_open(dev);
        if (err) {
                printk(KERN_ERR "%s: Can't resume interface!\n",
@@ -1979,17 +1971,14 @@ out:
        return err;
 }
 
-#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(sundance_pm_ops, sundance_suspend, sundance_resume);
 
 static struct pci_driver sundance_driver = {
        .name           = DRV_NAME,
        .id_table       = sundance_pci_tbl,
        .probe          = sundance_probe1,
        .remove         = sundance_remove1,
-#ifdef CONFIG_PM
-       .suspend        = sundance_suspend,
-       .resume         = sundance_resume,
-#endif /* CONFIG_PM */
+       .driver.pm      = &sundance_pm_ops,
 };
 
 static int __init sundance_init(void)
index 6e90220..8689d4a 100644 (file)
@@ -654,8 +654,6 @@ struct be_adapter {
        u8 hba_port_num;
        u16 pvid;
        __be16 vxlan_port;              /* offloaded vxlan port num */
-       int vxlan_port_count;           /* active vxlan port count */
-       struct list_head vxlan_port_list;       /* vxlan port list */
        struct phy_info phy;
        u8 wol_cap;
        bool wol_en;
@@ -679,9 +677,6 @@ struct be_adapter {
 struct be_cmd_work {
        struct work_struct work;
        struct be_adapter *adapter;
-       union {
-               __be16 vxlan_port;
-       } info;
 };
 
 #define be_physfn(adapter)             (!adapter->virtfn)
index a7ac23a..676e437 100644 (file)
@@ -3829,8 +3829,8 @@ static int be_open(struct net_device *netdev)
                be_link_status_update(adapter, link_status);
 
        netif_tx_start_all_queues(netdev);
-       if (skyhawk_chip(adapter))
-               udp_tunnel_get_rx_info(netdev);
+
+       udp_tunnel_nic_reset_ntf(netdev);
 
        return 0;
 err:
@@ -3967,18 +3967,23 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
        }
 }
 
-static int be_enable_vxlan_offloads(struct be_adapter *adapter)
+/* VxLAN offload Notes:
+ *
+ * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
+ * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
+ * is expected to work across all types of IP tunnels once exported. Skyhawk
+ * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
+ * offloads in hw_enc_features only when a VxLAN port is added. If other (non
+ * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
+ * those other tunnels are unexported on the fly through ndo_features_check().
+ */
+static int be_vxlan_set_port(struct net_device *netdev, unsigned int table,
+                            unsigned int entry, struct udp_tunnel_info *ti)
 {
-       struct net_device *netdev = adapter->netdev;
+       struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
-       struct be_vxlan_port *vxlan_port;
-       __be16 port;
        int status;
 
-       vxlan_port = list_first_entry(&adapter->vxlan_port_list,
-                                     struct be_vxlan_port, list);
-       port = vxlan_port->port;
-
        status = be_cmd_manage_iface(adapter, adapter->if_handle,
                                     OP_CONVERT_NORMAL_TO_TUNNEL);
        if (status) {
@@ -3987,25 +3992,26 @@ static int be_enable_vxlan_offloads(struct be_adapter *adapter)
        }
        adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
 
-       status = be_cmd_set_vxlan_port(adapter, port);
+       status = be_cmd_set_vxlan_port(adapter, ti->port);
        if (status) {
                dev_warn(dev, "Failed to add VxLAN port\n");
                return status;
        }
-       adapter->vxlan_port = port;
+       adapter->vxlan_port = ti->port;
 
        netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                                   NETIF_F_TSO | NETIF_F_TSO6 |
                                   NETIF_F_GSO_UDP_TUNNEL;
 
        dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
-                be16_to_cpu(port));
+                be16_to_cpu(ti->port));
        return 0;
 }
 
-static void be_disable_vxlan_offloads(struct be_adapter *adapter)
+static int be_vxlan_unset_port(struct net_device *netdev, unsigned int table,
+                              unsigned int entry, struct udp_tunnel_info *ti)
 {
-       struct net_device *netdev = adapter->netdev;
+       struct be_adapter *adapter = netdev_priv(netdev);
 
        if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
                be_cmd_manage_iface(adapter, adapter->if_handle,
@@ -4018,8 +4024,19 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
        adapter->vxlan_port = 0;
 
        netdev->hw_enc_features = 0;
+       return 0;
 }
 
+static const struct udp_tunnel_nic_info be_udp_tunnels = {
+       .set_port       = be_vxlan_set_port,
+       .unset_port     = be_vxlan_unset_port,
+       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+                         UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+       .tables         = {
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+       },
+};
+
 static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
                                struct be_resources *vft_res)
 {
@@ -4135,7 +4152,7 @@ static int be_clear(struct be_adapter *adapter)
                                        &vft_res);
        }
 
-       be_disable_vxlan_offloads(adapter);
+       be_vxlan_unset_port(adapter->netdev, 0, 0, NULL);
 
        be_if_destroy(adapter);
 
@@ -5053,147 +5070,6 @@ static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
        return work;
 }
 
-/* VxLAN offload Notes:
- *
- * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
- * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
- * is expected to work across all types of IP tunnels once exported. Skyhawk
- * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
- * offloads in hw_enc_features only when a VxLAN port is added. If other (non
- * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
- * those other tunnels are unexported on the fly through ndo_features_check().
- *
- * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
- * adds more than one port, disable offloads and re-enable them again when
- * there's only one port left. We maintain a list of ports for this purpose.
- */
-static void be_work_add_vxlan_port(struct work_struct *work)
-{
-       struct be_cmd_work *cmd_work =
-                               container_of(work, struct be_cmd_work, work);
-       struct be_adapter *adapter = cmd_work->adapter;
-       struct device *dev = &adapter->pdev->dev;
-       __be16 port = cmd_work->info.vxlan_port;
-       struct be_vxlan_port *vxlan_port;
-       int status;
-
-       /* Bump up the alias count if it is an existing port */
-       list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) {
-               if (vxlan_port->port == port) {
-                       vxlan_port->port_aliases++;
-                       goto done;
-               }
-       }
-
-       /* Add a new port to our list. We don't need a lock here since port
-        * add/delete are done only in the context of a single-threaded work
-        * queue (be_wq).
-        */
-       vxlan_port = kzalloc(sizeof(*vxlan_port), GFP_KERNEL);
-       if (!vxlan_port)
-               goto done;
-
-       vxlan_port->port = port;
-       INIT_LIST_HEAD(&vxlan_port->list);
-       list_add_tail(&vxlan_port->list, &adapter->vxlan_port_list);
-       adapter->vxlan_port_count++;
-
-       if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
-               dev_info(dev,
-                        "Only one UDP port supported for VxLAN offloads\n");
-               dev_info(dev, "Disabling VxLAN offloads\n");
-               goto err;
-       }
-
-       if (adapter->vxlan_port_count > 1)
-               goto done;
-
-       status = be_enable_vxlan_offloads(adapter);
-       if (!status)
-               goto done;
-
-err:
-       be_disable_vxlan_offloads(adapter);
-done:
-       kfree(cmd_work);
-       return;
-}
-
-static void be_work_del_vxlan_port(struct work_struct *work)
-{
-       struct be_cmd_work *cmd_work =
-                               container_of(work, struct be_cmd_work, work);
-       struct be_adapter *adapter = cmd_work->adapter;
-       __be16 port = cmd_work->info.vxlan_port;
-       struct be_vxlan_port *vxlan_port;
-
-       /* Nothing to be done if a port alias is being deleted */
-       list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) {
-               if (vxlan_port->port == port) {
-                       if (vxlan_port->port_aliases) {
-                               vxlan_port->port_aliases--;
-                               goto done;
-                       }
-                       break;
-               }
-       }
-
-       /* No port aliases left; delete the port from the list */
-       list_del(&vxlan_port->list);
-       adapter->vxlan_port_count--;
-
-       /* Disable VxLAN offload if this is the offloaded port */
-       if (adapter->vxlan_port == vxlan_port->port) {
-               WARN_ON(adapter->vxlan_port_count);
-               be_disable_vxlan_offloads(adapter);
-               dev_info(&adapter->pdev->dev,
-                        "Disabled VxLAN offloads for UDP port %d\n",
-                        be16_to_cpu(port));
-               goto out;
-       }
-
-       /* If only 1 port is left, re-enable VxLAN offload */
-       if (adapter->vxlan_port_count == 1)
-               be_enable_vxlan_offloads(adapter);
-
-out:
-       kfree(vxlan_port);
-done:
-       kfree(cmd_work);
-}
-
-static void be_cfg_vxlan_port(struct net_device *netdev,
-                             struct udp_tunnel_info *ti,
-                             void (*func)(struct work_struct *))
-{
-       struct be_adapter *adapter = netdev_priv(netdev);
-       struct be_cmd_work *cmd_work;
-
-       if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-               return;
-
-       if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
-               return;
-
-       cmd_work = be_alloc_work(adapter, func);
-       if (cmd_work) {
-               cmd_work->info.vxlan_port = ti->port;
-               queue_work(be_wq, &cmd_work->work);
-       }
-}
-
-static void be_del_vxlan_port(struct net_device *netdev,
-                             struct udp_tunnel_info *ti)
-{
-       be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
-}
-
-static void be_add_vxlan_port(struct net_device *netdev,
-                             struct udp_tunnel_info *ti)
-{
-       be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
-}
-
 static netdev_features_t be_features_check(struct sk_buff *skb,
                                           struct net_device *dev,
                                           netdev_features_t features)
@@ -5309,8 +5185,8 @@ static const struct net_device_ops be_netdev_ops = {
 #endif
        .ndo_bridge_setlink     = be_ndo_bridge_setlink,
        .ndo_bridge_getlink     = be_ndo_bridge_getlink,
-       .ndo_udp_tunnel_add     = be_add_vxlan_port,
-       .ndo_udp_tunnel_del     = be_del_vxlan_port,
+       .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+       .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
        .ndo_features_check     = be_features_check,
        .ndo_get_phys_port_id   = be_get_phys_port_id,
 };
@@ -5342,6 +5218,9 @@ static void be_netdev_init(struct net_device *netdev)
 
        netdev->ethtool_ops = &be_ethtool_ops;
 
+       if (!lancer_chip(adapter) && !BEx_chip(adapter) && !be_is_mc(adapter))
+               netdev->udp_tunnel_nic_info = &be_udp_tunnels;
+
        /* MTU range: 256 - 9000 */
        netdev->min_mtu = BE_MIN_MTU;
        netdev->max_mtu = BE_MAX_MTU;
@@ -5819,7 +5698,6 @@ static int be_drv_init(struct be_adapter *adapter)
        /* Must be a power of 2 or else MODULO will BUG_ON */
        adapter->be_get_temp_freq = 64;
 
-       INIT_LIST_HEAD(&adapter->vxlan_port_list);
        return 0;
 
 free_rx_filter:
@@ -6037,32 +5915,23 @@ do_none:
        return status;
 }
 
-static int be_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused be_suspend(struct device *dev_d)
 {
-       struct be_adapter *adapter = pci_get_drvdata(pdev);
+       struct be_adapter *adapter = dev_get_drvdata(dev_d);
 
        be_intr_set(adapter, false);
        be_cancel_err_detection(adapter);
 
        be_cleanup(adapter);
 
-       pci_save_state(pdev);
-       pci_disable_device(pdev);
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
 }
 
-static int be_pci_resume(struct pci_dev *pdev)
+static int __maybe_unused be_pci_resume(struct device *dev_d)
 {
-       struct be_adapter *adapter = pci_get_drvdata(pdev);
+       struct be_adapter *adapter = dev_get_drvdata(dev_d);
        int status = 0;
 
-       status = pci_enable_device(pdev);
-       if (status)
-               return status;
-
-       pci_restore_state(pdev);
-
        status = be_resume(adapter);
        if (status)
                return status;
@@ -6234,13 +6103,14 @@ static const struct pci_error_handlers be_eeh_handlers = {
        .resume = be_eeh_resume,
 };
 
+static SIMPLE_DEV_PM_OPS(be_pci_pm_ops, be_suspend, be_pci_resume);
+
 static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
-       .suspend = be_suspend,
-       .resume = be_pci_resume,
+       .driver.pm = &be_pci_pm_ops,
        .shutdown = be_shutdown,
        .sriov_configure = be_pci_sriov_configure,
        .err_handler = &be_eeh_handlers
index 73e896a..c696651 100644 (file)
@@ -543,7 +543,8 @@ static int fealnx_init_one(struct pci_dev *pdev,
        np->mii.phy_id_mask = 0x1f;
        np->mii.reg_num_mask = 0x1f;
 
-       ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+       ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
+                                       GFP_KERNEL);
        if (!ring_space) {
                err = -ENOMEM;
                goto err_out_free_dev;
@@ -551,7 +552,8 @@ static int fealnx_init_one(struct pci_dev *pdev,
        np->rx_ring = ring_space;
        np->rx_ring_dma = ring_dma;
 
-       ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+       ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
+                                       GFP_KERNEL);
        if (!ring_space) {
                err = -ENOMEM;
                goto err_out_free_rx;
@@ -656,9 +658,11 @@ static int fealnx_init_one(struct pci_dev *pdev,
        return 0;
 
 err_out_free_tx:
-       pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+       dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+                         np->tx_ring_dma);
 err_out_free_rx:
-       pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+       dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+                         np->rx_ring_dma);
 err_out_free_dev:
        free_netdev(dev);
 err_out_unmap:
@@ -676,10 +680,10 @@ static void fealnx_remove_one(struct pci_dev *pdev)
        if (dev) {
                struct netdev_private *np = netdev_priv(dev);
 
-               pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
-                       np->tx_ring_dma);
-               pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
-                       np->rx_ring_dma);
+               dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+                                 np->tx_ring_dma);
+               dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+                                 np->rx_ring_dma);
                unregister_netdev(dev);
                pci_iounmap(pdev, np->mem);
                free_netdev(dev);
@@ -1056,8 +1060,10 @@ static void allocate_rx_buffers(struct net_device *dev)
                        np->lack_rxbuf = np->lack_rxbuf->next_desc_logical;
 
                np->lack_rxbuf->skbuff = skb;
-               np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
-                       np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+               np->lack_rxbuf->buffer = dma_map_single(&np->pci_dev->dev,
+                                                       skb->data,
+                                                       np->rx_buf_sz,
+                                                       DMA_FROM_DEVICE);
                np->lack_rxbuf->status = RXOWN;
                ++np->really_rx_count;
        }
@@ -1251,8 +1257,10 @@ static void init_ring(struct net_device *dev)
 
                ++np->really_rx_count;
                np->rx_ring[i].skbuff = skb;
-               np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
-                       np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+               np->rx_ring[i].buffer = dma_map_single(&np->pci_dev->dev,
+                                                      skb->data,
+                                                      np->rx_buf_sz,
+                                                      DMA_FROM_DEVICE);
                np->rx_ring[i].status = RXOWN;
                np->rx_ring[i].control |= RXIC;
        }
@@ -1290,8 +1298,8 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 #define one_buffer
 #define BPT 1022
 #if defined(one_buffer)
-       np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
-               skb->len, PCI_DMA_TODEVICE);
+       np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev, skb->data,
+                                                skb->len, DMA_TO_DEVICE);
        np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
        np->cur_tx_copy->control |= (skb->len << PKTSShift);    /* pkt size */
        np->cur_tx_copy->control |= (skb->len << TBSShift);     /* buffer size */
@@ -1306,8 +1314,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
                struct fealnx_desc *next;
 
                /* for the first descriptor */
-               np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
-                       BPT, PCI_DMA_TODEVICE);
+               np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
+                                                        skb->data, BPT,
+                                                        DMA_TO_DEVICE);
                np->cur_tx_copy->control = TXIC | TXFD | CRCEnable | PADEnable;
                np->cur_tx_copy->control |= (skb->len << PKTSShift);    /* pkt size */
                np->cur_tx_copy->control |= (BPT << TBSShift);  /* buffer size */
@@ -1321,8 +1330,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 // 89/12/29 add,
                if (np->pci_dev->device == 0x891)
                        np->cur_tx_copy->control |= ETIControl | RetryTxLC;
-               next->buffer = pci_map_single(ep->pci_dev, skb->data + BPT,
-                                skb->len - BPT, PCI_DMA_TODEVICE);
+               next->buffer = dma_map_single(&ep->pci_dev->dev,
+                                             skb->data + BPT, skb->len - BPT,
+                                             DMA_TO_DEVICE);
 
                next->status = TXOWN;
                np->cur_tx_copy->status = TXOWN;
@@ -1330,8 +1340,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
                np->cur_tx_copy = next->next_desc_logical;
                np->free_tx_count -= 2;
        } else {
-               np->cur_tx_copy->buffer = pci_map_single(np->pci_dev, skb->data,
-                       skb->len, PCI_DMA_TODEVICE);
+               np->cur_tx_copy->buffer = dma_map_single(&np->pci_dev->dev,
+                                                        skb->data, skb->len,
+                                                        DMA_TO_DEVICE);
                np->cur_tx_copy->control = TXIC | TXLD | TXFD | CRCEnable | PADEnable;
                np->cur_tx_copy->control |= (skb->len << PKTSShift);    /* pkt size */
                np->cur_tx_copy->control |= (skb->len << TBSShift);     /* buffer size */
@@ -1371,8 +1382,8 @@ static void reset_tx_descriptors(struct net_device *dev)
        for (i = 0; i < TX_RING_SIZE; i++) {
                cur = &np->tx_ring[i];
                if (cur->skbuff) {
-                       pci_unmap_single(np->pci_dev, cur->buffer,
-                               cur->skbuff->len, PCI_DMA_TODEVICE);
+                       dma_unmap_single(&np->pci_dev->dev, cur->buffer,
+                                        cur->skbuff->len, DMA_TO_DEVICE);
                        dev_kfree_skb_any(cur->skbuff);
                        cur->skbuff = NULL;
                }
@@ -1515,8 +1526,10 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
                        }
 
                        /* Free the original skb. */
-                       pci_unmap_single(np->pci_dev, np->cur_tx->buffer,
-                               np->cur_tx->skbuff->len, PCI_DMA_TODEVICE);
+                       dma_unmap_single(&np->pci_dev->dev,
+                                        np->cur_tx->buffer,
+                                        np->cur_tx->skbuff->len,
+                                        DMA_TO_DEVICE);
                        dev_consume_skb_irq(np->cur_tx->skbuff);
                        np->cur_tx->skbuff = NULL;
                        --np->really_tx_count;
@@ -1682,10 +1695,10 @@ static int netdev_rx(struct net_device *dev)
                        if (pkt_len < rx_copybreak &&
                            (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
-                               pci_dma_sync_single_for_cpu(np->pci_dev,
-                                                           np->cur_rx->buffer,
-                                                           np->rx_buf_sz,
-                                                           PCI_DMA_FROMDEVICE);
+                               dma_sync_single_for_cpu(&np->pci_dev->dev,
+                                                       np->cur_rx->buffer,
+                                                       np->rx_buf_sz,
+                                                       DMA_FROM_DEVICE);
                                /* Call copy + cksum if available. */
 
 #if ! defined(__alpha__)
@@ -1696,15 +1709,15 @@ static int netdev_rx(struct net_device *dev)
                                skb_put_data(skb, np->cur_rx->skbuff->data,
                                             pkt_len);
 #endif
-                               pci_dma_sync_single_for_device(np->pci_dev,
-                                                              np->cur_rx->buffer,
-                                                              np->rx_buf_sz,
-                                                              PCI_DMA_FROMDEVICE);
+                               dma_sync_single_for_device(&np->pci_dev->dev,
+                                                          np->cur_rx->buffer,
+                                                          np->rx_buf_sz,
+                                                          DMA_FROM_DEVICE);
                        } else {
-                               pci_unmap_single(np->pci_dev,
+                               dma_unmap_single(&np->pci_dev->dev,
                                                 np->cur_rx->buffer,
                                                 np->rx_buf_sz,
-                                                PCI_DMA_FROMDEVICE);
+                                                DMA_FROM_DEVICE);
                                skb_put(skb = np->cur_rx->skbuff, pkt_len);
                                np->cur_rx->skbuff = NULL;
                                --np->really_rx_count;
@@ -1896,8 +1909,9 @@ static int netdev_close(struct net_device *dev)
 
                np->rx_ring[i].status = 0;
                if (skb) {
-                       pci_unmap_single(np->pci_dev, np->rx_ring[i].buffer,
-                               np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+                       dma_unmap_single(&np->pci_dev->dev,
+                                        np->rx_ring[i].buffer, np->rx_buf_sz,
+                                        DMA_FROM_DEVICE);
                        dev_kfree_skb(skb);
                        np->rx_ring[i].skbuff = NULL;
                }
@@ -1907,8 +1921,9 @@ static int netdev_close(struct net_device *dev)
                struct sk_buff *skb = np->tx_ring[i].skbuff;
 
                if (skb) {
-                       pci_unmap_single(np->pci_dev, np->tx_ring[i].buffer,
-                               skb->len, PCI_DMA_TODEVICE);
+                       dma_unmap_single(&np->pci_dev->dev,
+                                        np->tx_ring[i].buffer, skb->len,
+                                        DMA_TO_DEVICE);
                        dev_kfree_skb(skb);
                        np->tx_ring[i].skbuff = NULL;
                }
index c453a23..56d9927 100644 (file)
@@ -21,7 +21,7 @@ static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
        seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name);
        seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s%16s\n",
                   "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf",
-                  "Tx SG", "Tx realloc", "Enq busy");
+                  "Tx SG", "Tx converted to SG", "Enq busy");
 
        for_each_online_cpu(i) {
                stats = per_cpu_ptr(priv->percpu_stats, i);
@@ -35,7 +35,7 @@ static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
                           stats->tx_errors,
                           extras->tx_conf_frames,
                           extras->tx_sg_frames,
-                          extras->tx_reallocs,
+                          extras->tx_converted_sg_frames,
                           extras->tx_portal_busy);
        }
 
@@ -90,6 +90,10 @@ static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
                if (err)
                        fcnt = 0;
 
+               /* Skip FQs with no traffic */
+               if (!fq->stats.frames && !fcnt)
+                       continue;
+
                seq_printf(file, "%5d%16d%16d%16s%16llu%16u\n",
                           fq->fqid,
                           fq->target_cpu,
index 9801528..5fb5f14 100644 (file)
@@ -10,7 +10,6 @@
 
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
-#include "dpaa2-eth.h"
 #include <linux/tracepoint.h>
 
 #define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u"
index f150cd4..c1bea91 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/fsl/mc.h>
 #include <linux/bpf.h>
 #include <linux/bpf_trace.h>
+#include <net/pkt_cls.h>
 #include <net/sock.h>
 
 #include "dpaa2-eth.h"
@@ -611,6 +612,10 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
 
        sg_init_table(scl, nr_frags + 1);
        num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
+       if (unlikely(num_sg < 0)) {
+               err = -ENOMEM;
+               goto dma_map_sg_failed;
+       }
        num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
        if (unlikely(!num_dma_bufs)) {
                err = -ENOMEM;
@@ -681,6 +686,86 @@ dma_map_sg_failed:
        return err;
 }
 
+/* Create a SG frame descriptor based on a linear skb.
+ *
+ * This function is used on the Tx path when the skb headroom is not large
+ * enough for the HW requirements, thus instead of realloc-ing the skb we
+ * create a SG frame descriptor with only one entry.
+ */
+static int build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
+                                 struct sk_buff *skb,
+                                 struct dpaa2_fd *fd)
+{
+       struct device *dev = priv->net_dev->dev.parent;
+       struct dpaa2_eth_sgt_cache *sgt_cache;
+       struct dpaa2_sg_entry *sgt;
+       struct dpaa2_eth_swa *swa;
+       dma_addr_t addr, sgt_addr;
+       void *sgt_buf = NULL;
+       int sgt_buf_size;
+       int err;
+
+       /* Prepare the HW SGT structure */
+       sgt_cache = this_cpu_ptr(priv->sgt_cache);
+       sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
+
+       if (sgt_cache->count == 0)
+               sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN,
+                                 GFP_ATOMIC);
+       else
+               sgt_buf = sgt_cache->buf[--sgt_cache->count];
+       if (unlikely(!sgt_buf))
+               return -ENOMEM;
+
+       sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
+       sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
+
+       addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
+       if (unlikely(dma_mapping_error(dev, addr))) {
+               err = -ENOMEM;
+               goto data_map_failed;
+       }
+
+       /* Fill in the HW SGT structure */
+       dpaa2_sg_set_addr(sgt, addr);
+       dpaa2_sg_set_len(sgt, skb->len);
+       dpaa2_sg_set_final(sgt, true);
+
+       /* Store the skb backpointer in the SGT buffer */
+       swa = (struct dpaa2_eth_swa *)sgt_buf;
+       swa->type = DPAA2_ETH_SWA_SINGLE;
+       swa->single.skb = skb;
+       swa->sg.sgt_size = sgt_buf_size;
+
+       /* Separately map the SGT buffer */
+       sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
+       if (unlikely(dma_mapping_error(dev, sgt_addr))) {
+               err = -ENOMEM;
+               goto sgt_map_failed;
+       }
+
+       dpaa2_fd_set_offset(fd, priv->tx_data_offset);
+       dpaa2_fd_set_format(fd, dpaa2_fd_sg);
+       dpaa2_fd_set_addr(fd, sgt_addr);
+       dpaa2_fd_set_len(fd, skb->len);
+       dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
+
+       if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+               enable_tx_tstamp(fd, sgt_buf);
+
+       return 0;
+
+sgt_map_failed:
+       dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
+data_map_failed:
+       if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
+               kfree(sgt_buf);
+       else
+               sgt_cache->buf[sgt_cache->count++] = sgt_buf;
+
+       return err;
+}
+
 /* Create a frame descriptor based on a linear skb */
 static int build_single_fd(struct dpaa2_eth_priv *priv,
                           struct sk_buff *skb,
@@ -739,13 +824,16 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
                       const struct dpaa2_fd *fd, bool in_napi)
 {
        struct device *dev = priv->net_dev->dev.parent;
-       dma_addr_t fd_addr;
+       dma_addr_t fd_addr, sg_addr;
        struct sk_buff *skb = NULL;
        unsigned char *buffer_start;
        struct dpaa2_eth_swa *swa;
        u8 fd_format = dpaa2_fd_get_format(fd);
        u32 fd_len = dpaa2_fd_get_len(fd);
 
+       struct dpaa2_eth_sgt_cache *sgt_cache;
+       struct dpaa2_sg_entry *sgt;
+
        fd_addr = dpaa2_fd_get_addr(fd);
        buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
        swa = (struct dpaa2_eth_swa *)buffer_start;
@@ -765,16 +853,29 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
                                         DMA_BIDIRECTIONAL);
                }
        } else if (fd_format == dpaa2_fd_sg) {
-               skb = swa->sg.skb;
+               if (swa->type == DPAA2_ETH_SWA_SG) {
+                       skb = swa->sg.skb;
 
-               /* Unmap the scatterlist */
-               dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
-                            DMA_BIDIRECTIONAL);
-               kfree(swa->sg.scl);
+                       /* Unmap the scatterlist */
+                       dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
+                                    DMA_BIDIRECTIONAL);
+                       kfree(swa->sg.scl);
 
-               /* Unmap the SGT buffer */
-               dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
-                                DMA_BIDIRECTIONAL);
+                       /* Unmap the SGT buffer */
+                       dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
+                                        DMA_BIDIRECTIONAL);
+               } else {
+                       skb = swa->single.skb;
+
+                       /* Unmap the SGT Buffer */
+                       dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
+                                        DMA_BIDIRECTIONAL);
+
+                       sgt = (struct dpaa2_sg_entry *)(buffer_start +
+                                                       priv->tx_data_offset);
+                       sg_addr = dpaa2_sg_get_addr(sgt);
+                       dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
+               }
        } else {
                netdev_dbg(priv->net_dev, "Invalid FD format\n");
                return;
@@ -804,8 +905,17 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
        }
 
        /* Free SGT buffer allocated on tx */
-       if (fd_format != dpaa2_fd_single)
-               skb_free_frag(buffer_start);
+       if (fd_format != dpaa2_fd_single) {
+               sgt_cache = this_cpu_ptr(priv->sgt_cache);
+               if (swa->type == DPAA2_ETH_SWA_SG) {
+                       skb_free_frag(buffer_start);
+               } else {
+                       if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
+                               kfree(buffer_start);
+                       else
+                               sgt_cache->buf[sgt_cache->count++] = buffer_start;
+               }
+       }
 
        /* Move on with skb release */
        napi_consume_skb(skb, in_napi);
@@ -829,22 +939,6 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
        percpu_extras = this_cpu_ptr(priv->percpu_extras);
 
        needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
-       if (skb_headroom(skb) < needed_headroom) {
-               struct sk_buff *ns;
-
-               ns = skb_realloc_headroom(skb, needed_headroom);
-               if (unlikely(!ns)) {
-                       percpu_stats->tx_dropped++;
-                       goto err_alloc_headroom;
-               }
-               percpu_extras->tx_reallocs++;
-
-               if (skb->sk)
-                       skb_set_owner_w(ns, skb->sk);
-
-               dev_kfree_skb(skb);
-               skb = ns;
-       }
 
        /* We'll be holding a back-reference to the skb until Tx Confirmation;
         * we don't want that overwritten by a concurrent Tx with a cloned skb.
@@ -863,6 +957,12 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
                err = build_sg_fd(priv, skb, &fd);
                percpu_extras->tx_sg_frames++;
                percpu_extras->tx_sg_bytes += skb->len;
+       } else if (skb_headroom(skb) < needed_headroom) {
+               err = build_sg_fd_single_buf(priv, skb, &fd);
+               percpu_extras->tx_sg_frames++;
+               percpu_extras->tx_sg_bytes += skb->len;
+               percpu_extras->tx_converted_sg_frames++;
+               percpu_extras->tx_converted_sg_bytes += skb->len;
        } else {
                err = build_single_fd(priv, skb, &fd);
        }
@@ -920,7 +1020,6 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
        return NETDEV_TX_OK;
 
 err_build_fd:
-err_alloc_headroom:
        dev_kfree_skb(skb);
 
        return NETDEV_TX_OK;
@@ -1109,7 +1208,7 @@ static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
                                               buf_array, count);
                if (ret < 0) {
                        if (ret == -EBUSY &&
-                           retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
+                           retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
                                continue;
                        netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
                        return;
@@ -1157,6 +1256,22 @@ static int refill_pool(struct dpaa2_eth_priv *priv,
        return 0;
 }
 
+static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
+{
+       struct dpaa2_eth_sgt_cache *sgt_cache;
+       u16 count;
+       int k, i;
+
+       for_each_possible_cpu(k) {
+               sgt_cache = per_cpu_ptr(priv->sgt_cache, k);
+               count = sgt_cache->count;
+
+               for (i = 0; i < count; i++)
+                       kfree(sgt_cache->buf[i]);
+               sgt_cache->count = 0;
+       }
+}
+
 static int pull_channel(struct dpaa2_eth_channel *ch)
 {
        int err;
@@ -1558,6 +1673,9 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
        /* Empty the buffer pool */
        drain_pool(priv);
 
+       /* Empty the Scatter-Gather Buffer cache */
+       dpaa2_eth_sgt_cache_drain(priv);
+
        return 0;
 }
 
@@ -2096,17 +2214,13 @@ static int update_xps(struct dpaa2_eth_priv *priv)
        return err;
 }
 
-static int dpaa2_eth_setup_tc(struct net_device *net_dev,
-                             enum tc_setup_type type, void *type_data)
+static int dpaa2_eth_setup_mqprio(struct net_device *net_dev,
+                                 struct tc_mqprio_qopt *mqprio)
 {
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
-       struct tc_mqprio_qopt *mqprio = type_data;
        u8 num_tc, num_queues;
        int i;
 
-       if (type != TC_SETUP_QDISC_MQPRIO)
-               return -EOPNOTSUPP;
-
        mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
        num_queues = dpaa2_eth_queue_count(priv);
        num_tc = mqprio->num_tc;
@@ -2138,6 +2252,60 @@ out:
        return 0;
 }
 
+#define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8)
+
+static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p)
+{
+       struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params;
+       struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+       struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 };
+       struct dpni_tx_shaping_cfg tx_er_shaper = { 0 };
+       int err;
+
+       if (p->command == TC_TBF_STATS)
+               return -EOPNOTSUPP;
+
+       /* Only per port Tx shaping */
+       if (p->parent != TC_H_ROOT)
+               return -EOPNOTSUPP;
+
+       if (p->command == TC_TBF_REPLACE) {
+               if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) {
+                       netdev_err(net_dev, "burst size cannot be greater than %d\n",
+                                  DPAA2_ETH_MAX_BURST_SIZE);
+                       return -EINVAL;
+               }
+
+               tx_cr_shaper.max_burst_size = cfg->max_size;
+               /* The TBF interface is in bytes/s, whereas DPAA2 expects the
+                * rate in Mbits/s
+                */
+               tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps);
+       }
+
+       err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper,
+                                 &tx_er_shaper, 0);
+       if (err) {
+               netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err);
+               return err;
+       }
+
+       return 0;
+}
+
+static int dpaa2_eth_setup_tc(struct net_device *net_dev,
+                             enum tc_setup_type type, void *type_data)
+{
+       switch (type) {
+       case TC_SETUP_QDISC_MQPRIO:
+               return dpaa2_eth_setup_mqprio(net_dev, type_data);
+       case TC_SETUP_QDISC_TBF:
+               return dpaa2_eth_setup_tbf(net_dev, type_data);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
 static const struct net_device_ops dpaa2_eth_ops = {
        .ndo_open = dpaa2_eth_open,
        .ndo_start_xmit = dpaa2_eth_tx,
@@ -3602,7 +3770,7 @@ static int netdev_init(struct net_device *net_dev)
        net_dev->features = NETIF_F_RXCSUM |
                            NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                            NETIF_F_SG | NETIF_F_HIGHDMA |
-                           NETIF_F_LLTX;
+                           NETIF_F_LLTX | NETIF_F_HW_TC;
        net_dev->hw_features = net_dev->features;
 
        return 0;
@@ -3842,6 +4010,13 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
                goto err_alloc_percpu_extras;
        }
 
+       priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
+       if (!priv->sgt_cache) {
+               dev_err(dev, "alloc_percpu(sgt_cache) failed\n");
+               err = -ENOMEM;
+               goto err_alloc_sgt_cache;
+       }
+
        err = netdev_init(net_dev);
        if (err)
                goto err_netdev_init;
@@ -3910,6 +4085,8 @@ err_poll_thread:
 err_alloc_rings:
 err_csum:
 err_netdev_init:
+       free_percpu(priv->sgt_cache);
+err_alloc_sgt_cache:
        free_percpu(priv->percpu_extras);
 err_alloc_percpu_extras:
        free_percpu(priv->percpu_stats);
@@ -3955,6 +4132,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
                fsl_mc_free_irqs(ls_dev);
 
        free_rings(priv);
+       free_percpu(priv->sgt_cache);
        free_percpu(priv->percpu_stats);
        free_percpu(priv->percpu_extras);
 
index 2d7ada0..7f3c41d 100644 (file)
@@ -43,6 +43,9 @@
  */
 #define DPAA2_ETH_FQ_TAILDROP_THRESH   (1024 * 1024)
 
+/* Maximum burst size value for Tx shaping */
+#define DPAA2_ETH_MAX_BURST_SIZE       0xF7FF
+
 /* Maximum number of Tx confirmation frames to be processed
  * in a single NAPI call
  */
@@ -125,6 +128,7 @@ struct dpaa2_eth_swa {
        union {
                struct {
                        struct sk_buff *skb;
+                       int sgt_size;
                } single;
                struct {
                        struct sk_buff *skb;
@@ -282,9 +286,11 @@ struct dpaa2_eth_drv_stats {
        __u64   tx_conf_bytes;
        __u64   tx_sg_frames;
        __u64   tx_sg_bytes;
-       __u64   tx_reallocs;
        __u64   rx_sg_frames;
        __u64   rx_sg_bytes;
+       /* Linear skbs sent as a S/G FD due to insufficient headroom */
+       __u64   tx_converted_sg_frames;
+       __u64   tx_converted_sg_bytes;
        /* Enqueues retried due to portal busy */
        __u64   tx_portal_busy;
 };
@@ -395,6 +401,12 @@ struct dpaa2_eth_cls_rule {
        u8 in_use;
 };
 
+#define DPAA2_ETH_SGT_CACHE_SIZE       256
+struct dpaa2_eth_sgt_cache {
+       void *buf[DPAA2_ETH_SGT_CACHE_SIZE];
+       u16 count;
+};
+
 /* Driver private data */
 struct dpaa2_eth_priv {
        struct net_device *net_dev;
@@ -409,6 +421,7 @@ struct dpaa2_eth_priv {
 
        u8 num_channels;
        struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
+       struct dpaa2_eth_sgt_cache __percpu *sgt_cache;
 
        struct dpni_attr dpni_attrs;
        u16 dpni_ver_major;
index e88269f..8356f1f 100644 (file)
@@ -43,9 +43,10 @@ static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
        "[drv] tx conf bytes",
        "[drv] tx sg frames",
        "[drv] tx sg bytes",
-       "[drv] tx realloc frames",
        "[drv] rx sg frames",
        "[drv] rx sg bytes",
+       "[drv] tx converted sg frames",
+       "[drv] tx converted sg bytes",
        "[drv] enqueue portal busy",
        /* Channel stats */
        "[drv] dequeue portal busy",
index fd069f6..593e381 100644 (file)
@@ -626,4 +626,17 @@ struct dpni_cmd_set_congestion_notification {
        __le32 threshold_exit;
 };
 
+#define DPNI_COUPLED_SHIFT     0
+#define DPNI_COUPLED_SIZE      1
+
+struct dpni_cmd_set_tx_shaping {
+       __le16 tx_cr_max_burst_size;
+       __le16 tx_er_max_burst_size;
+       __le32 pad;
+       __le32 tx_cr_rate_limit;
+       __le32 tx_er_rate_limit;
+       /* from LSB: coupled:1 */
+       u8 coupled;
+};
+
 #endif /* _FSL_DPNI_CMD_H */
index 6b479ba..68ed4c4 100644 (file)
@@ -1558,10 +1558,10 @@ int dpni_get_statistics(struct fsl_mc_io *mc_io,
  * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  * @token:     Token of DPNI object
  * @cg_point:  Congestion point
- * @q_type:    Queue type on which the taildrop is configured.
+ * @qtype:     Queue type on which the taildrop is configured.
  *             Only Rx queues are supported for now
  * @tc:                Traffic class to apply this taildrop to
- * @q_index:   Index of the queue if the DPNI supports multiple queues for
+ * @index:     Index of the queue if the DPNI supports multiple queues for
  *             traffic distribution. Ignored if CONGESTION_POINT is not 0.
  * @taildrop:  Taildrop structure
  *
@@ -1602,10 +1602,10 @@ int dpni_set_taildrop(struct fsl_mc_io *mc_io,
  * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
  * @token:     Token of DPNI object
  * @cg_point:  Congestion point
- * @q_type:    Queue type on which the taildrop is configured.
+ * @qtype:     Queue type on which the taildrop is configured.
  *             Only Rx queues are supported for now
  * @tc:                Traffic class to apply this taildrop to
- * @q_index:   Index of the queue if the DPNI supports multiple queues for
+ * @index:     Index of the queue if the DPNI supports multiple queues for
  *             traffic distribution. Ignored if CONGESTION_POINT is not 0.
  * @taildrop:  Taildrop structure
  *
@@ -1963,3 +1963,39 @@ int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
 }
+
+/**
+ * dpni_set_tx_shaping() - Set the transmit shaping
+ * @mc_io:             Pointer to MC portal's I/O object
+ * @cmd_flags:         Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:             Token of DPNI object
+ * @tx_cr_shaper:      TX committed rate shaping configuration
+ * @tx_er_shaper:      TX excess rate shaping configuration
+ * @coupled:           Committed and excess rate shapers are coupled
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
+                       u32 cmd_flags,
+                       u16 token,
+                       const struct dpni_tx_shaping_cfg *tx_cr_shaper,
+                       const struct dpni_tx_shaping_cfg *tx_er_shaper,
+                       int coupled)
+{
+       struct dpni_cmd_set_tx_shaping *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING,
+                                         cmd_flags,
+                                         token);
+       cmd_params = (struct dpni_cmd_set_tx_shaping *)cmd.params;
+       cmd_params->tx_cr_max_burst_size = cpu_to_le16(tx_cr_shaper->max_burst_size);
+       cmd_params->tx_er_max_burst_size = cpu_to_le16(tx_er_shaper->max_burst_size);
+       cmd_params->tx_cr_rate_limit = cpu_to_le32(tx_cr_shaper->rate_limit);
+       cmd_params->tx_er_rate_limit = cpu_to_le32(tx_er_shaper->rate_limit);
+       dpni_set_field(cmd_params->coupled, COUPLED, coupled);
+
+       /* send command to mc */
+       return mc_send_command(mc_io, &cmd);
+}
index e874d80..3938799 100644 (file)
@@ -1062,5 +1062,21 @@ int dpni_get_api_version(struct fsl_mc_io *mc_io,
                         u32 cmd_flags,
                         u16 *major_ver,
                         u16 *minor_ver);
+/**
+ * struct dpni_tx_shaping_cfg - Structure representing DPNI tx shaping configuration
+ * @rate_limit:                Rate in Mbps
+ * @max_burst_size:    Burst size in bytes (up to 64KB)
+ */
+struct dpni_tx_shaping_cfg {
+       u32 rate_limit;
+       u16 max_burst_size;
+};
+
+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
+                       u32 cmd_flags,
+                       u16 token,
+                       const struct dpni_tx_shaping_cfg *tx_cr_shaper,
+                       const struct dpni_tx_shaping_cfg *tx_er_shaper,
+                       int coupled);
 
 #endif /* __FSL_DPNI_H */
index 2b43848..37b804f 100644 (file)
@@ -4,6 +4,7 @@ config FSL_ENETC
        depends on PCI && PCI_MSI
        select FSL_ENETC_MDIO
        select PHYLIB
+       select DIMLIB
        help
          This driver supports NXP ENETC gigabit ethernet controller PCIe
          physical function (PF) devices, managing ENETC Ports at a privileged
@@ -15,6 +16,7 @@ config FSL_ENETC_VF
        tristate "ENETC VF driver"
        depends on PCI && PCI_MSI
        select PHYLIB
+       select DIMLIB
        help
          This driver supports NXP ENETC gigabit ethernet controller PCIe
          virtual function (VF) devices enabled by the ENETC PF driver.
index 298c557..f50353c 100644 (file)
@@ -265,8 +265,9 @@ static irqreturn_t enetc_msix(int irq, void *data)
 
        /* disable interrupts */
        enetc_wr_reg(v->rbier, 0);
+       enetc_wr_reg(v->ricr1, v->rx_ictt);
 
-       for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+       for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
                enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
 
        napi_schedule_irqoff(&v->napi);
@@ -278,6 +279,34 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
                               struct napi_struct *napi, int work_limit);
 
+static void enetc_rx_dim_work(struct work_struct *w)
+{
+       struct dim *dim = container_of(w, struct dim, work);
+       struct dim_cq_moder moder =
+               net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+       struct enetc_int_vector *v =
+               container_of(dim, struct enetc_int_vector, rx_dim);
+
+       v->rx_ictt = enetc_usecs_to_cycles(moder.usec);
+       dim->state = DIM_START_MEASURE;
+}
+
+static void enetc_rx_net_dim(struct enetc_int_vector *v)
+{
+       struct dim_sample dim_sample;
+
+       v->comp_cnt++;
+
+       if (!v->rx_napi_work)
+               return;
+
+       dim_update_sample(v->comp_cnt,
+                         v->rx_ring.stats.packets,
+                         v->rx_ring.stats.bytes,
+                         &dim_sample);
+       net_dim(&v->rx_dim, dim_sample);
+}
+
 static int enetc_poll(struct napi_struct *napi, int budget)
 {
        struct enetc_int_vector
@@ -293,16 +322,23 @@ static int enetc_poll(struct napi_struct *napi, int budget)
        work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
        if (work_done == budget)
                complete = false;
+       if (work_done)
+               v->rx_napi_work = true;
 
        if (!complete)
                return budget;
 
        napi_complete_done(napi, work_done);
 
+       if (likely(v->rx_dim_en))
+               enetc_rx_net_dim(v);
+
+       v->rx_napi_work = false;
+
        /* enable interrupts */
        enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
 
-       for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+       for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
                enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
                             ENETC_TBIER_TXTIE);
 
@@ -1064,8 +1100,8 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
        struct enetc_si *si = priv->si;
        int cpus = num_online_cpus();
 
-       priv->tx_bd_count = ENETC_BDR_DEFAULT_SIZE;
-       priv->rx_bd_count = ENETC_BDR_DEFAULT_SIZE;
+       priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE;
+       priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE;
 
        /* Enable all available TX rings in order to configure as many
         * priorities as possible, when needed.
@@ -1074,6 +1110,8 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
        priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
        priv->num_tx_rings = si->num_tx_rings;
        priv->bdr_int_num = cpus;
+       priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
+       priv->tx_ictt = ENETC_TXIC_TIMETHR;
 
        /* SI specific */
        si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
@@ -1140,7 +1178,7 @@ static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
        tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
 
        /* enable Tx ints by setting pkt thr to 1 */
-       enetc_txbdr_wr(hw, idx, ENETC_TBICIR0, ENETC_TBICIR0_ICEN | 0x1);
+       enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
 
        tbmr = ENETC_TBMR_EN;
        if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
@@ -1174,7 +1212,7 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
        enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
 
        /* enable Rx ints by setting pkt thr to 1 */
-       enetc_rxbdr_wr(hw, idx, ENETC_RBICIR0, ENETC_RBICIR0_ICEN | 0x1);
+       enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
 
        rbmr = ENETC_RBMR_EN;
 
@@ -1264,9 +1302,11 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
                        dev_err(priv->dev, "request_irq() failed!\n");
                        goto irq_err;
                }
+               disable_irq(irq);
 
                v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
                v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
+               v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1);
 
                enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
 
@@ -1306,23 +1346,42 @@ static void enetc_free_irqs(struct enetc_ndev_priv *priv)
        }
 }
 
-static void enetc_enable_interrupts(struct enetc_ndev_priv *priv)
+static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
 {
+       struct enetc_hw *hw = &priv->si->hw;
+       u32 icpt, ictt;
        int i;
 
        /* enable Tx & Rx event indication */
+       if (priv->ic_mode &
+           (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) {
+               icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR);
+               /* init to non-0 minimum, will be adjusted later */
+               ictt = 0x1;
+       } else {
+               icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */
+               ictt = 0;
+       }
+
        for (i = 0; i < priv->num_rx_rings; i++) {
-               enetc_rxbdr_wr(&priv->si->hw, i,
-                              ENETC_RBIER, ENETC_RBIER_RXTIE);
+               enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt);
+               enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt);
+               enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
        }
 
+       if (priv->ic_mode & ENETC_IC_TX_MANUAL)
+               icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR);
+       else
+               icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */
+
        for (i = 0; i < priv->num_tx_rings; i++) {
-               enetc_txbdr_wr(&priv->si->hw, i,
-                              ENETC_TBIER, ENETC_TBIER_TXTIE);
+               enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt);
+               enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt);
+               enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE);
        }
 }
 
-static void enetc_disable_interrupts(struct enetc_ndev_priv *priv)
+static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
 {
        int i;
 
@@ -1369,10 +1428,33 @@ static int enetc_phy_connect(struct net_device *ndev)
        return 0;
 }
 
+void enetc_start(struct net_device *ndev)
+{
+       struct enetc_ndev_priv *priv = netdev_priv(ndev);
+       int i;
+
+       enetc_setup_interrupts(priv);
+
+       for (i = 0; i < priv->bdr_int_num; i++) {
+               int irq = pci_irq_vector(priv->si->pdev,
+                                        ENETC_BDR_INT_BASE_IDX + i);
+
+               napi_enable(&priv->int_vector[i]->napi);
+               enable_irq(irq);
+       }
+
+       if (ndev->phydev)
+               phy_start(ndev->phydev);
+       else
+               netif_carrier_on(ndev);
+
+       netif_tx_start_all_queues(ndev);
+}
+
 int enetc_open(struct net_device *ndev)
 {
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
-       int i, err;
+       int err;
 
        err = enetc_setup_irqs(priv);
        if (err)
@@ -1390,8 +1472,6 @@ int enetc_open(struct net_device *ndev)
        if (err)
                goto err_alloc_rx;
 
-       enetc_setup_bdrs(priv);
-
        err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
        if (err)
                goto err_set_queues;
@@ -1400,17 +1480,8 @@ int enetc_open(struct net_device *ndev)
        if (err)
                goto err_set_queues;
 
-       for (i = 0; i < priv->bdr_int_num; i++)
-               napi_enable(&priv->int_vector[i]->napi);
-
-       enetc_enable_interrupts(priv);
-
-       if (ndev->phydev)
-               phy_start(ndev->phydev);
-       else
-               netif_carrier_on(ndev);
-
-       netif_tx_start_all_queues(ndev);
+       enetc_setup_bdrs(priv);
+       enetc_start(ndev);
 
        return 0;
 
@@ -1427,28 +1498,39 @@ err_phy_connect:
        return err;
 }
 
-int enetc_close(struct net_device *ndev)
+void enetc_stop(struct net_device *ndev)
 {
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
        int i;
 
        netif_tx_stop_all_queues(ndev);
 
-       if (ndev->phydev) {
-               phy_stop(ndev->phydev);
-               phy_disconnect(ndev->phydev);
-       } else {
-               netif_carrier_off(ndev);
-       }
-
        for (i = 0; i < priv->bdr_int_num; i++) {
+               int irq = pci_irq_vector(priv->si->pdev,
+                                        ENETC_BDR_INT_BASE_IDX + i);
+
+               disable_irq(irq);
                napi_synchronize(&priv->int_vector[i]->napi);
                napi_disable(&priv->int_vector[i]->napi);
        }
 
-       enetc_disable_interrupts(priv);
+       if (ndev->phydev)
+               phy_stop(ndev->phydev);
+       else
+               netif_carrier_off(ndev);
+
+       enetc_clear_interrupts(priv);
+}
+
+int enetc_close(struct net_device *ndev)
+{
+       struct enetc_ndev_priv *priv = netdev_priv(ndev);
+
+       enetc_stop(ndev);
        enetc_clear_bdrs(priv);
 
+       if (ndev->phydev)
+               phy_disconnect(ndev->phydev);
        enetc_free_rxtx_rings(priv);
        enetc_free_rx_resources(priv);
        enetc_free_tx_resources(priv);
@@ -1595,6 +1677,24 @@ static int enetc_set_psfp(struct net_device *ndev, int en)
        return 0;
 }
 
+static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
+{
+       struct enetc_ndev_priv *priv = netdev_priv(ndev);
+       int i;
+
+       for (i = 0; i < priv->num_rx_rings; i++)
+               enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
+}
+
+static void enetc_enable_txvlan(struct net_device *ndev, bool en)
+{
+       struct enetc_ndev_priv *priv = netdev_priv(ndev);
+       int i;
+
+       for (i = 0; i < priv->num_tx_rings; i++)
+               enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
+}
+
 int enetc_set_features(struct net_device *ndev,
                       netdev_features_t features)
 {
@@ -1604,6 +1704,14 @@ int enetc_set_features(struct net_device *ndev,
        if (changed & NETIF_F_RXHASH)
                enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
 
+       if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+               enetc_enable_rxvlan(ndev,
+                                   !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+
+       if (changed & NETIF_F_HW_VLAN_CTAG_TX)
+               enetc_enable_txvlan(ndev,
+                                   !!(features & NETIF_F_HW_VLAN_CTAG_TX));
+
        if (changed & NETIF_F_HW_TC)
                err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
 
@@ -1687,7 +1795,7 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
 int enetc_alloc_msix(struct enetc_ndev_priv *priv)
 {
        struct pci_dev *pdev = priv->si->pdev;
-       int size, v_tx_rings;
+       int v_tx_rings;
        int i, n, err, nvec;
 
        nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
@@ -1702,15 +1810,13 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
 
        /* # of tx rings per int vector */
        v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
-       size = sizeof(struct enetc_int_vector) +
-              sizeof(struct enetc_bdr) * v_tx_rings;
 
        for (i = 0; i < priv->bdr_int_num; i++) {
                struct enetc_int_vector *v;
                struct enetc_bdr *bdr;
                int j;
 
-               v = kzalloc(size, GFP_KERNEL);
+               v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
                if (!v) {
                        err = -ENOMEM;
                        goto fail;
@@ -1718,6 +1824,12 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
 
                priv->int_vector[i] = v;
 
+               /* init defaults for adaptive IC */
+               if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
+                       v->rx_ictt = 0x1;
+                       v->rx_dim_en = true;
+               }
+               INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
                netif_napi_add(priv->ndev, &v->napi, enetc_poll,
                               NAPI_POLL_WEIGHT);
                v->count_tx_rings = v_tx_rings;
@@ -1753,6 +1865,7 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
 fail:
        while (i--) {
                netif_napi_del(&priv->int_vector[i]->napi);
+               cancel_work_sync(&priv->int_vector[i]->rx_dim.work);
                kfree(priv->int_vector[i]);
        }
 
@@ -1769,6 +1882,7 @@ void enetc_free_msix(struct enetc_ndev_priv *priv)
                struct enetc_int_vector *v = priv->int_vector[i];
 
                netif_napi_del(&v->napi);
+               cancel_work_sync(&v->rx_dim.work);
        }
 
        for (i = 0; i < priv->num_rx_rings; i++)
index b705464..d309803 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
 #include <linux/phy.h>
+#include <linux/dim.h>
 
 #include "enetc_hw.h"
 
@@ -44,8 +45,9 @@ struct enetc_ring_stats {
        unsigned int rx_alloc_errs;
 };
 
-#define ENETC_BDR_DEFAULT_SIZE 1024
-#define ENETC_DEFAULT_TX_WORK  256
+#define ENETC_RX_RING_DEFAULT_SIZE     512
+#define ENETC_TX_RING_DEFAULT_SIZE     256
+#define ENETC_DEFAULT_TX_WORK          (ENETC_TX_RING_DEFAULT_SIZE / 2)
 
 struct enetc_bdr {
        struct device *dev; /* for DMA mapping */
@@ -189,14 +191,19 @@ static inline bool enetc_si_is_pf(struct enetc_si *si)
 struct enetc_int_vector {
        void __iomem *rbier;
        void __iomem *tbier_base;
+       void __iomem *ricr1;
        unsigned long tx_rings_map;
        int count_tx_rings;
-       struct napi_struct napi;
+       u32 rx_ictt;
+       u16 comp_cnt;
+       bool rx_dim_en, rx_napi_work;
+       struct napi_struct napi ____cacheline_aligned_in_smp;
+       struct dim rx_dim ____cacheline_aligned_in_smp;
        char name[ENETC_INT_NAME_MAX];
 
-       struct enetc_bdr rx_ring ____cacheline_aligned_in_smp;
+       struct enetc_bdr rx_ring;
        struct enetc_bdr tx_ring[];
-};
+} ____cacheline_aligned_in_smp;
 
 struct enetc_cls_rule {
        struct ethtool_rx_flow_spec fs;
@@ -220,6 +227,21 @@ enum enetc_active_offloads {
        ENETC_F_QCI             = BIT(3),
 };
 
+/* interrupt coalescing modes */
+enum enetc_ic_mode {
+       /* one interrupt per frame */
+       ENETC_IC_NONE = 0,
+       /* activated when int coalescing time is set to a non-0 value */
+       ENETC_IC_RX_MANUAL = BIT(0),
+       ENETC_IC_TX_MANUAL = BIT(1),
+       /* use dynamic interrupt moderation */
+       ENETC_IC_RX_ADAPTIVE = BIT(2),
+};
+
+#define ENETC_RXIC_PKTTHR      min_t(u32, 256, ENETC_RX_RING_DEFAULT_SIZE / 2)
+#define ENETC_TXIC_PKTTHR      min_t(u32, 128, ENETC_TX_RING_DEFAULT_SIZE / 2)
+#define ENETC_TXIC_TIMETHR     enetc_usecs_to_cycles(600)
+
 struct enetc_ndev_priv {
        struct net_device *ndev;
        struct device *dev; /* dma-mapping device */
@@ -244,6 +266,8 @@ struct enetc_ndev_priv {
 
        struct device_node *phy_node;
        phy_interface_t if_mode;
+       int ic_mode;
+       u32 tx_ictt;
 };
 
 /* Messaging */
@@ -273,6 +297,8 @@ void enetc_free_si_resources(struct enetc_ndev_priv *priv);
 
 int enetc_open(struct net_device *ndev);
 int enetc_close(struct net_device *ndev);
+void enetc_start(struct net_device *ndev);
+void enetc_stop(struct net_device *ndev);
 netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev);
 struct net_device_stats *enetc_get_stats(struct net_device *ndev);
 int enetc_set_features(struct net_device *ndev,
index 34bd1f3..1dab83f 100644 (file)
@@ -14,12 +14,14 @@ static const u32 enetc_si_regs[] = {
 
 static const u32 enetc_txbdr_regs[] = {
        ENETC_TBMR, ENETC_TBSR, ENETC_TBBAR0, ENETC_TBBAR1,
-       ENETC_TBPIR, ENETC_TBCIR, ENETC_TBLENR, ENETC_TBIER
+       ENETC_TBPIR, ENETC_TBCIR, ENETC_TBLENR, ENETC_TBIER, ENETC_TBICR0,
+       ENETC_TBICR1
 };
 
 static const u32 enetc_rxbdr_regs[] = {
        ENETC_RBMR, ENETC_RBSR, ENETC_RBBSR, ENETC_RBCIR, ENETC_RBBAR0,
-       ENETC_RBBAR1, ENETC_RBPIR, ENETC_RBLENR, ENETC_RBICIR0, ENETC_RBIER
+       ENETC_RBBAR1, ENETC_RBPIR, ENETC_RBLENR, ENETC_RBIER, ENETC_RBICR0,
+       ENETC_RBICR1
 };
 
 static const u32 enetc_port_regs[] = {
@@ -561,6 +563,74 @@ static void enetc_get_ringparam(struct net_device *ndev,
        }
 }
 
+/* ethtool get_coalesce: report current interrupt coalescing settings.
+ * All interrupt vectors are programmed with the same Rx settings (see
+ * enetc_set_coalesce()), so vector 0 is representative; the Tx coalescing
+ * time is global (priv->tx_ictt).
+ */
+static int enetc_get_coalesce(struct net_device *ndev,
+                             struct ethtool_coalesce *ic)
+{
+       struct enetc_ndev_priv *priv = netdev_priv(ndev);
+       struct enetc_int_vector *v = priv->int_vector[0];
+
+       /* h/w stores coalescing times in clock cycles, ethtool uses usecs */
+       ic->tx_coalesce_usecs = enetc_cycles_to_usecs(priv->tx_ictt);
+       ic->rx_coalesce_usecs = enetc_cycles_to_usecs(v->rx_ictt);
+
+       /* packet thresholds are fixed by the driver, not user configurable */
+       ic->tx_max_coalesced_frames = ENETC_TXIC_PKTTHR;
+       ic->rx_max_coalesced_frames = ENETC_RXIC_PKTTHR;
+
+       ic->use_adaptive_rx_coalesce = priv->ic_mode & ENETC_IC_RX_ADAPTIVE;
+
+       return 0;
+}
+
+/* ethtool set_coalesce: apply interrupt coalescing settings.
+ * The frame thresholds are fixed, so any request that differs from the
+ * driver defaults is rejected with -EOPNOTSUPP. Coalescing times are
+ * converted from usecs to h/w clock cycles, the resulting mode flags are
+ * derived, and the same Rx settings are propagated to every int vector.
+ */
+static int enetc_set_coalesce(struct net_device *ndev,
+                             struct ethtool_coalesce *ic)
+{
+       struct enetc_ndev_priv *priv = netdev_priv(ndev);
+       u32 rx_ictt, tx_ictt;
+       int i, ic_mode;
+       bool changed;
+
+       tx_ictt = enetc_usecs_to_cycles(ic->tx_coalesce_usecs);
+       rx_ictt = enetc_usecs_to_cycles(ic->rx_coalesce_usecs);
+
+       if (ic->rx_max_coalesced_frames != ENETC_RXIC_PKTTHR)
+               return -EOPNOTSUPP;
+
+       if (ic->tx_max_coalesced_frames != ENETC_TXIC_PKTTHR)
+               return -EOPNOTSUPP;
+
+       ic_mode = ENETC_IC_NONE;
+       if (ic->use_adaptive_rx_coalesce) {
+               ic_mode |= ENETC_IC_RX_ADAPTIVE;
+               /* seed with a minimal non-zero time; DIM will adjust it */
+               rx_ictt = 0x1;
+       } else {
+               ic_mode |= rx_ictt ? ENETC_IC_RX_MANUAL : 0;
+       }
+
+       ic_mode |= tx_ictt ? ENETC_IC_TX_MANUAL : 0;
+
+       /* commit the settings */
+       /* NOTE(review): 'changed' does not account for an Rx-time-only
+        * change within the same mode — presumably v->rx_ictt is picked up
+        * without a restart in that case; confirm against the h/w setup path.
+        */
+       changed = (ic_mode != priv->ic_mode) || (priv->tx_ictt != tx_ictt);
+
+       priv->ic_mode = ic_mode;
+       priv->tx_ictt = tx_ictt;
+
+       for (i = 0; i < priv->bdr_int_num; i++) {
+               struct enetc_int_vector *v = priv->int_vector[i];
+
+               v->rx_ictt = rx_ictt;
+               v->rx_dim_en = !!(ic_mode & ENETC_IC_RX_ADAPTIVE);
+       }
+
+       if (netif_running(ndev) && changed) {
+               /* reconfigure the operation mode of h/w interrupts,
+                * traffic needs to be paused in the process
+                */
+               enetc_stop(ndev);
+               enetc_start(ndev);
+       }
+
+       return 0;
+}
+
 static int enetc_get_ts_info(struct net_device *ndev,
                             struct ethtool_ts_info *info)
 {
@@ -617,6 +687,9 @@ static int enetc_set_wol(struct net_device *dev,
 }
 
 static const struct ethtool_ops enetc_pf_ethtool_ops = {
+       .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+                                    ETHTOOL_COALESCE_MAX_FRAMES |
+                                    ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
        .get_regs_len = enetc_get_reglen,
        .get_regs = enetc_get_regs,
        .get_sset_count = enetc_get_sset_count,
@@ -629,6 +702,8 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
        .get_rxfh = enetc_get_rxfh,
        .set_rxfh = enetc_set_rxfh,
        .get_ringparam = enetc_get_ringparam,
+       .get_coalesce = enetc_get_coalesce,
+       .set_coalesce = enetc_set_coalesce,
        .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
        .get_link = ethtool_op_get_link,
@@ -638,6 +713,9 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
 };
 
 static const struct ethtool_ops enetc_vf_ethtool_ops = {
+       .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+                                    ETHTOOL_COALESCE_MAX_FRAMES |
+                                    ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
        .get_regs_len = enetc_get_reglen,
        .get_regs = enetc_get_regs,
        .get_sset_count = enetc_get_sset_count,
@@ -649,6 +727,8 @@ static const struct ethtool_ops enetc_vf_ethtool_ops = {
        .get_rxfh = enetc_get_rxfh,
        .set_rxfh = enetc_set_rxfh,
        .get_ringparam = enetc_get_ringparam,
+       .get_coalesce = enetc_get_coalesce,
+       .set_coalesce = enetc_set_coalesce,
        .get_link = ethtool_op_get_link,
        .get_ts_info = enetc_get_ts_info,
 };
index 6314051..17cf7c9 100644 (file)
@@ -121,8 +121,11 @@ enum enetc_bdr_type {TX, RX};
 #define ENETC_RBIER    0xa0
 #define ENETC_RBIER_RXTIE      BIT(0)
 #define ENETC_RBIDR    0xa4
-#define ENETC_RBICIR0  0xa8
-#define ENETC_RBICIR0_ICEN     BIT(31)
+#define ENETC_RBICR0   0xa8
+#define ENETC_RBICR0_ICEN              BIT(31)
+#define ENETC_RBICR0_ICPT_MASK         0x1ff
+#define ENETC_RBICR0_SET_ICPT(n)       ((n) & ENETC_RBICR0_ICPT_MASK)
+#define ENETC_RBICR1   0xac
 
 /* TX BDR reg offsets */
 #define ENETC_TBMR     0
@@ -141,8 +144,11 @@ enum enetc_bdr_type {TX, RX};
 #define ENETC_TBIER    0xa0
 #define ENETC_TBIER_TXTIE      BIT(0)
 #define ENETC_TBIDR    0xa4
-#define ENETC_TBICIR0  0xa8
-#define ENETC_TBICIR0_ICEN     BIT(31)
+#define ENETC_TBICR0   0xa8
+#define ENETC_TBICR0_ICEN              BIT(31)
+#define ENETC_TBICR0_ICPT_MASK         0xf
+#define ENETC_TBICR0_SET_ICPT(n) ((ilog2(n) + 1) & ENETC_TBICR0_ICPT_MASK)
+#define ENETC_TBICR1   0xac
 
 #define ENETC_RTBLENR_LEN(n)   ((n) & ~0x7)
 
@@ -224,6 +230,9 @@ enum enetc_bdr_type {TX, RX};
 #define ENETC_PM0_MAXFRM       0x8014
 #define ENETC_SET_TX_MTU(val)  ((val) << 16)
 #define ENETC_SET_MAXFRM(val)  ((val) & 0xffff)
+
+#define ENETC_PM_IMDIO_BASE    0x8030
+
 #define ENETC_PM0_IF_MODE      0x8300
 #define ENETC_PMO_IFM_RG       BIT(2)
 #define ENETC_PM0_IFM_RLP      (BIT(5) | BIT(11))
@@ -531,22 +540,22 @@ struct enetc_msg_cmd_header {
 
 /* Common H/W utility functions */
 
-static inline void enetc_enable_rxvlan(struct enetc_hw *hw, int si_idx,
-                                      bool en)
+static inline void enetc_bdr_enable_rxvlan(struct enetc_hw *hw, int idx,
+                                          bool en)
 {
-       u32 val = enetc_rxbdr_rd(hw, si_idx, ENETC_RBMR);
+       u32 val = enetc_rxbdr_rd(hw, idx, ENETC_RBMR);
 
        val = (val & ~ENETC_RBMR_VTE) | (en ? ENETC_RBMR_VTE : 0);
-       enetc_rxbdr_wr(hw, si_idx, ENETC_RBMR, val);
+       enetc_rxbdr_wr(hw, idx, ENETC_RBMR, val);
 }
 
-static inline void enetc_enable_txvlan(struct enetc_hw *hw, int si_idx,
-                                      bool en)
+static inline void enetc_bdr_enable_txvlan(struct enetc_hw *hw, int idx,
+                                          bool en)
 {
-       u32 val = enetc_txbdr_rd(hw, si_idx, ENETC_TBMR);
+       u32 val = enetc_txbdr_rd(hw, idx, ENETC_TBMR);
 
        val = (val & ~ENETC_TBMR_VIH) | (en ? ENETC_TBMR_VIH : 0);
-       enetc_txbdr_wr(hw, si_idx, ENETC_TBMR, val);
+       enetc_txbdr_wr(hw, idx, ENETC_TBMR, val);
 }
 
 static inline void enetc_set_bdr_prio(struct enetc_hw *hw, int bdr_idx,
@@ -570,6 +579,7 @@ enum bdcr_cmd_class {
        BDCR_CMD_STREAM_IDENTIFY,
        BDCR_CMD_STREAM_FILTER,
        BDCR_CMD_STREAM_GCL,
+       BDCR_CMD_FLOW_METER,
        __BDCR_CMD_MAX_LEN,
        BDCR_CMD_MAX_LEN = __BDCR_CMD_MAX_LEN - 1,
 };
@@ -736,10 +746,33 @@ struct sgcl_data {
        struct sgce     sgcl[0];
 };
 
+/* Flow Meter Instance configuration flags (fmi_conf.conf) */
+#define ENETC_CBDR_FMI_MR      BIT(0)
+#define ENETC_CBDR_FMI_MREN    BIT(1)
+#define ENETC_CBDR_FMI_DOY     BIT(2)
+#define ENETC_CBDR_FMI_CM      BIT(3)
+#define ENETC_CBDR_FMI_CF      BIT(4)
+#define ENETC_CBDR_FMI_NDOR    BIT(5)
+#define ENETC_CBDR_FMI_OALEN   BIT(6)
+#define ENETC_CBDR_FMI_IRFPP_MASK GENMASK(4, 0)
+
+/* class 10: command 0/1, Flow Meter Instance Set, short Format */
+struct fmi_conf {
+       __le32  cir;    /* committed information rate, h/w units */
+       __le32  cbs;    /* committed burst size */
+       __le32  eir;    /* excess information rate (unused, kept 0) */
+       __le32  ebs;    /* excess burst size (unused, kept 0) */
+       u8      conf;   /* ENETC_CBDR_FMI_* flags */
+       u8      res1;
+       u8      ir_fpp;
+       u8      res2[4];
+       u8      en;     /* 0x80 enables the instance */
+};
+
 struct enetc_cbd {
        union{
                struct sfi_conf sfi_conf;
                struct sgi_table sgi_table;
+               struct fmi_conf fmi_conf;
                struct {
                        __le32  addr[2];
                        union {
@@ -760,6 +793,15 @@ struct enetc_cbd {
 };
 
 #define ENETC_CLK  400000000ULL
+/* Convert h/w clock cycles (ENETC_CLK = 400 MHz) to microseconds.
+ * 64-bit intermediate avoids overflow for any u32 cycle count.
+ */
+static inline u32 enetc_cycles_to_usecs(u32 cycles)
+{
+       return (u32)div_u64(cycles * 1000000ULL, ENETC_CLK);
+}
+
+/* Convert microseconds to h/w clock cycles (inverse of the above) */
+static inline u32 enetc_usecs_to_cycles(u32 usecs)
+{
+       return (u32)div_u64(usecs * ENETC_CLK, 1000000ULL);
+}
 
 /* port time gating control register */
 #define ENETC_QBV_PTGCR_OFFSET         0x11a00
index 824d211..1d2158f 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
 /* Copyright 2017-2019 NXP */
 
+#include <linux/mdio.h>
 #include <linux/module.h>
 #include <linux/fsl/enetc_mdio.h>
 #include <linux/of_mdio.h>
@@ -481,7 +482,8 @@ static void enetc_port_si_configure(struct enetc_si *si)
        enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
 }
 
-static void enetc_configure_port_mac(struct enetc_hw *hw)
+static void enetc_configure_port_mac(struct enetc_hw *hw,
+                                    phy_interface_t phy_mode)
 {
        enetc_port_wr(hw, ENETC_PM0_MAXFRM,
                      ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
@@ -497,9 +499,11 @@ static void enetc_configure_port_mac(struct enetc_hw *hw)
                      ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
                      ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
        /* set auto-speed for RGMII */
-       if (enetc_port_rd(hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG)
+       if (enetc_port_rd(hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG ||
+           phy_interface_mode_is_rgmii(phy_mode))
                enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_RGAUTO);
-       if (enetc_global_rd(hw, ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII)
+
+       if (phy_mode == PHY_INTERFACE_MODE_USXGMII)
                enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_XGMII);
 }
 
@@ -523,7 +527,7 @@ static void enetc_configure_port(struct enetc_pf *pf)
 
        enetc_configure_port_pmac(hw);
 
-       enetc_configure_port_mac(hw);
+       enetc_configure_port_mac(hw, pf->if_mode);
 
        enetc_port_si_configure(pf->si);
 
@@ -649,14 +653,6 @@ static int enetc_pf_set_features(struct net_device *ndev,
        netdev_features_t changed = ndev->features ^ features;
        struct enetc_ndev_priv *priv = netdev_priv(ndev);
 
-       if (changed & NETIF_F_HW_VLAN_CTAG_RX)
-               enetc_enable_rxvlan(&priv->si->hw, 0,
-                                   !!(features & NETIF_F_HW_VLAN_CTAG_RX));
-
-       if (changed & NETIF_F_HW_VLAN_CTAG_TX)
-               enetc_enable_txvlan(&priv->si->hw, 0,
-                                   !!(features & NETIF_F_HW_VLAN_CTAG_TX));
-
        if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
                struct enetc_pf *pf = enetc_si_priv(priv->si);
 
@@ -783,27 +779,27 @@ static void enetc_mdio_remove(struct enetc_pf *pf)
                mdiobus_unregister(pf->mdio);
 }
 
-static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
+static int enetc_of_get_phy(struct enetc_pf *pf)
 {
-       struct enetc_pf *pf = enetc_si_priv(priv->si);
-       struct device_node *np = priv->dev->of_node;
+       struct device *dev = &pf->si->pdev->dev;
+       struct device_node *np = dev->of_node;
        struct device_node *mdio_np;
        int err;
 
-       priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
-       if (!priv->phy_node) {
+       pf->phy_node = of_parse_phandle(np, "phy-handle", 0);
+       if (!pf->phy_node) {
                if (!of_phy_is_fixed_link(np)) {
-                       dev_err(priv->dev, "PHY not specified\n");
+                       dev_err(dev, "PHY not specified\n");
                        return -ENODEV;
                }
 
                err = of_phy_register_fixed_link(np);
                if (err < 0) {
-                       dev_err(priv->dev, "fixed link registration failed\n");
+                       dev_err(dev, "fixed link registration failed\n");
                        return err;
                }
 
-               priv->phy_node = of_node_get(np);
+               pf->phy_node = of_node_get(np);
        }
 
        mdio_np = of_get_child_by_name(np, "mdio");
@@ -811,15 +807,15 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
                of_node_put(mdio_np);
                err = enetc_mdio_probe(pf);
                if (err) {
-                       of_node_put(priv->phy_node);
+                       of_node_put(pf->phy_node);
                        return err;
                }
        }
 
-       err = of_get_phy_mode(np, &priv->if_mode);
+       err = of_get_phy_mode(np, &pf->if_mode);
        if (err) {
-               dev_err(priv->dev, "missing phy type\n");
-               of_node_put(priv->phy_node);
+               dev_err(dev, "missing phy type\n");
+               of_node_put(pf->phy_node);
                if (of_phy_is_fixed_link(np))
                        of_phy_deregister_fixed_link(np);
                else
@@ -831,14 +827,150 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
        return 0;
 }
 
-static void enetc_of_put_phy(struct enetc_ndev_priv *priv)
+static void enetc_of_put_phy(struct enetc_pf *pf)
 {
-       struct device_node *np = priv->dev->of_node;
+       struct device_node *np = pf->si->pdev->dev.of_node;
 
        if (np && of_phy_is_fixed_link(np))
                of_phy_deregister_fixed_link(np);
-       if (priv->phy_node)
-               of_node_put(priv->phy_node);
+       if (pf->phy_node)
+               of_node_put(pf->phy_node);
+}
+
+/* Register the ENETC internal MDIO bus and fetch the internal PCS PHY
+ * device sitting at address 0 on it.
+ * @is_c45: probe the PCS as a clause-45 device (USXGMII) instead of c22.
+ * On success pf->imdio and pf->pcs are populated; on failure every
+ * intermediate resource is released via the goto unwind below.
+ */
+static int enetc_imdio_init(struct enetc_pf *pf, bool is_c45)
+{
+       struct device *dev = &pf->si->pdev->dev;
+       struct enetc_mdio_priv *mdio_priv;
+       struct phy_device *pcs;
+       struct mii_bus *bus;
+       int err;
+
+       bus = mdiobus_alloc_size(sizeof(*mdio_priv));
+       if (!bus)
+               return -ENOMEM;
+
+       bus->name = "Freescale ENETC internal MDIO Bus";
+       bus->read = enetc_mdio_read;
+       bus->write = enetc_mdio_write;
+       bus->parent = dev;
+       /* mask all addresses from auto-probing; the PCS is fetched
+        * explicitly with get_phy_device() below
+        */
+       bus->phy_mask = ~0;
+       mdio_priv = bus->priv;
+       mdio_priv->hw = &pf->si->hw;
+       mdio_priv->mdio_base = ENETC_PM_IMDIO_BASE;
+       snprintf(bus->id, MII_BUS_ID_SIZE, "%s-imdio", dev_name(dev));
+
+       err = mdiobus_register(bus);
+       if (err) {
+               dev_err(dev, "cannot register internal MDIO bus (%d)\n", err);
+               goto free_mdio_bus;
+       }
+
+       pcs = get_phy_device(bus, 0, is_c45);
+       if (IS_ERR(pcs)) {
+               err = PTR_ERR(pcs);
+               dev_err(dev, "cannot get internal PCS PHY (%d)\n", err);
+               goto unregister_mdiobus;
+       }
+
+       pf->imdio = bus;
+       pf->pcs = pcs;
+
+       return 0;
+
+unregister_mdiobus:
+       mdiobus_unregister(bus);
+free_mdio_bus:
+       mdiobus_free(bus);
+       return err;
+}
+
+/* Undo enetc_imdio_init(): drop the PCS device reference (taken by
+ * get_phy_device()) and tear down the internal MDIO bus. Safe to call
+ * when init never ran — both pointers are checked.
+ */
+static void enetc_imdio_remove(struct enetc_pf *pf)
+{
+       if (pf->pcs)
+               put_device(&pf->pcs->mdio.dev);
+       if (pf->imdio) {
+               mdiobus_unregister(pf->imdio);
+               mdiobus_free(pf->imdio);
+       }
+}
+
+/* Configure the internal PCS for SGMII with in-band autonegotiation */
+static void enetc_configure_sgmii(struct phy_device *pcs)
+{
+       /* SGMII spec requires tx_config_Reg[15:0] to be exactly 0x4001
+        * for the MAC PCS in order to acknowledge the AN.
+        */
+       phy_write(pcs, MII_ADVERTISE, ADVERTISE_SGMII | ADVERTISE_LPACK);
+
+       phy_write(pcs, ENETC_PCS_IF_MODE,
+                 ENETC_PCS_IF_MODE_SGMII_EN |
+                 ENETC_PCS_IF_MODE_USE_SGMII_AN);
+
+       /* Adjust link timer for SGMII */
+       phy_write(pcs, ENETC_PCS_LINK_TIMER1, ENETC_PCS_LINK_TIMER1_VAL);
+       phy_write(pcs, ENETC_PCS_LINK_TIMER2, ENETC_PCS_LINK_TIMER2_VAL);
+
+       /* enable and restart the PCS autonegotiation */
+       phy_write(pcs, MII_BMCR, BMCR_ANRESTART | BMCR_ANENABLE);
+}
+
+/* Configure the internal PCS for 2500Base-X: SGMII block at 2.5G fixed
+ * speed, full duplex, no autonegotiation (BMCR written without ANENABLE).
+ */
+static void enetc_configure_2500basex(struct phy_device *pcs)
+{
+       phy_write(pcs, ENETC_PCS_IF_MODE,
+                 ENETC_PCS_IF_MODE_SGMII_EN |
+                 ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_2500));
+
+       phy_write(pcs, MII_BMCR, BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_RESET);
+}
+
+/* Configure the internal PCS for USXGMII via clause-45 vendor registers */
+static void enetc_configure_usxgmii(struct phy_device *pcs)
+{
+       /* Configure device ability for the USXGMII Replicator */
+       phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_ADVERTISE,
+                     ADVERTISE_SGMII | ADVERTISE_LPACK |
+                     MDIO_USXGMII_FULL_DUPLEX);
+
+       /* Restart PCS AN */
+       phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_BMCR,
+                     BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
+}
+
+/* Set up the internal MDIO bus and program the PCS for SerDes-based link
+ * modes (SGMII, 2500Base-X, USXGMII). A no-op (returns 0) for any other
+ * interface mode. Only USXGMII uses clause-45 access to the PCS.
+ * Returns 0 on success or if no SerDes config is needed, negative errno
+ * if the internal MDIO bus could not be brought up.
+ */
+static int enetc_configure_serdes(struct enetc_ndev_priv *priv)
+{
+       bool is_c45 = priv->if_mode == PHY_INTERFACE_MODE_USXGMII;
+       struct enetc_pf *pf = enetc_si_priv(priv->si);
+       int err;
+
+       if (priv->if_mode != PHY_INTERFACE_MODE_SGMII &&
+           priv->if_mode != PHY_INTERFACE_MODE_2500BASEX &&
+           priv->if_mode != PHY_INTERFACE_MODE_USXGMII)
+               return 0;
+
+       err = enetc_imdio_init(pf, is_c45);
+       if (err)
+               return err;
+
+       switch (priv->if_mode) {
+       case PHY_INTERFACE_MODE_SGMII:
+               enetc_configure_sgmii(pf->pcs);
+               break;
+       case PHY_INTERFACE_MODE_2500BASEX:
+               enetc_configure_2500basex(pf->pcs);
+               break;
+       case PHY_INTERFACE_MODE_USXGMII:
+               enetc_configure_usxgmii(pf->pcs);
+               break;
+       default:
+               /* unreachable given the guard above; kept defensively */
+               dev_err(&pf->si->pdev->dev, "Unsupported link mode %s\n",
+                       phy_modes(priv->if_mode));
+       }
+
+       return 0;
+}
+
+/* Counterpart of enetc_configure_serdes(): release the internal MDIO
+ * bus and PCS reference (safe even if SerDes was never configured).
+ */
+static void enetc_teardown_serdes(struct enetc_ndev_priv *priv)
+{
+       struct enetc_pf *pf = enetc_si_priv(priv->si);
+
+       enetc_imdio_remove(pf);
 }
 
 static int enetc_pf_probe(struct pci_dev *pdev,
@@ -872,6 +1004,10 @@ static int enetc_pf_probe(struct pci_dev *pdev,
        pf->si = si;
        pf->total_vfs = pci_sriov_get_totalvfs(pdev);
 
+       err = enetc_of_get_phy(pf);
+       if (err)
+               dev_warn(&pdev->dev, "Fallback to PHY-less operation\n");
+
        enetc_configure_port(pf);
 
        enetc_get_si_caps(si);
@@ -886,6 +1022,8 @@ static int enetc_pf_probe(struct pci_dev *pdev,
        enetc_pf_netdev_setup(si, ndev, &enetc_ndev_ops);
 
        priv = netdev_priv(ndev);
+       priv->phy_node = pf->phy_node;
+       priv->if_mode = pf->if_mode;
 
        enetc_init_si_rings_params(priv);
 
@@ -901,9 +1039,9 @@ static int enetc_pf_probe(struct pci_dev *pdev,
                goto err_alloc_msix;
        }
 
-       err = enetc_of_get_phy(priv);
+       err = enetc_configure_serdes(priv);
        if (err)
-               dev_warn(&pdev->dev, "Fallback to PHY-less operation\n");
+               dev_warn(&pdev->dev, "Attempted SerDes config but failed\n");
 
        err = register_netdev(ndev);
        if (err)
@@ -914,7 +1052,7 @@ static int enetc_pf_probe(struct pci_dev *pdev,
        return 0;
 
 err_reg_netdev:
-       enetc_of_put_phy(priv);
+       enetc_teardown_serdes(priv);
        enetc_free_msix(priv);
 err_alloc_msix:
        enetc_free_si_resources(priv);
@@ -922,6 +1060,7 @@ err_alloc_si_res:
        si->ndev = NULL;
        free_netdev(ndev);
 err_alloc_netdev:
+       enetc_of_put_phy(pf);
 err_map_pf_space:
        enetc_pci_remove(pdev);
 
@@ -940,8 +1079,9 @@ static void enetc_pf_remove(struct pci_dev *pdev)
        priv = netdev_priv(si->ndev);
        unregister_netdev(si->ndev);
 
+       enetc_teardown_serdes(priv);
        enetc_mdio_remove(pf);
-       enetc_of_put_phy(priv);
+       enetc_of_put_phy(pf);
 
        enetc_free_msix(priv);
 
index 59e65a6..0d0ee91 100644 (file)
@@ -44,6 +44,11 @@ struct enetc_pf {
        DECLARE_BITMAP(active_vlans, VLAN_N_VID);
 
        struct mii_bus *mdio; /* saved for cleanup */
+       struct mii_bus *imdio;
+       struct phy_device *pcs;
+
+       struct device_node *phy_node;
+       phy_interface_t if_mode;
 };
 
 int enetc_msg_psi_init(struct enetc_pf *pf);
index fd3df19..a993955 100644 (file)
@@ -389,6 +389,7 @@ struct enetc_psfp_filter {
        u32 index;
        s32 handle;
        s8 prio;
+       u32 maxsdu;
        u32 gate_id;
        s32 meter_id;
        refcount_t refcount;
@@ -407,10 +408,26 @@ struct enetc_psfp_gate {
        struct action_gate_entry entries[0];
 };
 
+/* Only the green-colored (committed) traffic of a frame is policed for
+ * now. eir/ebs, the color-blind mode and the couple flag will be added
+ * once the policing action exposes more offload parameters.
+ */
+struct enetc_psfp_meter {
+       u32 index;      /* flow meter instance table index */
+       u32 cir;        /* committed rate, bytes/s (from the police action) */
+       u32 cbs;        /* committed burst size */
+       refcount_t refcount;    /* shared by all filters referencing it */
+       struct hlist_node node; /* entry in epsfp.psfp_meter_list */
+};
+
+#define ENETC_PSFP_FLAGS_FMI BIT(0)
+
 struct enetc_stream_filter {
        struct enetc_streamid sid;
        u32 sfi_index;
        u32 sgi_index;
+       u32 flags;
+       u32 fmi_index;
        struct flow_stats stats;
        struct hlist_node node;
 };
@@ -421,6 +438,7 @@ struct enetc_psfp {
        struct hlist_head stream_list;
        struct hlist_head psfp_filter_list;
        struct hlist_head psfp_gate_list;
+       struct hlist_head psfp_meter_list;
        spinlock_t psfp_lock; /* spinlock for the struct enetc_psfp r/w */
 };
 
@@ -430,6 +448,12 @@ static struct actions_fwd enetc_act_fwd[] = {
                BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
                FILTER_ACTION_TYPE_PSFP
        },
+       {
+               BIT(FLOW_ACTION_POLICE) |
+               BIT(FLOW_ACTION_GATE),
+               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS),
+               FILTER_ACTION_TYPE_PSFP
+       },
        /* example for ACL actions */
        {
                BIT(FLOW_ACTION_DROP),
@@ -487,7 +511,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
 
        cbd.addr[0] = lower_32_bits(dma);
        cbd.addr[1] = upper_32_bits(dma);
-       memset(si_data->dmac, 0xff, ETH_ALEN);
+       eth_broadcast_addr(si_data->dmac);
        si_data->vid_vidm_tg =
                cpu_to_le16(ENETC_CBDR_SID_VID_MASK
                            + ((0x3 << 14) | ENETC_CBDR_SID_VIDM));
@@ -594,8 +618,12 @@ static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
        /* Filter Type. Identifies the contents of the MSDU/FM_INST_INDEX
         * field as being either an MSDU value or an index into the Flow
         * Meter Instance table.
-        * TODO: no limit max sdu
         */
+       if (sfi->maxsdu) {
+               sfi_config->msdu =
+               cpu_to_le16(sfi->maxsdu);
+               sfi_config->multi |= 0x40;
+       }
 
        if (sfi->meter_id >= 0) {
                sfi_config->fm_inst_table_index = cpu_to_le16(sfi->meter_id);
@@ -831,6 +859,47 @@ exit:
        return err;
 }
 
+/* Program (or, when !enable, clear) a Flow Meter Instance table entry via
+ * a control BD command. Only the committed (green) rate/burst is set;
+ * eir/ebs and all color/coupling options are left disabled.
+ * Returns the enetc_send_cmd() result.
+ */
+static int enetc_flowmeter_hw_set(struct enetc_ndev_priv *priv,
+                                 struct enetc_psfp_meter *fmi,
+                                 u8 enable)
+{
+       struct enetc_cbd cbd = { .cmd = 0 };
+       struct fmi_conf *fmi_config;
+       u64 temp = 0;
+
+       cbd.index = cpu_to_le16((u16)fmi->index);
+       cbd.cls = BDCR_CMD_FLOW_METER;
+       cbd.status_flags = 0x80;
+
+       /* a disable request only needs index/class, no config payload */
+       if (!enable)
+               return enetc_send_cmd(priv->si, &cbd);
+
+       fmi_config = &cbd.fmi_conf;
+       fmi_config->en = 0x80;
+
+       if (fmi->cir) {
+               /* convert bytes/s to the h/w rate unit: cir * 8000 / 3725.
+                * NOTE(review): 3725 looks h/w specific — confirm against
+                * the ENETC reference manual.
+                */
+               temp = (u64)8000 * fmi->cir;
+               temp = div_u64(temp, 3725);
+       }
+
+       fmi_config->cir = cpu_to_le32((u32)temp);
+       fmi_config->cbs = cpu_to_le32(fmi->cbs);
+
+       /* Default for eir ebs disable */
+       fmi_config->eir = 0;
+       fmi_config->ebs = 0;
+
+       /* Default:
+        * mark red disable
+        * drop on yellow disable
+        * color mode disable
+        * couple flag disable
+        */
+       fmi_config->conf = 0;
+
+       return enetc_send_cmd(priv->si, &cbd);
+}
+
 static struct enetc_stream_filter *enetc_get_stream_by_index(u32 index)
 {
        struct enetc_stream_filter *f;
@@ -864,6 +933,17 @@ static struct enetc_psfp_filter *enetc_get_filter_by_index(u32 index)
        return NULL;
 }
 
+/* Look up a flow meter by its h/w table index in the global meter list;
+ * returns NULL when no meter with that index exists.
+ */
+static struct enetc_psfp_meter *enetc_get_meter_by_index(u32 index)
+{
+       struct enetc_psfp_meter *m;
+
+       hlist_for_each_entry(m, &epsfp.psfp_meter_list, node)
+               if (m->index == index)
+                       return m;
+
+       return NULL;
+}
+
 static struct enetc_psfp_filter
        *enetc_psfp_check_sfi(struct enetc_psfp_filter *sfi)
 {
@@ -872,6 +952,7 @@ static struct enetc_psfp_filter
        hlist_for_each_entry(s, &epsfp.psfp_filter_list, node)
                if (s->gate_id == sfi->gate_id &&
                    s->prio == sfi->prio &&
+                   s->maxsdu == sfi->maxsdu &&
                    s->meter_id == sfi->meter_id)
                        return s;
 
@@ -922,9 +1003,27 @@ static void stream_gate_unref(struct enetc_ndev_priv *priv, u32 index)
        }
 }
 
+/* Drop one reference on the flow meter at @index; when the last reference
+ * goes away, disable the h/w flow meter instance and free the entry.
+ * Fixed: the original WARN_ON(!fmi) fell through to an unconditional
+ * fmi->refcount dereference, crashing on the very path it warned about —
+ * now we bail out after the warning.
+ */
+static void flow_meter_unref(struct enetc_ndev_priv *priv, u32 index)
+{
+       struct enetc_psfp_meter *fmi;
+
+       fmi = enetc_get_meter_by_index(index);
+       if (WARN_ON(!fmi))
+               return;
+
+       if (refcount_dec_and_test(&fmi->refcount)) {
+               enetc_flowmeter_hw_set(priv, fmi, false);
+               hlist_del(&fmi->node);
+               kfree(fmi);
+       }
+}
+
 static void remove_one_chain(struct enetc_ndev_priv *priv,
                             struct enetc_stream_filter *filter)
 {
+       if (filter->flags & ENETC_PSFP_FLAGS_FMI)
+               flow_meter_unref(priv, filter->fmi_index);
+
        stream_gate_unref(priv, filter->sgi_index);
        stream_filter_unref(priv, filter->sfi_index);
 
@@ -935,7 +1034,8 @@ static void remove_one_chain(struct enetc_ndev_priv *priv,
 static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv,
                             struct enetc_streamid *sid,
                             struct enetc_psfp_filter *sfi,
-                            struct enetc_psfp_gate *sgi)
+                            struct enetc_psfp_gate *sgi,
+                            struct enetc_psfp_meter *fmi)
 {
        int err;
 
@@ -953,8 +1053,16 @@ static int enetc_psfp_hw_set(struct enetc_ndev_priv *priv,
        if (err)
                goto revert_sfi;
 
+       if (fmi) {
+               err = enetc_flowmeter_hw_set(priv, fmi, true);
+               if (err)
+                       goto revert_sgi;
+       }
+
        return 0;
 
+revert_sgi:
+       enetc_streamgate_hw_set(priv, sgi, false);
 revert_sfi:
        if (sfi)
                enetc_streamfilter_hw_set(priv, sfi, false);
@@ -979,9 +1087,11 @@ static struct actions_fwd *enetc_check_flow_actions(u64 acts,
 static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
                                      struct flow_cls_offload *f)
 {
+       struct flow_action_entry *entryg = NULL, *entryp = NULL;
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
        struct netlink_ext_ack *extack = f->common.extack;
        struct enetc_stream_filter *filter, *old_filter;
+       struct enetc_psfp_meter *fmi = NULL, *old_fmi;
        struct enetc_psfp_filter *sfi, *old_sfi;
        struct enetc_psfp_gate *sgi, *old_sgi;
        struct flow_action_entry *entry;
@@ -997,9 +1107,12 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
 
        flow_action_for_each(i, entry, &rule->action)
                if (entry->id == FLOW_ACTION_GATE)
-                       break;
+                       entryg = entry;
+               else if (entry->id == FLOW_ACTION_POLICE)
+                       entryp = entry;
 
-       if (entry->id != FLOW_ACTION_GATE)
+       /* Not support without gate action */
+       if (!entryg)
                return -EINVAL;
 
        filter = kzalloc(sizeof(*filter), GFP_KERNEL);
@@ -1079,19 +1192,19 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
        }
 
        /* parsing gate action */
-       if (entry->gate.index >= priv->psfp_cap.max_psfp_gate) {
+       if (entryg->gate.index >= priv->psfp_cap.max_psfp_gate) {
                NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
                err = -ENOSPC;
                goto free_filter;
        }
 
-       if (entry->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) {
+       if (entryg->gate.num_entries >= priv->psfp_cap.max_psfp_gatelist) {
                NL_SET_ERR_MSG_MOD(extack, "No Stream Gate resource!");
                err = -ENOSPC;
                goto free_filter;
        }
 
-       entries_size = struct_size(sgi, entries, entry->gate.num_entries);
+       entries_size = struct_size(sgi, entries, entryg->gate.num_entries);
        sgi = kzalloc(entries_size, GFP_KERNEL);
        if (!sgi) {
                err = -ENOMEM;
@@ -1099,18 +1212,18 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
        }
 
        refcount_set(&sgi->refcount, 1);
-       sgi->index = entry->gate.index;
-       sgi->init_ipv = entry->gate.prio;
-       sgi->basetime = entry->gate.basetime;
-       sgi->cycletime = entry->gate.cycletime;
-       sgi->num_entries = entry->gate.num_entries;
+       sgi->index = entryg->gate.index;
+       sgi->init_ipv = entryg->gate.prio;
+       sgi->basetime = entryg->gate.basetime;
+       sgi->cycletime = entryg->gate.cycletime;
+       sgi->num_entries = entryg->gate.num_entries;
 
        e = sgi->entries;
-       for (i = 0; i < entry->gate.num_entries; i++) {
-               e[i].gate_state = entry->gate.entries[i].gate_state;
-               e[i].interval = entry->gate.entries[i].interval;
-               e[i].ipv = entry->gate.entries[i].ipv;
-               e[i].maxoctets = entry->gate.entries[i].maxoctets;
+       for (i = 0; i < entryg->gate.num_entries; i++) {
+               e[i].gate_state = entryg->gate.entries[i].gate_state;
+               e[i].interval = entryg->gate.entries[i].interval;
+               e[i].ipv = entryg->gate.entries[i].ipv;
+               e[i].maxoctets = entryg->gate.entries[i].maxoctets;
        }
 
        filter->sgi_index = sgi->index;
@@ -1123,10 +1236,29 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
 
        refcount_set(&sfi->refcount, 1);
        sfi->gate_id = sgi->index;
-
-       /* flow meter not support yet */
        sfi->meter_id = ENETC_PSFP_WILDCARD;
 
+       /* Flow meter and max frame size */
+       if (entryp) {
+               if (entryp->police.burst) {
+                       fmi = kzalloc(sizeof(*fmi), GFP_KERNEL);
+                       if (!fmi) {
+                               err = -ENOMEM;
+                               goto free_sfi;
+                       }
+                       refcount_set(&fmi->refcount, 1);
+                       fmi->cir = entryp->police.rate_bytes_ps;
+                       fmi->cbs = entryp->police.burst;
+                       fmi->index = entryp->police.index;
+                       filter->flags |= ENETC_PSFP_FLAGS_FMI;
+                       filter->fmi_index = fmi->index;
+                       sfi->meter_id = fmi->index;
+               }
+
+               if (entryp->police.mtu)
+                       sfi->maxsdu = entryp->police.mtu;
+       }
+
        /* prio ref the filter prio */
        if (f->common.prio && f->common.prio <= BIT(3))
                sfi->prio = f->common.prio - 1;
@@ -1141,7 +1273,7 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
                if (sfi->handle < 0) {
                        NL_SET_ERR_MSG_MOD(extack, "No Stream Filter resource!");
                        err = -ENOSPC;
-                       goto free_sfi;
+                       goto free_fmi;
                }
 
                sfi->index = index;
@@ -1157,11 +1289,23 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
        }
 
        err = enetc_psfp_hw_set(priv, &filter->sid,
-                               sfi_overwrite ? NULL : sfi, sgi);
+                               sfi_overwrite ? NULL : sfi, sgi, fmi);
        if (err)
-               goto free_sfi;
+               goto free_fmi;
 
        spin_lock(&epsfp.psfp_lock);
+       if (filter->flags & ENETC_PSFP_FLAGS_FMI) {
+               old_fmi = enetc_get_meter_by_index(filter->fmi_index);
+               if (old_fmi) {
+                       fmi->refcount = old_fmi->refcount;
+                       refcount_set(&fmi->refcount,
+                                    refcount_read(&old_fmi->refcount) + 1);
+                       hlist_del(&old_fmi->node);
+                       kfree(old_fmi);
+               }
+               hlist_add_head(&fmi->node, &epsfp.psfp_meter_list);
+       }
+
        /* Remove the old node if exist and update with a new node */
        old_sgi = enetc_get_gate_by_index(filter->sgi_index);
        if (old_sgi) {
@@ -1192,6 +1336,8 @@ static int enetc_psfp_parse_clsflower(struct enetc_ndev_priv *priv,
 
        return 0;
 
+free_fmi:
+       kfree(fmi);
 free_sfi:
        kfree(sfi);
 free_gate:
@@ -1290,13 +1436,20 @@ static int enetc_psfp_get_stats(struct enetc_ndev_priv *priv,
                return -EINVAL;
 
        spin_lock(&epsfp.psfp_lock);
-       stats.pkts = counters.matching_frames_count - filter->stats.pkts;
+       stats.pkts = counters.matching_frames_count +
+                    counters.not_passing_sdu_count -
+                    filter->stats.pkts;
+       stats.drops = counters.not_passing_frames_count +
+                     counters.not_passing_sdu_count +
+                     counters.red_frames_count -
+                     filter->stats.drops;
        stats.lastused = filter->stats.lastused;
        filter->stats.pkts += stats.pkts;
+       filter->stats.drops += stats.drops;
        spin_unlock(&epsfp.psfp_lock);
 
-       flow_stats_update(&f->stats, 0x0, stats.pkts, stats.lastused,
-                         FLOW_ACTION_HW_STATS_DELAYED);
+       flow_stats_update(&f->stats, 0x0, stats.pkts, stats.drops,
+                         stats.lastused, FLOW_ACTION_HW_STATS_DELAYED);
 
        return 0;
 }
index a6cdd5b..d8d76da 100644 (file)
@@ -525,11 +525,6 @@ struct fec_enet_private {
        unsigned int total_tx_ring_size;
        unsigned int total_rx_ring_size;
 
-       unsigned long work_tx;
-       unsigned long work_rx;
-       unsigned long work_ts;
-       unsigned long work_mdio;
-
        struct  platform_device *pdev;
 
        int     dev_id;
index 2d0d313..a933812 100644 (file)
@@ -75,8 +75,6 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
 
 #define DRIVER_NAME    "fec"
 
-#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))
-
 /* Pause frame feild and FIFO threshold */
 #define FEC_ENET_FCE   (1 << 5)
 #define FEC_ENET_RSEM_V        0x84
@@ -710,8 +708,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
                                   struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
-       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-       int total_len, data_left;
+       int hdr_len, total_len, data_left;
        struct bufdesc *bdp = txq->bd.cur;
        struct tso_t tso;
        unsigned int index = 0;
@@ -731,7 +728,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
        }
 
        /* Initialize the TSO handler, and prepare the first payload */
-       tso_start(skb, &tso);
+       hdr_len = tso_start(skb, &tso);
 
        total_len = skb->len - hdr_len;
        while (total_len > 0) {
@@ -1248,8 +1245,6 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 
        fep = netdev_priv(ndev);
 
-       queue_id = FEC_ENET_GET_QUQUE(queue_id);
-
        txq = fep->tx_queue[queue_id];
        /* get next bdp of dirty_tx */
        nq = netdev_get_tx_queue(ndev, queue_id);
@@ -1340,17 +1335,14 @@ skb_done:
                writel(0, txq->bd.reg_desc_active);
 }
 
-static void
-fec_enet_tx(struct net_device *ndev)
+static void fec_enet_tx(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
-       u16 queue_id;
-       /* First process class A queue, then Class B and Best Effort queue */
-       for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
-               clear_bit(queue_id, &fep->work_tx);
-               fec_enet_tx_queue(ndev, queue_id);
-       }
-       return;
+       int i;
+
+       /* Make sure that AVB queues are processed first. */
+       for (i = fep->num_tx_queues - 1; i >= 0; i--)
+               fec_enet_tx_queue(ndev, i);
 }
 
 static int
@@ -1426,7 +1418,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 #ifdef CONFIG_M532x
        flush_cache_all();
 #endif
-       queue_id = FEC_ENET_GET_QUQUE(queue_id);
        rxq = fep->rx_queue[queue_id];
 
        /* First, grab all of the stats for the incoming packet.
@@ -1550,6 +1541,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                                               htons(ETH_P_8021Q),
                                               vlan_tag);
 
+               skb_record_rx_queue(skb, queue_id);
                napi_gro_receive(&fep->napi, skb);
 
                if (is_copybreak) {
@@ -1595,48 +1587,30 @@ rx_processing_done:
        return pkt_received;
 }
 
-static int
-fec_enet_rx(struct net_device *ndev, int budget)
+static int fec_enet_rx(struct net_device *ndev, int budget)
 {
-       int     pkt_received = 0;
-       u16     queue_id;
        struct fec_enet_private *fep = netdev_priv(ndev);
+       int i, done = 0;
 
-       for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
-               int ret;
+       /* Make sure that AVB queues are processed first. */
+       for (i = fep->num_rx_queues - 1; i >= 0; i--)
+               done += fec_enet_rx_queue(ndev, budget - done, i);
 
-               ret = fec_enet_rx_queue(ndev,
-                                       budget - pkt_received, queue_id);
-
-               if (ret < budget - pkt_received)
-                       clear_bit(queue_id, &fep->work_rx);
-
-               pkt_received += ret;
-       }
-       return pkt_received;
+       return done;
 }
 
-static bool
-fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
+static bool fec_enet_collect_events(struct fec_enet_private *fep)
 {
-       if (int_events == 0)
-               return false;
+       uint int_events;
+
+       int_events = readl(fep->hwp + FEC_IEVENT);
 
-       if (int_events & FEC_ENET_RXF_0)
-               fep->work_rx |= (1 << 2);
-       if (int_events & FEC_ENET_RXF_1)
-               fep->work_rx |= (1 << 0);
-       if (int_events & FEC_ENET_RXF_2)
-               fep->work_rx |= (1 << 1);
+       /* Don't clear MDIO events, we poll for those */
+       int_events &= ~FEC_ENET_MII;
 
-       if (int_events & FEC_ENET_TXF_0)
-               fep->work_tx |= (1 << 2);
-       if (int_events & FEC_ENET_TXF_1)
-               fep->work_tx |= (1 << 0);
-       if (int_events & FEC_ENET_TXF_2)
-               fep->work_tx |= (1 << 1);
+       writel(int_events, fep->hwp + FEC_IEVENT);
 
-       return true;
+       return int_events != 0;
 }
 
 static irqreturn_t
@@ -1644,18 +1618,9 @@ fec_enet_interrupt(int irq, void *dev_id)
 {
        struct net_device *ndev = dev_id;
        struct fec_enet_private *fep = netdev_priv(ndev);
-       uint int_events;
        irqreturn_t ret = IRQ_NONE;
 
-       int_events = readl(fep->hwp + FEC_IEVENT);
-
-       /* Don't clear MDIO events, we poll for those */
-       int_events &= ~FEC_ENET_MII;
-
-       writel(int_events, fep->hwp + FEC_IEVENT);
-       fec_enet_collect_events(fep, int_events);
-
-       if ((fep->work_tx || fep->work_rx) && fep->link) {
+       if (fec_enet_collect_events(fep) && fep->link) {
                ret = IRQ_HANDLED;
 
                if (napi_schedule_prep(&fep->napi)) {
@@ -1672,17 +1637,19 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
 {
        struct net_device *ndev = napi->dev;
        struct fec_enet_private *fep = netdev_priv(ndev);
-       int pkts;
+       int done = 0;
 
-       pkts = fec_enet_rx(ndev, budget);
-
-       fec_enet_tx(ndev);
+       do {
+               done += fec_enet_rx(ndev, budget - done);
+               fec_enet_tx(ndev);
+       } while ((done < budget) && fec_enet_collect_events(fep));
 
-       if (pkts < budget) {
-               napi_complete_done(napi, pkts);
+       if (done < budget) {
+               napi_complete_done(napi, done);
                writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
        }
-       return pkts;
+
+       return done;
 }
 
 /* ------------------------------------------------------------------------- */
@@ -3711,6 +3678,8 @@ fec_probe(struct platform_device *pdev)
        fec_enet_clk_enable(ndev, false);
        pinctrl_pm_select_sleep_state(&pdev->dev);
 
+       ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;
+
        ret = register_netdev(ndev);
        if (ret)
                goto failed_register;
index 945643c..93a8655 100644 (file)
@@ -103,11 +103,6 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
        u64 ns;
        val = 0;
 
-       if (!(fep->hwts_tx_en || fep->hwts_rx_en)) {
-               dev_err(&fep->pdev->dev, "No ptp stack is running\n");
-               return -EINVAL;
-       }
-
        if (fep->pps_enable == enable)
                return 0;
 
@@ -269,7 +264,7 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
        fep->cc.mult = FEC_CC_MULT;
 
        /* reset the ns time counter */
-       timecounter_init(&fep->tc, &fep->cc, ktime_to_ns(ktime_get_real()));
+       timecounter_init(&fep->tc, &fep->cc, 0);
 
        spin_unlock_irqrestore(&fep->tmreg_lock, flags);
 }
@@ -478,9 +473,7 @@ int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
 
        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
-               if (fep->hwts_rx_en)
-                       fep->hwts_rx_en = 0;
-               config.rx_filter = HWTSTAMP_FILTER_NONE;
+               fep->hwts_rx_en = 0;
                break;
 
        default:
@@ -577,7 +570,7 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
        int ret;
 
        fep->ptp_caps.owner = THIS_MODULE;
-       snprintf(fep->ptp_caps.name, 16, "fec ptp");
+       strlcpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
 
        fep->ptp_caps.max_adj = 250000000;
        fep->ptp_caps.n_alarm = 0;
index b0d4b19..bf846b4 100644 (file)
@@ -1043,8 +1043,7 @@ out_cleanup_data:
 out_free_dev:
        free_netdev(ndev);
 out_put:
-       if (fpi->clk_per)
-               clk_disable_unprepare(fpi->clk_per);
+       clk_disable_unprepare(fpi->clk_per);
 out_deregister_fixed_link:
        of_node_put(fpi->phy_node);
        if (of_phy_is_fixed_link(ofdev->dev.of_node))
@@ -1065,8 +1064,7 @@ static int fs_enet_remove(struct platform_device *ofdev)
        fep->ops->cleanup_data(ndev);
        dev_set_drvdata(fep->dev, NULL);
        of_node_put(fep->fpi->phy_node);
-       if (fep->fpi->clk_per)
-               clk_disable_unprepare(fep->fpi->clk_per);
+       clk_disable_unprepare(fep->fpi->clk_per);
        if (of_phy_is_fixed_link(ofdev->dev.of_node))
                of_phy_deregister_fixed_link(ofdev->dev.of_node);
        free_netdev(ndev);
index c82c85e..98be51d 100644 (file)
@@ -245,14 +245,19 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
        struct mii_bus *bus;
-       struct resource res;
+       struct resource *res;
        struct mdio_fsl_priv *priv;
        int ret;
 
-       ret = of_address_to_resource(np, 0, &res);
-       if (ret) {
+       /* In DPAA-1, MDIO is one of the many FMan sub-devices. The FMan
+        * defines a register space that spans a large area, covering all the
+        * subdevice areas. Therefore, MDIO cannot claim exclusive access to
+        * this register area.
+        */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
                dev_err(&pdev->dev, "could not obtain address\n");
-               return ret;
+               return -EINVAL;
        }
 
        bus = mdiobus_alloc_size(sizeof(struct mdio_fsl_priv));
@@ -263,21 +268,22 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
        bus->read = xgmac_mdio_read;
        bus->write = xgmac_mdio_write;
        bus->parent = &pdev->dev;
-       snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start);
+       bus->probe_capabilities = MDIOBUS_C22_C45;
+       snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res->start);
 
        /* Set the PHY base address */
        priv = bus->priv;
-       priv->mdio_base = of_iomap(np, 0);
+       priv->mdio_base = ioremap(res->start, resource_size(res));
        if (!priv->mdio_base) {
                ret = -ENOMEM;
                goto err_ioremap;
        }
 
-       priv->is_little_endian = of_property_read_bool(pdev->dev.of_node,
-                                                      "little-endian");
+       priv->is_little_endian = device_property_read_bool(&pdev->dev,
+                                                          "little-endian");
 
-       priv->has_a011043 = of_property_read_bool(pdev->dev.of_node,
-                                                 "fsl,erratum-a011043");
+       priv->has_a011043 = device_property_read_bool(&pdev->dev,
+                                                     "fsl,erratum-a011043");
 
        ret = of_mdiobus_register(bus, np);
        if (ret) {
@@ -320,10 +326,17 @@ static const struct of_device_id xgmac_mdio_match[] = {
 };
 MODULE_DEVICE_TABLE(of, xgmac_mdio_match);
 
+static const struct acpi_device_id xgmac_acpi_match[] = {
+       { "NXP0006" },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, xgmac_acpi_match);
+
 static struct platform_driver xgmac_mdio_driver = {
        .driver = {
                .name = "fsl-fman_xmdio",
                .of_match_table = xgmac_mdio_match,
+               .acpi_match_table = xgmac_acpi_match,
        },
        .probe = xgmac_mdio_probe,
        .remove = xgmac_mdio_remove,
index 4fb7769..8b2bf85 100644 (file)
@@ -1024,9 +1024,9 @@ static int hix5hd2_init_sg_desc_queue(struct hix5hd2_priv *priv)
        struct sg_desc *desc;
        dma_addr_t phys_addr;
 
-       desc = (struct sg_desc *)dma_alloc_coherent(priv->dev,
-                               TX_DESC_NUM * sizeof(struct sg_desc),
-                               &phys_addr, GFP_KERNEL);
+       desc = dma_alloc_coherent(priv->dev,
+                                 TX_DESC_NUM * sizeof(struct sg_desc),
+                                 &phys_addr, GFP_KERNEL);
        if (!desc)
                return -ENOMEM;
 
index 1c5243c..acfa86e 100644 (file)
@@ -1724,7 +1724,7 @@ static void hns_dsaf_setup_mc_mask(struct dsaf_device *dsaf_dev,
                                   u8 port_num, u8 *mask, u8 *addr)
 {
        if (MAC_IS_BROADCAST(addr))
-               memset(mask, 0xff, ETH_ALEN);
+               eth_broadcast_addr(mask);
        else
                memcpy(mask, dsaf_dev->mac_cb[port_num]->mc_mask, ETH_ALEN);
 }
index c117074..23f278e 100644 (file)
@@ -699,7 +699,7 @@ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
        struct net_device *ndev = ring_data->napi.dev;
 
        skb->protocol = eth_type_trans(skb, ndev);
-       (void)napi_gro_receive(&ring_data->napi, skb);
+       napi_gro_receive(&ring_data->napi, skb);
 }
 
 static int hns_desc_unused(struct hnae_ring *ring)
index b14f2ab..fe7d57a 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/cpu_rmap.h>
 #endif
 #include <linux/if_vlan.h>
+#include <linux/irq.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/module.h>
@@ -154,6 +155,7 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
 
                tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
 
+               irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN);
                ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
                                  tqp_vectors->name, tqp_vectors);
                if (ret) {
@@ -163,8 +165,6 @@ static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
                        return ret;
                }
 
-               disable_irq(tqp_vectors->vector_irq);
-
                irq_set_affinity_hint(tqp_vectors->vector_irq,
                                      &tqp_vectors->affinity_mask);
 
@@ -2097,10 +2097,8 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_set_drvdata(pdev, ae_dev);
 
        ret = hnae3_register_ae_dev(ae_dev);
-       if (ret) {
-               devm_kfree(&pdev->dev, ae_dev);
+       if (ret)
                pci_set_drvdata(pdev, NULL);
-       }
 
        return ret;
 }
@@ -2157,7 +2155,6 @@ static void hns3_shutdown(struct pci_dev *pdev)
        struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
 
        hnae3_unregister_ae_dev(ae_dev);
-       devm_kfree(&pdev->dev, ae_dev);
        pci_set_drvdata(pdev, NULL);
 
        if (system_state == SYSTEM_POWER_OFF)
@@ -2408,7 +2405,7 @@ static int hns3_alloc_desc(struct hns3_enet_ring *ring)
        return 0;
 }
 
-static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
+static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring,
                                   struct hns3_desc_cb *cb)
 {
        int ret;
@@ -2429,9 +2426,9 @@ out:
        return ret;
 }
 
-static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
+static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
 {
-       int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
+       int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]);
 
        if (ret)
                return ret;
@@ -2447,7 +2444,7 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
        int i, j, ret;
 
        for (i = 0; i < ring->desc_num; i++) {
-               ret = hns3_alloc_buffer_attach(ring, i);
+               ret = hns3_alloc_and_attach_buffer(ring, i);
                if (ret)
                        goto out_buffer_fail;
        }
@@ -2476,6 +2473,11 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
        ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
                                         ring->desc_cb[i].page_offset);
        ring->desc[i].rx.bd_base_info = 0;
+
+       dma_sync_single_for_device(ring_to_dev(ring),
+                       ring->desc_cb[i].dma + ring->desc_cb[i].page_offset,
+                       hns3_buf_size(ring),
+                       DMA_FROM_DEVICE);
 }
 
 static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head,
@@ -2593,7 +2595,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
 
                        hns3_reuse_buffer(ring, ring->next_to_use);
                } else {
-                       ret = hns3_reserve_buffer_map(ring, &res_cbs);
+                       ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
                        if (ret) {
                                u64_stats_update_begin(&ring->syncp);
                                ring->stats.sw_err_cnt++;
@@ -2921,6 +2923,11 @@ static int hns3_add_frag(struct hns3_enet_ring *ring)
                        skb = ring->tail_skb;
                }
 
+               dma_sync_single_for_cpu(ring_to_dev(ring),
+                               desc_cb->dma + desc_cb->page_offset,
+                               hns3_buf_size(ring),
+                               DMA_FROM_DEVICE);
+
                hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
                trace_hns3_rx_desc(ring);
                ring_ptr_move_fw(ring, next_to_clean);
@@ -3072,8 +3079,14 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
        if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
                return -ENXIO;
 
-       if (!skb)
-               ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
+       if (!skb) {
+               ring->va = desc_cb->buf + desc_cb->page_offset;
+
+               dma_sync_single_for_cpu(ring_to_dev(ring),
+                               desc_cb->dma + desc_cb->page_offset,
+                               hns3_buf_size(ring),
+                               DMA_FROM_DEVICE);
+       }
 
        /* Prefetch first cache line of first page
         * Idea is to cache few bytes of the header of the packet. Our L1 Cache
@@ -4127,9 +4140,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
 
        hns3_put_ring_config(priv);
 
-       hns3_dbg_uninit(handle);
-
 out_netdev_free:
+       hns3_dbg_uninit(handle);
        free_netdev(netdev);
 }
 
@@ -4187,7 +4199,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
                 * stack, so we need to replace the buffer here.
                 */
                if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
-                       ret = hns3_reserve_buffer_map(ring, &res_cbs);
+                       ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
                        if (ret) {
                                u64_stats_update_begin(&ring->syncp);
                                ring->stats.sw_err_cnt++;
index 66cd439..9f64077 100644 (file)
@@ -407,7 +407,7 @@ struct hns3_enet_ring {
 
        u32 pull_len; /* head length for current packet */
        u32 frag_num;
-       unsigned char *va; /* first buffer address for current packet */
+       void *va; /* first buffer address for current packet */
 
        u32 flag;          /* ring attribute */
 
index 6b1545f..2622e04 100644 (file)
@@ -180,18 +180,21 @@ static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring,
 {
        struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector;
        unsigned char *packet = skb->data;
+       u32 len = skb_headlen(skb);
        u32 i;
 
-       for (i = 0; i < skb->len; i++)
+       len = min_t(u32, len, HNS3_NIC_LB_TEST_PACKET_SIZE);
+
+       for (i = 0; i < len; i++)
                if (packet[i] != (unsigned char)(i & 0xff))
                        break;
 
        /* The packet is correctly received */
-       if (i == skb->len)
+       if (i == HNS3_NIC_LB_TEST_PACKET_SIZE)
                tqp_vector->rx_group.total_packets++;
        else
                print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1,
-                              skb->data, skb->len, true);
+                              skb->data, len, true);
 
        dev_kfree_skb_any(skb);
 }
index 96bfad5..d6bfdc6 100644 (file)
@@ -9859,7 +9859,7 @@ retry:
        set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
        hdev->reset_type = HNAE3_FLR_RESET;
        ret = hclge_reset_prepare(hdev);
-       if (ret) {
+       if (ret || hdev->reset_pending) {
                dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
                        ret);
                if (hdev->reset_pending ||
index 1b9578d..a10b022 100644 (file)
@@ -1793,6 +1793,11 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
        if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
                hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
                ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+               if (ret) {
+                       dev_err(&hdev->pdev->dev,
+                               "failed to assert VF reset, ret = %d\n", ret);
+                       return ret;
+               }
                hdev->rst_stats.vf_func_rst_cnt++;
        }
 
index 32a011c..67b59d0 100644 (file)
@@ -4,4 +4,4 @@ obj-$(CONFIG_HINIC) += hinic.o
 hinic-y := hinic_main.o hinic_tx.o hinic_rx.o hinic_port.o hinic_hw_dev.o \
           hinic_hw_io.o hinic_hw_qp.o hinic_hw_cmdq.o hinic_hw_wq.o \
           hinic_hw_mgmt.o hinic_hw_api_cmd.o hinic_hw_eqs.o hinic_hw_if.o \
-          hinic_common.o hinic_ethtool.o hinic_hw_mbox.o hinic_sriov.o
+          hinic_common.o hinic_ethtool.o hinic_devlink.o hinic_hw_mbox.o hinic_sriov.o
index 48b40be..befd925 100644 (file)
 
 #define HINIC_DRV_NAME          "hinic"
 
+#define LP_PKT_CNT             64
+
 enum hinic_flags {
        HINIC_LINK_UP = BIT(0),
        HINIC_INTF_UP = BIT(1),
        HINIC_RSS_ENABLE = BIT(2),
        HINIC_LINK_DOWN = BIT(3),
+       HINIC_LP_TEST = BIT(4),
 };
 
 struct hinic_rx_mode_work {
@@ -49,6 +52,12 @@ enum hinic_rss_hash_type {
        HINIC_RSS_HASH_ENGINE_TYPE_MAX,
 };
 
+struct hinic_intr_coal_info {
+       u8      pending_limt;
+       u8      coalesce_timer_cfg;
+       u8      resend_timer_cfg;
+};
+
 struct hinic_dev {
        struct net_device               *netdev;
        struct hinic_hwdev              *hwdev;
@@ -82,7 +91,17 @@ struct hinic_dev {
        struct hinic_rss_type           rss_type;
        u8                              *rss_hkey_user;
        s32                             *rss_indir_user;
+       struct hinic_intr_coal_info     *rx_intr_coalesce;
+       struct hinic_intr_coal_info     *tx_intr_coalesce;
        struct hinic_sriov_info sriov_info;
+       int                             lb_test_rx_idx;
+       int                             lb_pkt_len;
+       u8                              *lb_test_rx_buf;
+       struct devlink                  *devlink;
+};
+
+struct hinic_devlink_priv {
+       struct hinic_hwdev              *hwdev;
 };
 
 #endif
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
new file mode 100644 (file)
index 0000000..a40a10a
--- /dev/null
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ */
+#include <linux/netlink.h>
+#include <net/devlink.h>
+#include <linux/firmware.h>
+
+#include "hinic_dev.h"
+#include "hinic_port.h"
+#include "hinic_devlink.h"
+
+/* Validate the header of a user-supplied firmware image and fill
+ * @host_image with the per-section metadata.  Returns true when the
+ * magic, section count and total length are all consistent.
+ */
+static bool check_image_valid(struct hinic_devlink_priv *priv, const u8 *buf,
+                             u32 image_size, struct host_image_st *host_image)
+{
+       struct fw_image_st *fw_image = NULL;
+       u32 len = 0;
+       u32 i;
+
+       /* The file starts with a fixed UPDATEFW_IMAGE_HEAD_SIZE header
+        * laid out as struct fw_image_st.  Reject images too small to
+        * contain it so the header reads below never go out of bounds
+        * (the image comes from userspace and is untrusted).
+        */
+       if (image_size < UPDATEFW_IMAGE_HEAD_SIZE) {
+               dev_err(&priv->hwdev->hwif->pdev->dev, "Wrong data size read from file\n");
+               return false;
+       }
+
+       fw_image = (struct fw_image_st *)buf;
+
+       if (fw_image->fw_magic != HINIC_MAGIC_NUM) {
+               dev_err(&priv->hwdev->hwif->pdev->dev, "Wrong fw_magic read from file, fw_magic: 0x%x\n",
+                       fw_image->fw_magic);
+               return false;
+       }
+
+       /* fw_section_info[] holds at most MAX_FW_TYPE_NUM entries */
+       if (fw_image->fw_info.fw_section_cnt > MAX_FW_TYPE_NUM) {
+               dev_err(&priv->hwdev->hwif->pdev->dev, "Wrong fw_type_num read from file, fw_type_num: 0x%x\n",
+                       fw_image->fw_info.fw_section_cnt);
+               return false;
+       }
+
+       for (i = 0; i < fw_image->fw_info.fw_section_cnt; i++) {
+               len += fw_image->fw_section_info[i].fw_section_len;
+               memcpy(&host_image->image_section_info[i],
+                      &fw_image->fw_section_info[i],
+                      sizeof(struct fw_section_info_st));
+       }
+
+       /* Sum of section lengths must equal the declared payload length,
+        * and header + payload must equal the file size.
+        */
+       if (len != fw_image->fw_len ||
+           (fw_image->fw_len + UPDATEFW_IMAGE_HEAD_SIZE) != image_size) {
+               dev_err(&priv->hwdev->hwif->pdev->dev, "Wrong data size read from file\n");
+               return false;
+       }
+
+       host_image->image_info.up_total_len = fw_image->fw_len;
+       host_image->image_info.fw_version = fw_image->fw_version;
+       host_image->section_type_num = fw_image->fw_info.fw_section_cnt;
+       host_image->device_id = fw_image->device_id;
+
+       return true;
+}
+
+/* Verify that the set of sections in @host_image matches what the
+ * requested @update_type (cold/hot) requires.
+ */
+static bool check_image_integrity(struct hinic_devlink_priv *priv,
+                                 struct host_image_st *host_image,
+                                 u32 update_type)
+{
+       u32 collect_section_type = 0;
+       u32 i, type;
+
+       /* Build a bitmap of the section types present.  Section types
+        * come from an untrusted firmware file: reject out-of-range
+        * values before using them as a shift count (1U << type is
+        * undefined behavior for type >= 32).
+        */
+       for (i = 0; i < host_image->section_type_num; i++) {
+               type = host_image->image_section_info[i].fw_section_type;
+
+               if (type >= FILE_TYPE_TOTAL_NUM) {
+                       dev_err(&priv->hwdev->hwif->pdev->dev, "Invalid section type: %u\n",
+                               type);
+                       return false;
+               }
+
+               if (collect_section_type & (1U << type)) {
+                       dev_err(&priv->hwdev->hwif->pdev->dev, "Duplicate section type: %u\n",
+                               type);
+                       return false;
+               }
+               collect_section_type |= (1U << type);
+       }
+
+       /* A cold update must contain every UP + ucode section, or be a
+        * pure configuration image.
+        */
+       if (update_type == FW_UPDATE_COLD &&
+           (((collect_section_type & _IMAGE_COLD_SUB_MODULES_MUST_IN) ==
+              _IMAGE_COLD_SUB_MODULES_MUST_IN) ||
+             collect_section_type == _IMAGE_CFG_SUB_MODULES_MUST_IN))
+               return true;
+
+       if (update_type == FW_UPDATE_HOT &&
+           (collect_section_type & _IMAGE_HOT_SUB_MODULES_MUST_IN) ==
+           _IMAGE_HOT_SUB_MODULES_MUST_IN)
+               return true;
+
+       if (update_type == FW_UPDATE_COLD)
+               dev_err(&priv->hwdev->hwif->pdev->dev, "Check file integrity failed, valid: 0x%x or 0x%lx, current: 0x%x\n",
+                       _IMAGE_COLD_SUB_MODULES_MUST_IN,
+                       _IMAGE_CFG_SUB_MODULES_MUST_IN, collect_section_type);
+       else
+               dev_err(&priv->hwdev->hwif->pdev->dev, "Check file integrity failed, valid:0x%x, current: 0x%x\n",
+                       _IMAGE_HOT_SUB_MODULES_MUST_IN, collect_section_type);
+
+       return false;
+}
+
+/* Check that the board type recorded in the image matches the board
+ * type reported by the running firmware.
+ *
+ * Return type fixed from int to bool: the function returns true/false
+ * and is only used in boolean context, matching the other image
+ * checkers (check_image_valid / check_image_integrity).  It is static,
+ * so no external caller is affected.
+ */
+static bool check_image_device_type(struct hinic_devlink_priv *priv,
+                                   u32 image_device_type)
+{
+       struct hinic_comm_board_info board_info = {0};
+
+       if (hinic_get_board_info(priv->hwdev, &board_info)) {
+               dev_err(&priv->hwdev->hwif->pdev->dev, "Get board info failed\n");
+               return false;
+       }
+
+       if (image_device_type == board_info.info.board_type)
+               return true;
+
+       dev_err(&priv->hwdev->hwif->pdev->dev, "The device type of upgrade file doesn't match the device type of current firmware, please check the upgrade file\n");
+       dev_err(&priv->hwdev->hwif->pdev->dev, "The image device type: 0x%x, firmware device type: 0x%x\n",
+               image_device_type, board_info.info.board_type);
+
+       return false;
+}
+
+/* Stream every non-BOOT section of the image to the firmware in
+ * MAX_FW_FRAGMENT_LEN fragments via HINIC_PORT_CMD_UPDATE_FW.
+ * Returns 0 on success, a firmware status code or negative errno on
+ * failure.
+ */
+static int hinic_flash_fw(struct hinic_devlink_priv *priv, const u8 *data,
+                         struct host_image_st *host_image)
+{
+       u32 section_remain_send_len, send_fragment_len, send_pos, up_total_len;
+       struct hinic_cmd_update_fw *fw_update_msg = NULL;
+       u32 section_type, section_crc, section_version;
+       u32 i, len, section_len, section_offset;
+       u16 out_size = sizeof(*fw_update_msg);
+       int total_len_flag = 0;
+       int err;
+
+       fw_update_msg = kzalloc(sizeof(*fw_update_msg), GFP_KERNEL);
+       if (!fw_update_msg)
+               return -ENOMEM;
+
+       up_total_len = host_image->image_info.up_total_len;
+
+       /* The BOOT section is not flashed by this command; exclude its
+        * length from the total announced to the firmware.
+        */
+       for (i = 0; i < host_image->section_type_num; i++) {
+               len = host_image->image_section_info[i].fw_section_len;
+               if (host_image->image_section_info[i].fw_section_type ==
+                   UP_FW_UPDATE_BOOT) {
+                       up_total_len = up_total_len - len;
+                       break;
+               }
+       }
+
+       for (i = 0; i < host_image->section_type_num; i++) {
+               section_len =
+                       host_image->image_section_info[i].fw_section_len;
+               section_offset =
+                       host_image->image_section_info[i].fw_section_offset;
+               section_remain_send_len = section_len;
+               section_type =
+                       host_image->image_section_info[i].fw_section_type;
+               section_crc = host_image->image_section_info[i].fw_section_crc;
+               section_version =
+                       host_image->image_section_info[i].fw_section_version;
+
+               if (section_type == UP_FW_UPDATE_BOOT)
+                       continue;
+
+               send_fragment_len = 0;
+               send_pos = 0;
+
+               while (section_remain_send_len > 0) {
+                       /* The total length is carried only in the very
+                        * first fragment of the whole transfer.
+                        */
+                       if (!total_len_flag) {
+                               fw_update_msg->total_len = up_total_len;
+                               total_len_flag = 1;
+                       } else {
+                               fw_update_msg->total_len = 0;
+                       }
+
+                       memset(fw_update_msg->data, 0, MAX_FW_FRAGMENT_LEN);
+
+                       /* SF: first fragment of this section */
+                       fw_update_msg->ctl_info.SF =
+                               (section_remain_send_len == section_len);
+                       fw_update_msg->section_info.FW_section_CRC = section_crc;
+                       fw_update_msg->fw_section_version = section_version;
+                       fw_update_msg->ctl_info.flag = UP_TYPE_A;
+
+                       if (section_type <= UP_FW_UPDATE_UP_DATA_B) {
+                               fw_update_msg->section_info.FW_section_type =
+                                       (section_type % 2) ?
+                                       UP_FW_UPDATE_UP_DATA :
+                                       UP_FW_UPDATE_UP_TEXT;
+
+                               fw_update_msg->ctl_info.flag = UP_TYPE_B;
+                               if (section_type <= UP_FW_UPDATE_UP_DATA_A)
+                                       fw_update_msg->ctl_info.flag = UP_TYPE_A;
+                       } else {
+                               fw_update_msg->section_info.FW_section_type =
+                                       section_type - 0x2;
+                       }
+
+                       fw_update_msg->setion_total_len = section_len;
+                       fw_update_msg->section_offset = send_pos;
+
+                       /* SL: last fragment of this section */
+                       if (section_remain_send_len <= MAX_FW_FRAGMENT_LEN) {
+                               fw_update_msg->ctl_info.SL = true;
+                               fw_update_msg->ctl_info.fragment_len =
+                                       section_remain_send_len;
+                               send_fragment_len += section_remain_send_len;
+                       } else {
+                               fw_update_msg->ctl_info.SL = false;
+                               fw_update_msg->ctl_info.fragment_len =
+                                       MAX_FW_FRAGMENT_LEN;
+                               send_fragment_len += MAX_FW_FRAGMENT_LEN;
+                       }
+
+                       memcpy(fw_update_msg->data,
+                              data + UPDATEFW_IMAGE_HEAD_SIZE +
+                              section_offset + send_pos,
+                              fw_update_msg->ctl_info.fragment_len);
+
+                       /* out_size is an in/out parameter: the previous
+                        * command overwrote it with the actual response
+                        * length, so restore the full buffer size before
+                        * every call.
+                        */
+                       out_size = sizeof(*fw_update_msg);
+                       err = hinic_port_msg_cmd(priv->hwdev,
+                                                HINIC_PORT_CMD_UPDATE_FW,
+                                                fw_update_msg,
+                                                sizeof(*fw_update_msg),
+                                                fw_update_msg, &out_size);
+                       if (err || !out_size || fw_update_msg->status) {
+                               dev_err(&priv->hwdev->hwif->pdev->dev, "Failed to update firmware, err: %d, status: 0x%x, out size: 0x%x\n",
+                                       err, fw_update_msg->status, out_size);
+                               err = fw_update_msg->status ?
+                                       fw_update_msg->status : -EIO;
+                               kfree(fw_update_msg);
+                               return err;
+                       }
+
+                       send_pos = send_fragment_len;
+                       section_remain_send_len = section_len -
+                                                 send_fragment_len;
+               }
+       }
+
+       kfree(fw_update_msg);
+
+       return 0;
+}
+
+/* Validate @fw and flash it as a cold update; report failures both to
+ * the kernel log and to userspace via @extack.
+ */
+static int hinic_firmware_update(struct hinic_devlink_priv *priv,
+                                const struct firmware *fw,
+                                struct netlink_ext_ack *extack)
+{
+       struct host_image_st host_image = {0};
+       bool image_ok;
+       int err;
+
+       image_ok = check_image_valid(priv, fw->data, fw->size, &host_image) &&
+                  check_image_integrity(priv, &host_image, FW_UPDATE_COLD) &&
+                  check_image_device_type(priv, host_image.device_id);
+       if (!image_ok) {
+               NL_SET_ERR_MSG_MOD(extack, "Check image failed");
+               return -EINVAL;
+       }
+
+       dev_info(&priv->hwdev->hwif->pdev->dev, "Flash firmware begin\n");
+
+       err = hinic_flash_fw(priv, fw->data, &host_image);
+       if (!err) {
+               dev_info(&priv->hwdev->hwif->pdev->dev, "Flash firmware end\n");
+               return 0;
+       }
+
+       if (err == HINIC_FW_DISMATCH_ERROR) {
+               dev_err(&priv->hwdev->hwif->pdev->dev, "Firmware image doesn't match this card, please use newer image, err: %d\n",
+                       err);
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Firmware image doesn't match this card, please use newer image");
+       } else {
+               dev_err(&priv->hwdev->hwif->pdev->dev, "Send firmware image data failed, err: %d\n",
+                       err);
+               NL_SET_ERR_MSG_MOD(extack, "Send firmware image data failed");
+       }
+
+       return err;
+}
+
+/* devlink_ops.flash_update callback: load the firmware file named by
+ * userspace and flash it to the device.
+ */
+static int hinic_devlink_flash_update(struct devlink *devlink,
+                                     const char *file_name,
+                                     const char *component,
+                                     struct netlink_ext_ack *extack)
+{
+       struct hinic_devlink_priv *priv = devlink_priv(devlink);
+       const struct firmware *fw;
+       int err;
+
+       /* Per-component flashing is not supported; only whole-image */
+       if (component)
+               return -EOPNOTSUPP;
+
+       /* _direct: look up the file directly, no usermode-helper fallback */
+       err = request_firmware_direct(&fw, file_name,
+                                     &priv->hwdev->hwif->pdev->dev);
+       if (err)
+               return err;
+
+       err = hinic_firmware_update(priv, fw, extack);
+       release_firmware(fw);
+
+       return err;
+}
+
+static const struct devlink_ops hinic_devlink_ops = {
+       .flash_update = hinic_devlink_flash_update,
+};
+
+/* Allocate a devlink instance for this driver.
+ *
+ * NOTE(review): priv area is sized as struct hinic_dev, but
+ * hinic_devlink_flash_update() reads devlink_priv() as a
+ * struct hinic_devlink_priv — verify against the caller in
+ * hinic_main.c how the priv area is initialized.
+ */
+struct devlink *hinic_devlink_alloc(void)
+{
+       return devlink_alloc(&hinic_devlink_ops, sizeof(struct hinic_dev));
+}
+
+void hinic_devlink_free(struct devlink *devlink)
+{
+       devlink_free(devlink);
+}
+
+/* Register the devlink instance under @dev; thin wrapper so callers
+ * don't need the ops table.
+ */
+int hinic_devlink_register(struct devlink *devlink, struct device *dev)
+{
+       return devlink_register(devlink, dev);
+}
+
+void hinic_devlink_unregister(struct devlink *devlink)
+{
+       devlink_unregister(devlink);
+}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h
new file mode 100644 (file)
index 0000000..604e95a
--- /dev/null
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef __HINIC_DEVLINK_H__
+#define __HINIC_DEVLINK_H__
+
+#include <net/devlink.h>
+
+/* Firmware image file layout constants */
+#define MAX_FW_TYPE_NUM 30
+#define HINIC_MAGIC_NUM 0x18221100
+#define UPDATEFW_IMAGE_HEAD_SIZE 1024
+#define FW_UPDATE_COLD 0
+#define FW_UPDATE_HOT  1
+
+#define UP_TYPE_A 0x0
+#define UP_TYPE_B 0x1
+
+#define MAX_FW_FRAGMENT_LEN 1536
+#define HINIC_FW_DISMATCH_ERROR 10
+
+/* Section types as recorded in the firmware image file */
+enum hinic_fw_type {
+       UP_FW_UPDATE_UP_TEXT_A = 0x0,
+       UP_FW_UPDATE_UP_DATA_A,
+       UP_FW_UPDATE_UP_TEXT_B,
+       UP_FW_UPDATE_UP_DATA_B,
+       UP_FW_UPDATE_UP_DICT,
+
+       UP_FW_UPDATE_HLINK_ONE = 0x5,
+       UP_FW_UPDATE_HLINK_TWO,
+       UP_FW_UPDATE_HLINK_THR,
+       UP_FW_UPDATE_PHY,
+       UP_FW_UPDATE_TILE_TEXT,
+
+       UP_FW_UPDATE_TILE_DATA = 0xa,
+       UP_FW_UPDATE_TILE_DICT,
+       UP_FW_UPDATE_PPE_STATE,
+       UP_FW_UPDATE_PPE_BRANCH,
+       UP_FW_UPDATE_PPE_EXTACT,
+
+       UP_FW_UPDATE_CLP_LEGACY = 0xf,
+       UP_FW_UPDATE_PXE_LEGACY,
+       UP_FW_UPDATE_ISCSI_LEGACY,
+       UP_FW_UPDATE_CLP_EFI,
+       UP_FW_UPDATE_PXE_EFI,
+
+       UP_FW_UPDATE_ISCSI_EFI = 0x14,
+       UP_FW_UPDATE_CFG,
+       UP_FW_UPDATE_BOOT,
+       UP_FW_UPDATE_VPD,
+       FILE_TYPE_TOTAL_NUM
+};
+
+/* Bitmaps of section types that must be present for a given update kind;
+ * bit positions correspond to enum hinic_fw_type values.
+ */
+#define _IMAGE_UP_ALL_IN ((1 << UP_FW_UPDATE_UP_TEXT_A) | \
+                         (1 << UP_FW_UPDATE_UP_DATA_A) | \
+                         (1 << UP_FW_UPDATE_UP_TEXT_B) | \
+                         (1 << UP_FW_UPDATE_UP_DATA_B) | \
+                         (1 << UP_FW_UPDATE_UP_DICT) | \
+                         (1 << UP_FW_UPDATE_BOOT) | \
+                         (1 << UP_FW_UPDATE_HLINK_ONE) | \
+                         (1 << UP_FW_UPDATE_HLINK_TWO) | \
+                         (1 << UP_FW_UPDATE_HLINK_THR))
+
+#define _IMAGE_UCODE_ALL_IN ((1 << UP_FW_UPDATE_TILE_TEXT) | \
+                            (1 << UP_FW_UPDATE_TILE_DICT) | \
+                            (1 << UP_FW_UPDATE_PPE_STATE) | \
+                            (1 << UP_FW_UPDATE_PPE_BRANCH) | \
+                            (1 << UP_FW_UPDATE_PPE_EXTACT))
+
+#define _IMAGE_COLD_SUB_MODULES_MUST_IN (_IMAGE_UP_ALL_IN | _IMAGE_UCODE_ALL_IN)
+#define _IMAGE_HOT_SUB_MODULES_MUST_IN (_IMAGE_UP_ALL_IN | _IMAGE_UCODE_ALL_IN)
+#define _IMAGE_CFG_SUB_MODULES_MUST_IN BIT(UP_FW_UPDATE_CFG)
+/* Section types as reported to the firmware in the update command */
+#define UP_FW_UPDATE_UP_TEXT  0x0
+#define UP_FW_UPDATE_UP_DATA  0x1
+#define UP_FW_UPDATE_VPD_B    0x15
+
+/* Per-section metadata as stored in the image file header */
+struct fw_section_info_st {
+       u32 fw_section_len;
+       u32 fw_section_offset;
+       u32 fw_section_version;
+       u32 fw_section_type;
+       u32 fw_section_crc;
+};
+
+/* On-disk firmware image header; the first UPDATEFW_IMAGE_HEAD_SIZE
+ * bytes of the file are laid out like this, followed by the payload.
+ */
+struct fw_image_st {
+       u32 fw_version;
+       u32 fw_len;
+       u32 fw_magic;
+       struct {
+               u32 fw_section_cnt:16;
+               u32 resd:16;
+       } fw_info;
+       struct fw_section_info_st fw_section_info[MAX_FW_TYPE_NUM];
+       u32 device_id;
+       u32 res[101];
+       void *bin_data;
+};
+
+/* Host-side, validated copy of the image metadata */
+struct host_image_st {
+       struct fw_section_info_st image_section_info[MAX_FW_TYPE_NUM];
+       struct {
+               u32 up_total_len;
+               u32 fw_version;
+       } image_info;
+       u32 section_type_num;
+       u32 device_id;
+};
+
+struct devlink *hinic_devlink_alloc(void);
+void hinic_devlink_free(struct devlink *devlink);
+int hinic_devlink_register(struct devlink *devlink, struct device *dev);
+void hinic_devlink_unregister(struct devlink *devlink);
+
+#endif /* __HINIC_DEVLINK_H__ */
index efb02e0..cb5ebae 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/if_vlan.h>
 #include <linux/ethtool.h>
 #include <linux/vmalloc.h>
+#include <linux/sfp.h>
 
 #include "hinic_hw_qp.h"
 #include "hinic_hw_dev.h"
 #define ETHTOOL_ADD_ADVERTISED_LINK_MODE(ecmd, mode)   \
                                ((ecmd)->advertising |= ADVERTISED_##mode)
 
+#define COALESCE_PENDING_LIMIT_UNIT    8
+#define        COALESCE_TIMER_CFG_UNIT         9
+#define COALESCE_ALL_QUEUE             0xFFFF
+#define COALESCE_MAX_PENDING_LIMIT     (255 * COALESCE_PENDING_LIMIT_UNIT)
+#define COALESCE_MAX_TIMER_CFG         (255 * COALESCE_TIMER_CFG_UNIT)
+#define OBJ_STR_MAX_LEN                        32
+
 struct hw2ethtool_link_mode {
        enum ethtool_link_mode_bit_indices link_mode_bit;
        u32 speed;
@@ -126,6 +134,16 @@ static struct hw2ethtool_link_mode
        },
 };
 
+#define LP_DEFAULT_TIME                 5 /* seconds */
+#define LP_PKT_LEN                      1514
+
+#define PORT_DOWN_ERR_IDX              0
+enum diag_test_index {
+       INTERNAL_LP_TEST = 0,
+       EXTERNAL_LP_TEST = 1,
+       DIAG_TEST_MAX = 2,
+};
+
 static void set_link_speed(struct ethtool_link_ksettings *link_ksettings,
                           enum hinic_speed speed)
 {
@@ -613,6 +631,255 @@ static int hinic_set_ringparam(struct net_device *netdev,
 
        return 0;
 }
+
+/* Report coalescing settings for @queue (or for irq0 as the device-wide
+ * default when @queue is COALESCE_ALL_QUEUE), converting the stored
+ * hardware units back to microseconds and frames.
+ */
+static int __hinic_get_coalesce(struct net_device *netdev,
+                               struct ethtool_coalesce *coal, u16 queue)
+{
+       struct hinic_dev *nic_dev = netdev_priv(netdev);
+       struct hinic_intr_coal_info *rx_coal;
+       struct hinic_intr_coal_info *tx_coal;
+       u16 idx = 0;
+
+       if (queue != COALESCE_ALL_QUEUE) {
+               if (queue >= nic_dev->num_qps) {
+                       netif_err(nic_dev, drv, netdev,
+                                 "Invalid queue_id: %d\n", queue);
+                       return -EINVAL;
+               }
+               idx = queue;
+       }
+
+       /* COALESCE_ALL_QUEUE: tx/rx irq0 serves as the default */
+       rx_coal = &nic_dev->rx_intr_coalesce[idx];
+       tx_coal = &nic_dev->tx_intr_coalesce[idx];
+
+       /* timer is stored in units of 9us, pending limit in units of 8 */
+       coal->rx_coalesce_usecs =
+               rx_coal->coalesce_timer_cfg * COALESCE_TIMER_CFG_UNIT;
+       coal->rx_max_coalesced_frames =
+               rx_coal->pending_limt * COALESCE_PENDING_LIMIT_UNIT;
+       coal->tx_coalesce_usecs =
+               tx_coal->coalesce_timer_cfg * COALESCE_TIMER_CFG_UNIT;
+       coal->tx_max_coalesced_frames =
+               tx_coal->pending_limt * COALESCE_PENDING_LIMIT_UNIT;
+
+       return 0;
+}
+
+/* Return 0 when all requested coalesce values fit in the hardware's
+ * 8-bit fields (after unit conversion), -ERANGE otherwise.
+ */
+static int is_coalesce_exceed_limit(const struct ethtool_coalesce *coal)
+{
+       bool rx_ok = coal->rx_coalesce_usecs <= COALESCE_MAX_TIMER_CFG &&
+                    coal->rx_max_coalesced_frames <= COALESCE_MAX_PENDING_LIMIT;
+       bool tx_ok = coal->tx_coalesce_usecs <= COALESCE_MAX_TIMER_CFG &&
+                    coal->tx_max_coalesced_frames <= COALESCE_MAX_PENDING_LIMIT;
+
+       return (rx_ok && tx_ok) ? 0 : -ERANGE;
+}
+
+/* Store the new coalesce parameters for queue @q_id (rx or tx side per
+ * @set_rx_coal) and, if the interface is up and the queue is in use,
+ * program them into the hardware's MSI-X config.
+ */
+static int set_queue_coalesce(struct hinic_dev *nic_dev, u16 q_id,
+                             struct hinic_intr_coal_info *coal,
+                             bool set_rx_coal)
+{
+       struct hinic_intr_coal_info *intr_coal = NULL;
+       struct hinic_msix_config interrupt_info = {0};
+       struct net_device *netdev = nic_dev->netdev;
+       u16 msix_idx;
+       int err;
+
+       /* always update the cached per-queue settings first */
+       intr_coal = set_rx_coal ? &nic_dev->rx_intr_coalesce[q_id] :
+                   &nic_dev->tx_intr_coalesce[q_id];
+
+       intr_coal->coalesce_timer_cfg = coal->coalesce_timer_cfg;
+       intr_coal->pending_limt = coal->pending_limt;
+
+       /* netdev not running or qp not in using,
+        * don't need to set coalesce to hw
+        */
+       if (!(nic_dev->flags & HINIC_INTF_UP) ||
+           q_id >= nic_dev->num_qps)
+               return 0;
+
+       msix_idx = set_rx_coal ? nic_dev->rxqs[q_id].rq->msix_entry :
+                  nic_dev->txqs[q_id].sq->msix_entry;
+       interrupt_info.msix_index = msix_idx;
+       interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
+       interrupt_info.pending_cnt = intr_coal->pending_limt;
+       interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;
+
+       err = hinic_set_interrupt_cfg(nic_dev->hwdev, &interrupt_info);
+       if (err)
+               netif_warn(nic_dev, drv, netdev,
+                          "Failed to set %s queue%d coalesce",
+                          set_rx_coal ? "rx" : "tx", q_id);
+
+       return err;
+}
+
+/* Apply @intr_coal to one queue, or to every queue (up to max_qps) when
+ * @queue is COALESCE_ALL_QUEUE.  Stops at the first failure.
+ */
+static int __set_hw_coal_param(struct hinic_dev *nic_dev,
+                              struct hinic_intr_coal_info *intr_coal,
+                              u16 queue, bool set_rx_coal)
+{
+       int err;
+       u16 i;
+
+       /* single-queue path: validate the index, then set just that queue */
+       if (queue != COALESCE_ALL_QUEUE) {
+               if (queue >= nic_dev->num_qps) {
+                       netif_err(nic_dev, drv, nic_dev->netdev,
+                                 "Invalid queue_id: %d\n", queue);
+                       return -EINVAL;
+               }
+               return set_queue_coalesce(nic_dev, queue, intr_coal,
+                                         set_rx_coal);
+       }
+
+       for (i = 0; i < nic_dev->max_qps; i++) {
+               err = set_queue_coalesce(nic_dev, i, intr_coal, set_rx_coal);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+/* Validate the requested coalesce values, convert them from usecs and
+ * frames into hardware units, and program whichever of the rx/tx sides
+ * the request actually specifies (nonzero fields).
+ */
+static int __hinic_set_coalesce(struct net_device *netdev,
+                               struct ethtool_coalesce *coal, u16 queue)
+{
+       struct hinic_dev *nic_dev = netdev_priv(netdev);
+       struct hinic_intr_coal_info rx_intr_coal = {0};
+       struct hinic_intr_coal_info tx_intr_coal = {0};
+       bool set_rx_coal = false;
+       bool set_tx_coal = false;
+       int err;
+
+       err = is_coalesce_exceed_limit(coal);
+       if (err)
+               return err;
+
+       /* only touch a side whose values were actually supplied */
+       if (coal->rx_coalesce_usecs || coal->rx_max_coalesced_frames) {
+               rx_intr_coal.coalesce_timer_cfg =
+               (u8)(coal->rx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT);
+               rx_intr_coal.pending_limt = (u8)(coal->rx_max_coalesced_frames /
+                               COALESCE_PENDING_LIMIT_UNIT);
+               set_rx_coal = true;
+       }
+
+       if (coal->tx_coalesce_usecs || coal->tx_max_coalesced_frames) {
+               tx_intr_coal.coalesce_timer_cfg =
+               (u8)(coal->tx_coalesce_usecs / COALESCE_TIMER_CFG_UNIT);
+               tx_intr_coal.pending_limt = (u8)(coal->tx_max_coalesced_frames /
+               COALESCE_PENDING_LIMIT_UNIT);
+               set_tx_coal = true;
+       }
+
+       /* setting coalesce timer or pending limit to zero will disable
+        * coalesce
+        */
+       if (set_rx_coal && (!rx_intr_coal.coalesce_timer_cfg ||
+                           !rx_intr_coal.pending_limt))
+               netif_warn(nic_dev, drv, netdev, "RX coalesce will be disabled\n");
+       if (set_tx_coal && (!tx_intr_coal.coalesce_timer_cfg ||
+                           !tx_intr_coal.pending_limt))
+               netif_warn(nic_dev, drv, netdev, "TX coalesce will be disabled\n");
+
+       if (set_rx_coal) {
+               err = __set_hw_coal_param(nic_dev, &rx_intr_coal, queue, true);
+               if (err)
+                       return err;
+       }
+       if (set_tx_coal) {
+               err = __set_hw_coal_param(nic_dev, &tx_intr_coal, queue, false);
+               if (err)
+                       return err;
+       }
+       return 0;
+}
+
+/* ethtool_ops entry points: the device-wide variants operate on all
+ * queues via the COALESCE_ALL_QUEUE sentinel, the per-queue variants
+ * pass the queue index through.
+ */
+static int hinic_get_coalesce(struct net_device *netdev,
+                             struct ethtool_coalesce *coal)
+{
+       return __hinic_get_coalesce(netdev, coal, COALESCE_ALL_QUEUE);
+}
+
+static int hinic_set_coalesce(struct net_device *netdev,
+                             struct ethtool_coalesce *coal)
+{
+       return __hinic_set_coalesce(netdev, coal, COALESCE_ALL_QUEUE);
+}
+
+static int hinic_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
+                                       struct ethtool_coalesce *coal)
+{
+       return __hinic_get_coalesce(netdev, coal, queue);
+}
+
+static int hinic_set_per_queue_coalesce(struct net_device *netdev, u32 queue,
+                                       struct ethtool_coalesce *coal)
+{
+       return __hinic_set_coalesce(netdev, coal, queue);
+}
+
+/* ethtool get_pauseparam: report pause settings.  When a user has
+ * explicitly configured pause (pause_set) or autoneg is off, report the
+ * locally cached values; otherwise report what the hardware negotiated.
+ */
+static void hinic_get_pauseparam(struct net_device *netdev,
+                                struct ethtool_pauseparam *pause)
+{
+       struct hinic_dev *nic_dev = netdev_priv(netdev);
+       struct hinic_pause_config pause_info = {0};
+       struct hinic_nic_cfg *nic_cfg;
+       int err;
+
+       nic_cfg = &nic_dev->hwdev->func_to_io.nic_cfg;
+
+       err = hinic_get_hw_pause_info(nic_dev->hwdev, &pause_info);
+       /* NOTE(review): on error, *pause is left untouched with no log;
+        * the ethtool callback is void so there is no way to propagate
+        * the failure — confirm this silent behavior is intended.
+        */
+       if (!err) {
+               pause->autoneg = pause_info.auto_neg;
+               if (nic_cfg->pause_set || !pause_info.auto_neg) {
+                       pause->rx_pause = nic_cfg->rx_pause;
+                       pause->tx_pause = nic_cfg->tx_pause;
+               } else {
+                       pause->rx_pause = pause_info.rx_pause;
+                       pause->tx_pause = pause_info.tx_pause;
+               }
+       }
+}
+
+/* ethtool set_pauseparam: push the requested pause configuration to the
+ * hardware and, on success, remember it in the cached nic_cfg under
+ * cfg_mutex.  The autoneg setting must match the port's current state.
+ */
+static int hinic_set_pauseparam(struct net_device *netdev,
+                               struct ethtool_pauseparam *pause)
+{
+       struct hinic_dev *nic_dev = netdev_priv(netdev);
+       struct hinic_nic_cfg *nic_cfg = &nic_dev->hwdev->func_to_io.nic_cfg;
+       struct hinic_pause_config pause_info = {0};
+       struct hinic_port_cap port_cap = {0};
+       int err;
+
+       if (hinic_port_get_cap(nic_dev, &port_cap))
+               return -EIO;
+
+       /* pause autoneg cannot be toggled independently of the port */
+       if (pause->autoneg != port_cap.autoneg_state)
+               return -EOPNOTSUPP;
+
+       pause_info.auto_neg = pause->autoneg;
+       pause_info.rx_pause = pause->rx_pause;
+       pause_info.tx_pause = pause->tx_pause;
+
+       mutex_lock(&nic_cfg->cfg_mutex);
+       err = hinic_set_hw_pause_info(nic_dev->hwdev, &pause_info);
+       if (!err) {
+               /* cache the user's explicit choice for get_pauseparam */
+               nic_cfg->pause_set = true;
+               nic_cfg->auto_neg = pause->autoneg;
+               nic_cfg->rx_pause = pause->rx_pause;
+               nic_cfg->tx_pause = pause->tx_pause;
+       }
+       mutex_unlock(&nic_cfg->cfg_mutex);
+
+       return err;
+}
+
 static void hinic_get_channels(struct net_device *netdev,
                               struct ethtool_channels *channels)
 {
@@ -970,6 +1237,11 @@ static struct hinic_stats hinic_function_stats[] = {
        HINIC_FUNC_STAT(rx_err_vport),
 };
 
+static char hinic_test_strings[][ETH_GSTRING_LEN] = {
+       "Internal lb test  (on/offline)",
+       "External lb test (external_lb)",
+};
+
 #define HINIC_PORT_STAT(_stat_item) { \
        .name = #_stat_item, \
        .size = sizeof_field(struct hinic_phy_port_stats, _stat_item), \
@@ -1179,6 +1451,8 @@ static int hinic_get_sset_count(struct net_device *netdev, int sset)
        int count, q_num;
 
        switch (sset) {
+       case ETH_SS_TEST:
+               return ARRAY_LEN(hinic_test_strings);
        case ETH_SS_STATS:
                q_num = nic_dev->num_qps;
                count = ARRAY_LEN(hinic_function_stats) +
@@ -1201,6 +1475,9 @@ static void hinic_get_strings(struct net_device *netdev,
        u16 i, j;
 
        switch (stringset) {
+       case ETH_SS_TEST:
+               memcpy(data, *hinic_test_strings, sizeof(hinic_test_strings));
+               return;
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_LEN(hinic_function_stats); i++) {
                        memcpy(p, hinic_function_stats[i].name,
@@ -1234,13 +1511,311 @@ static void hinic_get_strings(struct net_device *netdev,
        }
 }
 
+/**
+ * hinic_run_lp_test - send loopback bursts and verify the echoed payloads
+ * @nic_dev: device under test; lb_test_rx_buf must already be allocated
+ * @test_time: nominal test time in seconds (5 bursts are sent per second)
+ *
+ * Builds one LP_PKT_LEN template frame, transmits LP_PKT_CNT copies per
+ * burst through hinic_lb_xmit_frame(), waits for the RX path to fill
+ * lb_test_rx_buf, then compares every received copy against the template.
+ * The last payload byte carries the per-burst packet index.
+ *
+ * Return: 0 on success, -ENOMEM/-EBUSY/-EIO on failure.
+ */
+static int hinic_run_lp_test(struct hinic_dev *nic_dev, u32 test_time)
+{
+       u8 *lb_test_rx_buf = nic_dev->lb_test_rx_buf;
+       struct net_device *netdev = nic_dev->netdev;
+       struct sk_buff *skb_tmp = NULL;
+       struct sk_buff *skb = NULL;
+       u32 cnt = test_time * 5;
+       u8 *test_data = NULL;
+       u32 i;
+       u8 j;
+
+       skb_tmp = alloc_skb(LP_PKT_LEN, GFP_ATOMIC);
+       if (!skb_tmp)
+               return -ENOMEM;
+
+       test_data = __skb_put(skb_tmp, LP_PKT_LEN);
+
+       /* Ethernet header: dst ff:ff:ff:ff:ff:ff, src fe:ff:ff:ff:ff:ff,
+        * EtherType 0x0800
+        */
+       memset(test_data, 0xFF, 2 * ETH_ALEN);
+       test_data[ETH_ALEN] = 0xFE;
+       test_data[2 * ETH_ALEN] = 0x08;
+       test_data[2 * ETH_ALEN + 1] = 0x0;
+
+       /* deterministic ramp pattern after the header */
+       for (i = ETH_HLEN; i < LP_PKT_LEN; i++)
+               test_data[i] = i & 0xFF;
+
+       skb_tmp->queue_mapping = 0;
+       skb_tmp->ip_summed = CHECKSUM_COMPLETE;
+       skb_tmp->dev = netdev;
+
+       for (i = 0; i < cnt; i++) {
+               nic_dev->lb_test_rx_idx = 0;
+               memset(lb_test_rx_buf, 0, LP_PKT_CNT * LP_PKT_LEN);
+
+               for (j = 0; j < LP_PKT_CNT; j++) {
+                       skb = pskb_copy(skb_tmp, GFP_ATOMIC);
+                       if (!skb) {
+                               dev_kfree_skb_any(skb_tmp);
+                               netif_err(nic_dev, drv, netdev,
+                                         "Copy skb failed for loopback test\n");
+                               return -ENOMEM;
+                       }
+
+                       /* mark index for every pkt */
+                       skb->data[LP_PKT_LEN - 1] = j;
+
+                       if (hinic_lb_xmit_frame(skb, netdev)) {
+                               dev_kfree_skb_any(skb);
+                               dev_kfree_skb_any(skb_tmp);
+                               netif_err(nic_dev, drv, netdev,
+                                         "Xmit pkt failed for loopback test\n");
+                               return -EBUSY;
+                       }
+               }
+
+               /* wait till all pkts received to RX buffer */
+               msleep(200);
+
+               /* payload must match template, index byte must match j */
+               for (j = 0; j < LP_PKT_CNT; j++) {
+                       if (memcmp(lb_test_rx_buf + j * LP_PKT_LEN,
+                                  skb_tmp->data, LP_PKT_LEN - 1) ||
+                           (*(lb_test_rx_buf + j * LP_PKT_LEN +
+                            LP_PKT_LEN - 1) != j)) {
+                               dev_kfree_skb_any(skb_tmp);
+                               netif_err(nic_dev, drv, netdev,
+                                         "Compare pkt failed in loopback test(index=0x%02x, data[%d]=0x%02x)\n",
+                                         j + i * LP_PKT_CNT,
+                                         LP_PKT_LEN - 1,
+                                         *(lb_test_rx_buf + j * LP_PKT_LEN +
+                                           LP_PKT_LEN - 1));
+                               return -EIO;
+                       }
+               }
+       }
+
+       dev_kfree_skb_any(skb_tmp);
+       return 0;
+}
+
+/**
+ * do_lp_test - set up loopback mode, run the packet test, tear down
+ * @nic_dev: device under test
+ * @flags: ethtool self-test flags; ETH_TEST_FL_EXTERNAL_LB selects the
+ *         external loopback path (no internal mode programming)
+ * @test_time: test time in seconds, forwarded to hinic_run_lp_test()
+ * @test_index: out: which test ran (INTERNAL_LP_TEST / EXTERNAL_LP_TEST),
+ *              used by the caller to index the result array
+ *
+ * For the internal test, hardware internal loopback is enabled before and
+ * disabled after the run. The RX compare buffer (LP_PKT_CNT * LP_PKT_LEN
+ * bytes) lives only for the duration of the test; HINIC_LP_TEST in
+ * nic_dev->flags tells the RX path to divert packets into it.
+ *
+ * Return: 0 on success or a negative errno.
+ */
+static int do_lp_test(struct hinic_dev *nic_dev, u32 flags, u32 test_time,
+                     enum diag_test_index *test_index)
+{
+       struct net_device *netdev = nic_dev->netdev;
+       u8 *lb_test_rx_buf = NULL;
+       int err = 0;
+
+       if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) {
+               *test_index = INTERNAL_LP_TEST;
+               if (hinic_set_loopback_mode(nic_dev->hwdev,
+                                           HINIC_INTERNAL_LP_MODE, true)) {
+                       netif_err(nic_dev, drv, netdev,
+                                 "Failed to set port loopback mode before loopback test\n");
+                       return -EIO;
+               }
+       } else {
+               *test_index = EXTERNAL_LP_TEST;
+       }
+
+       lb_test_rx_buf = vmalloc(LP_PKT_CNT * LP_PKT_LEN);
+       if (!lb_test_rx_buf) {
+               err = -ENOMEM;
+       } else {
+               nic_dev->lb_test_rx_buf = lb_test_rx_buf;
+               nic_dev->lb_pkt_len = LP_PKT_LEN;
+               nic_dev->flags |= HINIC_LP_TEST;
+               err = hinic_run_lp_test(nic_dev, test_time);
+               nic_dev->flags &= ~HINIC_LP_TEST;
+               /* let in-flight RX drain before freeing the compare buffer */
+               msleep(100);
+               vfree(lb_test_rx_buf);
+               nic_dev->lb_test_rx_buf = NULL;
+       }
+
+       /* always restore normal (non-loopback) mode for the internal test */
+       if (!(flags & ETH_TEST_FL_EXTERNAL_LB)) {
+               if (hinic_set_loopback_mode(nic_dev->hwdev,
+                                           HINIC_INTERNAL_LP_MODE, false)) {
+                       netif_err(nic_dev, drv, netdev,
+                                 "Failed to cancel port loopback mode after loopback test\n");
+                       err = -EIO;
+               }
+       }
+
+       return err;
+}
+
+/**
+ * hinic_diag_test - ethtool .self_test entry point
+ * @netdev: net device under test
+ * @eth_test: ethtool test control/result flags
+ * @data: per-test result array (0 = pass, 1 = fail)
+ *
+ * Refuses to run while the interface is down (reports PORT_DOWN_ERR_IDX).
+ * Carrier is forced off for the duration of the test and restored
+ * afterwards only if the port still reports link up.
+ */
+static void hinic_diag_test(struct net_device *netdev,
+                           struct ethtool_test *eth_test, u64 *data)
+{
+       struct hinic_dev *nic_dev = netdev_priv(netdev);
+       enum hinic_port_link_state link_state;
+       enum diag_test_index test_index = 0;
+       int err = 0;
+
+       memset(data, 0, DIAG_TEST_MAX * sizeof(u64));
+
+       /* don't support loopback test when netdev is closed. */
+       if (!(nic_dev->flags & HINIC_INTF_UP)) {
+               netif_err(nic_dev, drv, netdev,
+                         "Do not support loopback test when netdev is closed\n");
+               eth_test->flags |= ETH_TEST_FL_FAILED;
+               data[PORT_DOWN_ERR_IDX] = 1;
+               return;
+       }
+
+       netif_carrier_off(netdev);
+
+       err = do_lp_test(nic_dev, eth_test->flags, LP_DEFAULT_TIME,
+                        &test_index);
+       if (err) {
+               eth_test->flags |= ETH_TEST_FL_FAILED;
+               data[test_index] = 1;
+       }
+
+       /* restore carrier state from the current hardware link state */
+       err = hinic_port_link_state(nic_dev, &link_state);
+       if (!err && link_state == HINIC_LINK_STATE_UP)
+               netif_carrier_on(netdev);
+}
+
+/**
+ * hinic_set_phys_id - ethtool .set_phys_id: blink the port LED to locate it
+ * @netdev: net device
+ * @state: ETHTOOL_ID_ACTIVE to force 2Hz link-LED blinking,
+ *         ETHTOOL_ID_INACTIVE to restore the LED's normal behavior
+ *
+ * Return: 0 on success, -EOPNOTSUPP for unsupported states, or the
+ * firmware call's error code.
+ */
+static int hinic_set_phys_id(struct net_device *netdev,
+                            enum ethtool_phys_id_state state)
+{
+       struct hinic_dev *nic_dev = netdev_priv(netdev);
+       int err = 0;
+       u8 port;
+
+       port = nic_dev->hwdev->port_id;
+
+       switch (state) {
+       case ETHTOOL_ID_ACTIVE:
+               err = hinic_set_led_status(nic_dev->hwdev, port,
+                                          HINIC_LED_TYPE_LINK,
+                                          HINIC_LED_MODE_FORCE_2HZ);
+               if (err)
+                       netif_err(nic_dev, drv, netdev,
+                                 "Set LED blinking in 2HZ failed\n");
+               break;
+
+       case ETHTOOL_ID_INACTIVE:
+               err = hinic_reset_led_status(nic_dev->hwdev, port);
+               if (err)
+                       netif_err(nic_dev, drv, netdev,
+                                 "Reset LED to original status failed\n");
+               break;
+
+       default:
+               /* ETHTOOL_ID_ON/OFF single-step modes are not supported */
+               return -EOPNOTSUPP;
+       }
+
+       return err;
+}
+
+/**
+ * hinic_get_module_info - ethtool .get_module_info: report SFP EEPROM layout
+ * @netdev: net device
+ * @modinfo: out: EEPROM standard (SFF-8472/8436/8636) and readable length
+ *
+ * Maps the transceiver's SFF-8024 identifier byte (read from firmware)
+ * to the matching ethtool module type. For the shared 8436/8636 id the
+ * extended-identifier byte (>= 0x3) selects SFF-8636.
+ *
+ * Return: 0 on success, firmware error code, or -EINVAL for unknown ids.
+ */
+static int hinic_get_module_info(struct net_device *netdev,
+                                struct ethtool_modinfo *modinfo)
+{
+       struct hinic_dev *nic_dev = netdev_priv(netdev);
+       u8 sfp_type_ext;
+       u8 sfp_type;
+       int err;
+
+       err = hinic_get_sfp_type(nic_dev->hwdev, &sfp_type, &sfp_type_ext);
+       if (err)
+               return err;
+
+       switch (sfp_type) {
+       case SFF8024_ID_SFP:
+               modinfo->type = ETH_MODULE_SFF_8472;
+               modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+               break;
+       case SFF8024_ID_QSFP_8438:
+               modinfo->type = ETH_MODULE_SFF_8436;
+               modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+               break;
+       case SFF8024_ID_QSFP_8436_8636:
+               if (sfp_type_ext >= 0x3) {
+                       modinfo->type = ETH_MODULE_SFF_8636;
+                       modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
+
+               } else {
+                       modinfo->type = ETH_MODULE_SFF_8436;
+                       modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+               }
+               break;
+       case SFF8024_ID_QSFP28_8636:
+               modinfo->type = ETH_MODULE_SFF_8636;
+               modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
+               break;
+       default:
+               netif_warn(nic_dev, drv, netdev,
+                          "Optical module unknown: 0x%x\n", sfp_type);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**
+ * hinic_get_module_eeprom - ethtool .get_module_eeprom: read SFP EEPROM
+ * @netdev: net device
+ * @ee: requested offset/length (bounded by STD_SFP_INFO_MAX_SIZE)
+ * @data: out buffer, ee->len bytes
+ *
+ * NOTE(review): firmware reports the number of valid bytes in 'len', but
+ * the copy below uses ee->len/ee->offset without checking against it —
+ * confirm hinic_get_sfp_eeprom() always fills the whole sfp_data buffer,
+ * otherwise uninitialized stack bytes could be returned.
+ *
+ * Return: 0 on success, -EINVAL for bad offset/length, or firmware error.
+ */
+static int hinic_get_module_eeprom(struct net_device *netdev,
+                                  struct ethtool_eeprom *ee, u8 *data)
+{
+       struct hinic_dev *nic_dev = netdev_priv(netdev);
+       u8 sfp_data[STD_SFP_INFO_MAX_SIZE];
+       u16 len;
+       int err;
+
+       if (!ee->len || ((ee->len + ee->offset) > STD_SFP_INFO_MAX_SIZE))
+               return -EINVAL;
+
+       memset(data, 0, ee->len);
+
+       err = hinic_get_sfp_eeprom(nic_dev->hwdev, sfp_data, &len);
+       if (err)
+               return err;
+
+       memcpy(data, sfp_data + ee->offset, ee->len);
+
+       return 0;
+}
+
 static const struct ethtool_ops hinic_ethtool_ops = {
+       .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+                                    ETHTOOL_COALESCE_RX_MAX_FRAMES |
+                                    ETHTOOL_COALESCE_TX_USECS |
+                                    ETHTOOL_COALESCE_TX_MAX_FRAMES,
+
        .get_link_ksettings = hinic_get_link_ksettings,
        .set_link_ksettings = hinic_set_link_ksettings,
        .get_drvinfo = hinic_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_ringparam = hinic_get_ringparam,
        .set_ringparam = hinic_set_ringparam,
+       .get_coalesce = hinic_get_coalesce,
+       .set_coalesce = hinic_set_coalesce,
+       .get_per_queue_coalesce = hinic_get_per_queue_coalesce,
+       .set_per_queue_coalesce = hinic_set_per_queue_coalesce,
+       .get_pauseparam = hinic_get_pauseparam,
+       .set_pauseparam = hinic_set_pauseparam,
+       .get_channels = hinic_get_channels,
+       .set_channels = hinic_set_channels,
+       .get_rxnfc = hinic_get_rxnfc,
+       .set_rxnfc = hinic_set_rxnfc,
+       .get_rxfh_key_size = hinic_get_rxfh_key_size,
+       .get_rxfh_indir_size = hinic_get_rxfh_indir_size,
+       .get_rxfh = hinic_get_rxfh,
+       .set_rxfh = hinic_set_rxfh,
+       .get_sset_count = hinic_get_sset_count,
+       .get_ethtool_stats = hinic_get_ethtool_stats,
+       .get_strings = hinic_get_strings,
+       .self_test = hinic_diag_test,
+       .set_phys_id = hinic_set_phys_id,
+       .get_module_info = hinic_get_module_info,
+       .get_module_eeprom = hinic_get_module_eeprom,
+};
+
+static const struct ethtool_ops hinicvf_ethtool_ops = {
+       .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+                                    ETHTOOL_COALESCE_RX_MAX_FRAMES |
+                                    ETHTOOL_COALESCE_TX_USECS |
+                                    ETHTOOL_COALESCE_TX_MAX_FRAMES,
+
+       .get_link_ksettings = hinic_get_link_ksettings,
+       .get_drvinfo = hinic_get_drvinfo,
+       .get_link = ethtool_op_get_link,
+       .get_ringparam = hinic_get_ringparam,
+       .set_ringparam = hinic_set_ringparam,
+       .get_coalesce = hinic_get_coalesce,
+       .set_coalesce = hinic_set_coalesce,
+       .get_per_queue_coalesce = hinic_get_per_queue_coalesce,
+       .set_per_queue_coalesce = hinic_set_per_queue_coalesce,
        .get_channels = hinic_get_channels,
        .set_channels = hinic_set_channels,
        .get_rxnfc = hinic_get_rxnfc,
@@ -1256,5 +1831,10 @@ static const struct ethtool_ops hinic_ethtool_ops = {
 
 void hinic_set_ethtool_ops(struct net_device *netdev)
 {
-       netdev->ethtool_ops = &hinic_ethtool_ops;
+       struct hinic_dev *nic_dev = netdev_priv(netdev);
+
+       if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+               netdev->ethtool_ops = &hinic_ethtool_ops;
+       else
+               netdev->ethtool_ops = &hinicvf_ethtool_ops;
 }
index 0245da0..9831c14 100644 (file)
@@ -83,6 +83,8 @@ static int parse_capability(struct hinic_hwdev *hwdev,
                nic_cap->max_vf_qps = dev_cap->max_vf_sqs + 1;
        }
 
+       hwdev->port_id = dev_cap->port_id;
+
        return 0;
 }
 
@@ -705,6 +707,68 @@ static int hinic_l2nic_reset(struct hinic_hwdev *hwdev)
        return 0;
 }
 
+/**
+ * hinic_get_interrupt_cfg - read MSI-X coalescing config from management CPU
+ * @hwdev: the NIC HW device
+ * @interrupt_info: in/out: msix_index selects the entry; on success the
+ *                  coalescing/LLI fields are filled in by firmware
+ *
+ * Sends HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP synchronously; the same
+ * buffer is used for request and reply.
+ *
+ * Return: 0 on success, -EINVAL for bad arguments, -EIO on firmware error.
+ */
+int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
+                           struct hinic_msix_config *interrupt_info)
+{
+       u16 out_size = sizeof(*interrupt_info);
+       struct hinic_pfhwdev *pfhwdev;
+       int err;
+
+       if (!hwdev || !interrupt_info)
+               return -EINVAL;
+
+       pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+       interrupt_info->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+
+       err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+                               HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP,
+                               interrupt_info, sizeof(*interrupt_info),
+                               interrupt_info, &out_size, HINIC_MGMT_MSG_SYNC);
+       if (err || !out_size || interrupt_info->status) {
+               dev_err(&hwdev->hwif->pdev->dev, "Failed to get interrupt config, err: %d, status: 0x%x, out size: 0x%x\n",
+                       err, interrupt_info->status, out_size);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/**
+ * hinic_set_interrupt_cfg - write MSI-X coalescing config via management CPU
+ * @hwdev: the NIC HW device
+ * @interrupt_info: in/out: entry to program; LLI fields are re-read from
+ *                  the current hardware config before the write
+ *
+ * Reads the current config first so the LLI fields are preserved, then
+ * sends HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP synchronously.
+ *
+ * Return: 0 on success, -EINVAL for bad arguments, the read-back error,
+ * or -EIO on firmware error.
+ */
+int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
+                           struct hinic_msix_config *interrupt_info)
+{
+       u16 out_size = sizeof(*interrupt_info);
+       struct hinic_msix_config temp_info;
+       struct hinic_pfhwdev *pfhwdev;
+       int err;
+
+       if (!hwdev)
+               return -EINVAL;
+
+       pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+       interrupt_info->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+
+       err = hinic_get_interrupt_cfg(hwdev, &temp_info);
+       if (err)
+               return err;
+
+       /* NOTE(review): lli_credit_cnt is sourced from lli_timer_cnt —
+        * looks like a copy/paste slip; confirm against the firmware spec
+        * before changing it.
+        */
+       interrupt_info->lli_credit_cnt = temp_info.lli_timer_cnt;
+       interrupt_info->lli_timer_cnt = temp_info.lli_timer_cnt;
+
+       err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+                               HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP,
+                               interrupt_info, sizeof(*interrupt_info),
+                               interrupt_info, &out_size, HINIC_MGMT_MSG_SYNC);
+       if (err || !out_size || interrupt_info->status) {
+               dev_err(&hwdev->hwif->pdev->dev, "Failed to set interrupt config, err: %d, status: 0x%x, out size: 0x%x\n",
+                       err, interrupt_info->status, out_size);
+               return -EIO;
+       }
+
+       return 0;
+}
+
 /**
  * hinic_init_hwdev - Initialize the NIC HW
  * @pdev: the NIC pci device
@@ -777,6 +841,8 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev)
                goto err_dev_cap;
        }
 
+       mutex_init(&hwdev->func_to_io.nic_cfg.cfg_mutex);
+
        err = hinic_vf_func_init(hwdev);
        if (err) {
                dev_err(&pdev->dev, "Failed to init nic mbox\n");
@@ -814,6 +880,8 @@ err_aeqs_init:
 err_init_msix:
 err_pfhwdev_alloc:
        hinic_free_hwif(hwif);
+       if (err > 0)
+               err = -EIO;
        return ERR_PTR(err);
 }
 
@@ -829,6 +897,8 @@ void hinic_free_hwdev(struct hinic_hwdev *hwdev)
 
        set_resources_state(hwdev, HINIC_RES_CLEAN);
 
+       hinic_vf_func_free(hwdev);
+
        free_pfhwdev(pfhwdev);
 
        hinic_aeqs_free(&hwdev->aeqs);
@@ -979,3 +1049,29 @@ void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index,
 {
        hinic_set_msix_state(hwdev->hwif, msix_index, flag);
 }
+
+/**
+ * hinic_get_board_info - query static board description from firmware
+ * @hwdev: the NIC HW device
+ * @board_info: out: board type, port count/speed, PF/VF counts, etc.
+ *
+ * Sends HINIC_COMM_CMD_GET_BOARD_INFO synchronously; the same buffer is
+ * used for request and reply.
+ *
+ * Return: 0 on success, -EINVAL for bad arguments, -EIO on firmware error.
+ */
+int hinic_get_board_info(struct hinic_hwdev *hwdev,
+                        struct hinic_comm_board_info *board_info)
+{
+       u16 out_size = sizeof(*board_info);
+       struct hinic_pfhwdev *pfhwdev;
+       int err;
+
+       if (!hwdev || !board_info)
+               return -EINVAL;
+
+       pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+       err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+                               HINIC_COMM_CMD_GET_BOARD_INFO,
+                               board_info, sizeof(*board_info),
+                               board_info, &out_size, HINIC_MGMT_MSG_SYNC);
+       if (err || board_info->status || !out_size) {
+               dev_err(&hwdev->hwif->pdev->dev,
+                       "Failed to get board info, err: %d, status: 0x%x, out size: 0x%x\n",
+                       err, board_info->status, out_size);
+               return -EIO;
+       }
+
+       return 0;
+}
index 71ea7e4..94593a8 100644 (file)
@@ -48,6 +48,8 @@ enum hinic_port_cmd {
        HINIC_PORT_CMD_ADD_VLAN         = 3,
        HINIC_PORT_CMD_DEL_VLAN         = 4,
 
+       HINIC_PORT_CMD_SET_PFC          = 5,
+
        HINIC_PORT_CMD_SET_MAC          = 9,
        HINIC_PORT_CMD_GET_MAC          = 10,
        HINIC_PORT_CMD_DEL_MAC          = 11,
@@ -95,6 +97,9 @@ enum hinic_port_cmd {
 
        HINIC_PORT_CMD_FWCTXT_INIT      = 69,
 
+       HINIC_PORT_CMD_GET_LOOPBACK_MODE = 72,
+       HINIC_PORT_CMD_SET_LOOPBACK_MODE,
+
        HINIC_PORT_CMD_ENABLE_SPOOFCHK = 78,
 
        HINIC_PORT_CMD_GET_MGMT_VERSION = 88,
@@ -111,6 +116,8 @@ enum hinic_port_cmd {
 
        HINIC_PORT_CMD_SET_TSO          = 112,
 
+       HINIC_PORT_CMD_UPDATE_FW        = 114,
+
        HINIC_PORT_CMD_SET_RQ_IQ_MAP    = 115,
 
        HINIC_PORT_CMD_LINK_STATUS_REPORT = 160,
@@ -125,9 +132,13 @@ enum hinic_port_cmd {
 
        HINIC_PORT_CMD_SET_AUTONEG      = 219,
 
+       HINIC_PORT_CMD_GET_STD_SFP_INFO = 240,
+
        HINIC_PORT_CMD_SET_LRO_TIMER    = 244,
 
        HINIC_PORT_CMD_SET_VF_MAX_MIN_RATE = 249,
+
+       HINIC_PORT_CMD_GET_SFP_ABS      = 251,
 };
 
 /* cmd of mgmt CPU message for HILINK module */
@@ -283,6 +294,50 @@ struct hinic_cmd_l2nic_reset {
        u16     reset_flag;
 };
 
+/* Management-CPU message for MSI control register read/write
+ * (HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP / _WR_BY_UP). The leading
+ * status/version/rsvd0 bytes follow the common mgmt reply header layout
+ * used by the other command structs in this file.
+ */
+struct hinic_msix_config {
+       u8      status;
+       u8      version;
+       u8      rsvd0[6];
+
+       u16     func_id;
+       u16     msix_index;
+       u8      pending_cnt;
+       u8      coalesce_timer_cnt;
+       u8      lli_timer_cnt;
+       u8      lli_credit_cnt;
+       u8      resend_timer_cnt;
+       u8      rsvd1[3];
+};
+
+/* Static board description as reported by firmware */
+struct hinic_board_info {
+       u32     board_type;
+       u32     port_num;
+       u32     port_speed;
+       u32     pcie_width;
+       u32     host_num;
+       u32     pf_num;
+       u32     vf_total_num;
+       u32     tile_num;
+       u32     qcm_num;
+       u32     core_num;
+       u32     work_mode;
+       u32     service_mode;
+       u32     pcie_mode;
+       u32     cfg_addr;
+       u32     boot_sel;
+       u32     board_id;
+};
+
+/* Reply for HINIC_COMM_CMD_GET_BOARD_INFO: mgmt header + board info */
+struct hinic_comm_board_info {
+       u8      status;
+       u8      version;
+       u8      rsvd0[6];
+
+       struct hinic_board_info info;
+
+       u32     rsvd1[4];
+};
+
 struct hinic_hwdev {
        struct hinic_hwif               *hwif;
        struct msix_entry               *msix_entries;
@@ -292,6 +347,7 @@ struct hinic_hwdev {
        struct hinic_mbox_func_to_func  *func_to_func;
 
        struct hinic_cap                nic_cap;
+       u8                              port_id;
 };
 
 struct hinic_nic_cb {
@@ -376,4 +432,13 @@ int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
 void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index,
                                enum hinic_msix_state flag);
 
+int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
+                           struct hinic_msix_config *interrupt_info);
+
+int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
+                           struct hinic_msix_config *interrupt_info);
+
+int hinic_get_board_info(struct hinic_hwdev *hwdev,
+                        struct hinic_comm_board_info *board_info);
+
 #endif
index 214f162..ee6d607 100644 (file)
@@ -47,6 +47,15 @@ struct hinic_free_db_area {
        struct semaphore        idx_lock;
 };
 
+/* Cached pause/autoneg configuration, shared between the ethtool
+ * pauseparam handlers and the link-event refresh path.
+ */
+struct hinic_nic_cfg {
+       /* serializes readers/writers of the fields below */
+       struct mutex            cfg_mutex;
+       /* true once userspace has explicitly set pause parameters */
+       bool                    pause_set;
+       u32                     auto_neg;
+       u32                     rx_pause;
+       u32                     tx_pause;
+};
+
 struct hinic_func_to_io {
        struct hinic_hwif       *hwif;
        struct hinic_hwdev      *hwdev;
@@ -78,6 +87,7 @@ struct hinic_func_to_io {
        u16                     max_vfs;
        struct vf_data_storage  *vf_infos;
        u8                      link_status;
+       struct hinic_nic_cfg    nic_cfg;
 };
 
 struct hinic_wq_page_size {
index c33eb11..e0f5a81 100644 (file)
@@ -370,48 +370,89 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
                                MSG_NOT_RESP, timeout);
 }
 
-/**
- * mgmt_recv_msg_handler - handler for message from mgmt cpu
- * @pf_to_mgmt: PF to MGMT channel
- * @recv_msg: received message details
- **/
-static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
-                                 struct hinic_recv_msg *recv_msg)
+static void recv_mgmt_msg_work_handler(struct work_struct *work)
 {
-       struct hinic_hwif *hwif = pf_to_mgmt->hwif;
-       struct pci_dev *pdev = hwif->pdev;
-       u8 *buf_out = recv_msg->buf_out;
+       struct hinic_mgmt_msg_handle_work *mgmt_work =
+               container_of(work, struct hinic_mgmt_msg_handle_work, work);
+       struct hinic_pf_to_mgmt *pf_to_mgmt = mgmt_work->pf_to_mgmt;
+       struct pci_dev *pdev = pf_to_mgmt->hwif->pdev;
+       u8 *buf_out = pf_to_mgmt->mgmt_ack_buf;
        struct hinic_mgmt_cb *mgmt_cb;
        unsigned long cb_state;
        u16 out_size = 0;
 
-       if (recv_msg->mod >= HINIC_MOD_MAX) {
+       memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE);
+
+       if (mgmt_work->mod >= HINIC_MOD_MAX) {
                dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n",
-                       recv_msg->mod);
+                       mgmt_work->mod);
+               kfree(mgmt_work->msg);
+               kfree(mgmt_work);
                return;
        }
 
-       mgmt_cb = &pf_to_mgmt->mgmt_cb[recv_msg->mod];
+       mgmt_cb = &pf_to_mgmt->mgmt_cb[mgmt_work->mod];
 
        cb_state = cmpxchg(&mgmt_cb->state,
                           HINIC_MGMT_CB_ENABLED,
                           HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING);
 
        if ((cb_state == HINIC_MGMT_CB_ENABLED) && (mgmt_cb->cb))
-               mgmt_cb->cb(mgmt_cb->handle, recv_msg->cmd,
-                           recv_msg->msg, recv_msg->msg_len,
+               mgmt_cb->cb(mgmt_cb->handle, mgmt_work->cmd,
+                           mgmt_work->msg, mgmt_work->msg_len,
                            buf_out, &out_size);
        else
                dev_err(&pdev->dev, "No MGMT msg handler, mod: %d, cmd: %d\n",
-                       recv_msg->mod, recv_msg->cmd);
+                       mgmt_work->mod, mgmt_work->cmd);
 
        mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING;
 
-       if (!recv_msg->async_mgmt_to_pf)
+       if (!mgmt_work->async_mgmt_to_pf)
                /* MGMT sent sync msg, send the response */
-               msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod, recv_msg->cmd,
+               msg_to_mgmt_async(pf_to_mgmt, mgmt_work->mod, mgmt_work->cmd,
                                  buf_out, out_size, MGMT_RESP,
-                                 recv_msg->msg_id);
+                                 mgmt_work->msg_id);
+
+       kfree(mgmt_work->msg);
+       kfree(mgmt_work);
+}
+
+/**
+ * mgmt_recv_msg_handler - handler for message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @recv_msg: received message details
+ *
+ * Copies the received message into a freshly allocated work item and
+ * defers the actual callback dispatch to pf_to_mgmt->workq (see
+ * recv_mgmt_msg_work_handler), so the receive path does not run the
+ * module callbacks directly. Allocation failures drop the message.
+ **/
+static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
+                                 struct hinic_recv_msg *recv_msg)
+{
+       struct hinic_mgmt_msg_handle_work *mgmt_work = NULL;
+       struct pci_dev *pdev = pf_to_mgmt->hwif->pdev;
+
+       mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL);
+       if (!mgmt_work) {
+               dev_err(&pdev->dev, "Allocate mgmt work memory failed\n");
+               return;
+       }
+
+       if (recv_msg->msg_len) {
+               mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL);
+               if (!mgmt_work->msg) {
+                       dev_err(&pdev->dev, "Allocate mgmt msg memory failed\n");
+                       kfree(mgmt_work);
+                       return;
+               }
+       }
+
+       mgmt_work->pf_to_mgmt = pf_to_mgmt;
+       mgmt_work->msg_len = recv_msg->msg_len;
+       /* NOTE(review): when msg_len == 0 mgmt_work->msg stays NULL and
+        * this memcpy gets a NULL dst with length 0 — harmless in practice
+        * but worth guarding.
+        */
+       memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len);
+       mgmt_work->msg_id = recv_msg->msg_id;
+       mgmt_work->mod = recv_msg->mod;
+       mgmt_work->cmd = recv_msg->cmd;
+       mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf;
+
+       INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler);
+       queue_work(pf_to_mgmt->workq, &mgmt_work->work);
+}
 
 /**
@@ -546,6 +587,12 @@ static int alloc_msg_buf(struct hinic_pf_to_mgmt *pf_to_mgmt)
        if (!pf_to_mgmt->sync_msg_buf)
                return -ENOMEM;
 
+       pf_to_mgmt->mgmt_ack_buf = devm_kzalloc(&pdev->dev,
+                                               MAX_PF_MGMT_BUF_SIZE,
+                                               GFP_KERNEL);
+       if (!pf_to_mgmt->mgmt_ack_buf)
+               return -ENOMEM;
+
        return 0;
 }
 
@@ -571,6 +618,11 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
                return 0;
 
        sema_init(&pf_to_mgmt->sync_msg_lock, 1);
+       pf_to_mgmt->workq = create_singlethread_workqueue("hinic_mgmt");
+       if (!pf_to_mgmt->workq) {
+               dev_err(&pdev->dev, "Failed to initialize MGMT workqueue\n");
+               return -ENOMEM;
+       }
        pf_to_mgmt->sync_msg_id = 0;
 
        err = alloc_msg_buf(pf_to_mgmt);
@@ -605,4 +657,5 @@ void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt)
 
        hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU);
        hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
+       destroy_workqueue(pf_to_mgmt->workq);
 }
index c2b142c..21b93b6 100644 (file)
@@ -78,11 +78,18 @@ enum hinic_comm_cmd {
 
        HINIC_COMM_CMD_CEQ_CTRL_REG_WR_BY_UP = 0x33,
 
-       HINIC_COMM_CMD_L2NIC_RESET              = 0x4b,
+       HINIC_COMM_CMD_MSI_CTRL_REG_WR_BY_UP,
+       HINIC_COMM_CMD_MSI_CTRL_REG_RD_BY_UP,
+
+       HINIC_COMM_CMD_SET_LED_STATUS   = 0x4a,
+
+       HINIC_COMM_CMD_L2NIC_RESET      = 0x4b,
 
        HINIC_COMM_CMD_PAGESIZE_SET     = 0x50,
 
-       HINIC_COMM_CMD_MAX              = 0x51,
+       HINIC_COMM_CMD_GET_BOARD_INFO   = 0x52,
+
+       HINIC_COMM_CMD_MAX,
 };
 
 enum hinic_mgmt_cb_state {
@@ -119,6 +126,7 @@ struct hinic_pf_to_mgmt {
        struct semaphore                sync_msg_lock;
        u16                             sync_msg_id;
        u8                              *sync_msg_buf;
+       void                            *mgmt_ack_buf;
 
        struct hinic_recv_msg           recv_resp_msg_from_mgmt;
        struct hinic_recv_msg           recv_msg_from_mgmt;
@@ -126,6 +134,21 @@ struct hinic_pf_to_mgmt {
        struct hinic_api_cmd_chain      *cmd_chain[HINIC_API_CMD_MAX];
 
        struct hinic_mgmt_cb            mgmt_cb[HINIC_MOD_MAX];
+
+       struct workqueue_struct         *workq;
+};
+
+/* Deferred-dispatch context for one message received from the mgmt CPU;
+ * allocated in mgmt_recv_msg_handler() and freed by the work handler.
+ */
+struct hinic_mgmt_msg_handle_work {
+       struct work_struct work;
+       struct hinic_pf_to_mgmt *pf_to_mgmt;
+
+       /* private copy of the message payload (NULL when msg_len == 0) */
+       void                    *msg;
+       u16                     msg_len;
+
+       enum hinic_mod_type     mod;
+       u8                      cmd;
+       u16                     msg_id;
+       /* nonzero when mgmt sent an async message (no response required) */
+       int                     async_mgmt_to_pf;
+};
 
 void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
index e9e6f4c..c4c6f9c 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/semaphore.h>
 #include <linux/workqueue.h>
 #include <net/ip.h>
+#include <net/devlink.h>
 #include <linux/bitops.h>
 #include <linux/bitmap.h>
 #include <linux/delay.h>
@@ -25,6 +26,7 @@
 
 #include "hinic_hw_qp.h"
 #include "hinic_hw_dev.h"
+#include "hinic_devlink.h"
 #include "hinic_port.h"
 #include "hinic_tx.h"
 #include "hinic_rx.h"
@@ -69,6 +71,10 @@ MODULE_PARM_DESC(rx_weight, "Number Rx packets for NAPI budget (default=64)");
 
 #define HINIC_WAIT_SRIOV_CFG_TIMEOUT   15000
 
+#define HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT           2
+#define HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG       32
+#define HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG                7
+
 static int change_mac_addr(struct net_device *netdev, const u8 *addr);
 
 static int set_features(struct hinic_dev *nic_dev,
@@ -887,6 +893,26 @@ static void netdev_features_init(struct net_device *netdev)
        netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
 }
 
+/**
+ * hinic_refresh_nic_cfg - re-apply cached pause settings after a link event
+ * @nic_dev: nic device
+ *
+ * If userspace previously set pause parameters (pause_set) or autoneg is
+ * currently off, push the cached rx/tx pause configuration back to
+ * hardware using the port's current autoneg state. Errors from the
+ * hardware call are intentionally ignored (best-effort refresh).
+ */
+static void hinic_refresh_nic_cfg(struct hinic_dev *nic_dev)
+{
+       struct hinic_nic_cfg *nic_cfg = &nic_dev->hwdev->func_to_io.nic_cfg;
+       struct hinic_pause_config pause_info = {0};
+       struct hinic_port_cap port_cap = {0};
+
+       if (hinic_port_get_cap(nic_dev, &port_cap))
+               return;
+
+       mutex_lock(&nic_cfg->cfg_mutex);
+       if (nic_cfg->pause_set || !port_cap.autoneg_state) {
+               nic_cfg->auto_neg = port_cap.autoneg_state;
+               pause_info.auto_neg = nic_cfg->auto_neg;
+               pause_info.rx_pause = nic_cfg->rx_pause;
+               pause_info.tx_pause = nic_cfg->tx_pause;
+               hinic_set_hw_pause_info(nic_dev->hwdev, &pause_info);
+       }
+       mutex_unlock(&nic_cfg->cfg_mutex);
+}
+
 /**
  * link_status_event_handler - link event handler
  * @handle: nic device for the handler
@@ -918,6 +944,9 @@ static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
 
                up(&nic_dev->mgmt_lock);
 
+               if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+                       hinic_refresh_nic_cfg(nic_dev);
+
                netif_info(nic_dev, drv, nic_dev->netdev, "HINIC_Link is UP\n");
        } else {
                down(&nic_dev->mgmt_lock);
@@ -948,28 +977,93 @@ static int set_features(struct hinic_dev *nic_dev,
 {
        netdev_features_t changed = force_change ? ~0 : pre_features ^ features;
        u32 csum_en = HINIC_RX_CSUM_OFFLOAD_EN;
+       netdev_features_t failed_features = 0;
+       int ret = 0;
        int err = 0;
 
-       if (changed & NETIF_F_TSO)
-               err = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ?
+       if (changed & NETIF_F_TSO) {
+               ret = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ?
                                         HINIC_TSO_ENABLE : HINIC_TSO_DISABLE);
+               if (ret) {
+                       err = ret;
+                       failed_features |= NETIF_F_TSO;
+               }
+       }
 
-       if (changed & NETIF_F_RXCSUM)
-               err = hinic_set_rx_csum_offload(nic_dev, csum_en);
+       if (changed & NETIF_F_RXCSUM) {
+               ret = hinic_set_rx_csum_offload(nic_dev, csum_en);
+               if (ret) {
+                       err = ret;
+                       failed_features |= NETIF_F_RXCSUM;
+               }
+       }
 
        if (changed & NETIF_F_LRO) {
-               err = hinic_set_rx_lro_state(nic_dev,
+               ret = hinic_set_rx_lro_state(nic_dev,
                                             !!(features & NETIF_F_LRO),
                                             HINIC_LRO_RX_TIMER_DEFAULT,
                                             HINIC_LRO_MAX_WQE_NUM_DEFAULT);
+               if (ret) {
+                       err = ret;
+                       failed_features |= NETIF_F_LRO;
+               }
        }
 
-       if (changed & NETIF_F_HW_VLAN_CTAG_RX)
-               err = hinic_set_rx_vlan_offload(nic_dev,
+       if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
+               ret = hinic_set_rx_vlan_offload(nic_dev,
                                                !!(features &
                                                   NETIF_F_HW_VLAN_CTAG_RX));
+               if (ret) {
+                       err = ret;
+                       failed_features |= NETIF_F_HW_VLAN_CTAG_RX;
+               }
+       }
 
-       return err;
+       if (err) {
+               nic_dev->netdev->features = features ^ failed_features;
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int hinic_init_intr_coalesce(struct hinic_dev *nic_dev)
+{
+       u64 size;
+       u16 i;
+
+       size = sizeof(struct hinic_intr_coal_info) * nic_dev->max_qps;
+       nic_dev->rx_intr_coalesce = kzalloc(size, GFP_KERNEL);
+       if (!nic_dev->rx_intr_coalesce)
+               return -ENOMEM;
+       nic_dev->tx_intr_coalesce = kzalloc(size, GFP_KERNEL);
+       if (!nic_dev->tx_intr_coalesce) {
+               kfree(nic_dev->rx_intr_coalesce);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < nic_dev->max_qps; i++) {
+               nic_dev->rx_intr_coalesce[i].pending_limt =
+                       HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT;
+               nic_dev->rx_intr_coalesce[i].coalesce_timer_cfg =
+                       HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG;
+               nic_dev->rx_intr_coalesce[i].resend_timer_cfg =
+                       HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG;
+               nic_dev->tx_intr_coalesce[i].pending_limt =
+                       HINIC_DEAULT_TXRX_MSIX_PENDING_LIMIT;
+               nic_dev->tx_intr_coalesce[i].coalesce_timer_cfg =
+                       HINIC_DEAULT_TXRX_MSIX_COALESC_TIMER_CFG;
+               nic_dev->tx_intr_coalesce[i].resend_timer_cfg =
+                       HINIC_DEAULT_TXRX_MSIX_RESEND_TIMER_CFG;
+       }
+
+       return 0;
+}
+
+static void hinic_free_intr_coalesce(struct hinic_dev *nic_dev)
+{
+       kfree(nic_dev->tx_intr_coalesce);
+       kfree(nic_dev->rx_intr_coalesce);
 }
 
 /**
@@ -983,9 +1077,11 @@ static int nic_dev_init(struct pci_dev *pdev)
        struct hinic_rx_mode_work *rx_mode_work;
        struct hinic_txq_stats *tx_stats;
        struct hinic_rxq_stats *rx_stats;
+       struct hinic_devlink_priv *priv;
        struct hinic_dev *nic_dev;
        struct net_device *netdev;
        struct hinic_hwdev *hwdev;
+       struct devlink *devlink;
        int err, num_qps;
 
        hwdev = hinic_init_hwdev(pdev);
@@ -994,6 +1090,16 @@ static int nic_dev_init(struct pci_dev *pdev)
                return PTR_ERR(hwdev);
        }
 
+       devlink = hinic_devlink_alloc();
+       if (!devlink) {
+               dev_err(&pdev->dev, "Hinic devlink alloc failed\n");
+               err = -ENOMEM;
+               goto err_devlink_alloc;
+       }
+
+       priv = devlink_priv(devlink);
+       priv->hwdev = hwdev;
+
        num_qps = hinic_hwdev_num_qps(hwdev);
        if (num_qps <= 0) {
                dev_err(&pdev->dev, "Invalid number of QPS\n");
@@ -1008,8 +1114,6 @@ static int nic_dev_init(struct pci_dev *pdev)
                goto err_alloc_etherdev;
        }
 
-       hinic_set_ethtool_ops(netdev);
-
        if (!HINIC_IS_VF(hwdev->hwif))
                netdev->netdev_ops = &hinic_netdev_ops;
        else
@@ -1031,6 +1135,9 @@ static int nic_dev_init(struct pci_dev *pdev)
        nic_dev->sriov_info.hwdev = hwdev;
        nic_dev->sriov_info.pdev = pdev;
        nic_dev->max_qps = num_qps;
+       nic_dev->devlink = devlink;
+
+       hinic_set_ethtool_ops(netdev);
 
        sema_init(&nic_dev->mgmt_lock, 1);
 
@@ -1054,6 +1161,10 @@ static int nic_dev_init(struct pci_dev *pdev)
                goto err_workq;
        }
 
+       err = hinic_devlink_register(devlink, &pdev->dev);
+       if (err)
+               goto err_devlink_reg;
+
        pci_set_drvdata(pdev, netdev);
 
        err = hinic_port_get_mac(nic_dev, netdev->dev_addr);
@@ -1100,8 +1211,19 @@ static int nic_dev_init(struct pci_dev *pdev)
        if (err)
                goto err_set_features;
 
+       /* enable pause and disable pfc by default */
+       err = hinic_dcb_set_pfc(nic_dev->hwdev, 0, 0);
+       if (err)
+               goto err_set_pfc;
+
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
+       err = hinic_init_intr_coalesce(nic_dev);
+       if (err) {
+               dev_err(&pdev->dev, "Failed to init_intr_coalesce\n");
+               goto err_init_intr;
+       }
+
        err = register_netdev(netdev);
        if (err) {
                dev_err(&pdev->dev, "Failed to register netdev\n");
@@ -1111,15 +1233,20 @@ static int nic_dev_init(struct pci_dev *pdev)
        return 0;
 
 err_reg_netdev:
+       hinic_free_intr_coalesce(nic_dev);
+err_init_intr:
+err_set_pfc:
 err_set_features:
        hinic_hwdev_cb_unregister(nic_dev->hwdev,
                                  HINIC_MGMT_MSG_CMD_LINK_STATUS);
        cancel_work_sync(&rx_mode_work->work);
 
 err_set_mtu:
-err_get_mac:
+       hinic_port_del_mac(nic_dev, netdev->dev_addr, 0);
 err_add_mac:
+err_get_mac:
        pci_set_drvdata(pdev, NULL);
+err_devlink_reg:
        destroy_workqueue(nic_dev->workq);
 
 err_workq:
@@ -1128,6 +1255,7 @@ err_vlan_bitmap:
 
 err_alloc_etherdev:
 err_num_qps:
+err_devlink_alloc:
        hinic_free_hwdev(hwdev);
        return err;
 }
@@ -1224,6 +1352,8 @@ static void hinic_remove(struct pci_dev *pdev)
 
        unregister_netdev(netdev);
 
+       hinic_free_intr_coalesce(nic_dev);
+
        hinic_port_del_mac(nic_dev, netdev->dev_addr, 0);
 
        hinic_hwdev_cb_unregister(nic_dev->hwdev,
@@ -1234,9 +1364,11 @@ static void hinic_remove(struct pci_dev *pdev)
 
        pci_set_drvdata(pdev, NULL);
 
+       hinic_devlink_unregister(nic_dev->devlink);
+
        destroy_workqueue(nic_dev->workq);
 
-       hinic_vf_func_free(nic_dev->hwdev);
+       hinic_devlink_free(nic_dev->devlink);
 
        hinic_free_hwdev(nic_dev->hwdev);
 
index 175c0ee..ba358bb 100644 (file)
@@ -1082,6 +1082,7 @@ int hinic_get_link_mode(struct hinic_hwdev *hwdev,
        if (!hwdev || !link_mode)
                return -EINVAL;
 
+       link_mode->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
        out_size = sizeof(*link_mode);
 
        err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_LINK_MODE,
@@ -1172,6 +1173,8 @@ int hinic_get_hw_pause_info(struct hinic_hwdev *hwdev,
        u16 out_size = sizeof(*pause_info);
        int err;
 
+       pause_info->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+
        err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_PAUSE_INFO,
                                 pause_info, sizeof(*pause_info),
                                 pause_info, &out_size);
@@ -1190,6 +1193,8 @@ int hinic_set_hw_pause_info(struct hinic_hwdev *hwdev,
        u16 out_size = sizeof(*pause_info);
        int err;
 
+       pause_info->func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+
        err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PAUSE_INFO,
                                 pause_info, sizeof(*pause_info),
                                 pause_info, &out_size);
@@ -1201,3 +1206,192 @@ int hinic_set_hw_pause_info(struct hinic_hwdev *hwdev,
 
        return 0;
 }
+
+int hinic_dcb_set_pfc(struct hinic_hwdev *hwdev, u8 pfc_en, u8 pfc_bitmap)
+{
+       struct hinic_nic_cfg *nic_cfg = &hwdev->func_to_io.nic_cfg;
+       struct hinic_set_pfc pfc = {0};
+       u16 out_size = sizeof(pfc);
+       int err;
+
+       if (HINIC_IS_VF(hwdev->hwif))
+               return 0;
+
+       mutex_lock(&nic_cfg->cfg_mutex);
+
+       pfc.func_id = HINIC_HWIF_FUNC_IDX(hwdev->hwif);
+       pfc.pfc_bitmap = pfc_bitmap;
+       pfc.pfc_en = pfc_en;
+
+       err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_PFC,
+                                &pfc, sizeof(pfc), &pfc, &out_size);
+       if (err || pfc.status || !out_size) {
+               dev_err(&hwdev->hwif->pdev->dev, "Failed to %s pfc, err: %d, status: 0x%x, out size: 0x%x\n",
+                       pfc_en ? "enable" : "disable", err, pfc.status,
+                       out_size);
+               mutex_unlock(&nic_cfg->cfg_mutex);
+               return -EIO;
+       }
+
+       /* pause settings is opposite from pfc */
+       nic_cfg->rx_pause = pfc_en ? 0 : 1;
+       nic_cfg->tx_pause = pfc_en ? 0 : 1;
+
+       mutex_unlock(&nic_cfg->cfg_mutex);
+
+       return 0;
+}
+
+int hinic_set_loopback_mode(struct hinic_hwdev *hwdev, u32 mode, u32 enable)
+{
+       struct hinic_port_loopback lb = {0};
+       u16 out_size = sizeof(lb);
+       int err;
+
+       lb.mode = mode;
+       lb.en = enable;
+
+       if (mode < LOOP_MODE_MIN || mode > LOOP_MODE_MAX) {
+               dev_err(&hwdev->hwif->pdev->dev,
+                       "Invalid loopback mode %d to set\n", mode);
+               return -EINVAL;
+       }
+
+       err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_LOOPBACK_MODE,
+                                &lb, sizeof(lb), &lb, &out_size);
+       if (err || !out_size || lb.status) {
+               dev_err(&hwdev->hwif->pdev->dev,
+                       "Failed to set loopback mode %d en %d, err: %d, status: 0x%x, out size: 0x%x\n",
+                       mode, enable, err, lb.status, out_size);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int _set_led_status(struct hinic_hwdev *hwdev, u8 port,
+                          enum hinic_led_type type,
+                          enum hinic_led_mode mode, u8 reset)
+{
+       struct hinic_led_info led_info = {0};
+       u16 out_size = sizeof(led_info);
+       struct hinic_pfhwdev *pfhwdev;
+       int err;
+
+       pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
+
+       led_info.port = port;
+       led_info.reset = reset;
+
+       led_info.type = type;
+       led_info.mode = mode;
+
+       err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
+                               HINIC_COMM_CMD_SET_LED_STATUS,
+                               &led_info, sizeof(led_info),
+                               &led_info, &out_size, HINIC_MGMT_MSG_SYNC);
+       if (err || led_info.status || !out_size) {
+               dev_err(&hwdev->hwif->pdev->dev, "Failed to set led status, err: %d, status: 0x%x, out size: 0x%x\n",
+                       err, led_info.status, out_size);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+int hinic_set_led_status(struct hinic_hwdev *hwdev, u8 port,
+                        enum hinic_led_type type, enum hinic_led_mode mode)
+{
+       if (!hwdev)
+               return -EINVAL;
+
+       return _set_led_status(hwdev, port, type, mode, 0);
+}
+
+int hinic_reset_led_status(struct hinic_hwdev *hwdev, u8 port)
+{
+       int err;
+
+       if (!hwdev)
+               return -EINVAL;
+
+       err = _set_led_status(hwdev, port, HINIC_LED_TYPE_INVALID,
+                             HINIC_LED_MODE_INVALID, 1);
+       if (err)
+               dev_err(&hwdev->hwif->pdev->dev,
+                       "Failed to reset led status\n");
+
+       return err;
+}
+
+static bool hinic_if_sfp_absent(struct hinic_hwdev *hwdev)
+{
+       struct hinic_cmd_get_light_module_abs sfp_abs = {0};
+       u16 out_size = sizeof(sfp_abs);
+       u8 port_id = hwdev->port_id;
+       int err;
+
+       sfp_abs.port_id = port_id;
+       err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_SFP_ABS,
+                                &sfp_abs, sizeof(sfp_abs), &sfp_abs,
+                                &out_size);
+       if (sfp_abs.status || err || !out_size) {
+               dev_err(&hwdev->hwif->pdev->dev,
+                       "Failed to get port%d sfp absent status, err: %d, status: 0x%x, out size: 0x%x\n",
+                       port_id, err, sfp_abs.status, out_size);
+               return true;
+       }
+
+       return ((sfp_abs.abs_status == 0) ? false : true);
+}
+
+int hinic_get_sfp_eeprom(struct hinic_hwdev *hwdev, u8 *data, u16 *len)
+{
+       struct hinic_cmd_get_std_sfp_info sfp_info = {0};
+       u16 out_size = sizeof(sfp_info);
+       u8 port_id;
+       int err;
+
+       if (!hwdev || !data || !len)
+               return -EINVAL;
+
+       port_id = hwdev->port_id;
+
+       if (hinic_if_sfp_absent(hwdev))
+               return -ENXIO;
+
+       sfp_info.port_id = port_id;
+       err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_GET_STD_SFP_INFO,
+                                &sfp_info, sizeof(sfp_info), &sfp_info,
+                                &out_size);
+       if (sfp_info.status || err || !out_size) {
+               dev_err(&hwdev->hwif->pdev->dev,
+                       "Failed to get port%d sfp eeprom information, err: %d, status: 0x%x, out size: 0x%x\n",
+                       port_id, err, sfp_info.status, out_size);
+               return -EIO;
+       }
+
+       *len = min_t(u16, sfp_info.eeprom_len, STD_SFP_INFO_MAX_SIZE);
+       memcpy(data, sfp_info.sfp_info, STD_SFP_INFO_MAX_SIZE);
+
+       return 0;
+}
+
+int hinic_get_sfp_type(struct hinic_hwdev *hwdev, u8 *data0, u8 *data1)
+{
+       u8 sfp_data[STD_SFP_INFO_MAX_SIZE];
+       u16 len;
+       int err;
+
+       if (hinic_if_sfp_absent(hwdev))
+               return -ENXIO;
+
+       err = hinic_get_sfp_eeprom(hwdev, sfp_data, &len);
+       if (err)
+               return err;
+
+       *data0 = sfp_data[0];
+       *data1 = sfp_data[1];
+
+       return 0;
+}
index 661c632..14931ad 100644 (file)
@@ -641,6 +641,93 @@ struct hinic_pause_config {
        u32     tx_pause;
 };
 
+struct hinic_set_pfc {
+       u8      status;
+       u8      version;
+       u8      rsvd0[6];
+
+       u16     func_id;
+       u8      pfc_en;
+       u8      pfc_bitmap;
+       u8      rsvd1[4];
+};
+
+/* get or set loopback mode, need to modify by base API */
+#define HINIC_INTERNAL_LP_MODE                 5
+#define LOOP_MODE_MIN                          1
+#define LOOP_MODE_MAX                          6
+
+struct hinic_port_loopback {
+       u8      status;
+       u8      version;
+       u8      rsvd[6];
+
+       u32     mode;
+       u32     en;
+};
+
+struct hinic_led_info {
+       u8      status;
+       u8      version;
+       u8      rsvd0[6];
+
+       u8      port;
+       u8      type;
+       u8      mode;
+       u8      reset;
+};
+
+#define STD_SFP_INFO_MAX_SIZE  640
+
+struct hinic_cmd_get_light_module_abs {
+       u8 status;
+       u8 version;
+       u8 rsvd0[6];
+
+       u8 port_id;
+       u8 abs_status; /* 0:present, 1:absent */
+       u8 rsv[2];
+};
+
+
+
+struct hinic_cmd_get_std_sfp_info {
+       u8 status;
+       u8 version;
+       u8 rsvd0[6];
+
+       u8 port_id;
+       u8 wire_type;
+       u16 eeprom_len;
+       u32 rsvd;
+       u8 sfp_info[STD_SFP_INFO_MAX_SIZE];
+};
+
+struct hinic_cmd_update_fw {
+       u8 status;
+       u8 version;
+       u8 rsvd0[6];
+
+       struct {
+               u32 SL:1;
+               u32 SF:1;
+               u32 flag:1;
+               u32 reserved:13;
+               u32 fragment_len:16;
+       } ctl_info;
+
+       struct {
+               u32 FW_section_CRC;
+               u32 FW_section_type;
+       } section_info;
+
+       u32 total_len;
+       u32 setion_total_len;
+       u32 fw_section_version;
+       u32 section_offset;
+       u32 data[384];
+};
+
 int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr,
                       u16 vlan_id);
 
@@ -736,6 +823,38 @@ int hinic_get_hw_pause_info(struct hinic_hwdev *hwdev,
 int hinic_set_hw_pause_info(struct hinic_hwdev *hwdev,
                            struct hinic_pause_config *pause_info);
 
+int hinic_dcb_set_pfc(struct hinic_hwdev *hwdev, u8 pfc_en, u8 pfc_bitmap);
+
+int hinic_set_loopback_mode(struct hinic_hwdev *hwdev, u32 mode, u32 enable);
+
+enum hinic_led_mode {
+       HINIC_LED_MODE_ON,
+       HINIC_LED_MODE_OFF,
+       HINIC_LED_MODE_FORCE_1HZ,
+       HINIC_LED_MODE_FORCE_2HZ,
+       HINIC_LED_MODE_FORCE_4HZ,
+       HINIC_LED_MODE_1HZ,
+       HINIC_LED_MODE_2HZ,
+       HINIC_LED_MODE_4HZ,
+       HINIC_LED_MODE_INVALID,
+};
+
+enum hinic_led_type {
+       HINIC_LED_TYPE_LINK,
+       HINIC_LED_TYPE_LOW_SPEED,
+       HINIC_LED_TYPE_HIGH_SPEED,
+       HINIC_LED_TYPE_INVALID,
+};
+
+int hinic_reset_led_status(struct hinic_hwdev *hwdev, u8 port);
+
+int hinic_set_led_status(struct hinic_hwdev *hwdev, u8 port,
+                        enum hinic_led_type type, enum hinic_led_mode mode);
+
+int hinic_get_sfp_type(struct hinic_hwdev *hwdev, u8 *data0, u8 *data1);
+
+int hinic_get_sfp_eeprom(struct hinic_hwdev *hwdev, u8 *data, u16 *len);
+
 int hinic_open(struct net_device *netdev);
 
 int hinic_close(struct net_device *netdev);
index af20d0d..5bee951 100644 (file)
@@ -316,6 +316,39 @@ static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb,
        return num_wqes;
 }
 
+static void hinic_copy_lp_data(struct hinic_dev *nic_dev,
+                              struct sk_buff *skb)
+{
+       struct net_device *netdev = nic_dev->netdev;
+       u8 *lb_buf = nic_dev->lb_test_rx_buf;
+       int lb_len = nic_dev->lb_pkt_len;
+       int pkt_offset, frag_len, i;
+       void *frag_data = NULL;
+
+       if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) {
+               nic_dev->lb_test_rx_idx = 0;
+               netif_warn(nic_dev, drv, netdev, "Loopback test warning, receive too more test pkts\n");
+       }
+
+       if (skb->len != nic_dev->lb_pkt_len) {
+               netif_warn(nic_dev, drv, netdev, "Wrong packet length\n");
+               nic_dev->lb_test_rx_idx++;
+               return;
+       }
+
+       pkt_offset = nic_dev->lb_test_rx_idx * lb_len;
+       frag_len = (int)skb_headlen(skb);
+       memcpy(lb_buf + pkt_offset, skb->data, frag_len);
+       pkt_offset += frag_len;
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]);
+               frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]);
+               memcpy((lb_buf + pkt_offset), frag_data, frag_len);
+               pkt_offset += frag_len;
+       }
+       nic_dev->lb_test_rx_idx++;
+}
+
 /**
  * rxq_recv - Rx handler
  * @rxq: rx queue
@@ -330,6 +363,7 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
        u64 pkt_len = 0, rx_bytes = 0;
        struct hinic_rq *rq = rxq->rq;
        struct hinic_rq_wqe *rq_wqe;
+       struct hinic_dev *nic_dev;
        unsigned int free_wqebbs;
        struct hinic_rq_cqe *cqe;
        int num_wqes, pkts = 0;
@@ -342,6 +376,8 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
        u32 vlan_len;
        u16 vid;
 
+       nic_dev = netdev_priv(netdev);
+
        while (pkts < budget) {
                num_wqes = 0;
 
@@ -384,6 +420,9 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
                }
 
+               if (unlikely(nic_dev->flags & HINIC_LP_TEST))
+                       hinic_copy_lp_data(nic_dev, skb);
+
                skb_record_rx_queue(skb, qp->q_id);
                skb->protocol = eth_type_trans(skb, rxq->netdev);
 
@@ -478,11 +517,15 @@ static irqreturn_t rx_irq(int irq, void *data)
 static int rx_request_irq(struct hinic_rxq *rxq)
 {
        struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
+       struct hinic_msix_config interrupt_info = {0};
+       struct hinic_intr_coal_info *intr_coal = NULL;
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_rq *rq = rxq->rq;
        struct hinic_qp *qp;
        int err;
 
+       qp = container_of(rq, struct hinic_qp, rq);
+
        rx_add_napi(rxq);
 
        hinic_hwdev_msix_set(hwdev, rq->msix_entry,
@@ -490,13 +533,26 @@ static int rx_request_irq(struct hinic_rxq *rxq)
                             RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT,
                             RX_IRQ_NO_RESEND_TIMER);
 
+       intr_coal = &nic_dev->rx_intr_coalesce[qp->q_id];
+       interrupt_info.msix_index = rq->msix_entry;
+       interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
+       interrupt_info.pending_cnt = intr_coal->pending_limt;
+       interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;
+
+       err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
+       if (err) {
+               netif_err(nic_dev, drv, rxq->netdev,
+                         "Failed to set RX interrupt coalescing attribute\n");
+               rx_del_napi(rxq);
+               return err;
+       }
+
        err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
        if (err) {
                rx_del_napi(rxq);
                return err;
        }
 
-       qp = container_of(rq, struct hinic_qp, rq);
        cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
        return irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
 }
index efab2dd..caf7e81 100644 (file)
@@ -383,7 +383,7 @@ static int hinic_del_vf_mac_msg_handler(void *hwdev, u16 vf_id,
 
        nic_io = &hw_dev->func_to_io;
        vf_info = nic_io->vf_infos + HW_VF_ID_TO_OS(vf_id);
-       if (vf_info->pf_set_mac  && is_valid_ether_addr(mac_in->mac) &&
+       if (vf_info->pf_set_mac && is_valid_ether_addr(mac_in->mac) &&
            !memcmp(vf_info->vf_mac_addr, mac_in->mac, ETH_ALEN)) {
                dev_warn(&hw_dev->hwif->pdev->dev, "PF has already set VF mac.\n");
                mac_out->status = HINIC_PF_SET_VF_ALREADY;
@@ -905,7 +905,6 @@ int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
 
        err = hinic_set_vf_spoofchk(sriov_info->hwdev,
                                    OS_VF_ID_TO_HW(vf), setting);
-
        if (!err) {
                netif_info(nic_dev, drv, netdev, "Set VF %d spoofchk %s successfully\n",
                           vf, setting ? "on" : "off");
@@ -1020,6 +1019,7 @@ static int cfg_mbx_pf_proc_vf_msg(void *hwdev, u16 vf_id, u8 cmd, void *buf_in,
        dev_cap->max_vf = cap->max_vf;
        dev_cap->max_sqs = cap->max_vf_qps;
        dev_cap->max_rqs = cap->max_vf_qps;
+       dev_cap->port_id = dev->port_id;
 
        *out_size = sizeof(*dev_cap);
 
@@ -1060,9 +1060,7 @@ static int hinic_init_vf_infos(struct hinic_func_to_io *nic_io, u16 vf_id)
 static void hinic_clear_vf_infos(struct hinic_dev *nic_dev, u16 vf_id)
 {
        struct vf_data_storage *vf_infos;
-       u16 func_id;
 
-       func_id = hinic_glb_pf_vf_offset(nic_dev->hwdev->hwif) + vf_id;
        vf_infos = nic_dev->hwdev->func_to_io.vf_infos + HW_VF_ID_TO_OS(vf_id);
        if (vf_infos->pf_set_mac)
                hinic_port_del_mac(nic_dev, vf_infos->vf_mac_addr, 0);
index 4c66a0b..a97498e 100644 (file)
@@ -459,6 +459,67 @@ static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
        return 0;
 }
 
+netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+       struct hinic_dev *nic_dev = netdev_priv(netdev);
+       u16 prod_idx, q_id = skb->queue_mapping;
+       struct netdev_queue *netdev_txq;
+       int nr_sges, err = NETDEV_TX_OK;
+       struct hinic_sq_wqe *sq_wqe;
+       unsigned int wqe_size;
+       struct hinic_txq *txq;
+       struct hinic_qp *qp;
+
+       txq = &nic_dev->txqs[q_id];
+       qp = container_of(txq->sq, struct hinic_qp, sq);
+       nr_sges = skb_shinfo(skb)->nr_frags + 1;
+
+       err = tx_map_skb(nic_dev, skb, txq->sges);
+       if (err)
+               goto skb_error;
+
+       wqe_size = HINIC_SQ_WQE_SIZE(nr_sges);
+
+       sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
+       if (!sq_wqe) {
+               netif_stop_subqueue(netdev, qp->q_id);
+
+               sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
+               if (sq_wqe) {
+                       netif_wake_subqueue(nic_dev->netdev, qp->q_id);
+                       goto process_sq_wqe;
+               }
+
+               tx_unmap_skb(nic_dev, skb, txq->sges);
+
+               u64_stats_update_begin(&txq->txq_stats.syncp);
+               txq->txq_stats.tx_busy++;
+               u64_stats_update_end(&txq->txq_stats.syncp);
+               err = NETDEV_TX_BUSY;
+               wqe_size = 0;
+               goto flush_skbs;
+       }
+
+process_sq_wqe:
+       hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
+       hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
+
+flush_skbs:
+       netdev_txq = netdev_get_tx_queue(netdev, q_id);
+       if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
+               hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
+
+       return err;
+
+skb_error:
+       dev_kfree_skb_any(skb);
+       u64_stats_update_begin(&txq->txq_stats.syncp);
+       txq->txq_stats.tx_dropped++;
+       u64_stats_update_end(&txq->txq_stats.syncp);
+
+       return NETDEV_TX_OK;
+}
+
 netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
        struct hinic_dev *nic_dev = netdev_priv(netdev);
@@ -718,12 +779,17 @@ static irqreturn_t tx_irq(int irq, void *data)
 static int tx_request_irq(struct hinic_txq *txq)
 {
        struct hinic_dev *nic_dev = netdev_priv(txq->netdev);
+       struct hinic_msix_config interrupt_info = {0};
+       struct hinic_intr_coal_info *intr_coal = NULL;
        struct hinic_hwdev *hwdev = nic_dev->hwdev;
        struct hinic_hwif *hwif = hwdev->hwif;
        struct pci_dev *pdev = hwif->pdev;
        struct hinic_sq *sq = txq->sq;
+       struct hinic_qp *qp;
        int err;
 
+       qp = container_of(sq, struct hinic_qp, sq);
+
        tx_napi_add(txq, nic_dev->tx_weight);
 
        hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
@@ -731,6 +797,20 @@ static int tx_request_irq(struct hinic_txq *txq)
                             TX_IRQ_NO_LLI_TIMER, TX_IRQ_NO_CREDIT,
                             TX_IRQ_NO_RESEND_TIMER);
 
+       intr_coal = &nic_dev->tx_intr_coalesce[qp->q_id];
+       interrupt_info.msix_index = sq->msix_entry;
+       interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
+       interrupt_info.pending_cnt = intr_coal->pending_limt;
+       interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;
+
+       err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
+       if (err) {
+               netif_err(nic_dev, drv, txq->netdev,
+                         "Failed to set TX interrupt coalescing attribute\n");
+               tx_napi_del(txq);
+               return err;
+       }
+
        err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
        if (err) {
                dev_err(&pdev->dev, "Failed to request Tx irq\n");
index f158b7d..b3c8657 100644 (file)
@@ -44,6 +44,8 @@ void hinic_txq_clean_stats(struct hinic_txq *txq);
 
 void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats);
 
+netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+
 netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 
 int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
index 96d36ae..c5c7326 100644 (file)
@@ -1715,7 +1715,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
        }
 
        netdev->min_mtu = IBMVETH_MIN_MTU;
-       netdev->max_mtu = ETH_MAX_MTU;
+       netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
 
        memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
 
index 1b4d04e..0fd7eae 100644 (file)
@@ -842,12 +842,13 @@ static int ibmvnic_login(struct net_device *netdev)
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        unsigned long timeout = msecs_to_jiffies(30000);
        int retry_count = 0;
+       int retries = 10;
        bool retry;
        int rc;
 
        do {
                retry = false;
-               if (retry_count > IBMVNIC_MAX_QUEUES) {
+               if (retry_count > retries) {
                        netdev_warn(netdev, "Login attempts exceeded\n");
                        return -1;
                }
@@ -862,11 +863,23 @@ static int ibmvnic_login(struct net_device *netdev)
 
                if (!wait_for_completion_timeout(&adapter->init_done,
                                                 timeout)) {
-                       netdev_warn(netdev, "Login timed out\n");
-                       return -1;
+                       netdev_warn(netdev, "Login timed out, retrying...\n");
+                       retry = true;
+                       adapter->init_done_rc = 0;
+                       retry_count++;
+                       continue;
                }
 
-               if (adapter->init_done_rc == PARTIALSUCCESS) {
+               if (adapter->init_done_rc == ABORTED) {
+                       netdev_warn(netdev, "Login aborted, retrying...\n");
+                       retry = true;
+                       adapter->init_done_rc = 0;
+                       retry_count++;
+                       /* FW or device may be busy, so
+                        * wait a bit before retrying login
+                        */
+                       msleep(500);
+               } else if (adapter->init_done_rc == PARTIALSUCCESS) {
                        retry_count++;
                        release_sub_crqs(adapter, 1);
 
@@ -1958,13 +1971,18 @@ static int do_reset(struct ibmvnic_adapter *adapter,
                        release_sub_crqs(adapter, 1);
                } else {
                        rc = ibmvnic_reset_crq(adapter);
-                       if (!rc)
+                       if (rc == H_CLOSED || rc == H_SUCCESS) {
                                rc = vio_enable_interrupts(adapter->vdev);
+                               if (rc)
+                                       netdev_err(adapter->netdev,
+                                                  "Reset failed to enable interrupts. rc=%d\n",
+                                                  rc);
+                       }
                }
 
                if (rc) {
                        netdev_err(adapter->netdev,
-                                  "Couldn't initialize crq. rc=%d\n", rc);
+                                  "Reset couldn't initialize crq. rc=%d\n", rc);
                        goto out;
                }
 
index 48a8f9a..3cd13fd 100644 (file)
@@ -34,7 +34,7 @@ config E100
          to identify the adapter.
 
          More specific information on configuring the driver is in
-         <file:Documentation/networking/device_drivers/intel/e100.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/intel/e100.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called e100.
@@ -50,7 +50,7 @@ config E1000
          <http://support.intel.com>
 
          More specific information on configuring the driver is in
-         <file:Documentation/networking/device_drivers/intel/e1000.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/intel/e1000.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called e1000.
@@ -70,7 +70,7 @@ config E1000E
          <http://support.intel.com>
 
          More specific information on configuring the driver is in
-         <file:Documentation/networking/device_drivers/intel/e1000e.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/intel/e1000e.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called e1000e.
@@ -98,7 +98,7 @@ config IGB
          <http://support.intel.com>
 
          More specific information on configuring the driver is in
-         <file:Documentation/networking/device_drivers/intel/igb.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/intel/igb.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called igb.
@@ -134,7 +134,7 @@ config IGBVF
          <http://support.intel.com>
 
          More specific information on configuring the driver is in
-         <file:Documentation/networking/device_drivers/intel/igbvf.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/intel/igbvf.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called igbvf.
@@ -151,7 +151,7 @@ config IXGB
          <http://support.intel.com>
 
          More specific information on configuring the driver is in
-         <file:Documentation/networking/device_drivers/intel/ixgb.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/intel/ixgb.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called ixgb.
@@ -170,7 +170,7 @@ config IXGBE
          <http://support.intel.com>
 
          More specific information on configuring the driver is in
-         <file:Documentation/networking/device_drivers/intel/ixgbe.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/intel/ixgbe.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called ixgbe.
@@ -222,7 +222,7 @@ config IXGBEVF
          <http://support.intel.com>
 
          More specific information on configuring the driver is in
-         <file:Documentation/networking/device_drivers/intel/ixgbevf.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/intel/ixgbevf.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called ixgbevf.  MSI-X interrupt support is required
@@ -249,7 +249,7 @@ config I40E
          <http://support.intel.com>
 
          More specific information on configuring the driver is in
-         <file:Documentation/networking/device_drivers/intel/i40e.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/intel/i40e.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called i40e.
@@ -284,7 +284,7 @@ config I40EVF
          This driver was formerly named i40evf.
 
          More specific information on configuring the driver is in
-         <file:Documentation/networking/device_drivers/intel/iavf.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/intel/iavf.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called iavf.  MSI-X interrupt support is required
@@ -303,7 +303,7 @@ config ICE
          <http://support.intel.com>
 
          More specific information on configuring the driver is in
-         <file:Documentation/networking/device_drivers/intel/ice.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/intel/ice.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called ice.
@@ -321,7 +321,7 @@ config FM10K
          <http://support.intel.com>
 
          More specific information on configuring the driver is in
-         <file:Documentation/networking/device_drivers/intel/fm10k.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/intel/fm10k.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called fm10k.  MSI-X interrupt support is required
index 1b8d015..91c64f9 100644 (file)
 
 
 #define DRV_NAME               "e100"
-#define DRV_EXT                        "-NAPI"
-#define DRV_VERSION            "3.5.24-k2"DRV_EXT
 #define DRV_DESCRIPTION                "Intel(R) PRO/100 Network Driver"
 #define DRV_COPYRIGHT          "Copyright(c) 1999-2006 Intel Corporation"
 
 MODULE_DESCRIPTION(DRV_DESCRIPTION);
 MODULE_AUTHOR(DRV_COPYRIGHT);
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
 MODULE_FIRMWARE(FIRMWARE_D101M);
 MODULE_FIRMWARE(FIRMWARE_D101S);
 MODULE_FIRMWARE(FIRMWARE_D102E);
@@ -2430,7 +2427,6 @@ static void e100_get_drvinfo(struct net_device *netdev,
 {
        struct nic *nic = netdev_priv(netdev);
        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
-       strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(nic->pdev),
                sizeof(info->bus_info));
 }
@@ -3167,7 +3163,7 @@ static struct pci_driver e100_driver = {
 static int __init e100_init_module(void)
 {
        if (((1 << debug) - 1) & NETIF_MSG_DRV) {
-               pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
+               pr_info("%s\n", DRV_DESCRIPTION);
                pr_info("%s\n", DRV_COPYRIGHT);
        }
        return pci_register_driver(&e100_driver);
index 7fad2f2..4817eb1 100644 (file)
@@ -330,7 +330,6 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
        dev_err(&adapter->pdev->dev, format, ## arg)
 
 extern char e1000_driver_name[];
-extern const char e1000_driver_version[];
 
 int e1000_open(struct net_device *netdev);
 int e1000_close(struct net_device *netdev);
index 6f45df5..0b4196d 100644 (file)
@@ -533,8 +533,6 @@ static void e1000_get_drvinfo(struct net_device *netdev,
 
        strlcpy(drvinfo->driver,  e1000_driver_name,
                sizeof(drvinfo->driver));
-       strlcpy(drvinfo->version, e1000_driver_version,
-               sizeof(drvinfo->version));
 
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
index 623e516..4e7a081 100644 (file)
@@ -4526,7 +4526,7 @@ s32 e1000_setup_led(struct e1000_hw *hw)
                                                     ~IGP01E1000_GMII_SPD));
                if (ret_val)
                        return ret_val;
-               /* Fall Through */
+               fallthrough;
        default:
                if (hw->media_type == e1000_media_type_fiber) {
                        ledctl = er32(LEDCTL);
@@ -4571,7 +4571,7 @@ s32 e1000_cleanup_led(struct e1000_hw *hw)
                                              hw->phy_spd_default);
                if (ret_val)
                        return ret_val;
-               /* Fall Through */
+               fallthrough;
        default:
                /* Restore LEDCTL settings */
                ew32(LEDCTL, hw->ledctl_default);
index d9fa460..1e6ec08 100644 (file)
@@ -10,8 +10,6 @@
 
 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.21-k8-NAPI"
-const char e1000_driver_version[] = DRV_VERSION;
 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
 /* e1000_pci_tbl - PCI Device ID Table
@@ -151,10 +149,8 @@ static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
                                  __be16 proto, u16 vid);
 static void e1000_restore_vlan(struct e1000_adapter *adapter);
 
-#ifdef CONFIG_PM
-static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
-static int e1000_resume(struct pci_dev *pdev);
-#endif
+static int __maybe_unused e1000_suspend(struct device *dev);
+static int __maybe_unused e1000_resume(struct device *dev);
 static void e1000_shutdown(struct pci_dev *pdev);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -179,16 +175,16 @@ static const struct pci_error_handlers e1000_err_handler = {
        .resume = e1000_io_resume,
 };
 
+static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);
+
 static struct pci_driver e1000_driver = {
        .name     = e1000_driver_name,
        .id_table = e1000_pci_tbl,
        .probe    = e1000_probe,
        .remove   = e1000_remove,
-#ifdef CONFIG_PM
-       /* Power Management Hooks */
-       .suspend  = e1000_suspend,
-       .resume   = e1000_resume,
-#endif
+       .driver = {
+               .pm = &e1000_pm_ops,
+       },
        .shutdown = e1000_shutdown,
        .err_handler = &e1000_err_handler
 };
@@ -196,7 +192,6 @@ static struct pci_driver e1000_driver = {
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
 
 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
 static int debug = -1;
@@ -223,7 +218,7 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
 static int __init e1000_init_module(void)
 {
        int ret;
-       pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
+       pr_info("%s\n", e1000_driver_string);
 
        pr_info("%s\n", e1000_copyright);
 
@@ -1143,7 +1138,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                                EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
                        break;
                }
-               /* Fall Through */
+               fallthrough;
        default:
                e1000_read_eeprom(hw,
                        EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
@@ -3159,7 +3154,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
                                if ((unsigned long)(skb_tail_pointer(skb) - 1)
                                    & 4)
                                        break;
-                               /* fall through */
                                pull_size = min((unsigned int)4, skb->data_len);
                                if (!__pskb_pull_tail(skb, pull_size)) {
                                        e_err(drv, "__pskb_pull_tail "
@@ -5060,9 +5054,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, ctrl_ext, rctl, status;
        u32 wufc = adapter->wol;
-#ifdef CONFIG_PM
-       int retval = 0;
-#endif
 
        netif_device_detach(netdev);
 
@@ -5076,12 +5067,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
                e1000_down(adapter);
        }
 
-#ifdef CONFIG_PM
-       retval = pci_save_state(pdev);
-       if (retval)
-               return retval;
-#endif
-
        status = er32(STATUS);
        if (status & E1000_STATUS_LU)
                wufc &= ~E1000_WUFC_LNKC;
@@ -5142,37 +5127,26 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused e1000_suspend(struct device *dev)
 {
        int retval;
+       struct pci_dev *pdev = to_pci_dev(dev);
        bool wake;
 
        retval = __e1000_shutdown(pdev, &wake);
-       if (retval)
-               return retval;
-
-       if (wake) {
-               pci_prepare_to_sleep(pdev);
-       } else {
-               pci_wake_from_d3(pdev, false);
-               pci_set_power_state(pdev, PCI_D3hot);
-       }
+       device_set_wakeup_enable(dev, wake);
 
-       return 0;
+       return retval;
 }
 
-static int e1000_resume(struct pci_dev *pdev)
+static int __maybe_unused e1000_resume(struct device *dev)
 {
+       struct pci_dev *pdev = to_pci_dev(dev);
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 err;
 
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-       pci_save_state(pdev);
-
        if (adapter->need_ioport)
                err = pci_enable_device(pdev);
        else
@@ -5209,7 +5183,6 @@ static int e1000_resume(struct pci_dev *pdev)
 
        return 0;
 }
-#endif
 
 static void e1000_shutdown(struct pci_dev *pdev)
 {
index d3f29ff..4d4f5bf 100644 (file)
@@ -708,7 +708,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter)
                goto full_duplex_only;
        case SPEED_1000 + HALF_DUPLEX:
                e_dev_info("Half Duplex is not supported at 1000 Mbps\n");
-               /* fall through */
+               fallthrough;
        case SPEED_1000 + FULL_DUPLEX:
 full_duplex_only:
                e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex "
index 2c1bab3..88faf05 100644 (file)
@@ -154,7 +154,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
                        ew32(EECD, eecd);
                        break;
                }
-               /* Fall Through */
+               fallthrough;
        default:
                nvm->type = e1000_nvm_eeprom_spi;
                size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
@@ -1107,7 +1107,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
        switch (mac->type) {
        case e1000_82573:
                e1000e_enable_tx_pkt_filtering(hw);
-               /* fall through */
+               fallthrough;
        case e1000_82574:
        case e1000_82583:
                reg_data = er32(GCR);
index 944abd5..ba7a0f8 100644 (file)
@@ -460,7 +460,6 @@ enum latency_range {
 };
 
 extern char e1000e_driver_name[];
-extern const char e1000e_driver_version[];
 
 void e1000e_check_options(struct e1000_adapter *adapter);
 void e1000e_set_ethtool_ops(struct net_device *netdev);
index 1d47e25..64f684d 100644 (file)
@@ -633,8 +633,6 @@ static void e1000_get_drvinfo(struct net_device *netdev,
        struct e1000_adapter *adapter = netdev_priv(netdev);
 
        strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
-       strlcpy(drvinfo->version, e1000e_driver_version,
-               sizeof(drvinfo->version));
 
        /* EEPROM image version # is reported as firmware version # for
         * PCI-E controllers
@@ -895,7 +893,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
        case e1000_pch_lpt:
        case e1000_pch_spt:
        case e1000_pch_cnp:
-               /* fall through */
        case e1000_pch_tgp:
        case e1000_pch_adp:
                mask |= BIT(18);
@@ -1571,7 +1568,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
                /* set bit 29 (value of MULR requests is now 0) */
                tarc0 &= 0xcfffffff;
                ew32(TARC(0), tarc0);
-               /* fall through */
+               fallthrough;
        case e1000_80003es2lan:
                if (hw->phy.media_type == e1000_media_type_fiber ||
                    hw->phy.media_type == e1000_media_type_internal_serdes) {
@@ -1579,7 +1576,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
                        ew32(CTRL_EXT, adapter->tx_fifo_head);
                        adapter->tx_fifo_head = 0;
                }
-               /* fall through */
+               fallthrough;
        case e1000_82571:
        case e1000_82572:
                if (hw->phy.media_type == e1000_media_type_fiber ||
@@ -1589,7 +1586,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
                        usleep_range(10000, 11000);
                        break;
                }
-               /* Fall Through */
+               fallthrough;
        default:
                hw->mac.autoneg = 1;
                if (hw->phy.type == e1000_phy_gg82563)
@@ -2124,7 +2121,7 @@ static int e1000_get_rxnfc(struct net_device *netdev,
                case TCP_V4_FLOW:
                        if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
                                info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-                       /* fall through */
+                       fallthrough;
                case UDP_V4_FLOW:
                case SCTP_V4_FLOW:
                case AH_ESP_V4_FLOW:
@@ -2135,7 +2132,7 @@ static int e1000_get_rxnfc(struct net_device *netdev,
                case TCP_V6_FLOW:
                        if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
                                info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-                       /* fall through */
+                       fallthrough;
                case UDP_V6_FLOW:
                case SCTP_V6_FLOW:
                case AH_ESP_V6_FLOW:
index f999cca..ae0a633 100644 (file)
@@ -338,12 +338,12 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
                 */
                msleep(50);
 
-               /* fall-through */
+               fallthrough;
        case e1000_pch2lan:
                if (e1000_phy_is_accessible_pchlan(hw))
                        break;
 
-               /* fall-through */
+               fallthrough;
        case e1000_pchlan:
                if ((hw->mac.type == e1000_pchlan) &&
                    (fwsm & E1000_ICH_FWSM_FW_VALID))
@@ -459,7 +459,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
                                return ret_val;
                        if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
                                break;
-                       /* fall-through */
+                       fallthrough;
                case e1000_pch2lan:
                case e1000_pch_lpt:
                case e1000_pch_spt:
@@ -704,7 +704,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
        case e1000_pch2lan:
                mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
                mac->ops.rar_set = e1000_rar_set_pch2lan;
-               /* fall-through */
+               fallthrough;
        case e1000_pch_lpt:
        case e1000_pch_spt:
        case e1000_pch_cnp:
@@ -1559,7 +1559,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
                ret_val = e1000_k1_workaround_lv(hw);
                if (ret_val)
                        return ret_val;
-               /* fall-thru */
+               fallthrough;
        case e1000_pchlan:
                if (hw->phy.type == e1000_phy_82578) {
                        ret_val = e1000_link_stall_workaround_hv(hw);
@@ -2096,7 +2096,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
                        sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
                        break;
                }
-               /* Fall-thru */
+               fallthrough;
        case e1000_pchlan:
        case e1000_pch2lan:
        case e1000_pch_lpt:
@@ -3189,7 +3189,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
                        return 0;
                }
                e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n");
-               /* fall-thru */
+               fallthrough;
        default:
                /* set bank to 0 in case flash read fails */
                *bank = 0;
index a279f4f..63dde3b 100644 (file)
 
 #include "e1000.h"
 
-#define DRV_EXTRAVERSION "-k"
-
-#define DRV_VERSION "3.2.6" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
-const char e1000e_driver_version[] = DRV_VERSION;
 
 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
 static int debug = -1;
@@ -2111,7 +2107,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
                        e1000e_reset_interrupt_capability(adapter);
                }
                adapter->int_mode = E1000E_INT_MODE_MSI;
-               /* Fall through */
+               fallthrough;
        case E1000E_INT_MODE_MSI:
                if (!pci_enable_msi(adapter->pdev)) {
                        adapter->flags |= FLAG_MSI_ENABLED;
@@ -2119,7 +2115,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
                        adapter->int_mode = E1000E_INT_MODE_LEGACY;
                        e_err("Failed to initialize MSI interrupts.  Falling back to legacy interrupts.\n");
                }
-               /* Fall through */
+               fallthrough;
        case E1000E_INT_MODE_LEGACY:
                /* Don't do anything; this is the system default */
                break;
@@ -3177,10 +3173,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
                switch (adapter->rx_ps_pages) {
                case 3:
                        psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
-                       /* fall-through */
+                       fallthrough;
                case 2:
                        psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
-                       /* fall-through */
+                       fallthrough;
                case 1:
                        psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
                        break;
@@ -3677,9 +3673,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
                is_l2 = true;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
-               /* Hardware cannot filter just V2 L4 Sync messages;
-                * fall-through to V2 (both L2 and L4) Sync.
-                */
+               /* Hardware cannot filter just V2 L4 Sync messages */
+               fallthrough;
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
                /* Also time stamps V2 Path Delay Request/Response. */
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
@@ -3688,9 +3683,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
                is_l4 = true;
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
-               /* Hardware cannot filter just V2 L4 Delay Request messages;
-                * fall-through to V2 (both L2 and L4) Delay Request.
-                */
+               /* Hardware cannot filter just V2 L4 Delay Request messages */
+               fallthrough;
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                /* Also time stamps V2 Path Delay Request/Response. */
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
@@ -3700,9 +3694,8 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
                break;
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
-               /* Hardware cannot filter just V2 L4 or L2 Event messages;
-                * fall-through to all V2 (both L2 and L4) Events.
-                */
+               /* Hardware cannot filter just V2 L4 or L2 Event messages */
+               fallthrough;
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
                tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
                config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
@@ -3714,6 +3707,7 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
                 * Delay Request messages but not both so fall-through to
                 * time stamp all packets.
                 */
+               fallthrough;
        case HWTSTAMP_FILTER_NTP_ALL:
        case HWTSTAMP_FILTER_ALL:
                is_l2 = true;
@@ -4060,7 +4054,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
                        fc->low_water = fc->high_water - 8;
                        break;
                }
-               /* fall-through */
+               fallthrough;
        default:
                hwm = min(((pba << 10) * 9 / 10),
                          ((pba << 10) - adapter->max_frame_size));
@@ -4085,7 +4079,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
        case e1000_pch_lpt:
        case e1000_pch_spt:
        case e1000_pch_cnp:
-               /* fall-through */
+               fallthrough;
        case e1000_pch_tgp:
        case e1000_pch_adp:
                fc->refresh_time = 0xFFFF;
@@ -6349,7 +6343,6 @@ fl_out:
        pm_runtime_put_sync(netdev->dev.parent);
 }
 
-#ifdef CONFIG_PM_SLEEP
 /* S0ix implementation */
 static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
 {
@@ -6571,7 +6564,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
        mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS;
        ew32(CTRL_EXT, mac_data);
 }
-#endif /* CONFIG_PM_SLEEP */
 
 static int e1000e_pm_freeze(struct device *dev)
 {
@@ -6611,11 +6603,17 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
-       u32 ctrl, ctrl_ext, rctl, status;
-       /* Runtime suspend should only enable wakeup for link changes */
-       u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
+       u32 ctrl, ctrl_ext, rctl, status, wufc;
        int retval = 0;
 
+       /* Runtime suspend should only enable wakeup for link changes */
+       if (runtime)
+               wufc = E1000_WUFC_LNKC;
+       else if (device_may_wakeup(&pdev->dev))
+               wufc = adapter->wol;
+       else
+               wufc = 0;
+
        status = er32(STATUS);
        if (status & E1000_STATUS_LU)
                wufc &= ~E1000_WUFC_LNKC;
@@ -6672,7 +6670,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
        if (adapter->hw.phy.type == e1000_phy_igp_3) {
                e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
        } else if (hw->mac.type >= e1000_pch_lpt) {
-               if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
+               if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
                        /* ULP does not support wake from unicast, multicast
                         * or broadcast.
                         */
@@ -6764,7 +6762,7 @@ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state, int locked)
        case PCIE_LINK_STATE_L0S:
        case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1:
                aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
-               /* fall-through - can't have L1 without L0s */
+               fallthrough; /* can't have L1 without L0s */
        case PCIE_LINK_STATE_L1:
                aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
                break;
@@ -6869,7 +6867,6 @@ err_irq:
        return rc;
 }
 
-#ifdef CONFIG_PM
 static int __e1000_resume(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
@@ -6935,8 +6932,7 @@ static int __e1000_resume(struct pci_dev *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int e1000e_pm_suspend(struct device *dev)
+static __maybe_unused int e1000e_pm_suspend(struct device *dev)
 {
        struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
        struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -6960,7 +6956,7 @@ static int e1000e_pm_suspend(struct device *dev)
        return rc;
 }
 
-static int e1000e_pm_resume(struct device *dev)
+static __maybe_unused int e1000e_pm_resume(struct device *dev)
 {
        struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
        struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -6979,9 +6975,8 @@ static int e1000e_pm_resume(struct device *dev)
 
        return e1000e_pm_thaw(dev);
 }
-#endif /* CONFIG_PM_SLEEP */
 
-static int e1000e_pm_runtime_idle(struct device *dev)
+static __maybe_unused int e1000e_pm_runtime_idle(struct device *dev)
 {
        struct net_device *netdev = dev_get_drvdata(dev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -6997,7 +6992,7 @@ static int e1000e_pm_runtime_idle(struct device *dev)
        return -EBUSY;
 }
 
-static int e1000e_pm_runtime_resume(struct device *dev)
+static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7014,7 +7009,7 @@ static int e1000e_pm_runtime_resume(struct device *dev)
        return rc;
 }
 
-static int e1000e_pm_runtime_suspend(struct device *dev)
+static __maybe_unused int e1000e_pm_runtime_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7039,7 +7034,6 @@ static int e1000e_pm_runtime_suspend(struct device *dev)
 
        return 0;
 }
-#endif /* CONFIG_PM */
 
 static void e1000_shutdown(struct pci_dev *pdev)
 {
@@ -7899,8 +7893,7 @@ static struct pci_driver e1000_driver = {
  **/
 static int __init e1000_init_module(void)
 {
-       pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
-               e1000e_driver_version);
+       pr_info("Intel(R) PRO/1000 Network Driver\n");
        pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n");
 
        return pci_register_driver(&e1000_driver);
@@ -7922,6 +7915,5 @@ module_exit(e1000_exit_module);
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
 
 /* netdev.c */
index 098369f..ebe121d 100644 (file)
@@ -375,7 +375,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
                                 "%s Invalid mode - setting default\n",
                                 opt.name);
                        adapter->itr_setting = opt.def;
-                       /* fall-through */
+                       fallthrough;
                case 3:
                        dev_info(&adapter->pdev->dev,
                                 "%s set to dynamic conservative mode\n",
index 4223301..e11c877 100644 (file)
@@ -607,7 +607,7 @@ static s32 e1000_set_master_slave_mode(struct e1000_hw *hw)
                break;
        case e1000_ms_auto:
                phy_data &= ~CTL1000_ENABLE_MASTER;
-               /* fall-through */
+               fallthrough;
        default:
                break;
        }
index 439fda2..34b988d 100644 (file)
@@ -295,7 +295,6 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
        case e1000_pch_lpt:
        case e1000_pch_spt:
        case e1000_pch_cnp:
-               /* fall-through */
        case e1000_pch_tgp:
        case e1000_pch_adp:
                if ((hw->mac.type < e1000_pch_lpt) ||
@@ -303,7 +302,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
                        adapter->ptp_clock_info.max_adj = 24000000 - 1;
                        break;
                }
-               /* fall-through */
+               fallthrough;
        case e1000_82574:
        case e1000_82583:
                adapter->ptp_clock_info.max_adj = 600000000 - 1;
index 5b78362..6119a41 100644 (file)
@@ -221,12 +221,6 @@ struct fm10k_iov_data {
        struct fm10k_vf_info    vf_info[];
 };
 
-struct fm10k_udp_port {
-       struct list_head        list;
-       sa_family_t             sa_family;
-       __be16                  port;
-};
-
 enum fm10k_macvlan_request_type {
        FM10K_UC_MAC_REQUEST,
        FM10K_MC_MAC_REQUEST,
@@ -370,8 +364,8 @@ struct fm10k_intfc {
        u32 rssrk[FM10K_RSSRK_SIZE];
 
        /* UDP encapsulation port tracking information */
-       struct list_head vxlan_port;
-       struct list_head geneve_port;
+       __be16 vxlan_port;
+       __be16 geneve_port;
 
        /* MAC/VLAN update queue */
        struct list_head macvlan_requests;
@@ -476,7 +470,6 @@ struct fm10k_cb {
 
 /* main */
 extern char fm10k_driver_name[];
-extern const char fm10k_driver_version[];
 int fm10k_init_queueing_scheme(struct fm10k_intfc *interface);
 void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface);
 __be16 fm10k_tx_encap_offload(struct sk_buff *skb);
index 37fbc64..908fefa 100644 (file)
@@ -449,8 +449,6 @@ static void fm10k_get_drvinfo(struct net_device *dev,
 
        strncpy(info->driver, fm10k_driver_name,
                sizeof(info->driver) - 1);
-       strncpy(info->version, fm10k_driver_version,
-               sizeof(info->version) - 1);
        strncpy(info->bus_info, pci_name(interface->pdev),
                sizeof(info->bus_info) - 1);
 }
@@ -694,12 +692,12 @@ static int fm10k_get_rss_hash_opts(struct fm10k_intfc *interface,
        case TCP_V4_FLOW:
        case TCP_V6_FLOW:
                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-               /* fall through */
+               fallthrough;
        case UDP_V4_FLOW:
                if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP,
                             interface->flags))
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-               /* fall through */
+               fallthrough;
        case SCTP_V4_FLOW:
        case SCTP_V6_FLOW:
        case AH_ESP_V4_FLOW:
index 17738b0..d88dd41 100644 (file)
@@ -11,9 +11,7 @@
 
 #include "fm10k.h"
 
-#define DRV_VERSION    "0.27.1-k"
 #define DRV_SUMMARY    "Intel(R) Ethernet Switch Host Interface Driver"
-const char fm10k_driver_version[] = DRV_VERSION;
 char fm10k_driver_name[] = "fm10k";
 static const char fm10k_driver_string[] = DRV_SUMMARY;
 static const char fm10k_copyright[] =
@@ -22,7 +20,6 @@ static const char fm10k_copyright[] =
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION(DRV_SUMMARY);
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
 
 /* single workqueue for entire fm10k driver */
 struct workqueue_struct *fm10k_workqueue;
@@ -35,7 +32,7 @@ struct workqueue_struct *fm10k_workqueue;
  **/
 static int __init fm10k_init_module(void)
 {
-       pr_info("%s - version %s\n", fm10k_driver_string, fm10k_driver_version);
+       pr_info("%s\n", fm10k_driver_string);
        pr_info("%s\n", fm10k_copyright);
 
        /* create driver workqueue */
@@ -638,15 +635,8 @@ static int fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
 static struct ethhdr *fm10k_port_is_vxlan(struct sk_buff *skb)
 {
        struct fm10k_intfc *interface = netdev_priv(skb->dev);
-       struct fm10k_udp_port *vxlan_port;
 
-       /* we can only offload a vxlan if we recognize it as such */
-       vxlan_port = list_first_entry_or_null(&interface->vxlan_port,
-                                             struct fm10k_udp_port, list);
-
-       if (!vxlan_port)
-               return NULL;
-       if (vxlan_port->port != udp_hdr(skb)->dest)
+       if (interface->vxlan_port != udp_hdr(skb)->dest)
                return NULL;
 
        /* return offset of udp_hdr plus 8 bytes for VXLAN header */
@@ -859,7 +849,7 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
        case IPPROTO_GRE:
                if (skb->encapsulation)
                        break;
-               /* fall through */
+               fallthrough;
        default:
                if (unlikely(net_ratelimit())) {
                        dev_warn(tx_ring->dev,
@@ -1557,7 +1547,7 @@ static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
  * important, starting with the "most" number of features turned on at once,
  * and ending with the smallest set of features.  This way large combinations
  * can be allocated if they're turned on, and smaller combinations are the
- * fallthrough conditions.
+ * fall through conditions.
  *
  **/
 static void fm10k_set_num_queues(struct fm10k_intfc *interface)
index 75e51f9..8e2e92b 100644 (file)
@@ -967,7 +967,7 @@ static s32 fm10k_mbx_validate_msg_hdr(struct fm10k_mbx_info *mbx)
                if (tail != mbx->head)
                        return FM10K_MBX_ERR_TAIL;
 
-               /* fall through */
+               fallthrough;
        case FM10K_MSG_DATA:
                /* validate that head is moving correctly */
                if (!head || (head == FM10K_MSG_HDR_MASK(HEAD)))
@@ -987,7 +987,7 @@ static s32 fm10k_mbx_validate_msg_hdr(struct fm10k_mbx_info *mbx)
                if ((size < FM10K_VFMBX_MSG_MTU) || (size & (size + 1)))
                        return FM10K_MBX_ERR_SIZE;
 
-               /* fall through */
+               fallthrough;
        case FM10K_MSG_ERROR:
                if (!head || (head == FM10K_MSG_HDR_MASK(HEAD)))
                        return FM10K_MBX_ERR_HEAD;
@@ -1570,7 +1570,7 @@ s32 fm10k_pfvf_mbx_init(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx,
                        mbx->mbmem_reg = FM10K_MBMEM_VF(id, 0);
                        break;
                }
-               /* fall through */
+               fallthrough;
        default:
                return FM10K_MBX_ERR_NO_MBX;
        }
index 0637cca..5c19ff4 100644 (file)
@@ -367,39 +367,6 @@ static void fm10k_request_glort_range(struct fm10k_intfc *interface)
 }
 
 /**
- * fm10k_free_udp_port_info
- * @interface: board private structure
- *
- * This function frees both geneve_port and vxlan_port structures
- **/
-static void fm10k_free_udp_port_info(struct fm10k_intfc *interface)
-{
-       struct fm10k_udp_port *port;
-
-       /* flush all entries from vxlan list */
-       port = list_first_entry_or_null(&interface->vxlan_port,
-                                       struct fm10k_udp_port, list);
-       while (port) {
-               list_del(&port->list);
-               kfree(port);
-               port = list_first_entry_or_null(&interface->vxlan_port,
-                                               struct fm10k_udp_port,
-                                               list);
-       }
-
-       /* flush all entries from geneve list */
-       port = list_first_entry_or_null(&interface->geneve_port,
-                                       struct fm10k_udp_port, list);
-       while (port) {
-               list_del(&port->list);
-               kfree(port);
-               port = list_first_entry_or_null(&interface->vxlan_port,
-                                               struct fm10k_udp_port,
-                                               list);
-       }
-}
-
-/**
  * fm10k_restore_udp_port_info
  * @interface: board private structure
  *
@@ -408,131 +375,52 @@ static void fm10k_free_udp_port_info(struct fm10k_intfc *interface)
 static void fm10k_restore_udp_port_info(struct fm10k_intfc *interface)
 {
        struct fm10k_hw *hw = &interface->hw;
-       struct fm10k_udp_port *port;
 
        /* only the PF supports configuring tunnels */
        if (hw->mac.type != fm10k_mac_pf)
                return;
 
-       port = list_first_entry_or_null(&interface->vxlan_port,
-                                       struct fm10k_udp_port, list);
-
        /* restore tunnel configuration register */
        fm10k_write_reg(hw, FM10K_TUNNEL_CFG,
-                       (port ? ntohs(port->port) : 0) |
+                       ntohs(interface->vxlan_port) |
                        (ETH_P_TEB << FM10K_TUNNEL_CFG_NVGRE_SHIFT));
 
-       port = list_first_entry_or_null(&interface->geneve_port,
-                                       struct fm10k_udp_port, list);
-
        /* restore Geneve tunnel configuration register */
        fm10k_write_reg(hw, FM10K_TUNNEL_CFG_GENEVE,
-                       (port ? ntohs(port->port) : 0));
-}
-
-static struct fm10k_udp_port *
-fm10k_remove_tunnel_port(struct list_head *ports,
-                        struct udp_tunnel_info *ti)
-{
-       struct fm10k_udp_port *port;
-
-       list_for_each_entry(port, ports, list) {
-               if ((port->port == ti->port) &&
-                   (port->sa_family == ti->sa_family)) {
-                       list_del(&port->list);
-                       return port;
-               }
-       }
-
-       return NULL;
-}
-
-static void fm10k_insert_tunnel_port(struct list_head *ports,
-                                    struct udp_tunnel_info *ti)
-{
-       struct fm10k_udp_port *port;
-
-       /* remove existing port entry from the list so that the newest items
-        * are always at the tail of the list.
-        */
-       port = fm10k_remove_tunnel_port(ports, ti);
-       if (!port) {
-               port = kmalloc(sizeof(*port), GFP_ATOMIC);
-               if  (!port)
-                       return;
-               port->port = ti->port;
-               port->sa_family = ti->sa_family;
-       }
-
-       list_add_tail(&port->list, ports);
+                       ntohs(interface->geneve_port));
 }
 
 /**
- * fm10k_udp_tunnel_add
+ * fm10k_udp_tunnel_sync - Called when UDP tunnel ports change
  * @dev: network interface device structure
- * @ti: Tunnel endpoint information
+ * @table: Tunnel table (according to tables of @fm10k_udp_tunnels)
  *
- * This function is called when a new UDP tunnel port has been added.
+ * This function is called when a new UDP tunnel port is added or deleted.
  * Due to hardware restrictions, only one port per type can be offloaded at
- * once.
+ * once. Core will send to the driver a port of its choice.
  **/
-static void fm10k_udp_tunnel_add(struct net_device *dev,
-                                struct udp_tunnel_info *ti)
+static int fm10k_udp_tunnel_sync(struct net_device *dev, unsigned int table)
 {
        struct fm10k_intfc *interface = netdev_priv(dev);
+       struct udp_tunnel_info ti;
 
-       /* only the PF supports configuring tunnels */
-       if (interface->hw.mac.type != fm10k_mac_pf)
-               return;
-
-       switch (ti->type) {
-       case UDP_TUNNEL_TYPE_VXLAN:
-               fm10k_insert_tunnel_port(&interface->vxlan_port, ti);
-               break;
-       case UDP_TUNNEL_TYPE_GENEVE:
-               fm10k_insert_tunnel_port(&interface->geneve_port, ti);
-               break;
-       default:
-               return;
-       }
+       udp_tunnel_nic_get_port(dev, table, 0, &ti);
+       if (!table)
+               interface->vxlan_port = ti.port;
+       else
+               interface->geneve_port = ti.port;
 
        fm10k_restore_udp_port_info(interface);
+       return 0;
 }
 
-/**
- * fm10k_udp_tunnel_del
- * @dev: network interface device structure
- * @ti: Tunnel end point information
- *
- * This function is called when a new UDP tunnel port is deleted. The freed
- * port will be removed from the list, then we reprogram the offloaded port
- * based on the head of the list.
- **/
-static void fm10k_udp_tunnel_del(struct net_device *dev,
-                                struct udp_tunnel_info *ti)
-{
-       struct fm10k_intfc *interface = netdev_priv(dev);
-       struct fm10k_udp_port *port = NULL;
-
-       if (interface->hw.mac.type != fm10k_mac_pf)
-               return;
-
-       switch (ti->type) {
-       case UDP_TUNNEL_TYPE_VXLAN:
-               port = fm10k_remove_tunnel_port(&interface->vxlan_port, ti);
-               break;
-       case UDP_TUNNEL_TYPE_GENEVE:
-               port = fm10k_remove_tunnel_port(&interface->geneve_port, ti);
-               break;
-       default:
-               return;
-       }
-
-       /* if we did remove a port we need to free its memory */
-       kfree(port);
-
-       fm10k_restore_udp_port_info(interface);
-}
+static const struct udp_tunnel_nic_info fm10k_udp_tunnels = {
+       .sync_table     = fm10k_udp_tunnel_sync,
+       .tables         = {
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+       },
+};
 
 /**
  * fm10k_open - Called when a network interface is made active
@@ -580,8 +468,6 @@ int fm10k_open(struct net_device *netdev)
        if (err)
                goto err_set_queues;
 
-       udp_tunnel_get_rx_info(netdev);
-
        fm10k_up(interface);
 
        return 0;
@@ -615,8 +501,6 @@ int fm10k_close(struct net_device *netdev)
 
        fm10k_qv_free_irq(interface);
 
-       fm10k_free_udp_port_info(interface);
-
        fm10k_free_all_tx_resources(interface);
        fm10k_free_all_rx_resources(interface);
 
@@ -853,7 +737,7 @@ void fm10k_clear_macvlan_queue(struct fm10k_intfc *interface,
                        /* Don't free requests for other interfaces */
                        if (r->mac.glort != glort)
                                break;
-                       /* fall through */
+                       fallthrough;
                case FM10K_VLAN_REQUEST:
                        if (vlans) {
                                list_del(&r->list);
@@ -1647,8 +1531,8 @@ static const struct net_device_ops fm10k_netdev_ops = {
        .ndo_set_vf_rate        = fm10k_ndo_set_vf_bw,
        .ndo_get_vf_config      = fm10k_ndo_get_vf_config,
        .ndo_get_vf_stats       = fm10k_ndo_get_vf_stats,
-       .ndo_udp_tunnel_add     = fm10k_udp_tunnel_add,
-       .ndo_udp_tunnel_del     = fm10k_udp_tunnel_del,
+       .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+       .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
        .ndo_dfwd_add_station   = fm10k_dfwd_add_station,
        .ndo_dfwd_del_station   = fm10k_dfwd_del_station,
        .ndo_features_check     = fm10k_features_check,
@@ -1695,6 +1579,8 @@ struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info)
                                       NETIF_F_SG;
 
                dev->features |= NETIF_F_GSO_UDP_TUNNEL;
+
+               dev->udp_tunnel_nic_info = &fm10k_udp_tunnels;
        }
 
        /* all features defined to this point should be changeable */
index d122d00..140212b 100644 (file)
@@ -2066,10 +2066,6 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
        interface->tx_itr = FM10K_TX_ITR_DEFAULT;
        interface->rx_itr = FM10K_ITR_ADAPTIVE | FM10K_RX_ITR_DEFAULT;
 
-       /* initialize udp port lists */
-       INIT_LIST_HEAD(&interface->vxlan_port);
-       INIT_LIST_HEAD(&interface->geneve_port);
-
        /* Initialize the MAC/VLAN queue */
        INIT_LIST_HEAD(&interface->macvlan_requests);
 
index be07bfd..c0780c3 100644 (file)
@@ -1317,19 +1317,19 @@ static u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info,
        case FM10K_XCAST_MODE_PROMISC:
                if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE)
                        return FM10K_XCAST_MODE_PROMISC;
-               /* fall through */
+               fallthrough;
        case FM10K_XCAST_MODE_ALLMULTI:
                if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE)
                        return FM10K_XCAST_MODE_ALLMULTI;
-               /* fall through */
+               fallthrough;
        case FM10K_XCAST_MODE_MULTI:
                if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE)
                        return FM10K_XCAST_MODE_MULTI;
-               /* fall through */
+               fallthrough;
        case FM10K_XCAST_MODE_NONE:
                if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE)
                        return FM10K_XCAST_MODE_NONE;
-               /* fall through */
+               fallthrough;
        default:
                break;
        }
index e95b8da..a7e212d 100644 (file)
@@ -38,7 +38,7 @@
 #include <net/xdp_sock.h>
 #include "i40e_type.h"
 #include "i40e_prototype.h"
-#include "i40e_client.h"
+#include <linux/net/intel/i40e_client.h>
 #include <linux/avf/virtchnl.h>
 #include "i40e_virtchnl_pf.h"
 #include "i40e_txrx.h"
                (((pf)->hw_features & I40E_HW_RSS_AQ_CAPABLE) ? 4 : 1)
 #define I40E_DEFAULT_QUEUES_PER_VF     4
 #define I40E_MAX_VF_QUEUES             16
-#define I40E_DEFAULT_QUEUES_PER_TC     1 /* should be a power of 2 */
 #define i40e_pf_get_max_q_per_tc(pf) \
                (((pf)->hw_features & I40E_HW_128_QP_RSS_CAPABLE) ? 128 : 64)
-#define I40E_FDIR_RING                 0
 #define I40E_FDIR_RING_COUNT           32
 #define I40E_MAX_AQ_BUF_SIZE           4096
 #define I40E_AQ_LEN                    256
 #define I40E_AQ_WORK_LIMIT             66 /* max number of VFs + a little */
 #define I40E_MAX_USER_PRIORITY         8
 #define I40E_DEFAULT_TRAFFIC_CLASS     BIT(0)
-#define I40E_DEFAULT_MSG_ENABLE                4
 #define I40E_QUEUE_WAIT_RETRY_LIMIT    10
 #define I40E_INT_NAME_STR_LEN          (IFNAMSIZ + 16)
 
 #define I40E_OEM_SNAP_SHIFT            16
 #define I40E_OEM_RELEASE_MASK          0x0000ffff
 
-/* The values in here are decimal coded as hex as is the case in the NVM map*/
-#define I40E_CURRENT_NVM_VERSION_HI    0x2
-#define I40E_CURRENT_NVM_VERSION_LO    0x40
-
 #define I40E_RX_DESC(R, i)     \
        (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
 #define I40E_TX_DESC(R, i)     \
 #define I40E_TX_FDIRDESC(R, i) \
        (&(((struct i40e_filter_program_desc *)((R)->desc))[i]))
 
-/* default to trying for four seconds */
-#define I40E_TRY_LINK_TIMEOUT  (4 * HZ)
-
 /* BW rate limiting */
 #define I40E_BW_CREDIT_DIVISOR         50 /* 50Mbps per BW credit */
 #define I40E_BW_MBPS_DIVISOR           125000 /* rate / (1000000 / 8) Mbps */
@@ -295,9 +285,6 @@ struct i40e_cloud_filter {
        u8 tunnel_type;
 };
 
-#define I40E_DCB_PRIO_TYPE_STRICT      0
-#define I40E_DCB_PRIO_TYPE_ETS         1
-#define I40E_DCB_STRICT_PRIO_CREDITS   127
 /* DCB per TC information data structure */
 struct i40e_tc_info {
        u16     qoffset;        /* Queue offset from base queue */
@@ -357,15 +344,6 @@ struct i40e_ddp_old_profile_list {
                                             I40E_FLEX_SET_FSIZE(fsize) | \
                                             I40E_FLEX_SET_SRC_WORD(src))
 
-#define I40E_FLEX_PIT_GET_SRC(flex) (((flex) & \
-                                    I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) >> \
-                                    I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
-#define I40E_FLEX_PIT_GET_DST(flex) (((flex) & \
-                                    I40E_PRTQF_FLX_PIT_DEST_OFF_MASK) >> \
-                                    I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
-#define I40E_FLEX_PIT_GET_FSIZE(flex) (((flex) & \
-                                      I40E_PRTQF_FLX_PIT_FSIZE_MASK) >> \
-                                      I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
 
 #define I40E_MAX_FLEX_SRC_OFFSET 0x1F
 
@@ -390,7 +368,6 @@ struct i40e_ddp_old_profile_list {
 #define I40E_L4_GLQF_ORT_IDX           35
 
 /* Flex PIT register index */
-#define I40E_FLEX_PIT_IDX_START_L2     0
 #define I40E_FLEX_PIT_IDX_START_L3     3
 #define I40E_FLEX_PIT_IDX_START_L4     6
 
@@ -531,7 +508,6 @@ struct i40e_pf {
 #define I40E_HW_GENEVE_OFFLOAD_CAPABLE         BIT(9)
 #define I40E_HW_PTP_L4_CAPABLE                 BIT(10)
 #define I40E_HW_WOL_MC_MAGIC_PKT_WAKE          BIT(11)
-#define I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE       BIT(12)
 #define I40E_HW_HAVE_CRT_RETIMER               BIT(13)
 #define I40E_HW_OUTER_UDP_CSUM_CAPABLE         BIT(14)
 #define I40E_HW_PHY_CONTROLS_LEDS              BIT(15)
@@ -567,6 +543,28 @@ struct i40e_pf {
 #define I40E_FLAG_DISABLE_FW_LLDP              BIT(24)
 #define I40E_FLAG_RS_FEC                       BIT(25)
 #define I40E_FLAG_BASE_R_FEC                   BIT(26)
+/* TOTAL_PORT_SHUTDOWN
+ * Allows to physically disable the link on the NIC's port.
+ * If enabled, (after link down request from the OS)
+ * no link, traffic or led activity is possible on that port.
+ *
+ * If I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED is set, the
+ * I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED must be explicitly forced to true
+ * and cannot be disabled by system admin at that time.
+ * The functionalities are exclusive in terms of configuration, but they also
+ * have similar behavior (allowing to disable physical link of the port),
+ * with following differences:
+ * - LINK_DOWN_ON_CLOSE_ENABLED is configurable at host OS run-time and is
+ *   supported by whole family of 7xx Intel Ethernet Controllers
+ * - TOTAL_PORT_SHUTDOWN may be enabled only before OS loads (in BIOS)
+ *   only if motherboard's BIOS and NIC's FW has support of it
+ * - when LINK_DOWN_ON_CLOSE_ENABLED is used, the link is being brought down
+ *   by sending phy_type=0 to NIC's FW
+ * - when TOTAL_PORT_SHUTDOWN is used, phy_type is not altered, instead
+ *   the link is being brought down by clearing bit (I40E_AQ_PHY_ENABLE_LINK)
+ *   in abilities field of i40e_aq_set_phy_config structure
+ */
+#define I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED  BIT(27)
 
        struct i40e_client_instance *cinst;
        bool stat_offsets_loaded;
@@ -992,7 +990,6 @@ static inline void i40e_write_fd_input_set(struct i40e_pf *pf,
 int i40e_up(struct i40e_vsi *vsi);
 void i40e_down(struct i40e_vsi *vsi);
 extern const char i40e_driver_name[];
-extern const char i40e_driver_version_str[];
 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired);
 int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
index 6a08984..c897a28 100644 (file)
@@ -541,7 +541,7 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
                    (aq->api_maj_ver == 1 &&
                     aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
                        hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
-               /* fall through */
+               fallthrough;
        default:
                break;
        }
index aa5f1c0..a62ddd6 100644 (file)
@@ -55,29 +55,17 @@ struct i40e_aq_desc {
  */
 
 /* command flags and offsets*/
-#define I40E_AQ_FLAG_DD_SHIFT  0
-#define I40E_AQ_FLAG_CMP_SHIFT 1
 #define I40E_AQ_FLAG_ERR_SHIFT 2
-#define I40E_AQ_FLAG_VFE_SHIFT 3
 #define I40E_AQ_FLAG_LB_SHIFT  9
 #define I40E_AQ_FLAG_RD_SHIFT  10
-#define I40E_AQ_FLAG_VFC_SHIFT 11
 #define I40E_AQ_FLAG_BUF_SHIFT 12
 #define I40E_AQ_FLAG_SI_SHIFT  13
-#define I40E_AQ_FLAG_EI_SHIFT  14
-#define I40E_AQ_FLAG_FE_SHIFT  15
 
-#define I40E_AQ_FLAG_DD                BIT(I40E_AQ_FLAG_DD_SHIFT)  /* 0x1    */
-#define I40E_AQ_FLAG_CMP       BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2    */
 #define I40E_AQ_FLAG_ERR       BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4    */
-#define I40E_AQ_FLAG_VFE       BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8    */
 #define I40E_AQ_FLAG_LB                BIT(I40E_AQ_FLAG_LB_SHIFT)  /* 0x200  */
 #define I40E_AQ_FLAG_RD                BIT(I40E_AQ_FLAG_RD_SHIFT)  /* 0x400  */
-#define I40E_AQ_FLAG_VFC       BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800  */
 #define I40E_AQ_FLAG_BUF       BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
 #define I40E_AQ_FLAG_SI                BIT(I40E_AQ_FLAG_SI_SHIFT)  /* 0x2000 */
-#define I40E_AQ_FLAG_EI                BIT(I40E_AQ_FLAG_EI_SHIFT)  /* 0x4000 */
-#define I40E_AQ_FLAG_FE                BIT(I40E_AQ_FLAG_FE_SHIFT)  /* 0x8000 */
 
 /* error codes */
 enum i40e_admin_queue_err {
@@ -362,13 +350,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
 /* Request resource ownership (direct 0x0008)
  * Release resource ownership (direct 0x0009)
  */
-#define I40E_AQ_RESOURCE_NVM                   1
-#define I40E_AQ_RESOURCE_SDP                   2
-#define I40E_AQ_RESOURCE_ACCESS_READ           1
-#define I40E_AQ_RESOURCE_ACCESS_WRITE          2
-#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT      3000
-#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT     180000
-
 struct i40e_aqc_request_resource {
        __le16  resource_id;
        __le16  access_type;
@@ -384,7 +365,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource);
  */
 struct i40e_aqc_list_capabilites {
        u8 command_flags;
-#define I40E_AQ_LIST_CAP_PF_INDEX_EN   1
        u8 pf_index;
        u8 reserved[2];
        __le32 count;
@@ -411,8 +391,6 @@ struct i40e_aqc_list_capabilities_element_resp {
 #define I40E_AQ_CAP_ID_NPAR_ACTIVE     0x0003
 #define I40E_AQ_CAP_ID_OS2BMC_CAP      0x0004
 #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
-#define I40E_AQ_CAP_ID_ALTERNATE_RAM   0x0006
-#define I40E_AQ_CAP_ID_WOL_AND_PROXY   0x0008
 #define I40E_AQ_CAP_ID_SRIOV           0x0012
 #define I40E_AQ_CAP_ID_VF              0x0013
 #define I40E_AQ_CAP_ID_VMDQ            0x0014
@@ -441,11 +419,6 @@ struct i40e_aqc_list_capabilities_element_resp {
 /* Set CPPM Configuration (direct 0x0103) */
 struct i40e_aqc_cppm_configuration {
        __le16  command_flags;
-#define I40E_AQ_CPPM_EN_LTRC   0x0800
-#define I40E_AQ_CPPM_EN_DMCTH  0x1000
-#define I40E_AQ_CPPM_EN_DMCTLX 0x2000
-#define I40E_AQ_CPPM_EN_HPTC   0x4000
-#define I40E_AQ_CPPM_EN_DMARC  0x8000
        __le16  ttlx;
        __le32  dmacr;
        __le16  dmcth;
@@ -459,15 +432,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
 /* Set ARP Proxy command / response (indirect 0x0104) */
 struct i40e_aqc_arp_proxy_data {
        __le16  command_flags;
-#define I40E_AQ_ARP_INIT_IPV4  0x0800
-#define I40E_AQ_ARP_UNSUP_CTL  0x1000
-#define I40E_AQ_ARP_ENA                0x2000
-#define I40E_AQ_ARP_ADD_IPV4   0x4000
-#define I40E_AQ_ARP_DEL_IPV4   0x8000
        __le16  table_id;
        __le32  enabled_offloads;
-#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE    0x00000020
-#define I40E_AQ_ARP_OFFLOAD_ENABLE             0x00000800
        __le32  ip_addr;
        u8      mac_addr[6];
        u8      reserved[2];
@@ -482,19 +448,6 @@ struct i40e_aqc_ns_proxy_data {
        __le16  table_idx_ipv6_0;
        __le16  table_idx_ipv6_1;
        __le16  control;
-#define I40E_AQ_NS_PROXY_ADD_0         0x0001
-#define I40E_AQ_NS_PROXY_DEL_0         0x0002
-#define I40E_AQ_NS_PROXY_ADD_1         0x0004
-#define I40E_AQ_NS_PROXY_DEL_1         0x0008
-#define I40E_AQ_NS_PROXY_ADD_IPV6_0    0x0010
-#define I40E_AQ_NS_PROXY_DEL_IPV6_0    0x0020
-#define I40E_AQ_NS_PROXY_ADD_IPV6_1    0x0040
-#define I40E_AQ_NS_PROXY_DEL_IPV6_1    0x0080
-#define I40E_AQ_NS_PROXY_COMMAND_SEQ   0x0100
-#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200
-#define I40E_AQ_NS_PROXY_INIT_MAC_TBL  0x0400
-#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE        0x0800
-#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE       0x1000
        u8      mac_addr_0[6];
        u8      mac_addr_1[6];
        u8      local_mac_addr[6];
@@ -507,7 +460,6 @@ I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data);
 /* Manage LAA Command (0x0106) - obsolete */
 struct i40e_aqc_mng_laa {
        __le16  command_flags;
-#define I40E_AQ_LAA_FLAG_WR    0x8000
        u8      reserved[2];
        __le32  sal;
        __le16  sah;
@@ -520,11 +472,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa);
 struct i40e_aqc_mac_address_read {
        __le16  command_flags;
 #define I40E_AQC_LAN_ADDR_VALID                0x10
-#define I40E_AQC_SAN_ADDR_VALID                0x20
 #define I40E_AQC_PORT_ADDR_VALID       0x40
-#define I40E_AQC_WOL_ADDR_VALID                0x80
-#define I40E_AQC_MC_MAG_EN_VALID       0x100
-#define I40E_AQC_ADDR_VALID_MASK       0x3F0
        u8      reserved[6];
        __le32  addr_high;
        __le32  addr_low;
@@ -548,9 +496,7 @@ struct i40e_aqc_mac_address_write {
 #define I40E_AQC_WOL_PRESERVE_ON_PFR   0x0200
 #define I40E_AQC_WRITE_TYPE_LAA_ONLY   0x0000
 #define I40E_AQC_WRITE_TYPE_LAA_WOL    0x4000
-#define I40E_AQC_WRITE_TYPE_PORT       0x8000
 #define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG      0xC000
-#define I40E_AQC_WRITE_TYPE_MASK       0xC000
 
        __le16  mac_sah;
        __le32  mac_sal;
@@ -573,22 +519,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
 
 struct i40e_aqc_set_wol_filter {
        __le16 filter_index;
-#define I40E_AQC_MAX_NUM_WOL_FILTERS   8
-#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT       15
-#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK        (0x1 << \
-               I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
-
-#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT            0
-#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK     (0x7 << \
-               I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
+
        __le16 cmd_flags;
-#define I40E_AQC_SET_WOL_FILTER                                0x8000
-#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL             0x4000
-#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR           0
-#define I40E_AQC_SET_WOL_FILTER_ACTION_SET             1
        __le16 valid_flags;
-#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID           0x8000
-#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID    0x4000
        u8 reserved[2];
        __le32  address_high;
        __le32  address_low;
@@ -608,12 +541,6 @@ I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
 struct i40e_aqc_get_wake_reason_completion {
        u8 reserved_1[2];
        __le16 wake_reason;
-#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT     0
-#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
-               I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
-#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT  8
-#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK   (0xFF << \
-               I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
        u8 reserved_2[12];
 };
 
@@ -646,25 +573,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp);
 
 struct i40e_aqc_switch_config_element_resp {
        u8      element_type;
-#define I40E_AQ_SW_ELEM_TYPE_MAC       1
-#define I40E_AQ_SW_ELEM_TYPE_PF                2
-#define I40E_AQ_SW_ELEM_TYPE_VF                3
-#define I40E_AQ_SW_ELEM_TYPE_EMP       4
-#define I40E_AQ_SW_ELEM_TYPE_BMC       5
-#define I40E_AQ_SW_ELEM_TYPE_PV                16
-#define I40E_AQ_SW_ELEM_TYPE_VEB       17
-#define I40E_AQ_SW_ELEM_TYPE_PA                18
-#define I40E_AQ_SW_ELEM_TYPE_VSI       19
        u8      revision;
-#define I40E_AQ_SW_ELEM_REV_1          1
        __le16  seid;
        __le16  uplink_seid;
        __le16  downlink_seid;
        u8      reserved[3];
        u8      connection_type;
-#define I40E_AQ_CONN_TYPE_REGULAR      0x1
-#define I40E_AQ_CONN_TYPE_DEFAULT      0x2
-#define I40E_AQ_CONN_TYPE_CASCADED     0x3
        __le16  scheduler_id;
        __le16  element_info;
 };
@@ -697,12 +611,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics);
 /* Set Port Parameters command (direct 0x0203) */
 struct i40e_aqc_set_port_parameters {
        __le16  command_flags;
-#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS  1
-#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
-#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA   4
        __le16  bad_frame_vsi;
-#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
-#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK  0x3FF
        __le16  default_seid;        /* reserved for command */
        u8      reserved[10];
 };
@@ -722,25 +631,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc);
 /* expect an array of these structs in the response buffer */
 struct i40e_aqc_switch_resource_alloc_element_resp {
        u8      resource_type;
-#define I40E_AQ_RESOURCE_TYPE_VEB              0x0
-#define I40E_AQ_RESOURCE_TYPE_VSI              0x1
-#define I40E_AQ_RESOURCE_TYPE_MACADDR          0x2
-#define I40E_AQ_RESOURCE_TYPE_STAG             0x3
-#define I40E_AQ_RESOURCE_TYPE_ETAG             0x4
-#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH   0x5
-#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH     0x6
-#define I40E_AQ_RESOURCE_TYPE_VLAN             0x7
-#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY   0x8
-#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY  0x9
-#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL   0xA
-#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE      0xB
-#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS       0xC
-#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS     0xD
-#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS        0xF
-#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS       0x10
-#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS      0x11
-#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS         0x12
-#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS     0x13
        u8      reserved1;
        __le16  guaranteed;
        __le16  total;
@@ -756,7 +646,6 @@ struct i40e_aqc_set_switch_config {
        __le16  flags;
 /* flags used for both fields below */
 #define I40E_AQ_SET_SWITCH_CFG_PROMISC         0x0001
-#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER       0x0002
        __le16  valid_flags;
        /* The ethertype in switch_tag is dropped on ingress and used
         * internally by the switch. Set this to zero for the default
@@ -789,17 +678,10 @@ struct i40e_aqc_set_switch_config {
         */
 #define I40E_AQ_SET_SWITCH_BIT7_VALID          0x80
 
-#define I40E_AQ_SET_SWITCH_L4_SRC_PORT         0x40
 
-#define I40E_AQ_SET_SWITCH_L4_TYPE_RSVD                0x00
 #define I40E_AQ_SET_SWITCH_L4_TYPE_TCP         0x10
-#define I40E_AQ_SET_SWITCH_L4_TYPE_UDP         0x20
-#define I40E_AQ_SET_SWITCH_L4_TYPE_BOTH                0x30
 
-#define I40E_AQ_SET_SWITCH_MODE_DEFAULT                0x00
-#define I40E_AQ_SET_SWITCH_MODE_L4_PORT                0x01
 #define I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL     0x02
-#define I40E_AQ_SET_SWITCH_MODE_TUNNEL         0x03
        u8      mode;
        u8      rsvd5[5];
 };
@@ -834,19 +716,13 @@ struct i40e_aqc_add_get_update_vsi {
        __le16  uplink_seid;
        u8      connection_type;
 #define I40E_AQ_VSI_CONN_TYPE_NORMAL   0x1
-#define I40E_AQ_VSI_CONN_TYPE_DEFAULT  0x2
-#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3
        u8      reserved1;
        u8      vf_id;
        u8      reserved2;
        __le16  vsi_flags;
-#define I40E_AQ_VSI_TYPE_SHIFT         0x0
-#define I40E_AQ_VSI_TYPE_MASK          (0x3 << I40E_AQ_VSI_TYPE_SHIFT)
 #define I40E_AQ_VSI_TYPE_VF            0x0
 #define I40E_AQ_VSI_TYPE_VMDQ2         0x1
 #define I40E_AQ_VSI_TYPE_PF            0x2
-#define I40E_AQ_VSI_TYPE_EMP_MNG       0x3
-#define I40E_AQ_VSI_FLAG_CASCADED_PV   0x4
        __le32  addr_high;
        __le32  addr_low;
 };
@@ -870,24 +746,18 @@ struct i40e_aqc_vsi_properties_data {
 #define I40E_AQ_VSI_PROP_SWITCH_VALID          0x0001
 #define I40E_AQ_VSI_PROP_SECURITY_VALID                0x0002
 #define I40E_AQ_VSI_PROP_VLAN_VALID            0x0004
-#define I40E_AQ_VSI_PROP_CAS_PV_VALID          0x0008
-#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID      0x0010
-#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID       0x0020
 #define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID       0x0040
 #define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID       0x0080
-#define I40E_AQ_VSI_PROP_OUTER_UP_VALID                0x0100
 #define I40E_AQ_VSI_PROP_SCHED_VALID           0x0200
        /* switch section */
        __le16  switch_id; /* 12bit id combined with flags below */
 #define I40E_AQ_VSI_SW_ID_SHIFT                0x0000
 #define I40E_AQ_VSI_SW_ID_MASK         (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
-#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG        0x1000
 #define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB        0x2000
 #define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB        0x4000
        u8      sw_reserved[2];
        /* security section */
        u8      sec_flags;
-#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD   0x01
 #define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK   0x02
 #define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK    0x04
        u8      sec_reserved;
@@ -899,78 +769,33 @@ struct i40e_aqc_vsi_properties_data {
 #define I40E_AQ_VSI_PVLAN_MODE_MASK    (0x03 << \
                                         I40E_AQ_VSI_PVLAN_MODE_SHIFT)
 #define I40E_AQ_VSI_PVLAN_MODE_TAGGED  0x01
-#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED        0x02
 #define I40E_AQ_VSI_PVLAN_MODE_ALL     0x03
 #define I40E_AQ_VSI_PVLAN_INSERT_PVID  0x04
 #define I40E_AQ_VSI_PVLAN_EMOD_SHIFT   0x03
 #define I40E_AQ_VSI_PVLAN_EMOD_MASK    (0x3 << \
                                         I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
 #define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH        0x0
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP  0x08
 #define I40E_AQ_VSI_PVLAN_EMOD_STR     0x10
 #define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
        u8      pvlan_reserved[3];
        /* ingress egress up sections */
        __le32  ingress_table; /* bitmap, 3 bits per up */
-#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
-#define I40E_AQ_VSI_UP_TABLE_UP0_MASK  (0x7 << \
-                                        I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
-#define I40E_AQ_VSI_UP_TABLE_UP1_MASK  (0x7 << \
-                                        I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
-#define I40E_AQ_VSI_UP_TABLE_UP2_MASK  (0x7 << \
-                                        I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
-#define I40E_AQ_VSI_UP_TABLE_UP3_MASK  (0x7 << \
-                                        I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
-#define I40E_AQ_VSI_UP_TABLE_UP4_MASK  (0x7 << \
-                                        I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
-#define I40E_AQ_VSI_UP_TABLE_UP5_MASK  (0x7 << \
-                                        I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
-#define I40E_AQ_VSI_UP_TABLE_UP6_MASK  (0x7 << \
-                                        I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
-#define I40E_AQ_VSI_UP_TABLE_UP7_MASK  (0x7 << \
-                                        I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
        __le32  egress_table;   /* same defines as for ingress table */
        /* cascaded PV section */
        __le16  cas_pv_tag;
        u8      cas_pv_flags;
-#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT          0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_MASK           (0x03 << \
-                                                I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
-#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE          0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE         0x01
-#define I40E_AQ_VSI_CAS_PV_TAGX_COPY           0x02
-#define I40E_AQ_VSI_CAS_PV_INSERT_TAG          0x10
-#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE          0x20
-#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG     0x40
        u8      cas_pv_reserved;
        /* queue mapping section */
        __le16  mapping_flags;
 #define I40E_AQ_VSI_QUE_MAP_CONTIG     0x0
 #define I40E_AQ_VSI_QUE_MAP_NONCONTIG  0x1
        __le16  queue_mapping[16];
-#define I40E_AQ_VSI_QUEUE_SHIFT                0x0
-#define I40E_AQ_VSI_QUEUE_MASK         (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
        __le16  tc_mapping[8];
 #define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT        0
-#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
-                                        I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
 #define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT        9
-#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
-                                        I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
        /* queueing option section */
        u8      queueing_opt_flags;
-#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA  0x04
-#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA    0x08
 #define I40E_AQ_VSI_QUE_OPT_TCP_ENA    0x10
-#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA   0x20
-#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
 #define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI        0x40
        u8      queueing_opt_reserved[3];
        /* scheduler section */
@@ -995,10 +820,6 @@ I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
  */
 struct i40e_aqc_add_update_pv {
        __le16  command_flags;
-#define I40E_AQC_PV_FLAG_PV_TYPE               0x1
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN   0x2
-#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN   0x4
-#define I40E_AQC_PV_FLAG_IS_CTRL_PORT          0x8
        __le16  uplink_seid;
        __le16  connected_seid;
        u8      reserved[10];
@@ -1009,10 +830,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv);
 struct i40e_aqc_add_update_pv_completion {
        /* reserved for update; for add also encodes error if rc == ENOSPC */
        __le16  pv_seid;
-#define I40E_AQC_PV_ERR_FLAG_NO_PV     0x1
-#define I40E_AQC_PV_ERR_FLAG_NO_SCHED  0x2
-#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER        0x4
-#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY  0x8
        u8      reserved[14];
 };
 
@@ -1026,9 +843,6 @@ struct i40e_aqc_get_pv_params_completion {
        __le16  seid;
        __le16  default_stag;
        __le16  pv_flags; /* same flags as add_pv */
-#define I40E_AQC_GET_PV_PV_TYPE                        0x1
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG      0x2
-#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG      0x4
        u8      reserved[8];
        __le16  default_port_seid;
 };
@@ -1041,12 +855,8 @@ struct i40e_aqc_add_veb {
        __le16  downlink_seid;
        __le16  veb_flags;
 #define I40E_AQC_ADD_VEB_FLOATING              0x1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT       1
-#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK                (0x3 << \
-                                       I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT)
 #define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT     0x2
 #define I40E_AQC_ADD_VEB_PORT_TYPE_DATA                0x4
-#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER      0x8     /* deprecated */
 #define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS  0x10
        u8      enable_tcs;
        u8      reserved[9];
@@ -1059,10 +869,6 @@ struct i40e_aqc_add_veb_completion {
        __le16  switch_seid;
        /* also encodes error if rc == ENOSPC; codes are the same as add_pv */
        __le16  veb_seid;
-#define I40E_AQC_VEB_ERR_FLAG_NO_VEB           0x1
-#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED         0x2
-#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER       0x4
-#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY         0x8
        __le16  statistic_index;
        __le16  vebs_used;
        __le16  vebs_free;
@@ -1095,9 +901,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
 struct i40e_aqc_macvlan {
        __le16  num_addresses;
        __le16  seid[3];
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT    0
-#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK     (0x3FF << \
-                                       I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
 #define I40E_AQC_MACVLAN_CMD_SEID_VALID                0x8000
        __le32  addr_high;
        __le32  addr_low;
@@ -1111,18 +914,11 @@ struct i40e_aqc_add_macvlan_element_data {
        __le16  vlan_tag;
        __le16  flags;
 #define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH     0x0001
-#define I40E_AQC_MACVLAN_ADD_HASH_MATCH                0x0002
 #define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN       0x0004
-#define I40E_AQC_MACVLAN_ADD_TO_QUEUE          0x0008
 #define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC    0x0010
        __le16  queue_number;
-#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT       0
-#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK                (0x7FF << \
-                                       I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT)
        /* response section */
        u8      match_method;
-#define I40E_AQC_MM_PERFECT_MATCH      0x01
-#define I40E_AQC_MM_HASH_MATCH         0x02
 #define I40E_AQC_MM_ERR_NO_RES         0xFF
        u8      reserved1[3];
 };
@@ -1148,14 +944,10 @@ struct i40e_aqc_remove_macvlan_element_data {
        __le16  vlan_tag;
        u8      flags;
 #define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH     0x01
-#define I40E_AQC_MACVLAN_DEL_HASH_MATCH                0x02
 #define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN       0x08
-#define I40E_AQC_MACVLAN_DEL_ALL_VSIS          0x10
        u8      reserved[3];
        /* reply section */
        u8      error_code;
-#define I40E_AQC_REMOVE_MACVLAN_SUCCESS                0x0
-#define I40E_AQC_REMOVE_MACVLAN_FAIL           0xFF
        u8      reply_reserved[3];
 };
 
@@ -1166,30 +958,8 @@ struct i40e_aqc_remove_macvlan_element_data {
 struct i40e_aqc_add_remove_vlan_element_data {
        __le16  vlan_tag;
        u8      vlan_flags;
-/* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_LOCAL                        0x1
-#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT          1
-#define I40E_AQC_ADD_PVLAN_TYPE_MASK   (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT)
-#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR                0x0
-#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY                0x2
-#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY      0x4
-#define I40E_AQC_VLAN_PTYPE_SHIFT              3
-#define I40E_AQC_VLAN_PTYPE_MASK       (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT)
-#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI                0x0
-#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI                0x8
-#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI      0x10
-#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI       0x18
-/* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_ALL       0x1
        u8      reserved;
        u8      result;
-/* flags for add VLAN */
-#define I40E_AQC_ADD_VLAN_SUCCESS      0x0
-#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE
-#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE        0xFF
-/* flags for remove VLAN */
-#define I40E_AQC_REMOVE_VLAN_SUCCESS   0x0
-#define I40E_AQC_REMOVE_VLAN_FAIL      0xFF
        u8      reserved1[3];
 };
 
@@ -1213,9 +983,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
 #define I40E_AQC_SET_VSI_PROMISC_VLAN          0x10
 #define I40E_AQC_SET_VSI_PROMISC_TX            0x8000
        __le16  seid;
-#define I40E_AQC_VSI_PROM_CMD_SEID_MASK                0x3FF
        __le16  vlan_tag;
-#define I40E_AQC_SET_VSI_VLAN_MASK             0x0FFF
 #define I40E_AQC_SET_VSI_VLAN_VALID            0x8000
        u8      reserved[8];
 };
@@ -1227,11 +995,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
  */
 struct i40e_aqc_add_tag {
        __le16  flags;
-#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE         0x0001
        __le16  seid;
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT    0
-#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK     (0x3FF << \
-                                       I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT)
        __le16  tag;
        __le16  queue_number;
        u8      reserved[8];
@@ -1252,9 +1016,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion);
  */
 struct i40e_aqc_remove_tag {
        __le16  seid;
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK  (0x3FF << \
-                                       I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT)
        __le16  tag;
        u8      reserved[12];
 };
@@ -1290,9 +1051,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion);
 /* Update S/E-Tag (direct 0x0259) */
 struct i40e_aqc_update_tag {
        __le16  seid;
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK  (0x3FF << \
-                                       I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT)
        __le16  old_tag;
        __le16  new_tag;
        u8      reserved[10];
@@ -1319,13 +1077,8 @@ struct i40e_aqc_add_remove_control_packet_filter {
        __le16  flags;
 #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC   0x0001
 #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP         0x0002
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE     0x0004
 #define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX           0x0008
-#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX           0x0000
        __le16  seid;
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0
-#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK  (0x3FF << \
-                               I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT)
        __le16  queue;
        u8      reserved[2];
 };
@@ -1351,9 +1104,6 @@ struct i40e_aqc_add_remove_cloud_filters {
        u8      num_filters;
        u8      reserved;
        __le16  seid;
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT  0
-#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK   (0x3FF << \
-                                       I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
        u8      big_buffer_flag;
 #define I40E_AQC_ADD_CLOUD_CMD_BB      1
        u8      reserved2[3];
@@ -1380,9 +1130,6 @@ struct i40e_aqc_cloud_filters_element_data {
                } raw_v6;
        } ipaddr;
        __le16  flags;
-#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT                        0
-#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
-                                       I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
 /* 0x0000 reserved */
 /* 0x0001 reserved */
 /* 0x0002 reserved */
@@ -1404,36 +1151,20 @@ struct i40e_aqc_cloud_filters_element_data {
 #define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT             0x0011 /* Dest MAC + L4 Port */
 #define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT                0x0012 /* Dest MAC + VLAN + L4 Port */
 
-#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE              0x0080
-#define I40E_AQC_ADD_CLOUD_VNK_SHIFT                   6
-#define I40E_AQC_ADD_CLOUD_VNK_MASK                    0x00C0
 #define I40E_AQC_ADD_CLOUD_FLAGS_IPV4                  0
 #define I40E_AQC_ADD_CLOUD_FLAGS_IPV6                  0x0100
 
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT              9
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK               0x1E00
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN              0
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC         1
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE             2
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP                 3
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED           4
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE          5
 
-#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC      0x2000
-#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC      0x4000
-#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP       0x8000
 
        __le32  tenant_id;
        u8      reserved[4];
        __le16  queue_number;
-#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT         0
-#define I40E_AQC_ADD_CLOUD_QUEUE_MASK          (0x7FF << \
-                                                I40E_AQC_ADD_CLOUD_QUEUE_SHIFT)
        u8      reserved2[14];
        /* response section */
        u8      allocation_result;
-#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS      0x0
-#define I40E_AQC_ADD_CLOUD_FILTER_FAIL         0xFF
        u8      response_reserved[7];
 };
 
@@ -1445,37 +1176,7 @@ I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data);
 struct i40e_aqc_cloud_filters_element_bb {
        struct i40e_aqc_cloud_filters_element_data element;
        u16     general_fields[32];
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0   0
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1   1
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2   2
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0   3
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1   4
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2   5
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0   6
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1   7
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2   8
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0   9
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1   10
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2   11
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0   12
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1   13
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2   14
 #define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0   15
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1   16
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2   17
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3   18
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4   19
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5   20
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6   21
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7   22
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0   23
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1   24
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2   25
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3   26
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4   27
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5   28
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6   29
-#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7   30
 };
 
 I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb);
@@ -1504,11 +1205,6 @@ I40E_CHECK_STRUCT_LEN(4, i40e_filter_data);
 
 struct i40e_aqc_replace_cloud_filters_cmd {
        u8      valid_flags;
-#define I40E_AQC_REPLACE_L1_FILTER             0x0
-#define I40E_AQC_REPLACE_CLOUD_FILTER          0x1
-#define I40E_AQC_GET_CLOUD_FILTERS             0x2
-#define I40E_AQC_MIRROR_CLOUD_FILTER           0x4
-#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER    0x8
        u8      old_filter_type;
        u8      new_filter_type;
        u8      tr_bit;
@@ -1521,25 +1217,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd);
 
 struct i40e_aqc_replace_cloud_filters_cmd_buf {
        u8      data[32];
-/* Filter type INPUT codes*/
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX   3
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED     BIT(7)
-
-/* Field Vector offsets */
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA     0
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH   6
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG       7
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN       8
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC       12
-/* big FLU */
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA      14
-/* big FLU */
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA     15
-
-#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37
        struct i40e_filter_data filters[8];
 };
 
@@ -1556,8 +1233,6 @@ struct i40e_aqc_add_delete_mirror_rule {
 #define I40E_AQC_MIRROR_RULE_TYPE_SHIFT                0
 #define I40E_AQC_MIRROR_RULE_TYPE_MASK         (0x7 << \
                                                I40E_AQC_MIRROR_RULE_TYPE_SHIFT)
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS        1
-#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2
 #define I40E_AQC_MIRROR_RULE_TYPE_VLAN         3
 #define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS  4
 #define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS   5
@@ -1600,8 +1275,6 @@ struct i40e_aqc_write_ddp_resp {
 
 struct i40e_aqc_get_applied_profiles {
        u8      flags;
-#define I40E_AQC_GET_DDP_GET_CONF      0x1
-#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2
        u8      rsv[3];
        __le32  reserved;
        __le32  addr_high;
@@ -1618,8 +1291,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles);
 struct i40e_aqc_pfc_ignore {
        u8      tc_bitmap;
        u8      command_flags; /* unused on response */
-#define I40E_AQC_PFC_IGNORE_SET                0x80
-#define I40E_AQC_PFC_IGNORE_CLEAR      0x0
        u8      reserved[14];
 };
 
@@ -1736,7 +1407,6 @@ struct i40e_aqc_configure_switching_comp_ets_data {
        u8      reserved[4];
        u8      tc_valid_bits;
        u8      seepage;
-#define I40E_AQ_ETS_SEEPAGE_EN_MASK    0x1
        u8      tc_strict_priority_flags;
        u8      reserved1[17];
        u8      tc_bw_share_credits[8];
@@ -1977,40 +1647,18 @@ struct i40e_aq_get_phy_abilities_resp {
        u8      abilities;
 #define I40E_AQ_PHY_FLAG_PAUSE_TX      0x01
 #define I40E_AQ_PHY_FLAG_PAUSE_RX      0x02
-#define I40E_AQ_PHY_FLAG_LOW_POWER     0x04
-#define I40E_AQ_PHY_LINK_ENABLED       0x08
-#define I40E_AQ_PHY_AN_ENABLED         0x10
-#define I40E_AQ_PHY_FLAG_MODULE_QUAL   0x20
-#define I40E_AQ_PHY_FEC_ABILITY_KR     0x40
-#define I40E_AQ_PHY_FEC_ABILITY_RS     0x80
        __le16  eee_capability;
-#define I40E_AQ_EEE_100BASE_TX         0x0002
-#define I40E_AQ_EEE_1000BASE_T         0x0004
-#define I40E_AQ_EEE_10GBASE_T          0x0008
-#define I40E_AQ_EEE_1000BASE_KX                0x0010
-#define I40E_AQ_EEE_10GBASE_KX4                0x0020
-#define I40E_AQ_EEE_10GBASE_KR         0x0040
        __le32  eeer_val;
        u8      d3_lpan;
-#define I40E_AQ_SET_PHY_D3_LPAN_ENA    0x01
        u8      phy_type_ext;
 #define I40E_AQ_PHY_TYPE_EXT_25G_KR    0X01
 #define I40E_AQ_PHY_TYPE_EXT_25G_CR    0X02
 #define I40E_AQ_PHY_TYPE_EXT_25G_SR    0x04
 #define I40E_AQ_PHY_TYPE_EXT_25G_LR    0x08
-#define I40E_AQ_PHY_TYPE_EXT_25G_AOC   0x10
-#define I40E_AQ_PHY_TYPE_EXT_25G_ACC   0x20
-#define I40E_AQ_PHY_TYPE_EXT_2_5GBASE_T        0x40
-#define I40E_AQ_PHY_TYPE_EXT_5GBASE_T  0x80
        u8      fec_cfg_curr_mod_ext_info;
-#define I40E_AQ_ENABLE_FEC_KR          0x01
-#define I40E_AQ_ENABLE_FEC_RS          0x02
 #define I40E_AQ_REQUEST_FEC_KR         0x04
 #define I40E_AQ_REQUEST_FEC_RS         0x08
 #define I40E_AQ_ENABLE_FEC_AUTO                0x10
-#define I40E_AQ_FEC
-#define I40E_AQ_MODULE_TYPE_EXT_MASK   0xE0
-#define I40E_AQ_MODULE_TYPE_EXT_SHIFT  5
 
        u8      ext_comp_code;
        u8      phy_id[4];
@@ -2056,21 +1704,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
 struct i40e_aq_set_mac_config {
        __le16  max_frame_size;
        u8      params;
-#define I40E_AQ_SET_MAC_CONFIG_CRC_EN                  0x04
-#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK             0x78
-#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT            3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE             0x0
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX          0xF
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX          0x9
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX          0x8
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX          0x7
-#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX          0x6
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX          0x5
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX          0x4
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX          0x3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX          0x2
-#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX          0x1
-#define I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN 0x80
        u8      tx_timer_priority; /* bitmap */
        __le16  tx_timer_value;
        __le16  fc_refresh_threshold;
@@ -2092,8 +1725,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an);
 /* Get Link Status cmd & response data structure (direct 0x0607) */
 struct i40e_aqc_get_link_status {
        __le16  command_flags; /* only field set on command */
-#define I40E_AQ_LSE_MASK               0x3
-#define I40E_AQ_LSE_NOP                        0x0
 #define I40E_AQ_LSE_DISABLE            0x2
 #define I40E_AQ_LSE_ENABLE             0x3
 /* only response uses this flag */
@@ -2102,44 +1733,16 @@ struct i40e_aqc_get_link_status {
        u8      link_speed;  /* i40e_aq_link_speed */
        u8      link_info;
 #define I40E_AQ_LINK_UP                        0x01    /* obsolete */
-#define I40E_AQ_LINK_UP_FUNCTION       0x01
-#define I40E_AQ_LINK_FAULT             0x02
-#define I40E_AQ_LINK_FAULT_TX          0x04
-#define I40E_AQ_LINK_FAULT_RX          0x08
-#define I40E_AQ_LINK_FAULT_REMOTE      0x10
-#define I40E_AQ_LINK_UP_PORT           0x20
 #define I40E_AQ_MEDIA_AVAILABLE                0x40
-#define I40E_AQ_SIGNAL_DETECT          0x80
        u8      an_info;
 #define I40E_AQ_AN_COMPLETED           0x01
-#define I40E_AQ_LP_AN_ABILITY          0x02
-#define I40E_AQ_PD_FAULT               0x04
-#define I40E_AQ_FEC_EN                 0x08
-#define I40E_AQ_PHY_LOW_POWER          0x10
 #define I40E_AQ_LINK_PAUSE_TX          0x20
 #define I40E_AQ_LINK_PAUSE_RX          0x40
 #define I40E_AQ_QUALIFIED_MODULE       0x80
        u8      ext_info;
-#define I40E_AQ_LINK_PHY_TEMP_ALARM    0x01
-#define I40E_AQ_LINK_XCESSIVE_ERRORS   0x02
-#define I40E_AQ_LINK_TX_SHIFT          0x02
-#define I40E_AQ_LINK_TX_MASK           (0x03 << I40E_AQ_LINK_TX_SHIFT)
-#define I40E_AQ_LINK_TX_ACTIVE         0x00
-#define I40E_AQ_LINK_TX_DRAINED                0x01
-#define I40E_AQ_LINK_TX_FLUSHED                0x03
-#define I40E_AQ_LINK_FORCED_40G                0x10
-/* 25G Error Codes */
-#define I40E_AQ_25G_NO_ERR             0X00
-#define I40E_AQ_25G_NOT_PRESENT                0X01
-#define I40E_AQ_25G_NVM_CRC_ERR                0X02
-#define I40E_AQ_25G_SBUS_UCODE_ERR     0X03
-#define I40E_AQ_25G_SERDES_UCODE_ERR   0X04
-#define I40E_AQ_25G_NIMB_UCODE_ERR     0X05
        u8      loopback; /* use defines from i40e_aqc_set_lb_mode */
 /* Since firmware API 1.7 loopback field keeps power class info as well */
 #define I40E_AQ_LOOPBACK_MASK          0x07
-#define I40E_AQ_PWR_CLASS_SHIFT_LB     6
-#define I40E_AQ_PWR_CLASS_MASK_LB      (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB)
        __le16  max_frame_size;
        u8      config;
 #define I40E_AQ_CONFIG_FEC_KR_ENA      0x01
@@ -2149,11 +1752,6 @@ struct i40e_aqc_get_link_status {
        union {
                struct {
                        u8      power_desc;
-#define I40E_AQ_LINK_POWER_CLASS_1     0x00
-#define I40E_AQ_LINK_POWER_CLASS_2     0x01
-#define I40E_AQ_LINK_POWER_CLASS_3     0x02
-#define I40E_AQ_LINK_POWER_CLASS_4     0x03
-#define I40E_AQ_PWR_CLASS_MASK         0x03
                        u8      reserved[4];
                };
                struct {
@@ -2171,13 +1769,7 @@ struct i40e_aqc_set_phy_int_mask {
        __le16  event_mask;
 #define I40E_AQ_EVENT_LINK_UPDOWN      0x0002
 #define I40E_AQ_EVENT_MEDIA_NA         0x0004
-#define I40E_AQ_EVENT_LINK_FAULT       0x0008
-#define I40E_AQ_EVENT_PHY_TEMP_ALARM   0x0010
-#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020
-#define I40E_AQ_EVENT_SIGNAL_DETECT    0x0040
-#define I40E_AQ_EVENT_AN_COMPLETED     0x0080
 #define I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100
-#define I40E_AQ_EVENT_PORT_TX_SUSPENDED        0x0200
        u8      reserved1[6];
 };
 
@@ -2209,13 +1801,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
 /* Set PHY Debug command (0x0622) */
 struct i40e_aqc_set_phy_debug {
        u8      command_flags;
-#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL       0x02
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK  (0x03 << \
-                                       I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE  0x00
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD  0x01
-#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT  0x02
 /* Disable link manageability on a single port */
 #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW      0x10
 /* Disable link manageability on all ports */
@@ -2247,7 +1832,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
 /* Get PHY Register command (0x0629) */
 struct i40e_aqc_phy_register_access {
        u8      phy_interface;
-#define I40E_AQ_PHY_REG_ACCESS_INTERNAL        0
 #define I40E_AQ_PHY_REG_ACCESS_EXTERNAL        1
 #define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
        u8      dev_address;
@@ -2274,9 +1858,7 @@ struct i40e_aqc_nvm_update {
 #define I40E_AQ_NVM_LAST_CMD                   0x01
 #define I40E_AQ_NVM_REARRANGE_TO_FLAT          0x20
 #define I40E_AQ_NVM_REARRANGE_TO_STRUCT                0x40
-#define I40E_AQ_NVM_FLASH_ONLY                 0x80
 #define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT   1
-#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK    0x03
 #define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED        0x03
 #define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL     0x01
        u8      module_pointer;
@@ -2291,9 +1873,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
 /* NVM Config Read (indirect 0x0704) */
 struct i40e_aqc_nvm_config_read {
        __le16  cmd_flags;
-#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK  1 
-#define I40E_AQ_ANVM_READ_SINGLE_FEATURE               0 
-#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES            1
        __le16  element_count;
        __le16  element_id;     /* Feature/field ID */
        __le16  element_id_msw; /* MSWord of field ID */
@@ -2315,16 +1894,8 @@ struct i40e_aqc_nvm_config_write {
 I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
 
 /* Used for 0x0704 as well as for 0x0705 commands */
-#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT                1
-#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
-                               BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
-#define I40E_AQ_ANVM_FEATURE           0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD   BIT(FEATURE_OR_IMMEDIATE_SHIFT)
 struct i40e_aqc_nvm_config_data_feature {
        __le16 feature_id;
-#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY           0x01
-#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP          0x08
-#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR            0x10
        __le16 feature_options;
        __le16 feature_selection;
 };
@@ -2344,7 +1915,6 @@ I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field);
  * no command data struct used
  */
 struct i40e_aqc_nvm_oem_post_update {
-#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA      0x01
        u8 sel_data;
        u8 reserved[7];
 };
@@ -2366,9 +1936,6 @@ I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer);
  */
 struct i40e_aqc_thermal_sensor {
        u8 sensor_action;
-#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG     0
-#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG      1
-#define I40E_AQ_THERMAL_SENSOR_READ_TEMP       2
        u8 reserved[7];
        __le32  addr_high;
        __le32  addr_low;
@@ -2421,10 +1988,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write);
  */
 struct i40e_aqc_alternate_write_done {
        __le16  cmd_flags;
-#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK       1
-#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY     0
-#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI       1
-#define I40E_AQ_ALTERNATE_RESET_NEEDED         2
        u8      reserved[14];
 };
 
@@ -2433,8 +1996,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done);
 /* Set OEM mode (direct 0x0905) */
 struct i40e_aqc_alternate_set_mode {
        __le32  mode;
-#define I40E_AQ_ALTERNATE_MODE_NONE    0
-#define I40E_AQ_ALTERNATE_MODE_OEM     1
        u8      reserved[12];
 };
 
@@ -2460,13 +2021,9 @@ struct i40e_aqc_lldp_get_mib {
 #define I40E_AQ_LLDP_MIB_TYPE_MASK             0x3
 #define I40E_AQ_LLDP_MIB_LOCAL                 0x0
 #define I40E_AQ_LLDP_MIB_REMOTE                        0x1
-#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE      0x2
 #define I40E_AQ_LLDP_BRIDGE_TYPE_MASK          0xC
 #define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT         0x2
 #define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE        0x0
-#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR      0x1
-#define I40E_AQ_LLDP_TX_SHIFT                  0x4
-#define I40E_AQ_LLDP_TX_MASK                   (0x03 << I40E_AQ_LLDP_TX_SHIFT)
 /* TX pause flags use I40E_AQ_LINK_TX_* above */
        __le16  local_len;
        __le16  remote_len;
@@ -2482,7 +2039,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib);
  */
 struct i40e_aqc_lldp_update_mib {
        u8      command;
-#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
 #define I40E_AQ_LLDP_MIB_UPDATE_DISABLE        0x1
        u8      reserved[7];
        __le32  addr_high;
@@ -2521,7 +2077,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
 /* Stop LLDP (direct 0x0A05) */
 struct i40e_aqc_lldp_stop {
        u8      command;
-#define I40E_AQ_LLDP_AGENT_STOP                        0x0
 #define I40E_AQ_LLDP_AGENT_SHUTDOWN            0x1
 #define I40E_AQ_LLDP_AGENT_STOP_PERSIST                0x2
        u8      reserved[15];
@@ -2627,13 +2182,6 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
  *     Used to replace the local MIB of a given LLDP agent. e.g. DCBx
  */
 struct i40e_aqc_lldp_set_local_mib {
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT       0
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK        BIT(SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
-#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB        0x0
-#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT   (1)
-#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK \
-                       BIT(SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
-#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS         0x1
        u8      type;
        u8      reserved0;
        __le16  length;
@@ -2648,9 +2196,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
  *     Used for stopping/starting specific LLDP agent. e.g. DCBx
  */
 struct i40e_aqc_lldp_stop_start_specific_agent {
-#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT    0
-#define I40E_AQC_START_SPECIFIC_AGENT_MASK \
-                               BIT(I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
        u8      command;
        u8      reserved[15];
 };
@@ -2660,7 +2205,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
 /* Restore LLDP Agent factory settings (direct 0x0A0A) */
 struct i40e_aqc_lldp_restore {
        u8      command;
-#define I40E_AQ_LLDP_AGENT_RESTORE_NOT         0x0
 #define I40E_AQ_LLDP_AGENT_RESTORE             0x1
        u8      reserved[15];
 };
@@ -2674,8 +2218,6 @@ struct i40e_aqc_add_udp_tunnel {
        u8      protocol_type;
 #define I40E_AQC_TUNNEL_TYPE_VXLAN     0x00
 #define I40E_AQC_TUNNEL_TYPE_NGE       0x01
-#define I40E_AQC_TUNNEL_TYPE_TEREDO    0x10
-#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11
        u8      reserved1[10];
 };
 
@@ -2685,8 +2227,6 @@ struct i40e_aqc_add_udp_tunnel_completion {
        __le16  udp_port;
        u8      filter_entry_index;
        u8      multiple_pfs;
-#define I40E_AQC_SINGLE_PF             0x0
-#define I40E_AQC_MULTIPLE_PFS          0x1
        u8      total_filters;
        u8      reserved[11];
 };
@@ -2759,16 +2299,7 @@ struct i40e_aqc_tunnel_key_structure {
        u8      key1_len;  /* 0 to 15 */
        u8      key2_len;  /* 0 to 15 */
        u8      flags;
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE    0x01
-/* response flags */
-#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS     0x01
-#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED    0x02
-#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN  0x03
        u8      network_key_index;
-#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN               0x0
-#define I40E_AQC_NETWORK_KEY_INDEX_NGE                 0x1
-#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP     0x2
-#define I40E_AQC_NETWORK_KEY_INDEX_GRE                 0x3
        u8      reserved[10];
 };
 
@@ -2777,9 +2308,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure);
 /* OEM mode commands (direct 0xFE0x) */
 struct i40e_aqc_oem_param_change {
        __le32  param_type;
-#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL  0
-#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL  1
-#define I40E_AQ_OEM_PARAM_MAC          2
        __le32  param_value1;
        __le16  param_value2;
        u8      reserved[6];
@@ -2789,8 +2317,6 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change);
 
 struct i40e_aqc_oem_state_change {
        __le32  state;
-#define I40E_AQ_OEM_STATE_LINK_DOWN    0x0
-#define I40E_AQ_OEM_STATE_LINK_UP      0x1
        u8      reserved[12];
 };
 
@@ -2826,14 +2352,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize);
 
 struct i40e_acq_set_test_mode {
        u8      mode;
-#define I40E_AQ_TEST_PARTIAL   0
-#define I40E_AQ_TEST_FULL      1
-#define I40E_AQ_TEST_NVM       2
        u8      reserved[3];
        u8      command;
-#define I40E_AQ_TEST_OPEN      0
-#define I40E_AQ_TEST_CLOSE     1
-#define I40E_AQ_TEST_INC       2
        u8      reserved2[3];
        __le32  address_high;
        __le32  address_low;
@@ -2874,20 +2394,6 @@ struct i40e_aqc_debug_modify_reg {
 I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg);
 
 /* dump internal data (0xFF08, indirect) */
-
-#define I40E_AQ_CLUSTER_ID_AUX         0
-#define I40E_AQ_CLUSTER_ID_SWITCH_FLU  1
-#define I40E_AQ_CLUSTER_ID_TXSCHED     2
-#define I40E_AQ_CLUSTER_ID_HMC         3
-#define I40E_AQ_CLUSTER_ID_MAC0                4
-#define I40E_AQ_CLUSTER_ID_MAC1                5
-#define I40E_AQ_CLUSTER_ID_MAC2                6
-#define I40E_AQ_CLUSTER_ID_MAC3                7
-#define I40E_AQ_CLUSTER_ID_DCB         8
-#define I40E_AQ_CLUSTER_ID_EMP_MEM     9
-#define I40E_AQ_CLUSTER_ID_PKT_BUF     10
-#define I40E_AQ_CLUSTER_ID_ALTRAM      11
-
 struct i40e_aqc_debug_dump_internals {
        u8      cluster_id;
        u8      table_id;
index e81530c..befd301 100644 (file)
@@ -3,10 +3,10 @@
 
 #include <linux/list.h>
 #include <linux/errno.h>
+#include <linux/net/intel/i40e_client.h>
 
 #include "i40e.h"
 #include "i40e_prototype.h"
-#include "i40e_client.h"
 
 static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR;
 static struct i40e_client *registered_client;
index 45b90eb..afad5e9 100644 (file)
@@ -27,6 +27,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
                case I40E_DEV_ID_QSFP_A:
                case I40E_DEV_ID_QSFP_B:
                case I40E_DEV_ID_QSFP_C:
+               case I40E_DEV_ID_5G_BASE_T_BC:
                case I40E_DEV_ID_10G_BASE_T:
                case I40E_DEV_ID_10G_BASE_T4:
                case I40E_DEV_ID_10G_BASE_T_BC:
@@ -1455,10 +1456,6 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
        return gpio_val;
 }
 
-#define I40E_COMBINED_ACTIVITY 0xA
-#define I40E_FILTER_ACTIVITY 0xE
-#define I40E_LINK_ACTIVITY 0xC
-#define I40E_MAC_ACTIVITY 0xD
 #define I40E_FW_LED BIT(4)
 #define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
                             I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
@@ -4910,6 +4907,7 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
                status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
                                                          value);
                break;
+       case I40E_DEV_ID_5G_BASE_T_BC:
        case I40E_DEV_ID_10G_BASE_T:
        case I40E_DEV_ID_10G_BASE_T4:
        case I40E_DEV_ID_10G_BASE_T_BC:
@@ -4947,6 +4945,7 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw,
                status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
                                                         value);
                break;
+       case I40E_DEV_ID_5G_BASE_T_BC:
        case I40E_DEV_ID_10G_BASE_T:
        case I40E_DEV_ID_10G_BASE_T4:
        case I40E_DEV_ID_10G_BASE_T_BC:
index ba86ad8..2b1a2e8 100644 (file)
@@ -6,10 +6,8 @@
 
 #include "i40e_type.h"
 
-#define I40E_DCBX_STATUS_NOT_STARTED   0
 #define I40E_DCBX_STATUS_IN_PROGRESS   1
 #define I40E_DCBX_STATUS_DONE          2
-#define I40E_DCBX_STATUS_MULTIPLE_PEERS        3
 #define I40E_DCBX_STATUS_DISABLED      7
 
 #define I40E_TLV_TYPE_END              0
@@ -24,7 +22,6 @@
 #define I40E_CEE_DCBX_OUI              0x001b21
 #define I40E_CEE_DCBX_TYPE             2
 
-#define I40E_CEE_SUBTYPE_CTRL          1
 #define I40E_CEE_SUBTYPE_PG_CFG                2
 #define I40E_CEE_SUBTYPE_PFC_CFG       3
 #define I40E_CEE_SUBTYPE_APP_PRI       4
@@ -105,9 +102,7 @@ struct i40e_cee_ctrl_tlv {
 struct i40e_cee_feat_tlv {
        struct i40e_cee_tlv_hdr hdr;
        u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
-#define I40E_CEE_FEAT_TLV_ENABLE_MASK  0x80
 #define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40
-#define I40E_CEE_FEAT_TLV_ERR_MASK     0x20
        u8 subtype;
        u8 tlvinfo[1];
 };
index 99ea543..d3ad2e3 100644 (file)
 
 static struct dentry *i40e_dbg_root;
 
+enum ring_type {
+       RING_TYPE_RX,
+       RING_TYPE_TX,
+       RING_TYPE_XDP
+};
+
 /**
  * i40e_dbg_find_vsi - searches for the vsi with the given seid
  * @pf: the PF structure to search for the vsi
@@ -319,6 +325,47 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
                         i, tx_ring->itr_setting,
                         ITR_IS_DYNAMIC(tx_ring->itr_setting) ? "dynamic" : "fixed");
        }
+       if (i40e_enabled_xdp_vsi(vsi)) {
+               for (i = 0; i < vsi->num_queue_pairs; i++) {
+                       struct i40e_ring *xdp_ring = READ_ONCE(vsi->xdp_rings[i]);
+
+                       if (!xdp_ring)
+                               continue;
+
+                       dev_info(&pf->pdev->dev,
+                                "    xdp_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
+                                i, *xdp_ring->state,
+                                xdp_ring->queue_index,
+                                xdp_ring->reg_idx);
+                       dev_info(&pf->pdev->dev,
+                                "    xdp_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+                                i,
+                                xdp_ring->next_to_use,
+                                xdp_ring->next_to_clean,
+                                xdp_ring->ring_active);
+                       dev_info(&pf->pdev->dev,
+                                "    xdp_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
+                                i, xdp_ring->stats.packets,
+                                xdp_ring->stats.bytes,
+                                xdp_ring->tx_stats.restart_queue);
+                       dev_info(&pf->pdev->dev,
+                                "    xdp_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
+                                i,
+                                xdp_ring->tx_stats.tx_busy,
+                                xdp_ring->tx_stats.tx_done_old);
+                       dev_info(&pf->pdev->dev,
+                                "    xdp_rings[%i]: size = %i\n",
+                                i, xdp_ring->size);
+                       dev_info(&pf->pdev->dev,
+                                "    xdp_rings[%i]: DCB tc = %d\n",
+                                i, xdp_ring->dcb_tc);
+                       dev_info(&pf->pdev->dev,
+                                "    xdp_rings[%i]: itr_setting = %d (%s)\n",
+                                i, xdp_ring->itr_setting,
+                                ITR_IS_DYNAMIC(xdp_ring->itr_setting) ?
+                                "dynamic" : "fixed");
+               }
+       }
        rcu_read_unlock();
        dev_info(&pf->pdev->dev,
                 "    work_limit = %d\n",
@@ -489,11 +536,12 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
  * @ring_id: ring id entered by user
  * @desc_n: descriptor number entered by user
  * @pf: the i40e_pf created in command write
- * @is_rx_ring: true if rx, false if tx
+ * @type: enum describing whether ring is RX, TX or XDP
  **/
 static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
-                              struct i40e_pf *pf, bool is_rx_ring)
+                              struct i40e_pf *pf, enum ring_type type)
 {
+       bool is_rx_ring = type == RING_TYPE_RX;
        struct i40e_tx_desc *txd;
        union i40e_rx_desc *rxd;
        struct i40e_ring *ring;
@@ -505,6 +553,10 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
                dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid);
                return;
        }
+       if (type == RING_TYPE_XDP && !i40e_enabled_xdp_vsi(vsi)) {
+               dev_info(&pf->pdev->dev, "XDP not enabled on VSI %d\n", vsi_seid);
+               return;
+       }
        if (ring_id >= vsi->num_queue_pairs || ring_id < 0) {
                dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id);
                return;
@@ -516,15 +568,32 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
                return;
        }
 
-       ring = kmemdup(is_rx_ring
-                      ? vsi->rx_rings[ring_id] : vsi->tx_rings[ring_id],
-                      sizeof(*ring), GFP_KERNEL);
+       switch (type) {
+       case RING_TYPE_RX:
+               ring = kmemdup(vsi->rx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
+               break;
+       case RING_TYPE_TX:
+               ring = kmemdup(vsi->tx_rings[ring_id], sizeof(*ring), GFP_KERNEL);
+               break;
+       case RING_TYPE_XDP:
+               ring = kmemdup(vsi->xdp_rings[ring_id], sizeof(*ring), GFP_KERNEL);
+               break;
+       }
        if (!ring)
                return;
 
        if (cnt == 2) {
-               dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
-                        vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
+               switch (type) {
+               case RING_TYPE_RX:
+                       dev_info(&pf->pdev->dev, "VSI = %02i Rx ring = %02i\n", vsi_seid, ring_id);
+                       break;
+               case RING_TYPE_TX:
+                       dev_info(&pf->pdev->dev, "VSI = %02i Tx ring = %02i\n", vsi_seid, ring_id);
+                       break;
+               case RING_TYPE_XDP:
+                       dev_info(&pf->pdev->dev, "VSI = %02i XDP ring = %02i\n", vsi_seid, ring_id);
+                       break;
+               }
                for (i = 0; i < ring->count; i++) {
                        if (!is_rx_ring) {
                                txd = I40E_TX_DESC(ring, i);
@@ -562,7 +631,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
                                 rxd->read.rsvd1, rxd->read.rsvd2);
                }
        } else {
-               dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
+               dev_info(&pf->pdev->dev, "dump desc rx/tx/xdp <vsi_seid> <ring_id> [<desc_n>]\n");
        }
 
 out:
@@ -688,7 +757,6 @@ static void i40e_dbg_dump_vf_all(struct i40e_pf *pf)
                        i40e_dbg_dump_vf(pf, i);
 }
 
-#define I40E_MAX_DEBUG_OUT_BUFFER (4096*4)
 /**
  * i40e_dbg_command_write - write into command datum
  * @filp: the opened file
@@ -920,13 +988,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                                cnt = sscanf(&cmd_buf[12], "%i %i %i",
                                             &vsi_seid, &ring_id, &desc_n);
                                i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
-                                                  desc_n, pf, true);
+                                                  desc_n, pf, RING_TYPE_RX);
                        } else if (strncmp(&cmd_buf[10], "tx", 2)
                                        == 0) {
                                cnt = sscanf(&cmd_buf[12], "%i %i %i",
                                             &vsi_seid, &ring_id, &desc_n);
                                i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
-                                                  desc_n, pf, false);
+                                                  desc_n, pf, RING_TYPE_TX);
+                       } else if (strncmp(&cmd_buf[10], "xdp", 3)
+                                       == 0) {
+                               cnt = sscanf(&cmd_buf[13], "%i %i %i",
+                                            &vsi_seid, &ring_id, &desc_n);
+                               i40e_dbg_dump_desc(cnt, vsi_seid, ring_id,
+                                                  desc_n, pf, RING_TYPE_XDP);
                        } else if (strncmp(&cmd_buf[10], "aq", 2) == 0) {
                                i40e_dbg_dump_aq_desc(pf);
                        } else {
@@ -934,6 +1008,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                                         "dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
                                dev_info(&pf->pdev->dev,
                                         "dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+                               dev_info(&pf->pdev->dev,
+                                        "dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
                                dev_info(&pf->pdev->dev, "dump desc aq\n");
                        }
                } else if (strncmp(&cmd_buf[5], "reset stats", 11) == 0) {
@@ -1104,7 +1180,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        buff = NULL;
                } else {
                        dev_info(&pf->pdev->dev,
-                                "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>],\n");
+                                "dump desc tx <vsi_seid> <ring_id> [<desc_n>], dump desc rx <vsi_seid> <ring_id> [<desc_n>], dump desc xdp <vsi_seid> <ring_id> [<desc_n>],\n");
                        dev_info(&pf->pdev->dev, "dump switch\n");
                        dev_info(&pf->pdev->dev, "dump vsi [seid]\n");
                        dev_info(&pf->pdev->dev, "dump reset stats\n");
@@ -1520,6 +1596,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                dev_info(&pf->pdev->dev, "  dump vsi [seid]\n");
                dev_info(&pf->pdev->dev, "  dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n");
                dev_info(&pf->pdev->dev, "  dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n");
+               dev_info(&pf->pdev->dev, "  dump desc xdp <vsi_seid> <ring_id> [<desc_n>]\n");
                dev_info(&pf->pdev->dev, "  dump desc aq\n");
                dev_info(&pf->pdev->dev, "  dump reset stats\n");
                dev_info(&pf->pdev->dev, "  dump debug fwdata <cluster_id> <table_id> <index>\n");
index bf15a86..1bcb0ec 100644 (file)
 #define I40E_DEV_ID_10G_BASE_T_BC      0x15FF
 #define I40E_DEV_ID_10G_B              0x104F
 #define I40E_DEV_ID_10G_SFP            0x104E
+#define I40E_DEV_ID_5G_BASE_T_BC       0x101F
 #define I40E_IS_X710TL_DEVICE(d) \
-       ((d) == I40E_DEV_ID_10G_BASE_T_BC)
+       (((d) == I40E_DEV_ID_5G_BASE_T_BC) || \
+        ((d) == I40E_DEV_ID_10G_BASE_T_BC))
 #define I40E_DEV_ID_KX_X722            0x37CE
 #define I40E_DEV_ID_QSFP_X722          0x37CF
 #define I40E_DEV_ID_SFP_X722           0x37D0
@@ -32,8 +34,5 @@
 #define I40E_DEV_ID_10G_BASE_T_X722    0x37D2
 #define I40E_DEV_ID_SFP_I_X722         0x37D3
 
-#define i40e_is_40G_device(d)          ((d) == I40E_DEV_ID_QSFP_A  || \
-                                        (d) == I40E_DEV_ID_QSFP_B  || \
-                                        (d) == I40E_DEV_ID_QSFP_C)
 
 #endif /* _I40E_DEVIDS_H_ */
index aa8026b..825c104 100644 (file)
@@ -428,6 +428,8 @@ struct i40e_priv_flags {
 static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
        /* NOTE: MFP setting cannot be changed */
        I40E_PRIV_FLAG("MFP", I40E_FLAG_MFP_ENABLED, 1),
+       I40E_PRIV_FLAG("total-port-shutdown",
+                      I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED, 1),
        I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0),
        I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0),
        I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
@@ -1893,8 +1895,6 @@ static void i40e_get_drvinfo(struct net_device *netdev,
        struct i40e_pf *pf = vsi->back;
 
        strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
-       strlcpy(drvinfo->version, i40e_driver_version_str,
-               sizeof(drvinfo->version));
        strlcpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
                sizeof(drvinfo->fw_version));
        strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
@@ -2072,6 +2072,9 @@ static int i40e_set_ringparam(struct net_device *netdev,
                        err = i40e_setup_rx_descriptors(&rx_rings[i]);
                        if (err)
                                goto rx_unwind;
+                       err = i40e_alloc_rx_bi(&rx_rings[i]);
+                       if (err)
+                               goto rx_unwind;
 
                        /* now allocate the Rx buffers to make sure the OS
                         * has enough memory, any failure here means abort
@@ -4101,7 +4104,7 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
        switch (fsp->flow_type & ~FLOW_EXT) {
        case SCTP_V4_FLOW:
                new_mask &= ~I40E_VERIFY_TAG_MASK;
-               /* Fall through */
+               fallthrough;
        case TCP_V4_FLOW:
        case UDP_V4_FLOW:
                tcp_ip4_spec = &fsp->m_u.tcp_ip4_spec;
@@ -5006,6 +5009,13 @@ flags_complete:
                        dev_warn(&pf->pdev->dev, "Cannot change FEC config\n");
        }
 
+       if ((changed_flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
+           (orig_flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)) {
+               dev_err(&pf->pdev->dev,
+                       "Setting link-down-on-close not supported on this port (because total-port-shutdown is enabled)\n");
+               return -EOPNOTSUPP;
+       }
+
        if ((changed_flags & new_flags &
             I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
            (new_flags & I40E_FLAG_MFP_ENABLED))
index 1c78de8..3113792 100644 (file)
@@ -14,7 +14,6 @@ struct i40e_hw;
 #define I40E_HMC_DIRECT_BP_SIZE                0x200000 /* 2M */
 #define I40E_HMC_PAGED_BP_SIZE         4096
 #define I40E_HMC_PD_BP_BUF_ALIGNMENT   4096
-#define I40E_FIRST_VF_FPM_ID           16
 
 struct i40e_hmc_obj_info {
        u64 base;       /* base addr in FPM */
index 5d807c8..dadbfb3 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/of_net.h>
 #include <linux/pci.h>
 #include <linux/bpf.h>
+#include <generated/utsrelease.h>
 
 /* Local includes */
 #include "i40e.h"
@@ -23,15 +24,6 @@ const char i40e_driver_name[] = "i40e";
 static const char i40e_driver_string[] =
                        "Intel(R) Ethernet Connection XL710 Network Driver";
 
-#define DRV_KERN "-k"
-
-#define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 8
-#define DRV_VERSION_BUILD 20
-#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
-            __stringify(DRV_VERSION_MINOR) "." \
-            __stringify(DRV_VERSION_BUILD)    DRV_KERN
-const char i40e_driver_version_str[] = DRV_VERSION;
 static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";
 
 /* a bit of forward declarations */
@@ -54,7 +46,7 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf);
 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
 static int i40e_get_capabilities(struct i40e_pf *pf,
                                 enum i40e_admin_queue_opc list_type);
-
+static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf);
 
 /* i40e_pci_tbl - PCI Device ID Table
  *
@@ -101,7 +93,6 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX
 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
 MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
 
 static struct workqueue_struct *i40e_wq;
 
@@ -439,11 +430,15 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
                i40e_get_netdev_stats_struct_tx(ring, stats);
 
                if (i40e_enabled_xdp_vsi(vsi)) {
-                       ring++;
+                       ring = READ_ONCE(vsi->xdp_rings[i]);
+                       if (!ring)
+                               continue;
                        i40e_get_netdev_stats_struct_tx(ring, stats);
                }
 
-               ring++;
+               ring = READ_ONCE(vsi->rx_rings[i]);
+               if (!ring)
+                       continue;
                do {
                        start   = u64_stats_fetch_begin_irq(&ring->syncp);
                        packets = ring->stats.packets;
@@ -787,6 +782,8 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
        for (q = 0; q < vsi->num_queue_pairs; q++) {
                /* locate Tx ring */
                p = READ_ONCE(vsi->tx_rings[q]);
+               if (!p)
+                       continue;
 
                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
@@ -800,8 +797,11 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
                tx_linearize += p->tx_stats.tx_linearize;
                tx_force_wb += p->tx_stats.tx_force_wb;
 
-               /* Rx queue is part of the same block as Tx queue */
-               p = &p[1];
+               /* locate Rx ring */
+               p = READ_ONCE(vsi->rx_rings[q]);
+               if (!p)
+                       continue;
+
                do {
                        start = u64_stats_fetch_begin_irq(&p->syncp);
                        packets = p->stats.packets;
@@ -811,6 +811,25 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
                rx_p += packets;
                rx_buf += p->rx_stats.alloc_buff_failed;
                rx_page += p->rx_stats.alloc_page_failed;
+
+               if (i40e_enabled_xdp_vsi(vsi)) {
+                       /* locate XDP ring */
+                       p = READ_ONCE(vsi->xdp_rings[q]);
+                       if (!p)
+                               continue;
+
+                       do {
+                               start = u64_stats_fetch_begin_irq(&p->syncp);
+                               packets = p->stats.packets;
+                               bytes = p->stats.bytes;
+                       } while (u64_stats_fetch_retry_irq(&p->syncp, start));
+                       tx_b += bytes;
+                       tx_p += packets;
+                       tx_restart += p->tx_stats.restart_queue;
+                       tx_busy += p->tx_stats.tx_busy;
+                       tx_linearize += p->tx_stats.tx_linearize;
+                       tx_force_wb += p->tx_stats.tx_force_wb;
+               }
        }
        rcu_read_unlock();
        vsi->tx_restart = tx_restart;
@@ -1817,7 +1836,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                                                       num_tc_qps);
                                        break;
                                }
-                               /* fall through */
+                               fallthrough;
                        case I40E_VSI_FDIR:
                        case I40E_VSI_SRIOV:
                        case I40E_VSI_VMDQ2:
@@ -6492,8 +6511,7 @@ out:
        return err;
 }
 #endif /* CONFIG_I40E_DCB */
-#define SPEED_SIZE 14
-#define FC_SIZE 8
+
 /**
  * i40e_print_link_message - print link up or down
  * @vsi: the VSI for which link needs a message
@@ -6681,21 +6699,6 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
 }
 
 /**
- * i40e_up - Bring the connection back up after being down
- * @vsi: the VSI being configured
- **/
-int i40e_up(struct i40e_vsi *vsi)
-{
-       int err;
-
-       err = i40e_vsi_configure(vsi);
-       if (!err)
-               err = i40e_up_complete(vsi);
-
-       return err;
-}
-
-/**
  * i40e_force_link_state - Force the link status
  * @pf: board private structure
  * @is_up: whether the link state should be forced up or down
@@ -6704,6 +6707,7 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
 {
        struct i40e_aq_get_phy_abilities_resp abilities;
        struct i40e_aq_set_phy_config config = {0};
+       bool non_zero_phy_type = is_up;
        struct i40e_hw *hw = &pf->hw;
        i40e_status err;
        u64 mask;
@@ -6739,8 +6743,11 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
 
        /* If link needs to go up, but was not forced to go down,
         * and its speed values are OK, no need for a flap
+        * if non_zero_phy_type was set, still need to force up
         */
-       if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
+       if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED)
+               non_zero_phy_type = true;
+       else if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
                return I40E_SUCCESS;
 
        /* To force link we need to set bits for all supported PHY types,
@@ -6748,10 +6755,18 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
         * across two fields.
         */
        mask = I40E_PHY_TYPES_BITMASK;
-       config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
-       config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0;
+       config.phy_type =
+               non_zero_phy_type ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
+       config.phy_type_ext =
+               non_zero_phy_type ? (u8)((mask >> 32) & 0xff) : 0;
        /* Copy the old settings, except of phy_type */
        config.abilities = abilities.abilities;
+       if (pf->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED) {
+               if (is_up)
+                       config.abilities |= I40E_AQ_PHY_ENABLE_LINK;
+               else
+                       config.abilities &= ~(I40E_AQ_PHY_ENABLE_LINK);
+       }
        if (abilities.link_speed != 0)
                config.link_speed = abilities.link_speed;
        else
@@ -6782,12 +6797,32 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
                i40e_update_link_info(hw);
        }
 
-       i40e_aq_set_link_restart_an(hw, true, NULL);
+       i40e_aq_set_link_restart_an(hw, is_up, NULL);
 
        return I40E_SUCCESS;
 }
 
 /**
+ * i40e_up - Bring the connection back up after being down
+ * @vsi: the VSI being configured
+ **/
+int i40e_up(struct i40e_vsi *vsi)
+{
+       int err;
+
+       if (vsi->type == I40E_VSI_MAIN &&
+           (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
+            vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
+               i40e_force_link_state(vsi->back, true);
+
+       err = i40e_vsi_configure(vsi);
+       if (!err)
+               err = i40e_up_complete(vsi);
+
+       return err;
+}
+
+/**
  * i40e_down - Shutdown the connection processing
  * @vsi: the VSI being stopped
  **/
@@ -6805,7 +6840,8 @@ void i40e_down(struct i40e_vsi *vsi)
        i40e_vsi_disable_irq(vsi);
        i40e_vsi_stop_rings(vsi);
        if (vsi->type == I40E_VSI_MAIN &&
-           vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED)
+          (vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED ||
+           vsi->back->flags & I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED))
                i40e_force_link_state(vsi->back, false);
        i40e_napi_disable_all(vsi);
 
@@ -8950,13 +8986,6 @@ u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
        return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
 }
 
-/* We can see up to 256 filter programming desc in transit if the filters are
- * being applied really fast; before we see the first
- * filter miss error on Rx queue 0. Accumulating enough error messages before
- * reacting will make sure we don't cause flush too often.
- */
-#define I40E_MAX_FD_PROGRAM_ERROR 256
-
 /**
  * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
  * @pf: board private structure
@@ -9851,11 +9880,11 @@ static void i40e_send_version(struct i40e_pf *pf)
 {
        struct i40e_driver_version dv;
 
-       dv.major_version = DRV_VERSION_MAJOR;
-       dv.minor_version = DRV_VERSION_MINOR;
-       dv.build_version = DRV_VERSION_BUILD;
+       dv.major_version = 0xff;
+       dv.minor_version = 0xff;
+       dv.build_version = 0xff;
        dv.subbuild_version = 0;
-       strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
+       strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
        i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
 }
 
@@ -10824,10 +10853,10 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
        if (vsi->tx_rings && vsi->tx_rings[0]) {
                for (i = 0; i < vsi->alloc_queue_pairs; i++) {
                        kfree_rcu(vsi->tx_rings[i], rcu);
-                       vsi->tx_rings[i] = NULL;
-                       vsi->rx_rings[i] = NULL;
+                       WRITE_ONCE(vsi->tx_rings[i], NULL);
+                       WRITE_ONCE(vsi->rx_rings[i], NULL);
                        if (vsi->xdp_rings)
-                               vsi->xdp_rings[i] = NULL;
+                               WRITE_ONCE(vsi->xdp_rings[i], NULL);
                }
        }
 }
@@ -10861,7 +10890,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
                if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
                        ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
                ring->itr_setting = pf->tx_itr_default;
-               vsi->tx_rings[i] = ring++;
+               WRITE_ONCE(vsi->tx_rings[i], ring++);
 
                if (!i40e_enabled_xdp_vsi(vsi))
                        goto setup_rx;
@@ -10879,7 +10908,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
                        ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
                set_ring_xdp(ring);
                ring->itr_setting = pf->tx_itr_default;
-               vsi->xdp_rings[i] = ring++;
+               WRITE_ONCE(vsi->xdp_rings[i], ring++);
 
 setup_rx:
                ring->queue_index = i;
@@ -10892,7 +10921,7 @@ setup_rx:
                ring->size = 0;
                ring->dcb_tc = 0;
                ring->itr_setting = pf->rx_itr_default;
-               vsi->rx_rings[i] = ring;
+               WRITE_ONCE(vsi->rx_rings[i], ring);
        }
 
        return 0;
@@ -11846,6 +11875,58 @@ bw_commit_out:
 }
 
 /**
+ * i40e_is_total_port_shutdown_enabled - read NVM and return value
+ * if total port shutdown feature is enabled for this PF
+ * @pf: board private structure
+ **/
+static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
+{
+#define I40E_TOTAL_PORT_SHUTDOWN_ENABLED       BIT(4)
+#define I40E_FEATURES_ENABLE_PTR               0x2A
+#define I40E_CURRENT_SETTING_PTR               0x2B
+#define I40E_LINK_BEHAVIOR_WORD_OFFSET         0x2D
+#define I40E_LINK_BEHAVIOR_WORD_LENGTH         0x1
+#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED   BIT(0)
+#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH     4
+       i40e_status read_status = I40E_SUCCESS;
+       u16 sr_emp_sr_settings_ptr = 0;
+       u16 features_enable = 0;
+       u16 link_behavior = 0;
+       bool ret = false;
+
+       read_status = i40e_read_nvm_word(&pf->hw,
+                                        I40E_SR_EMP_SR_SETTINGS_PTR,
+                                        &sr_emp_sr_settings_ptr);
+       if (read_status)
+               goto err_nvm;
+       read_status = i40e_read_nvm_word(&pf->hw,
+                                        sr_emp_sr_settings_ptr +
+                                        I40E_FEATURES_ENABLE_PTR,
+                                        &features_enable);
+       if (read_status)
+               goto err_nvm;
+       if (I40E_TOTAL_PORT_SHUTDOWN_ENABLED & features_enable) {
+               read_status = i40e_read_nvm_module_data(&pf->hw,
+                                                       I40E_SR_EMP_SR_SETTINGS_PTR,
+                                                       I40E_CURRENT_SETTING_PTR,
+                                                       I40E_LINK_BEHAVIOR_WORD_OFFSET,
+                                                       I40E_LINK_BEHAVIOR_WORD_LENGTH,
+                                                       &link_behavior);
+               if (read_status)
+                       goto err_nvm;
+               link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH);
+               ret = I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED & link_behavior;
+       }
+       return ret;
+
+err_nvm:
+       dev_warn(&pf->pdev->dev,
+                "total-port-shutdown feature is off due to read nvm error: %s\n",
+                i40e_stat_str(&pf->hw, read_status));
+       return ret;
+}
+
+/**
  * i40e_sw_init - Initialize general software structures (struct i40e_pf)
  * @pf: board private structure to initialize
  *
@@ -12020,6 +12101,16 @@ static int i40e_sw_init(struct i40e_pf *pf)
 
        pf->tx_timeout_recovery_level = 1;
 
+       if (pf->hw.mac.type != I40E_MAC_X722 &&
+           i40e_is_total_port_shutdown_enabled(pf)) {
+               /* Link down on close must be on when total port shutdown
+                * is enabled for a given port
+                */
+               pf->flags |= (I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED |
+                             I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED);
+               dev_info(&pf->pdev->dev,
+                        "total-port-shutdown was enabled, link-down-on-close is forced on\n");
+       }
        mutex_init(&pf->switch_mutex);
 
 sw_init_done:
@@ -13694,8 +13785,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
                /* Setup DCB netlink interface */
                i40e_dcbnl_setup(vsi);
 #endif /* CONFIG_I40E_DCB */
-               /* fall through */
-
+               fallthrough;
        case I40E_VSI_FDIR:
                /* set up vectors and rings if needed */
                ret = i40e_vsi_setup_vectors(vsi);
@@ -13711,7 +13801,6 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 
                i40e_vsi_reset_stats(vsi);
                break;
-
        default:
                /* no netdev or rings for the other VSI types */
                break;
@@ -14565,28 +14654,17 @@ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
  **/
 static bool i40e_check_recovery_mode(struct i40e_pf *pf)
 {
-       u32 val = rd32(&pf->hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
-       bool is_recovery_mode = false;
-
-       if (pf->hw.mac.type == I40E_MAC_XL710)
-               is_recovery_mode =
-               val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
-               val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
-               val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK ||
-               val == I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK;
-       if (pf->hw.mac.type == I40E_MAC_X722)
-               is_recovery_mode =
-               val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK ||
-               val == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK;
-       if (is_recovery_mode) {
-               dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
-               dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
+       u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
+
+       if (val & I40E_GL_FWSTS_FWS1B_MASK) {
+               dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
+               dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
                set_bit(__I40E_RECOVERY_MODE, pf->state);
 
                return true;
        }
-       if (test_and_clear_bit(__I40E_RECOVERY_MODE, pf->state))
-               dev_info(&pf->pdev->dev, "Reinitializing in normal mode with full functionality.\n");
+       if (test_bit(__I40E_RECOVERY_MODE, pf->state))
+               dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n");
 
        return false;
 }
@@ -14614,29 +14692,68 @@ static bool i40e_check_recovery_mode(struct i40e_pf *pf)
  **/
 static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
 {
-       const unsigned short MAX_CNT = 1000;
-       const unsigned short MSECS = 10;
+       /* wait max 10 seconds for PF reset to succeed */
+       const unsigned long time_end = jiffies + 10 * HZ;
+
        struct i40e_hw *hw = &pf->hw;
        i40e_status ret;
-       int cnt;
 
-       for (cnt = 0; cnt < MAX_CNT; ++cnt) {
+       ret = i40e_pf_reset(hw);
+       while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
+               usleep_range(10000, 20000);
                ret = i40e_pf_reset(hw);
-               if (!ret)
-                       break;
-               msleep(MSECS);
        }
 
-       if (cnt == MAX_CNT) {
+       if (ret == I40E_SUCCESS)
+               pf->pfr_count++;
+       else
                dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret);
-               return ret;
-       }
 
-       pf->pfr_count++;
        return ret;
 }
 
 /**
+ * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
+ * @pf: board private structure
+ *
+ * Check FW registers to determine if FW issued unexpected EMP Reset.
+ * Every time when unexpected EMP Reset occurs the FW increments
+ * a counter of unexpected EMP Resets. When the counter reaches 10
+ * the FW should enter the Recovery mode
+ *
+ * Returns true if FW issued unexpected EMP Reset
+ **/
+static bool i40e_check_fw_empr(struct i40e_pf *pf)
+{
+       const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) &
+                          I40E_GL_FWSTS_FWS1B_MASK;
+       return (fw_sts > I40E_GL_FWSTS_FWS1B_EMPR_0) &&
+              (fw_sts <= I40E_GL_FWSTS_FWS1B_EMPR_10);
+}
+
+/**
+ * i40e_handle_resets - handle EMP resets and PF resets
+ * @pf: board private structure
+ *
+ * Handle both EMP resets and PF resets and conclude whether there are
+ * any issues regarding these resets. If there are any issues then
+ * generate log entry.
+ *
+ * Return 0 if NIC is healthy or negative value when there are issues
+ * with resets
+ **/
+static i40e_status i40e_handle_resets(struct i40e_pf *pf)
+{
+       const i40e_status pfr = i40e_pf_loop_reset(pf);
+       const bool is_empr = i40e_check_fw_empr(pf);
+
+       if (is_empr || pfr != I40E_SUCCESS)
+               dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
+
+       return is_empr ? I40E_ERR_RESET_FAILED : pfr;
+}
+
+/**
  * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
  * @pf: board private structure
  * @hw: ptr to the hardware info
@@ -14872,11 +14989,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_pf_reset;
        }
 
-       err = i40e_pf_loop_reset(pf);
-       if (err) {
-               dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
+       err = i40e_handle_resets(pf);
+       if (err)
                goto err_pf_reset;
-       }
 
        i40e_check_recovery_mode(pf);
 
@@ -15272,6 +15387,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        i40e_stat_str(&pf->hw, err),
                        i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 
+       /* make sure the MFS hasn't been set lower than the default */
+#define MAX_FRAME_SIZE_DEFAULT 0x2600
+       val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
+              I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
+       if (val < MAX_FRAME_SIZE_DEFAULT)
+               dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
+                        i, val);
+
        /* Add a filter to drop all Flow control frames from any VSI from being
         * transmitted. By doing so we stop a malicious VF from sending out
         * PAUSE or PFC frames and potentially controlling traffic for other
@@ -15782,8 +15905,7 @@ static struct pci_driver i40e_driver = {
  **/
 static int __init i40e_init_module(void)
 {
-       pr_info("%s: %s - version %s\n", i40e_driver_name,
-               i40e_driver_string, i40e_driver_version_str);
+       pr_info("%s: %s\n", i40e_driver_name, i40e_driver_string);
        pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
 
        /* There is no need to throttle the number of active tasks because
index c302ef2..2f6815b 100644 (file)
@@ -26,7 +26,6 @@ do {                                                                          \
 #define wr32(a, reg, value)    writel((value), ((a)->hw_addr + (reg)))
 #define rd32(a, reg)           readl((a)->hw_addr + (reg))
 
-#define wr64(a, reg, value)    writeq((value), ((a)->hw_addr + (reg)))
 #define rd64(a, reg)           readq((a)->hw_addr + (reg))
 #define i40e_flush(a)          readl((a)->hw_addr + I40E_GLGEN_STAT)
 
index 9bf1ad4..ff7b19c 100644 (file)
@@ -586,7 +586,7 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
                if (!(pf->hw_features & I40E_HW_PTP_L4_CAPABLE))
                        return -ERANGE;
-               /* fall through */
+               fallthrough;
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
index d35d690..564df22 100644 (file)
@@ -4,53 +4,14 @@
 #ifndef _I40E_REGISTER_H_
 #define _I40E_REGISTER_H_
 
-#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */
-#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
-#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */
-#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
-#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */
-#define I40E_GL_ARQH_ARQH_SHIFT 0
-#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
-#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */
-#define I40E_GL_ARQT_ARQT_SHIFT 0
-#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
-#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */
-#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
-#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */
-#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
-#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */
-#define I40E_GL_ATQH_ATQH_SHIFT 0
-#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT)
-#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */
-#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT)
-#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT)
-#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT)
 #define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30
 #define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT)
-#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */
-#define I40E_GL_ATQT_ATQT_SHIFT 0
-#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT)
 #define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */
-#define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT)
 #define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */
-#define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT)
 #define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */
 #define I40E_PF_ARQH_ARQH_SHIFT 0
 #define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)
 #define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */
-#define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT)
 #define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
 #define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT)
 #define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
 #define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
 #define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
 #define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
-#define I40E_PF_ARQT_ARQT_SHIFT 0
-#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
 #define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */
-#define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT)
 #define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */
-#define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT)
 #define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */
-#define I40E_PF_ATQH_ATQH_SHIFT 0
-#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT)
 #define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */
-#define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT)
 #define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
 #define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT)
 #define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
 #define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
 #define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
 #define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
-#define I40E_PF_ATQT_ATQT_SHIFT 0
-#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
-#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQBAH_MAX_INDEX 127
-#define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT)
-#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQBAL_MAX_INDEX 127
-#define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT)
-#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQH_MAX_INDEX 127
-#define I40E_VF_ARQH_ARQH_SHIFT 0
-#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT)
-#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQLEN_MAX_INDEX 127
-#define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT)
-#define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
-#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT)
-#define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
-#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT)
-#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
-#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
-#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
-#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ARQT_MAX_INDEX 127
-#define I40E_VF_ARQT_ARQT_SHIFT 0
-#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT)
-#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQBAH_MAX_INDEX 127
-#define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT)
-#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQBAL_MAX_INDEX 127
-#define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT)
-#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQH_MAX_INDEX 127
-#define I40E_VF_ATQH_ATQH_SHIFT 0
-#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT)
-#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQLEN_MAX_INDEX 127
-#define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT)
-#define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT)
-#define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT)
-#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
-#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_VF_ATQT_MAX_INDEX 127
-#define I40E_VF_ATQT_ATQT_SHIFT 0
-#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT)
-#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */
-#define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
-#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */
-#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */
-#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */
-#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
-#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
-#define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
-#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
-#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
-#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
-#define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
-#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
-#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
-#define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
-#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT)
-#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */
-#define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
-#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
-#define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
-#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
-#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
-#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
-#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
-#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
-#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
-#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
-#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
-#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
-#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
-#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
-#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */
-#define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
-#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT)
-#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */
-#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
-#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
-#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */
-#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
-#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
-#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */
-#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
-#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
-#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */
-#define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
-#define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
-#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
-#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
-#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
 #define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */
-#define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
-#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
-#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
-#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
-#define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
-#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT)
-#define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
-#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
 #define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
 #define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
 #define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */
 #define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
 #define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
-#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */
-#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
-#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
-#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
-#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
-#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
-#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
-#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
-#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
-#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
-#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
-#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */
-#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
-#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
-#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
-#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
-#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
-#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
-#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
-#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
-#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
-#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
-#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
-#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
-#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
-#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
-#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
-#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
-#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
-#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
-#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
-#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
-#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
-#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */
-#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
-#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
-#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */
-#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
-#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
-#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
-#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
-#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
-#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
-#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
-#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
-#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
-#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
-#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
-#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
-#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
-#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
-#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
-#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
-#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7
-#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */
-#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
-#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
-#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
-#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
-#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
-#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */
-#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
-#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
-#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */
-#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
-#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
-#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */
-#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
-#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
-#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */
-#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
-#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
-#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
-#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
-#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
-#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
-#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
-#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
-#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
-#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
-#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
-#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
-#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
-#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
-#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
-#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */
-#define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
-#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
-#define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
-#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
-#define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
-#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT)
-#define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
-#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
 #define I40E_GL_FWSTS 0x00083048 /* Reset: POR */
-#define I40E_GL_FWSTS_FWS0B_SHIFT 0
-#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT)
-#define I40E_GL_FWSTS_FWRI_SHIFT 9
-#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
 #define I40E_GL_FWSTS_FWS1B_SHIFT 16
 #define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_EMPR_0 I40E_MASK(0x20, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_EMPR_10 I40E_MASK(0x2A, I40E_GL_FWSTS_FWS1B_SHIFT)
 #define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT)
 #define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0x31, I40E_GL_FWSTS_FWS1B_SHIFT)
 #define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK I40E_MASK(0x32, I40E_GL_FWSTS_FWS1B_SHIFT)
 #define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK I40E_MASK(0x33, I40E_GL_FWSTS_FWS1B_SHIFT)
 #define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK I40E_MASK(0xB, I40E_GL_FWSTS_FWS1B_SHIFT)
 #define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK I40E_MASK(0xC, I40E_GL_FWSTS_FWS1B_SHIFT)
-#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
-#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
-#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
-#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
-#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
-#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
-#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
-#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
-#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
 #define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
 #define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
-#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
-#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
-#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
 #define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
-#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
-#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
 #define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
-#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
-#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
-#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
-#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT 26
-#define I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_MASK I40E_MASK(0xF, I40E_GLGEN_GPIO_CTL_PRT_BIT_MAP_SHIFT)
-#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
-#define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
-#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
-#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
-#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
-#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
-#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
-#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */
-#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
-#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
-#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */
-#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
-#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
-#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_I2CCMD_MAX_INDEX 3
-#define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
-#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT)
-#define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
-#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT)
-#define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
-#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
-#define I40E_GLGEN_I2CCMD_OP_SHIFT 27
-#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT)
-#define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
-#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT)
-#define I40E_GLGEN_I2CCMD_R_SHIFT 29
-#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT)
-#define I40E_GLGEN_I2CCMD_E_SHIFT 31
-#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT)
-#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
-#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
-#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
-#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
-#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
-#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
-#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
-#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
-#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
-#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
-#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
-#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
-#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
-#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
-#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */
-#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
-#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
-#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x7FF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT 29
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_MASK I40E_MASK(0x7, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD0_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
-#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
 #define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MSCA_MAX_INDEX 3
 #define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
-#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT)
 #define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
-#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT)
 #define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
-#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT)
 #define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
-#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT)
 #define I40E_GLGEN_MSCA_STCODE_SHIFT 28
-#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT)
 #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
 #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
 #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
 #define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
 #define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_GLGEN_MSRWD_MAX_INDEX 3
 #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
-#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
 #define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
 #define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
-#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
-#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
-#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
-#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
-#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
 #define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
 #define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
 #define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
 #define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
 #define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
-#define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
-#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
-#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
-#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
-#define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
-#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
 #define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */
 #define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
 #define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
-#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
-#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
 #define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
 #define I40E_GLGEN_RTRIG_CORER_SHIFT 0
 #define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
 #define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
 #define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT)
-#define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
-#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
 #define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
-#define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
-#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT)
-#define I40E_GLGEN_STAT_DCBEN_SHIFT 2
-#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT)
-#define I40E_GLGEN_STAT_VTEN_SHIFT 3
-#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT)
-#define I40E_GLGEN_STAT_FCOEN_SHIFT 4
-#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT)
-#define I40E_GLGEN_STAT_EVBEN_SHIFT 5
-#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT)
-#define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
-#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT)
 #define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
-#define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
-#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
 #define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */
-#define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
-#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT)
 #define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */
 #define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
 #define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT)
-#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */
-#define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
-#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
 #define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */
 #define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
 #define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
-#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */
-#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0
-#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT)
-#define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
-#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT)
-#define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
-#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT)
-#define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
-#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT)
 #define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */
 #define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
 #define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
-#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
-#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */
-#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
-#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
 #define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */
-#define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
-#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
-#define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
-#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
 #define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFGEN_RSTAT1_MAX_INDEX 127
-#define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
-#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
 #define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
 #define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
 #define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
 #define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
 #define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
 #define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
-#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_VSIGEN_RSTAT_MAX_INDEX 383
-#define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
-#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT)
-#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_VSIGEN_RTRIG_MAX_INDEX 383
-#define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
-#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
 #define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
 #define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
 #define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
 #define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
-#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
-#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
 #define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */
-#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
 #define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
 #define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
 #define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
 #define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
-#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
-#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
 #define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */
 #define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
 #define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
 #define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */
-#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
 #define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */
-#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
-#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
-#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
-#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
-#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
-#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
-#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
-#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
-#define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
-#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
-#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */
-#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
-#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
-#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */
-#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
-#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
-#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
-#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
-#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
-#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
-#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */
-#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
-#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
-#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */
-#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
 #define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */
-#define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
-#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
 #define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
 #define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
 #define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
 #define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
-#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
-#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
 #define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */
-#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
 #define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
 #define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
 #define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
-#define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
-#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
 #define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
-#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
-#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
 #define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */
-#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
-#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
-#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
-#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
-#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_SDPART_MAX_INDEX 15
-#define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
-#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
-#define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
-#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
 #define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */
-#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
-#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
 #define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */
-#define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
-#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
-#define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
-#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
-#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
-#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
-#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
-#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
-#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
-#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
 #define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
 #define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
 #define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
-#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
 #define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */
-#define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
 #define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
-#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
 #define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */
-#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
-#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
 #define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */
 #define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
-#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
 #define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
 #define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
-#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
-#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
-#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */
-#define I40E_GL_GP_FUSE_MAX_INDEX 28
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
-#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */
-#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
-#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
-#define I40E_GL_UFUSE_NIC_ID_SHIFT 2
-#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT)
-#define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
-#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
-#define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
-#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
-#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */
-#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
-#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
 #define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */
-#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
 #define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
 #define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
 #define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
 #define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
 #define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
-#define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
 #define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
 #define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
 #define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_PFINT_CEQCTL_MAX_INDEX 511
 #define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
 #define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
 #define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
-#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
 #define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
 #define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
 #define I40E_GLINT_CTL 0x0003F800 /* Reset: CORER */
-#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT 0
-#define I40E_GLINT_CTL_DIS_AUTOMASK_PF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_PF0_SHIFT)
 #define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT 1
 #define I40E_GLINT_CTL_DIS_AUTOMASK_VF0_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_VF0_SHIFT)
-#define I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT 2
-#define I40E_GLINT_CTL_DIS_AUTOMASK_N_MASK I40E_MASK(0x1, I40E_GLINT_CTL_DIS_AUTOMASK_N_SHIFT)
 #define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
 #define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
 #define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
 #define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
 #define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
 #define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
 #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
 #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
 #define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
 #define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
 #define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
 #define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
 #define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
 #define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
 #define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
 #define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
 #define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
 #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
 #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
-#define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */
-#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
-#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
 #define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
 #define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
 #define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
 #define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
-#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
-#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
-#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT)
-#define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
-#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT)
 #define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
 #define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT)
 #define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
 #define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT)
 #define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
 #define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
-#define I40E_PFINT_ICR0_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT)
 #define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
 #define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
 #define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
 #define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT)
 #define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
 #define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
 #define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
 #define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
-#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
 #define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
 #define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
 #define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
 #define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
 #define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
 #define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
-#define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
 #define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
-#define I40E_PFINT_ITR0_MAX_INDEX 2
-#define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT)
 #define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_ITRN_MAX_INDEX 2
-#define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT)
 #define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */
 #define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
-#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
 #define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_LNKLSTN_MAX_INDEX 511
 #define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
 #define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
 #define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */
-#define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT)
-#define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
 #define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
-#define I40E_PFINT_RATEN_MAX_INDEX 511
-#define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
-#define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
 #define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: CORER */
-#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
-#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
 #define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QINT_RQCTL_MAX_INDEX 1535
 #define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
 #define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
 #define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
 #define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
 #define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
 #define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
 #define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
 #define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
 #define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
 #define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT)
 #define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QINT_TQCTL_MAX_INDEX 1535
 #define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
 #define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
 #define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
 #define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
 #define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
 #define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
 #define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
 #define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
 #define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
 #define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT)
 #define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
-#define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
-#define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
-#define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
 #define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
-#define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
-#define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
 #define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
 #define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
-#define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VFINT_ICR0_MAX_INDEX 127
-#define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
-#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
-#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT)
-#define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT)
-#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR0_SWINT_SHIFT 31
-#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT)
-#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
-#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
-#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */
-#define I40E_VFINT_ITR0_MAX_INDEX 2
-#define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT)
-#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */
-#define I40E_VFINT_ITRN_MAX_INDEX 2
-#define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
-#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
-#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
 #define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VPINT_AEQCTL_MAX_INDEX 127
 #define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
 #define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
 #define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
 #define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
 #define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_VPINT_CEQCTL_MAX_INDEX 511
 #define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
 #define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
-#define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
 #define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
 #define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
 #define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
 #define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
 #define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
 #define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
-#define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
 #define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPINT_LNKLST0_MAX_INDEX 127
 #define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
 #define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
-#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
 #define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
-#define I40E_VPINT_LNKLSTN_MAX_INDEX 511
 #define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
 #define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
 #define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
 #define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPINT_RATE0_MAX_INDEX 127
-#define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT)
-#define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
-#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
-#define I40E_VPINT_RATEN_MAX_INDEX 511
-#define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
-#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT)
-#define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
-#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */
-#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
-#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
-#define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
-#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT)
 #define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */
 #define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
 #define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
 #define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */
-#define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
-#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
 #define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */
-#define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
-#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
 #define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */
-#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
-#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
 #define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */
-#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11
 #define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
 #define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16
-#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT)
 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
 #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
 #define I40E_PFLAN_QALLOC_VALID_SHIFT 31
 #define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT)
 #define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
-#define I40E_QRX_ENA_MAX_INDEX 1535
 #define I40E_QRX_ENA_QENA_REQ_SHIFT 0
 #define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT)
-#define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
-#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT)
 #define I40E_QRX_ENA_QENA_STAT_SHIFT 2
 #define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT)
 #define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QRX_TAIL_MAX_INDEX 1535
-#define I40E_QRX_TAIL_TAIL_SHIFT 0
-#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT)
 #define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QTX_CTL_MAX_INDEX 1535
 #define I40E_QTX_CTL_PFVF_Q_SHIFT 0
 #define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT)
 #define I40E_QTX_CTL_PF_INDX_SHIFT 2
 #define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
 #define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT)
 #define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
-#define I40E_QTX_ENA_MAX_INDEX 1535
 #define I40E_QTX_ENA_QENA_REQ_SHIFT 0
 #define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT)
-#define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
-#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT)
 #define I40E_QTX_ENA_QENA_STAT_SHIFT 2
 #define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT)
 #define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
-#define I40E_QTX_HEAD_MAX_INDEX 1535
-#define I40E_QTX_HEAD_HEAD_SHIFT 0
-#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT)
-#define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
-#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT)
 #define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
-#define I40E_QTX_TAIL_MAX_INDEX 1535
-#define I40E_QTX_TAIL_TAIL_SHIFT 0
-#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT)
 #define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPLAN_MAPENA_MAX_INDEX 127
 #define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
 #define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
 #define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */
-#define I40E_VPLAN_QTABLE_MAX_INDEX 15
 #define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
 #define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT)
 #define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
-#define I40E_VSILAN_QBASE_MAX_INDEX 383
-#define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
-#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT)
 #define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
 #define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
 #define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */
-#define I40E_VSILAN_QTABLE_MAX_INDEX 7
-#define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
-#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
-#define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
-#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
 #define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */
 #define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
 #define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT)
 #define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */
 #define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
 #define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
-#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */
-#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0
-#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT)
-#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */
-#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
-#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
-#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
-#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
-#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
-#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
-#define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
-#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
-#define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
-#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
-#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
-#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
-#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
-#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */
-#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
-#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
-#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */
-#define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
-#define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
-#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
-#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */
-#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
-#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
-#define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
-#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
-#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */
-#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
-#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
-#define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
-#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
-#define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
-#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
-#define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
-#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
-#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
-#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
-#define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
-#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
-#define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
-#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
-#define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
-#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
-#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
-#define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
-#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT)
-#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_MDEF_MAX_INDEX 7
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
-#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
-#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
-#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
-#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
-#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
-#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
-#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
-#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
-#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
-#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
-#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
-#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
-#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
-#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
-#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_METF_MAX_INDEX 3
-#define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
-#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT)
-#define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
-#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT)
-#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
-#define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
-#define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
-#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
-#define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
-#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT)
-#define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
-#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT)
-#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
-#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
-#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
-#define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
-#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
-#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
-#define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
-#define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
-#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
-#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MMAH_MAX_INDEX 3
-#define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
-#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT)
-#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
-#define I40E_PRT_MNG_MMAL_MAX_INDEX 3
-#define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
-#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT)
-#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */
-#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
-#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
-#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */
-#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
-#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
-#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
-#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
-#define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
-#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
-#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
-#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
-#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
-#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
-#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
-#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */
-#define I40E_MSIX_PBA_MAX_INDEX 5
-#define I40E_MSIX_PBA_PENBIT_SHIFT 0
-#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT)
-#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TADD_MAX_INDEX 128
-#define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
-#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT)
-#define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
-#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TMSG_MAX_INDEX 128
-#define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
-#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TUADD_MAX_INDEX 128
-#define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
-#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
-#define I40E_MSIX_TVCTRL_MAX_INDEX 128
-#define I40E_MSIX_TVCTRL_MASK_SHIFT 0
-#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT)
-#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
-#define I40E_VFMSIX_PBA1_MAX_INDEX 19
-#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
-#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TADD1_MAX_INDEX 639
-#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
-#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
-#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
-#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TMSG1_MAX_INDEX 639
-#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
-#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TUADD1_MAX_INDEX 639
-#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
-#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
-#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
-#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
 #define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
-#define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
-#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT)
-#define I40E_GLNVM_FLA_FL_CE_SHIFT 1
-#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT)
-#define I40E_GLNVM_FLA_FL_SI_SHIFT 2
-#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT)
-#define I40E_GLNVM_FLA_FL_SO_SHIFT 3
-#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT)
-#define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
-#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT)
-#define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
-#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT)
 #define I40E_GLNVM_FLA_LOCKED_SHIFT 6
 #define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
-#define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
-#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT)
-#define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
-#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT)
-#define I40E_GLNVM_FLA_FL_DER_SHIFT 31
-#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT)
-#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */
-#define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
-#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT)
-#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31
-#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT)
 #define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */
-#define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
-#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT)
 #define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
 #define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT)
-#define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
-#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT)
-#define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
-#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT)
-#define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
-#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
-#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */
-#define I40E_GLNVM_PROTCSR_MAX_INDEX 59
-#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
-#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
 #define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */
-#define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
-#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
 #define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
-#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT)
-#define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
-#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT)
 #define I40E_GLNVM_SRCTL_START_SHIFT 30
-#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
 #define I40E_GLNVM_SRCTL_DONE_SHIFT 31
 #define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT)
 #define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
-#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
-#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
 #define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
 #define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT)
 #define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
-#define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
-#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
-#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
-#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
 #define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
 #define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
-#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
-#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
-#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
-#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
-#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
-#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
-#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */
-#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */
-#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */
-#define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
-#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */
-#define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
-#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
-#define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
-#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
-#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
 #define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
-#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
-#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
-#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
-#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
-#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
-#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
-#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
-#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
-#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
-#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
-#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
-#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
-#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */
-#define I40E_GLPCI_CNF_FLEX10_SHIFT 1
-#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT)
-#define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
-#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
 #define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */
-#define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
-#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT)
-#define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
-#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
 #define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
 #define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
 #define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
 #define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
-#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */
-#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
-#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
-#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
-#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
-#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
-#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
-#define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
-#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
-#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
-#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
-#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
-#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
-#define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
-#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
-#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
 #define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
-#define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
-#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
-#define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
-#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
-#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
-#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4
-#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT)
 #define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
 #define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10
-#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT)
-#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
-#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
-#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */
-#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
-#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
-#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
-#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
-#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
-#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
-#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */
-#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
-#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
-#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */
-#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
-#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */
-#define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
-#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
-#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
-#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
-#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
-#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
-#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
-#define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
-#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
-#define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
-#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
-#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */
-#define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
-#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
-#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
-#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
-#define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
-#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
-#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */
-#define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
-#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT)
-#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */
-#define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
-#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
-#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */
-#define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
-#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
-#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */
-#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0
-#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT)
-#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */
-#define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
-#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT)
-#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */
-#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0
-#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT)
-#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */
-#define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
-#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
-#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
-#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
-#define I40E_GLTPH_CTRL 0x000BE480 /* Reset: PCIR */
-#define I40E_GLTPH_CTRL_DESC_PH_SHIFT 9
-#define I40E_GLTPH_CTRL_DESC_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DESC_PH_SHIFT)
-#define I40E_GLTPH_CTRL_DATA_PH_SHIFT 11
-#define I40E_GLTPH_CTRL_DATA_PH_MASK I40E_MASK(0x3, I40E_GLTPH_CTRL_DATA_PH_SHIFT)
 #define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
-#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
-#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
-#define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
-#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
-#define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
-#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
 #define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */
-#define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
-#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
 #define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
-#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
 #define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
-#define I40E_PF_PCI_CIAD_DATA_SHIFT 0
-#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT)
-#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */
-#define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
-#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
-#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1
-#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT)
-#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2
-#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT)
-#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */
-#define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
-#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT)
-#define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
-#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
-#define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
-#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT)
-#define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
-#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT)
-#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */
-#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0
-#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT)
-#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16
-#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT)
-#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */
-#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
-#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
-#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
-#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
-#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */
-#define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
-#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
-#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
-#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */
-#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */
-#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
-#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
-#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */
-#define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
-#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
-#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */
-#define I40E_PFPCI_PM_PME_EN_SHIFT 0
-#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT)
-#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */
-#define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
-#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
-#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
-#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0
-#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT)
-#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16
-#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */
-#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */
-#define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
-#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
-#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */
-#define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
-#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT)
 #define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
-#define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
-#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
 #define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
 #define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
 #define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
 #define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
-#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */
-#define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
-#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
-#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
-#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
-#define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
-#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
-#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */
-#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
-#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
-#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */
-#define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
-#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
-#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
-#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
-#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */
-#define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
-#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
-#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */
-#define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
-#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
-#define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
-#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT)
-#define I40E_PRTPM_GC_RATD_SHIFT 2
-#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT)
-#define I40E_PRTPM_GC_LCDMP_SHIFT 3
-#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT)
-#define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
-#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
 #define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
-#define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
-#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
 #define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
-#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
-#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
-#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GL_PRS_FVBM_MAX_INDEX 3
-#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT 0
-#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_MASK I40E_MASK(0x7F, I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT)
-#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT 8
-#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_MASK I40E_MASK(0x3F, I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT)
-#define I40E_GL_PRS_FVBM_MSK_ENA_SHIFT 31
-#define I40E_GL_PRS_FVBM_MSK_ENA_MASK I40E_MASK(0x1, I40E_GL_PRS_FVBM_MSK_ENA_SHIFT)
-#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
-#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
-#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
-#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */
-#define I40E_GLRPB_GHW_GHW_SHIFT 0
-#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT)
-#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */
-#define I40E_GLRPB_GLW_GLW_SHIFT 0
-#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT)
-#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */
-#define I40E_GLRPB_PHW_PHW_SHIFT 0
-#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT)
-#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */
-#define I40E_GLRPB_PLW_PLW_SHIFT 0
-#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT)
-#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DHW_MAX_INDEX 7
-#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
-#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
-#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DLW_MAX_INDEX 7
-#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
-#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
-#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_DPS_MAX_INDEX 7
-#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
-#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
-#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_SHT_MAX_INDEX 7
-#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
-#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
-#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
-#define I40E_PRTRPB_SHW_SHW_SHIFT 0
-#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
-#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTRPB_SLT_MAX_INDEX 7
-#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
-#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
-#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
-#define I40E_PRTRPB_SLW_SLW_SHIFT 0
-#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
-#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
-#define I40E_PRTRPB_SPS_SPS_SHIFT 0
-#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
-#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */
-#define I40E_GLQF_CTL_HTOEP_SHIFT 1
-#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT)
-#define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
-#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
-#define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
-#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
-#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6
-#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT)
-#define I40E_GLQF_CTL_RSVD_SHIFT 7
-#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT)
-#define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
-#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
-#define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
-#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
-#define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
-#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
-#define I40E_GLQF_CTL_FDBEST_SHIFT 17
-#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT)
-#define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
-#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT)
-#define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
-#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT)
-#define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
-#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT)
 #define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
 #define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
 #define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
 #define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
 #define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
 #define I40E_GLQF_HKEY_MAX_INDEX 12
-#define I40E_GLQF_HKEY_KEY_0_SHIFT 0
-#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT)
-#define I40E_GLQF_HKEY_KEY_1_SHIFT 8
-#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT)
-#define I40E_GLQF_HKEY_KEY_2_SHIFT 16
-#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT)
-#define I40E_GLQF_HKEY_KEY_3_SHIFT 24
-#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT)
-#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_HSYM_MAX_INDEX 63
-#define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
-#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
 #define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */
-#define I40E_GLQF_PCNT_MAX_INDEX 511
-#define I40E_GLQF_PCNT_PCNT_SHIFT 0
-#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT)
-#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_SWAP_MAX_INDEX 1
-#define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
-#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
-#define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
-#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
-#define I40E_GLQF_SWAP_FLEN0_SHIFT 12
-#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT)
-#define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
-#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
-#define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
-#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
-#define I40E_GLQF_SWAP_FLEN1_SHIFT 28
-#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT)
 #define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */
 #define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
 #define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
 #define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
 #define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
-#define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
-#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
-#define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
-#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
 #define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */
 #define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
 #define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
-#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */
-#define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
-#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
-#define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
-#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT)
 #define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */
 #define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
 #define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
 #define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
 #define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
 #define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_PFQF_HENA_MAX_INDEX 1
-#define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
 #define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */
 #define I40E_PFQF_HKEY_MAX_INDEX 12
-#define I40E_PFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT)
-#define I40E_PFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT)
-#define I40E_PFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT)
-#define I40E_PFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT)
 #define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_PFQF_HLUT_MAX_INDEX 127
-#define I40E_PFQF_HLUT_LUT0_SHIFT 0
-#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT)
-#define I40E_PFQF_HLUT_LUT1_SHIFT 8
-#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT)
-#define I40E_PFQF_HLUT_LUT2_SHIFT 16
-#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT)
-#define I40E_PFQF_HLUT_LUT3_SHIFT 24
-#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT)
-#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */
-#define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
-#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
-#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */
-#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
-#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
-#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
 #define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
 #define I40E_PRTQF_FD_INSET_MAX_INDEX 63
 #define I40E_PRTQF_FD_INSET_INSET_SHIFT 0
 #define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT)
-#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
-#define I40E_PRTQF_FD_MSK_MAX_INDEX 63
-#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
-#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT)
-#define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
-#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
 #define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */
-#define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
 #define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
 #define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
 #define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
 #define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
 #define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
 #define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HENA1_MAX_INDEX 1
-#define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
 #define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
 #define I40E_VFQF_HKEY1_MAX_INDEX 12
-#define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT)
-#define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT)
 #define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
 #define I40E_VFQF_HLUT1_MAX_INDEX 15
-#define I40E_VFQF_HLUT1_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT)
-#define I40E_VFQF_HLUT1_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT)
-#define I40E_VFQF_HLUT1_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT)
-#define I40E_VFQF_HLUT1_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT)
-#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */
-#define I40E_VFQF_HREGION1_MAX_INDEX 7
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT)
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
-#define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT)
-#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPQF_CTL_MAX_INDEX 127
-#define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
-#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT)
-#define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
-#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT)
-#define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
-#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT)
-#define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
-#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT)
-#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
-#define I40E_VSIQF_CTL_MAX_INDEX 383
-#define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
-#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
-#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
-#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
-#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
-#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
-#define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
-#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
-#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */
-#define I40E_VSIQF_TCREGION_MAX_INDEX 3
-#define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
-#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
-#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
-#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
-#define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
-#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
-#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOECRC_MAX_INDEX 143
-#define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
-#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT)
-#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDDPC_MAX_INDEX 143
-#define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
-#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
-#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIFEC_MAX_INDEX 143
-#define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
-#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
-#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
-#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
-#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
-#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIXEC_MAX_INDEX 143
-#define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
-#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
-#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDIXVC_MAX_INDEX 143
-#define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
-#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
-#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWRCH_MAX_INDEX 143
-#define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
-#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
-#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWRCL_MAX_INDEX 143
-#define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
-#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
-#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWTCH_MAX_INDEX 143
-#define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
-#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
-#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEDWTCL_MAX_INDEX 143
-#define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
-#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
-#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOELAST_MAX_INDEX 143
-#define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
-#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT)
-#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEPRC_MAX_INDEX 143
-#define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
-#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
-#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOEPTC_MAX_INDEX 143
-#define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
-#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
-#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_FCOERPDC_MAX_INDEX 143
-#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
-#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
-#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_RXERR1_L_MAX_INDEX 143
-#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
-#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
-#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_RXERR2_L_MAX_INDEX 143
-#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
-#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
 #define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPRCH_MAX_INDEX 3
-#define I40E_GLPRT_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_BPRCH_SHIFT)
 #define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPRCL_MAX_INDEX 3
-#define I40E_GLPRT_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLPRT_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_BPRCL_SHIFT)
 #define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPTCH_MAX_INDEX 3
-#define I40E_GLPRT_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLPRT_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_BPTCH_SHIFT)
 #define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_BPTCL_MAX_INDEX 3
-#define I40E_GLPRT_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLPRT_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_BPTCL_SHIFT)
 #define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_CRCERRS_MAX_INDEX 3
-#define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
-#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
 #define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GORCH_MAX_INDEX 3
-#define I40E_GLPRT_GORCH_GORCH_SHIFT 0
-#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT)
 #define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GORCL_MAX_INDEX 3
-#define I40E_GLPRT_GORCL_GORCL_SHIFT 0
-#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT)
 #define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GOTCH_MAX_INDEX 3
-#define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT)
 #define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_GOTCL_MAX_INDEX 3
-#define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT)
 #define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_ILLERRC_MAX_INDEX 3
-#define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
-#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
-#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LDPC_MAX_INDEX 3
-#define I40E_GLPRT_LDPC_LDPC_SHIFT 0
-#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT)
 #define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
-#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
 #define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
-#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
-#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
 #define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXONRXC_MAX_INDEX 3
-#define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
-#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
 #define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_LXONTXC_MAX_INDEX 3
-#define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
-#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
 #define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MLFC_MAX_INDEX 3
-#define I40E_GLPRT_MLFC_MLFC_SHIFT 0
-#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT)
 #define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPRCH_MAX_INDEX 3
-#define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT)
 #define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPRCL_MAX_INDEX 3
-#define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT)
 #define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPTCH_MAX_INDEX 3
-#define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT)
 #define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MPTCL_MAX_INDEX 3
-#define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT)
 #define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_MRFC_MAX_INDEX 3
-#define I40E_GLPRT_MRFC_MRFC_SHIFT 0
-#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT)
 #define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1023H_MAX_INDEX 3
-#define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
-#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
 #define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1023L_MAX_INDEX 3
-#define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
-#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
 #define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC127H_MAX_INDEX 3
-#define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
-#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT)
 #define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC127L_MAX_INDEX 3
-#define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
-#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT)
 #define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1522H_MAX_INDEX 3
-#define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
 #define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC1522L_MAX_INDEX 3
-#define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
 #define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC255H_MAX_INDEX 3
-#define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
-#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
 #define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC255L_MAX_INDEX 3
-#define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
-#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT)
 #define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC511H_MAX_INDEX 3
-#define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
-#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT)
 #define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC511L_MAX_INDEX 3
-#define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
-#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT)
 #define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC64H_MAX_INDEX 3
-#define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
-#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT)
 #define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC64L_MAX_INDEX 3
-#define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
-#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT)
 #define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC9522H_MAX_INDEX 3
-#define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
 #define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PRC9522L_MAX_INDEX 3
-#define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
 #define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1023H_MAX_INDEX 3
-#define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
-#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
 #define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1023L_MAX_INDEX 3
-#define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
-#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
 #define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC127H_MAX_INDEX 3
-#define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
-#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT)
 #define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC127L_MAX_INDEX 3
-#define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
-#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT)
 #define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1522H_MAX_INDEX 3
-#define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
-#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
 #define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC1522L_MAX_INDEX 3
-#define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
-#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
 #define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC255H_MAX_INDEX 3
-#define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
-#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT)
 #define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC255L_MAX_INDEX 3
-#define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
-#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT)
 #define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC511H_MAX_INDEX 3
-#define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
-#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT)
 #define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC511L_MAX_INDEX 3
-#define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
-#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT)
 #define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC64H_MAX_INDEX 3
-#define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
-#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT)
 #define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC64L_MAX_INDEX 3
-#define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
-#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT)
 #define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC9522H_MAX_INDEX 3
-#define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
-#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
 #define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_PTC9522L_MAX_INDEX 3
-#define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
-#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
 #define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
-#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
 #define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
-#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
 #define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXONRXC_MAX_INDEX 3
-#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
-#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
 #define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_PXONTXC_MAX_INDEX 3
-#define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
-#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
 #define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RDPC_MAX_INDEX 3
-#define I40E_GLPRT_RDPC_RDPC_SHIFT 0
-#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT)
 #define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RFC_MAX_INDEX 3
-#define I40E_GLPRT_RFC_RFC_SHIFT 0
-#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT)
 #define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RJC_MAX_INDEX 3
-#define I40E_GLPRT_RJC_RJC_SHIFT 0
-#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT)
 #define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RLEC_MAX_INDEX 3
-#define I40E_GLPRT_RLEC_RLEC_SHIFT 0
-#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT)
 #define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_ROC_MAX_INDEX 3
-#define I40E_GLPRT_ROC_ROC_SHIFT 0
-#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT)
 #define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RUC_MAX_INDEX 3
-#define I40E_GLPRT_RUC_RUC_SHIFT 0
-#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT)
-#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_RUPP_MAX_INDEX 3
-#define I40E_GLPRT_RUPP_RUPP_SHIFT 0
-#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT)
 #define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
-#define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
-#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
-#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
 #define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_TDOLD_MAX_INDEX 3
-#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
-#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
 #define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPRCH_MAX_INDEX 3
-#define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT)
 #define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPRCL_MAX_INDEX 3
-#define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT)
 #define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPTCH_MAX_INDEX 3
-#define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT)
 #define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_GLPRT_UPTCL_MAX_INDEX 3
-#define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
 #define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPRCH_MAX_INDEX 15
-#define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT)
 #define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPRCL_MAX_INDEX 15
-#define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT)
 #define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPTCH_MAX_INDEX 15
-#define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT)
 #define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_BPTCL_MAX_INDEX 15
-#define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT)
 #define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GORCH_MAX_INDEX 15
-#define I40E_GLSW_GORCH_GORCH_SHIFT 0
-#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT)
 #define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GORCL_MAX_INDEX 15
-#define I40E_GLSW_GORCL_GORCL_SHIFT 0
-#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT)
 #define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GOTCH_MAX_INDEX 15
-#define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT)
 #define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_GOTCL_MAX_INDEX 15
-#define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT)
 #define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPRCH_MAX_INDEX 15
-#define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT)
 #define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPRCL_MAX_INDEX 15
-#define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT)
 #define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPTCH_MAX_INDEX 15
-#define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT)
 #define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_MPTCL_MAX_INDEX 15
-#define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT)
 #define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_RUPP_MAX_INDEX 15
-#define I40E_GLSW_RUPP_RUPP_SHIFT 0
-#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT)
 #define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_TDPC_MAX_INDEX 15
-#define I40E_GLSW_TDPC_TDPC_SHIFT 0
-#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT)
 #define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPRCH_MAX_INDEX 15
-#define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT)
 #define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPRCL_MAX_INDEX 15
-#define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT)
 #define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPTCH_MAX_INDEX 15
-#define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT)
 #define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLSW_UPTCL_MAX_INDEX 15
-#define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT)
 #define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPRCH_MAX_INDEX 383
-#define I40E_GLV_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT)
 #define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPRCL_MAX_INDEX 383
-#define I40E_GLV_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT)
 #define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPTCH_MAX_INDEX 383
-#define I40E_GLV_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT)
 #define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_BPTCL_MAX_INDEX 383
-#define I40E_GLV_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT)
 #define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GORCH_MAX_INDEX 383
-#define I40E_GLV_GORCH_GORCH_SHIFT 0
-#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT)
 #define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GORCL_MAX_INDEX 383
-#define I40E_GLV_GORCL_GORCL_SHIFT 0
-#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT)
 #define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GOTCH_MAX_INDEX 383
-#define I40E_GLV_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT)
 #define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_GOTCL_MAX_INDEX 383
-#define I40E_GLV_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT)
 #define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPRCH_MAX_INDEX 383
-#define I40E_GLV_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT)
 #define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPRCL_MAX_INDEX 383
-#define I40E_GLV_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT)
 #define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPTCH_MAX_INDEX 383
-#define I40E_GLV_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT)
 #define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_MPTCL_MAX_INDEX 383
-#define I40E_GLV_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT)
 #define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_RDPC_MAX_INDEX 383
-#define I40E_GLV_RDPC_RDPC_SHIFT 0
-#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT)
 #define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_RUPP_MAX_INDEX 383
-#define I40E_GLV_RUPP_RUPP_SHIFT 0
-#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
 #define I40E_GLV_TEPC(_i) (0x00344000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_TEPC_MAX_INDEX 383
-#define I40E_GLV_TEPC_TEPC_SHIFT 0
-#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
 #define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPRCH_MAX_INDEX 383
-#define I40E_GLV_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT)
 #define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPRCL_MAX_INDEX 383
-#define I40E_GLV_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT)
 #define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPTCH_MAX_INDEX 383
-#define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
-#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
 #define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
-#define I40E_GLV_UPTCL_MAX_INDEX 383
-#define I40E_GLV_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT)
 #define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RBCH_MAX_INDEX 7
-#define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
 #define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RBCL_MAX_INDEX 7
-#define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
 #define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RPCH_MAX_INDEX 7
-#define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
 #define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_RPCL_MAX_INDEX 7
-#define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
 #define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TBCH_MAX_INDEX 7
-#define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
 #define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TBCL_MAX_INDEX 7
-#define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
 #define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TPCH_MAX_INDEX 7
-#define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
 #define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
-#define I40E_GLVEBTC_TPCL_MAX_INDEX 7
-#define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
-#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_BPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
-#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
-#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_BPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
-#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
-#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GORCH_MAX_INDEX 127
-#define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GORCL_MAX_INDEX 127
-#define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
-#define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
-#define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_MPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
-#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
-#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_MPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
-#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
-#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_UPCH_MAX_INDEX 127
-#define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
-#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
-#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_GLVEBVL_UPCL_MAX_INDEX 127
-#define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
-#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
-#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */
-#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
-#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
-#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */
-#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35
-#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
-#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1
-#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
-#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
-#define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
-#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
-#define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
-#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT)
-#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
-#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
-#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
-#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
-#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
-#define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
-#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
-#define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
-#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
-#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
-#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
-#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
-#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
-#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
-#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
-#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
-#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_CLKO_MAX_INDEX 1
-#define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
-#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
 #define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
-#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
-#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
 #define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
 #define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
-#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
-#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
-#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
 #define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
 #define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
-#define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
-#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
 #define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
 #define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
 #define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */
 #define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
 #define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
 #define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
 #define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
 #define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
 #define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
 #define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
 #define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
 #define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
-#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
-#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
-#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
-#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
-#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
-#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
 #define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */
-#define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
-#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
 #define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */
-#define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
-#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
 #define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
-#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
-#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
 #define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
-#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
-#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
-#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
 #define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */
-#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
-#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
-#define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
-#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
-#define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
-#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
-#define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
-#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
 #define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
 #define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
 #define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */
-#define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
-#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
-#define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
-#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
-#define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
-#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
-#define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
-#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
-#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
-#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
-#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
-#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
-#define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
-#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
-#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
 #define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
-#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
 #define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
-#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
 #define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
-#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
 #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
-#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
-#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
 #define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
 #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
 #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
 #define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
 #define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT)
 #define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VP_MDET_RX_MAX_INDEX 127
 #define I40E_VP_MDET_RX_VALID_SHIFT 0
 #define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT)
 #define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_VP_MDET_TX_MAX_INDEX 127
 #define I40E_VP_MDET_TX_VALID_SHIFT 0
 #define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT)
-#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */
-#define I40E_GLPM_WUMC_NOTCO_SHIFT 0
-#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT)
-#define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
-#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
-#define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
-#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT)
-#define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
-#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT)
-#define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
-#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
 #define I40E_PFPM_APM 0x000B8080 /* Reset: POR */
 #define I40E_PFPM_APM_APME_SHIFT 0
 #define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT)
-#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */
-#define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
-#define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
-#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */
-#define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
-#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT)
 #define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
-#define I40E_PFPM_WUFC_LNKC_SHIFT 0
-#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT)
 #define I40E_PFPM_WUFC_MAG_SHIFT 1
 #define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
-#define I40E_PFPM_WUFC_MNG_SHIFT 3
-#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT)
-#define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
-#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
-#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
-#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
-#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
-#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
-#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
-#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
-#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
-#define I40E_PFPM_WUFC_FLX0_SHIFT 16
-#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT)
-#define I40E_PFPM_WUFC_FLX1_SHIFT 17
-#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT)
-#define I40E_PFPM_WUFC_FLX2_SHIFT 18
-#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT)
-#define I40E_PFPM_WUFC_FLX3_SHIFT 19
-#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT)
-#define I40E_PFPM_WUFC_FLX4_SHIFT 20
-#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT)
-#define I40E_PFPM_WUFC_FLX5_SHIFT 21
-#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT)
-#define I40E_PFPM_WUFC_FLX6_SHIFT 22
-#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT)
-#define I40E_PFPM_WUFC_FLX7_SHIFT 23
-#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT)
-#define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
-#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
-#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */
-#define I40E_PFPM_WUS_LNKC_SHIFT 0
-#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT)
-#define I40E_PFPM_WUS_MAG_SHIFT 1
-#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT)
-#define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
-#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT)
-#define I40E_PFPM_WUS_MNG_SHIFT 3
-#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT)
-#define I40E_PFPM_WUS_FLX0_SHIFT 16
-#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT)
-#define I40E_PFPM_WUS_FLX1_SHIFT 17
-#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT)
-#define I40E_PFPM_WUS_FLX2_SHIFT 18
-#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT)
-#define I40E_PFPM_WUS_FLX3_SHIFT 19
-#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT)
-#define I40E_PFPM_WUS_FLX4_SHIFT 20
-#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT)
-#define I40E_PFPM_WUS_FLX5_SHIFT 21
-#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT)
-#define I40E_PFPM_WUS_FLX6_SHIFT 22
-#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT)
-#define I40E_PFPM_WUS_FLX7_SHIFT 23
-#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT)
-#define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
-#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT)
-#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */
-#define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
-#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT)
-#define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
-#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
-#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
-#define I40E_PRTPM_SAH_MAX_INDEX 3
-#define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
-#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
-#define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
-#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT)
-#define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
-#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
-#define I40E_PRTPM_SAH_AV_SHIFT 31
-#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT)
-#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
-#define I40E_PRTPM_SAL_MAX_INDEX 3
-#define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
-#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
 #define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
-#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
-#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
 #define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
-#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
-#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT)
 #define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
-#define I40E_VF_ARQH1_ARQH_SHIFT 0
-#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
 #define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
-#define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
-#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT)
-#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
-#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT)
-#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
-#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
-#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
-#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
-#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
 #define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
-#define I40E_VF_ARQT1_ARQT_SHIFT 0
-#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
 #define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
-#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
-#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT)
 #define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
-#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
-#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT)
 #define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
-#define I40E_VF_ATQH1_ATQH_SHIFT 0
-#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT)
 #define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
-#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
-#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT)
-#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
-#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT)
-#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
-#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
-#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
-#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
-#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
 #define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
-#define I40E_VF_ATQT1_ATQT_SHIFT 0
-#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
-#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
-#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
-#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
-#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
-#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
-#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
-#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
-#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
-#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
-#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
-#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
-#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
-#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
-#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */
-#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
-#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT)
-#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
-#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT)
-#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
-#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT)
-#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
-#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT)
-#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
-#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT)
-#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
-#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT)
-#define I40E_VFINT_ICR01_SWINT_SHIFT 31
-#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT)
-#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
-#define I40E_VFINT_ITR01_MAX_INDEX 2
-#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT)
-#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
-#define I40E_VFINT_ITRN1_MAX_INDEX 2
-#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */
-#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
-#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
-#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_QRX_TAIL1_MAX_INDEX 15
-#define I40E_QRX_TAIL1_TAIL_SHIFT 0
-#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT)
-#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
-#define I40E_QTX_TAIL1_MAX_INDEX 15
-#define I40E_QTX_TAIL1_TAIL_SHIFT 0
-#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT)
-#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */
-#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
-#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TADD_MAX_INDEX 16
-#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
-#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
-#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
-#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TMSG_MAX_INDEX 16
-#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
-#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TUADD_MAX_INDEX 16
-#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
-#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
-#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
-#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
-#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT)
-#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */
-#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
-#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
-#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */
-#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
-#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
-#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_VFQF_HENA_MAX_INDEX 1
-#define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
-#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
-#define I40E_VFQF_HKEY_MAX_INDEX 12
-#define I40E_VFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT)
-#define I40E_VFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT)
-#define I40E_VFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT)
-#define I40E_VFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT)
-#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_VFQF_HLUT_MAX_INDEX 15
-#define I40E_VFQF_HLUT_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT)
-#define I40E_VFQF_HLUT_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT)
-#define I40E_VFQF_HLUT_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT)
-#define I40E_VFQF_HLUT_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT)
-#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_VFQF_HREGION_MAX_INDEX 7
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
-#define I40E_VFQF_HREGION_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
-#define I40E_VFQF_HREGION_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
-#define I40E_VFQF_HREGION_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
-#define I40E_VFQF_HREGION_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
-#define I40E_VFQF_HREGION_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
-#define I40E_VFQF_HREGION_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
-#define I40E_VFQF_HREGION_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT)
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
-#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
 
-#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */
-#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0
-#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT)
-#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */
-#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2
-#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT)
-#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3
-#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT)
-#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4
-#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT)
-#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8
-#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT)
-#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */
-#define I40E_MNGSB_FDS_START_BC_SHIFT 0
-#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT)
-#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16
-#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT)
 
-#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127
-#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0
-#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT)
-#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
-#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127
-#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0
-#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT)
 
-#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */
-#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0
-#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT)
-#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12
-#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT)
-#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16
-#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT)
-#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */
-#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0
-#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT)
-#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12
-#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT)
-#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16
-#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT)
-#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */
-#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
-#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
-#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
-#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */
-#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
 
-#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7
-#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT)
-#define I40E_GL_FWSTS_FWROWD_SHIFT 8
-#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT)
-#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */
-#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0
-#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT)
-#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8
-#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT)
-#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20
-#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT)
-#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
-#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
-#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
-#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_CEQPART_MAX_INDEX 15
-#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
-#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
-#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
-#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
-#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */
-#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0
-#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT)
-#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
-#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
-#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
-#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
-#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
-#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */
-#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0
-#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT)
-#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
-#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
-#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
-#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
-#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
-#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
-#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
-#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
-#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
-#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */
-#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
-#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
-#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */
-#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
-#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
-#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
-#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
-#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
-#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
-#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
-#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */
-#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
-#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
-#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
-#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
-#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
-#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
-#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */
-#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
-#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */
-#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
-#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
-#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
-#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
-#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
-#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
-#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */
-#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
-#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
-#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */
-#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
-#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
-#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
-#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
-#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
-#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
-#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */
-#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
-#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
-#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */
-#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0
-#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT)
-#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
-#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
-#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
-#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
-#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
-#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
-#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */
-#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
-#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
-#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */
-#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
-#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
-#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */
-#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
-#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
-#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
-#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
-#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
-#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
-#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */
-#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
-#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
-#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
-#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
-#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
-#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
-#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
-#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */
-#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
-#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
-#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */
-#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
-#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
-#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
-#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
-#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
-#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
-#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
-#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
-#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */
-#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
-#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
-#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */
-#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
-#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
-#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
-#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
-#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
-#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
-#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
-#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
-#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
-#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */
-#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
-#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
-#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */
-#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
-#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15
-#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0
-#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT)
-#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16
-#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT)
-#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
-#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
-#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
-#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
-#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
-#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
-#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
-#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
-#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
-#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
-#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
-#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
-#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
-#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
-#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
-#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
-#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
-#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
-#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
-#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
-#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
-#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
-#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
-#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
-#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15
-#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT)
-#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
-#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
-#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
-#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
-#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
-#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
-#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
-#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
-#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
-#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
-#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
-#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
-#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
-#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
-#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
-#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
-#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
-#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
-#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
-#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
-#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
-#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
-#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
-#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
-#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
-#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
-#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
-#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
-#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
-#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
-#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
-#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
-#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
-#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
-#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
-#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
-#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
-#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
-#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
-#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
-#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
-#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
-#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
-#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
-#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
-#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
-#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
-#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
-#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
-#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
-#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
-#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */
-#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0
-#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT)
-#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8
-#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT)
-#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20
-#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT)
-#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */
-#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0
-#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT)
-#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8
-#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT)
-#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20
-#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT)
-#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */
-#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0
-#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT)
-#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8
-#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT)
-#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20
-#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT)
-#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
-#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
-#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15
-#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT)
-#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */
-#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0
-#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT)
-#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */
-#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0
-#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT)
-#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */
-#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0
-#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */
-#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0
-#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2
-#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4
-#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8
-#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9
-#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10
-#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11
-#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12
-#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13
-#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT)
-#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14
-#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT)
-#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */
-#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0
-#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT)
-#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */
-#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0
-#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT)
-#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2
-#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT)
-#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3
-#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT)
 #define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
 #define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
 #define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
 #define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
-#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
-#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
-#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
-#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
-#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VPLAN_QBASE_MAX_INDEX 127
-#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0
-#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT)
-#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11
-#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT)
-#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31
-#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT)
-#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */
-#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0
-#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT)
-#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */
-#define I40E_GLNVM_AL_REQ_POR_SHIFT 0
-#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT)
-#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1
-#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT)
-#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2
-#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT)
-#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3
-#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT)
-#define I40E_GLNVM_AL_REQ_PE_SHIFT 4
-#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT)
-#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5
-#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT)
-#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */
-#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0
-#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT)
-#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12
-#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT)
 #define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
 #define I40E_GLNVM_FLA_LOCKED_SHIFT 6
 #define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
 
 #define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
-#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0
-#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT)
-#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1
-#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT)
-#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3
-#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT)
-#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4
-#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT)
-#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5
-#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT)
-#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8
-#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT)
-#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9
-#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT)
-#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10
-#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT)
-#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */
-#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0
-#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT)
-#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1
-#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT)
-#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2
-#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT)
-#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3
-#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT)
-#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4
-#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT)
-#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5
-#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT)
-#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6
-#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT)
-#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7
-#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT)
-#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8
-#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT)
-#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9
-#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT)
-#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10
-#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */
-#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0
-#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1
-#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2
-#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3
-#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4
-#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5
-#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6
-#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7
-#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8
-#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9
-#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10
-#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11
-#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12
-#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13
-#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14
-#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15
-#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT)
-#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16
-#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT)
-#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */
-#define I40E_MNGSB_DADD_ADDR_SHIFT 0
-#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT)
-#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */
-#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0
-#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT)
-#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */
-#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0
-#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT)
-#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8
-#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT)
-#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26
-#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT)
-#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28
-#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT)
-#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30
-#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT)
-#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31
-#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT)
-#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */
-#define I40E_MNGSB_RDATA_DATA_SHIFT 0
-#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT)
-#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */
-#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0
-#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT)
-#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8
-#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT)
-#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16
-#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT)
-#define I40E_MNGSB_RHDR0_TAG_SHIFT 24
-#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT)
-#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27
-#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT)
-#define I40E_MNGSB_RHDR0_EH_SHIFT 31
-#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT)
-#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */
-#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0
-#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT)
-#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26
-#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT)
-#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30
-#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT)
-#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31
-#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT)
-#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */
-#define I40E_MNGSB_WDATA_DATA_SHIFT 0
-#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT)
-#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */
-#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0
-#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT)
-#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12
-#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT)
-#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16
-#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT)
-#define I40E_MNGSB_WHDR0_TAG_SHIFT 24
-#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT)
-#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */
-#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0
-#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT)
-#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */
-#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0
-#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT)
 
-#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21
-#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT)
 
-#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT)
-#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */
-#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */
-#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT)
-#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */
-#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT)
-#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16
-#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT)
 
-#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
-#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
-#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
-#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */
-#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0
-#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT)
-#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1
-#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT)
-#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2
-#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT)
-#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6
-#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT)
-#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16
-#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT)
-#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */
-#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT)
-#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */
-#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */
-#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */
-#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */
-#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */
-#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */
-#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT)
-#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */
-#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0
-#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT)
-#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16
-#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT)
-#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
-#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
-#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
-#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
-#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
-#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
-#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
-#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
-#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
-#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
-#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
-#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
-#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
-#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
-#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
-#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
-#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
-#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
-#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
-#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
-#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
-#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
-#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
-#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
-#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
-#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
-#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
-#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
-#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
-#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
-#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
-#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
-#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
-#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
-#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
-#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
-#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
-#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
-#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
-#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
-#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
-#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
-#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
-#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
-#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
-#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
-#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
-#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
-#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
-#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
-#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
-#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
-#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
-#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
-#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
-#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
-#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
-#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
-#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
-#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
-#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
-#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
-#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
-#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
-#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
-#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
-#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
-#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
-#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
-#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
-#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
-#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
-#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
-#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
-#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
-#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
-#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
-#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
-#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
-#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
-#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
-#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
-#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
-#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
-#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
-#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
-#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
-#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
-#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
-#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
-#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
-#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
-#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
-#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
-#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
-#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
-#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
-#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
-#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
-#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
-#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
-#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
-#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
-#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
-#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
-#define I40E_PFPE_CQACK_PECQID_SHIFT 0
-#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT)
-#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
-#define I40E_PFPE_CQARM_PECQID_SHIFT 0
-#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT)
-#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
-#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
-#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT)
-#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
-#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
-#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
-#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
-#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
-#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
-#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
-#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
-#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
-#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
-#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
-#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
-#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
-#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
-#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
-#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
-#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
-#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
-#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
-#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
-#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
-#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */
-#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
-#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
-#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
-#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
-#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
-#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
-#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */
-#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
-#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
-#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
-#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
-#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
-#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
-#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
-#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
-#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
-#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */
-#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0
-#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT)
-#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */
-#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7
-#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0
-#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT)
-#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */
-#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0
-#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT)
-#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13
-#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT)
-#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT)
-#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */
-#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0
-#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT)
-#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8
-#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT)
-#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16
-#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT)
-#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24
-#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */
-#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0
-#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */
-#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13
-#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT)
-#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30
-#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT)
-#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */
-#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0
-#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT)
-#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */
-#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0
-#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT)
-#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */
-#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0
-#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT)
-#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16
-#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT)
-#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31
-#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT)
-#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */
-#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0
-#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8
-#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16
-#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24
-#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */
-#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0
-#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8
-#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16
-#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24
-#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */
-#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0
-#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8
-#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16
-#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24
-#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT)
-#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */
-#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0
-#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT)
-#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8
-#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT)
-#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16
-#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT)
-#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
-#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
-#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
-#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
-#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
-#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
-#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
-#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
-#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
-#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
-#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
-#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
-#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
-#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
-#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
-#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
-#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CQACK_MAX_INDEX 127
-#define I40E_VFPE_CQACK_PECQID_SHIFT 0
-#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT)
-#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CQARM_MAX_INDEX 127
-#define I40E_VFPE_CQARM_PECQID_SHIFT 0
-#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT)
-#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CQPDB_MAX_INDEX 127
-#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
-#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT)
-#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
-#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
-#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
-#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
-#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
-#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
-#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
-#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
-#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
-#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
-#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
-#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
-#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
-#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
-#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
-#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
-#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
-#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
-#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
-#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
-#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
-#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
-#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
-#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
-#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
-#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
-#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
-#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
-#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
-#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
-#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
-#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
-#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
-#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
-#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
-#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
-#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
-#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
-#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
-#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
-#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
-#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
-#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
-#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
-#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
-#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
-#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
-#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
-#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
-#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
-#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
-#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
-#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
-#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
-#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
-#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
-#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
-#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
-#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
-#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
-#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
-#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
-#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
-#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
-#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
-#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
-#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
-#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
-#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
-#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
-#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
-#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
-#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
-#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
-#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
-#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
-#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
-#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
-#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
-#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
-#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
-#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
-#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
-#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
-#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
-#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
-#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
-#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
-#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
-#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
-#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
-#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
-#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
-#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
-#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
-#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
-#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
-#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
-#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
-#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
-#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
-#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
-#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
-#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
-#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
-#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
-#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
-#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
-#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
-#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
-#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
-#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
-#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
-#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
-#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
-#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
-#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
-#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
-#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
-#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
-#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
-#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
-#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
-#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
-#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
-#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
-#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
-#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
-#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
-#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
-#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
-#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
-#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
-#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
-#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
-#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
-#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
-#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
-#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
-#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
-#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
-#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
-#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */
-#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0
-#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT)
-#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */
-#define I40E_GLQF_APBVT_MAX_INDEX 2047
-#define I40E_GLQF_APBVT_APBVT_SHIFT 0
-#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT)
-#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */
-#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63
-#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0
-#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT)
-#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_FD_MSK_MAX_INDEX 1
-#define I40E_GLQF_FD_MSK_MASK_SHIFT 0
-#define I40E_GLQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_FD_MSK_MASK_SHIFT)
-#define I40E_GLQF_FD_MSK_OFFSET_SHIFT 16
-#define I40E_GLQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_FD_MSK_OFFSET_SHIFT)
 #define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_HASH_INSET_MAX_INDEX 1
-#define I40E_GLQF_HASH_INSET_INSET_SHIFT 0
-#define I40E_GLQF_HASH_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_HASH_INSET_INSET_SHIFT)
-#define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_HASH_MSK_MAX_INDEX 1
-#define I40E_GLQF_HASH_MSK_MASK_SHIFT 0
-#define I40E_GLQF_HASH_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_HASH_MSK_MASK_SHIFT)
-#define I40E_GLQF_HASH_MSK_OFFSET_SHIFT 16
-#define I40E_GLQF_HASH_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_HASH_MSK_OFFSET_SHIFT)
 #define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
-#define I40E_GLQF_ORT_MAX_INDEX 63
 #define I40E_GLQF_ORT_PIT_INDX_SHIFT 0
 #define I40E_GLQF_ORT_PIT_INDX_MASK I40E_MASK(0x1F, I40E_GLQF_ORT_PIT_INDX_SHIFT)
 #define I40E_GLQF_ORT_FIELD_CNT_SHIFT 5
 #define I40E_GLQF_ORT_FIELD_CNT_MASK I40E_MASK(0x3, I40E_GLQF_ORT_FIELD_CNT_SHIFT)
 #define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7
 #define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT)
-#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) /* _i=0...23 */ /* Reset: CORER */
-#define I40E_GLQF_PIT_MAX_INDEX 23
-#define I40E_GLQF_PIT_SOURCE_OFF_SHIFT 0
-#define I40E_GLQF_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_SOURCE_OFF_SHIFT)
-#define I40E_GLQF_PIT_FSIZE_SHIFT 5
-#define I40E_GLQF_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_FSIZE_SHIFT)
-#define I40E_GLQF_PIT_DEST_OFF_SHIFT 10
-#define I40E_GLQF_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_GLQF_PIT_DEST_OFF_SHIFT)
 #define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
-#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1
-#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0
-#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT)
-#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */
-#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0
-#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT)
-#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8
-#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT)
-#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */
-#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0
-#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT)
-#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5
-#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT)
 /* Redefined for X722 family */
-#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
-#define I40E_X722_PFQF_HLUT_MAX_INDEX 127
-#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0
-#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT)
-#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8
-#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT)
-#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16
-#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT)
-#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24
-#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT)
-#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */
-#define I40E_PFQF_HREGION_MAX_INDEX 7
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
-#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
-#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
-#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
-#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
-#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
-#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
-#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
-#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
-#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
-#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
-#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
-#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
-#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
-#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
-#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
-#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT)
-#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8
-#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT)
-#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */
-#define I40E_VSIQF_HKEY_MAX_INDEX 12
-#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0
-#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT)
-#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8
-#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT)
-#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16
-#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT)
-#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24
-#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT)
-#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */
-#define I40E_VSIQF_HLUT_MAX_INDEX 15
-#define I40E_VSIQF_HLUT_LUT0_SHIFT 0
-#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT)
-#define I40E_VSIQF_HLUT_LUT1_SHIFT 8
-#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT)
-#define I40E_VSIQF_HLUT_LUT2_SHIFT 16
-#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT)
-#define I40E_VSIQF_HLUT_LUT3_SHIFT 24
-#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT)
 #define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
-#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0
-#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT)
-#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */
-#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0
-#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT)
-#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
-#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
-#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
-#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
-#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
-#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
-#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
-#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
-#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
-#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
-#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
-#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
-#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
-#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
-#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
-#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
-#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
-#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
-#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
-#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
-#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
-#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
-#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
-#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
-#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
-#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
-#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
-#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
-#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
-#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
-#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
-#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
-#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
-#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
-#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
-#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
-#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
-#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
-#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
-#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
-#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
-#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
-#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
-#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
-#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
 #endif /* _I40E_REGISTER_H_ */
index f9555c8..3e5c566 100644 (file)
@@ -1690,7 +1690,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
        case I40E_RX_PTYPE_INNER_PROT_UDP:
        case I40E_RX_PTYPE_INNER_PROT_SCTP:
                skb->ip_summed = CHECKSUM_UNNECESSARY;
-               /* fall though */
+               fallthrough;
        default:
                break;
        }
@@ -2210,10 +2210,10 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
-               /* fall through */
+               fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
-               /* fall through -- handle aborts by dropping packet */
+               fallthrough; /* handle aborts by dropping packet */
        case XDP_DROP:
                result = I40E_XDP_CONSUMED;
                break;
@@ -2580,7 +2580,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
         */
        i40e_for_each_ring(ring, q_vector->tx) {
                bool wd = ring->xsk_umem ?
-                         i40e_clean_xdp_tx_irq(vsi, ring, budget) :
+                         i40e_clean_xdp_tx_irq(vsi, ring) :
                          i40e_clean_tx_irq(vsi, ring, budget);
 
                if (!wd) {
@@ -2595,10 +2595,16 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
        if (budget <= 0)
                goto tx_only;
 
-       /* We attempt to distribute budget to each Rx queue fairly, but don't
-        * allow the budget to go below 1 because that would exit polling early.
-        */
-       budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
+       /* normally we have 1 Rx ring per q_vector */
+       if (unlikely(q_vector->num_ringpairs > 1))
+               /* We attempt to distribute budget to each Rx queue fairly, but
+                * don't allow the budget to go below 1 because that would exit
+                * polling early.
+                */
+               budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1);
+       else
+               /* Max of 1 Rx ring in this q_vector so give it the budget */
+               budget_per_ring = budget;
 
        i40e_for_each_ring(ring, q_vector->rx) {
                int cleaned = ring->xsk_umem ?
@@ -3538,6 +3544,7 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
         */
        smp_wmb();
 
+       xdp_ring->xdp_tx_active++;
        i++;
        if (i == xdp_ring->count)
                i = 0;
index 5c25597..4036893 100644 (file)
 #define I40E_ITR_DYNAMIC       0x8000  /* use top bit as a flag */
 #define I40E_ITR_MASK          0x1FFE  /* mask for ITR register value */
 #define I40E_MIN_ITR                2  /* reg uses 2 usec resolution */
-#define I40E_ITR_100K              10  /* all values below must be even */
-#define I40E_ITR_50K               20
 #define I40E_ITR_20K               50
-#define I40E_ITR_18K               60
 #define I40E_ITR_8K               122
 #define I40E_MAX_ITR             8160  /* maximum value as per datasheet */
 #define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
@@ -52,9 +49,6 @@ static inline u16 i40e_intrl_usec_to_reg(int intrl)
        else
                return 0;
 }
-#define I40E_INTRL_8K              125     /* 8000 ints/sec */
-#define I40E_INTRL_62K             16      /* 62500 ints/sec */
-#define I40E_INTRL_83K             12      /* 83333 ints/sec */
 
 #define I40E_QUEUE_END_OF_LIST 0x7FF
 
@@ -73,7 +67,6 @@ enum i40e_dyn_idx_t {
 /* these are indexes into ITRN registers */
 #define I40E_RX_ITR    I40E_IDX_ITR0
 #define I40E_TX_ITR    I40E_IDX_ITR1
-#define I40E_PE_ITR    I40E_IDX_ITR2
 
 /* Supported RSS offloads */
 #define I40E_DEFAULT_RSS_HENA ( \
@@ -193,13 +186,6 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define I40E_RX_BUFFER_WRITE   32      /* Must be power of 2 */
-#define I40E_RX_INCREMENT(r, i) \
-       do {                                    \
-               (i)++;                          \
-               if ((i) == (r)->count)          \
-                       i = 0;                  \
-               r->next_to_clean = i;           \
-       } while (0)
 
 #define I40E_RX_NEXT_DESC(r, i, n)             \
        do {                                    \
@@ -209,11 +195,6 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
                (n) = I40E_RX_DESC((r), (i));   \
        } while (0)
 
-#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)            \
-       do {                                            \
-               I40E_RX_NEXT_DESC((r), (i), (n));       \
-               prefetch((n));                          \
-       } while (0)
 
 #define I40E_MAX_BUFFER_TXD    8
 #define I40E_MIN_TX_LEN                17
@@ -262,15 +243,12 @@ static inline unsigned int i40e_txd_use_count(unsigned int size)
 
 /* Tx Descriptors needed, worst case */
 #define DESC_NEEDED (MAX_SKB_FRAGS + 6)
-#define I40E_MIN_DESC_PENDING  4
 
 #define I40E_TX_FLAGS_HW_VLAN          BIT(1)
 #define I40E_TX_FLAGS_SW_VLAN          BIT(2)
 #define I40E_TX_FLAGS_TSO              BIT(3)
 #define I40E_TX_FLAGS_IPV4             BIT(4)
 #define I40E_TX_FLAGS_IPV6             BIT(5)
-#define I40E_TX_FLAGS_FCCRC            BIT(6)
-#define I40E_TX_FLAGS_FSO              BIT(7)
 #define I40E_TX_FLAGS_TSYN             BIT(8)
 #define I40E_TX_FLAGS_FD_SB            BIT(9)
 #define I40E_TX_FLAGS_UDP_TUNNEL       BIT(10)
@@ -332,9 +310,7 @@ enum i40e_ring_state_t {
 /* some useful defines for virtchannel interface, which
  * is the only remaining user of header split
  */
-#define I40E_RX_DTYPE_NO_SPLIT      0
 #define I40E_RX_DTYPE_HEADER_SPLIT  1
-#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
 #define I40E_RX_SPLIT_L2      0x1
 #define I40E_RX_SPLIT_IP      0x2
 #define I40E_RX_SPLIT_TCP_UDP 0x4
@@ -371,6 +347,7 @@ struct i40e_ring {
        /* used in interrupt processing */
        u16 next_to_use;
        u16 next_to_clean;
+       u16 xdp_tx_active;
 
        u8 atr_sample_rate;
        u8 atr_count;
@@ -444,7 +421,6 @@ static inline void set_ring_xdp(struct i40e_ring *ring)
 #define I40E_ITR_ADAPTIVE_MAX_USECS    0x007e
 #define I40E_ITR_ADAPTIVE_LATENCY      0x8000
 #define I40E_ITR_ADAPTIVE_BULK         0x0000
-#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY))
 
 struct i40e_ring_container {
        struct i40e_ring *ring;         /* pointer to linked list of ring(s) */
index 63e098f..52410d6 100644 (file)
@@ -84,8 +84,6 @@ enum i40e_debug_mask {
                                                  I40E_GLGEN_MSCA_OPCODE_SHIFT)
 #define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK   I40E_MASK(1, \
                                                  I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK   I40E_MASK(2, \
-                                               I40E_GLGEN_MSCA_OPCODE_SHIFT)
 #define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK    I40E_MASK(3, \
                                                I40E_GLGEN_MSCA_OPCODE_SHIFT)
 
@@ -178,21 +176,9 @@ struct i40e_link_status {
        u8 module_type[3];
        /* 1st byte: module identifier */
 #define I40E_MODULE_TYPE_SFP           0x03
-#define I40E_MODULE_TYPE_QSFP          0x0D
-       /* 2nd byte: ethernet compliance codes for 10/40G */
-#define I40E_MODULE_TYPE_40G_ACTIVE    0x01
-#define I40E_MODULE_TYPE_40G_LR4       0x02
-#define I40E_MODULE_TYPE_40G_SR4       0x04
-#define I40E_MODULE_TYPE_40G_CR4       0x08
-#define I40E_MODULE_TYPE_10G_BASE_SR   0x10
-#define I40E_MODULE_TYPE_10G_BASE_LR   0x20
-#define I40E_MODULE_TYPE_10G_BASE_LRM  0x40
-#define I40E_MODULE_TYPE_10G_BASE_ER   0x80
        /* 3rd byte: ethernet compliance codes for 1G */
 #define I40E_MODULE_TYPE_1000BASE_SX   0x01
 #define I40E_MODULE_TYPE_1000BASE_LX   0x02
-#define I40E_MODULE_TYPE_1000BASE_CX   0x04
-#define I40E_MODULE_TYPE_1000BASE_T    0x08
 };
 
 struct i40e_phy_info {
@@ -262,9 +248,6 @@ struct i40e_phy_info {
 /* Capabilities of a PF or a VF or the whole device */
 struct i40e_hw_capabilities {
        u32  switch_mode;
-#define I40E_NVM_IMAGE_TYPE_EVB                0x0
-#define I40E_NVM_IMAGE_TYPE_CLOUD      0x2
-#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD  0x3
 
        /* Cloud filter modes:
         * Mode1: Filter on L4 port only
@@ -273,14 +256,10 @@ struct i40e_hw_capabilities {
         */
 #define I40E_CLOUD_FILTER_MODE1        0x6
 #define I40E_CLOUD_FILTER_MODE2        0x7
-#define I40E_CLOUD_FILTER_MODE3        0x8
 #define I40E_SWITCH_MODE_MASK  0xF
 
        u32  management_mode;
        u32  mng_protocols_over_mctp;
-#define I40E_MNG_PROTOCOL_PLDM         0x2
-#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4
-#define I40E_MNG_PROTOCOL_NCSI         0x8
        u32  npar_enable;
        u32  os2bmc;
        u32  valid_functions;
@@ -294,13 +273,8 @@ struct i40e_hw_capabilities {
        bool flex10_enable;
        bool flex10_capable;
        u32  flex10_mode;
-#define I40E_FLEX10_MODE_UNKNOWN       0x0
-#define I40E_FLEX10_MODE_DCC           0x1
-#define I40E_FLEX10_MODE_DCI           0x2
 
        u32 flex10_status;
-#define I40E_FLEX10_STATUS_DCC_ERROR   0x1
-#define I40E_FLEX10_STATUS_VC_MODE     0x2
 
        bool sec_rev_disabled;
        bool update_disabled;
@@ -421,11 +395,8 @@ enum i40e_nvmupd_state {
 #define I40E_NVM_AQE                           0xe
 #define I40E_NVM_EXEC                          0xf
 
-#define I40E_NVM_ADAPT_SHIFT   16
-#define I40E_NVM_ADAPT_MASK    (0xffff << I40E_NVM_ADAPT_SHIFT)
 
 #define I40E_NVMUPD_MAX_DATA   4096
-#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */
 
 struct i40e_nvm_access {
        u32 command;
@@ -438,7 +409,6 @@ struct i40e_nvm_access {
 /* (Q)SFP module access definitions */
 #define I40E_I2C_EEPROM_DEV_ADDR       0xA0
 #define I40E_I2C_EEPROM_DEV_ADDR2      0xA2
-#define I40E_MODULE_TYPE_ADDR          0x00
 #define I40E_MODULE_REVISION_ADDR      0x01
 #define I40E_MODULE_SFF_8472_COMP      0x5E
 #define I40E_MODULE_SFF_8472_SWAP      0x5C
@@ -547,7 +517,6 @@ struct i40e_dcbx_config {
 #define I40E_DCBX_MODE_CEE     0x1
 #define I40E_DCBX_MODE_IEEE    0x2
        u8  app_mode;
-#define I40E_DCBX_APPS_NON_WILLING     0x1
        u32 numapps;
        u32 tlv_status; /* CEE mode TLV status */
        struct i40e_dcb_ets_config etscfg;
@@ -895,9 +864,6 @@ enum i40e_rx_ptype_payload_layer {
 #define I40E_RXD_QW1_LENGTH_PBUF_MASK  (0x3FFFULL << \
                                         I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
 
-#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52
-#define I40E_RXD_QW1_LENGTH_HBUF_MASK  (0x7FFULL << \
-                                        I40E_RXD_QW1_LENGTH_HBUF_SHIFT)
 
 #define I40E_RXD_QW1_LENGTH_SPH_SHIFT  63
 #define I40E_RXD_QW1_LENGTH_SPH_MASK   BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT)
@@ -926,7 +892,6 @@ enum i40e_rx_desc_pe_status_bits {
        I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT      = 29
 };
 
-#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT          38
 #define I40E_RX_PROG_STATUS_DESC_LENGTH                        0x2000000
 
 #define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT      2
@@ -963,8 +928,6 @@ struct i40e_tx_desc {
        __le64 cmd_type_offset_bsz;
 };
 
-#define I40E_TXD_QW1_DTYPE_SHIFT       0
-#define I40E_TXD_QW1_DTYPE_MASK                (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT)
 
 enum i40e_tx_desc_dtype_value {
        I40E_TX_DESC_DTYPE_DATA         = 0x0,
@@ -980,7 +943,6 @@ enum i40e_tx_desc_dtype_value {
 };
 
 #define I40E_TXD_QW1_CMD_SHIFT 4
-#define I40E_TXD_QW1_CMD_MASK  (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT)
 
 enum i40e_tx_desc_cmd_bits {
        I40E_TX_DESC_CMD_EOP                    = 0x0001,
@@ -1004,8 +966,6 @@ enum i40e_tx_desc_cmd_bits {
 };
 
 #define I40E_TXD_QW1_OFFSET_SHIFT      16
-#define I40E_TXD_QW1_OFFSET_MASK       (0x3FFFFULL << \
-                                        I40E_TXD_QW1_OFFSET_SHIFT)
 
 enum i40e_tx_desc_length_fields {
        /* Note: These are predefined bit offsets */
@@ -1015,11 +975,8 @@ enum i40e_tx_desc_length_fields {
 };
 
 #define I40E_TXD_QW1_TX_BUF_SZ_SHIFT   34
-#define I40E_TXD_QW1_TX_BUF_SZ_MASK    (0x3FFFULL << \
-                                        I40E_TXD_QW1_TX_BUF_SZ_SHIFT)
 
 #define I40E_TXD_QW1_L2TAG1_SHIFT      48
-#define I40E_TXD_QW1_L2TAG1_MASK       (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT)
 
 /* Context descriptors */
 struct i40e_tx_context_desc {
@@ -1029,11 +986,8 @@ struct i40e_tx_context_desc {
        __le64 type_cmd_tso_mss;
 };
 
-#define I40E_TXD_CTX_QW1_DTYPE_SHIFT   0
-#define I40E_TXD_CTX_QW1_DTYPE_MASK    (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT)
 
 #define I40E_TXD_CTX_QW1_CMD_SHIFT     4
-#define I40E_TXD_CTX_QW1_CMD_MASK      (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT)
 
 enum i40e_tx_ctx_desc_cmd_bits {
        I40E_TX_CTX_DESC_TSO            = 0x01,
@@ -1048,19 +1002,10 @@ enum i40e_tx_ctx_desc_cmd_bits {
 };
 
 #define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30
-#define I40E_TXD_CTX_QW1_TSO_LEN_MASK  (0x3FFFFULL << \
-                                        I40E_TXD_CTX_QW1_TSO_LEN_SHIFT)
 
 #define I40E_TXD_CTX_QW1_MSS_SHIFT     50
-#define I40E_TXD_CTX_QW1_MSS_MASK      (0x3FFFULL << \
-                                        I40E_TXD_CTX_QW1_MSS_SHIFT)
 
-#define I40E_TXD_CTX_QW1_VSI_SHIFT     50
-#define I40E_TXD_CTX_QW1_VSI_MASK      (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT)
 
-#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT  0
-#define I40E_TXD_CTX_QW0_EXT_IP_MASK   (0x3ULL << \
-                                        I40E_TXD_CTX_QW0_EXT_IP_SHIFT)
 
 enum i40e_tx_ctx_desc_eipt_offload {
        I40E_TX_CTX_EXT_IP_NONE         = 0x0,
@@ -1070,28 +1015,16 @@ enum i40e_tx_ctx_desc_eipt_offload {
 };
 
 #define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT       2
-#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK        (0x3FULL << \
-                                        I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT)
 
 #define I40E_TXD_CTX_QW0_NATT_SHIFT    9
-#define I40E_TXD_CTX_QW0_NATT_MASK     (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
 #define I40E_TXD_CTX_UDP_TUNNELING     BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT)
 #define I40E_TXD_CTX_GRE_TUNNELING     (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT)
 
-#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT       11
-#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \
-                                      BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT)
 
-#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST      I40E_TXD_CTX_QW0_EIP_NOINC_MASK
 
 #define I40E_TXD_CTX_QW0_NATLEN_SHIFT  12
-#define I40E_TXD_CTX_QW0_NATLEN_MASK   (0X7FULL << \
-                                        I40E_TXD_CTX_QW0_NATLEN_SHIFT)
 
-#define I40E_TXD_CTX_QW0_DECTTL_SHIFT  19
-#define I40E_TXD_CTX_QW0_DECTTL_MASK   (0xFULL << \
-                                        I40E_TXD_CTX_QW0_DECTTL_SHIFT)
 
 #define I40E_TXD_CTX_QW0_L4T_CS_SHIFT  23
 #define I40E_TXD_CTX_QW0_L4T_CS_MASK   BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
@@ -1161,11 +1094,8 @@ enum i40e_filter_program_desc_fd_status {
                                         I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_CMD_SHIFT    4
-#define I40E_TXD_FLTR_QW1_CMD_MASK     (0xFFFFULL << \
-                                        I40E_TXD_FLTR_QW1_CMD_SHIFT)
 
 #define I40E_TXD_FLTR_QW1_PCMD_SHIFT   (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT)
-#define I40E_TXD_FLTR_QW1_PCMD_MASK    (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT)
 
 enum i40e_filter_program_desc_pcmd {
        I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE        = 0x1,
@@ -1316,7 +1246,6 @@ struct i40e_hw_port_stats {
 #define I40E_NVM_OEM_VER_OFF                   0x83
 #define I40E_SR_NVM_DEV_STARTER_VERSION                0x18
 #define I40E_SR_NVM_WAKE_ON_LAN                        0x19
-#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR  0x27
 #define I40E_SR_NVM_EETRACK_LO                 0x2D
 #define I40E_SR_NVM_EETRACK_HI                 0x2E
 #define I40E_SR_VPD_PTR                                0x2F
@@ -1329,7 +1258,6 @@ struct i40e_hw_port_stats {
 #define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE       1024
 #define I40E_SR_CONTROL_WORD_1_SHIFT           0x06
 #define I40E_SR_CONTROL_WORD_1_MASK    (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT)
-#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID  BIT(5)
 #define I40E_SR_NVM_MAP_STRUCTURE_TYPE         BIT(12)
 #define I40E_PTR_TYPE                          BIT(15)
 #define I40E_SR_OCP_CFG_WORD0                  0x2B
@@ -1463,14 +1391,11 @@ struct i40e_lldp_variables {
 /* Offsets into Alternate Ram */
 #define I40E_ALT_STRUCT_FIRST_PF_OFFSET                0   /* in dwords */
 #define I40E_ALT_STRUCT_DWORDS_PER_PF          64   /* in dwords */
-#define I40E_ALT_STRUCT_OUTER_VLAN_TAG_OFFSET  0xD  /* in dwords */
-#define I40E_ALT_STRUCT_USER_PRIORITY_OFFSET   0xC  /* in dwords */
 #define I40E_ALT_STRUCT_MIN_BW_OFFSET          0xE  /* in dwords */
 #define I40E_ALT_STRUCT_MAX_BW_OFFSET          0xF  /* in dwords */
 
 /* Alternate Ram Bandwidth Masks */
 #define I40E_ALT_BW_VALUE_MASK         0xFF
-#define I40E_ALT_BW_RELATIVE_MASK      0x40000000
 #define I40E_ALT_BW_VALID_MASK         0x80000000
 
 /* RSS Hash Table Size */
@@ -1529,9 +1454,7 @@ struct i40e_package_header {
 /* Generic segment header */
 struct i40e_generic_seg_header {
 #define SEGMENT_TYPE_METADATA  0x00000001
-#define SEGMENT_TYPE_NOTES     0x00000002
 #define SEGMENT_TYPE_I40E      0x00000011
-#define SEGMENT_TYPE_X722      0x00000012
        u32 type;
        struct i40e_ddp_version version;
        u32 size;
@@ -1541,7 +1464,6 @@ struct i40e_generic_seg_header {
 struct i40e_metadata_segment {
        struct i40e_generic_seg_header header;
        struct i40e_ddp_version version;
-#define I40E_DDP_TRACKID_RDONLY                0
 #define I40E_DDP_TRACKID_INVALID       0xFFFFFFFF
        u32 track_id;
        char name[I40E_DDP_NAME_SIZE];
@@ -1575,10 +1497,6 @@ struct i40e_profile_section_header {
 #define SECTION_TYPE_AQ                0x00000801
 #define SECTION_TYPE_RB_AQ     0x00001801
 #define SECTION_TYPE_NOTE      0x80000000
-#define SECTION_TYPE_NAME      0x80000001
-#define SECTION_TYPE_PROTO     0x80000002
-#define SECTION_TYPE_PCTYPE    0x80000003
-#define SECTION_TYPE_PTYPE     0x80000004
                u32 type;
                u32 offset;
                u32 size;
index 56b9e44..8e133d6 100644 (file)
@@ -1106,39 +1106,81 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
        return -EIO;
 }
 
-static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi);
+/**
+ * i40e_getnum_vf_vsi_vlan_filters
+ * @vsi: pointer to the vsi
+ *
+ * called to get the number of VLANs offloaded on this VF
+ **/
+static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+{
+       struct i40e_mac_filter *f;
+       int num_vlans = 0, bkt;
+
+       hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+               if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
+                       num_vlans++;
+       }
+
+       return num_vlans;
+}
 
 /**
- * i40e_config_vf_promiscuous_mode
- * @vf: pointer to the VF info
- * @vsi_id: VSI id
- * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
- * @alluni: set MAC L2 layer unicast promiscuous enable/disable
+ * i40e_get_vlan_list_sync
+ * @vsi: pointer to the VSI
+ * @num_vlans: number of VLANs in mac_filter_hash, returned to caller
+ * @vlan_list: list of VLANs present in mac_filter_hash, returned to caller.
+ *             This array is allocated here, but has to be freed in caller.
  *
- * Called from the VF to configure the promiscuous mode of
- * VF vsis and from the VF reset path to reset promiscuous mode.
+ * Called to get number of VLANs and VLAN list present in mac_filter_hash.
  **/
-static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
-                                                  u16 vsi_id,
-                                                  bool allmulti,
-                                                  bool alluni)
+static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, int *num_vlans,
+                                          s16 **vlan_list)
 {
-       struct i40e_pf *pf = vf->pf;
-       struct i40e_hw *hw = &pf->hw;
        struct i40e_mac_filter *f;
-       i40e_status aq_ret = 0;
-       struct i40e_vsi *vsi;
+       int i = 0;
        int bkt;
 
-       vsi = i40e_find_vsi_from_id(pf, vsi_id);
-       if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
-               return I40E_ERR_PARAM;
+       spin_lock_bh(&vsi->mac_filter_hash_lock);
+       *num_vlans = i40e_getnum_vf_vsi_vlan_filters(vsi);
+       *vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
+       if (!(*vlan_list))
+               goto err;
 
-       if (vf->port_vlan_id) {
-               aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
-                                                           allmulti,
-                                                           vf->port_vlan_id,
-                                                           NULL);
+       hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
+               if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
+                       continue;
+               (*vlan_list)[i++] = f->vlan;
+       }
+err:
+       spin_unlock_bh(&vsi->mac_filter_hash_lock);
+}
+
+/**
+ * i40e_set_vsi_promisc
+ * @vf: pointer to the VF struct
+ * @seid: VSI number
+ * @multi_enable: set MAC L2 layer multicast promiscuous enable/disable
+ *                for a given VLAN
+ * @unicast_enable: set MAC L2 layer unicast promiscuous enable/disable
+ *                  for a given VLAN
+ * @vl: List of VLANs - apply filter for given VLANs
+ * @num_vlans: Number of elements in @vl
+ **/
+static i40e_status
+i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
+                    bool unicast_enable, s16 *vl, int num_vlans)
+{
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_hw *hw = &pf->hw;
+       i40e_status aq_ret;
+       int i;
+
+       /* No VLAN to set promisc on, set on VSI */
+       if (!num_vlans || !vl) {
+               aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, seid,
+                                                              multi_enable,
+                                                              NULL);
                if (aq_ret) {
                        int aq_err = pf->hw.aq.asq_last_status;
 
@@ -1147,13 +1189,14 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
                                vf->vf_id,
                                i40e_stat_str(&pf->hw, aq_ret),
                                i40e_aq_str(&pf->hw, aq_err));
+
                        return aq_ret;
                }
 
-               aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
-                                                           alluni,
-                                                           vf->port_vlan_id,
-                                                           NULL);
+               aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, seid,
+                                                            unicast_enable,
+                                                            NULL, true);
+
                if (aq_ret) {
                        int aq_err = pf->hw.aq.asq_last_status;
 
@@ -1163,68 +1206,84 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
                                i40e_stat_str(&pf->hw, aq_ret),
                                i40e_aq_str(&pf->hw, aq_err));
                }
+
                return aq_ret;
-       } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
-               hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
-                       if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
-                               continue;
-                       aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
-                                                                   vsi->seid,
-                                                                   allmulti,
-                                                                   f->vlan,
-                                                                   NULL);
-                       if (aq_ret) {
-                               int aq_err = pf->hw.aq.asq_last_status;
+       }
 
-                               dev_err(&pf->pdev->dev,
-                                       "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
-                                       f->vlan,
-                                       i40e_stat_str(&pf->hw, aq_ret),
-                                       i40e_aq_str(&pf->hw, aq_err));
-                       }
+       for (i = 0; i < num_vlans; i++) {
+               aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid,
+                                                           multi_enable,
+                                                           vl[i], NULL);
+               if (aq_ret) {
+                       int aq_err = pf->hw.aq.asq_last_status;
+
+                       dev_err(&pf->pdev->dev,
+                               "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+                               vf->vf_id,
+                               i40e_stat_str(&pf->hw, aq_ret),
+                               i40e_aq_str(&pf->hw, aq_err));
+               }
 
-                       aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
-                                                                   vsi->seid,
-                                                                   alluni,
-                                                                   f->vlan,
-                                                                   NULL);
-                       if (aq_ret) {
-                               int aq_err = pf->hw.aq.asq_last_status;
+               aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid,
+                                                           unicast_enable,
+                                                           vl[i], NULL);
+               if (aq_ret) {
+                       int aq_err = pf->hw.aq.asq_last_status;
 
-                               dev_err(&pf->pdev->dev,
-                                       "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
-                                       f->vlan,
-                                       i40e_stat_str(&pf->hw, aq_ret),
-                                       i40e_aq_str(&pf->hw, aq_err));
-                       }
+                       dev_err(&pf->pdev->dev,
+                               "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+                               vf->vf_id,
+                               i40e_stat_str(&pf->hw, aq_ret),
+                               i40e_aq_str(&pf->hw, aq_err));
                }
-               return aq_ret;
        }
-       aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, allmulti,
-                                                      NULL);
-       if (aq_ret) {
-               int aq_err = pf->hw.aq.asq_last_status;
+       return aq_ret;
+}
 
-               dev_err(&pf->pdev->dev,
-                       "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
-                       vf->vf_id,
-                       i40e_stat_str(&pf->hw, aq_ret),
-                       i40e_aq_str(&pf->hw, aq_err));
+/**
+ * i40e_config_vf_promiscuous_mode
+ * @vf: pointer to the VF info
+ * @vsi_id: VSI id
+ * @allmulti: set MAC L2 layer multicast promiscuous enable/disable
+ * @alluni: set MAC L2 layer unicast promiscuous enable/disable
+ *
+ * Called from the VF to configure the promiscuous mode of
+ * VF vsis and from the VF reset path to reset promiscuous mode.
+ **/
+static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
+                                                  u16 vsi_id,
+                                                  bool allmulti,
+                                                  bool alluni)
+{
+       i40e_status aq_ret = I40E_SUCCESS;
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_vsi *vsi;
+       int num_vlans;
+       s16 *vl;
+
+       vsi = i40e_find_vsi_from_id(pf, vsi_id);
+       if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
+               return I40E_ERR_PARAM;
+
+       if (vf->port_vlan_id) {
+               aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti,
+                                             alluni, &vf->port_vlan_id, 1);
                return aq_ret;
-       }
+       } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+               i40e_get_vlan_list_sync(vsi, &num_vlans, &vl);
 
-       aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, alluni,
-                                                    NULL, true);
-       if (aq_ret) {
-               int aq_err = pf->hw.aq.asq_last_status;
+               if (!vl)
+                       return I40E_ERR_NO_MEMORY;
 
-               dev_err(&pf->pdev->dev,
-                       "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
-                       vf->vf_id,
-                       i40e_stat_str(&pf->hw, aq_ret),
-                       i40e_aq_str(&pf->hw, aq_err));
+               aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
+                                             vl, num_vlans);
+               kfree(vl);
+               return aq_ret;
        }
 
+       /* no VLANs to set on, set on VSI */
+       aq_ret = i40e_set_vsi_promisc(vf, vsi->seid, allmulti, alluni,
+                                     NULL, 0);
        return aq_ret;
 }
 
@@ -1973,25 +2032,6 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
 }
 
 /**
- * i40e_getnum_vf_vsi_vlan_filters
- * @vsi: pointer to the vsi
- *
- * called to get the number of VLANs offloaded on this VF
- **/
-static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
-{
-       struct i40e_mac_filter *f;
-       int num_vlans = 0, bkt;
-
-       hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
-               if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
-                       num_vlans++;
-       }
-
-       return num_vlans;
-}
-
-/**
  * i40e_vc_config_promiscuous_mode_msg
  * @vf: pointer to the VF info
  * @msg: pointer to the msg buffer
index 631248c..5491215 100644 (file)
@@ -10,7 +10,6 @@
 
 #define I40E_VIRTCHNL_SUPPORTED_QTYPES 2
 
-#define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED    3
 #define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED  10
 
 #define I40E_VLAN_PRIORITY_SHIFT       13
index 7276580..8ce57b5 100644 (file)
@@ -168,10 +168,10 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
-               /* fall through */
+               fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
-               /* fallthrough -- handle aborts by dropping packet */
+               fallthrough; /* handle aborts by dropping packet */
        case XDP_DROP:
                result = I40E_XDP_CONSUMED;
                break;
@@ -378,19 +378,13 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
  **/
 static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
 {
+       unsigned int sent_frames = 0, total_bytes = 0;
        struct i40e_tx_desc *tx_desc = NULL;
        struct i40e_tx_buffer *tx_bi;
-       bool work_done = true;
        struct xdp_desc desc;
        dma_addr_t dma;
 
        while (budget-- > 0) {
-               if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
-                       xdp_ring->tx_stats.tx_busy++;
-                       work_done = false;
-                       break;
-               }
-
                if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
                        break;
 
@@ -408,6 +402,9 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
                                   | I40E_TX_DESC_CMD_EOP,
                                   0, desc.len, 0);
 
+               sent_frames++;
+               total_bytes += tx_bi->bytecount;
+
                xdp_ring->next_to_use++;
                if (xdp_ring->next_to_use == xdp_ring->count)
                        xdp_ring->next_to_use = 0;
@@ -420,9 +417,10 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
                i40e_xdp_ring_update_tail(xdp_ring);
 
                xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+               i40e_update_tx_stats(xdp_ring, sent_frames, total_bytes);
        }
 
-       return !!budget && work_done;
+       return !!budget;
 }
 
 /**
@@ -434,6 +432,7 @@ static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
                                     struct i40e_tx_buffer *tx_bi)
 {
        xdp_return_frame(tx_bi->xdpf);
+       tx_ring->xdp_tx_active--;
        dma_unmap_single(tx_ring->dev,
                         dma_unmap_addr(tx_bi, dma),
                         dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
@@ -442,32 +441,29 @@ static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
 
 /**
  * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
+ * @vsi: Current VSI
  * @tx_ring: XDP Tx ring
- * @tx_bi: Tx buffer info to clean
  *
  * Returns true if cleanup/tranmission is done.
  **/
-bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
-                          struct i40e_ring *tx_ring, int napi_budget)
+bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
 {
-       unsigned int ntc, total_bytes = 0, budget = vsi->work_limit;
-       u32 i, completed_frames, frames_ready, xsk_frames = 0;
        struct xdp_umem *umem = tx_ring->xsk_umem;
+       u32 i, completed_frames, xsk_frames = 0;
        u32 head_idx = i40e_get_head(tx_ring);
-       bool work_done = true, xmit_done;
        struct i40e_tx_buffer *tx_bi;
+       unsigned int ntc;
 
        if (head_idx < tx_ring->next_to_clean)
                head_idx += tx_ring->count;
-       frames_ready = head_idx - tx_ring->next_to_clean;
+       completed_frames = head_idx - tx_ring->next_to_clean;
 
-       if (frames_ready == 0) {
+       if (completed_frames == 0)
                goto out_xmit;
-       } else if (frames_ready > budget) {
-               completed_frames = budget;
-               work_done = false;
-       } else {
-               completed_frames = frames_ready;
+
+       if (likely(!tx_ring->xdp_tx_active)) {
+               xsk_frames = completed_frames;
+               goto skip;
        }
 
        ntc = tx_ring->next_to_clean;
@@ -475,18 +471,18 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
        for (i = 0; i < completed_frames; i++) {
                tx_bi = &tx_ring->tx_bi[ntc];
 
-               if (tx_bi->xdpf)
+               if (tx_bi->xdpf) {
                        i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
-               else
+                       tx_bi->xdpf = NULL;
+               } else {
                        xsk_frames++;
-
-               tx_bi->xdpf = NULL;
-               total_bytes += tx_bi->bytecount;
+               }
 
                if (++ntc >= tx_ring->count)
                        ntc = 0;
        }
 
+skip:
        tx_ring->next_to_clean += completed_frames;
        if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
                tx_ring->next_to_clean -= tx_ring->count;
@@ -494,16 +490,13 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
        if (xsk_frames)
                xsk_umem_complete_tx(umem, xsk_frames);
 
-       i40e_arm_wb(tx_ring, vsi, budget);
-       i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);
+       i40e_arm_wb(tx_ring, vsi, completed_frames);
 
 out_xmit:
        if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
                xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
 
-       xmit_done = i40e_xmit_zc(tx_ring, budget);
-
-       return work_done && xmit_done;
+       return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
 }
 
 /**
@@ -567,7 +560,7 @@ void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
 
 /**
  * i40e_xsk_clean_xdp_ring - Clean the XDP Tx ring on shutdown
- * @xdp_ring: XDP Tx ring
+ * @tx_ring: XDP Tx ring
  **/
 void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
 {
index ea919a7..c524c14 100644 (file)
@@ -15,8 +15,7 @@ int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
 bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count);
 int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
 
-bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
-                          struct i40e_ring *tx_ring, int napi_budget);
+bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring);
 int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
 int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring);
 void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring);
index 10b805b..8a65525 100644 (file)
@@ -375,7 +375,6 @@ struct iavf_device {
 
 /* needed by iavf_ethtool.c */
 extern char iavf_driver_name[];
-extern const char iavf_driver_version[];
 extern struct workqueue_struct *iavf_wq;
 
 int iavf_up(struct iavf_adapter *adapter);
index 1815738..c93567f 100644 (file)
@@ -571,7 +571,6 @@ static void iavf_get_drvinfo(struct net_device *netdev,
        struct iavf_adapter *adapter = netdev_priv(netdev);
 
        strlcpy(drvinfo->driver, iavf_driver_name, 32);
-       strlcpy(drvinfo->version, iavf_driver_version, 32);
        strlcpy(drvinfo->fw_version, "N/A", 4);
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
        drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN;
index fa82768..48c956d 100644 (file)
@@ -21,16 +21,6 @@ char iavf_driver_name[] = "iavf";
 static const char iavf_driver_string[] =
        "Intel(R) Ethernet Adaptive Virtual Function Network Driver";
 
-#define DRV_KERN "-k"
-
-#define DRV_VERSION_MAJOR 3
-#define DRV_VERSION_MINOR 2
-#define DRV_VERSION_BUILD 3
-#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
-            __stringify(DRV_VERSION_MINOR) "." \
-            __stringify(DRV_VERSION_BUILD) \
-            DRV_KERN
-const char iavf_driver_version[] = DRV_VERSION;
 static const char iavf_copyright[] =
        "Copyright (c) 2013 - 2018 Intel Corporation.";
 
@@ -57,7 +47,6 @@ MODULE_ALIAS("i40evf");
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
 
 static const struct net_device_ops iavf_netdev_ops;
 struct workqueue_struct *iavf_wq;
@@ -1863,8 +1852,10 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
 
        adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
        adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
-       if (!adapter->rss_key || !adapter->rss_lut)
+       if (!adapter->rss_key || !adapter->rss_lut) {
+               err = -ENOMEM;
                goto err_mem;
+       }
        if (RSS_AQ(adapter))
                adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
        else
@@ -1946,7 +1937,10 @@ static void iavf_watchdog_task(struct work_struct *work)
                                iavf_send_api_ver(adapter);
                        }
                } else {
-                       if (!iavf_process_aq_command(adapter) &&
+                       /* An error will be returned if no commands were
+                        * processed; use this opportunity to update stats
+                        */
+                       if (iavf_process_aq_command(adapter) &&
                            adapter->state == __IAVF_RUNNING)
                                iavf_request_stats(adapter);
                }
@@ -3982,8 +3976,7 @@ static int __init iavf_init_module(void)
 {
        int ret;
 
-       pr_info("iavf: %s - version %s\n", iavf_driver_string,
-               iavf_driver_version);
+       pr_info("iavf: %s\n", iavf_driver_string);
 
        pr_info("%s\n", iavf_copyright);
 
index e091bab..ca041b3 100644 (file)
@@ -1007,7 +1007,7 @@ static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
        case IAVF_RX_PTYPE_INNER_PROT_UDP:
        case IAVF_RX_PTYPE_INNER_PROT_SCTP:
                skb->ip_summed = CHECKSUM_UNNECESSARY;
-               /* fall though */
+               fallthrough;
        default:
                break;
        }
index 7190a40..de9fda7 100644 (file)
@@ -192,14 +192,6 @@ struct iavf_hw {
        char err_str[16];
 };
 
-struct iavf_driver_version {
-       u8 major_version;
-       u8 minor_version;
-       u8 build_version;
-       u8 subbuild_version;
-       u8 driver_string[32];
-};
-
 /* RX Descriptors */
 union iavf_16byte_rx_desc {
        struct {
index 5792ee6..c665220 100644 (file)
@@ -55,7 +55,6 @@
 #include "ice_xsk.h"
 #include "ice_arfs.h"
 
-extern const char ice_drv_ver[];
 #define ICE_BAR0               0
 #define ICE_REQ_DESC_MULTIPLE  32
 #define ICE_MIN_NUM_DESC       64
@@ -223,6 +222,8 @@ enum ice_state {
        __ICE_OICR_INTR_DIS,            /* Global OICR interrupt disabled */
        __ICE_MDD_VF_PRINT_PENDING,     /* set when MDD event handle */
        __ICE_VF_RESETS_DISABLED,       /* disable resets during ice_remove */
+       __ICE_LINK_DEFAULT_OVERRIDE_PENDING,
+       __ICE_PHY_INIT_COMPLETE,
        __ICE_STATE_NBITS               /* must be last */
 };
 
@@ -358,12 +359,14 @@ enum ice_pf_flags {
        ICE_FLAG_FD_ENA,
        ICE_FLAG_ADV_FEATURES,
        ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
+       ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
        ICE_FLAG_NO_MEDIA,
        ICE_FLAG_FW_LLDP_AGENT,
        ICE_FLAG_ETHTOOL_CTXT,          /* set when ethtool holds RTNL lock */
        ICE_FLAG_LEGACY_RX,
        ICE_FLAG_VF_TRUE_PROMISC_ENA,
        ICE_FLAG_MDD_AUTO_RESET_VF,
+       ICE_FLAG_LINK_LENIENT_MODE_ENA,
        ICE_PF_FLAGS_NBITS              /* must be last */
 };
 
@@ -374,6 +377,7 @@ struct ice_pf {
        struct devlink_port devlink_port;
 
        struct devlink_region *nvm_region;
+       struct devlink_region *devcaps_region;
 
        /* OS reserved IRQ details */
        struct msix_entry *msix_entries;
@@ -423,6 +427,8 @@ struct ice_pf {
        u16 empr_count;         /* EMP reset count */
        u16 pfr_count;          /* PF reset count */
 
+       u8 wol_ena : 1;         /* software state of WoL */
+       u32 wakeup_reason;      /* last wakeup reason */
        struct ice_hw_port_stats stats;
        struct ice_hw_port_stats stats_prev;
        struct ice_hw hw;
@@ -435,6 +441,10 @@ struct ice_pf {
        u32 tx_timeout_recovery_level;
        char int_name[ICE_INT_NAME_STR_LEN];
        u32 sw_int_count;
+
+       __le64 nvm_phy_type_lo; /* NVM PHY type low */
+       __le64 nvm_phy_type_hi; /* NVM PHY type high */
+       struct ice_link_default_override_tlv link_dflt_override;
 };
 
 struct ice_netdev_priv {
@@ -568,6 +578,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
 void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
 const char *ice_stat_str(enum ice_status stat_err);
 const char *ice_aq_str(enum ice_aq_err aq_err);
+bool ice_is_wol_supported(struct ice_pf *pf);
 int
 ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
                    bool is_tun);
index 92f82f2..b363e02 100644 (file)
@@ -215,13 +215,6 @@ struct ice_aqc_get_sw_cfg_resp_elem {
 #define ICE_AQC_GET_SW_CONF_RESP_IS_VF         BIT(15)
 };
 
-/* The response buffer is as follows. Note that the length of the
- * elements array varies with the length of the command response.
- */
-struct ice_aqc_get_sw_cfg_resp {
-       struct ice_aqc_get_sw_cfg_resp_elem elements[1];
-};
-
 /* These resource type defines are used for all switch resource
  * commands where a resource type is required, such as:
  * Get Resource Allocation command (indirect 0x0204)
@@ -274,7 +267,7 @@ struct ice_aqc_alloc_free_res_elem {
 #define ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_M      \
                                (0xF << ICE_AQC_RES_TYPE_VSI_PRUNE_LIST_S)
        __le16 num_elems;
-       struct ice_aqc_res_elem elem[1];
+       struct ice_aqc_res_elem elem[];
 };
 
 /* Add VSI (indirect 0x0210)
@@ -568,8 +561,8 @@ struct ice_sw_rule_lkup_rx_tx {
         * lookup-type
         */
        __le16 hdr_len;
-       u8 hdr[1];
-} __packed;
+       u8 hdr[];
+};
 
 /* Add/Update/Remove large action command/response entry
  * "index" is returned as part of a response to a successful Add command, and
@@ -578,7 +571,6 @@ struct ice_sw_rule_lkup_rx_tx {
 struct ice_sw_rule_lg_act {
        __le16 index; /* Index in large action table */
        __le16 size;
-       __le32 act[1]; /* array of size for actions */
        /* Max number of large actions */
 #define ICE_MAX_LG_ACT 4
        /* Bit 0:1 - Action type */
@@ -629,6 +621,7 @@ struct ice_sw_rule_lg_act {
 #define ICE_LG_ACT_STAT_COUNT          0x7
 #define ICE_LG_ACT_STAT_COUNT_S                3
 #define ICE_LG_ACT_STAT_COUNT_M                (0x7F << ICE_LG_ACT_STAT_COUNT_S)
+       __le32 act[]; /* array of size for actions */
 };
 
 /* Add/Update/Remove VSI list command/response entry
@@ -638,7 +631,7 @@ struct ice_sw_rule_lg_act {
 struct ice_sw_rule_vsi_list {
        __le16 index; /* Index of VSI/Prune list */
        __le16 number_vsi;
-       __le16 vsi[1]; /* Array of number_vsi VSI numbers */
+       __le16 vsi[]; /* Array of number_vsi VSI numbers */
 };
 
 /* Query VSI list command/response entry */
@@ -695,14 +688,6 @@ struct ice_aqc_sched_elem_cmd {
        __le32 addr_low;
 };
 
-/* This is the buffer for:
- * Suspend Nodes (indirect 0x0409)
- * Resume Nodes (indirect 0x040A)
- */
-struct ice_aqc_suspend_resume_elem {
-       __le32 teid[1];
-};
-
 struct ice_aqc_elem_info_bw {
        __le16 bw_profile_idx;
        __le16 bw_alloc;
@@ -753,15 +738,7 @@ struct ice_aqc_txsched_topo_grp_info_hdr {
 
 struct ice_aqc_add_elem {
        struct ice_aqc_txsched_topo_grp_info_hdr hdr;
-       struct ice_aqc_txsched_elem_data generic[1];
-};
-
-struct ice_aqc_conf_elem {
-       struct ice_aqc_txsched_elem_data generic[1];
-};
-
-struct ice_aqc_get_elem {
-       struct ice_aqc_txsched_elem_data generic[1];
+       struct ice_aqc_txsched_elem_data generic[];
 };
 
 struct ice_aqc_get_topo_elem {
@@ -772,7 +749,7 @@ struct ice_aqc_get_topo_elem {
 
 struct ice_aqc_delete_elem {
        struct ice_aqc_txsched_topo_grp_info_hdr hdr;
-       __le32 teid[1];
+       __le32 teid[];
 };
 
 /* Query Port ETS (indirect 0x040E)
@@ -835,10 +812,6 @@ struct ice_aqc_rl_profile_elem {
        __le16 rl_encode;
 };
 
-struct ice_aqc_rl_profile_generic_elem {
-       struct ice_aqc_rl_profile_elem generic[1];
-};
-
 /* Query Scheduler Resource Allocation (indirect 0x0412)
  * This indirect command retrieves the scheduler resources allocated by
  * EMP Firmware to the given PF.
@@ -988,8 +961,11 @@ struct ice_aqc_get_phy_caps_data {
 #define ICE_AQC_GET_PHY_EN_MOD_QUAL                    BIT(5)
 #define ICE_AQC_PHY_EN_AUTO_FEC                                BIT(7)
 #define ICE_AQC_PHY_CAPS_MASK                          ICE_M(0xff, 0)
-       u8 low_power_ctrl;
+       u8 low_power_ctrl_an;
 #define ICE_AQC_PHY_EN_D3COLD_LOW_POWER_AUTONEG                BIT(0)
+#define ICE_AQC_PHY_AN_EN_CLAUSE28                     BIT(1)
+#define ICE_AQC_PHY_AN_EN_CLAUSE73                     BIT(2)
+#define ICE_AQC_PHY_AN_EN_CLAUSE37                     BIT(3)
        __le16 eee_cap;
 #define ICE_AQC_PHY_EEE_EN_100BASE_TX                  BIT(0)
 #define ICE_AQC_PHY_EEE_EN_1000BASE_T                  BIT(1)
@@ -1010,12 +986,14 @@ struct ice_aqc_get_phy_caps_data {
 #define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN             BIT(6)
 #define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN             BIT(7)
 #define ICE_AQC_PHY_FEC_MASK                           ICE_M(0xdf, 0)
-       u8 rsvd1;       /* Byte 35 reserved */
+       u8 module_compliance_enforcement;
+#define ICE_AQC_MOD_ENFORCE_STRICT_MODE                        BIT(0)
        u8 extended_compliance_code;
 #define ICE_MODULE_TYPE_TOTAL_BYTE                     3
        u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
 #define ICE_AQC_MOD_TYPE_BYTE0_SFP_PLUS                        0xA0
 #define ICE_AQC_MOD_TYPE_BYTE0_QSFP_PLUS               0x80
+#define ICE_AQC_MOD_TYPE_IDENT                         1
 #define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE     BIT(0)
 #define ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE      BIT(1)
 #define ICE_AQC_MOD_TYPE_BYTE1_10G_BASE_SR             BIT(4)
@@ -1059,11 +1037,11 @@ struct ice_aqc_set_phy_cfg_data {
 #define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT  BIT(5)
 #define ICE_AQ_PHY_ENA_LESM            BIT(6)
 #define ICE_AQ_PHY_ENA_AUTO_FEC                BIT(7)
-       u8 low_power_ctrl;
+       u8 low_power_ctrl_an;
        __le16 eee_cap; /* Value from ice_aqc_get_phy_caps */
        __le16 eeer_value;
        u8 link_fec_opt; /* Use defines from ice_aqc_get_phy_caps */
-       u8 rsvd1;
+       u8 module_compliance_enforcement;
 };
 
 /* Set MAC Config command data structure (direct 0x0603) */
@@ -1174,6 +1152,7 @@ struct ice_aqc_get_link_status_data {
 #define ICE_AQ_LINK_PWR_QSFP_CLASS_3   2
 #define ICE_AQ_LINK_PWR_QSFP_CLASS_4   3
        __le16 link_speed;
+#define ICE_AQ_LINK_SPEED_M            0x7FF
 #define ICE_AQ_LINK_SPEED_10MB         BIT(0)
 #define ICE_AQ_LINK_SPEED_100MB                BIT(1)
 #define ICE_AQ_LINK_SPEED_1000MB       BIT(2)
@@ -1216,6 +1195,57 @@ struct ice_aqc_set_mac_lb {
        u8 reserved[15];
 };
 
+struct ice_aqc_link_topo_addr {
+       u8 lport_num;
+       u8 lport_num_valid;
+#define ICE_AQC_LINK_TOPO_PORT_NUM_VALID       BIT(0)
+       u8 node_type_ctx;
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_S          0
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_M  (0xF << ICE_AQC_LINK_TOPO_NODE_TYPE_S)
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_PHY                0
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL  1
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_MUX_CTRL   2
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_LED_CTRL   3
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_LED                4
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_THERMAL    5
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE       6
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_MEZZ       7
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_ID_EEPROM  8
+#define ICE_AQC_LINK_TOPO_NODE_CTX_S           4
+#define ICE_AQC_LINK_TOPO_NODE_CTX_M           \
+                               (0xF << ICE_AQC_LINK_TOPO_NODE_CTX_S)
+#define ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL      0
+#define ICE_AQC_LINK_TOPO_NODE_CTX_BOARD       1
+#define ICE_AQC_LINK_TOPO_NODE_CTX_PORT                2
+#define ICE_AQC_LINK_TOPO_NODE_CTX_NODE                3
+#define ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED    4
+#define ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE    5
+       u8 index;
+       __le16 handle;
+#define ICE_AQC_LINK_TOPO_HANDLE_S     0
+#define ICE_AQC_LINK_TOPO_HANDLE_M     (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S)
+/* Used to decode the handle field */
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_M    BIT(9)
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM  BIT(9)
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0
+#define ICE_AQC_LINK_TOPO_HANDLE_NODE_S                0
+/* In case of a Mezzanine type */
+#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_NODE_M   \
+                               (0x3F << ICE_AQC_LINK_TOPO_HANDLE_NODE_S)
+#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_S        6
+#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_M        (0x7 << ICE_AQC_LINK_TOPO_HANDLE_MEZZ_S)
+/* In case of a LOM type */
+#define ICE_AQC_LINK_TOPO_HANDLE_LOM_NODE_M    \
+                               (0x1FF << ICE_AQC_LINK_TOPO_HANDLE_NODE_S)
+};
+
+/* Get Link Topology Handle (direct, 0x06E0) */
+struct ice_aqc_get_link_topo {
+       struct ice_aqc_link_topo_addr addr;
+       u8 node_part_num;
+       u8 rsvd[9];
+};
+
 /* Set Port Identification LED (direct, 0x06E9) */
 struct ice_aqc_set_port_id_led {
        u8 lport_num;
@@ -1537,7 +1567,7 @@ struct ice_aqc_add_tx_qgrp {
        __le32 parent_teid;
        u8 num_txqs;
        u8 rsvd[3];
-       struct ice_aqc_add_txqs_perq txqs[1];
+       struct ice_aqc_add_txqs_perq txqs[];
 };
 
 /* Disable Tx LAN Queues (indirect 0x0C31) */
@@ -1575,18 +1605,13 @@ struct ice_aqc_dis_txq_item {
        u8 num_qs;
        u8 rsvd;
        /* The length of the q_id array varies according to num_qs */
-       __le16 q_id[1];
-       /* This only applies from F8 onward */
 #define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S          15
 #define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_LAN_Q      \
                        (0 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)
 #define ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET  \
                        (1 << ICE_AQC_Q_DIS_BUF_ELEM_TYPE_S)
-};
-
-struct ice_aqc_dis_txq {
-       struct ice_aqc_dis_txq_item qgrps[1];
-};
+       __le16 q_id[];
+} __packed;
 
 /* Configure Firmware Logging Command (indirect 0xFF09)
  * Logging Information Read Response (indirect 0xFF10)
@@ -1636,12 +1661,7 @@ enum ice_aqc_fw_logging_mod {
        ICE_AQC_FW_LOG_ID_MAX,
 };
 
-/* This is the buffer for both of the logging commands.
- * The entry array size depends on the datalen parameter in the descriptor.
- * There will be a total of datalen / 2 entries.
- */
-struct ice_aqc_fw_logging_data {
-       __le16 entry[1];
+/* Defines for both above FW logging command/response buffers */
 #define ICE_AQC_FW_LOG_ID_S            0
 #define ICE_AQC_FW_LOG_ID_M            (0xFFF << ICE_AQC_FW_LOG_ID_S)
 
@@ -1654,7 +1674,6 @@ struct ice_aqc_fw_logging_data {
 #define ICE_AQC_FW_LOG_INIT_EN         BIT(13) /* Used by command */
 #define ICE_AQC_FW_LOG_FLOW_EN         BIT(14) /* Used by command */
 #define ICE_AQC_FW_LOG_ERR_EN          BIT(15) /* Used by command */
-};
 
 /* Get/Clear FW Log (indirect 0xFF11) */
 struct ice_aqc_get_clear_fw_log {
@@ -1716,7 +1735,7 @@ struct ice_aqc_get_pkg_info {
 /* Get Package Info List response buffer format (0x0C43) */
 struct ice_aqc_get_pkg_info_resp {
        __le32 count;
-       struct ice_aqc_get_pkg_info pkg_info[1];
+       struct ice_aqc_get_pkg_info pkg_info[];
 };
 
 /* Lan Queue Overflow Event (direct, 0x1001) */
@@ -1797,6 +1816,7 @@ struct ice_aq_desc {
                struct ice_aqc_set_event_mask set_event_mask;
                struct ice_aqc_get_link_status get_link_status;
                struct ice_aqc_event_lan_overflow lan_overflow;
+               struct ice_aqc_get_link_topo get_link_topo;
        } params;
 };
 
@@ -1896,6 +1916,7 @@ enum ice_adminq_opc {
        ice_aqc_opc_get_link_status                     = 0x0607,
        ice_aqc_opc_set_event_mask                      = 0x0613,
        ice_aqc_opc_set_mac_lb                          = 0x0620,
+       ice_aqc_opc_get_link_topo                       = 0x06E0,
        ice_aqc_opc_set_port_id_led                     = 0x06E9,
        ice_aqc_opc_sff_eeprom                          = 0x06EE,
 
index d620d26..8700847 100644 (file)
@@ -635,10 +635,10 @@ int
 ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
                struct ice_aqc_add_tx_qgrp *qg_buf)
 {
+       u8 buf_len = struct_size(qg_buf, txqs, 1);
        struct ice_tlan_ctx tlan_ctx = { 0 };
        struct ice_aqc_add_txqs_perq *txq;
        struct ice_pf *pf = vsi->back;
-       u8 buf_len = sizeof(*qg_buf);
        struct ice_hw *hw = &pf->hw;
        enum ice_status status;
        u16 pf_q;
index bce0e12..c72cc77 100644 (file)
@@ -20,7 +20,40 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
        if (hw->vendor_id != PCI_VENDOR_ID_INTEL)
                return ICE_ERR_DEVICE_NOT_SUPPORTED;
 
-       hw->mac_type = ICE_MAC_GENERIC;
+       switch (hw->device_id) {
+       case ICE_DEV_ID_E810C_BACKPLANE:
+       case ICE_DEV_ID_E810C_QSFP:
+       case ICE_DEV_ID_E810C_SFP:
+       case ICE_DEV_ID_E810_XXV_SFP:
+               hw->mac_type = ICE_MAC_E810;
+               break;
+       case ICE_DEV_ID_E823C_10G_BASE_T:
+       case ICE_DEV_ID_E823C_BACKPLANE:
+       case ICE_DEV_ID_E823C_QSFP:
+       case ICE_DEV_ID_E823C_SFP:
+       case ICE_DEV_ID_E823C_SGMII:
+       case ICE_DEV_ID_E822C_10G_BASE_T:
+       case ICE_DEV_ID_E822C_BACKPLANE:
+       case ICE_DEV_ID_E822C_QSFP:
+       case ICE_DEV_ID_E822C_SFP:
+       case ICE_DEV_ID_E822C_SGMII:
+       case ICE_DEV_ID_E822L_10G_BASE_T:
+       case ICE_DEV_ID_E822L_BACKPLANE:
+       case ICE_DEV_ID_E822L_SFP:
+       case ICE_DEV_ID_E822L_SGMII:
+       case ICE_DEV_ID_E823L_10G_BASE_T:
+       case ICE_DEV_ID_E823L_1GBE:
+       case ICE_DEV_ID_E823L_BACKPLANE:
+       case ICE_DEV_ID_E823L_QSFP:
+       case ICE_DEV_ID_E823L_SFP:
+               hw->mac_type = ICE_MAC_GENERIC;
+               break;
+       default:
+               hw->mac_type = ICE_MAC_UNKNOWN;
+               break;
+       }
+
+       ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
        return 0;
 }
 
@@ -52,7 +85,8 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
  * is returned in user specified buffer. Please interpret user specified
  * buffer as "manage_mac_read" response.
  * Response such as various MAC addresses are stored in HW struct (port.mac)
- * ice_aq_discover_caps is expected to be called before this function is called.
+ * ice_discover_dev_caps is expected to be called before this function is
+ * called.
  */
 static enum ice_status
 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
@@ -116,11 +150,13 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
        u16 pcaps_size = sizeof(*pcaps);
        struct ice_aq_desc desc;
        enum ice_status status;
+       struct ice_hw *hw;
 
        cmd = &desc.params.get_phy;
 
        if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
                return ICE_ERR_PARAM;
+       hw = pi->hw;
 
        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
 
@@ -128,17 +164,94 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
                cmd->param0 |= cpu_to_le16(ICE_AQC_GET_PHY_RQM);
 
        cmd->param0 |= cpu_to_le16(report_mode);
-       status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);
+       status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
+
+       ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
+                 report_mode);
+       ice_debug(hw, ICE_DBG_LINK, "   phy_type_low = 0x%llx\n",
+                 (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
+       ice_debug(hw, ICE_DBG_LINK, "   phy_type_high = 0x%llx\n",
+                 (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
+       ice_debug(hw, ICE_DBG_LINK, "   caps = 0x%x\n", pcaps->caps);
+       ice_debug(hw, ICE_DBG_LINK, "   low_power_ctrl_an = 0x%x\n",
+                 pcaps->low_power_ctrl_an);
+       ice_debug(hw, ICE_DBG_LINK, "   eee_cap = 0x%x\n", pcaps->eee_cap);
+       ice_debug(hw, ICE_DBG_LINK, "   eeer_value = 0x%x\n",
+                 pcaps->eeer_value);
+       ice_debug(hw, ICE_DBG_LINK, "   link_fec_options = 0x%x\n",
+                 pcaps->link_fec_options);
+       ice_debug(hw, ICE_DBG_LINK, "   module_compliance_enforcement = 0x%x\n",
+                 pcaps->module_compliance_enforcement);
+       ice_debug(hw, ICE_DBG_LINK, "   extended_compliance_code = 0x%x\n",
+                 pcaps->extended_compliance_code);
+       ice_debug(hw, ICE_DBG_LINK, "   module_type[0] = 0x%x\n",
+                 pcaps->module_type[0]);
+       ice_debug(hw, ICE_DBG_LINK, "   module_type[1] = 0x%x\n",
+                 pcaps->module_type[1]);
+       ice_debug(hw, ICE_DBG_LINK, "   module_type[2] = 0x%x\n",
+                 pcaps->module_type[2]);
 
        if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
                pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
                pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
+               memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
+                      sizeof(pi->phy.link_info.module_type));
        }
 
        return status;
 }
 
 /**
+ * ice_aq_get_link_topo_handle - get link topology node return status
+ * @pi: port information structure
+ * @node_type: requested node type
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get link topology node return status for specified node type (0x06E0)
+ *
+ * Node type cage can be used to determine if cage is present. If AQC
+ * returns error (ENOENT), then no cage present. If no cage present, then
+ * connection type is backplane or BASE-T.
+ */
+static enum ice_status
+ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
+                           struct ice_sq_cd *cd)
+{
+       struct ice_aqc_get_link_topo *cmd;
+       struct ice_aq_desc desc;
+
+       cmd = &desc.params.get_link_topo;
+
+       ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+
+       cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
+                                  ICE_AQC_LINK_TOPO_NODE_CTX_S);
+
+       /* set node type */
+       cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
+
+       return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_is_media_cage_present
+ * @pi: port information structure
+ *
+ * Returns true if media cage is present, else false. If no cage, then
+ * media type is backplane or BASE-T.
+ */
+static bool ice_is_media_cage_present(struct ice_port_info *pi)
+{
+       /* Node type cage can be used to determine if cage is present. If AQC
+        * returns error (ENOENT), then no cage present. If no cage present then
+        * connection type is backplane or BASE-T.
+        */
+       return !ice_aq_get_link_topo_handle(pi,
+                                           ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
+                                           NULL);
+}
+
+/**
  * ice_get_media_type - Gets media type
  * @pi: port information structure
  */
@@ -155,6 +268,18 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
                return ICE_MEDIA_UNKNOWN;
 
        if (hw_link_info->phy_type_low) {
+               /* 1G SGMII is a special case where some DA cable PHYs
+                * may show this as an option when it really shouldn't
+                * be since SGMII is meant to be between a MAC and a PHY
+                * in a backplane. Try to detect this case and handle it
+                */
+               if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
+                   (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
+                   ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
+                   hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
+                   ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
+                       return ICE_MEDIA_DA;
+
                switch (hw_link_info->phy_type_low) {
                case ICE_PHY_TYPE_LOW_1000BASE_SX:
                case ICE_PHY_TYPE_LOW_1000BASE_LX:
@@ -163,7 +288,6 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
                case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
                case ICE_PHY_TYPE_LOW_25GBASE_SR:
                case ICE_PHY_TYPE_LOW_25GBASE_LR:
-               case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
                case ICE_PHY_TYPE_LOW_40GBASE_SR4:
                case ICE_PHY_TYPE_LOW_40GBASE_LR4:
                case ICE_PHY_TYPE_LOW_50GBASE_SR2:
@@ -175,6 +299,14 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
                case ICE_PHY_TYPE_LOW_100GBASE_LR4:
                case ICE_PHY_TYPE_LOW_100GBASE_SR2:
                case ICE_PHY_TYPE_LOW_100GBASE_DR:
+               case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+               case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+               case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
+               case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
+               case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
+               case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
+               case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
+               case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
                        return ICE_MEDIA_FIBER;
                case ICE_PHY_TYPE_LOW_100BASE_TX:
                case ICE_PHY_TYPE_LOW_1000BASE_T:
@@ -194,6 +326,16 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
                case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
                case ICE_PHY_TYPE_LOW_100GBASE_CP2:
                        return ICE_MEDIA_DA;
+               case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
+               case ICE_PHY_TYPE_LOW_40G_XLAUI:
+               case ICE_PHY_TYPE_LOW_50G_LAUI2:
+               case ICE_PHY_TYPE_LOW_50G_AUI2:
+               case ICE_PHY_TYPE_LOW_50G_AUI1:
+               case ICE_PHY_TYPE_LOW_100G_AUI4:
+               case ICE_PHY_TYPE_LOW_100G_CAUI4:
+                       if (ice_is_media_cage_present(pi))
+                               return ICE_MEDIA_DA;
+                       fallthrough;
                case ICE_PHY_TYPE_LOW_1000BASE_KX:
                case ICE_PHY_TYPE_LOW_2500BASE_KX:
                case ICE_PHY_TYPE_LOW_2500BASE_X:
@@ -211,8 +353,16 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
                }
        } else {
                switch (hw_link_info->phy_type_high) {
+               case ICE_PHY_TYPE_HIGH_100G_AUI2:
+               case ICE_PHY_TYPE_HIGH_100G_CAUI2:
+                       if (ice_is_media_cage_present(pi))
+                               return ICE_MEDIA_DA;
+                       fallthrough;
                case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
                        return ICE_MEDIA_BACKPLANE;
+               case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
+               case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
+                       return ICE_MEDIA_FIBER;
                }
        }
        return ICE_MEDIA_UNKNOWN;
@@ -292,18 +442,21 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
 
        li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
 
-       ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
-       ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
+       ice_debug(hw, ICE_DBG_LINK, "get link info\n");
+       ice_debug(hw, ICE_DBG_LINK, "   link_speed = 0x%x\n", li->link_speed);
+       ice_debug(hw, ICE_DBG_LINK, "   phy_type_low = 0x%llx\n",
                  (unsigned long long)li->phy_type_low);
-       ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
+       ice_debug(hw, ICE_DBG_LINK, "   phy_type_high = 0x%llx\n",
                  (unsigned long long)li->phy_type_high);
-       ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
-       ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
-       ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
-       ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
-       ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
-       ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
-       ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);
+       ice_debug(hw, ICE_DBG_LINK, "   media_type = 0x%x\n", *hw_media_type);
+       ice_debug(hw, ICE_DBG_LINK, "   link_info = 0x%x\n", li->link_info);
+       ice_debug(hw, ICE_DBG_LINK, "   an_info = 0x%x\n", li->an_info);
+       ice_debug(hw, ICE_DBG_LINK, "   ext_info = 0x%x\n", li->ext_info);
+       ice_debug(hw, ICE_DBG_LINK, "   fec_info = 0x%x\n", li->fec_info);
+       ice_debug(hw, ICE_DBG_LINK, "   lse_ena = 0x%x\n", li->lse_ena);
+       ice_debug(hw, ICE_DBG_LINK, "   max_frame = 0x%x\n",
+                 li->max_frame_size);
+       ice_debug(hw, ICE_DBG_LINK, "   pacing = 0x%x\n", li->pacing);
 
        /* save link status information */
        if (link)
@@ -440,30 +593,24 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
        devm_kfree(ice_hw_to_dev(hw), sw);
 }
 
-#define ICE_FW_LOG_DESC_SIZE(n)        (sizeof(struct ice_aqc_fw_logging_data) + \
-       (((n) - 1) * sizeof(((struct ice_aqc_fw_logging_data *)0)->entry)))
-#define ICE_FW_LOG_DESC_SIZE_MAX       \
-       ICE_FW_LOG_DESC_SIZE(ICE_AQC_FW_LOG_ID_MAX)
-
 /**
  * ice_get_fw_log_cfg - get FW logging configuration
  * @hw: pointer to the HW struct
  */
 static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
 {
-       struct ice_aqc_fw_logging_data *config;
        struct ice_aq_desc desc;
        enum ice_status status;
+       __le16 *config;
        u16 size;
 
-       size = ICE_FW_LOG_DESC_SIZE_MAX;
+       size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
        config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
        if (!config)
                return ICE_ERR_NO_MEMORY;
 
        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
 
-       desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
        desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 
        status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
@@ -474,7 +621,7 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
                for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
                        u16 v, m, flgs;
 
-                       v = le16_to_cpu(config->entry[i]);
+                       v = le16_to_cpu(config[i]);
                        m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
                        flgs = (v & ICE_AQC_FW_LOG_EN_M) >> ICE_AQC_FW_LOG_EN_S;
 
@@ -526,11 +673,11 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
  */
 static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
 {
-       struct ice_aqc_fw_logging_data *data = NULL;
        struct ice_aqc_fw_logging *cmd;
        enum ice_status status = 0;
        u16 i, chgs = 0, len = 0;
        struct ice_aq_desc desc;
+       __le16 *data = NULL;
        u8 actv_evnts = 0;
        void *buf = NULL;
 
@@ -571,8 +718,9 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
                                continue;
 
                        if (!data) {
-                               data = devm_kzalloc(ice_hw_to_dev(hw),
-                                                   ICE_FW_LOG_DESC_SIZE_MAX,
+                               data = devm_kcalloc(ice_hw_to_dev(hw),
+                                                   sizeof(*data),
+                                                   ICE_AQC_FW_LOG_ID_MAX,
                                                    GFP_KERNEL);
                                if (!data)
                                        return ICE_ERR_NO_MEMORY;
@@ -580,7 +728,7 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
 
                        val = i << ICE_AQC_FW_LOG_ID_S;
                        val |= hw->fw_log.evnts[i].cfg << ICE_AQC_FW_LOG_EN_S;
-                       data->entry[chgs++] = cpu_to_le16(val);
+                       data[chgs++] = cpu_to_le16(val);
                }
 
                /* Only enable FW logging if at least one module is specified.
@@ -599,7 +747,7 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
                                cmd->log_ctrl |= ICE_AQC_FW_LOG_UART_EN;
 
                        buf = data;
-                       len = ICE_FW_LOG_DESC_SIZE(chgs);
+                       len = sizeof(*data) * chgs;
                        desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
                }
        }
@@ -629,7 +777,7 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
                                continue;
                        }
 
-                       v = le16_to_cpu(data->entry[i]);
+                       v = le16_to_cpu(data[i]);
                        m = (v & ICE_AQC_FW_LOG_ID_M) >> ICE_AQC_FW_LOG_ID_S;
                        hw->fw_log.evnts[m].cur = hw->fw_log.evnts[m].cfg;
                }
@@ -1541,7 +1689,7 @@ ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
        enum ice_status status;
        u16 buf_len;
 
-       buf_len = struct_size(buf, elem, num - 1);
+       buf_len = struct_size(buf, elem, num);
        buf = kzalloc(buf_len, GFP_KERNEL);
        if (!buf)
                return ICE_ERR_NO_MEMORY;
@@ -1558,7 +1706,7 @@ ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
        if (status)
                goto ice_alloc_res_exit;
 
-       memcpy(res, buf->elem, sizeof(buf->elem) * num);
+       memcpy(res, buf->elem, sizeof(*buf->elem) * num);
 
 ice_alloc_res_exit:
        kfree(buf);
@@ -1579,7 +1727,7 @@ ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
        enum ice_status status;
        u16 buf_len;
 
-       buf_len = struct_size(buf, elem, num - 1);
+       buf_len = struct_size(buf, elem, num);
        buf = kzalloc(buf_len, GFP_KERNEL);
        if (!buf)
                return ICE_ERR_NO_MEMORY;
@@ -1587,7 +1735,7 @@ ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
        /* Prepare buffer to free resource. */
        buf->num_elems = cpu_to_le16(num);
        buf->res_type = cpu_to_le16(type);
-       memcpy(buf->elem, res, sizeof(buf->elem) * num);
+       memcpy(buf->elem, res, sizeof(*buf->elem) * num);
 
        status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
                                       ice_aqc_opc_free_res, NULL);
@@ -1622,221 +1770,412 @@ static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
 }
 
 /**
- * ice_parse_caps - parse function/device capabilities
+ * ice_parse_common_caps - parse common device/function capabilities
+ * @hw: pointer to the HW struct
+ * @caps: pointer to common capabilities structure
+ * @elem: the capability element to parse
+ * @prefix: message prefix for tracing capabilities
+ *
+ * Given a capability element, extract relevant details into the common
+ * capability structure.
+ *
+ * Returns: true if the capability matches one of the common capability ids,
+ * false otherwise.
+ */
+static bool
+ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
+                     struct ice_aqc_list_caps_elem *elem, const char *prefix)
+{
+       u32 logical_id = le32_to_cpu(elem->logical_id);
+       u32 phys_id = le32_to_cpu(elem->phys_id);
+       u32 number = le32_to_cpu(elem->number);
+       u16 cap = le16_to_cpu(elem->cap);
+       bool found = true;
+
+       switch (cap) {
+       case ICE_AQC_CAPS_VALID_FUNCTIONS:
+               caps->valid_functions = number;
+               ice_debug(hw, ICE_DBG_INIT,
+                         "%s: valid_functions (bitmap) = %d\n", prefix,
+                         caps->valid_functions);
+               break;
+       case ICE_AQC_CAPS_SRIOV:
+               caps->sr_iov_1_1 = (number == 1);
+               ice_debug(hw, ICE_DBG_INIT,
+                         "%s: sr_iov_1_1 = %d\n", prefix,
+                         caps->sr_iov_1_1);
+               break;
+       case ICE_AQC_CAPS_DCB:
+               caps->dcb = (number == 1);
+               caps->active_tc_bitmap = logical_id;
+               caps->maxtc = phys_id;
+               ice_debug(hw, ICE_DBG_INIT,
+                         "%s: dcb = %d\n", prefix, caps->dcb);
+               ice_debug(hw, ICE_DBG_INIT,
+                         "%s: active_tc_bitmap = %d\n", prefix,
+                         caps->active_tc_bitmap);
+               ice_debug(hw, ICE_DBG_INIT,
+                         "%s: maxtc = %d\n", prefix, caps->maxtc);
+               break;
+       case ICE_AQC_CAPS_RSS:
+               caps->rss_table_size = number;
+               caps->rss_table_entry_width = logical_id;
+               ice_debug(hw, ICE_DBG_INIT,
+                         "%s: rss_table_size = %d\n", prefix,
+                         caps->rss_table_size);
+               ice_debug(hw, ICE_DBG_INIT,
+                         "%s: rss_table_entry_width = %d\n", prefix,
+                         caps->rss_table_entry_width);
+               break;
+       case ICE_AQC_CAPS_RXQS:
+               caps->num_rxq = number;
+               caps->rxq_first_id = phys_id;
+               ice_debug(hw, ICE_DBG_INIT,
+                         "%s: num_rxq = %d\n", prefix,
+                         caps->num_rxq);
+               ice_debug(hw, ICE_DBG_INIT,
+                         "%s: rxq_first_id = %d\n", prefix,
+                         caps->rxq_first_id);
+               break;
+       case ICE_AQC_CAPS_TXQS:
+               caps->num_txq = number;
+               caps->txq_first_id = phys_id;
+               ice_debug(hw, ICE_DBG_INIT,
+                         "%s: num_txq = %d\n", prefix,
+                         caps->num_txq);
+               ice_debug(hw, ICE_DBG_INIT,
+                         "%s: txq_first_id = %d\n", prefix,
+                         caps->txq_first_id);
+               break;
+       case ICE_AQC_CAPS_MSIX:
+               caps->num_msix_vectors = number;
+               caps->msix_vector_first_id = phys_id;
+               ice_debug(hw, ICE_DBG_INIT,
+                         "%s: num_msix_vectors = %d\n", prefix,
+                         caps->num_msix_vectors);
+               ice_debug(hw, ICE_DBG_INIT,
+                         "%s: msix_vector_first_id = %d\n", prefix,
+                         caps->msix_vector_first_id);
+               break;
+       case ICE_AQC_CAPS_MAX_MTU:
+               caps->max_mtu = number;
+               ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
+                         prefix, caps->max_mtu);
+               break;
+       default:
+               /* Not one of the recognized common capabilities */
+               found = false;
+       }
+
+       return found;
+}
+
+/**
+ * ice_recalc_port_limited_caps - Recalculate port limited capabilities
+ * @hw: pointer to the HW structure
+ * @caps: pointer to capabilities structure to fix
+ *
+ * Re-calculate the capabilities that are dependent on the number of physical
+ * ports; i.e. some features are not supported or function differently on
+ * devices with more than 4 ports.
+ */
+static void
+ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
+{
+       /* This assumes device capabilities are always scanned before function
+        * capabilities during the initialization flow.
+        */
+       if (hw->dev_caps.num_funcs > 4) {
+               /* Max 4 TCs per port */
+               caps->maxtc = 4;
+               ice_debug(hw, ICE_DBG_INIT,
+                         "reducing maxtc to %d (based on #ports)\n",
+                         caps->maxtc);
+       }
+}
+
+/**
+ * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for ICE_AQC_CAPS_VF.
+ */
+static void
+ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
+                      struct ice_aqc_list_caps_elem *cap)
+{
+       u32 logical_id = le32_to_cpu(cap->logical_id);
+       u32 number = le32_to_cpu(cap->number);
+
+       func_p->num_allocd_vfs = number;
+       func_p->vf_base_id = logical_id;
+       ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
+                 func_p->num_allocd_vfs);
+       ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
+                 func_p->vf_base_id);
+}
+
+/**
+ * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for ICE_AQC_CAPS_VSI.
+ */
+static void
+ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
+                       struct ice_aqc_list_caps_elem *cap)
+{
+       func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
+       ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
+                 le32_to_cpu(cap->number));
+       ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
+                 func_p->guar_num_vsi);
+}
+
+/**
+ * ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ *
+ * Extract function capabilities for ICE_AQC_CAPS_FD.
+ */
+static void
+ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
+{
+       u32 reg_val, val;
+
+       reg_val = rd32(hw, GLQF_FD_SIZE);
+       val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
+               GLQF_FD_SIZE_FD_GSIZE_S;
+       func_p->fd_fltr_guar =
+               ice_get_num_per_func(hw, val);
+       val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
+               GLQF_FD_SIZE_FD_BSIZE_S;
+       func_p->fd_fltr_best_effort = val;
+
+       ice_debug(hw, ICE_DBG_INIT,
+                 "func caps: fd_fltr_guar = %d\n",
+                 func_p->fd_fltr_guar);
+       ice_debug(hw, ICE_DBG_INIT,
+                 "func caps: fd_fltr_best_effort = %d\n",
+                 func_p->fd_fltr_best_effort);
+}
+
+/**
+ * ice_parse_func_caps - Parse function capabilities
  * @hw: pointer to the HW struct
- * @buf: pointer to a buffer containing function/device capability records
- * @cap_count: number of capability records in the list
- * @opc: type of capabilities list to parse
+ * @func_p: pointer to function capabilities structure
+ * @buf: buffer containing the function capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse function (0x000A) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ice_parse_common_caps.
  *
- * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the function capabilities structure.
  */
 static void
-ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
-              enum ice_adminq_opc opc)
+ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
+                   void *buf, u32 cap_count)
 {
        struct ice_aqc_list_caps_elem *cap_resp;
-       struct ice_hw_func_caps *func_p = NULL;
-       struct ice_hw_dev_caps *dev_p = NULL;
-       struct ice_hw_common_caps *caps;
-       char const *prefix;
        u32 i;
 
-       if (!buf)
-               return;
-
        cap_resp = (struct ice_aqc_list_caps_elem *)buf;
 
-       if (opc == ice_aqc_opc_list_dev_caps) {
-               dev_p = &hw->dev_caps;
-               caps = &dev_p->common_cap;
-               prefix = "dev cap";
-       } else if (opc == ice_aqc_opc_list_func_caps) {
-               func_p = &hw->func_caps;
-               caps = &func_p->common_cap;
-               prefix = "func cap";
-       } else {
-               ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
-               return;
-       }
+       memset(func_p, 0, sizeof(*func_p));
 
-       for (i = 0; caps && i < cap_count; i++, cap_resp++) {
-               u32 logical_id = le32_to_cpu(cap_resp->logical_id);
-               u32 phys_id = le32_to_cpu(cap_resp->phys_id);
-               u32 number = le32_to_cpu(cap_resp->number);
-               u16 cap = le16_to_cpu(cap_resp->cap);
+       for (i = 0; i < cap_count; i++) {
+               u16 cap = le16_to_cpu(cap_resp[i].cap);
+               bool found;
 
-               switch (cap) {
-               case ICE_AQC_CAPS_VALID_FUNCTIONS:
-                       caps->valid_functions = number;
-                       ice_debug(hw, ICE_DBG_INIT,
-                                 "%s: valid_functions (bitmap) = %d\n", prefix,
-                                 caps->valid_functions);
+               found = ice_parse_common_caps(hw, &func_p->common_cap,
+                                             &cap_resp[i], "func caps");
 
-                       /* store func count for resource management purposes */
-                       if (dev_p)
-                               dev_p->num_funcs = hweight32(number);
-                       break;
-               case ICE_AQC_CAPS_SRIOV:
-                       caps->sr_iov_1_1 = (number == 1);
-                       ice_debug(hw, ICE_DBG_INIT,
-                                 "%s: sr_iov_1_1 = %d\n", prefix,
-                                 caps->sr_iov_1_1);
-                       break;
+               switch (cap) {
                case ICE_AQC_CAPS_VF:
-                       if (dev_p) {
-                               dev_p->num_vfs_exposed = number;
-                               ice_debug(hw, ICE_DBG_INIT,
-                                         "%s: num_vfs_exposed = %d\n", prefix,
-                                         dev_p->num_vfs_exposed);
-                       } else if (func_p) {
-                               func_p->num_allocd_vfs = number;
-                               func_p->vf_base_id = logical_id;
-                               ice_debug(hw, ICE_DBG_INIT,
-                                         "%s: num_allocd_vfs = %d\n", prefix,
-                                         func_p->num_allocd_vfs);
-                               ice_debug(hw, ICE_DBG_INIT,
-                                         "%s: vf_base_id = %d\n", prefix,
-                                         func_p->vf_base_id);
-                       }
+                       ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
                        break;
                case ICE_AQC_CAPS_VSI:
-                       if (dev_p) {
-                               dev_p->num_vsi_allocd_to_host = number;
-                               ice_debug(hw, ICE_DBG_INIT,
-                                         "%s: num_vsi_allocd_to_host = %d\n",
-                                         prefix,
-                                         dev_p->num_vsi_allocd_to_host);
-                       } else if (func_p) {
-                               func_p->guar_num_vsi =
-                                       ice_get_num_per_func(hw, ICE_MAX_VSI);
-                               ice_debug(hw, ICE_DBG_INIT,
-                                         "%s: guar_num_vsi (fw) = %d\n",
-                                         prefix, number);
-                               ice_debug(hw, ICE_DBG_INIT,
-                                         "%s: guar_num_vsi = %d\n",
-                                         prefix, func_p->guar_num_vsi);
-                       }
+                       ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
                        break;
-               case ICE_AQC_CAPS_DCB:
-                       caps->dcb = (number == 1);
-                       caps->active_tc_bitmap = logical_id;
-                       caps->maxtc = phys_id;
-                       ice_debug(hw, ICE_DBG_INIT,
-                                 "%s: dcb = %d\n", prefix, caps->dcb);
-                       ice_debug(hw, ICE_DBG_INIT,
-                                 "%s: active_tc_bitmap = %d\n", prefix,
-                                 caps->active_tc_bitmap);
-                       ice_debug(hw, ICE_DBG_INIT,
-                                 "%s: maxtc = %d\n", prefix, caps->maxtc);
-                       break;
-               case ICE_AQC_CAPS_RSS:
-                       caps->rss_table_size = number;
-                       caps->rss_table_entry_width = logical_id;
-                       ice_debug(hw, ICE_DBG_INIT,
-                                 "%s: rss_table_size = %d\n", prefix,
-                                 caps->rss_table_size);
-                       ice_debug(hw, ICE_DBG_INIT,
-                                 "%s: rss_table_entry_width = %d\n", prefix,
-                                 caps->rss_table_entry_width);
+               case ICE_AQC_CAPS_FD:
+                       ice_parse_fdir_func_caps(hw, func_p);
                        break;
-               case ICE_AQC_CAPS_RXQS:
-                       caps->num_rxq = number;
-                       caps->rxq_first_id = phys_id;
-                       ice_debug(hw, ICE_DBG_INIT,
-                                 "%s: num_rxq = %d\n", prefix,
-                                 caps->num_rxq);
-                       ice_debug(hw, ICE_DBG_INIT,
-                                 "%s: rxq_first_id = %d\n", prefix,
-                                 caps->rxq_first_id);
+               default:
+                       /* Don't list common capabilities as unknown */
+                       if (!found)
+                               ice_debug(hw, ICE_DBG_INIT,
+                                         "func caps: unknown capability[%d]: 0x%x\n",
+                                         i, cap);
                        break;
-               case ICE_AQC_CAPS_TXQS:
-                       caps->num_txq = number;
-                       caps->txq_first_id = phys_id;
-                       ice_debug(hw, ICE_DBG_INIT,
-                                 "%s: num_txq = %d\n", prefix,
-                                 caps->num_txq);
-                       ice_debug(hw, ICE_DBG_INIT,
-                                 "%s: txq_first_id = %d\n", prefix,
-                                 caps->txq_first_id);
+               }
+       }
+
+       ice_recalc_port_limited_caps(hw, &func_p->common_cap);
+}
+
+/**
+ * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
+ */
+static void
+ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+                             struct ice_aqc_list_caps_elem *cap)
+{
+       u32 number = le32_to_cpu(cap->number);
+
+       dev_p->num_funcs = hweight32(number);
+       ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
+                 dev_p->num_funcs);
+}
+
+/**
+ * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_VF for device capabilities.
+ */
+static void
+ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+                     struct ice_aqc_list_caps_elem *cap)
+{
+       u32 number = le32_to_cpu(cap->number);
+
+       dev_p->num_vfs_exposed = number;
+       ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
+                 dev_p->num_vfs_exposed);
+}
+
+/**
+ * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_VSI for device capabilities.
+ */
+static void
+ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+                      struct ice_aqc_list_caps_elem *cap)
+{
+       u32 number = le32_to_cpu(cap->number);
+
+       dev_p->num_vsi_allocd_to_host = number;
+       ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
+                 dev_p->num_vsi_allocd_to_host);
+}
+
+/**
+ * ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_FD for device capabilities.
+ */
+static void
+ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+                       struct ice_aqc_list_caps_elem *cap)
+{
+       u32 number = le32_to_cpu(cap->number);
+
+       dev_p->num_flow_director_fltr = number;
+       ice_debug(hw, ICE_DBG_INIT, "dev caps: num_flow_director_fltr = %d\n",
+                 dev_p->num_flow_director_fltr);
+}
+
+/**
+ * ice_parse_dev_caps - Parse device capabilities
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @buf: buffer containing the device capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse device (0x000B) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ice_parse_common_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the device capabilities structure.
+ */
+static void
+ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+                  void *buf, u32 cap_count)
+{
+       struct ice_aqc_list_caps_elem *cap_resp;
+       u32 i;
+
+       cap_resp = (struct ice_aqc_list_caps_elem *)buf;
+
+       memset(dev_p, 0, sizeof(*dev_p));
+
+       for (i = 0; i < cap_count; i++) {
+               u16 cap = le16_to_cpu(cap_resp[i].cap);
+               bool found;
+
+               found = ice_parse_common_caps(hw, &dev_p->common_cap,
+                                             &cap_resp[i], "dev caps");
+
+               switch (cap) {
+               case ICE_AQC_CAPS_VALID_FUNCTIONS:
+                       ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
                        break;
-               case ICE_AQC_CAPS_MSIX:
-                       caps->num_msix_vectors = number;
-                       caps->msix_vector_first_id = phys_id;
-                       ice_debug(hw, ICE_DBG_INIT,
-                                 "%s: num_msix_vectors = %d\n", prefix,
-                                 caps->num_msix_vectors);
-                       ice_debug(hw, ICE_DBG_INIT,
-                                 "%s: msix_vector_first_id = %d\n", prefix,
-                                 caps->msix_vector_first_id);
+               case ICE_AQC_CAPS_VF:
+                       ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
                        break;
-               case ICE_AQC_CAPS_FD:
-                       if (dev_p) {
-                               dev_p->num_flow_director_fltr = number;
-                               ice_debug(hw, ICE_DBG_INIT,
-                                         "%s: num_flow_director_fltr = %d\n",
-                                         prefix,
-                                         dev_p->num_flow_director_fltr);
-                       }
-                       if (func_p) {
-                               u32 reg_val, val;
-
-                               reg_val = rd32(hw, GLQF_FD_SIZE);
-                               val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
-                                     GLQF_FD_SIZE_FD_GSIZE_S;
-                               func_p->fd_fltr_guar =
-                                     ice_get_num_per_func(hw, val);
-                               val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
-                                     GLQF_FD_SIZE_FD_BSIZE_S;
-                               func_p->fd_fltr_best_effort = val;
-                               ice_debug(hw, ICE_DBG_INIT,
-                                         "%s: fd_fltr_guar = %d\n",
-                                         prefix, func_p->fd_fltr_guar);
-                               ice_debug(hw, ICE_DBG_INIT,
-                                         "%s: fd_fltr_best_effort = %d\n",
-                                         prefix, func_p->fd_fltr_best_effort);
-                       }
+               case ICE_AQC_CAPS_VSI:
+                       ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
                        break;
-               case ICE_AQC_CAPS_MAX_MTU:
-                       caps->max_mtu = number;
-                       ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
-                                 prefix, caps->max_mtu);
+               case  ICE_AQC_CAPS_FD:
+                       ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
                        break;
                default:
-                       ice_debug(hw, ICE_DBG_INIT,
-                                 "%s: unknown capability[%d]: 0x%x\n", prefix,
-                                 i, cap);
+                       /* Don't list common capabilities as unknown */
+                       if (!found)
+                               ice_debug(hw, ICE_DBG_INIT,
+                                         "dev caps: unknown capability[%d]: 0x%x\n",
+                                         i, cap);
                        break;
                }
        }
 
-       /* Re-calculate capabilities that are dependent on the number of
-        * physical ports; i.e. some features are not supported or function
-        * differently on devices with more than 4 ports.
-        */
-       if (hw->dev_caps.num_funcs > 4) {
-               /* Max 4 TCs per port */
-               caps->maxtc = 4;
-               ice_debug(hw, ICE_DBG_INIT,
-                         "%s: maxtc = %d (based on #ports)\n", prefix,
-                         caps->maxtc);
-       }
+       ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
 }
 
 /**
- * ice_aq_discover_caps - query function/device capabilities
+ * ice_aq_list_caps - query function/device capabilities
  * @hw: pointer to the HW struct
- * @buf: a virtual buffer to hold the capabilities
- * @buf_size: Size of the virtual buffer
- * @cap_count: cap count needed if AQ err==ENOMEM
- * @opc: capabilities type to discover - pass in the command opcode
+ * @buf: a buffer to hold the capabilities
+ * @buf_size: size of the buffer
+ * @cap_count: if not NULL, set to the number of capabilities reported
+ * @opc: capabilities type to discover, device or function
  * @cd: pointer to command details structure or NULL
  *
- * Get the function(0x000a)/device(0x000b) capabilities description from
- * the firmware.
+ * Get the function (0x000A) or device (0x000B) capabilities description from
+ * firmware and store it in the buffer.
+ *
+ * If the cap_count pointer is not NULL, then it is set to the number of
+ * capabilities firmware will report. Note that if the buffer size is too
+ * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
+ * cap_count will still be updated in this case. It is recommended that the
+ * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
+ * firmware could return) to avoid this.
  */
-static enum ice_status
-ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
-                    enum ice_adminq_opc opc, struct ice_sq_cd *cd)
+enum ice_status
+ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
+                enum ice_adminq_opc opc, struct ice_sq_cd *cd)
 {
        struct ice_aqc_list_caps *cmd;
        struct ice_aq_desc desc;
@@ -1849,59 +2188,78 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
                return ICE_ERR_PARAM;
 
        ice_fill_dflt_direct_cmd_desc(&desc, opc);
-
        status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
-       if (!status)
-               ice_parse_caps(hw, buf, le32_to_cpu(cmd->count), opc);
-       else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
+
+       if (cap_count)
                *cap_count = le32_to_cpu(cmd->count);
+
        return status;
 }
 
 /**
- * ice_discover_caps - get info about the HW
+ * ice_discover_dev_caps - Read and extract device capabilities
  * @hw: pointer to the hardware structure
- * @opc: capabilities type to discover - pass in the command opcode
+ * @dev_caps: pointer to device capabilities structure
+ *
+ * Read the device capabilities and extract them into the dev_caps structure
+ * for later use.
  */
 static enum ice_status
-ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
+ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
 {
        enum ice_status status;
-       u32 cap_count;
-       u16 cbuf_len;
-       u8 retries;
-
-       /* The driver doesn't know how many capabilities the device will return
-        * so the buffer size required isn't known ahead of time. The driver
-        * starts with cbuf_len and if this turns out to be insufficient, the
-        * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
-        * The driver then allocates the buffer based on the count and retries
-        * the operation. So it follows that the retry count is 2.
+       u32 cap_count = 0;
+       void *cbuf;
+
+       cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+       if (!cbuf)
+               return ICE_ERR_NO_MEMORY;
+
+       /* Although the driver doesn't know the number of capabilities the
+        * device will return, we can simply send a 4KB buffer, the maximum
+        * possible size that firmware can return.
         */
-#define ICE_GET_CAP_BUF_COUNT  40
-#define ICE_GET_CAP_RETRY_COUNT        2
+       cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
 
-       cap_count = ICE_GET_CAP_BUF_COUNT;
-       retries = ICE_GET_CAP_RETRY_COUNT;
+       status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
+                                 ice_aqc_opc_list_dev_caps, NULL);
+       if (!status)
+               ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
+       kfree(cbuf);
 
-       do {
-               void *cbuf;
+       return status;
+}
 
-               cbuf_len = (u16)(cap_count *
-                                sizeof(struct ice_aqc_list_caps_elem));
-               cbuf = devm_kzalloc(ice_hw_to_dev(hw), cbuf_len, GFP_KERNEL);
-               if (!cbuf)
-                       return ICE_ERR_NO_MEMORY;
+/**
+ * ice_discover_func_caps - Read and extract function capabilities
+ * @hw: pointer to the hardware structure
+ * @func_caps: pointer to function capabilities structure
+ *
+ * Read the function capabilities and extract them into the func_caps structure
+ * for later use.
+ */
+static enum ice_status
+ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
+{
+       enum ice_status status;
+       u32 cap_count = 0;
+       void *cbuf;
 
-               status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
-                                             opc, NULL);
-               devm_kfree(ice_hw_to_dev(hw), cbuf);
+       cbuf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+       if (!cbuf)
+               return ICE_ERR_NO_MEMORY;
 
-               if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
-                       break;
+       /* Although the driver doesn't know the number of capabilities the
+        * device will return, we can simply send a 4KB buffer, the maximum
+        * possible size that firmware can return.
+        */
+       cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
 
-               /* If ENOMEM is returned, try again with bigger buffer */
-       } while (--retries);
+       status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
+                                 ice_aqc_opc_list_func_caps, NULL);
+       if (!status)
+               ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
+       kfree(cbuf);
 
        return status;
 }
@@ -1978,11 +2336,11 @@ enum ice_status ice_get_caps(struct ice_hw *hw)
 {
        enum ice_status status;
 
-       status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
-       if (!status)
-               status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);
+       status = ice_discover_dev_caps(hw, &hw->dev_caps);
+       if (status)
+               return status;
 
-       return status;
+       return ice_discover_func_caps(hw, &hw->func_caps);
 }
 
 /**
@@ -2218,7 +2576,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
 /**
  * ice_aq_set_phy_cfg
  * @hw: pointer to the HW struct
- * @lport: logical port number
+ * @pi: port info structure of the interested logical port
  * @cfg: structure with PHY configuration data to be set
  * @cd: pointer to command details structure or NULL
  *
@@ -2228,7 +2586,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
  * parameters. This status will be indicated by the command response (0x0601).
  */
 enum ice_status
-ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
+ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
                   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
 {
        struct ice_aq_desc desc;
@@ -2247,24 +2605,29 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
        }
 
        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
-       desc.params.set_phy.lport_num = lport;
+       desc.params.set_phy.lport_num = pi->lport;
        desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 
-       ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
+       ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
+       ice_debug(hw, ICE_DBG_LINK, "   phy_type_low = 0x%llx\n",
                  (unsigned long long)le64_to_cpu(cfg->phy_type_low));
-       ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
+       ice_debug(hw, ICE_DBG_LINK, "   phy_type_high = 0x%llx\n",
                  (unsigned long long)le64_to_cpu(cfg->phy_type_high));
-       ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
-       ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n",
-                 cfg->low_power_ctrl);
-       ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
-       ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
-       ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
+       ice_debug(hw, ICE_DBG_LINK, "   caps = 0x%x\n", cfg->caps);
+       ice_debug(hw, ICE_DBG_LINK, "   low_power_ctrl_an = 0x%x\n",
+                 cfg->low_power_ctrl_an);
+       ice_debug(hw, ICE_DBG_LINK, "   eee_cap = 0x%x\n", cfg->eee_cap);
+       ice_debug(hw, ICE_DBG_LINK, "   eeer_value = 0x%x\n", cfg->eeer_value);
+       ice_debug(hw, ICE_DBG_LINK, "   link_fec_opt = 0x%x\n",
+                 cfg->link_fec_opt);
 
        status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
        if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
                status = 0;
 
+       if (!status)
+               pi->phy.curr_user_phy_cfg = *cfg;
+
        return status;
 }
 
@@ -2298,9 +2661,6 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
 
                status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
                                             pcaps, NULL);
-               if (!status)
-                       memcpy(li->module_type, &pcaps->module_type,
-                              sizeof(li->module_type));
 
                devm_kfree(ice_hw_to_dev(hw), pcaps);
        }
@@ -2309,28 +2669,101 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
 }
 
 /**
- * ice_set_fc
+ * ice_cache_phy_user_req
  * @pi: port information structure
- * @aq_failures: pointer to status code, specific to ice_set_fc routine
- * @ena_auto_link_update: enable automatic link update
+ * @cache_data: PHY logging data
+ * @cache_mode: PHY logging mode
  *
- * Set the requested flow control mode.
+ * Log the user request on (FC, FEC, SPEED) for later use.
+ */
+static void
+ice_cache_phy_user_req(struct ice_port_info *pi,
+                      struct ice_phy_cache_mode_data cache_data,
+                      enum ice_phy_cache_mode cache_mode)
+{
+       if (!pi)
+               return;
+
+       switch (cache_mode) {
+       case ICE_FC_MODE:
+               pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
+               break;
+       case ICE_SPEED_MODE:
+               pi->phy.curr_user_speed_req =
+                       cache_data.data.curr_user_speed_req;
+               break;
+       case ICE_FEC_MODE:
+               pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
+               break;
+       default:
+               break;
+       }
+}
+
+/**
+ * ice_caps_to_fc_mode
+ * @caps: PHY capabilities
+ *
+ * Convert PHY FC capabilities to ice FC mode
+ */
+enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
+{
+       if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
+           caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
+               return ICE_FC_FULL;
+
+       if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
+               return ICE_FC_TX_PAUSE;
+
+       if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
+               return ICE_FC_RX_PAUSE;
+
+       return ICE_FC_NONE;
+}
+
+/**
+ * ice_caps_to_fec_mode
+ * @caps: PHY capabilities
+ * @fec_options: Link FEC options
+ *
+ * Convert PHY FEC capabilities to ice FEC mode
+ */
+enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
+{
+       if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
+               return ICE_FEC_AUTO;
+
+       if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
+                          ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
+                          ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
+                          ICE_AQC_PHY_FEC_25G_KR_REQ))
+               return ICE_FEC_BASER;
+
+       if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
+                          ICE_AQC_PHY_FEC_25G_RS_544_REQ |
+                          ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
+               return ICE_FEC_RS;
+
+       return ICE_FEC_NONE;
+}
+
+/**
+ * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
+ * @pi: port information structure
+ * @cfg: PHY configuration data to set FC mode
+ * @req_mode: FC mode to configure
  */
 enum ice_status
-ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
+ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
+              enum ice_fc_mode req_mode)
 {
-       struct ice_aqc_set_phy_cfg_data cfg = { 0 };
-       struct ice_aqc_get_phy_caps_data *pcaps;
-       enum ice_status status;
+       struct ice_phy_cache_mode_data cache_data;
        u8 pause_mask = 0x0;
-       struct ice_hw *hw;
 
-       if (!pi)
-               return ICE_ERR_PARAM;
-       hw = pi->hw;
-       *aq_failures = ICE_SET_FC_AQ_FAIL_NONE;
+       if (!pi || !cfg)
+               return ICE_ERR_BAD_PTR;
 
-       switch (pi->fc.req_mode) {
+       switch (req_mode) {
        case ICE_FC_FULL:
                pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
                pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
@@ -2345,6 +2778,42 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
                break;
        }
 
+       /* clear the old pause settings */
+       cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
+               ICE_AQC_PHY_EN_RX_LINK_PAUSE);
+
+       /* set the new capabilities */
+       cfg->caps |= pause_mask;
+
+       /* Cache user FC request */
+       cache_data.data.curr_user_fc_req = req_mode;
+       ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
+
+       return 0;
+}
+
+/**
+ * ice_set_fc
+ * @pi: port information structure
+ * @aq_failures: pointer to status code, specific to ice_set_fc routine
+ * @ena_auto_link_update: enable automatic link update
+ *
+ * Set the requested flow control mode.
+ */
+enum ice_status
+ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
+{
+       struct ice_aqc_set_phy_cfg_data cfg = { 0 };
+       struct ice_aqc_get_phy_caps_data *pcaps;
+       enum ice_status status;
+       struct ice_hw *hw;
+
+       if (!pi || !aq_failures)
+               return ICE_ERR_BAD_PTR;
+
+       *aq_failures = 0;
+       hw = pi->hw;
+
        pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
        if (!pcaps)
                return ICE_ERR_NO_MEMORY;
@@ -2357,12 +2826,12 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
                goto out;
        }
 
-       /* clear the old pause settings */
-       cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
-                                  ICE_AQC_PHY_EN_RX_LINK_PAUSE);
+       ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
 
-       /* set the new capabilities */
-       cfg.caps |= pause_mask;
+       /* Configure the set PHY data */
+       status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
+       if (status)
+               goto out;
 
        /* If the capabilities have changed, then set the new config */
        if (cfg.caps != pcaps->caps) {
@@ -2371,15 +2840,8 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
                /* Auto restart link so settings take effect */
                if (ena_auto_link_update)
                        cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
-               /* Copy over all the old settings */
-               cfg.phy_type_high = pcaps->phy_type_high;
-               cfg.phy_type_low = pcaps->phy_type_low;
-               cfg.low_power_ctrl = pcaps->low_power_ctrl;
-               cfg.eee_cap = pcaps->eee_cap;
-               cfg.eeer_value = pcaps->eeer_value;
-               cfg.link_fec_opt = pcaps->link_fec_options;
-
-               status = ice_aq_set_phy_cfg(hw, pi->lport, &cfg, NULL);
+
+               status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
                if (status) {
                        *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
                        goto out;
@@ -2409,7 +2871,44 @@ out:
 }
 
 /**
+ * ice_phy_caps_equals_cfg
+ * @phy_caps: PHY capabilities
+ * @phy_cfg: PHY configuration
+ *
+ * Helper function to determine if PHY capabilities matches PHY
+ * configuration
+ */
+bool
+ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
+                       struct ice_aqc_set_phy_cfg_data *phy_cfg)
+{
+       u8 caps_mask, cfg_mask;
+
+       if (!phy_caps || !phy_cfg)
+               return false;
+
+       /* These bits are not common between capabilities and configuration.
+        * Do not use them to determine equality.
+        */
+       caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
+                                             ICE_AQC_GET_PHY_EN_MOD_QUAL);
+       cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+
+       if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
+           phy_caps->phy_type_high != phy_cfg->phy_type_high ||
+           ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
+           phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
+           phy_caps->eee_cap != phy_cfg->eee_cap ||
+           phy_caps->eeer_value != phy_cfg->eeer_value ||
+           phy_caps->link_fec_options != phy_cfg->link_fec_opt)
+               return false;
+
+       return true;
+}
+
+/**
  * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
+ * @pi: port information structure
  * @caps: PHY ability structure to copy date from
  * @cfg: PHY configuration structure to copy data to
  *
@@ -2417,42 +2916,73 @@ out:
  * data structure
  */
 void
-ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
+ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
+                        struct ice_aqc_get_phy_caps_data *caps,
                         struct ice_aqc_set_phy_cfg_data *cfg)
 {
-       if (!caps || !cfg)
+       if (!pi || !caps || !cfg)
                return;
 
+       memset(cfg, 0, sizeof(*cfg));
        cfg->phy_type_low = caps->phy_type_low;
        cfg->phy_type_high = caps->phy_type_high;
        cfg->caps = caps->caps;
-       cfg->low_power_ctrl = caps->low_power_ctrl;
+       cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
        cfg->eee_cap = caps->eee_cap;
        cfg->eeer_value = caps->eeer_value;
        cfg->link_fec_opt = caps->link_fec_options;
+       cfg->module_compliance_enforcement =
+               caps->module_compliance_enforcement;
+
+       if (ice_fw_supports_link_override(pi->hw)) {
+               struct ice_link_default_override_tlv tlv;
+
+               if (ice_get_link_default_override(&tlv, pi))
+                       return;
+
+               if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
+                       cfg->module_compliance_enforcement |=
+                               ICE_LINK_OVERRIDE_STRICT_MODE;
+       }
 }
 
 /**
  * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
+ * @pi: port information structure
  * @cfg: PHY configuration data to set FEC mode
  * @fec: FEC mode to configure
- *
- * Caller should copy ice_aqc_get_phy_caps_data.caps ICE_AQC_PHY_EN_AUTO_FEC
- * (bit 7) and ice_aqc_get_phy_caps_data.link_fec_options to cfg.caps
- * ICE_AQ_PHY_ENA_AUTO_FEC (bit 7) and cfg.link_fec_options before calling.
  */
-void
-ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
+enum ice_status
+ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
+               enum ice_fec_mode fec)
 {
+       struct ice_aqc_get_phy_caps_data *pcaps;
+       enum ice_status status;
+
+       if (!pi || !cfg)
+               return ICE_ERR_BAD_PTR;
+
+       pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+       if (!pcaps)
+               return ICE_ERR_NO_MEMORY;
+
+       status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
+                                    NULL);
+       if (status)
+               goto out;
+
+       cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
+       cfg->link_fec_opt = pcaps->link_fec_options;
+
        switch (fec) {
        case ICE_FEC_BASER:
                /* Clear RS bits, and AND BASE-R ability
                 * bits and OR request bits.
                 */
                cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
-                                    ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
+                       ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
                cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
-                                    ICE_AQC_PHY_FEC_25G_KR_REQ;
+                       ICE_AQC_PHY_FEC_25G_KR_REQ;
                break;
        case ICE_FEC_RS:
                /* Clear BASE-R bits, and AND RS ability
@@ -2460,7 +2990,7 @@ ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
                 */
                cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
                cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
-                                    ICE_AQC_PHY_FEC_25G_RS_544_REQ;
+                       ICE_AQC_PHY_FEC_25G_RS_544_REQ;
                break;
        case ICE_FEC_NONE:
                /* Clear all FEC option bits. */
@@ -2469,8 +2999,28 @@ ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
        case ICE_FEC_AUTO:
                /* AND auto FEC bit, and all caps bits. */
                cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
+               cfg->link_fec_opt |= pcaps->link_fec_options;
+               break;
+       default:
+               status = ICE_ERR_PARAM;
                break;
        }
+
+       if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
+               struct ice_link_default_override_tlv tlv;
+
+               if (ice_get_link_default_override(&tlv, pi))
+                       goto out;
+
+               if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
+                   (tlv.options & ICE_LINK_OVERRIDE_EN))
+                       cfg->link_fec_opt = tlv.fec_options;
+       }
+
+out:
+       kfree(pcaps);
+
+       return status;
 }
 
 /**
@@ -2889,10 +3439,10 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
                   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
                   struct ice_sq_cd *cd)
 {
-       u16 i, sum_header_size, sum_q_size = 0;
        struct ice_aqc_add_tx_qgrp *list;
        struct ice_aqc_add_txqs *cmd;
        struct ice_aq_desc desc;
+       u16 i, sum_size = 0;
 
        cmd = &desc.params.add_txqs;
 
@@ -2904,18 +3454,13 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
        if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
                return ICE_ERR_PARAM;
 
-       sum_header_size = num_qgrps *
-               (sizeof(*qg_list) - sizeof(*qg_list->txqs));
-
-       list = qg_list;
-       for (i = 0; i < num_qgrps; i++) {
-               struct ice_aqc_add_txqs_perq *q = list->txqs;
-
-               sum_q_size += list->num_txqs * sizeof(*q);
-               list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
+       for (i = 0, list = qg_list; i < num_qgrps; i++) {
+               sum_size += struct_size(list, txqs, list->num_txqs);
+               list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
+                                                     list->num_txqs);
        }
 
-       if (buf_size != (sum_header_size + sum_q_size))
+       if (buf_size != sum_size)
                return ICE_ERR_PARAM;
 
        desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
@@ -2943,6 +3488,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
                   enum ice_disq_rst_src rst_src, u16 vmvf_num,
                   struct ice_sq_cd *cd)
 {
+       struct ice_aqc_dis_txq_item *item;
        struct ice_aqc_dis_txqs *cmd;
        struct ice_aq_desc desc;
        enum ice_status status;
@@ -2992,16 +3538,16 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
         */
        desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 
-       for (i = 0; i < num_qgrps; ++i) {
-               /* Calculate the size taken up by the queue IDs in this group */
-               sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);
-
-               /* Add the size of the group header */
-               sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);
+       for (i = 0, item = qg_list; i < num_qgrps; i++) {
+               u16 item_size = struct_size(item, q_id, item->num_qs);
 
                /* If the num of queues is even, add 2 bytes of padding */
-               if ((qg_list[i].num_qs % 2) == 0)
-                       sz += 2;
+               if ((item->num_qs % 2) == 0)
+                       item_size += 2;
+
+               sz += item_size;
+
+               item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
        }
 
        if (buf_size != sz)
@@ -3390,24 +3936,32 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
                struct ice_sq_cd *cd)
 {
        enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
-       struct ice_aqc_dis_txq_item qg_list;
+       struct ice_aqc_dis_txq_item *qg_list;
        struct ice_q_ctx *q_ctx;
-       u16 i;
+       struct ice_hw *hw;
+       u16 i, buf_size;
 
        if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
                return ICE_ERR_CFG;
 
+       hw = pi->hw;
+
        if (!num_queues) {
                /* if queue is disabled already yet the disable queue command
                 * has to be sent to complete the VF reset, then call
                 * ice_aq_dis_lan_txq without any queue information
                 */
                if (rst_src)
-                       return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
+                       return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
                                                  vmvf_num, NULL);
                return ICE_ERR_CFG;
        }
 
+       buf_size = struct_size(qg_list, q_id, 1);
+       qg_list = kzalloc(buf_size, GFP_KERNEL);
+       if (!qg_list)
+               return ICE_ERR_NO_MEMORY;
+
        mutex_lock(&pi->sched_lock);
 
        for (i = 0; i < num_queues; i++) {
@@ -3416,23 +3970,22 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
                node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
                if (!node)
                        continue;
-               q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
+               q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
                if (!q_ctx) {
-                       ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
+                       ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
                                  q_handles[i]);
                        continue;
                }
                if (q_ctx->q_handle != q_handles[i]) {
-                       ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
+                       ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
                                  q_ctx->q_handle, q_handles[i]);
                        continue;
                }
-               qg_list.parent_teid = node->info.parent_teid;
-               qg_list.num_qs = 1;
-               qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
-               status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
-                                           sizeof(qg_list), rst_src, vmvf_num,
-                                           cd);
+               qg_list->parent_teid = node->info.parent_teid;
+               qg_list->num_qs = 1;
+               qg_list->q_id[0] = cpu_to_le16(q_ids[i]);
+               status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
+                                           vmvf_num, cd);
 
                if (status)
                        break;
@@ -3440,6 +3993,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
                q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
        }
        mutex_unlock(&pi->sched_lock);
+       kfree(qg_list);
        return status;
 }
 
@@ -3652,17 +4206,135 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
  */
 enum ice_status
 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
-                    struct ice_aqc_get_elem *buf)
+                    struct ice_aqc_txsched_elem_data *buf)
 {
        u16 buf_size, num_elem_ret = 0;
        enum ice_status status;
 
        buf_size = sizeof(*buf);
        memset(buf, 0, buf_size);
-       buf->generic[0].node_teid = cpu_to_le32(node_teid);
+       buf->node_teid = cpu_to_le32(node_teid);
        status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
                                          NULL);
        if (status || num_elem_ret != 1)
                ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
        return status;
 }
+
+/**
+ * ice_fw_supports_link_override
+ * @hw: pointer to the hardware structure
+ *
+ * Checks if the firmware supports link override
+ */
+bool ice_fw_supports_link_override(struct ice_hw *hw)
+{
+       /* Currently, only supported for E810 devices */
+       if (hw->mac_type != ICE_MAC_E810)
+               return false;
+
+       if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
+               if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
+                       return true;
+               if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
+                   hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
+                       return true;
+       } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
+               return true;
+       }
+
+       return false;
+}
+
+/**
+ * ice_get_link_default_override
+ * @ldo: pointer to the link default override struct
+ * @pi: pointer to the port info struct
+ *
+ * Gets the link default override for a port
+ */
+enum ice_status
+ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
+                             struct ice_port_info *pi)
+{
+       u16 i, tlv, tlv_len, tlv_start, buf, offset;
+       struct ice_hw *hw = pi->hw;
+       enum ice_status status;
+
+       status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
+                                       ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
+       if (status) {
+               ice_debug(hw, ICE_DBG_INIT,
+                         "Failed to read link override TLV.\n");
+               return status;
+       }
+
+       /* Each port has its own config; calculate for our port */
+       tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
+               ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
+
+       /* link options first */
+       status = ice_read_sr_word(hw, tlv_start, &buf);
+       if (status) {
+               ice_debug(hw, ICE_DBG_INIT,
+                         "Failed to read override link options.\n");
+               return status;
+       }
+       ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
+       ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
+               ICE_LINK_OVERRIDE_PHY_CFG_S;
+
+       /* link PHY config */
+       offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
+       status = ice_read_sr_word(hw, offset, &buf);
+       if (status) {
+               ice_debug(hw, ICE_DBG_INIT,
+                         "Failed to read override phy config.\n");
+               return status;
+       }
+       ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
+
+       /* PHY types low */
+       offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
+       for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
+               status = ice_read_sr_word(hw, (offset + i), &buf);
+               if (status) {
+                       ice_debug(hw, ICE_DBG_INIT,
+                                 "Failed to read override link options.\n");
+                       return status;
+               }
+               /* shift 16 bits at a time to fill 64 bits */
+               ldo->phy_type_low |= ((u64)buf << (i * 16));
+       }
+
+       /* PHY types high */
+       offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
+               ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
+       for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
+               status = ice_read_sr_word(hw, (offset + i), &buf);
+               if (status) {
+                       ice_debug(hw, ICE_DBG_INIT,
+                                 "Failed to read override link options.\n");
+                       return status;
+               }
+               /* shift 16 bits at a time to fill 64 bits */
+               ldo->phy_type_high |= ((u64)buf << (i * 16));
+       }
+
+       return status;
+}
+
+/**
+ * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
+ * @caps: get PHY capability data
+ */
+bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
+{
+       if (caps->caps & ICE_AQC_PHY_AN_MODE ||
+           caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
+                                      ICE_AQC_PHY_AN_EN_CLAUSE73 |
+                                      ICE_AQC_PHY_AN_EN_CLAUSE37))
+               return true;
+
+       return false;
+}
index 9b9e50d..33a681a 100644 (file)
@@ -87,6 +87,9 @@ enum ice_status
 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
                    struct ice_aqc_get_phy_caps_data *caps,
                    struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
+                enum ice_adminq_opc opc, struct ice_sq_cd *cd);
 void
 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
                    u16 link_speeds_bitmap);
@@ -95,17 +98,33 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
                        struct ice_sq_cd *cd);
 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
 enum ice_status
-ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
+ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
                   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd);
+bool ice_fw_supports_link_override(struct ice_hw *hw);
+enum ice_status
+ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
+                             struct ice_port_info *pi);
+bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
+
+enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
+enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
 enum ice_status
 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
           bool ena_auto_link_update);
+enum ice_status
+ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
+              enum ice_fc_mode fc);
+bool
+ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *caps,
+                       struct ice_aqc_set_phy_cfg_data *cfg);
 void
-ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec);
-void
-ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
+ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
+                        struct ice_aqc_get_phy_caps_data *caps,
                         struct ice_aqc_set_phy_cfg_data *cfg);
 enum ice_status
+ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
+               enum ice_fec_mode fec);
+enum ice_status
 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
                           struct ice_sq_cd *cd);
 enum ice_status
@@ -152,5 +171,5 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
                  u64 *prev_stat, u64 *cur_stat);
 enum ice_status
 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
-                    struct ice_aqc_get_elem *buf);
+                    struct ice_aqc_txsched_elem_data *buf);
 #endif /* _ICE_COMMON_H_ */
index adb8dab..2cecc9d 100644 (file)
@@ -1362,7 +1362,7 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
                            struct ice_aqc_port_ets_elem *buf)
 {
        struct ice_sched_node *node, *tc_node;
-       struct ice_aqc_get_elem elem;
+       struct ice_aqc_txsched_elem_data elem;
        enum ice_status status = 0;
        u32 teid1, teid2;
        u8 i, j;
@@ -1404,7 +1404,7 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
                /* new TC */
                status = ice_sched_query_elem(pi->hw, teid2, &elem);
                if (!status)
-                       status = ice_sched_add_node(pi, 1, &elem.generic[0]);
+                       status = ice_sched_add_node(pi, 1, &elem);
                if (status)
                        break;
                /* update the TC number */
index ee138f9..d7e5e61 100644 (file)
@@ -87,7 +87,7 @@
 struct ice_lldp_org_tlv {
        __be16 typelen;
        __be32 ouisubtype;
-       u8 tlvinfo[1];
+       u8 tlvinfo[];
 } __packed;
 
 struct ice_cee_tlv_hdr {
@@ -109,7 +109,7 @@ struct ice_cee_feat_tlv {
 #define ICE_CEE_FEAT_TLV_WILLING_M     0x40
 #define ICE_CEE_FEAT_TLV_ERR_M         0x20
        u8 subtype;
-       u8 tlvinfo[1];
+       u8 tlvinfo[];
 };
 
 struct ice_cee_app_prio {
index a73d06e..43da2dc 100644 (file)
@@ -312,6 +312,7 @@ int ice_devlink_create_port(struct ice_pf *pf)
        struct devlink *devlink = priv_to_devlink(pf);
        struct ice_vsi *vsi = ice_get_main_vsi(pf);
        struct device *dev = ice_pf_to_dev(pf);
+       struct devlink_port_attrs attrs = {};
        int err;
 
        if (!vsi) {
@@ -319,8 +320,9 @@ int ice_devlink_create_port(struct ice_pf *pf)
                return -EIO;
        }
 
-       devlink_port_attrs_set(&pf->devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
-                              pf->hw.pf_id, false, 0, NULL, 0);
+       attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+       attrs.phys.port_number = pf->hw.pf_id;
+       devlink_port_attrs_set(&pf->devlink_port, &attrs);
        err = devlink_port_register(devlink, &pf->devlink_port, pf->hw.pf_id);
        if (err) {
                dev_err(dev, "devlink_port_register failed: %d\n", err);
@@ -397,12 +399,60 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
        return 0;
 }
 
+/**
+ * ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities
+ * @devlink: the devlink instance
+ * @extack: extended ACK response structure
+ * @data: on exit points to snapshot data buffer
+ *
+ * This function is called in response to the DEVLINK_CMD_REGION_TRIGGER for
+ * the device-caps devlink region. It captures a snapshot of the device
+ * capabilities reported by firmware.
+ *
+ * @returns zero on success, and updates the data pointer. Returns a non-zero
+ * error code on failure.
+ */
+static int
+ice_devlink_devcaps_snapshot(struct devlink *devlink,
+                            struct netlink_ext_ack *extack, u8 **data)
+{
+       struct ice_pf *pf = devlink_priv(devlink);
+       struct device *dev = ice_pf_to_dev(pf);
+       struct ice_hw *hw = &pf->hw;
+       enum ice_status status;
+       void *devcaps;
+
+       devcaps = vzalloc(ICE_AQ_MAX_BUF_LEN);
+       if (!devcaps)
+               return -ENOMEM;
+
+       status = ice_aq_list_caps(hw, devcaps, ICE_AQ_MAX_BUF_LEN, NULL,
+                                 ice_aqc_opc_list_dev_caps, NULL);
+       if (status) {
+               dev_dbg(dev, "ice_aq_list_caps: failed to read device capabilities, err %d aq_err %d\n",
+                       status, hw->adminq.sq_last_status);
+               NL_SET_ERR_MSG_MOD(extack, "Failed to read device capabilities");
+               vfree(devcaps);
+               return -EIO;
+       }
+
+       *data = (u8 *)devcaps;
+
+       return 0;
+}
+
 static const struct devlink_region_ops ice_nvm_region_ops = {
        .name = "nvm-flash",
        .destructor = vfree,
        .snapshot = ice_devlink_nvm_snapshot,
 };
 
+static const struct devlink_region_ops ice_devcaps_region_ops = {
+       .name = "device-caps",
+       .destructor = vfree,
+       .snapshot = ice_devlink_devcaps_snapshot,
+};
+
 /**
  * ice_devlink_init_regions - Initialize devlink regions
  * @pf: the PF device structure
@@ -424,6 +474,15 @@ void ice_devlink_init_regions(struct ice_pf *pf)
                        PTR_ERR(pf->nvm_region));
                pf->nvm_region = NULL;
        }
+
+       pf->devcaps_region = devlink_region_create(devlink,
+                                                  &ice_devcaps_region_ops, 10,
+                                                  ICE_AQ_MAX_BUF_LEN);
+       if (IS_ERR(pf->devcaps_region)) {
+               dev_err(dev, "failed to create device-caps devlink region, err %ld\n",
+                       PTR_ERR(pf->devcaps_region));
+               pf->devcaps_region = NULL;
+       }
 }
 
 /**
@@ -436,4 +495,6 @@ void ice_devlink_destroy_regions(struct ice_pf *pf)
 {
        if (pf->nvm_region)
                devlink_region_destroy(pf->nvm_region);
+       if (pf->devcaps_region)
+               devlink_region_destroy(pf->devcaps_region);
 }
index 68c3800..06b93e9 100644 (file)
@@ -179,7 +179,6 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
        orom = &nvm->orom;
 
        strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
-       strscpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
 
        /* Display NVM version (from which the firmware version can be
         * determined) which contains more pertinent information.
@@ -967,12 +966,8 @@ static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
 {
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_aqc_set_phy_cfg_data config = { 0 };
-       struct ice_aqc_get_phy_caps_data *caps;
        struct ice_vsi *vsi = np->vsi;
-       u8 sw_cfg_caps, sw_cfg_fec;
        struct ice_port_info *pi;
-       enum ice_status status;
-       int err = 0;
 
        pi = vsi->port_info;
        if (!pi)
@@ -984,54 +979,26 @@ static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
                return -EOPNOTSUPP;
        }
 
-       /* Get last SW configuration */
-       caps = kzalloc(sizeof(*caps), GFP_KERNEL);
-       if (!caps)
-               return -ENOMEM;
-
-       status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
-                                    caps, NULL);
-       if (status) {
-               err = -EAGAIN;
-               goto done;
-       }
-
-       /* Copy SW configuration returned from PHY caps to PHY config */
-       ice_copy_phy_caps_to_cfg(caps, &config);
-       sw_cfg_caps = caps->caps;
-       sw_cfg_fec = caps->link_fec_options;
-
-       /* Get toloplogy caps, then copy PHY FEC topoloy caps to PHY config */
-       memset(caps, 0, sizeof(*caps));
-
-       status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
-                                    caps, NULL);
-       if (status) {
-               err = -EAGAIN;
-               goto done;
-       }
-
-       config.caps |= (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
-       config.link_fec_opt = caps->link_fec_options;
+       /* Proceed only if requesting different FEC mode */
+       if (pi->phy.curr_user_fec_req == req_fec)
+               return 0;
 
-       ice_cfg_phy_fec(&config, req_fec);
+       /* Copy the current user PHY configuration. The current user PHY
+        * configuration is initialized during probe from PHY capabilities
+        * software mode, and updated on set PHY configuration.
+        */
+       memcpy(&config, &pi->phy.curr_user_phy_cfg, sizeof(config));
 
-       /* If FEC mode has changed, then set PHY configuration and enable AN. */
-       if ((config.caps & ICE_AQ_PHY_ENA_AUTO_FEC) !=
-           (sw_cfg_caps & ICE_AQC_PHY_EN_AUTO_FEC) ||
-           config.link_fec_opt != sw_cfg_fec) {
-               if (caps->caps & ICE_AQC_PHY_AN_MODE)
-                       config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+       ice_cfg_phy_fec(pi, &config, req_fec);
+       config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
 
-               status = ice_aq_set_phy_cfg(pi->hw, pi->lport, &config, NULL);
+       if (ice_aq_set_phy_cfg(pi->hw, pi, &config, NULL))
+               return -EAGAIN;
 
-               if (status)
-                       err = -EAGAIN;
-       }
+       /* Save requested FEC config */
+       pi->phy.curr_user_fec_req = req_fec;
 
-done:
-       kfree(caps);
-       return err;
+       return 0;
 }
 
 /**
@@ -1229,6 +1196,17 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 
        bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS);
 
+       /* Do not allow change to link-down-on-close when Total Port Shutdown
+        * is enabled.
+        */
+       if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, change_flags) &&
+           test_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) {
+               dev_err(dev, "Setting link-down-on-close not supported on this port\n");
+               set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
+               ret = -EINVAL;
+               goto ethtool_exit;
+       }
+
        if (test_bit(ICE_FLAG_FW_LLDP_AGENT, change_flags)) {
                if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) {
                        enum ice_status status;
@@ -1316,6 +1294,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
                change_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags);
                ret = -EAGAIN;
        }
+ethtool_exit:
        clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
        return ret;
 }
@@ -1420,6 +1399,77 @@ ice_get_ethtool_stats(struct net_device *netdev,
        }
 }
 
+#define ICE_PHY_TYPE_LOW_MASK_MIN_1G   (ICE_PHY_TYPE_LOW_100BASE_TX | \
+                                        ICE_PHY_TYPE_LOW_100M_SGMII)
+
+#define ICE_PHY_TYPE_LOW_MASK_MIN_25G  (ICE_PHY_TYPE_LOW_MASK_MIN_1G | \
+                                        ICE_PHY_TYPE_LOW_1000BASE_T | \
+                                        ICE_PHY_TYPE_LOW_1000BASE_SX | \
+                                        ICE_PHY_TYPE_LOW_1000BASE_LX | \
+                                        ICE_PHY_TYPE_LOW_1000BASE_KX | \
+                                        ICE_PHY_TYPE_LOW_1G_SGMII | \
+                                        ICE_PHY_TYPE_LOW_2500BASE_T | \
+                                        ICE_PHY_TYPE_LOW_2500BASE_X | \
+                                        ICE_PHY_TYPE_LOW_2500BASE_KX | \
+                                        ICE_PHY_TYPE_LOW_5GBASE_T | \
+                                        ICE_PHY_TYPE_LOW_5GBASE_KR | \
+                                        ICE_PHY_TYPE_LOW_10GBASE_T | \
+                                        ICE_PHY_TYPE_LOW_10G_SFI_DA | \
+                                        ICE_PHY_TYPE_LOW_10GBASE_SR | \
+                                        ICE_PHY_TYPE_LOW_10GBASE_LR | \
+                                        ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 | \
+                                        ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC | \
+                                        ICE_PHY_TYPE_LOW_10G_SFI_C2C)
+
+#define ICE_PHY_TYPE_LOW_MASK_100G     (ICE_PHY_TYPE_LOW_100GBASE_CR4 | \
+                                        ICE_PHY_TYPE_LOW_100GBASE_SR4 | \
+                                        ICE_PHY_TYPE_LOW_100GBASE_LR4 | \
+                                        ICE_PHY_TYPE_LOW_100GBASE_KR4 | \
+                                        ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC | \
+                                        ICE_PHY_TYPE_LOW_100G_CAUI4 | \
+                                        ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC | \
+                                        ICE_PHY_TYPE_LOW_100G_AUI4 | \
+                                        ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 | \
+                                        ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 | \
+                                        ICE_PHY_TYPE_LOW_100GBASE_CP2 | \
+                                        ICE_PHY_TYPE_LOW_100GBASE_SR2 | \
+                                        ICE_PHY_TYPE_LOW_100GBASE_DR)
+
+#define ICE_PHY_TYPE_HIGH_MASK_100G    (ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 | \
+                                        ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC |\
+                                        ICE_PHY_TYPE_HIGH_100G_CAUI2 | \
+                                        ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \
+                                        ICE_PHY_TYPE_HIGH_100G_AUI2)
+
+/**
+ * ice_mask_min_supported_speeds
+ * @phy_types_high: PHY type high
+ * @phy_types_low: PHY type low to apply minimum supported speeds mask
+ *
+ * Apply minimum supported speeds mask to PHY type low. These are the speeds
+ * for ethtool supported link mode.
+ */
+static
+void ice_mask_min_supported_speeds(u64 phy_types_high, u64 *phy_types_low)
+{
+       /* if QSFP connection with 100G speed, minimum supported speed is 25G */
+       if (*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G ||
+           phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G)
+               *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G;
+       else
+               *phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G;
+}
+
+#define ice_ethtool_advertise_link_mode(aq_link_speed, ethtool_link_mode)    \
+       do {                                                                 \
+               if (req_speeds & (aq_link_speed) ||                          \
+                   (!req_speeds &&                                          \
+                    (adv_phy_type_lo & phy_type_mask_lo ||                  \
+                     adv_phy_type_hi & phy_type_mask_hi)))                  \
+                       ethtool_link_ksettings_add_link_mode(ks, advertising,\
+                                                       ethtool_link_mode);  \
+       } while (0)
+
 /**
  * ice_phy_type_to_ethtool - convert the phy_types to ethtool link modes
  * @netdev: network interface device structure
@@ -1430,277 +1480,312 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
                        struct ethtool_link_ksettings *ks)
 {
        struct ice_netdev_priv *np = netdev_priv(netdev);
-       struct ice_link_status *hw_link_info;
-       bool need_add_adv_mode = false;
        struct ice_vsi *vsi = np->vsi;
-       u64 phy_types_high;
-       u64 phy_types_low;
+       struct ice_pf *pf = vsi->back;
+       u64 phy_type_mask_lo = 0;
+       u64 phy_type_mask_hi = 0;
+       u64 adv_phy_type_lo = 0;
+       u64 adv_phy_type_hi = 0;
+       u64 phy_types_high = 0;
+       u64 phy_types_low = 0;
+       u16 req_speeds;
+
+       req_speeds = vsi->port_info->phy.link_info.req_speeds;
+
+       /* Check if lenient mode is supported and enabled, or in strict mode.
+        *
+        * In lenient mode the Supported link modes are the PHY types without
+        * media. The Advertising link mode is either 1. the user requested
+        * speed, 2. the override PHY mask, or 3. the PHY types with media.
+        *
+        * In strict mode Supported link mode are the PHY type with media,
+        * and Advertising link modes are the media PHY type or the speed
+        * requested by user.
+        */
+       if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) {
+               struct ice_link_default_override_tlv *ldo;
 
-       hw_link_info = &vsi->port_info->phy.link_info;
-       phy_types_low = vsi->port_info->phy.phy_type_low;
-       phy_types_high = vsi->port_info->phy.phy_type_high;
+               ldo = &pf->link_dflt_override;
+               phy_types_low = le64_to_cpu(pf->nvm_phy_type_lo);
+               phy_types_high = le64_to_cpu(pf->nvm_phy_type_hi);
+
+               ice_mask_min_supported_speeds(phy_types_high, &phy_types_low);
+
+               /* If override enabled and PHY mask set, then
+                * Advertising link mode is the intersection of the PHY
+                * types without media and the override PHY mask.
+                */
+               if (ldo->options & ICE_LINK_OVERRIDE_EN &&
+                   (ldo->phy_type_low || ldo->phy_type_high)) {
+                       adv_phy_type_lo =
+                               le64_to_cpu(pf->nvm_phy_type_lo) &
+                               ldo->phy_type_low;
+                       adv_phy_type_hi =
+                               le64_to_cpu(pf->nvm_phy_type_hi) &
+                               ldo->phy_type_high;
+               }
+       } else {
+               phy_types_low = vsi->port_info->phy.phy_type_low;
+               phy_types_high = vsi->port_info->phy.phy_type_high;
+       }
+
+       /* If Advertising link mode PHY type is not using override PHY type,
+        * then use PHY type with media.
+        */
+       if (!adv_phy_type_lo && !adv_phy_type_hi) {
+               adv_phy_type_lo = vsi->port_info->phy.phy_type_low;
+               adv_phy_type_hi = vsi->port_info->phy.phy_type_high;
+       }
 
        ethtool_link_ksettings_zero_link_mode(ks, supported);
        ethtool_link_ksettings_zero_link_mode(ks, advertising);
 
-       if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
-           phy_types_low & ICE_PHY_TYPE_LOW_100M_SGMII) {
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_100BASE_TX |
+                          ICE_PHY_TYPE_LOW_100M_SGMII;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     100baseT_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100MB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            100baseT_Full);
+
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100MB,
+                                               100baseT_Full);
        }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
-           phy_types_low & ICE_PHY_TYPE_LOW_1G_SGMII) {
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_T |
+                          ICE_PHY_TYPE_LOW_1G_SGMII;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     1000baseT_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            1000baseT_Full);
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB,
+                                               1000baseT_Full);
        }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX) {
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_KX;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     1000baseKX_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            1000baseKX_Full);
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB,
+                                               1000baseKX_Full);
        }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_SX ||
-           phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_LX) {
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_1000BASE_SX |
+                          ICE_PHY_TYPE_LOW_1000BASE_LX;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     1000baseX_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_1000MB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            1000baseX_Full);
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_1000MB,
+                                               1000baseX_Full);
        }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T) {
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_2500BASE_T;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     2500baseT_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            2500baseT_Full);
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_2500MB,
+                                               2500baseT_Full);
        }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_X ||
-           phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX) {
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_2500BASE_X |
+                          ICE_PHY_TYPE_LOW_2500BASE_KX;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     2500baseX_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_2500MB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            2500baseX_Full);
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_2500MB,
+                                               2500baseX_Full);
        }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
-           phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR) {
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_5GBASE_T |
+                          ICE_PHY_TYPE_LOW_5GBASE_KR;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     5000baseT_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_5GB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            5000baseT_Full);
-       }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
-           phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_DA ||
-           phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
-           phy_types_low & ICE_PHY_TYPE_LOW_10G_SFI_C2C) {
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_5GB,
+                                               5000baseT_Full);
+       }
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_T |
+                          ICE_PHY_TYPE_LOW_10G_SFI_DA |
+                          ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC |
+                          ICE_PHY_TYPE_LOW_10G_SFI_C2C;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     10000baseT_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            10000baseT_Full);
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB,
+                                               10000baseT_Full);
        }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1) {
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_KR_CR1;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     10000baseKR_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            10000baseKR_Full);
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB,
+                                               10000baseKR_Full);
        }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_SR) {
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_SR;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     10000baseSR_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            10000baseSR_Full);
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB,
+                                               10000baseSR_Full);
        }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_LR) {
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_10GBASE_LR;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     10000baseLR_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_10GB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            10000baseLR_Full);
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_10GB,
+                                               10000baseLR_Full);
        }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25G_AUI_C2C) {
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_T |
+                          ICE_PHY_TYPE_LOW_25GBASE_CR |
+                          ICE_PHY_TYPE_LOW_25GBASE_CR_S |
+                          ICE_PHY_TYPE_LOW_25GBASE_CR1 |
+                          ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC |
+                          ICE_PHY_TYPE_LOW_25G_AUI_C2C;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     25000baseCR_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            25000baseCR_Full);
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB,
+                                               25000baseCR_Full);
        }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_SR ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_LR) {
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_SR |
+                          ICE_PHY_TYPE_LOW_25GBASE_LR;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     25000baseSR_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            25000baseSR_Full);
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB,
+                                               25000baseSR_Full);
        }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
-           phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1) {
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_25GBASE_KR |
+                          ICE_PHY_TYPE_LOW_25GBASE_KR_S |
+                          ICE_PHY_TYPE_LOW_25GBASE_KR1;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     25000baseKR_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_25GB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            25000baseKR_Full);
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_25GB,
+                                               25000baseKR_Full);
        }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_KR4;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     40000baseKR4_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            40000baseKR4_Full);
-       }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC ||
-           phy_types_low & ICE_PHY_TYPE_LOW_40G_XLAUI) {
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB,
+                                               40000baseKR4_Full);
+       }
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_CR4 |
+                          ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC |
+                          ICE_PHY_TYPE_LOW_40G_XLAUI;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     40000baseCR4_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            40000baseCR4_Full);
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB,
+                                               40000baseCR4_Full);
        }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_SR4) {
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_SR4;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     40000baseSR4_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            40000baseSR4_Full);
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB,
+                                               40000baseSR4_Full);
        }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_LR4) {
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_40GBASE_LR4;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     40000baseLR4_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_40GB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            40000baseLR4_Full);
-       }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50G_LAUI2 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI2 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50G_AUI1) {
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_40GB,
+                                               40000baseLR4_Full);
+       }
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_CR2 |
+                          ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC |
+                          ICE_PHY_TYPE_LOW_50G_LAUI2 |
+                          ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC |
+                          ICE_PHY_TYPE_LOW_50G_AUI2 |
+                          ICE_PHY_TYPE_LOW_50GBASE_CP |
+                          ICE_PHY_TYPE_LOW_50GBASE_SR |
+                          ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC |
+                          ICE_PHY_TYPE_LOW_50G_AUI1;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     50000baseCR2_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            50000baseCR2_Full);
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB,
+                                               50000baseCR2_Full);
        }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_KR2 |
+                          ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     50000baseKR2_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            50000baseKR2_Full);
-       }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_SR2 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR2 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_FR ||
-           phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_LR) {
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB,
+                                               50000baseKR2_Full);
+       }
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_50GBASE_SR2 |
+                          ICE_PHY_TYPE_LOW_50GBASE_LR2 |
+                          ICE_PHY_TYPE_LOW_50GBASE_FR |
+                          ICE_PHY_TYPE_LOW_50GBASE_LR;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     50000baseSR2_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_50GB)
-                       ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                            50000baseSR2_Full);
-       }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC ||
-           phy_types_low & ICE_PHY_TYPE_LOW_100G_CAUI4 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC ||
-           phy_types_low & ICE_PHY_TYPE_LOW_100G_AUI4 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2  ||
-           phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC ||
-           phy_types_high & ICE_PHY_TYPE_HIGH_100G_CAUI2 ||
-           phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC ||
-           phy_types_high & ICE_PHY_TYPE_HIGH_100G_AUI2) {
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_50GB,
+                                               50000baseSR2_Full);
+       }
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_CR4 |
+                          ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC |
+                          ICE_PHY_TYPE_LOW_100G_CAUI4 |
+                          ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC |
+                          ICE_PHY_TYPE_LOW_100G_AUI4 |
+                          ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 |
+                          ICE_PHY_TYPE_LOW_100GBASE_CP2;
+       phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC |
+                          ICE_PHY_TYPE_HIGH_100G_CAUI2 |
+                          ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC |
+                          ICE_PHY_TYPE_HIGH_100G_AUI2;
+       if (phy_types_low & phy_type_mask_lo ||
+           phy_types_high & phy_type_mask_hi) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     100000baseCR4_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
-                       need_add_adv_mode = true;
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+                                               100000baseCR4_Full);
        }
-       if (need_add_adv_mode) {
-               need_add_adv_mode = false;
-               ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                    100000baseCR4_Full);
-       }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR4 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) {
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_SR4 |
+                          ICE_PHY_TYPE_LOW_100GBASE_SR2;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     100000baseSR4_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
-                       need_add_adv_mode = true;
-       }
-       if (need_add_adv_mode) {
-               need_add_adv_mode = false;
-               ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                    100000baseSR4_Full);
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+                                               100000baseSR4_Full);
        }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_LR4 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_DR) {
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_LR4 |
+                          ICE_PHY_TYPE_LOW_100GBASE_DR;
+       if (phy_types_low & phy_type_mask_lo) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     100000baseLR4_ER4_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
-                       need_add_adv_mode = true;
-       }
-       if (need_add_adv_mode) {
-               need_add_adv_mode = false;
-               ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                    100000baseLR4_ER4_Full);
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+                                               100000baseLR4_ER4_Full);
        }
-       if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
-           phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
-           phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) {
+
+       phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_KR4 |
+                          ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4;
+       phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4;
+       if (phy_types_low & phy_type_mask_lo ||
+           phy_types_high & phy_type_mask_hi) {
                ethtool_link_ksettings_add_link_mode(ks, supported,
                                                     100000baseKR4_Full);
-               if (!hw_link_info->req_speeds ||
-                   hw_link_info->req_speeds & ICE_AQ_LINK_SPEED_100GB)
-                       need_add_adv_mode = true;
+               ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+                                               100000baseKR4_Full);
        }
-       if (need_add_adv_mode)
-               ethtool_link_ksettings_add_link_mode(ks, advertising,
-                                                    100000baseKR4_Full);
 
        /* Autoneg PHY types */
        if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
@@ -2128,18 +2213,18 @@ static int
 ice_set_link_ksettings(struct net_device *netdev,
                       const struct ethtool_link_ksettings *ks)
 {
-       u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT, lport = 0;
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ethtool_link_ksettings safe_ks, copy_ks;
        struct ice_aqc_get_phy_caps_data *abilities;
+       u8 autoneg, timeout = TEST_SET_BITS_TIMEOUT;
        u16 adv_link_speed, curr_link_speed, idx;
        struct ice_aqc_set_phy_cfg_data config;
        struct ice_pf *pf = np->vsi->back;
        struct ice_port_info *p;
        u8 autoneg_changed = 0;
        enum ice_status status;
-       u64 phy_type_high;
-       u64 phy_type_low;
+       u64 phy_type_high = 0;
+       u64 phy_type_low = 0;
        int err = 0;
        bool linkup;
 
@@ -2163,6 +2248,18 @@ ice_set_link_ksettings(struct net_device *netdev,
            p->phy.link_info.link_info & ICE_AQ_LINK_UP)
                return -EOPNOTSUPP;
 
+       abilities = kzalloc(sizeof(*abilities), GFP_KERNEL);
+       if (!abilities)
+               return -ENOMEM;
+
+       /* Get the PHY capabilities based on media */
+       status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_TOPO_CAP,
+                                    abilities, NULL);
+       if (status) {
+               err = -EAGAIN;
+               goto done;
+       }
+
        /* copy the ksettings to copy_ks to avoid modifying the original */
        memcpy(&copy_ks, ks, sizeof(copy_ks));
 
@@ -2179,8 +2276,12 @@ ice_set_link_ksettings(struct net_device *netdev,
         */
        if (!bitmap_subset(copy_ks.link_modes.advertising,
                           safe_ks.link_modes.supported,
-                          __ETHTOOL_LINK_MODE_MASK_NBITS))
-               return -EINVAL;
+                          __ETHTOOL_LINK_MODE_MASK_NBITS)) {
+               if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags))
+                       netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
+               err = -EINVAL;
+               goto done;
+       }
 
        /* get our own copy of the bits to check against */
        memset(&safe_ks, 0, sizeof(safe_ks));
@@ -2197,33 +2298,27 @@ ice_set_link_ksettings(struct net_device *netdev,
        /* If copy_ks.base and safe_ks.base are not the same now, then they are
         * trying to set something that we do not support.
         */
-       if (memcmp(&copy_ks.base, &safe_ks.base, sizeof(copy_ks.base)))
-               return -EOPNOTSUPP;
+       if (memcmp(&copy_ks.base, &safe_ks.base, sizeof(copy_ks.base))) {
+               err = -EOPNOTSUPP;
+               goto done;
+       }
 
        while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
                timeout--;
-               if (!timeout)
-                       return -EBUSY;
+               if (!timeout) {
+                       err = -EBUSY;
+                       goto done;
+               }
                usleep_range(TEST_SET_BITS_SLEEP_MIN, TEST_SET_BITS_SLEEP_MAX);
        }
 
-       abilities = kzalloc(sizeof(*abilities), GFP_KERNEL);
-       if (!abilities)
-               return -ENOMEM;
-
-       /* Get the current PHY config */
-       status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_SW_CFG, abilities,
-                                    NULL);
-       if (status) {
-               err = -EAGAIN;
-               goto done;
-       }
+       /* Copy the current user PHY configuration. The current user PHY
+        * configuration is initialized during probe from PHY capabilities
+        * software mode, and updated on set PHY configuration.
+        */
+       memcpy(&config, &p->phy.curr_user_phy_cfg, sizeof(config));
 
-       /* Copy abilities to config in case autoneg is not set below */
-       memset(&config, 0, sizeof(config));
-       config.caps = abilities->caps & ~ICE_AQC_PHY_AN_MODE;
-       if (abilities->caps & ICE_AQC_PHY_AN_MODE)
-               config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
+       config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
 
        /* Check autoneg */
        err = ice_setup_autoneg(p, &safe_ks, &config, autoneg, &autoneg_changed,
@@ -2258,29 +2353,44 @@ ice_set_link_ksettings(struct net_device *netdev,
                goto done;
        }
 
-       /* copy over the rest of the abilities */
-       config.low_power_ctrl = abilities->low_power_ctrl;
-       config.eee_cap = abilities->eee_cap;
-       config.eeer_value = abilities->eeer_value;
-       config.link_fec_opt = abilities->link_fec_options;
-
        /* save the requested speeds */
        p->phy.link_info.req_speeds = adv_link_speed;
 
        /* set link and auto negotiation so changes take effect */
        config.caps |= ICE_AQ_PHY_ENA_LINK;
 
-       if (phy_type_low || phy_type_high) {
-               config.phy_type_high = cpu_to_le64(phy_type_high) &
-                       abilities->phy_type_high;
-               config.phy_type_low = cpu_to_le64(phy_type_low) &
-                       abilities->phy_type_low;
-       } else {
+       /* check if there is a PHY type for the requested advertised speed */
+       if (!(phy_type_low || phy_type_high)) {
+               netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
                err = -EAGAIN;
-               netdev_info(netdev, "Nothing changed. No PHY_TYPE is corresponded to advertised link speed.\n");
                goto done;
        }
 
+       /* intersect requested advertised speed PHY types with media PHY types
+        * for set PHY configuration
+        */
+       config.phy_type_high = cpu_to_le64(phy_type_high) &
+                       abilities->phy_type_high;
+       config.phy_type_low = cpu_to_le64(phy_type_low) &
+                       abilities->phy_type_low;
+
+       if (!(config.phy_type_high || config.phy_type_low)) {
+               /* If there is no intersection and lenient mode is enabled, then
+                * intersect the requested advertised speed with NVM media type
+                * PHY types.
+                */
+               if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags)) {
+                       config.phy_type_high = cpu_to_le64(phy_type_high) &
+                                              pf->nvm_phy_type_hi;
+                       config.phy_type_low = cpu_to_le64(phy_type_low) &
+                                             pf->nvm_phy_type_lo;
+               } else {
+                       netdev_info(netdev, "The selected speed is not supported by the current media. Please select a link speed that is supported by the current media.\n");
+                       err = -EAGAIN;
+                       goto done;
+               }
+       }
+
        /* If link is up put link down */
        if (p->phy.link_info.link_info & ICE_AQ_LINK_UP) {
                /* Tell the OS link is going down, the link will go
@@ -2292,12 +2402,15 @@ ice_set_link_ksettings(struct net_device *netdev,
        }
 
        /* make the aq call */
-       status = ice_aq_set_phy_cfg(&pf->hw, lport, &config, NULL);
+       status = ice_aq_set_phy_cfg(&pf->hw, p, &config, NULL);
        if (status) {
                netdev_info(netdev, "Set phy config failed,\n");
                err = -EAGAIN;
+               goto done;
        }
 
+       /* Save speed request */
+       p->phy.curr_user_speed_req = adv_link_speed;
 done:
        kfree(abilities);
        clear_bit(__ICE_CFG_BUSY, pf->state);
@@ -2874,8 +2987,8 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
        if (status)
                goto out;
 
-       pause->autoneg = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ?
-                       AUTONEG_ENABLE : AUTONEG_DISABLE);
+       pause->autoneg = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE :
+                                                            AUTONEG_DISABLE;
 
        if (dcbx_cfg->pfc.pfcena)
                /* PFC enabled so report LFC as off */
@@ -2943,8 +3056,8 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
                return -EIO;
        }
 
-       is_an = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ?
-                       AUTONEG_ENABLE : AUTONEG_DISABLE);
+       is_an = ice_is_phy_caps_an_enabled(pcaps) ? AUTONEG_ENABLE :
+                                                   AUTONEG_DISABLE;
 
        kfree(pcaps);
 
@@ -3323,6 +3436,58 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
        return 0;
 }
 
+/**
+ * ice_get_wol - get current Wake on LAN configuration
+ * @netdev: network interface device structure
+ * @wol: Ethtool structure to retrieve WoL settings
+ */
+static void ice_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+       struct ice_netdev_priv *np = netdev_priv(netdev);
+       struct ice_pf *pf = np->vsi->back;
+
+       if (np->vsi->type != ICE_VSI_PF)
+               netdev_warn(netdev, "Wake on LAN is not supported on this interface!\n");
+
+       /* Get WoL settings based on the HW capability */
+       if (ice_is_wol_supported(pf)) {
+               wol->supported = WAKE_MAGIC;
+               wol->wolopts = pf->wol_ena ? WAKE_MAGIC : 0;
+       } else {
+               wol->supported = 0;
+               wol->wolopts = 0;
+       }
+}
+
+/**
+ * ice_set_wol - set Wake on LAN on supported device
+ * @netdev: network interface device structure
+ * @wol: Ethtool structure to set WoL
+ */
+static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+       struct ice_netdev_priv *np = netdev_priv(netdev);
+       struct ice_vsi *vsi = np->vsi;
+       struct ice_pf *pf = vsi->back;
+
+       if (vsi->type != ICE_VSI_PF || !ice_is_wol_supported(pf))
+               return -EOPNOTSUPP;
+
+       /* only magic packet is supported */
+       if (wol->wolopts && wol->wolopts != WAKE_MAGIC)
+               return -EOPNOTSUPP;
+
+       /* Set WoL only if there is a new value */
+       if (pf->wol_ena != !!wol->wolopts) {
+               pf->wol_ena = !!wol->wolopts;
+               device_set_wakeup_enable(ice_pf_to_dev(pf), pf->wol_ena);
+               netdev_dbg(netdev, "WoL magic packet %sabled\n",
+                          pf->wol_ena ? "en" : "dis");
+       }
+
+       return 0;
+}
+
 enum ice_container_type {
        ICE_RX_CONTAINER,
        ICE_TX_CONTAINER,
@@ -3806,6 +3971,8 @@ static const struct ethtool_ops ice_ethtool_ops = {
        .get_drvinfo            = ice_get_drvinfo,
        .get_regs_len           = ice_get_regs_len,
        .get_regs               = ice_get_regs,
+       .get_wol                = ice_get_wol,
+       .set_wol                = ice_set_wol,
        .get_msglevel           = ice_get_msglevel,
        .set_msglevel           = ice_set_msglevel,
        .self_test              = ice_self_test,
@@ -3848,6 +4015,8 @@ static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
        .get_drvinfo            = ice_get_drvinfo,
        .get_regs_len           = ice_get_regs_len,
        .get_regs               = ice_get_regs,
+       .get_wol                = ice_get_wol,
+       .set_wol                = ice_set_wol,
        .get_msglevel           = ice_get_msglevel,
        .set_msglevel           = ice_set_msglevel,
        .get_link               = ethtool_op_get_link,
index 4420fc0..3c217e5 100644 (file)
@@ -1121,8 +1121,7 @@ static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
        u16 size;
        u32 i;
 
-       size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) *
-                                   (ICE_PKG_CNT - 1));
+       size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
        pkg_info = kzalloc(size, GFP_KERNEL);
        if (!pkg_info)
                return ICE_ERR_NO_MEMORY;
@@ -1180,7 +1179,7 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
        u32 seg_count;
        u32 i;
 
-       if (len < sizeof(*pkg))
+       if (len < struct_size(pkg, seg_offset, 1))
                return ICE_ERR_BUF_TOO_SHORT;
 
        if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
@@ -1195,7 +1194,7 @@ static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
                return ICE_ERR_CFG;
 
        /* make sure segment array fits in package length */
-       if (len < sizeof(*pkg) + ((seg_count - 1) * sizeof(pkg->seg_offset)))
+       if (len < struct_size(pkg, seg_offset, seg_count))
                return ICE_ERR_BUF_TOO_SHORT;
 
        /* all segments must fit within length */
@@ -1300,7 +1299,7 @@ ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
        }
 
        /* Check if FW is compatible with the OS package */
-       size = struct_size(pkg, pkg_info, ICE_PKG_CNT - 1);
+       size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
        pkg = kzalloc(size, GFP_KERNEL);
        if (!pkg)
                return ICE_ERR_NO_MEMORY;
@@ -1764,13 +1763,13 @@ ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
                goto ice_create_tunnel_err;
 
        sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
-                                           sizeof(*sect_rx));
+                                           struct_size(sect_rx, tcam, 1));
        if (!sect_rx)
                goto ice_create_tunnel_err;
        sect_rx->count = cpu_to_le16(1);
 
        sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
-                                           sizeof(*sect_tx));
+                                           struct_size(sect_tx, tcam, 1));
        if (!sect_tx)
                goto ice_create_tunnel_err;
        sect_tx->count = cpu_to_le16(1);
@@ -1847,7 +1846,7 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
        }
 
        /* size of section - there is at least one entry */
-       size = struct_size(sect_rx, tcam, count - 1);
+       size = struct_size(sect_rx, tcam, count);
 
        bld = ice_pkg_buf_alloc(hw);
        if (!bld) {
@@ -3324,10 +3323,10 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
                        u32 id;
 
                        id = ice_sect_id(blk, ICE_VEC_TBL);
-                       p = (struct ice_pkg_es *)
-                               ice_pkg_buf_alloc_section(bld, id, sizeof(*p) +
-                                                         vec_size -
-                                                         sizeof(p->es[0]));
+                       p = ice_pkg_buf_alloc_section(bld, id,
+                                                     struct_size(p, es, 1) +
+                                                     vec_size -
+                                                     sizeof(p->es[0]));
 
                        if (!p)
                                return ICE_ERR_MAX_LIMIT;
@@ -3360,8 +3359,8 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
                        u32 id;
 
                        id = ice_sect_id(blk, ICE_PROF_TCAM);
-                       p = (struct ice_prof_id_section *)
-                               ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+                       p = ice_pkg_buf_alloc_section(bld, id,
+                                                     struct_size(p, entry, 1));
 
                        if (!p)
                                return ICE_ERR_MAX_LIMIT;
@@ -3396,8 +3395,8 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
                        u32 id;
 
                        id = ice_sect_id(blk, ICE_XLT1);
-                       p = (struct ice_xlt1_section *)
-                               ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+                       p = ice_pkg_buf_alloc_section(bld, id,
+                                                     struct_size(p, value, 1));
 
                        if (!p)
                                return ICE_ERR_MAX_LIMIT;
@@ -3431,8 +3430,8 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
                case ICE_VSI_MOVE:
                case ICE_VSIG_REM:
                        id = ice_sect_id(blk, ICE_XLT2);
-                       p = (struct ice_xlt2_section *)
-                               ice_pkg_buf_alloc_section(bld, id, sizeof(*p));
+                       p = ice_pkg_buf_alloc_section(bld, id,
+                                                     struct_size(p, value, 1));
 
                        if (!p)
                                return ICE_ERR_MAX_LIMIT;
index a6f391e..c1c99a2 100644 (file)
@@ -22,7 +22,7 @@ struct ice_fv {
 struct ice_pkg_hdr {
        struct ice_pkg_ver pkg_format_ver;
        __le32 seg_count;
-       __le32 seg_offset[1];
+       __le32 seg_offset[];
 };
 
 /* generic segment */
@@ -53,12 +53,12 @@ struct ice_device_id_entry {
 struct ice_seg {
        struct ice_generic_seg_hdr hdr;
        __le32 device_table_count;
-       struct ice_device_id_entry device_table[1];
+       struct ice_device_id_entry device_table[];
 };
 
 struct ice_nvm_table {
        __le32 table_count;
-       __le32 vers[1];
+       __le32 vers[];
 };
 
 struct ice_buf {
@@ -68,7 +68,7 @@ struct ice_buf {
 
 struct ice_buf_table {
        __le32 buf_count;
-       struct ice_buf buf_array[1];
+       struct ice_buf buf_array[];
 };
 
 /* global metadata specific segment */
@@ -101,11 +101,12 @@ struct ice_section_entry {
 struct ice_buf_hdr {
        __le16 section_count;
        __le16 data_end;
-       struct ice_section_entry section_entry[1];
+       struct ice_section_entry section_entry[];
 };
 
 #define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
-       sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz))
+       struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
+       (ent_sz))
 
 /* ice package section IDs */
 #define ICE_SID_XLT0_SW                        10
@@ -198,17 +199,17 @@ struct ice_label {
 
 struct ice_label_section {
        __le16 count;
-       struct ice_label label[1];
+       struct ice_label label[];
 };
 
 #define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
-       sizeof(struct ice_label_section) - sizeof(struct ice_label), \
-       sizeof(struct ice_label))
+       struct_size((struct ice_label_section *)0, label, 1) - \
+       sizeof(struct ice_label), sizeof(struct ice_label))
 
 struct ice_sw_fv_section {
        __le16 count;
        __le16 base_offset;
-       struct ice_fv fv[1];
+       struct ice_fv fv[];
 };
 
 /* The BOOST TCAM stores the match packet header in reverse order, meaning
@@ -245,30 +246,30 @@ struct ice_boost_tcam_entry {
 struct ice_boost_tcam_section {
        __le16 count;
        __le16 reserved;
-       struct ice_boost_tcam_entry tcam[1];
+       struct ice_boost_tcam_entry tcam[];
 };
 
 #define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
-       sizeof(struct ice_boost_tcam_section) - \
+       struct_size((struct ice_boost_tcam_section *)0, tcam, 1) - \
        sizeof(struct ice_boost_tcam_entry), \
        sizeof(struct ice_boost_tcam_entry))
 
 struct ice_xlt1_section {
        __le16 count;
        __le16 offset;
-       u8 value[1];
-} __packed;
+       u8 value[];
+};
 
 struct ice_xlt2_section {
        __le16 count;
        __le16 offset;
-       __le16 value[1];
+       __le16 value[];
 };
 
 struct ice_prof_redir_section {
        __le16 count;
        __le16 offset;
-       u8 redir_value[1];
+       u8 redir_value[];
 };
 
 /* package buffer building */
@@ -327,7 +328,7 @@ struct ice_tunnel_table {
 struct ice_pkg_es {
        __le16 count;
        __le16 offset;
-       struct ice_fv_word es[1];
+       struct ice_fv_word es[];
 };
 
 struct ice_es {
@@ -461,8 +462,8 @@ struct ice_prof_tcam_entry {
 
 struct ice_prof_id_section {
        __le16 count;
-       struct ice_prof_tcam_entry entry[1];
-} __packed;
+       struct ice_prof_tcam_entry entry[];
+};
 
 struct ice_prof_tcam {
        u32 sid;
index 1086c9f..92e4abc 100644 (file)
 #define VSIQF_FD_CNT_FD_GCNT_M                 ICE_M(0x3FFF, 0)
 #define VSIQF_HKEY_MAX_INDEX                   12
 #define VSIQF_HLUT_MAX_INDEX                   15
+#define PFPM_APM                               0x000B8080
+#define PFPM_APM_APME_M                                BIT(0)
+#define PFPM_WUFC                              0x0009DC00
+#define PFPM_WUFC_MAG_M                                BIT(1)
+#define PFPM_WUS                               0x0009DB80
+#define PFPM_WUS_LNKC_M                                BIT(0)
+#define PFPM_WUS_MAG_M                         BIT(1)
+#define PFPM_WUS_MNG_M                         BIT(3)
+#define PFPM_WUS_FW_RST_WK_M                   BIT(31)
 #define VFINT_DYN_CTLN(_i)                     (0x00003800 + ((_i) * 4))
 #define VFINT_DYN_CTLN_CLEARPBA_M              BIT(1)
 #define PRTRPB_RDPC                            0x000AC260
index 28b46cc..8f6a191 100644 (file)
@@ -1194,7 +1194,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
                for (i = 0; i < vsi->alloc_txq; i++) {
                        if (vsi->tx_rings[i]) {
                                kfree_rcu(vsi->tx_rings[i], rcu);
-                               vsi->tx_rings[i] = NULL;
+                               WRITE_ONCE(vsi->tx_rings[i], NULL);
                        }
                }
        }
@@ -1202,7 +1202,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
                for (i = 0; i < vsi->alloc_rxq; i++) {
                        if (vsi->rx_rings[i]) {
                                kfree_rcu(vsi->rx_rings[i], rcu);
-                               vsi->rx_rings[i] = NULL;
+                               WRITE_ONCE(vsi->rx_rings[i], NULL);
                        }
                }
        }
@@ -1235,7 +1235,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
                ring->vsi = vsi;
                ring->dev = dev;
                ring->count = vsi->num_tx_desc;
-               vsi->tx_rings[i] = ring;
+               WRITE_ONCE(vsi->tx_rings[i], ring);
        }
 
        /* Allocate Rx rings */
@@ -1254,7 +1254,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
                ring->netdev = vsi->netdev;
                ring->dev = dev;
                ring->count = vsi->num_rx_desc;
-               vsi->rx_rings[i] = ring;
+               WRITE_ONCE(vsi->rx_rings[i], ring);
        }
 
        return 0;
@@ -1468,6 +1468,30 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
 }
 
 /**
+ * ice_pf_state_is_nominal - checks the PF for nominal state
+ * @pf: pointer to PF to check
+ *
+ * Check the PF's state for a collection of bits that would indicate
+ * the PF is in a state that would inhibit normal operation for
+ * driver functionality.
+ *
+ * Returns true if PF is in a nominal state, false otherwise
+ */
+bool ice_pf_state_is_nominal(struct ice_pf *pf)
+{
+       DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
+
+       if (!pf)
+               return false;
+
+       bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
+       if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
+               return false;
+
+       return true;
+}
+
+/**
  * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
  * @vsi: the VSI to be updated
  */
@@ -1667,7 +1691,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
        u16 q_idx = 0;
        int err = 0;
 
-       qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL);
+       qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
        if (!qg_buf)
                return -ENOMEM;
 
index d80e6af..981f3a1 100644 (file)
@@ -8,6 +8,8 @@
 
 const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
 
+bool ice_pf_state_is_nominal(struct ice_pf *pf);
+
 void ice_update_eth_stats(struct ice_vsi *vsi);
 
 int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
index 082825e..16a4096 100644 (file)
@@ -5,6 +5,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <generated/utsrelease.h>
 #include "ice.h"
 #include "ice_base.h"
 #include "ice_lib.h"
 #include "ice_dcb_nl.h"
 #include "ice_devlink.h"
 
-#define DRV_VERSION_MAJOR 0
-#define DRV_VERSION_MINOR 8
-#define DRV_VERSION_BUILD 2
-
-#define DRV_VERSION    __stringify(DRV_VERSION_MAJOR) "." \
-                       __stringify(DRV_VERSION_MINOR) "." \
-                       __stringify(DRV_VERSION_BUILD) "-k"
 #define DRV_SUMMARY    "Intel(R) Ethernet Connection E800 Series Linux Driver"
-const char ice_drv_ver[] = DRV_VERSION;
 static const char ice_driver_string[] = DRV_SUMMARY;
 static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
 
@@ -32,7 +25,6 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION(DRV_SUMMARY);
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
 MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
 
 static int debug = -1;
@@ -620,6 +612,7 @@ static void ice_print_topo_conflict(struct ice_vsi *vsi)
 void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
 {
        struct ice_aqc_get_phy_caps_data *caps;
+       const char *an_advertised;
        enum ice_status status;
        const char *fec_req;
        const char *speed;
@@ -718,6 +711,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
        caps = kzalloc(sizeof(*caps), GFP_KERNEL);
        if (!caps) {
                fec_req = "Unknown";
+               an_advertised = "Unknown";
                goto done;
        }
 
@@ -726,6 +720,8 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
        if (status)
                netdev_info(vsi->netdev, "Get phy capability failed.\n");
 
+       an_advertised = ice_is_phy_caps_an_enabled(caps) ? "On" : "Off";
+
        if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
            caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
                fec_req = "RS-FEC";
@@ -738,8 +734,8 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
        kfree(caps);
 
 done:
-       netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
-                   speed, fec_req, fec, an, fc);
+       netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg Advertised: %s, Autoneg Negotiated: %s, Flow Control: %s\n",
+                   speed, fec_req, fec, an_advertised, an, fc);
        ice_print_topo_conflict(vsi);
 }
 
@@ -804,10 +800,6 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
                dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n",
                        pi->lport);
 
-       /* if the old link up/down and speed is the same as the new */
-       if (link_up == old_link && link_speed == old_link_speed)
-               return result;
-
        vsi = ice_get_main_vsi(pf);
        if (!vsi || !vsi->port_info)
                return -EINVAL;
@@ -825,6 +817,10 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
                }
        }
 
+       /* if the old link up/down and speed is the same as the new */
+       if (link_up == old_link && link_speed == old_link_speed)
+               return result;
+
        ice_dcb_rebuild(pf);
        ice_vsi_link_event(vsi, link_up);
        ice_print_link_msg(vsi, link_up);
@@ -1137,10 +1133,15 @@ static void ice_service_task_complete(struct ice_pf *pf)
 /**
  * ice_service_task_stop - stop service task and cancel works
  * @pf: board private structure
+ *
+ * Return 0 if the __ICE_SERVICE_DIS bit was not already set,
+ * 1 otherwise.
  */
-static void ice_service_task_stop(struct ice_pf *pf)
+static int ice_service_task_stop(struct ice_pf *pf)
 {
-       set_bit(__ICE_SERVICE_DIS, pf->state);
+       int ret;
+
+       ret = test_and_set_bit(__ICE_SERVICE_DIS, pf->state);
 
        if (pf->serv_tmr.function)
                del_timer_sync(&pf->serv_tmr);
@@ -1148,6 +1149,7 @@ static void ice_service_task_stop(struct ice_pf *pf)
                cancel_work_sync(&pf->serv_task);
 
        clear_bit(__ICE_SERVICE_SCHED, pf->state);
+       return ret;
 }
 
 /**
@@ -1382,25 +1384,23 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
            link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
                goto out;
 
-       cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+       /* Use the current user PHY configuration. The current user PHY
+        * configuration is initialized during probe from PHY capabilities
+        * software mode, and updated on set PHY configuration.
+        */
+       cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
        if (!cfg) {
                retcode = -ENOMEM;
                goto out;
        }
 
-       cfg->phy_type_low = pcaps->phy_type_low;
-       cfg->phy_type_high = pcaps->phy_type_high;
-       cfg->caps = pcaps->caps | ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
-       cfg->low_power_ctrl = pcaps->low_power_ctrl;
-       cfg->eee_cap = pcaps->eee_cap;
-       cfg->eeer_value = pcaps->eeer_value;
-       cfg->link_fec_opt = pcaps->link_fec_options;
+       cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
        if (link_up)
                cfg->caps |= ICE_AQ_PHY_ENA_LINK;
        else
                cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;
 
-       retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi->lport, cfg, NULL);
+       retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
        if (retcode) {
                dev_err(dev, "Failed to set phy config, VSI %d error %d\n",
                        vsi->vsi_num, retcode);
@@ -1414,8 +1414,312 @@ out:
 }
 
 /**
- * ice_check_media_subtask - Check for media; bring link up if detected.
+ * ice_init_nvm_phy_type - Initialize the NVM PHY type
+ * @pi: port info structure
+ *
+ * Initialize nvm_phy_type_[low|high] for link lenient mode support
+ */
+static int ice_init_nvm_phy_type(struct ice_port_info *pi)
+{
+       struct ice_aqc_get_phy_caps_data *pcaps;
+       struct ice_pf *pf = pi->hw->back;
+       enum ice_status status;
+       int err = 0;
+
+       pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+       if (!pcaps)
+               return -ENOMEM;
+
+       status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_NVM_CAP, pcaps,
+                                    NULL);
+
+       if (status) {
+               dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
+               err = -EIO;
+               goto out;
+       }
+
+       pf->nvm_phy_type_hi = pcaps->phy_type_high;
+       pf->nvm_phy_type_lo = pcaps->phy_type_low;
+
+out:
+       kfree(pcaps);
+       return err;
+}
+
+/**
+ * ice_init_link_dflt_override - Initialize link default override
+ * @pi: port info structure
+ *
+ * Initialize link default override and PHY total port shutdown during probe
+ */
+static void ice_init_link_dflt_override(struct ice_port_info *pi)
+{
+       struct ice_link_default_override_tlv *ldo;
+       struct ice_pf *pf = pi->hw->back;
+
+       ldo = &pf->link_dflt_override;
+       if (ice_get_link_default_override(ldo, pi))
+               return;
+
+       if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
+               return;
+
+       /* Enable Total Port Shutdown (override/replace link-down-on-close
+        * ethtool private flag) for ports with Port Disable bit set.
+        */
+       set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
+       set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
+}
+
+/**
+ * ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings
+ * @pi: port info structure
+ *
+ * If default override is enabled, initialized the user PHY cfg speed and FEC
+ * settings using the default override mask from the NVM.
+ *
+ * The PHY should only be configured with the default override settings the
+ * first time media is available. The __ICE_LINK_DEFAULT_OVERRIDE_PENDING state
+ * is used to indicate that the user PHY cfg default override is initialized
+ * and the PHY has not been configured with the default override settings. The
+ * state is set here, and cleared in ice_configure_phy the first time the PHY is
+ * configured.
+ */
+static void ice_init_phy_cfg_dflt_override(struct ice_port_info *pi)
+{
+       struct ice_link_default_override_tlv *ldo;
+       struct ice_aqc_set_phy_cfg_data *cfg;
+       struct ice_phy_info *phy = &pi->phy;
+       struct ice_pf *pf = pi->hw->back;
+
+       ldo = &pf->link_dflt_override;
+
+       /* If link default override is enabled, use to mask NVM PHY capabilities
+        * for speed and FEC default configuration.
+        */
+       cfg = &phy->curr_user_phy_cfg;
+
+       if (ldo->phy_type_low || ldo->phy_type_high) {
+               cfg->phy_type_low = pf->nvm_phy_type_lo &
+                                   cpu_to_le64(ldo->phy_type_low);
+               cfg->phy_type_high = pf->nvm_phy_type_hi &
+                                    cpu_to_le64(ldo->phy_type_high);
+       }
+       cfg->link_fec_opt = ldo->fec_options;
+       phy->curr_user_fec_req = ICE_FEC_AUTO;
+
+       set_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);
+}
+
+/**
+ * ice_init_phy_user_cfg - Initialize the PHY user configuration
+ * @pi: port info structure
+ *
+ * Initialize the current user PHY configuration, speed, FEC, and FC requested
+ * mode to default. The PHY defaults are from get PHY capabilities topology
+ * with media so call when media is first available. An error is returned if
+ * called when media is not available. The PHY initialization completed state is
+ * set here.
+ *
+ * These configurations are used when setting PHY
+ * configuration. The user PHY configuration is updated on set PHY
+ * configuration. Returns 0 on success, negative on failure
+ */
+static int ice_init_phy_user_cfg(struct ice_port_info *pi)
+{
+       struct ice_aqc_get_phy_caps_data *pcaps;
+       struct ice_phy_info *phy = &pi->phy;
+       struct ice_pf *pf = pi->hw->back;
+       enum ice_status status;
+       struct ice_vsi *vsi;
+       int err = 0;
+
+       if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
+               return -EIO;
+
+       vsi = ice_get_main_vsi(pf);
+       if (!vsi)
+               return -EINVAL;
+
+       pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+       if (!pcaps)
+               return -ENOMEM;
+
+       status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
+                                    NULL);
+       if (status) {
+               dev_err(ice_pf_to_dev(pf), "Get PHY capability failed.\n");
+               err = -EIO;
+               goto err_out;
+       }
+
+       ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);
+
+       /* check if lenient mode is supported and enabled */
+       if (ice_fw_supports_link_override(&vsi->back->hw) &&
+           !(pcaps->module_compliance_enforcement &
+             ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
+               set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);
+
+               /* if link default override is enabled, initialize user PHY
+                * configuration with link default override values
+                */
+               if (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN) {
+                       ice_init_phy_cfg_dflt_override(pi);
+                       goto out;
+               }
+       }
+
+       /* if link default override is not enabled, initialize PHY using
+        * topology with media
+        */
+       phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
+                                                     pcaps->link_fec_options);
+       phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);
+
+out:
+       phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
+       set_bit(__ICE_PHY_INIT_COMPLETE, pf->state);
+err_out:
+       kfree(pcaps);
+       return err;
+}
+
+/**
+ * ice_configure_phy - configure PHY
+ * @vsi: VSI of PHY
+ *
+ * Set the PHY configuration. If the current PHY configuration is the same as
+ * the curr_user_phy_cfg, then do nothing to avoid link flap. Otherwise
+ * configure the based get PHY capabilities for topology with media.
+ */
+static int ice_configure_phy(struct ice_vsi *vsi)
+{
+       struct device *dev = ice_pf_to_dev(vsi->back);
+       struct ice_aqc_get_phy_caps_data *pcaps;
+       struct ice_aqc_set_phy_cfg_data *cfg;
+       struct ice_port_info *pi;
+       enum ice_status status;
+       int err = 0;
+
+       pi = vsi->port_info;
+       if (!pi)
+               return -EINVAL;
+
+       /* Ensure we have media as we cannot configure a medialess port */
+       if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
+               return -EPERM;
+
+       ice_print_topo_conflict(vsi);
+
+       if (vsi->port_info->phy.link_info.topo_media_conflict ==
+           ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
+               return -EPERM;
+
+       if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
+               return ice_force_phys_link_state(vsi, true);
+
+       pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
+       if (!pcaps)
+               return -ENOMEM;
+
+       /* Get current PHY config */
+       status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
+                                    NULL);
+       if (status) {
+               dev_err(dev, "Failed to get PHY configuration, VSI %d error %s\n",
+                       vsi->vsi_num, ice_stat_str(status));
+               err = -EIO;
+               goto done;
+       }
+
+       /* If PHY enable link is configured and configuration has not changed,
+        * there's nothing to do
+        */
+       if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
+           ice_phy_caps_equals_cfg(pcaps, &pi->phy.curr_user_phy_cfg))
+               goto done;
+
+       /* Use PHY topology as baseline for configuration */
+       memset(pcaps, 0, sizeof(*pcaps));
+       status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
+                                    NULL);
+       if (status) {
+               dev_err(dev, "Failed to get PHY topology, VSI %d error %s\n",
+                       vsi->vsi_num, ice_stat_str(status));
+               err = -EIO;
+               goto done;
+       }
+
+       cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+       if (!cfg) {
+               err = -ENOMEM;
+               goto done;
+       }
+
+       ice_copy_phy_caps_to_cfg(pi, pcaps, cfg);
+
+       /* Speed - If default override pending, use curr_user_phy_cfg set in
+        * ice_init_phy_user_cfg_ldo.
+        */
+       if (test_and_clear_bit(__ICE_LINK_DEFAULT_OVERRIDE_PENDING,
+                              vsi->back->state)) {
+               cfg->phy_type_low = pi->phy.curr_user_phy_cfg.phy_type_low;
+               cfg->phy_type_high = pi->phy.curr_user_phy_cfg.phy_type_high;
+       } else {
+               u64 phy_low = 0, phy_high = 0;
+
+               ice_update_phy_type(&phy_low, &phy_high,
+                                   pi->phy.curr_user_speed_req);
+               cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
+               cfg->phy_type_high = pcaps->phy_type_high &
+                                    cpu_to_le64(phy_high);
+       }
+
+       /* Can't provide what was requested; use PHY capabilities */
+       if (!cfg->phy_type_low && !cfg->phy_type_high) {
+               cfg->phy_type_low = pcaps->phy_type_low;
+               cfg->phy_type_high = pcaps->phy_type_high;
+       }
+
+       /* FEC */
+       ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req);
+
+       /* Can't provide what was requested; use PHY capabilities */
+       if (cfg->link_fec_opt !=
+           (cfg->link_fec_opt & pcaps->link_fec_options)) {
+               cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
+               cfg->link_fec_opt = pcaps->link_fec_options;
+       }
+
+       /* Flow Control - always supported; no need to check against
+        * capabilities
+        */
+       ice_cfg_phy_fc(pi, cfg, pi->phy.curr_user_fc_req);
+
+       /* Enable link and link update */
+       cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
+
+       status = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);
+       if (status) {
+               dev_err(dev, "Failed to set phy config, VSI %d error %s\n",
+                       vsi->vsi_num, ice_stat_str(status));
+               err = -EIO;
+       }
+
+       kfree(cfg);
+done:
+       kfree(pcaps);
+       return err;
+}
+
+/**
+ * ice_check_media_subtask - Check for media
  * @pf: pointer to PF struct
+ *
+ * If media is available, then initialize PHY user configuration if it is not
+ * been, and configure the PHY if the interface is up.
  */
 static void ice_check_media_subtask(struct ice_pf *pf)
 {
@@ -1423,15 +1727,12 @@ static void ice_check_media_subtask(struct ice_pf *pf)
        struct ice_vsi *vsi;
        int err;
 
-       vsi = ice_get_main_vsi(pf);
-       if (!vsi)
+       /* No need to check for media if it's already present */
+       if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
                return;
 
-       /* No need to check for media if it's already present or the interface
-        * is down
-        */
-       if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) ||
-           test_bit(__ICE_DOWN, vsi->state))
+       vsi = ice_get_main_vsi(pf);
+       if (!vsi)
                return;
 
        /* Refresh link info and check if media is present */
@@ -1441,10 +1742,19 @@ static void ice_check_media_subtask(struct ice_pf *pf)
                return;
 
        if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
-               err = ice_force_phys_link_state(vsi, true);
-               if (err)
+               if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state))
+                       ice_init_phy_user_cfg(pi);
+
+               /* PHY settings are reset on media insertion, reconfigure
+                * PHY to preserve settings.
+                */
+               if (test_bit(__ICE_DOWN, vsi->state) &&
+                   test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))
                        return;
-               clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
+
+               err = ice_configure_phy(vsi);
+               if (!err)
+                       clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
 
                /* A Link Status Event will be generated; the event handler
                 * will complete bringing the interface up
@@ -1702,7 +2012,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
                xdp_ring->netdev = NULL;
                xdp_ring->dev = dev;
                xdp_ring->count = vsi->num_tx_desc;
-               vsi->xdp_rings[i] = xdp_ring;
+               WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
                if (ice_setup_tx_ring(xdp_ring))
                        goto free_xdp_rings;
                ice_set_ring_xdp(xdp_ring);
@@ -2949,6 +3259,27 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
 }
 
 /**
+ * ice_is_wol_supported - get NVM state of WoL
+ * @pf: board private structure
+ *
+ * Check if WoL is supported based on the HW configuration.
+ * Returns true if NVM supports and enables WoL for this port, false otherwise
+ */
+bool ice_is_wol_supported(struct ice_pf *pf)
+{
+       struct ice_hw *hw = &pf->hw;
+       u16 wol_ctrl;
+
+       /* A bit set to 1 in the NVM Software Reserved Word 2 (WoL control
+        * word) indicates WoL is not supported on the corresponding PF ID.
+        */
+       if (ice_read_sr_word(hw, ICE_SR_NVM_WOL_CFG, &wol_ctrl))
+               return false;
+
+       return !(BIT(hw->pf_id) & wol_ctrl);
+}
+
+/**
  * ice_vsi_recfg_qs - Change the number of queues on a VSI
  * @vsi: VSI being changed
  * @new_rx: new number of Rx queues
@@ -3168,11 +3499,11 @@ static enum ice_status ice_send_version(struct ice_pf *pf)
 {
        struct ice_driver_ver dv;
 
-       dv.major_ver = DRV_VERSION_MAJOR;
-       dv.minor_ver = DRV_VERSION_MINOR;
-       dv.build_ver = DRV_VERSION_BUILD;
+       dv.major_ver = 0xff;
+       dv.minor_ver = 0xff;
+       dv.build_ver = 0xff;
        dv.subbuild_ver = 0;
-       strscpy((char *)dv.driver_string, DRV_VERSION,
+       strscpy((char *)dv.driver_string, UTS_RELEASE,
                sizeof(dv.driver_string));
        return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
 }
@@ -3296,6 +3627,33 @@ dflt_pkg_load:
 }
 
 /**
+ * ice_print_wake_reason - show the wake up cause in the log
+ * @pf: pointer to the PF struct
+ */
+static void ice_print_wake_reason(struct ice_pf *pf)
+{
+       u32 wus = pf->wakeup_reason;
+       const char *wake_str;
+
+       /* if no wake event, nothing to print */
+       if (!wus)
+               return;
+
+       if (wus & PFPM_WUS_LNKC_M)
+               wake_str = "Link\n";
+       else if (wus & PFPM_WUS_MAG_M)
+               wake_str = "Magic Packet\n";
+       else if (wus & PFPM_WUS_MNG_M)
+               wake_str = "Management\n";
+       else if (wus & PFPM_WUS_FW_RST_WK_M)
+               wake_str = "Firmware Reset\n";
+       else
+               wake_str = "Unknown\n";
+
+       dev_info(ice_pf_to_dev(pf), "Wake reason: %s", wake_str);
+}
+
+/**
  * ice_probe - Device initialization routine
  * @pdev: PCI device information struct
  * @ent: entry in ice_pci_tbl
@@ -3463,7 +3821,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
        err = ice_send_version(pf);
        if (err) {
                dev_err(dev, "probe failed sending driver version %s. error: %d\n",
-                       ice_drv_ver, err);
+                       UTS_RELEASE, err);
                goto err_alloc_sw_unroll;
        }
 
@@ -3476,8 +3834,53 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
                goto err_alloc_sw_unroll;
        }
 
+       err = ice_init_nvm_phy_type(pf->hw.port_info);
+       if (err) {
+               dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
+               goto err_alloc_sw_unroll;
+       }
+
+       err = ice_update_link_info(pf->hw.port_info);
+       if (err) {
+               dev_err(dev, "ice_update_link_info failed: %d\n", err);
+               goto err_alloc_sw_unroll;
+       }
+
+       ice_init_link_dflt_override(pf->hw.port_info);
+
+       /* if media available, initialize PHY settings */
+       if (pf->hw.port_info->phy.link_info.link_info &
+           ICE_AQ_MEDIA_AVAILABLE) {
+               err = ice_init_phy_user_cfg(pf->hw.port_info);
+               if (err) {
+                       dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
+                       goto err_alloc_sw_unroll;
+               }
+
+               if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
+                       struct ice_vsi *vsi = ice_get_main_vsi(pf);
+
+                       if (vsi)
+                               ice_configure_phy(vsi);
+               }
+       } else {
+               set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
+       }
+
        ice_verify_cacheline_size(pf);
 
+       /* Save wakeup reason register for later use */
+       pf->wakeup_reason = rd32(hw, PFPM_WUS);
+
+       /* check for a power management event */
+       ice_print_wake_reason(pf);
+
+       /* clear wake status, all bits */
+       wr32(hw, PFPM_WUS, U32_MAX);
+
+       /* Disable WoL at init, wait for user to enable */
+       device_set_wakeup_enable(dev, false);
+
        /* If no DDP driven features have to be setup, we are done with probe */
        if (ice_is_safe_mode(pf))
                goto probe_done;
@@ -3522,10 +3925,73 @@ err_init_pf_unroll:
 err_exit_unroll:
        ice_devlink_unregister(pf);
        pci_disable_pcie_error_reporting(pdev);
+       pci_disable_device(pdev);
        return err;
 }
 
 /**
+ * ice_set_wake - enable or disable Wake on LAN
+ * @pf: pointer to the PF struct
+ *
+ * Simple helper for WoL control
+ */
+static void ice_set_wake(struct ice_pf *pf)
+{
+       struct ice_hw *hw = &pf->hw;
+       bool wol = pf->wol_ena;
+
+       /* clear wake state, otherwise new wake events won't fire */
+       wr32(hw, PFPM_WUS, U32_MAX);
+
+       /* enable / disable APM wake up, no RMW needed */
+       wr32(hw, PFPM_APM, wol ? PFPM_APM_APME_M : 0);
+
+       /* set magic packet filter enabled */
+       wr32(hw, PFPM_WUFC, wol ? PFPM_WUFC_MAG_M : 0);
+}
+
+/**
+ * ice_setup_magic_mc_wake - setup device to wake on multicast magic packet
+ * @pf: pointer to the PF struct
+ *
+ * Issue firmware command to enable multicast magic wake, making
+ * sure that any locally administered address (LAA) is used for
+ * wake, and that PF reset doesn't undo the LAA.
+ */
+static void ice_setup_mc_magic_wake(struct ice_pf *pf)
+{
+       struct device *dev = ice_pf_to_dev(pf);
+       struct ice_hw *hw = &pf->hw;
+       enum ice_status status;
+       u8 mac_addr[ETH_ALEN];
+       struct ice_vsi *vsi;
+       u8 flags;
+
+       if (!pf->wol_ena)
+               return;
+
+       vsi = ice_get_main_vsi(pf);
+       if (!vsi)
+               return;
+
+       /* Get current MAC address in case it's an LAA */
+       if (vsi->netdev)
+               ether_addr_copy(mac_addr, vsi->netdev->dev_addr);
+       else
+               ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
+
+       flags = ICE_AQC_MAN_MAC_WR_MC_MAG_EN |
+               ICE_AQC_MAN_MAC_UPDATE_LAA_WOL |
+               ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP;
+
+       status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
+       if (status)
+               dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %s aq_err %s\n",
+                       ice_stat_str(status),
+                       ice_aq_str(hw->adminq.sq_last_status));
+}
+
+/**
  * ice_remove - Device removal routine
  * @pdev: PCI device information struct
  */
@@ -3554,8 +4020,10 @@ static void ice_remove(struct pci_dev *pdev)
        mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
        if (!ice_is_safe_mode(pf))
                ice_remove_arfs(pf);
+       ice_setup_mc_magic_wake(pf);
        ice_devlink_destroy_port(pf);
        ice_vsi_release_all(pf);
+       ice_set_wake(pf);
        ice_free_irq_msix_misc(pf);
        ice_for_each_vsi(pf, i) {
                if (!pf->vsi[i])
@@ -3575,8 +4043,230 @@ static void ice_remove(struct pci_dev *pdev)
        pci_wait_for_pending_transaction(pdev);
        ice_clear_interrupt_scheme(pf);
        pci_disable_pcie_error_reporting(pdev);
+       pci_disable_device(pdev);
+}
+
+/**
+ * ice_shutdown - PCI callback for shutting down device
+ * @pdev: PCI device information struct
+ */
+static void ice_shutdown(struct pci_dev *pdev)
+{
+       struct ice_pf *pf = pci_get_drvdata(pdev);
+
+       ice_remove(pdev);
+
+       if (system_state == SYSTEM_POWER_OFF) {
+               pci_wake_from_d3(pdev, pf->wol_ena);
+               pci_set_power_state(pdev, PCI_D3hot);
+       }
 }
 
+#ifdef CONFIG_PM
+/**
+ * ice_prepare_for_shutdown - prep for PCI shutdown
+ * @pf: board private structure
+ *
+ * Inform or close all dependent features in prep for PCI device shutdown
+ */
+static void ice_prepare_for_shutdown(struct ice_pf *pf)
+{
+       struct ice_hw *hw = &pf->hw;
+       u32 v;
+
+       /* Notify VFs of impending reset */
+       if (ice_check_sq_alive(hw, &hw->mailboxq))
+               ice_vc_notify_reset(pf);
+
+       dev_dbg(ice_pf_to_dev(pf), "Tearing down internal switch for shutdown\n");
+
+       /* disable the VSIs and their queues that are not already DOWN */
+       ice_pf_dis_all_vsi(pf, false);
+
+       ice_for_each_vsi(pf, v)
+               if (pf->vsi[v])
+                       pf->vsi[v]->vsi_num = 0;
+
+       ice_shutdown_all_ctrlq(hw);
+}
+
+/**
+ * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
+ * @pf: board private structure to reinitialize
+ *
+ * This routine reinitialize interrupt scheme that was cleared during
+ * power management suspend callback.
+ *
+ * This should be called during resume routine to re-allocate the q_vectors
+ * and reacquire interrupts.
+ */
+static int ice_reinit_interrupt_scheme(struct ice_pf *pf)
+{
+       struct device *dev = ice_pf_to_dev(pf);
+       int ret, v;
+
+       /* Since we clear MSIX flag during suspend, we need to
+        * set it back during resume...
+        */
+
+       ret = ice_init_interrupt_scheme(pf);
+       if (ret) {
+               dev_err(dev, "Failed to re-initialize interrupt %d\n", ret);
+               return ret;
+       }
+
+       /* Remap vectors and rings, after successful re-init interrupts */
+       ice_for_each_vsi(pf, v) {
+               if (!pf->vsi[v])
+                       continue;
+
+               ret = ice_vsi_alloc_q_vectors(pf->vsi[v]);
+               if (ret)
+                       goto err_reinit;
+               ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+       }
+
+       ret = ice_req_irq_msix_misc(pf);
+       if (ret) {
+               dev_err(dev, "Setting up misc vector failed after device suspend %d\n",
+                       ret);
+               goto err_reinit;
+       }
+
+       return 0;
+
+err_reinit:
+       while (v--)
+               if (pf->vsi[v])
+                       ice_vsi_free_q_vectors(pf->vsi[v]);
+
+       return ret;
+}
+
+/**
+ * ice_suspend
+ * @dev: generic device information structure
+ *
+ * Power Management callback to quiesce the device and prepare
+ * for D3 transition.
+ */
+static int ice_suspend(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct ice_pf *pf;
+       int disabled, v;
+
+       pf = pci_get_drvdata(pdev);
+
+       if (!ice_pf_state_is_nominal(pf)) {
+               dev_err(dev, "Device is not ready, no need to suspend it\n");
+               return -EBUSY;
+       }
+
+       /* Stop watchdog tasks until resume completion.
+        * Even though it is most likely that the service task is
+        * disabled if the device is suspended or down, the service task's
+        * state is controlled by a different state bit, and we should
+        * store and honor whatever state that bit is in at this point.
+        */
+       disabled = ice_service_task_stop(pf);
+
+       /* Already suspended?, then there is nothing to do */
+       if (test_and_set_bit(__ICE_SUSPENDED, pf->state)) {
+               if (!disabled)
+                       ice_service_task_restart(pf);
+               return 0;
+       }
+
+       if (test_bit(__ICE_DOWN, pf->state) ||
+           ice_is_reset_in_progress(pf->state)) {
+               dev_err(dev, "can't suspend device in reset or already down\n");
+               if (!disabled)
+                       ice_service_task_restart(pf);
+               return 0;
+       }
+
+       ice_setup_mc_magic_wake(pf);
+
+       ice_prepare_for_shutdown(pf);
+
+       ice_set_wake(pf);
+
+       /* Free vectors, clear the interrupt scheme and release IRQs
+        * for proper hibernation, especially with large number of CPUs.
+        * Otherwise hibernation might fail when mapping all the vectors back
+        * to CPU0.
+        */
+       ice_free_irq_msix_misc(pf);
+       ice_for_each_vsi(pf, v) {
+               if (!pf->vsi[v])
+                       continue;
+               ice_vsi_free_q_vectors(pf->vsi[v]);
+       }
+       ice_clear_interrupt_scheme(pf);
+
+       pci_wake_from_d3(pdev, pf->wol_ena);
+       pci_set_power_state(pdev, PCI_D3hot);
+       return 0;
+}
+
+/**
+ * ice_resume - PM callback for waking up from D3
+ * @dev: generic device information structure
+ */
+static int ice_resume(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       enum ice_reset_req reset_type;
+       struct ice_pf *pf;
+       struct ice_hw *hw;
+       int ret;
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       pci_save_state(pdev);
+
+       if (!pci_device_is_present(pdev))
+               return -ENODEV;
+
+       ret = pci_enable_device_mem(pdev);
+       if (ret) {
+               dev_err(dev, "Cannot enable device after suspend\n");
+               return ret;
+       }
+
+       pf = pci_get_drvdata(pdev);
+       hw = &pf->hw;
+
+       pf->wakeup_reason = rd32(hw, PFPM_WUS);
+       ice_print_wake_reason(pf);
+
+       /* We cleared the interrupt scheme when we suspended, so we need to
+        * restore it now to resume device functionality.
+        */
+       ret = ice_reinit_interrupt_scheme(pf);
+       if (ret)
+               dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
+
+       clear_bit(__ICE_DOWN, pf->state);
+       /* Now perform PF reset and rebuild */
+       reset_type = ICE_RESET_PFR;
+       /* re-enable service task for reset, but allow reset to schedule it */
+       clear_bit(__ICE_SERVICE_DIS, pf->state);
+
+       if (ice_schedule_reset(pf, reset_type))
+               dev_err(dev, "Reset during resume failed.\n");
+
+       clear_bit(__ICE_SUSPENDED, pf->state);
+       ice_service_task_restart(pf);
+
+       /* Restart the service task */
+       mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+
+       return 0;
+}
+#endif /* CONFIG_PM */
+
 /**
  * ice_pci_err_detected - warning that PCI error has been detected
  * @pdev: PCI device information struct
@@ -3742,6 +4432,8 @@ static const struct pci_device_id ice_pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
 
+static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
+
 static const struct pci_error_handlers ice_pci_err_handler = {
        .error_detected = ice_pci_err_detected,
        .slot_reset = ice_pci_err_slot_reset,
@@ -3755,6 +4447,10 @@ static struct pci_driver ice_driver = {
        .id_table = ice_pci_tbl,
        .probe = ice_probe,
        .remove = ice_remove,
+#ifdef CONFIG_PM
+       .driver.pm = &ice_pm_ops,
+#endif /* CONFIG_PM */
+       .shutdown = ice_shutdown,
        .sriov_configure = ice_sriov_configure,
        .err_handler = &ice_pci_err_handler
 };
@@ -3769,7 +4465,7 @@ static int __init ice_module_init(void)
 {
        int status;
 
-       pr_info("%s - version %s\n", ice_driver_string, ice_drv_ver);
+       pr_info("%s\n", ice_driver_string);
        pr_info("%s\n", ice_copyright);
 
        ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
@@ -5667,20 +6363,30 @@ int ice_open(struct net_device *netdev)
 
        /* Set PHY if there is media, otherwise, turn off PHY */
        if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
-               err = ice_force_phys_link_state(vsi, true);
+               clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
+               if (!test_bit(__ICE_PHY_INIT_COMPLETE, pf->state)) {
+                       err = ice_init_phy_user_cfg(pi);
+                       if (err) {
+                               netdev_err(netdev, "Failed to initialize PHY settings, error %d\n",
+                                          err);
+                               return err;
+                       }
+               }
+
+               err = ice_configure_phy(vsi);
                if (err) {
                        netdev_err(netdev, "Failed to set physical link up, error %d\n",
                                   err);
                        return err;
                }
        } else {
+               set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
                err = ice_aq_set_link_restart_an(pi, false, NULL);
                if (err) {
                        netdev_err(netdev, "Failed to set PHY state, VSI %d error %d\n",
                                   vsi->vsi_num, err);
                        return err;
                }
-               set_bit(ICE_FLAG_NO_MEDIA, vsi->back->flags);
        }
 
        err = ice_vsi_open(vsi);
index b049c1c..7c2a068 100644 (file)
@@ -172,8 +172,7 @@ void ice_release_nvm(struct ice_hw *hw)
  *
  * Reads one 16 bit word from the Shadow RAM using the ice_read_sr_word_aq.
  */
-static enum ice_status
-ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
+enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
 {
        enum ice_status status;
 
@@ -197,7 +196,7 @@ ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
  * Area (PFA) and returns the TLV pointer and length. The caller can
  * use these to read the variable length TLV value.
  */
-static enum ice_status
+enum ice_status
 ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
                       u16 module_type)
 {
index 165eda0..999f273 100644 (file)
@@ -11,6 +11,10 @@ enum ice_status
 ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
                  bool read_shadow_ram);
 enum ice_status
+ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
+                      u16 module_type);
+enum ice_status
 ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
 enum ice_status ice_init_nvm(struct ice_hw *hw);
+enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
 #endif /* _ICE_NVM_H_ */
index 0475134..1c29cfa 100644 (file)
@@ -129,7 +129,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
  */
 enum ice_status
 ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
-                        struct ice_aqc_get_elem *buf, u16 buf_size,
+                        struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
                         u16 *elems_ret, struct ice_sq_cd *cd)
 {
        return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
@@ -149,8 +149,8 @@ enum ice_status
 ice_sched_add_node(struct ice_port_info *pi, u8 layer,
                   struct ice_aqc_txsched_elem_data *info)
 {
+       struct ice_aqc_txsched_elem_data elem;
        struct ice_sched_node *parent;
-       struct ice_aqc_get_elem elem;
        struct ice_sched_node *node;
        enum ice_status status;
        struct ice_hw *hw;
@@ -195,7 +195,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
        node->parent = parent;
        node->tx_sched_layer = layer;
        parent->children[parent->num_children++] = node;
-       memcpy(&node->info, &elem.generic[0], sizeof(node->info));
+       node->info = elem;
        return 0;
 }
 
@@ -238,7 +238,7 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
        enum ice_status status;
        u16 buf_size;
 
-       buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);
+       buf_size = struct_size(buf, teid, num_nodes);
        buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
        if (!buf)
                return ICE_ERR_NO_MEMORY;
@@ -423,7 +423,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
  */
 static enum ice_status
 ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
-                      struct ice_aqc_conf_elem *buf, u16 buf_size,
+                      struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
                       u16 *elems_cfgd, struct ice_sq_cd *cd)
 {
        return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
@@ -443,8 +443,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
  * Suspend scheduling elements (0x0409)
  */
 static enum ice_status
-ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
-                          struct ice_aqc_suspend_resume_elem *buf,
+ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
                           u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
 {
        return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
@@ -464,8 +463,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
  * resume scheduling elements (0x040A)
  */
 static enum ice_status
-ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
-                         struct ice_aqc_suspend_resume_elem *buf,
+ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
                          u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
 {
        return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
@@ -506,9 +504,9 @@ static enum ice_status
 ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
                               bool suspend)
 {
-       struct ice_aqc_suspend_resume_elem *buf;
        u16 i, buf_size, num_elem_ret = 0;
        enum ice_status status;
+       __le32 *buf;
 
        buf_size = sizeof(*buf) * num_nodes;
        buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
@@ -516,7 +514,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
                return ICE_ERR_NO_MEMORY;
 
        for (i = 0; i < num_nodes; i++)
-               buf->teid[i] = cpu_to_le32(node_teids[i]);
+               buf[i] = cpu_to_le32(node_teids[i]);
 
        if (suspend)
                status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
@@ -591,7 +589,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
  */
 static enum ice_status
 ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
-                 u16 num_profiles, struct ice_aqc_rl_profile_generic_elem *buf,
+                 u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
                  u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
 {
        struct ice_aqc_rl_profile *cmd;
@@ -622,13 +620,11 @@ ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
  */
 static enum ice_status
 ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
-                     struct ice_aqc_rl_profile_generic_elem *buf,
-                     u16 buf_size, u16 *num_profiles_added,
-                     struct ice_sq_cd *cd)
+                     struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
+                     u16 *num_profiles_added, struct ice_sq_cd *cd)
 {
-       return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles,
-                                num_profiles, buf,
-                                buf_size, num_profiles_added, cd);
+       return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles,
+                                buf, buf_size, num_profiles_added, cd);
 }
 
 /**
@@ -644,13 +640,12 @@ ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
  */
 static enum ice_status
 ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
-                        struct ice_aqc_rl_profile_generic_elem *buf,
-                        u16 buf_size, u16 *num_profiles_removed,
-                        struct ice_sq_cd *cd)
+                        struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
+                        u16 *num_profiles_removed, struct ice_sq_cd *cd)
 {
        return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
-                                num_profiles, buf,
-                                buf_size, num_profiles_removed, cd);
+                                num_profiles, buf, buf_size,
+                                num_profiles_removed, cd);
 }
 
 /**
@@ -666,7 +661,7 @@ static enum ice_status
 ice_sched_del_rl_profile(struct ice_hw *hw,
                         struct ice_aqc_rl_profile_info *rl_info)
 {
-       struct ice_aqc_rl_profile_generic_elem *buf;
+       struct ice_aqc_rl_profile_elem *buf;
        u16 num_profiles_removed;
        enum ice_status status;
        u16 num_profiles = 1;
@@ -675,8 +670,7 @@ ice_sched_del_rl_profile(struct ice_hw *hw,
                return ICE_ERR_IN_USE;
 
        /* Safe to remove profile ID */
-       buf = (struct ice_aqc_rl_profile_generic_elem *)
-               &rl_info->profile;
+       buf = &rl_info->profile;
        status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
                                          &num_profiles_removed, NULL);
        if (status || num_profiles_removed != num_profiles)
@@ -831,7 +825,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
        size_t buf_size;
        u32 teid;
 
-       buf_size = struct_size(buf, generic, num_nodes - 1);
+       buf_size = struct_size(buf, generic, num_nodes);
        buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
        if (!buf)
                return ICE_ERR_NO_MEMORY;
@@ -1867,7 +1861,7 @@ static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
  * @node: pointer to node
  * @info: node info to update
  *
- * It updates the HW DB, and local SW DB of node. It updates the scheduling
+ * Update the HW DB, and local SW DB of node. Update the scheduling
  * parameters of node from argument info data buffer (Info->data buf) and
  * returns success or error on config sched element failure. The caller
  * needs to hold scheduler lock.
@@ -1876,18 +1870,18 @@ static enum ice_status
 ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
                      struct ice_aqc_txsched_elem_data *info)
 {
-       struct ice_aqc_conf_elem buf;
+       struct ice_aqc_txsched_elem_data buf;
        enum ice_status status;
        u16 elem_cfgd = 0;
        u16 num_elems = 1;
 
-       buf.generic[0] = *info;
+       buf = *info;
        /* Parent TEID is reserved field in this aq call */
-       buf.generic[0].parent_teid = 0;
+       buf.parent_teid = 0;
        /* Element type is reserved field in this aq call */
-       buf.generic[0].data.elem_type = 0;
+       buf.data.elem_type = 0;
        /* Flags is reserved field in this aq call */
-       buf.generic[0].data.flags = 0;
+       buf.data.flags = 0;
 
        /* Update HW DB */
        /* Configure element node */
@@ -2131,9 +2125,9 @@ static struct ice_aqc_rl_profile_info *
 ice_sched_add_rl_profile(struct ice_port_info *pi,
                         enum ice_rl_type rl_type, u32 bw, u8 layer_num)
 {
-       struct ice_aqc_rl_profile_generic_elem *buf;
        struct ice_aqc_rl_profile_info *rl_prof_elem;
        u16 profiles_added = 0, num_profiles = 1;
+       struct ice_aqc_rl_profile_elem *buf;
        enum ice_status status;
        struct ice_hw *hw;
        u8 profile_type;
@@ -2182,8 +2176,7 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
        rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size);
 
        /* Create new entry in HW DB */
-       buf = (struct ice_aqc_rl_profile_generic_elem *)
-               &rl_prof_elem->profile;
+       buf = &rl_prof_elem->profile;
        status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
                                       &profiles_added, NULL);
        if (status || profiles_added != num_profiles)
index f0593cf..0e55ae0 100644 (file)
@@ -56,7 +56,7 @@ struct ice_sched_agg_info {
 /* FW AQ command calls */
 enum ice_status
 ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
-                        struct ice_aqc_get_elem *buf, u16 buf_size,
+                        struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
                         u16 *elems_ret, struct ice_sq_cd *cd);
 enum ice_status ice_sched_init_port(struct ice_port_info *pi);
 enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
index ff7d16a..ccbe1cc 100644 (file)
@@ -29,25 +29,17 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
                                                        0x81, 0, 0, 0};
 
 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
-       (sizeof(struct ice_aqc_sw_rules_elem) - \
-        sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
-        sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
+       (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
+        (DUMMY_ETH_HDR_LEN * \
+         sizeof(((struct ice_sw_rule_lkup_rx_tx *)0)->hdr[0])))
 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
-       (sizeof(struct ice_aqc_sw_rules_elem) - \
-        sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
-        sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
+       (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
-       (sizeof(struct ice_aqc_sw_rules_elem) - \
-        sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
-        sizeof(struct ice_sw_rule_lg_act) - \
-        sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
-        ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
+       (offsetof(struct ice_aqc_sw_rules_elem, pdata.lg_act.act) + \
+        ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act[0])))
 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
-       (sizeof(struct ice_aqc_sw_rules_elem) - \
-        sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
-        sizeof(struct ice_sw_rule_vsi_list) - \
-        sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
-        ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
+       (offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
+        ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
 
 /**
  * ice_init_def_sw_recp - initialize the recipe book keeping tables
@@ -87,7 +79,7 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
  * @num_elems: pointer to number of elements
  * @cd: pointer to command details structure or NULL
  *
- * Get switch configuration (0x0200) to be placed in 'buff'.
+ * Get switch configuration (0x0200) to be placed in buf.
  * This admin command returns information such as initial VSI/port number
  * and switch ID it belongs to.
  *
@@ -104,13 +96,13 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
  * parsing the response buffer.
  */
 static enum ice_status
-ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
+ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
                  u16 buf_size, u16 *req_desc, u16 *num_elems,
                  struct ice_sq_cd *cd)
 {
        struct ice_aqc_get_sw_cfg *cmd;
-       enum ice_status status;
        struct ice_aq_desc desc;
+       enum ice_status status;
 
        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
        cmd = &desc.params.get_sw_conf;
@@ -449,7 +441,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
        enum ice_status status;
        u16 buf_len;
 
-       buf_len = sizeof(*sw_buf);
+       buf_len = struct_size(sw_buf, elem, 1);
        sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
        if (!sw_buf)
                return ICE_ERR_NO_MEMORY;
@@ -550,7 +542,7 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
  */
 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
 {
-       struct ice_aqc_get_sw_cfg_resp *rbuf;
+       struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
        enum ice_status status;
        u16 req_desc = 0;
        u16 num_elems;
@@ -568,19 +560,19 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
         * writing a non-zero value in req_desc
         */
        do {
+               struct ice_aqc_get_sw_cfg_resp_elem *ele;
+
                status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
                                           &req_desc, &num_elems, NULL);
 
                if (status)
                        break;
 
-               for (i = 0; i < num_elems; i++) {
-                       struct ice_aqc_get_sw_cfg_resp_elem *ele;
+               for (i = 0, ele = rbuf; i < num_elems; i++, ele++) {
                        u16 pf_vf_num, swid, vsi_port_num;
                        bool is_vf = false;
                        u8 res_type;
 
-                       ele = rbuf[i].elements;
                        vsi_port_num = le16_to_cpu(ele->vsi_port_num) &
                                ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
 
@@ -856,8 +848,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
                m_ent->fltr_info.fwd_id.hw_vsi_id;
 
        act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
-       act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
-               ICE_LG_ACT_VSI_LIST_ID_M;
+       act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) & ICE_LG_ACT_VSI_LIST_ID_M;
        if (m_ent->vsi_count > 1)
                act |= ICE_LG_ACT_VSI_LIST;
        lg_act->pdata.lg_act.act[0] = cpu_to_le32(act);
@@ -2037,7 +2028,8 @@ ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction)
        hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
 
        s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
-                           ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+               ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+
        s_rule = devm_kzalloc(ice_hw_to_dev(hw), s_rule_size, GFP_KERNEL);
        if (!s_rule)
                return ICE_ERR_NO_MEMORY;
@@ -2691,7 +2683,7 @@ ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
        u16 buf_len;
 
        /* Allocate resource */
-       buf_len = sizeof(*buf);
+       buf_len = struct_size(buf, elem, 1);
        buf = kzalloc(buf_len, GFP_KERNEL);
        if (!buf)
                return ICE_ERR_NO_MEMORY;
@@ -2729,7 +2721,7 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
        u16 buf_len;
 
        /* Free resource */
-       buf_len = sizeof(*buf);
+       buf_len = struct_size(buf, elem, 1);
        buf = kzalloc(buf_len, GFP_KERNEL);
        if (!buf)
                return ICE_ERR_NO_MEMORY;
index c1ad862..08c616d 100644 (file)
@@ -87,6 +87,12 @@ enum ice_fc_mode {
        ICE_FC_DFLT
 };
 
+enum ice_phy_cache_mode {
+       ICE_FC_MODE = 0,
+       ICE_SPEED_MODE,
+       ICE_FEC_MODE
+};
+
 enum ice_fec_mode {
        ICE_FEC_NONE = 0,
        ICE_FEC_RS,
@@ -94,6 +100,14 @@ enum ice_fec_mode {
        ICE_FEC_AUTO
 };
 
+struct ice_phy_cache_mode_data {
+       union {
+               enum ice_fec_mode curr_user_fec_req;
+               enum ice_fc_mode curr_user_fc_req;
+               u16 curr_user_speed_req;
+       } data;
+};
+
 enum ice_set_fc_aq_failures {
        ICE_SET_FC_AQ_FAIL_NONE = 0,
        ICE_SET_FC_AQ_FAIL_GET,
@@ -104,6 +118,7 @@ enum ice_set_fc_aq_failures {
 /* Various MAC types */
 enum ice_mac_type {
        ICE_MAC_UNKNOWN = 0,
+       ICE_MAC_E810,
        ICE_MAC_GENERIC,
 };
 
@@ -160,6 +175,13 @@ struct ice_phy_info {
        u64 phy_type_high;
        enum ice_media_type media_type;
        u8 get_link_info;
+       /* Please refer to struct ice_aqc_get_link_status_data to get
+        * detail of enable bit in curr_user_speed_req
+        */
+       u16 curr_user_speed_req;
+       enum ice_fec_mode curr_user_fec_req;
+       enum ice_fc_mode curr_user_fc_req;
+       struct ice_aqc_set_phy_cfg_data curr_user_phy_cfg;
 };
 
 /* protocol enumeration for filters */
@@ -293,6 +315,28 @@ struct ice_nvm_info {
        u8 blank_nvm_mode;        /* is NVM empty (no FW present) */
 };
 
+struct ice_link_default_override_tlv {
+       u8 options;
+#define ICE_LINK_OVERRIDE_OPT_M                0x3F
+#define ICE_LINK_OVERRIDE_STRICT_MODE  BIT(0)
+#define ICE_LINK_OVERRIDE_EPCT_DIS     BIT(1)
+#define ICE_LINK_OVERRIDE_PORT_DIS     BIT(2)
+#define ICE_LINK_OVERRIDE_EN           BIT(3)
+#define ICE_LINK_OVERRIDE_AUTO_LINK_DIS        BIT(4)
+#define ICE_LINK_OVERRIDE_EEE_EN       BIT(5)
+       u8 phy_config;
+#define ICE_LINK_OVERRIDE_PHY_CFG_S    8
+#define ICE_LINK_OVERRIDE_PHY_CFG_M    (0xC3 << ICE_LINK_OVERRIDE_PHY_CFG_S)
+#define ICE_LINK_OVERRIDE_PAUSE_M      0x3
+#define ICE_LINK_OVERRIDE_LESM_EN      BIT(6)
+#define ICE_LINK_OVERRIDE_AUTO_FEC_EN  BIT(7)
+       u8 fec_options;
+#define ICE_LINK_OVERRIDE_FEC_OPT_M    0xFF
+       u8 rsvd1;
+       u64 phy_type_low;
+       u64 phy_type_high;
+};
+
 #define ICE_NVM_VER_LEN        32
 
 /* netlist version information */
@@ -444,6 +488,7 @@ struct ice_dcb_app_priority_table {
 #define ICE_APP_SEL_ETHTYPE    0x1
 #define ICE_APP_SEL_TCPIP      0x2
 #define ICE_CEE_APP_SEL_ETHTYPE        0x0
+#define ICE_SR_LINK_DEFAULT_OVERRIDE_PTR       0x134
 #define ICE_CEE_APP_SEL_TCPIP  0x1
 
 struct ice_dcbx_cfg {
@@ -709,6 +754,7 @@ struct ice_hw_port_stats {
 
 /* Checksum and Shadow RAM pointers */
 #define ICE_SR_BOOT_CFG_PTR            0x132
+#define ICE_SR_NVM_WOL_CFG             0x19
 #define ICE_NVM_OROM_VER_OFF           0x02
 #define ICE_SR_PBA_BLOCK_PTR           0x16
 #define ICE_SR_NVM_DEV_STARTER_VER     0x18
@@ -726,6 +772,17 @@ struct ice_hw_port_stats {
 #define ICE_OROM_VER_MASK              (0xff << ICE_OROM_VER_SHIFT)
 #define ICE_SR_PFA_PTR                 0x40
 #define ICE_SR_SECTOR_SIZE_IN_WORDS    0x800
+
+/* Link override related */
+#define ICE_SR_PFA_LINK_OVERRIDE_WORDS         10
+#define ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS     4
+#define ICE_SR_PFA_LINK_OVERRIDE_OFFSET                2
+#define ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET    1
+#define ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET    2
+#define ICE_FW_API_LINK_OVERRIDE_MAJ           1
+#define ICE_FW_API_LINK_OVERRIDE_MIN           5
+#define ICE_FW_API_LINK_OVERRIDE_PATCH         2
+
 #define ICE_SR_WORDS_IN_1KB            512
 
 /* Hash redirection LUT for VSI - maximum array size */
index 16a2f25..9df5ceb 100644 (file)
@@ -1593,31 +1593,6 @@ err_unroll_intr:
 }
 
 /**
- * ice_pf_state_is_nominal - checks the PF for nominal state
- * @pf: pointer to PF to check
- *
- * Check the PF's state for a collection of bits that would indicate
- * the PF is in a state that would inhibit normal operation for
- * driver functionality.
- *
- * Returns true if PF is in a nominal state.
- * Returns false otherwise
- */
-static bool ice_pf_state_is_nominal(struct ice_pf *pf)
-{
-       DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };
-
-       if (!pf)
-               return false;
-
-       bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);
-       if (bitmap_intersects(pf->state, check_bits, __ICE_STATE_NBITS))
-               return false;
-
-       return true;
-}
-
-/**
  * ice_pci_sriov_ena - Enable or change number of VFs
  * @pf: pointer to the PF structure
  * @num_vfs: number of VFs to allocate
index b6f928c..6badfd6 100644 (file)
@@ -206,12 +206,14 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
        struct ice_aqc_add_tx_qgrp *qg_buf;
        struct ice_ring *tx_ring, *rx_ring;
        struct ice_q_vector *q_vector;
+       u16 size;
        int err;
 
        if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
                return -EINVAL;
 
-       qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL);
+       size = struct_size(qg_buf, txqs, 1);
+       qg_buf = kzalloc(size, GFP_KERNEL);
        if (!qg_buf)
                return -ENOMEM;
 
@@ -228,7 +230,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
        if (ice_is_xdp_ena_vsi(vsi)) {
                struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
 
-               memset(qg_buf, 0, sizeof(*qg_buf));
+               memset(qg_buf, 0, size);
                qg_buf->num_txqs = 1;
                err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
                if (err)
index 438b42c..a32391e 100644 (file)
@@ -638,7 +638,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
                        dev_spec->sgmii_active = true;
                        break;
                }
-               /* fall through - for I2C based SGMII */
+               fallthrough; /* for I2C based SGMII */
        case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
                /* read media type from SFP EEPROM */
                ret_val = igb_set_sfp_media_type_82575(hw);
@@ -1704,7 +1704,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
        case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
                /* disable PCS autoneg and support parallel detect only */
                pcs_autoneg = false;
-               /* fall through */
+               fallthrough;
        default:
                if (hw->mac.type == e1000_82575 ||
                    hw->mac.type == e1000_82576) {
index 09f4dcb..fa136e6 100644 (file)
@@ -721,7 +721,7 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
                        igb_read_invm_version(hw, fw_vers);
                        return;
                }
-               /* fall through */
+               fallthrough;
        case e1000_i350:
                /* find combo image version */
                hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
index ad2125e..8c8eb82 100644 (file)
@@ -659,7 +659,7 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
                        phy_data |= M88E1000_PSCR_AUTO_X_1000T;
                        break;
                }
-               /* fall through */
+               fallthrough;
        case 0:
        default:
                phy_data |= M88E1000_PSCR_AUTO_X_MODE;
@@ -2621,7 +2621,7 @@ static s32 igb_set_master_slave_mode(struct e1000_hw *hw)
                break;
        case e1000_ms_auto:
                phy_data &= ~CR_1000T_MS_ENABLE;
-               /* fall-through */
+               fallthrough;
        default:
                break;
        }
index 0c9282e..2f015b6 100644 (file)
@@ -642,7 +642,6 @@ enum igb_boards {
 };
 
 extern char igb_driver_name[];
-extern char igb_driver_version[];
 
 int igb_open(struct net_device *netdev);
 int igb_close(struct net_device *netdev);
index 2cd003c..c2cf414 100644 (file)
@@ -851,7 +851,6 @@ static void igb_get_drvinfo(struct net_device *netdev,
        struct igb_adapter *adapter = netdev_priv(netdev);
 
        strlcpy(drvinfo->driver,  igb_driver_name, sizeof(drvinfo->driver));
-       strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
 
        /* EEPROM image version # is reported as firmware version # for
         * 82575 controllers
@@ -2518,11 +2517,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
        switch (cmd->flow_type) {
        case TCP_V4_FLOW:
                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-               /* Fall through */
+               fallthrough;
        case UDP_V4_FLOW:
                if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-               /* Fall through */
+               fallthrough;
        case SCTP_V4_FLOW:
        case AH_ESP_V4_FLOW:
        case AH_V4_FLOW:
@@ -2532,11 +2531,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
                break;
        case TCP_V6_FLOW:
                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-               /* Fall through */
+               fallthrough;
        case UDP_V6_FLOW:
                if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-               /* Fall through */
+               fallthrough;
        case SCTP_V6_FLOW:
        case AH_ESP_V6_FLOW:
        case AH_V6_FLOW:
index 8bb3db2..ae8d643 100644 (file)
 #include <linux/i2c.h>
 #include "igb.h"
 
-#define MAJ 5
-#define MIN 6
-#define BUILD 0
-#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
-__stringify(BUILD) "-k"
-
 enum queue_mode {
        QUEUE_MODE_STRICT_PRIORITY,
        QUEUE_MODE_STREAM_RESERVATION,
@@ -55,7 +49,6 @@ enum tx_queue_prio {
 };
 
 char igb_driver_name[] = "igb";
-char igb_driver_version[] = DRV_VERSION;
 static const char igb_driver_string[] =
                                "Intel(R) Gigabit Ethernet Network Driver";
 static const char igb_copyright[] =
@@ -240,7 +233,6 @@ static struct pci_driver igb_driver = {
 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
 MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
 
 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
 static int debug = -1;
@@ -666,8 +658,7 @@ static int __init igb_init_module(void)
 {
        int ret;
 
-       pr_info("%s - version %s\n",
-              igb_driver_string, igb_driver_version);
+       pr_info("%s\n", igb_driver_string);
        pr_info("%s\n", igb_copyright);
 
 #ifdef CONFIG_IGB_DCA
@@ -720,14 +711,14 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
                                adapter->rx_ring[i]->reg_idx = rbase_offset +
                                                               Q_IDX_82576(i);
                }
-               /* Fall through */
+               fallthrough;
        case e1000_82575:
        case e1000_82580:
        case e1000_i350:
        case e1000_i354:
        case e1000_i210:
        case e1000_i211:
-               /* Fall through */
+               fallthrough;
        default:
                for (; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -2882,7 +2873,7 @@ void igb_set_fw_version(struct igb_adapter *adapter)
                                 fw.invm_img_type);
                        break;
                }
-               /* fall through */
+               fallthrough;
        default:
                /* if option is rom valid, display its version too */
                if (fw.or_valid) {
@@ -3733,13 +3724,13 @@ unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
                        max_rss_queues = 1;
                        break;
                }
-               /* fall through */
+               fallthrough;
        case e1000_82576:
                if (!!adapter->vfs_allocated_count) {
                        max_rss_queues = 2;
                        break;
                }
-               /* fall through */
+               fallthrough;
        case e1000_82580:
        case e1000_i354:
        default:
@@ -4878,14 +4869,14 @@ static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
                /* VLAN filtering needed for VLAN prio filter */
                if (adapter->netdev->features & NETIF_F_NTUPLE)
                        break;
-               /* fall through */
+               fallthrough;
        case e1000_82576:
        case e1000_82580:
        case e1000_i354:
                /* VLAN filtering needed for pool filtering */
                if (adapter->vfs_allocated_count)
                        break;
-               /* fall through */
+               fallthrough;
        default:
                return 1;
        }
@@ -5165,7 +5156,7 @@ bool igb_has_link(struct igb_adapter *adapter)
        case e1000_media_type_copper:
                if (!hw->mac.get_link_status)
                        return true;
-               /* fall through */
+               fallthrough;
        case e1000_media_type_internal_serdes:
                hw->mac.ops.check_for_link(hw);
                link_active = !hw->mac.get_link_status;
@@ -5825,7 +5816,7 @@ csum_failed:
        switch (skb->csum_offset) {
        case offsetof(struct tcphdr, check):
                type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
-               /* fall through */
+               fallthrough;
        case offsetof(struct udphdr, check):
                break;
        case offsetof(struct sctphdr, checksum):
@@ -5837,7 +5828,7 @@ csum_failed:
                        type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
                        break;
                }
-               /* fall through */
+               fallthrough;
        default:
                skb_checksum_help(skb);
                goto csum_failed;
@@ -6715,7 +6706,7 @@ static int __igb_notify_dca(struct device *dev, void *data)
                        igb_setup_dca(adapter);
                        break;
                }
-               /* Fall Through - since DCA is disabled. */
+               fallthrough; /* since DCA is disabled. */
        case DCA_PROVIDER_REMOVE:
                if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
                        /* without this a class_device is left
@@ -9384,13 +9375,13 @@ static void igb_vmm_control(struct igb_adapter *adapter)
                reg = rd32(E1000_DTXCTL);
                reg |= E1000_DTXCTL_VLAN_ADDED;
                wr32(E1000_DTXCTL, reg);
-               /* Fall through */
+               fallthrough;
        case e1000_82580:
                /* enable replication vlan tag stripping */
                reg = rd32(E1000_RPLOLR);
                reg |= E1000_RPLOLR_STRVLAN;
                wr32(E1000_RPLOLR, reg);
-               /* Fall through */
+               fallthrough;
        case e1000_i350:
                /* none of the above registers are supported by i350 */
                break;
index c39e921..490368d 100644 (file)
@@ -1053,7 +1053,7 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
                        config->rx_filter = HWTSTAMP_FILTER_ALL;
                        break;
                }
-               /* fall through */
+               fallthrough;
        default:
                config->rx_filter = HWTSTAMP_FILTER_NONE;
                return -ERANGE;
index 9217d15..f4835eb 100644 (file)
@@ -170,8 +170,6 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
        struct igbvf_adapter *adapter = netdev_priv(netdev);
 
        strlcpy(drvinfo->driver,  igbvf_driver_name, sizeof(drvinfo->driver));
-       strlcpy(drvinfo->version, igbvf_driver_version,
-               sizeof(drvinfo->version));
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
 }
index eee26a3..975eb47 100644 (file)
@@ -281,7 +281,6 @@ enum igbvf_state_t {
 };
 
 extern char igbvf_driver_name[];
-extern const char igbvf_driver_version[];
 
 void igbvf_check_options(struct igbvf_adapter *);
 void igbvf_set_ethtool_ops(struct net_device *);
index 5b1800c..97a0659 100644 (file)
@@ -24,9 +24,7 @@
 
 #include "igbvf.h"
 
-#define DRV_VERSION "2.4.0-k"
 char igbvf_driver_name[] = "igbvf";
-const char igbvf_driver_version[] = DRV_VERSION;
 static const char igbvf_driver_string[] =
                  "Intel(R) Gigabit Virtual Function Network Driver";
 static const char igbvf_copyright[] =
@@ -2093,7 +2091,7 @@ csum_failed:
        switch (skb->csum_offset) {
        case offsetof(struct tcphdr, check):
                type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
-               /* fall through */
+               fallthrough;
        case offsetof(struct udphdr, check):
                break;
        case offsetof(struct sctphdr, checksum):
@@ -2105,7 +2103,7 @@ csum_failed:
                        type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
                        break;
                }
-               /* fall through */
+               fallthrough;
        default:
                skb_checksum_help(skb);
                goto csum_failed;
@@ -2987,7 +2985,7 @@ static int __init igbvf_init_module(void)
 {
        int ret;
 
-       pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
+       pr_info("%s\n", igbvf_driver_string);
        pr_info("%s\n", igbvf_copyright);
 
        ret = pci_register_driver(&igbvf_driver);
@@ -3011,6 +3009,5 @@ module_exit(igbvf_exit_module);
 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
 MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
 
 /* netdev.c */
index 5dbc5a1..3070dfd 100644 (file)
@@ -117,6 +117,9 @@ struct igc_ring {
 struct igc_adapter {
        struct net_device *netdev;
 
+       struct ethtool_eee eee;
+       u16 eee_advert;
+
        unsigned long state;
        unsigned int flags;
        unsigned int num_q_vectors;
@@ -207,8 +210,6 @@ struct igc_adapter {
        struct sk_buff *ptp_tx_skb;
        struct hwtstamp_config tstamp_config;
        unsigned long ptp_tx_start;
-       unsigned long last_rx_ptp_check;
-       unsigned long last_rx_timestamp;
        unsigned int ptp_flags;
        /* System time value lock */
        spinlock_t tmreg_lock;
@@ -239,7 +240,6 @@ void igc_rings_dump(struct igc_adapter *adapter);
 void igc_regs_dump(struct igc_adapter *adapter);
 
 extern char igc_driver_name[];
-extern char igc_driver_version[];
 
 #define IGC_REGS_LEN                   740
 
@@ -256,6 +256,7 @@ extern char igc_driver_version[];
 #define IGC_FLAG_MEDIA_RESET           BIT(10)
 #define IGC_FLAG_MAS_ENABLE            BIT(12)
 #define IGC_FLAG_HAS_MSIX              BIT(13)
+#define IGC_FLAG_EEE                   BIT(14)
 #define IGC_FLAG_VLAN_PROMISC          BIT(15)
 #define IGC_FLAG_RX_LEGACY             BIT(16)
 #define IGC_FLAG_TSN_QBV_ENABLED       BIT(17)
@@ -546,7 +547,6 @@ void igc_ptp_init(struct igc_adapter *adapter);
 void igc_ptp_reset(struct igc_adapter *adapter);
 void igc_ptp_suspend(struct igc_adapter *adapter);
 void igc_ptp_stop(struct igc_adapter *adapter);
-void igc_ptp_rx_rgtstamp(struct igc_q_vector *q_vector, struct sk_buff *skb);
 void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
                         struct sk_buff *skb);
 int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
index 186deb1..f1f4649 100644 (file)
 
 /* Advanced Receive Descriptor bit definitions */
 #define IGC_RXDADV_STAT_TSIP   0x08000 /* timestamp in packet */
-#define IGC_RXDADV_STAT_TS     0x10000 /* Pkt was time stamped */
 
 #define IGC_RXDEXT_STATERR_CE          0x01000000
 #define IGC_RXDEXT_STATERR_SE          0x02000000
 #define IGC_FTQF_MASK_PROTO_BP 0x10000000
 
 /* Time Sync Receive Control bit definitions */
-#define IGC_TSYNCRXCTL_VALID           0x00000001  /* Rx timestamp valid */
 #define IGC_TSYNCRXCTL_TYPE_MASK       0x0000000E  /* Rx type mask */
 #define IGC_TSYNCRXCTL_TYPE_L2_V2      0x00
 #define IGC_TSYNCRXCTL_TYPE_L4_V1      0x02
 /* Maximum size of the MTA register table in all supported adapters */
 #define MAX_MTA_REG                    128
 
+/* EEE defines */
+#define IGC_IPCNFG_EEE_2_5G_AN         0x00000010 /* IPCNFG EEE Ena 2.5G AN */
+#define IGC_IPCNFG_EEE_1G_AN           0x00000008 /* IPCNFG EEE Ena 1G AN */
+#define IGC_IPCNFG_EEE_100M_AN         0x00000004 /* IPCNFG EEE Ena 100M AN */
+#define IGC_EEER_EEE_NEG               0x20000000 /* EEE capability nego */
+#define IGC_EEER_TX_LPI_EN             0x00010000 /* EEER Tx LPI Enable */
+#define IGC_EEER_RX_LPI_EN             0x00020000 /* EEER Rx LPI Enable */
+#define IGC_EEER_LPI_FC                        0x00040000 /* EEER Ena on Flow Cntrl */
+#define IGC_EEE_SU_LPI_CLK_STP         0x00800000 /* EEE LPI Clock Stop */
+
+/* LTR defines */
+#define IGC_LTRC_EEEMS_EN              0x00000020 /* Enable EEE LTR max send */
+#define IGC_RXPBS_SIZE_I225_MASK       0x0000003F /* Rx packet buffer size */
+#define IGC_TW_SYSTEM_1000_MASK                0x000000FF
+/* Minimum time for 100BASE-T where no data will be transmit following move out
+ * of EEE LPI Tx state
+ */
+#define IGC_TW_SYSTEM_100_MASK         0x0000FF00
+#define IGC_TW_SYSTEM_100_SHIFT                8
+#define IGC_DMACR_DMAC_EN              0x80000000 /* Enable DMA Coalescing */
+#define IGC_DMACR_DMACTHR_MASK         0x00FF0000
+#define IGC_DMACR_DMACTHR_SHIFT                16
+/* Reg val to set scale to 1024 nsec */
+#define IGC_LTRMINV_SCALE_1024         2
+/* Reg val to set scale to 32768 nsec */
+#define IGC_LTRMINV_SCALE_32768                3
+/* Reg val to set scale to 1024 nsec */
+#define IGC_LTRMAXV_SCALE_1024         2
+/* Reg val to set scale to 32768 nsec */
+#define IGC_LTRMAXV_SCALE_32768                3
+#define IGC_LTRMINV_LTRV_MASK          0x000003FF /* LTR minimum value */
+#define IGC_LTRMAXV_LTRV_MASK          0x000003FF /* LTR maximum value */
+#define IGC_LTRMINV_LSNP_REQ           0x00008000 /* LTR Snoop Requirement */
+#define IGC_LTRMINV_SCALE_SHIFT                10
+#define IGC_LTRMAXV_LSNP_REQ           0x00008000 /* LTR Snoop Requirement */
+#define IGC_LTRMAXV_SCALE_SHIFT                10
+
 #endif /* _IGC_DEFINES_H_ */
index a938ec8..44410c2 100644 (file)
@@ -4,6 +4,7 @@
 /* ethtool support for igc */
 #include <linux/if_vlan.h>
 #include <linux/pm_runtime.h>
+#include <linux/mdio.h>
 
 #include "igc.h"
 #include "igc_diag.h"
@@ -130,7 +131,6 @@ static void igc_ethtool_get_drvinfo(struct net_device *netdev,
        struct igc_adapter *adapter = netdev_priv(netdev);
 
        strlcpy(drvinfo->driver,  igc_driver_name, sizeof(drvinfo->driver));
-       strlcpy(drvinfo->version, igc_driver_version, sizeof(drvinfo->version));
 
        /* add fw_version here */
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
@@ -1015,37 +1015,29 @@ static int igc_ethtool_get_rss_hash_opts(struct igc_adapter *adapter,
        switch (cmd->flow_type) {
        case TCP_V4_FLOW:
                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-               /* Fall through */
+               fallthrough;
        case UDP_V4_FLOW:
                if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-               /* Fall through */
+               fallthrough;
        case SCTP_V4_FLOW:
-               /* Fall through */
        case AH_ESP_V4_FLOW:
-               /* Fall through */
        case AH_V4_FLOW:
-               /* Fall through */
        case ESP_V4_FLOW:
-               /* Fall through */
        case IPV4_FLOW:
                cmd->data |= RXH_IP_SRC | RXH_IP_DST;
                break;
        case TCP_V6_FLOW:
                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-               /* Fall through */
+               fallthrough;
        case UDP_V6_FLOW:
                if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-               /* Fall through */
+               fallthrough;
        case SCTP_V6_FLOW:
-               /* Fall through */
        case AH_ESP_V6_FLOW:
-               /* Fall through */
        case AH_V6_FLOW:
-               /* Fall through */
        case ESP_V6_FLOW:
-               /* Fall through */
        case IPV6_FLOW:
                cmd->data |= RXH_IP_SRC | RXH_IP_DST;
                break;
@@ -1549,6 +1541,98 @@ static int igc_ethtool_set_priv_flags(struct net_device *netdev, u32 priv_flags)
        return 0;
 }
 
+static int igc_ethtool_get_eee(struct net_device *netdev,
+                              struct ethtool_eee *edata)
+{
+       struct igc_adapter *adapter = netdev_priv(netdev);
+       struct igc_hw *hw = &adapter->hw;
+       u32 eeer;
+
+       if (hw->dev_spec._base.eee_enable)
+               edata->advertised =
+                       mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
+
+       *edata = adapter->eee;
+       edata->supported = SUPPORTED_Autoneg;
+
+       eeer = rd32(IGC_EEER);
+
+       /* EEE status on negotiated link */
+       if (eeer & IGC_EEER_EEE_NEG)
+               edata->eee_active = true;
+
+       if (eeer & IGC_EEER_TX_LPI_EN)
+               edata->tx_lpi_enabled = true;
+
+       edata->eee_enabled = hw->dev_spec._base.eee_enable;
+
+       edata->advertised = SUPPORTED_Autoneg;
+       edata->lp_advertised = SUPPORTED_Autoneg;
+
+       /* Report correct negotiated EEE status for devices that
+        * wrongly report EEE at half-duplex
+        */
+       if (adapter->link_duplex == HALF_DUPLEX) {
+               edata->eee_enabled = false;
+               edata->eee_active = false;
+               edata->tx_lpi_enabled = false;
+               edata->advertised &= ~edata->advertised;
+       }
+
+       return 0;
+}
+
+static int igc_ethtool_set_eee(struct net_device *netdev,
+                              struct ethtool_eee *edata)
+{
+       struct igc_adapter *adapter = netdev_priv(netdev);
+       struct igc_hw *hw = &adapter->hw;
+       struct ethtool_eee eee_curr;
+       s32 ret_val;
+
+       memset(&eee_curr, 0, sizeof(struct ethtool_eee));
+
+       ret_val = igc_ethtool_get_eee(netdev, &eee_curr);
+       if (ret_val) {
+               netdev_err(netdev,
+                          "Problem setting EEE advertisement options\n");
+               return -EINVAL;
+       }
+
+       if (eee_curr.eee_enabled) {
+               if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
+                       netdev_err(netdev,
+                                  "Setting EEE tx-lpi is not supported\n");
+                       return -EINVAL;
+               }
+
+               /* Tx LPI timer is not implemented currently */
+               if (edata->tx_lpi_timer) {
+                       netdev_err(netdev,
+                                  "Setting EEE Tx LPI timer is not supported\n");
+                       return -EINVAL;
+               }
+       } else if (!edata->eee_enabled) {
+               netdev_err(netdev,
+                          "Setting EEE options are not supported with EEE disabled\n");
+               return -EINVAL;
+       }
+
+       adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+       if (hw->dev_spec._base.eee_enable != edata->eee_enabled) {
+               hw->dev_spec._base.eee_enable = edata->eee_enabled;
+               adapter->flags |= IGC_FLAG_EEE;
+
+               /* reset link */
+               if (netif_running(netdev))
+                       igc_reinit_locked(adapter);
+               else
+                       igc_reset(adapter);
+       }
+
+       return 0;
+}
+
 static int igc_ethtool_begin(struct net_device *netdev)
 {
        struct igc_adapter *adapter = netdev_priv(netdev);
@@ -1830,6 +1914,8 @@ static const struct ethtool_ops igc_ethtool_ops = {
        .set_channels           = igc_ethtool_set_channels,
        .get_priv_flags         = igc_ethtool_get_priv_flags,
        .set_priv_flags         = igc_ethtool_set_priv_flags,
+       .get_eee                = igc_ethtool_get_eee,
+       .set_eee                = igc_ethtool_set_eee,
        .begin                  = igc_ethtool_begin,
        .complete               = igc_ethtool_complete,
        .get_link_ksettings     = igc_ethtool_get_link_ksettings,
index af34ae3..2ab7d9f 100644 (file)
@@ -191,6 +191,7 @@ struct igc_fc_info {
 
 struct igc_dev_spec_base {
        bool clear_semaphore_once;
+       bool eee_enable;
 };
 
 struct igc_hw {
index c25f555..8b67d9b 100644 (file)
@@ -488,3 +488,159 @@ s32 igc_init_nvm_params_i225(struct igc_hw *hw)
        }
        return 0;
 }
+
+/**
+ *  igc_set_eee_i225 - Enable/disable EEE support
+ *  @hw: pointer to the HW structure
+ *  @adv2p5G: boolean flag enabling 2.5G EEE advertisement
+ *  @adv1G: boolean flag enabling 1G EEE advertisement
+ *  @adv100M: boolean flag enabling 100M EEE advertisement
+ *
+ *  Enable/disable EEE based on setting in dev_spec structure.
+ **/
+s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G,
+                    bool adv100M)
+{
+       u32 ipcnfg, eeer;
+
+       ipcnfg = rd32(IGC_IPCNFG);
+       eeer = rd32(IGC_EEER);
+
+       /* enable or disable per user setting */
+       if (hw->dev_spec._base.eee_enable) {
+               u32 eee_su = rd32(IGC_EEE_SU);
+
+               if (adv100M)
+                       ipcnfg |= IGC_IPCNFG_EEE_100M_AN;
+               else
+                       ipcnfg &= ~IGC_IPCNFG_EEE_100M_AN;
+
+               if (adv1G)
+                       ipcnfg |= IGC_IPCNFG_EEE_1G_AN;
+               else
+                       ipcnfg &= ~IGC_IPCNFG_EEE_1G_AN;
+
+               if (adv2p5G)
+                       ipcnfg |= IGC_IPCNFG_EEE_2_5G_AN;
+               else
+                       ipcnfg &= ~IGC_IPCNFG_EEE_2_5G_AN;
+
+               eeer |= (IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
+                        IGC_EEER_LPI_FC);
+
+               /* This bit should not be set in normal operation. */
+               if (eee_su & IGC_EEE_SU_LPI_CLK_STP)
+                       hw_dbg("LPI Clock Stop Bit should not be set!\n");
+       } else {
+               ipcnfg &= ~(IGC_IPCNFG_EEE_2_5G_AN | IGC_IPCNFG_EEE_1G_AN |
+                           IGC_IPCNFG_EEE_100M_AN);
+               eeer &= ~(IGC_EEER_TX_LPI_EN | IGC_EEER_RX_LPI_EN |
+                         IGC_EEER_LPI_FC);
+       }
+       wr32(IGC_IPCNFG, ipcnfg);
+       wr32(IGC_EEER, eeer);
+       rd32(IGC_IPCNFG);
+       rd32(IGC_EEER);
+
+       return IGC_SUCCESS;
+}
+
+/* igc_set_ltr_i225 - Set Latency Tolerance Reporting thresholds
+ * @hw: pointer to the HW structure
+ * @link: bool indicating link status
+ *
+ * Set the LTR thresholds based on the link speed (Mbps), EEE, and DMAC
+ * settings, otherwise specify that there is no LTR requirement.
+ */
+s32 igc_set_ltr_i225(struct igc_hw *hw, bool link)
+{
+       u32 tw_system, ltrc, ltrv, ltr_min, ltr_max, scale_min, scale_max;
+       u16 speed, duplex;
+       s32 size;
+
+       /* If we do not have link, LTR thresholds are zero. */
+       if (link) {
+               hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
+
+               /* Check if using copper interface with EEE enabled or if the
+                * link speed is 10 Mbps.
+                */
+               if (hw->dev_spec._base.eee_enable &&
+                   speed != SPEED_10) {
+                       /* EEE enabled, so send LTRMAX threshold. */
+                       ltrc = rd32(IGC_LTRC) |
+                              IGC_LTRC_EEEMS_EN;
+                       wr32(IGC_LTRC, ltrc);
+
+                       /* Calculate tw_system (nsec). */
+                       if (speed == SPEED_100) {
+                               tw_system = ((rd32(IGC_EEE_SU) &
+                                            IGC_TW_SYSTEM_100_MASK) >>
+                                            IGC_TW_SYSTEM_100_SHIFT) * 500;
+                       } else {
+                               tw_system = (rd32(IGC_EEE_SU) &
+                                            IGC_TW_SYSTEM_1000_MASK) * 500;
+                       }
+               } else {
+                       tw_system = 0;
+               }
+
+               /* Get the Rx packet buffer size. */
+               size = rd32(IGC_RXPBS) &
+                      IGC_RXPBS_SIZE_I225_MASK;
+
+               /* Calculations vary based on DMAC settings. */
+               if (rd32(IGC_DMACR) & IGC_DMACR_DMAC_EN) {
+                       size -= (rd32(IGC_DMACR) &
+                                IGC_DMACR_DMACTHR_MASK) >>
+                                IGC_DMACR_DMACTHR_SHIFT;
+                       /* Convert size to bits. */
+                       size *= 1024 * 8;
+               } else {
+                       /* Convert size to bytes, subtract the MTU, and then
+                        * convert the size to bits.
+                        */
+                       size *= 1024;
+                       size *= 8;
+               }
+
+               if (size < 0) {
+                       hw_dbg("Invalid effective Rx buffer size %d\n",
+                              size);
+                       return -IGC_ERR_CONFIG;
+               }
+
+               /* Calculate the thresholds. Since speed is in Mbps, simplify
+                * the calculation by multiplying size/speed by 1000 for result
+                * to be in nsec before dividing by the scale in nsec. Set the
+                * scale such that the LTR threshold fits in the register.
+                */
+               ltr_min = (1000 * size) / speed;
+               ltr_max = ltr_min + tw_system;
+               scale_min = (ltr_min / 1024) < 1024 ? IGC_LTRMINV_SCALE_1024 :
+                           IGC_LTRMINV_SCALE_32768;
+               scale_max = (ltr_max / 1024) < 1024 ? IGC_LTRMAXV_SCALE_1024 :
+                           IGC_LTRMAXV_SCALE_32768;
+               ltr_min /= scale_min == IGC_LTRMINV_SCALE_1024 ? 1024 : 32768;
+               ltr_min -= 1;
+               ltr_max /= scale_max == IGC_LTRMAXV_SCALE_1024 ? 1024 : 32768;
+               ltr_max -= 1;
+
+               /* Only write the LTR thresholds if they differ from before. */
+               ltrv = rd32(IGC_LTRMINV);
+               if (ltr_min != (ltrv & IGC_LTRMINV_LTRV_MASK)) {
+                       ltrv = IGC_LTRMINV_LSNP_REQ | ltr_min |
+                              (scale_min << IGC_LTRMINV_SCALE_SHIFT);
+                       wr32(IGC_LTRMINV, ltrv);
+               }
+
+               ltrv = rd32(IGC_LTRMAXV);
+               if (ltr_max != (ltrv & IGC_LTRMAXV_LTRV_MASK)) {
+                       ltrv = IGC_LTRMAXV_LSNP_REQ | ltr_max |
+                              (scale_min << IGC_LTRMAXV_SCALE_SHIFT);
+                       wr32(IGC_LTRMAXV, ltrv);
+               }
+       }
+
+       return IGC_SUCCESS;
+}
index 7b66e1f..dae47e4 100644 (file)
@@ -9,5 +9,8 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask);
 
 s32 igc_init_nvm_params_i225(struct igc_hw *hw);
 bool igc_get_flash_presence_i225(struct igc_hw *hw);
+s32 igc_set_eee_i225(struct igc_hw *hw, bool adv2p5G, bool adv1G,
+                    bool adv100M);
+s32 igc_set_ltr_i225(struct igc_hw *hw, bool link);
 
 #endif
index 410aeb0..b47e7b0 100644 (file)
@@ -289,7 +289,6 @@ void igc_clear_hw_cntrs_base(struct igc_hw *hw)
        rd32(IGC_TNCRS);
        rd32(IGC_HTDPMC);
        rd32(IGC_TSCTC);
-       rd32(IGC_TSCTFC);
 
        rd32(IGC_MGTPRC);
        rd32(IGC_MGTPDC);
@@ -307,6 +306,8 @@ void igc_clear_hw_cntrs_base(struct igc_hw *hw)
        rd32(IGC_ICRXDMTC);
 
        rd32(IGC_RPTHC);
+       rd32(IGC_TLPIC);
+       rd32(IGC_RLPIC);
        rd32(IGC_HGPTC);
        rd32(IGC_HGORCL);
        rd32(IGC_HGORCH);
@@ -417,6 +418,11 @@ s32 igc_check_for_copper_link(struct igc_hw *hw)
                hw_dbg("Error configuring flow control\n");
 
 out:
+       /* Now that we are aware of our link settings, we can set the LTR
+        * thresholds.
+        */
+       ret_val = igc_set_ltr_i225(hw, link);
+
        return ret_val;
 }
 
@@ -462,10 +468,8 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw)
         * so we had to force link.  In this case, we need to force the
         * configuration of the MAC to match the "fc" parameter.
         */
-       if (mac->autoneg_failed) {
-               if (hw->phy.media_type == igc_media_type_copper)
-                       ret_val = igc_force_mac_fc(hw);
-       }
+       if (mac->autoneg_failed)
+               ret_val = igc_force_mac_fc(hw);
 
        if (ret_val) {
                hw_dbg("Error forcing flow control settings\n");
@@ -477,7 +481,7 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw)
         * has completed, and if so, how the PHY and link partner has
         * flow control configured.
         */
-       if (hw->phy.media_type == igc_media_type_copper && mac->autoneg) {
+       if (mac->autoneg) {
                /* Read the MII Status Register and check to see if AutoNeg
                 * has completed.  We read this twice because this reg has
                 * some "sticky" (latched) bits.
index 6919c50..8d5869d 100644 (file)
@@ -17,7 +17,6 @@
 #include "igc_hw.h"
 #include "igc_tsn.h"
 
-#define DRV_VERSION    "0.0.1-k"
 #define DRV_SUMMARY    "Intel(R) 2.5G Ethernet Linux Driver"
 
 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
@@ -27,12 +26,10 @@ static int debug = -1;
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION(DRV_SUMMARY);
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
 char igc_driver_name[] = "igc";
-char igc_driver_version[] = DRV_VERSION;
 static const char igc_driver_string[] = DRV_SUMMARY;
 static const char igc_copyright[] =
        "Copyright(c) 2018 Intel Corporation.";
@@ -64,16 +61,6 @@ enum latency_range {
        latency_invalid = 255
 };
 
-/**
- * igc_power_down_link - Power down the phy/serdes link
- * @adapter: address of board private structure
- */
-static void igc_power_down_link(struct igc_adapter *adapter)
-{
-       if (adapter->hw.phy.media_type == igc_media_type_copper)
-               igc_power_down_phy_copper_base(&adapter->hw);
-}
-
 void igc_reset(struct igc_adapter *adapter)
 {
        struct net_device *dev = adapter->netdev;
@@ -105,8 +92,11 @@ void igc_reset(struct igc_adapter *adapter)
        if (hw->mac.ops.init_hw(hw))
                netdev_err(dev, "Error on hardware initialization\n");
 
+       /* Re-establish EEE setting */
+       igc_set_eee_i225(hw, true, true, true);
+
        if (!netif_running(adapter->netdev))
-               igc_power_down_link(adapter);
+               igc_power_down_phy_copper_base(&adapter->hw);
 
        /* Re-enable PTP, where applicable. */
        igc_ptp_reset(adapter);
@@ -125,8 +115,7 @@ static void igc_power_up_link(struct igc_adapter *adapter)
 {
        igc_reset_phy(&adapter->hw);
 
-       if (adapter->hw.phy.media_type == igc_media_type_copper)
-               igc_power_up_phy_copper(&adapter->hw);
+       igc_power_up_phy_copper(&adapter->hw);
 
        igc_setup_link(&adapter->hw);
 }
@@ -980,7 +969,7 @@ csum_failed:
        switch (skb->csum_offset) {
        case offsetof(struct tcphdr, check):
                type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
-               /* fall through */
+               fallthrough;
        case offsetof(struct udphdr, check):
                break;
        case offsetof(struct sctphdr, checksum):
@@ -992,7 +981,7 @@ csum_failed:
                        type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
                        break;
                }
-               /* fall through */
+               fallthrough;
        default:
                skb_checksum_help(skb);
                goto csum_failed;
@@ -1479,9 +1468,9 @@ static inline void igc_rx_hash(struct igc_ring *ring,
  * @rx_desc: pointer to the EOP Rx descriptor
  * @skb: pointer to current skb being populated
  *
- * This function checks the ring, descriptor, and packet information in
- * order to populate the hash, checksum, VLAN, timestamp, protocol, and
- * other fields within the skb.
+ * This function checks the ring, descriptor, and packet information in order
+ * to populate the hash, checksum, VLAN, protocol, and other fields within the
+ * skb.
  */
 static void igc_process_skb_fields(struct igc_ring *rx_ring,
                                   union igc_adv_rx_desc *rx_desc,
@@ -1491,10 +1480,6 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring,
 
        igc_rx_checksum(rx_ring, rx_desc, skb);
 
-       if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TS) &&
-           !igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP))
-               igc_ptp_rx_rgtstamp(rx_ring->q_vector, skb);
-
        skb_record_rx_queue(skb, rx_ring->queue_index);
 
        skb->protocol = eth_type_trans(skb, rx_ring->netdev);
@@ -1975,7 +1960,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
                /* probably a little skewed due to removing CRC */
                total_bytes += skb->len;
 
-               /* populate checksum, timestamp, VLAN, and protocol */
+               /* populate checksum, VLAN, and protocol */
                igc_process_skb_fields(rx_ring, rx_desc, skb);
 
                napi_gro_receive(&q_vector->napi, skb);
@@ -3284,7 +3269,6 @@ static void igc_cache_ring_register(struct igc_adapter *adapter)
 
        switch (adapter->hw.mac.type) {
        case igc_i225:
-       /* Fall through */
        default:
                for (; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i]->reg_idx = i;
@@ -3744,7 +3728,6 @@ void igc_update_stats(struct igc_adapter *adapter)
        adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
 
        adapter->stats.tsctc += rd32(IGC_TSCTC);
-       adapter->stats.tsctfc += rd32(IGC_TSCTFC);
 
        adapter->stats.iac += rd32(IGC_IAC);
        adapter->stats.icrxoc += rd32(IGC_ICRXOC);
@@ -4255,6 +4238,15 @@ static void igc_watchdog_task(struct work_struct *work)
                                    (ctrl & IGC_CTRL_RFCE) ?  "RX" :
                                    (ctrl & IGC_CTRL_TFCE) ?  "TX" : "None");
 
+                       /* disable EEE if enabled */
+                       if ((adapter->flags & IGC_FLAG_EEE) &&
+                           adapter->link_duplex == HALF_DUPLEX) {
+                               netdev_info(netdev,
+                                           "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n");
+                               adapter->hw.dev_spec._base.eee_enable = false;
+                               adapter->flags &= ~IGC_FLAG_EEE;
+                       }
+
                        /* check if SmartSpeed worked */
                        igc_check_downshift(hw);
                        if (phy->speed_downgraded)
@@ -4611,7 +4603,7 @@ err_set_queues:
        igc_free_irq(adapter);
 err_req_irq:
        igc_release_hw_control(adapter);
-       igc_power_down_link(adapter);
+       igc_power_down_phy_copper_base(&adapter->hw);
        igc_free_all_rx_resources(adapter);
 err_setup_rx:
        igc_free_all_tx_resources(adapter);
@@ -5185,6 +5177,10 @@ static int igc_probe(struct pci_dev *pdev,
        netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
 
        dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+       /* Disable EEE for internal PHY devices */
+       hw->dev_spec._base.eee_enable = false;
+       adapter->flags &= ~IGC_FLAG_EEE;
+       igc_set_eee_i225(hw, false, false, false);
 
        pm_runtime_put_noidle(&pdev->dev);
 
@@ -5305,7 +5301,7 @@ static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
 
        wake = wufc || adapter->en_mng_pt;
        if (!wake)
-               igc_power_down_link(adapter);
+               igc_power_down_phy_copper_base(&adapter->hw);
        else
                igc_power_up_link(adapter);
 
@@ -5614,9 +5610,7 @@ static int __init igc_init_module(void)
 {
        int ret;
 
-       pr_info("%s - version %s\n",
-               igc_driver_string, igc_driver_version);
-
+       pr_info("%s\n", igc_driver_string);
        pr_info("%s\n", igc_copyright);
 
        ret = pci_register_driver(&igc_driver);
index 0d746f8..e67d465 100644 (file)
@@ -205,78 +205,66 @@ void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
                ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
 }
 
-/**
- * igc_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register
- * @q_vector: Pointer to interrupt specific structure
- * @skb: Buffer containing timestamp and packet
- *
- * This function is meant to retrieve a timestamp from the internal registers
- * of the adapter and store it in the skb.
- */
-void igc_ptp_rx_rgtstamp(struct igc_q_vector *q_vector,
-                        struct sk_buff *skb)
+static void igc_ptp_disable_rx_timestamp(struct igc_adapter *adapter)
 {
-       struct igc_adapter *adapter = q_vector->adapter;
        struct igc_hw *hw = &adapter->hw;
-       u64 regval;
-
-       /* If this bit is set, then the RX registers contain the time
-        * stamp. No other packet will be time stamped until we read
-        * these registers, so read the registers to make them
-        * available again. Because only one packet can be time
-        * stamped at a time, we know that the register values must
-        * belong to this one here and therefore we don't need to
-        * compare any of the additional attributes stored for it.
-        *
-        * If nothing went wrong, then it should have a shared
-        * tx_flags that we can turn into a skb_shared_hwtstamps.
-        */
-       if (!(rd32(IGC_TSYNCRXCTL) & IGC_TSYNCRXCTL_VALID))
-               return;
+       u32 val;
+       int i;
 
-       regval = rd32(IGC_RXSTMPL);
-       regval |= (u64)rd32(IGC_RXSTMPH) << 32;
+       wr32(IGC_TSYNCRXCTL, 0);
 
-       igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               val = rd32(IGC_SRRCTL(i));
+               val &= ~IGC_SRRCTL_TIMESTAMP;
+               wr32(IGC_SRRCTL(i), val);
+       }
 
-       /* Update the last_rx_timestamp timer in order to enable watchdog check
-        * for error case of latched timestamp on a dropped packet.
-        */
-       adapter->last_rx_timestamp = jiffies;
+       val = rd32(IGC_RXPBS);
+       val &= ~IGC_RXPBS_CFG_TS_EN;
+       wr32(IGC_RXPBS, val);
 }
 
-/**
- * igc_ptp_enable_tstamp_rxqueue - Enable RX timestamp for a queue
- * @rx_ring: Pointer to RX queue
- * @timer: Index for timer
- *
- * This function enables RX timestamping for a queue, and selects
- * which 1588 timer will provide the timestamp.
- */
-static void igc_ptp_enable_tstamp_rxqueue(struct igc_adapter *adapter,
-                                         struct igc_ring *rx_ring, u8 timer)
+static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter)
 {
        struct igc_hw *hw = &adapter->hw;
-       int reg_idx = rx_ring->reg_idx;
-       u32 srrctl = rd32(IGC_SRRCTL(reg_idx));
+       u32 val;
+       int i;
+
+       val = rd32(IGC_RXPBS);
+       val |= IGC_RXPBS_CFG_TS_EN;
+       wr32(IGC_RXPBS, val);
 
-       srrctl |= IGC_SRRCTL_TIMESTAMP;
-       srrctl |= IGC_SRRCTL_TIMER1SEL(timer);
-       srrctl |= IGC_SRRCTL_TIMER0SEL(timer);
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               val = rd32(IGC_SRRCTL(i));
+               /* FIXME: For now, only support retrieving RX timestamps from
+                * timer 0.
+                */
+               val |= IGC_SRRCTL_TIMER1SEL(0) | IGC_SRRCTL_TIMER0SEL(0) |
+                      IGC_SRRCTL_TIMESTAMP;
+               wr32(IGC_SRRCTL(i), val);
+       }
 
-       wr32(IGC_SRRCTL(reg_idx), srrctl);
+       val = IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_ALL |
+             IGC_TSYNCRXCTL_RXSYNSIG;
+       wr32(IGC_TSYNCRXCTL, val);
 }
 
-static void igc_ptp_enable_tstamp_all_rxqueues(struct igc_adapter *adapter,
-                                              u8 timer)
+static void igc_ptp_disable_tx_timestamp(struct igc_adapter *adapter)
 {
-       int i;
+       struct igc_hw *hw = &adapter->hw;
 
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct igc_ring *ring = adapter->rx_ring[i];
+       wr32(IGC_TSYNCTXCTL, 0);
+}
 
-               igc_ptp_enable_tstamp_rxqueue(adapter, ring, timer);
-       }
+static void igc_ptp_enable_tx_timestamp(struct igc_adapter *adapter)
+{
+       struct igc_hw *hw = &adapter->hw;
+
+       wr32(IGC_TSYNCTXCTL, IGC_TSYNCTXCTL_ENABLED | IGC_TSYNCTXCTL_TXSYNSIG);
+
+       /* Read TXSTMP registers to discard any timestamp previously stored. */
+       rd32(IGC_TXSTMPL);
+       rd32(IGC_TXSTMPH);
 }
 
 /**
@@ -284,37 +272,21 @@ static void igc_ptp_enable_tstamp_all_rxqueues(struct igc_adapter *adapter,
  * @adapter: networking device structure
  * @config: hwtstamp configuration
  *
- * Outgoing time stamping can be enabled and disabled. Play nice and
- * disable it when requested, although it shouldn't case any overhead
- * when no packet needs it. At most one packet in the queue may be
- * marked for time stamping, otherwise it would be impossible to tell
- * for sure to which packet the hardware time stamp belongs.
- *
- * Incoming time stamping has to be configured via the hardware
- * filters. Not all combinations are supported, in particular event
- * type has to be specified. Matching the kind of event packet is
- * not supported, with the exception of "all V2 events regardless of
- * level 2 or 4".
- *
+ * Return: 0 in case of success, negative errno code otherwise.
  */
 static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
                                      struct hwtstamp_config *config)
 {
-       u32 tsync_tx_ctl = IGC_TSYNCTXCTL_ENABLED;
-       u32 tsync_rx_ctl = IGC_TSYNCRXCTL_ENABLED;
-       struct igc_hw *hw = &adapter->hw;
-       u32 tsync_rx_cfg = 0;
-       bool is_l4 = false;
-       u32 regval;
-
        /* reserved for future extensions */
        if (config->flags)
                return -EINVAL;
 
        switch (config->tx_type) {
        case HWTSTAMP_TX_OFF:
-               tsync_tx_ctl = 0;
+               igc_ptp_disable_tx_timestamp(adapter);
+               break;
        case HWTSTAMP_TX_ON:
+               igc_ptp_enable_tx_timestamp(adapter);
                break;
        default:
                return -ERANGE;
@@ -322,18 +294,10 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
 
        switch (config->rx_filter) {
        case HWTSTAMP_FILTER_NONE:
-               tsync_rx_ctl = 0;
+               igc_ptp_disable_rx_timestamp(adapter);
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
-               tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_L4_V1;
-               tsync_rx_cfg = IGC_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
-               is_l4 = true;
-               break;
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
-               tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_L4_V1;
-               tsync_rx_cfg = IGC_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
-               is_l4 = true;
-               break;
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -343,99 +307,36 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
-               tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_EVENT_V2;
-               config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
-               is_l4 = true;
-               break;
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
        case HWTSTAMP_FILTER_NTP_ALL:
        case HWTSTAMP_FILTER_ALL:
-               tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_ALL;
+               igc_ptp_enable_rx_timestamp(adapter);
                config->rx_filter = HWTSTAMP_FILTER_ALL;
                break;
-               /* fall through */
        default:
-               config->rx_filter = HWTSTAMP_FILTER_NONE;
                return -ERANGE;
        }
 
-       /* Per-packet timestamping only works if all packets are
-        * timestamped, so enable timestamping in all packets as long
-        * as one Rx filter was configured.
-        */
-       if (tsync_rx_ctl) {
-               tsync_rx_ctl = IGC_TSYNCRXCTL_ENABLED;
-               tsync_rx_ctl |= IGC_TSYNCRXCTL_TYPE_ALL;
-               tsync_rx_ctl |= IGC_TSYNCRXCTL_RXSYNSIG;
-               config->rx_filter = HWTSTAMP_FILTER_ALL;
-               is_l4 = true;
-
-               if (hw->mac.type == igc_i225) {
-                       regval = rd32(IGC_RXPBS);
-                       regval |= IGC_RXPBS_CFG_TS_EN;
-                       wr32(IGC_RXPBS, regval);
-
-                       /* FIXME: For now, only support retrieving RX
-                        * timestamps from timer 0
-                        */
-                       igc_ptp_enable_tstamp_all_rxqueues(adapter, 0);
-               }
-       }
-
-       if (tsync_tx_ctl) {
-               tsync_tx_ctl = IGC_TSYNCTXCTL_ENABLED;
-               tsync_tx_ctl |= IGC_TSYNCTXCTL_TXSYNSIG;
-       }
-
-       /* enable/disable TX */
-       regval = rd32(IGC_TSYNCTXCTL);
-       regval &= ~IGC_TSYNCTXCTL_ENABLED;
-       regval |= tsync_tx_ctl;
-       wr32(IGC_TSYNCTXCTL, regval);
-
-       /* enable/disable RX */
-       regval = rd32(IGC_TSYNCRXCTL);
-       regval &= ~(IGC_TSYNCRXCTL_ENABLED | IGC_TSYNCRXCTL_TYPE_MASK);
-       regval |= tsync_rx_ctl;
-       wr32(IGC_TSYNCRXCTL, regval);
-
-       /* define which PTP packets are time stamped */
-       wr32(IGC_TSYNCRXCFG, tsync_rx_cfg);
-
-       /* L4 Queue Filter[3]: filter by destination port and protocol */
-       if (is_l4) {
-               u32 ftqf = (IPPROTO_UDP /* UDP */
-                           | IGC_FTQF_VF_BP /* VF not compared */
-                           | IGC_FTQF_1588_TIME_STAMP /* Enable Timestamp */
-                           | IGC_FTQF_MASK); /* mask all inputs */
-               ftqf &= ~IGC_FTQF_MASK_PROTO_BP; /* enable protocol check */
-
-               wr32(IGC_IMIR(3), htons(PTP_EV_PORT));
-               wr32(IGC_IMIREXT(3),
-                    (IGC_IMIREXT_SIZE_BP | IGC_IMIREXT_CTRL_BP));
-               wr32(IGC_FTQF(3), ftqf);
-       } else {
-               wr32(IGC_FTQF(3), IGC_FTQF_MASK);
-       }
-       wrfl();
+       return 0;
+}
 
-       /* clear TX/RX time stamp registers, just to be sure */
-       regval = rd32(IGC_TXSTMPL);
-       regval = rd32(IGC_TXSTMPH);
-       regval = rd32(IGC_RXSTMPL);
-       regval = rd32(IGC_RXSTMPH);
+static void igc_ptp_tx_timeout(struct igc_adapter *adapter)
+{
+       struct igc_hw *hw = &adapter->hw;
 
-       return 0;
+       dev_kfree_skb_any(adapter->ptp_tx_skb);
+       adapter->ptp_tx_skb = NULL;
+       adapter->tx_hwtstamp_timeouts++;
+       clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+       /* Clear the tx valid bit in TSYNCTXCTL register to enable interrupt. */
+       rd32(IGC_TXSTMPH);
+       netdev_warn(adapter->netdev, "Tx timestamp timeout\n");
 }
 
 void igc_ptp_tx_hang(struct igc_adapter *adapter)
 {
        bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
                                              IGC_PTP_TX_TIMEOUT);
-       struct igc_hw *hw = &adapter->hw;
-
-       if (!adapter->ptp_tx_skb)
-               return;
 
        if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state))
                return;
@@ -446,15 +347,7 @@ void igc_ptp_tx_hang(struct igc_adapter *adapter)
         */
        if (timeout) {
                cancel_work_sync(&adapter->ptp_tx_work);
-               dev_kfree_skb_any(adapter->ptp_tx_skb);
-               adapter->ptp_tx_skb = NULL;
-               clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
-               adapter->tx_hwtstamp_timeouts++;
-               /* Clear the Tx valid bit in TSYNCTXCTL register to enable
-                * interrupt
-                */
-               rd32(IGC_TXSTMPH);
-               netdev_warn(adapter->netdev, "Clearing Tx timestamp hang\n");
+               igc_ptp_tx_timeout(adapter);
        }
 }
 
@@ -473,6 +366,9 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
        struct igc_hw *hw = &adapter->hw;
        u64 regval;
 
+       if (WARN_ON_ONCE(!skb))
+               return;
+
        regval = rd32(IGC_TXSTMPL);
        regval |= (u64)rd32(IGC_TXSTMPH) << 32;
        igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
@@ -504,20 +400,12 @@ static void igc_ptp_tx_work(struct work_struct *work)
        struct igc_hw *hw = &adapter->hw;
        u32 tsynctxctl;
 
-       if (!adapter->ptp_tx_skb)
+       if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state))
                return;
 
        if (time_is_before_jiffies(adapter->ptp_tx_start +
                                   IGC_PTP_TX_TIMEOUT)) {
-               dev_kfree_skb_any(adapter->ptp_tx_skb);
-               adapter->ptp_tx_skb = NULL;
-               clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
-               adapter->tx_hwtstamp_timeouts++;
-               /* Clear the tx valid bit in TSYNCTXCTL register to enable
-                * interrupt
-                */
-               rd32(IGC_TXSTMPH);
-               netdev_warn(adapter->netdev, "Clearing Tx timestamp hang\n");
+               igc_ptp_tx_timeout(adapter);
                return;
        }
 
@@ -634,11 +522,9 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
                return;
 
        cancel_work_sync(&adapter->ptp_tx_work);
-       if (adapter->ptp_tx_skb) {
-               dev_kfree_skb_any(adapter->ptp_tx_skb);
-               adapter->ptp_tx_skb = NULL;
-               clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
-       }
+       dev_kfree_skb_any(adapter->ptp_tx_skb);
+       adapter->ptp_tx_skb = NULL;
+       clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
 }
 
 /**
index 232e82d..1c46cec 100644 (file)
 #define IGC_MPTC       0x040F0  /* Multicast Packets Tx Count - R/clr */
 #define IGC_BPTC       0x040F4  /* Broadcast Packets Tx Count - R/clr */
 #define IGC_TSCTC      0x040F8  /* TCP Segmentation Context Tx - R/clr */
-#define IGC_TSCTFC     0x040FC  /* TCP Segmentation Context Tx Fail - R/clr */
 #define IGC_IAC                0x04100  /* Interrupt Assertion Count */
 #define IGC_ICTXPTC    0x0410C  /* Interrupt Cause Tx Pkt Timer Expire Count */
 #define IGC_ICTXATC    0x04110  /* Interrupt Cause Tx Abs Timer Expire Count */
 #define IGC_ICTXQEC    0x04118  /* Interrupt Cause Tx Queue Empty Count */
 #define IGC_ICTXQMTC   0x0411C  /* Interrupt Cause Tx Queue Min Thresh Count */
 #define IGC_RPTHC      0x04104  /* Rx Packets To Host */
+#define IGC_TLPIC      0x04148  /* EEE Tx LPI Count */
+#define IGC_RLPIC      0x0414C  /* EEE Rx LPI Count */
 #define IGC_HGPTC      0x04118  /* Host Good Packets Tx Count */
 #define IGC_RXDMTC     0x04120  /* Rx Descriptor Minimum Threshold Count */
 #define IGC_HGORCL     0x04128  /* Host Good Octets Received Count Low */
 #define IGC_SYSTIMR    0x0B6F8  /* System time register Residue */
 #define IGC_TIMINCA    0x0B608  /* Increment attributes register - RW */
 
-#define IGC_RXSTMPL    0x0B624  /* Rx timestamp Low - RO */
-#define IGC_RXSTMPH    0x0B628  /* Rx timestamp High - RO */
 #define IGC_TXSTMPL    0x0B618  /* Tx timestamp value Low - RO */
 #define IGC_TXSTMPH    0x0B61C  /* Tx timestamp value High - RO */
 
 /* Wake Up packet memory */
 #define IGC_WUPM_REG(_i)       (0x05A00 + ((_i) * 4))
 
+/* Energy Efficient Ethernet "EEE" registers */
+#define IGC_EEER       0x0E30 /* Energy Efficient Ethernet "EEE"*/
+#define IGC_IPCNFG     0x0E38 /* Internal PHY Configuration */
+#define IGC_EEE_SU     0x0E34 /* EEE Setup */
+
+/* LTR registers */
+#define IGC_LTRC       0x01A0 /* Latency Tolerance Reporting Control */
+#define IGC_DMACR      0x02508 /* DMA Coalescing Control Register */
+#define IGC_LTRMINV    0x5BB0 /* LTR Minimum Value */
+#define IGC_LTRMAXV    0x5BB4 /* LTR Maximum Value */
+
 /* forward declaration */
 struct igc_hw;
 u32 igc_rd32(struct igc_hw *hw, u32 reg);
index 681d44c..81ac395 100644 (file)
@@ -163,7 +163,6 @@ enum ixgb_state_t {
 void ixgb_check_options(struct ixgb_adapter *adapter);
 void ixgb_set_ethtool_ops(struct net_device *netdev);
 extern char ixgb_driver_name[];
-extern const char ixgb_driver_version[];
 
 void ixgb_set_speed_duplex(struct net_device *netdev);
 
index c65eb1a..582099a 100644 (file)
@@ -458,8 +458,6 @@ ixgb_get_drvinfo(struct net_device *netdev,
 
        strlcpy(drvinfo->driver,  ixgb_driver_name,
                sizeof(drvinfo->driver));
-       strlcpy(drvinfo->version, ixgb_driver_version,
-               sizeof(drvinfo->version));
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
 }
index b64e91e..46829cf 100644 (file)
@@ -9,9 +9,6 @@
 char ixgb_driver_name[] = "ixgb";
 static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
 
-#define DRIVERNAPI "-NAPI"
-#define DRV_VERSION "1.0.135-k2" DRIVERNAPI
-const char ixgb_driver_version[] = DRV_VERSION;
 static const char ixgb_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";
 
 #define IXGB_CB_LENGTH 256
@@ -103,7 +100,6 @@ static struct pci_driver ixgb_driver = {
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
 
 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
 static int debug = -1;
@@ -120,7 +116,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 static int __init
 ixgb_init_module(void)
 {
-       pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
+       pr_info("%s\n", ixgb_driver_string);
        pr_info("%s\n", ixgb_copyright);
 
        return pci_register_driver(&ixgb_driver);
index 5ddfc83..1e8a809 100644 (file)
@@ -588,11 +588,9 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG_FCOE_ENABLED                        BIT(21)
 #define IXGBE_FLAG_SRIOV_CAPABLE               BIT(22)
 #define IXGBE_FLAG_SRIOV_ENABLED               BIT(23)
-#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE       BIT(24)
 #define IXGBE_FLAG_RX_HWTSTAMP_ENABLED         BIT(25)
 #define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER     BIT(26)
 #define IXGBE_FLAG_DCB_CAPABLE                 BIT(27)
-#define IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE      BIT(28)
 
        u32 flags2;
 #define IXGBE_FLAG2_RSC_CAPABLE                        BIT(0)
@@ -606,7 +604,6 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP         BIT(9)
 #define IXGBE_FLAG2_PTP_PPS_ENABLED            BIT(10)
 #define IXGBE_FLAG2_PHY_INTERRUPT              BIT(11)
-#define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED       BIT(12)
 #define IXGBE_FLAG2_VLAN_PROMISC               BIT(13)
 #define IXGBE_FLAG2_EEE_CAPABLE                        BIT(14)
 #define IXGBE_FLAG2_EEE_ENABLED                        BIT(15)
@@ -846,7 +843,6 @@ extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
 #endif
 
 extern char ixgbe_driver_name[];
-extern const char ixgbe_driver_version[];
 #ifdef IXGBE_FCOE
 extern char ixgbe_default_device_descr[];
 #endif /* IXGBE_FCOE */
index eee277c..95c92fe 100644 (file)
@@ -1098,7 +1098,7 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
                        IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
                /* Setup the last four at 48KB...don't re-init i */
                rxpktsize = IXGBE_RXPBSIZE_48KB;
-               /* Fall Through */
+               fallthrough;
        case PBA_STRATEGY_EQUAL:
        default:
                /* Divide the remaining Rx packet buffer evenly among the TCs */
index 109f8de..8d3798a 100644 (file)
@@ -1568,7 +1568,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
        case 0x0000:
                /* mask VLAN ID */
                fdirm |= IXGBE_FDIRM_VLANID;
-               /* fall through */
+               fallthrough;
        case 0x0FFF:
                /* mask VLAN priority */
                fdirm |= IXGBE_FDIRM_VLANP;
@@ -1576,7 +1576,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
        case 0xE000:
                /* mask VLAN ID only */
                fdirm |= IXGBE_FDIRM_VLANID;
-               /* fall through */
+               fallthrough;
        case 0xEFFF:
                /* no VLAN fields masked */
                break;
@@ -1589,7 +1589,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
        case 0x0000:
                /* Mask Flex Bytes */
                fdirm |= IXGBE_FDIRM_FLEX;
-               /* fall through */
+               fallthrough;
        case 0xFFFF:
                break;
        default:
index 17357a1..62ddb45 100644 (file)
@@ -145,7 +145,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
                if (ret_val)
                        return ret_val;
 
-               /* fall through - only backplane uses autoc */
+               fallthrough; /* only backplane uses autoc */
        case ixgbe_media_type_fiber:
                reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
 
@@ -3533,7 +3533,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
                rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
                for (; i < (num_pb / 2); i++)
                        IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
-               /* fall through - configure remaining packet buffers */
+               fallthrough; /* configure remaining packet buffers */
        case (PBA_STRATEGY_EQUAL):
                /* Divide the remaining Rx packet buffer evenly among the TCs */
                rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
index c6bf0a5..6725d89 100644 (file)
@@ -142,32 +142,71 @@ static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
 
 #define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
 
-/* currently supported speeds for 10G */
-#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
-                        SUPPORTED_10000baseKX4_Full | \
-                        SUPPORTED_10000baseKR_Full)
-
 #define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)
 
-static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
+static void ixgbe_set_supported_10gtypes(struct ixgbe_hw *hw,
+                                        struct ethtool_link_ksettings *cmd)
+{
+       if (!ixgbe_isbackplane(hw->phy.media_type)) {
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    10000baseT_Full);
+               return;
+       }
+
+       switch (hw->device_id) {
+       case IXGBE_DEV_ID_82598:
+       case IXGBE_DEV_ID_82599_KX4:
+       case IXGBE_DEV_ID_82599_KX4_MEZZ:
+       case IXGBE_DEV_ID_X550EM_X_KX4:
+               ethtool_link_ksettings_add_link_mode
+                       (cmd, supported, 10000baseKX4_Full);
+               break;
+       case IXGBE_DEV_ID_82598_BX:
+       case IXGBE_DEV_ID_82599_KR:
+       case IXGBE_DEV_ID_X550EM_X_KR:
+       case IXGBE_DEV_ID_X550EM_X_XFI:
+               ethtool_link_ksettings_add_link_mode
+                       (cmd, supported, 10000baseKR_Full);
+               break;
+       default:
+               ethtool_link_ksettings_add_link_mode
+                       (cmd, supported, 10000baseKX4_Full);
+               ethtool_link_ksettings_add_link_mode
+                       (cmd, supported, 10000baseKR_Full);
+               break;
+       }
+}
+
+static void ixgbe_set_advertising_10gtypes(struct ixgbe_hw *hw,
+                                          struct ethtool_link_ksettings *cmd)
 {
-       if (!ixgbe_isbackplane(hw->phy.media_type))
-               return SUPPORTED_10000baseT_Full;
+       if (!ixgbe_isbackplane(hw->phy.media_type)) {
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    10000baseT_Full);
+               return;
+       }
 
        switch (hw->device_id) {
        case IXGBE_DEV_ID_82598:
        case IXGBE_DEV_ID_82599_KX4:
        case IXGBE_DEV_ID_82599_KX4_MEZZ:
        case IXGBE_DEV_ID_X550EM_X_KX4:
-               return SUPPORTED_10000baseKX4_Full;
+               ethtool_link_ksettings_add_link_mode
+                       (cmd, advertising, 10000baseKX4_Full);
+               break;
        case IXGBE_DEV_ID_82598_BX:
        case IXGBE_DEV_ID_82599_KR:
        case IXGBE_DEV_ID_X550EM_X_KR:
        case IXGBE_DEV_ID_X550EM_X_XFI:
-               return SUPPORTED_10000baseKR_Full;
+               ethtool_link_ksettings_add_link_mode
+                       (cmd, advertising, 10000baseKR_Full);
+               break;
        default:
-               return SUPPORTED_10000baseKX4_Full |
-                      SUPPORTED_10000baseKR_Full;
+               ethtool_link_ksettings_add_link_mode
+                       (cmd, advertising, 10000baseKX4_Full);
+               ethtool_link_ksettings_add_link_mode
+                       (cmd, advertising, 10000baseKR_Full);
+               break;
        }
 }
 
@@ -178,52 +217,88 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
        struct ixgbe_hw *hw = &adapter->hw;
        ixgbe_link_speed supported_link;
        bool autoneg = false;
-       u32 supported, advertising;
 
-       ethtool_convert_link_mode_to_legacy_u32(&supported,
-                                               cmd->link_modes.supported);
+       ethtool_link_ksettings_zero_link_mode(cmd, supported);
+       ethtool_link_ksettings_zero_link_mode(cmd, advertising);
 
        hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
 
        /* set the supported link speeds */
-       if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
-               supported |= ixgbe_get_supported_10gtypes(hw);
-       if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
-               supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
-                                  SUPPORTED_1000baseKX_Full :
-                                  SUPPORTED_1000baseT_Full;
-       if (supported_link & IXGBE_LINK_SPEED_100_FULL)
-               supported |= SUPPORTED_100baseT_Full;
-       if (supported_link & IXGBE_LINK_SPEED_10_FULL)
-               supported |= SUPPORTED_10baseT_Full;
-
-       /* default advertised speed if phy.autoneg_advertised isn't set */
-       advertising = supported;
+       if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) {
+               ixgbe_set_supported_10gtypes(hw, cmd);
+               ixgbe_set_advertising_10gtypes(hw, cmd);
+       }
+       if (supported_link & IXGBE_LINK_SPEED_5GB_FULL)
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    5000baseT_Full);
+
+       if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL)
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    2500baseT_Full);
+
+       if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) {
+               if (ixgbe_isbackplane(hw->phy.media_type)) {
+                       ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                            1000baseKX_Full);
+                       ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                            1000baseKX_Full);
+               } else {
+                       ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                            1000baseT_Full);
+                       ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                            1000baseT_Full);
+               }
+       }
+       if (supported_link & IXGBE_LINK_SPEED_100_FULL) {
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    100baseT_Full);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    100baseT_Full);
+       }
+       if (supported_link & IXGBE_LINK_SPEED_10_FULL) {
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    10baseT_Full);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    10baseT_Full);
+       }
+
        /* set the advertised speeds */
        if (hw->phy.autoneg_advertised) {
-               advertising = 0;
+               ethtool_link_ksettings_zero_link_mode(cmd, advertising);
                if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
-                       advertising |= ADVERTISED_10baseT_Full;
+                       ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                            10baseT_Full);
                if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
-                       advertising |= ADVERTISED_100baseT_Full;
+                       ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                            100baseT_Full);
                if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
-                       advertising |= supported & ADVRTSD_MSK_10G;
+                       ixgbe_set_advertising_10gtypes(hw, cmd);
                if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
-                       if (supported & SUPPORTED_1000baseKX_Full)
-                               advertising |= ADVERTISED_1000baseKX_Full;
+                       if (ethtool_link_ksettings_test_link_mode
+                               (cmd, supported, 1000baseKX_Full))
+                               ethtool_link_ksettings_add_link_mode
+                                       (cmd, advertising, 1000baseKX_Full);
                        else
-                               advertising |= ADVERTISED_1000baseT_Full;
+                               ethtool_link_ksettings_add_link_mode
+                                       (cmd, advertising, 1000baseT_Full);
                }
+               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL)
+                       ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                            5000baseT_Full);
+               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
+                       ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                            2500baseT_Full);
        } else {
                if (hw->phy.multispeed_fiber && !autoneg) {
                        if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
-                               advertising = ADVERTISED_10000baseT_Full;
+                               ethtool_link_ksettings_add_link_mode
+                                       (cmd, advertising, 10000baseT_Full);
                }
        }
 
        if (autoneg) {
-               supported |= SUPPORTED_Autoneg;
-               advertising |= ADVERTISED_Autoneg;
+               ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
                cmd->base.autoneg = AUTONEG_ENABLE;
        } else
                cmd->base.autoneg = AUTONEG_DISABLE;
@@ -235,13 +310,13 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
        case ixgbe_phy_x550em_ext_t:
        case ixgbe_phy_fw:
        case ixgbe_phy_cu_unknown:
-               supported |= SUPPORTED_TP;
-               advertising |= ADVERTISED_TP;
+               ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
                cmd->base.port = PORT_TP;
                break;
        case ixgbe_phy_qt:
-               supported |= SUPPORTED_FIBRE;
-               advertising |= ADVERTISED_FIBRE;
+               ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
                cmd->base.port = PORT_FIBRE;
                break;
        case ixgbe_phy_nl:
@@ -260,8 +335,10 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
                case ixgbe_sfp_type_da_cu:
                case ixgbe_sfp_type_da_cu_core0:
                case ixgbe_sfp_type_da_cu_core1:
-                       supported |= SUPPORTED_FIBRE;
-                       advertising |= ADVERTISED_FIBRE;
+                       ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                            FIBRE);
+                       ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                            FIBRE);
                        cmd->base.port = PORT_DA;
                        break;
                case ixgbe_sfp_type_sr:
@@ -272,61 +349,76 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
                case ixgbe_sfp_type_1g_sx_core1:
                case ixgbe_sfp_type_1g_lx_core0:
                case ixgbe_sfp_type_1g_lx_core1:
-                       supported |= SUPPORTED_FIBRE;
-                       advertising |= ADVERTISED_FIBRE;
+                       ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                            FIBRE);
+                       ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                            FIBRE);
                        cmd->base.port = PORT_FIBRE;
                        break;
                case ixgbe_sfp_type_not_present:
-                       supported |= SUPPORTED_FIBRE;
-                       advertising |= ADVERTISED_FIBRE;
+                       ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                            FIBRE);
+                       ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                            FIBRE);
                        cmd->base.port = PORT_NONE;
                        break;
                case ixgbe_sfp_type_1g_cu_core0:
                case ixgbe_sfp_type_1g_cu_core1:
-                       supported |= SUPPORTED_TP;
-                       advertising |= ADVERTISED_TP;
+                       ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                            TP);
+                       ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                            TP);
                        cmd->base.port = PORT_TP;
                        break;
                case ixgbe_sfp_type_unknown:
                default:
-                       supported |= SUPPORTED_FIBRE;
-                       advertising |= ADVERTISED_FIBRE;
+                       ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                            FIBRE);
+                       ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                            FIBRE);
                        cmd->base.port = PORT_OTHER;
                        break;
                }
                break;
        case ixgbe_phy_xaui:
-               supported |= SUPPORTED_FIBRE;
-               advertising |= ADVERTISED_FIBRE;
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    FIBRE);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    FIBRE);
                cmd->base.port = PORT_NONE;
                break;
        case ixgbe_phy_unknown:
        case ixgbe_phy_generic:
        case ixgbe_phy_sfp_unsupported:
        default:
-               supported |= SUPPORTED_FIBRE;
-               advertising |= ADVERTISED_FIBRE;
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    FIBRE);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    FIBRE);
                cmd->base.port = PORT_OTHER;
                break;
        }
 
        /* Indicate pause support */
-       supported |= SUPPORTED_Pause;
+       ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
 
        switch (hw->fc.requested_mode) {
        case ixgbe_fc_full:
-               advertising |= ADVERTISED_Pause;
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
                break;
        case ixgbe_fc_rx_pause:
-               advertising |= ADVERTISED_Pause |
-                                    ADVERTISED_Asym_Pause;
+               ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    Asym_Pause);
                break;
        case ixgbe_fc_tx_pause:
-               advertising |= ADVERTISED_Asym_Pause;
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    Asym_Pause);
                break;
        default:
-               advertising &= ~(ADVERTISED_Pause |
-                                      ADVERTISED_Asym_Pause);
+               ethtool_link_ksettings_del_link_mode(cmd, advertising, Pause);
+               ethtool_link_ksettings_del_link_mode(cmd, advertising,
+                                                    Asym_Pause);
        }
 
        if (netif_carrier_ok(netdev)) {
@@ -358,11 +450,6 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev,
                cmd->base.duplex = DUPLEX_UNKNOWN;
        }
 
-       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
-                                               supported);
-       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
-                                               advertising);
-
        return 0;
 }
 
@@ -373,12 +460,6 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
        struct ixgbe_hw *hw = &adapter->hw;
        u32 advertised, old;
        s32 err = 0;
-       u32 supported, advertising;
-
-       ethtool_convert_link_mode_to_legacy_u32(&supported,
-                                               cmd->link_modes.supported);
-       ethtool_convert_link_mode_to_legacy_u32(&advertising,
-                                               cmd->link_modes.advertising);
 
        if ((hw->phy.media_type == ixgbe_media_type_copper) ||
            (hw->phy.multispeed_fiber)) {
@@ -386,29 +467,41 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
                 * this function does not support duplex forcing, but can
                 * limit the advertising of the adapter to the specified speed
                 */
-               if (advertising & ~supported)
+               if (!bitmap_subset(cmd->link_modes.advertising,
+                                  cmd->link_modes.supported,
+                                  __ETHTOOL_LINK_MODE_MASK_NBITS))
                        return -EINVAL;
 
                /* only allow one speed at a time if no autoneg */
                if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
-                       if (advertising ==
-                           (ADVERTISED_10000baseT_Full |
-                            ADVERTISED_1000baseT_Full))
+                       if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+                                                                 10000baseT_Full) &&
+                           ethtool_link_ksettings_test_link_mode(cmd, advertising,
+                                                                 1000baseT_Full))
                                return -EINVAL;
                }
 
                old = hw->phy.autoneg_advertised;
                advertised = 0;
-               if (advertising & ADVERTISED_10000baseT_Full)
+               if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+                                                         10000baseT_Full))
                        advertised |= IXGBE_LINK_SPEED_10GB_FULL;
-
-               if (advertising & ADVERTISED_1000baseT_Full)
+               if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+                                                         5000baseT_Full))
+                       advertised |= IXGBE_LINK_SPEED_5GB_FULL;
+               if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+                                                         2500baseT_Full))
+                       advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
+               if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+                                                         1000baseT_Full))
                        advertised |= IXGBE_LINK_SPEED_1GB_FULL;
 
-               if (advertising & ADVERTISED_100baseT_Full)
+               if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+                                                         100baseT_Full))
                        advertised |= IXGBE_LINK_SPEED_100_FULL;
 
-               if (advertising & ADVERTISED_10baseT_Full)
+               if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+                                                         10baseT_Full))
                        advertised |= IXGBE_LINK_SPEED_10_FULL;
 
                if (old == advertised)
@@ -429,7 +522,8 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
                u32 speed = cmd->base.speed;
 
                if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
-                   (advertising != ADVERTISED_10000baseT_Full) ||
+                   (!ethtool_link_ksettings_test_link_mode(cmd, advertising,
+                                                           10000baseT_Full)) ||
                    (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
                        return -EINVAL;
        }
@@ -1004,8 +1098,6 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
        strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
-       strlcpy(drvinfo->version, ixgbe_driver_version,
-               sizeof(drvinfo->version));
 
        strlcpy(drvinfo->fw_version, adapter->eeprom_id,
                sizeof(drvinfo->fw_version));
@@ -2086,7 +2178,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
                                        eth_test->flags |= ETH_TEST_FL_FAILED;
                                        clear_bit(__IXGBE_TESTING,
                                                  &adapter->state);
-                                       goto skip_ol_tests;
+                                       return;
                                }
                        }
                }
@@ -2158,9 +2250,6 @@ skip_loopback:
 
                clear_bit(__IXGBE_TESTING, &adapter->state);
        }
-
-skip_ol_tests:
-       msleep_interruptible(4 * 1000);
 }
 
 static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
@@ -2509,11 +2598,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
        switch (cmd->flow_type) {
        case TCP_V4_FLOW:
                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-               /* fallthrough */
+               fallthrough;
        case UDP_V4_FLOW:
                if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-               /* fallthrough */
+               fallthrough;
        case SCTP_V4_FLOW:
        case AH_ESP_V4_FLOW:
        case AH_V4_FLOW:
@@ -2523,11 +2612,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
                break;
        case TCP_V6_FLOW:
                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-               /* fallthrough */
+               fallthrough;
        case UDP_V6_FLOW:
                if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
                        cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
-               /* fallthrough */
+               fallthrough;
        case SCTP_V6_FLOW:
        case AH_ESP_V6_FLOW:
        case AH_V6_FLOW:
@@ -2659,7 +2748,7 @@ static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
                                *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
                                break;
                        }
-                       /* fall through */
+                       fallthrough;
                default:
                        return 0;
                }
index ec7a11d..e67b1a5 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/if_ether.h>
 #include <linux/gfp.h>
 #include <linux/if_vlan.h>
+#include <generated/utsrelease.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
 #include <scsi/fc/fc_fs.h>
@@ -443,7 +444,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
                ddp->err = (__force u32)ddp_err;
                ddp->sgl = NULL;
                ddp->sgc = 0;
-               /* fall through */
+               fallthrough;
        /* if DDP length is present pass it through to ULD */
        case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
                /* update length of DDPed data */
@@ -1001,7 +1002,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
                 sizeof(info->driver_version),
                 "%s v%s",
                 ixgbe_driver_name,
-                ixgbe_driver_version);
+                UTS_RELEASE);
        /* Firmware Version */
        strlcpy(info->firmware_version, adapter->eeprom_id,
                sizeof(info->firmware_version));
index 113f608..6516980 100644 (file)
@@ -427,7 +427,7 @@ static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
 static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
                                        u32 *mykey, u32 *mysalt)
 {
-       struct net_device *dev = xs->xso.dev;
+       struct net_device *dev = xs->xso.real_dev;
        unsigned char *key_data;
        char *alg_name = NULL;
        int key_len;
@@ -477,7 +477,7 @@ static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
  **/
 static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
 {
-       struct net_device *dev = xs->xso.dev;
+       struct net_device *dev = xs->xso.real_dev;
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 mfval, manc, reg;
@@ -560,7 +560,7 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
  **/
 static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
 {
-       struct net_device *dev = xs->xso.dev;
+       struct net_device *dev = xs->xso.real_dev;
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct ixgbe_ipsec *ipsec = adapter->ipsec;
        struct ixgbe_hw *hw = &adapter->hw;
@@ -745,7 +745,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
  **/
 static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
 {
-       struct net_device *dev = xs->xso.dev;
+       struct net_device *dev = xs->xso.real_dev;
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct ixgbe_ipsec *ipsec = adapter->ipsec;
        struct ixgbe_hw *hw = &adapter->hw;
index fd9f5d4..2e35c57 100644 (file)
@@ -921,7 +921,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
                ring->queue_index = txr_idx;
 
                /* assign ring to adapter */
-               adapter->tx_ring[txr_idx] = ring;
+               WRITE_ONCE(adapter->tx_ring[txr_idx], ring);
 
                /* update count and index */
                txr_count--;
@@ -948,7 +948,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
                set_ring_xdp(ring);
 
                /* assign ring to adapter */
-               adapter->xdp_ring[xdp_idx] = ring;
+               WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);
 
                /* update count and index */
                xdp_count--;
@@ -991,7 +991,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
                ring->queue_index = rxr_idx;
 
                /* assign ring to adapter */
-               adapter->rx_ring[rxr_idx] = ring;
+               WRITE_ONCE(adapter->rx_ring[rxr_idx], ring);
 
                /* update count and index */
                rxr_count--;
@@ -1020,13 +1020,13 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
 
        ixgbe_for_each_ring(ring, q_vector->tx) {
                if (ring_is_xdp(ring))
-                       adapter->xdp_ring[ring->queue_index] = NULL;
+                       WRITE_ONCE(adapter->xdp_ring[ring->queue_index], NULL);
                else
-                       adapter->tx_ring[ring->queue_index] = NULL;
+                       WRITE_ONCE(adapter->tx_ring[ring->queue_index], NULL);
        }
 
        ixgbe_for_each_ring(ring, q_vector->rx)
-               adapter->rx_ring[ring->queue_index] = NULL;
+               WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);
 
        adapter->q_vector[v_idx] = NULL;
        napi_hash_del(&q_vector->napi);
index f162b8b..4d898ff 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/bpf_trace.h>
 #include <linux/atomic.h>
 #include <linux/numa.h>
+#include <generated/utsrelease.h>
 #include <scsi/fc/fc_fcoe.h>
 #include <net/udp_tunnel.h>
 #include <net/pkt_cls.h>
@@ -56,8 +57,6 @@ char ixgbe_default_device_descr[] =
 static char ixgbe_default_device_descr[] =
                              "Intel(R) 10 Gigabit Network Connection";
 #endif
-#define DRV_VERSION "5.1.0-k"
-const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
                                "Copyright (c) 1999-2016 Intel Corporation.";
 
@@ -165,7 +164,6 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
 
 static struct workqueue_struct *ixgbe_wq;
 
@@ -1397,7 +1395,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
                                        IXGBE_DCA_CTRL_DCA_MODE_CB2);
                        break;
                }
-               /* fall through - DCA is disabled. */
+               fallthrough; /* DCA is disabled. */
        case DCA_PROVIDER_REMOVE:
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
                        dca_remove_requester(dev);
@@ -2231,10 +2229,10 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
-               /* fallthrough */
+               fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
-               /* fallthrough -- handle aborts by dropping packet */
+               fallthrough; /* handle aborts by dropping packet */
        case XDP_DROP:
                result = IXGBE_XDP_CONSUMED;
                break;
@@ -3009,7 +3007,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
        case ixgbe_mac_82599EB:
                mask |= IXGBE_EIMS_GPI_SDP1(hw);
                mask |= IXGBE_EIMS_GPI_SDP2(hw);
-               /* fall through */
+               fallthrough;
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
@@ -3315,7 +3313,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
                ixgbe_check_sfp_event(adapter, eicr);
-               /* Fall through */
+               fallthrough;
        case ixgbe_mac_X540:
        case ixgbe_mac_X550:
        case ixgbe_mac_X550EM_x:
@@ -4337,7 +4335,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
        case ixgbe_mac_x550em_a:
                if (adapter->num_vfs)
                        rdrxctl |= IXGBE_RDRXCTL_PSP;
-               /* fall through */
+               fallthrough;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
                /* Disable RSC for ACK packets */
@@ -4996,24 +4994,41 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
                napi_disable(&adapter->q_vector[q_idx]->napi);
 }
 
-static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
+static int ixgbe_udp_tunnel_sync(struct net_device *dev, unsigned int table)
 {
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
        struct ixgbe_hw *hw = &adapter->hw;
-       u32 vxlanctrl;
+       struct udp_tunnel_info ti;
 
-       if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE |
-                               IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
-               return;
+       udp_tunnel_nic_get_port(dev, table, 0, &ti);
+       if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
+               adapter->vxlan_port = ti.port;
+       else
+               adapter->geneve_port = ti.port;
 
-       vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
-       IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
+       IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL,
+                       ntohs(adapter->vxlan_port) |
+                       ntohs(adapter->geneve_port) <<
+                               IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT);
+       return 0;
+}
 
-       if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
-               adapter->vxlan_port = 0;
+static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550 = {
+       .sync_table     = ixgbe_udp_tunnel_sync,
+       .flags          = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
+       .tables         = {
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
+       },
+};
 
-       if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK)
-               adapter->geneve_port = 0;
-}
+static const struct udp_tunnel_nic_info ixgbe_udp_tunnels_x550em_a = {
+       .sync_table     = ixgbe_udp_tunnel_sync,
+       .flags          = UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
+       .tables         = {
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+       },
+};
 
 #ifdef CONFIG_IXGBE_DCB
 /**
@@ -5503,9 +5518,13 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
                return ret;
 
        speed = hw->phy.autoneg_advertised;
-       if ((!speed) && (hw->mac.ops.get_link_capabilities))
+       if (!speed && hw->mac.ops.get_link_capabilities) {
                ret = hw->mac.ops.get_link_capabilities(hw, &speed,
                                                        &autoneg);
+               speed &= ~(IXGBE_LINK_SPEED_5GB_FULL |
+                          IXGBE_LINK_SPEED_2_5GB_FULL);
+       }
+
        if (ret)
                return ret;
 
@@ -5887,7 +5906,7 @@ dma_engine_disable:
                IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
                                (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
                                 ~IXGBE_DMATXCTL_TE));
-               /* fall through */
+               fallthrough;
        default:
                break;
        }
@@ -6330,7 +6349,6 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
                        adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
                break;
        case ixgbe_mac_x550em_a:
-               adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
                switch (hw->device_id) {
                case IXGBE_DEV_ID_X550EM_A_1G_T:
                case IXGBE_DEV_ID_X550EM_A_1G_T_L:
@@ -6339,7 +6357,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
                default:
                        break;
                }
-       /* fall through */
+               fallthrough;
        case ixgbe_mac_X550EM_x:
 #ifdef CONFIG_IXGBE_DCB
                adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
@@ -6350,14 +6368,13 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
                adapter->fcoe.up = 0;
 #endif /* IXGBE_DCB */
 #endif /* IXGBE_FCOE */
-       /* Fall Through */
+               fallthrough;
        case ixgbe_mac_X550:
                if (hw->mac.type == ixgbe_mac_X550)
                        adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
 #ifdef CONFIG_IXGBE_DCA
                adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
 #endif
-               adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
                break;
        default:
                break;
@@ -6796,8 +6813,7 @@ int ixgbe_open(struct net_device *netdev)
 
        ixgbe_up_complete(adapter);
 
-       ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK);
-       udp_tunnel_get_rx_info(netdev);
+       udp_tunnel_nic_reset_ntf(netdev);
 
        return 0;
 
@@ -7051,7 +7067,10 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        }
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+               struct ixgbe_ring *rx_ring = READ_ONCE(adapter->rx_ring[i]);
+
+               if (!rx_ring)
+                       continue;
                non_eop_descs += rx_ring->rx_stats.non_eop_descs;
                alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
                alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
@@ -7072,15 +7091,20 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        packets = 0;
        /* gather some stats to the adapter struct that are per queue */
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+               struct ixgbe_ring *tx_ring = READ_ONCE(adapter->tx_ring[i]);
+
+               if (!tx_ring)
+                       continue;
                restart_queue += tx_ring->tx_stats.restart_queue;
                tx_busy += tx_ring->tx_stats.tx_busy;
                bytes += tx_ring->stats.bytes;
                packets += tx_ring->stats.packets;
        }
        for (i = 0; i < adapter->num_xdp_queues; i++) {
-               struct ixgbe_ring *xdp_ring = adapter->xdp_ring[i];
+               struct ixgbe_ring *xdp_ring = READ_ONCE(adapter->xdp_ring[i]);
 
+               if (!xdp_ring)
+                       continue;
                restart_queue += xdp_ring->tx_stats.restart_queue;
                tx_busy += xdp_ring->tx_stats.tx_busy;
                bytes += xdp_ring->stats.bytes;
@@ -7162,7 +7186,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
                hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
                hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
-               /* fall through */
+               fallthrough;
        case ixgbe_mac_82599EB:
                for (i = 0; i < 16; i++)
                        adapter->hw_rx_no_dma_resources +=
@@ -7911,12 +7935,6 @@ static void ixgbe_service_task(struct work_struct *work)
                ixgbe_service_event_complete(adapter);
                return;
        }
-       if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) {
-               rtnl_lock();
-               adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
-               udp_tunnel_get_rx_info(adapter->netdev);
-               rtnl_unlock();
-       }
        ixgbe_reset_subtask(adapter);
        ixgbe_phy_interrupt_subtask(adapter);
        ixgbe_sfp_detection_subtask(adapter);
@@ -8071,7 +8089,7 @@ csum_failed:
        switch (skb->csum_offset) {
        case offsetof(struct tcphdr, check):
                type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
-               /* fall through */
+               fallthrough;
        case offsetof(struct udphdr, check):
                break;
        case offsetof(struct sctphdr, checksum):
@@ -8083,7 +8101,7 @@ csum_failed:
                        type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
                        break;
                }
-               /* fall through */
+               fallthrough;
        default:
                skb_checksum_help(skb);
                goto csum_failed;
@@ -8526,7 +8544,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 
                if (!sb_dev && (adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
                        break;
-               /* fall through */
+               fallthrough;
        default:
                return netdev_pick_tx(dev, skb, sb_dev);
        }
@@ -8860,7 +8878,7 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
        case SIOCGMIIPHY:
                if (!adapter->hw.phy.ops.read_reg)
                        return -EOPNOTSUPP;
-               /* fall through */
+               fallthrough;
        default:
                return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
        }
@@ -9774,26 +9792,6 @@ static int ixgbe_set_features(struct net_device *netdev,
 
        netdev->features = features;
 
-       if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
-               if (features & NETIF_F_RXCSUM) {
-                       adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
-               } else {
-                       u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
-
-                       ixgbe_clear_udp_tunnel_port(adapter, port_mask);
-               }
-       }
-
-       if ((adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) {
-               if (features & NETIF_F_RXCSUM) {
-                       adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
-               } else {
-                       u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
-
-                       ixgbe_clear_udp_tunnel_port(adapter, port_mask);
-               }
-       }
-
        if ((changed & NETIF_F_HW_L2FW_DOFFLOAD) && adapter->num_rx_pools > 1)
                ixgbe_reset_l2fw_offload(adapter);
        else if (need_reset)
@@ -9805,118 +9803,6 @@ static int ixgbe_set_features(struct net_device *netdev,
        return 1;
 }
 
-/**
- * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports
- * @dev: The port's netdev
- * @ti: Tunnel endpoint information
- **/
-static void ixgbe_add_udp_tunnel_port(struct net_device *dev,
-                                     struct udp_tunnel_info *ti)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(dev);
-       struct ixgbe_hw *hw = &adapter->hw;
-       __be16 port = ti->port;
-       u32 port_shift = 0;
-       u32 reg;
-
-       if (ti->sa_family != AF_INET)
-               return;
-
-       switch (ti->type) {
-       case UDP_TUNNEL_TYPE_VXLAN:
-               if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
-                       return;
-
-               if (adapter->vxlan_port == port)
-                       return;
-
-               if (adapter->vxlan_port) {
-                       netdev_info(dev,
-                                   "VXLAN port %d set, not adding port %d\n",
-                                   ntohs(adapter->vxlan_port),
-                                   ntohs(port));
-                       return;
-               }
-
-               adapter->vxlan_port = port;
-               break;
-       case UDP_TUNNEL_TYPE_GENEVE:
-               if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
-                       return;
-
-               if (adapter->geneve_port == port)
-                       return;
-
-               if (adapter->geneve_port) {
-                       netdev_info(dev,
-                                   "GENEVE port %d set, not adding port %d\n",
-                                   ntohs(adapter->geneve_port),
-                                   ntohs(port));
-                       return;
-               }
-
-               port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT;
-               adapter->geneve_port = port;
-               break;
-       default:
-               return;
-       }
-
-       reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift;
-       IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg);
-}
-
-/**
- * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports
- * @dev: The port's netdev
- * @ti: Tunnel endpoint information
- **/
-static void ixgbe_del_udp_tunnel_port(struct net_device *dev,
-                                     struct udp_tunnel_info *ti)
-{
-       struct ixgbe_adapter *adapter = netdev_priv(dev);
-       u32 port_mask;
-
-       if (ti->type != UDP_TUNNEL_TYPE_VXLAN &&
-           ti->type != UDP_TUNNEL_TYPE_GENEVE)
-               return;
-
-       if (ti->sa_family != AF_INET)
-               return;
-
-       switch (ti->type) {
-       case UDP_TUNNEL_TYPE_VXLAN:
-               if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
-                       return;
-
-               if (adapter->vxlan_port != ti->port) {
-                       netdev_info(dev, "VXLAN port %d not found\n",
-                                   ntohs(ti->port));
-                       return;
-               }
-
-               port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK;
-               break;
-       case UDP_TUNNEL_TYPE_GENEVE:
-               if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))
-                       return;
-
-               if (adapter->geneve_port != ti->port) {
-                       netdev_info(dev, "GENEVE port %d not found\n",
-                                   ntohs(ti->port));
-                       return;
-               }
-
-               port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK;
-               break;
-       default:
-               return;
-       }
-
-       ixgbe_clear_udp_tunnel_port(adapter, port_mask);
-       adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED;
-}
-
 static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                             struct net_device *dev,
                             const unsigned char *addr, u16 vid,
@@ -10406,8 +10292,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_bridge_getlink     = ixgbe_ndo_bridge_getlink,
        .ndo_dfwd_add_station   = ixgbe_fwd_add,
        .ndo_dfwd_del_station   = ixgbe_fwd_del,
-       .ndo_udp_tunnel_add     = ixgbe_add_udp_tunnel_port,
-       .ndo_udp_tunnel_del     = ixgbe_del_udp_tunnel_port,
+       .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+       .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
        .ndo_features_check     = ixgbe_features_check,
        .ndo_bpf                = ixgbe_xdp,
        .ndo_xdp_xmit           = ixgbe_xdp_xmit,
@@ -10650,7 +10536,7 @@ bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
                        /* only support first port */
                        if (hw->bus.func != 0)
                                break;
-                       /* fall through */
+                       fallthrough;
                case IXGBE_SUBDEV_ID_82599_SP_560FLR:
                case IXGBE_SUBDEV_ID_82599_SFP:
                case IXGBE_SUBDEV_ID_82599_RNDC:
@@ -10852,6 +10738,18 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto err_sw_init;
 
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_X550:
+       case ixgbe_mac_X550EM_x:
+               netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550;
+               break;
+       case ixgbe_mac_x550em_a:
+               netdev->udp_tunnel_nic_info = &ixgbe_udp_tunnels_x550em_a;
+               break;
+       default:
+               break;
+       }
+
        /* Make sure the SWFW semaphore is in a valid state */
        if (hw->mac.ops.init_swfw_sync)
                hw->mac.ops.init_swfw_sync(hw);
@@ -11146,8 +11044,8 @@ skip_sriov:
         */
        if (hw->mac.ops.set_fw_drv_ver)
                hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
-                                          sizeof(ixgbe_driver_version) - 1,
-                                          ixgbe_driver_version);
+                                          sizeof(UTS_RELEASE) - 1,
+                                          UTS_RELEASE);
 
        /* add san mac addr to netdev */
        ixgbe_add_sanmac_netdev(netdev);
@@ -11167,10 +11065,14 @@ skip_sriov:
                        IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
                        true);
 
-       ixgbe_mii_bus_init(hw);
+       err = ixgbe_mii_bus_init(hw);
+       if (err)
+               goto err_netdev;
 
        return 0;
 
+err_netdev:
+       unregister_netdev(netdev);
 err_register:
        ixgbe_release_hw_control(adapter);
        ixgbe_clear_interrupt_scheme(adapter);
@@ -11504,7 +11406,7 @@ static struct pci_driver ixgbe_driver = {
 static int __init ixgbe_init_module(void)
 {
        int ret;
-       pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
+       pr_info("%s\n", ixgbe_driver_string);
        pr_info("%s\n", ixgbe_copyright);
 
        ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
index 2fb9796..7980d72 100644 (file)
@@ -905,7 +905,6 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
        struct pci_dev *pdev = adapter->pdev;
        struct device *dev = &adapter->netdev->dev;
        struct mii_bus *bus;
-       int err = -ENODEV;
 
        bus = devm_mdiobus_alloc(dev);
        if (!bus)
@@ -923,7 +922,7 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
        case IXGBE_DEV_ID_X550EM_A_1G_T:
        case IXGBE_DEV_ID_X550EM_A_1G_T_L:
                if (!ixgbe_x550em_a_has_mii(hw))
-                       goto ixgbe_no_mii_bus;
+                       return -ENODEV;
                bus->read = &ixgbe_x550em_a_mii_bus_read;
                bus->write = &ixgbe_x550em_a_mii_bus_write;
                break;
@@ -948,15 +947,8 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
         */
        hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;
 
-       err = mdiobus_register(bus);
-       if (!err) {
-               adapter->mii_bus = bus;
-               return 0;
-       }
-
-ixgbe_no_mii_bus:
-       devm_mdiobus_free(dev, bus);
-       return err;
+       adapter->mii_bus = bus;
+       return mdiobus_register(bus);
 }
 
 /**
index 0be13a9..22a874e 100644 (file)
@@ -1051,7 +1051,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
                        adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
                        break;
                }
-               /* fall through */
+               fallthrough;
        default:
                /*
                 * register RXMTRL must be set in order to do V1 packets,
@@ -1242,7 +1242,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
                        cc.mult = 3;
                        cc.shift = 2;
                }
-               /* fallthrough */
+               fallthrough;
        case ixgbe_mac_x550em_a:
        case ixgbe_mac_X550:
                cc.read = ixgbe_ptp_read_X550;
index d05a569..23a9265 100644 (file)
@@ -503,7 +503,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
                         */
                        if (pf_max_frame > ETH_FRAME_LEN)
                                break;
-                       /* fall through */
+                       fallthrough;
                default:
                        /* If the PF or VF are running w/ jumbo frames enabled
                         * we need to shut down the VF Rx path as we cannot
@@ -1141,7 +1141,7 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter,
                /* promisc introduced in 1.3 version */
                if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
                        return -EOPNOTSUPP;
-               /* Fall through */
+               fallthrough;
        case ixgbe_mbox_api_13:
        case ixgbe_mbox_api_14:
                break;
index 9c42f74..5e339af 100644 (file)
@@ -306,7 +306,7 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
                hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
                ixgbe_setup_mux_ctl(hw);
                ixgbe_check_cs4227(hw);
-               /* Fallthrough */
+               fallthrough;
        case IXGBE_DEV_ID_X550EM_A_SFP_N:
                return ixgbe_identify_module_generic(hw);
        case IXGBE_DEV_ID_X550EM_X_KX4:
@@ -325,7 +325,7 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
                        hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
                else
                        hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
-               /* Fallthrough */
+               fallthrough;
        case IXGBE_DEV_ID_X550EM_X_10G_T:
                return ixgbe_identify_phy_generic(hw);
        case IXGBE_DEV_ID_X550EM_X_1G_T:
@@ -2303,7 +2303,7 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
                                        break;
                                }
                        }
-                       /* fall through */
+                       fallthrough;
                default:
                        *speed = IXGBE_LINK_SPEED_10GB_FULL |
                                 IXGBE_LINK_SPEED_1GB_FULL;
@@ -2885,7 +2885,7 @@ static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
                 * through to the fc_full statement.  Later, we will
                 * disable the adapter's ability to send PAUSE frames.
                 */
-               /* Fallthrough */
+               fallthrough;
        case ixgbe_fc_full:
                pause = true;
                asm_dir = true;
@@ -3284,7 +3284,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
        case IXGBE_DEV_ID_X550EM_A_SGMII:
        case IXGBE_DEV_ID_X550EM_A_SGMII_L:
                hw->phy.type = ixgbe_phy_sgmii;
-               /* Fallthrough */
+               fallthrough;
        case IXGBE_DEV_ID_X550EM_X_KR:
        case IXGBE_DEV_ID_X550EM_X_KX4:
        case IXGBE_DEV_ID_X550EM_X_XFI:
index be9d2a8..ec7121f 100644 (file)
@@ -120,10 +120,10 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
-               /* fallthrough */
+               fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
-               /* fallthrough -- handle aborts by dropping packet */
+               fallthrough; /* handle aborts by dropping packet */
        case XDP_DROP:
                result = IXGBE_XDP_CONSUMED;
                break;
index 988fa49..e49fb1c 100644 (file)
@@ -218,8 +218,6 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev,
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
 
        strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
-       strlcpy(drvinfo->version, ixgbevf_driver_version,
-               sizeof(drvinfo->version));
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));
 
index ecab686..a0e3257 100644 (file)
@@ -440,7 +440,6 @@ extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;
 
 /* needed by ethtool.c */
 extern const char ixgbevf_driver_name[];
-extern const char ixgbevf_driver_version[];
 
 int ixgbevf_open(struct net_device *netdev);
 int ixgbevf_close(struct net_device *netdev);
index a39e2cb..6e9a397 100644 (file)
@@ -38,8 +38,6 @@ const char ixgbevf_driver_name[] = "ixgbevf";
 static const char ixgbevf_driver_string[] =
        "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
 
-#define DRV_VERSION "4.1.0-k"
-const char ixgbevf_driver_version[] = DRV_VERSION;
 static char ixgbevf_copyright[] =
        "Copyright (c) 2009 - 2018 Intel Corporation.";
 
@@ -81,7 +79,6 @@ MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
 MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
 MODULE_LICENSE("GPL v2");
-MODULE_VERSION(DRV_VERSION);
 
 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
 static int debug = -1;
@@ -1082,10 +1079,10 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
-               /* fallthrough */
+               fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
-               /* fallthrough -- handle aborts by dropping packet */
+               fallthrough; /* handle aborts by dropping packet */
        case XDP_DROP:
                result = IXGBEVF_XDP_CONSUMED;
                break;
@@ -2605,7 +2602,7 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
  * important, starting with the "most" number of features turned on at once,
  * and ending with the smallest set of features.  This way large combinations
  * can be allocated if they're turned on, and smaller combinations are the
- * fallthrough conditions.
+ * fall through conditions.
  *
  **/
 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
@@ -3877,7 +3874,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
        switch (skb->csum_offset) {
        case offsetof(struct tcphdr, check):
                type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
-               /* fall through */
+               fallthrough;
        case offsetof(struct udphdr, check):
                break;
        case offsetof(struct sctphdr, checksum):
@@ -3889,7 +3886,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
                        type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
                        break;
                }
-               /* fall through */
+               fallthrough;
        default:
                skb_checksum_help(skb);
                goto no_csum;
@@ -4913,9 +4910,7 @@ static struct pci_driver ixgbevf_driver = {
  **/
 static int __init ixgbevf_init_module(void)
 {
-       pr_info("%s - version %s\n", ixgbevf_driver_string,
-               ixgbevf_driver_version);
-
+       pr_info("%s\n", ixgbevf_driver_string);
        pr_info("%s\n", ixgbevf_copyright);
        ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
        if (!ixgbevf_wq) {
index d5ce496..bfe6dfc 100644 (file)
@@ -314,7 +314,7 @@ int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
        case ixgbe_mbox_api_12:
                if (hw->mac.type < ixgbe_mac_X550_vf)
                        break;
-               /* fall through */
+               fallthrough;
        default:
                return -EOPNOTSUPP;
        }
@@ -382,7 +382,7 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
        case ixgbe_mbox_api_12:
                if (hw->mac.type < ixgbe_mac_X550_vf)
                        break;
-               /* fall through */
+               fallthrough;
        default:
                return -EOPNOTSUPP;
        }
@@ -540,7 +540,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
                /* promisc introduced in 1.3 version */
                if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
                        return -EOPNOTSUPP;
-               /* Fall threw */
+               fallthrough;
        case ixgbe_mbox_api_14:
        case ixgbe_mbox_api_13:
                break;
index c97c741..ddc7576 100644 (file)
@@ -3,7 +3,7 @@
  * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
  *
  * Copyright 2008 JMicron Technology Corporation
- * http://www.jmicron.com/
+ * https://www.jmicron.com/
  * Copyright (c) 2009 - 2010 Guo-Fu Tseng <cooldavid@cooldavid.org>
  *
  * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
index 2bba5ce..a2c3b00 100644 (file)
@@ -3,7 +3,7 @@
  * JMicron JMC2x0 series PCIe Ethernet Linux Device Driver
  *
  * Copyright 2008 JMicron Technology Corporation
- * http://www.jmicron.com/
+ * https://www.jmicron.com/
  * Copyright (c) 2009 - 2010 Guo-Fu Tseng <cooldavid@cooldavid.org>
  *
  * Author: Guo-Fu Tseng <cooldavid@cooldavid.org>
index cd8ddd1..ef4f35b 100644 (file)
@@ -87,6 +87,7 @@ config MVPP2
        depends on ARCH_MVEBU || COMPILE_TEST
        select MVMDIO
        select PHYLINK
+       select PAGE_POOL
        help
          This driver supports the network interface units in the
          Marvell ARMADA 375, 7K and 8K SoCs.
index 4d4b624..90e6111 100644 (file)
@@ -816,10 +816,9 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
                          struct net_device *dev)
 {
        struct mv643xx_eth_private *mp = txq_to_mp(txq);
-       int total_len, data_left, ret;
+       int hdr_len, total_len, data_left, ret;
        int desc_count = 0;
        struct tso_t tso;
-       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        struct tx_desc *first_tx_desc;
        u32 first_cmd_sts = 0;
 
@@ -832,7 +831,7 @@ static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
        first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc];
 
        /* Initialize the TSO handler, and prepare the first payload */
-       tso_start(skb, &tso);
+       hdr_len = tso_start(skb, &tso);
 
        total_len = skb->len - hdr_len;
        while (total_len > 0) {
index 946925b..2c9277e 100644 (file)
 #define      MVNETA_TX_IN_PRGRS                  BIT(1)
 #define      MVNETA_TX_FIFO_EMPTY                BIT(8)
 #define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
+/* Only exists on Armada XP and Armada 370 */
 #define MVNETA_SERDES_CFG                       0x24A0
 #define      MVNETA_SGMII_SERDES_PROTO          0x0cc7
 #define      MVNETA_QSGMII_SERDES_PROTO                 0x0667
+#define      MVNETA_HSGMII_SERDES_PROTO                 0x1107
 #define MVNETA_TYPE_PRIO                         0x24bc
 #define      MVNETA_FORCE_UNI                    BIT(21)
 #define MVNETA_TXQ_CMD_1                         0x24e4
@@ -696,10 +698,6 @@ struct mvneta_rx_queue {
        /* Index of first RX DMA descriptor to refill */
        int first_to_refill;
        u32 refill_num;
-
-       /* pointer to uncomplete skb buffer */
-       struct sk_buff *skb;
-       int left_size;
 };
 
 static enum cpuhp_state online_hpstate;
@@ -2024,6 +2022,20 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
        return i;
 }
 
+static void
+mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+                   struct xdp_buff *xdp, int sync_len, bool napi)
+{
+       struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+       int i;
+
+       page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
+                          sync_len, napi);
+       for (i = 0; i < sinfo->nr_frags; i++)
+               page_pool_put_full_page(rxq->page_pool,
+                                       skb_frag_page(&sinfo->frags[i]), napi);
+}
+
 static int
 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
                        struct xdp_frame *xdpf, bool dma_map)
@@ -2156,13 +2168,13 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
 static int
 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
               struct bpf_prog *prog, struct xdp_buff *xdp,
-              struct mvneta_stats *stats)
+              u32 frame_sz, struct mvneta_stats *stats)
 {
-       unsigned int len, sync;
-       struct page *page;
+       unsigned int len, data_len, sync;
        u32 ret, act;
 
        len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
+       data_len = xdp->data_end - xdp->data;
        act = bpf_prog_run_xdp(prog, xdp);
 
        /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
@@ -2178,9 +2190,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 
                err = xdp_do_redirect(pp->dev, xdp, prog);
                if (unlikely(err)) {
+                       mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
                        ret = MVNETA_XDP_DROPPED;
-                       page = virt_to_head_page(xdp->data);
-                       page_pool_put_page(rxq->page_pool, page, sync, true);
                } else {
                        ret = MVNETA_XDP_REDIR;
                        stats->xdp_redirect++;
@@ -2189,10 +2200,8 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
        }
        case XDP_TX:
                ret = mvneta_xdp_xmit_back(pp, xdp);
-               if (ret != MVNETA_XDP_TX) {
-                       page = virt_to_head_page(xdp->data);
-                       page_pool_put_page(rxq->page_pool, page, sync, true);
-               }
+               if (ret != MVNETA_XDP_TX)
+                       mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
@@ -2201,25 +2210,23 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
                trace_xdp_exception(pp->dev, prog, act);
                /* fall through */
        case XDP_DROP:
-               page = virt_to_head_page(xdp->data);
-               page_pool_put_page(rxq->page_pool, page, sync, true);
+               mvneta_xdp_put_buff(pp, rxq, xdp, sync, true);
                ret = MVNETA_XDP_DROPPED;
                stats->xdp_drop++;
                break;
        }
 
-       stats->rx_bytes += xdp->data_end - xdp->data;
+       stats->rx_bytes += frame_sz + xdp->data_end - xdp->data - data_len;
        stats->rx_packets++;
 
        return ret;
 }
 
-static int
+static void
 mvneta_swbm_rx_frame(struct mvneta_port *pp,
                     struct mvneta_rx_desc *rx_desc,
                     struct mvneta_rx_queue *rxq,
-                    struct xdp_buff *xdp,
-                    struct bpf_prog *xdp_prog,
+                    struct xdp_buff *xdp, int *size,
                     struct page *page,
                     struct mvneta_stats *stats)
 {
@@ -2227,7 +2234,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
        int data_len = -MVNETA_MH_SIZE, len;
        struct net_device *dev = pp->dev;
        enum dma_data_direction dma_dir;
-       int ret = 0;
+       struct skb_shared_info *sinfo;
 
        if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
                len = MVNETA_MAX_RX_BUF_SIZE;
@@ -2250,71 +2257,81 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
        xdp->data_end = xdp->data + data_len;
        xdp_set_data_meta_invalid(xdp);
 
-       if (xdp_prog) {
-               ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp, stats);
-               if (ret)
-                       goto out;
-       }
-
-       rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
-       if (unlikely(!rxq->skb)) {
-               struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
-
-               netdev_err(dev, "Can't allocate skb on queue %d\n", rxq->id);
-
-               u64_stats_update_begin(&stats->syncp);
-               stats->es.skb_alloc_error++;
-               stats->rx_dropped++;
-               u64_stats_update_end(&stats->syncp);
-
-               return -ENOMEM;
-       }
-       page_pool_release_page(rxq->page_pool, page);
+       sinfo = xdp_get_shared_info_from_buff(xdp);
+       sinfo->nr_frags = 0;
 
-       skb_reserve(rxq->skb,
-                   xdp->data - xdp->data_hard_start);
-       skb_put(rxq->skb, xdp->data_end - xdp->data);
-       mvneta_rx_csum(pp, rx_desc->status, rxq->skb);
-
-       rxq->left_size = rx_desc->data_size - len;
-
-out:
+       *size = rx_desc->data_size - len;
        rx_desc->buf_phys_addr = 0;
-
-       return ret;
 }
 
 static void
 mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
                            struct mvneta_rx_desc *rx_desc,
                            struct mvneta_rx_queue *rxq,
+                           struct xdp_buff *xdp, int *size,
                            struct page *page)
 {
+       struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
        struct net_device *dev = pp->dev;
        enum dma_data_direction dma_dir;
        int data_len, len;
 
-       if (rxq->left_size > MVNETA_MAX_RX_BUF_SIZE) {
+       if (*size > MVNETA_MAX_RX_BUF_SIZE) {
                len = MVNETA_MAX_RX_BUF_SIZE;
                data_len = len;
        } else {
-               len = rxq->left_size;
+               len = *size;
                data_len = len - ETH_FCS_LEN;
        }
        dma_dir = page_pool_get_dma_dir(rxq->page_pool);
        dma_sync_single_for_cpu(dev->dev.parent,
                                rx_desc->buf_phys_addr,
                                len, dma_dir);
-       if (data_len > 0) {
-               /* refill descriptor with new buffer later */
-               skb_add_rx_frag(rxq->skb,
-                               skb_shinfo(rxq->skb)->nr_frags,
-                               page, pp->rx_offset_correction, data_len,
-                               PAGE_SIZE);
-       }
-       page_pool_release_page(rxq->page_pool, page);
-       rx_desc->buf_phys_addr = 0;
-       rxq->left_size -= len;
+
+       if (data_len > 0 && sinfo->nr_frags < MAX_SKB_FRAGS) {
+               skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags];
+
+               skb_frag_off_set(frag, pp->rx_offset_correction);
+               skb_frag_size_set(frag, data_len);
+               __skb_frag_set_page(frag, page);
+               sinfo->nr_frags++;
+
+               rx_desc->buf_phys_addr = 0;
+       }
+       *size -= len;
+}
+
+static struct sk_buff *
+mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+                     struct xdp_buff *xdp, u32 desc_status)
+{
+       struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+       int i, num_frags = sinfo->nr_frags;
+       skb_frag_t frags[MAX_SKB_FRAGS];
+       struct sk_buff *skb;
+
+       memcpy(frags, sinfo->frags, sizeof(skb_frag_t) * num_frags);
+
+       skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       page_pool_release_page(rxq->page_pool, virt_to_page(xdp->data));
+
+       skb_reserve(skb, xdp->data - xdp->data_hard_start);
+       skb_put(skb, xdp->data_end - xdp->data);
+       mvneta_rx_csum(pp, desc_status, skb);
+
+       for (i = 0; i < num_frags; i++) {
+               struct page *page = skb_frag_page(&frags[i]);
+
+               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+                               page, skb_frag_off(&frags[i]),
+                               skb_frag_size(&frags[i]), PAGE_SIZE);
+               page_pool_release_page(rxq->page_pool, page);
+       }
+
+       return skb;
 }
 
 /* Main rx processing when using software buffer management */
@@ -2322,24 +2339,27 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
                          struct mvneta_port *pp, int budget,
                          struct mvneta_rx_queue *rxq)
 {
-       int rx_proc = 0, rx_todo, refill;
+       int rx_proc = 0, rx_todo, refill, size = 0;
        struct net_device *dev = pp->dev;
+       struct xdp_buff xdp_buf = {
+               .frame_sz = PAGE_SIZE,
+               .rxq = &rxq->xdp_rxq,
+       };
        struct mvneta_stats ps = {};
        struct bpf_prog *xdp_prog;
-       struct xdp_buff xdp_buf;
+       u32 desc_status, frame_sz;
 
        /* Get number of received packets */
        rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
 
        rcu_read_lock();
        xdp_prog = READ_ONCE(pp->xdp_prog);
-       xdp_buf.rxq = &rxq->xdp_rxq;
-       xdp_buf.frame_sz = PAGE_SIZE;
 
        /* Fairness NAPI loop */
        while (rx_proc < budget && rx_proc < rx_todo) {
                struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
                u32 rx_status, index;
+               struct sk_buff *skb;
                struct page *page;
 
                index = rx_desc - rxq->descs;
@@ -2350,54 +2370,66 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
                rxq->refill_num++;
 
                if (rx_status & MVNETA_RXD_FIRST_DESC) {
-                       int err;
-
                        /* Check errors only for FIRST descriptor */
                        if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
                                mvneta_rx_error(pp, rx_desc);
-                               /* leave the descriptor untouched */
-                               continue;
+                               goto next;
                        }
 
-                       err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
-                                                  xdp_prog, page, &ps);
-                       if (err)
-                               continue;
+                       size = rx_desc->data_size;
+                       frame_sz = size - ETH_FCS_LEN;
+                       desc_status = rx_desc->status;
+
+                       mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
+                                            &size, page, &ps);
                } else {
-                       if (unlikely(!rxq->skb)) {
-                               pr_debug("no skb for rx_status 0x%x\n",
-                                        rx_status);
+                       if (unlikely(!xdp_buf.data_hard_start))
                                continue;
-                       }
-                       mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, page);
+
+                       mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
+                                                   &size, page);
                } /* Middle or Last descriptor */
 
                if (!(rx_status & MVNETA_RXD_LAST_DESC))
                        /* no last descriptor this time */
                        continue;
 
-               if (rxq->left_size) {
-                       pr_err("get last desc, but left_size (%d) != 0\n",
-                              rxq->left_size);
-                       dev_kfree_skb_any(rxq->skb);
-                       rxq->left_size = 0;
-                       rxq->skb = NULL;
-                       continue;
+               if (size) {
+                       mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+                       goto next;
                }
 
-               ps.rx_bytes += rxq->skb->len;
-               ps.rx_packets++;
+               if (xdp_prog &&
+                   mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
+                       goto next;
 
-               /* Linux processing */
-               rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
+               skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status);
+               if (IS_ERR(skb)) {
+                       struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+                       mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
 
-               napi_gro_receive(napi, rxq->skb);
+                       u64_stats_update_begin(&stats->syncp);
+                       stats->es.skb_alloc_error++;
+                       stats->rx_dropped++;
+                       u64_stats_update_end(&stats->syncp);
 
-               /* clean uncomplete skb pointer in queue */
-               rxq->skb = NULL;
+                       goto next;
+               }
+
+               ps.rx_bytes += skb->len;
+               ps.rx_packets++;
+
+               skb->protocol = eth_type_trans(skb, dev);
+               napi_gro_receive(napi, skb);
+next:
+               xdp_buf.data_hard_start = NULL;
        }
        rcu_read_unlock();
 
+       if (xdp_buf.data_hard_start)
+               mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1, true);
+
        if (ps.xdp_redirect)
                xdp_do_flush_map();
 
@@ -2604,11 +2636,10 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
                         struct mvneta_tx_queue *txq)
 {
-       int total_len, data_left;
+       int hdr_len, total_len, data_left;
        int desc_count = 0;
        struct mvneta_port *pp = netdev_priv(dev);
        struct tso_t tso;
-       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        int i;
 
        /* Count needed descriptors */
@@ -2621,7 +2652,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
        }
 
        /* Initialize the TSO handler, and prepare the first payload */
-       tso_start(skb, &tso);
+       hdr_len = tso_start(skb, &tso);
 
        total_len = skb->len - hdr_len;
        while (total_len > 0) {
@@ -3327,9 +3358,6 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
 {
        mvneta_rxq_drop_pkts(pp, rxq);
 
-       if (rxq->skb)
-               dev_kfree_skb_any(rxq->skb);
-
        if (rxq->descs)
                dma_free_coherent(pp->dev->dev.parent,
                                  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -3342,8 +3370,6 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
        rxq->descs_phys        = 0;
        rxq->first_to_refill   = 0;
        rxq->refill_num        = 0;
-       rxq->skb               = NULL;
-       rxq->left_size         = 0;
 }
 
 static int mvneta_txq_sw_init(struct mvneta_port *pp,
@@ -3529,26 +3555,60 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
        return 0;
 }
 
-static int mvneta_comphy_init(struct mvneta_port *pp)
+static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface)
 {
        int ret;
 
-       if (!pp->comphy)
-               return 0;
-
-       ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET,
-                              pp->phy_interface);
+       ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface);
        if (ret)
                return ret;
 
        return phy_power_on(pp->comphy);
 }
 
+static int mvneta_config_interface(struct mvneta_port *pp,
+                                  phy_interface_t interface)
+{
+       int ret = 0;
+
+       if (pp->comphy) {
+               if (interface == PHY_INTERFACE_MODE_SGMII ||
+                   interface == PHY_INTERFACE_MODE_1000BASEX ||
+                   interface == PHY_INTERFACE_MODE_2500BASEX) {
+                       ret = mvneta_comphy_init(pp, interface);
+               }
+       } else {
+               switch (interface) {
+               case PHY_INTERFACE_MODE_QSGMII:
+                       mvreg_write(pp, MVNETA_SERDES_CFG,
+                                   MVNETA_QSGMII_SERDES_PROTO);
+                       break;
+
+               case PHY_INTERFACE_MODE_SGMII:
+               case PHY_INTERFACE_MODE_1000BASEX:
+                       mvreg_write(pp, MVNETA_SERDES_CFG,
+                                   MVNETA_SGMII_SERDES_PROTO);
+                       break;
+
+               case PHY_INTERFACE_MODE_2500BASEX:
+                       mvreg_write(pp, MVNETA_SERDES_CFG,
+                                   MVNETA_HSGMII_SERDES_PROTO);
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       pp->phy_interface = interface;
+
+       return ret;
+}
+
 static void mvneta_start_dev(struct mvneta_port *pp)
 {
        int cpu;
 
-       WARN_ON(mvneta_comphy_init(pp));
+       WARN_ON(mvneta_config_interface(pp, pp->phy_interface));
 
        mvneta_max_rx_size_set(pp, pp->pkt_size);
        mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -3576,6 +3636,10 @@ static void mvneta_start_dev(struct mvneta_port *pp)
                    MVNETA_CAUSE_LINK_CHANGE);
 
        phylink_start(pp->phylink);
+
+       /* We may have called phy_speed_down before */
+       phylink_speed_up(pp->phylink);
+
        netif_tx_start_all_queues(pp->dev);
 
        clear_bit(__MVNETA_DOWN, &pp->state);
@@ -3587,6 +3651,9 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
 
        set_bit(__MVNETA_DOWN, &pp->state);
 
+       if (device_may_wakeup(&pp->dev->dev))
+               phylink_speed_down(pp->phylink, false);
+
        phylink_stop(pp->phylink);
 
        if (!pp->neta_armada3700) {
@@ -3923,17 +3990,13 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
        /* When at 2.5G, the link partner can send frames with shortened
         * preambles.
         */
-       if (state->speed == SPEED_2500)
+       if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
                new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
 
-       if (pp->comphy && pp->phy_interface != state->interface &&
-           (state->interface == PHY_INTERFACE_MODE_SGMII ||
-            state->interface == PHY_INTERFACE_MODE_1000BASEX ||
-            state->interface == PHY_INTERFACE_MODE_2500BASEX)) {
-               pp->phy_interface = state->interface;
-
-               WARN_ON(phy_power_off(pp->comphy));
-               WARN_ON(mvneta_comphy_init(pp));
+       if (pp->phy_interface != state->interface) {
+               if (pp->comphy)
+                       WARN_ON(phy_power_off(pp->comphy));
+               WARN_ON(mvneta_config_interface(pp, state->interface));
        }
 
        if (new_ctrl0 != gmac_ctrl0)
@@ -4059,6 +4122,10 @@ static int mvneta_mdio_probe(struct mvneta_port *pp)
        phylink_ethtool_get_wol(pp->phylink, &wol);
        device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
 
+       /* PHY WoL may be enabled but device wakeup disabled */
+       if (wol.supported)
+               device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts);
+
        return err;
 }
 
@@ -4982,12 +5049,10 @@ static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
        /* MAC Cause register should be cleared */
        mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
 
-       if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
-               mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
-       else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
-                phy_interface_mode_is_8023z(phy_mode))
-               mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
-       else if (!phy_interface_mode_is_rgmii(phy_mode))
+       if (phy_mode != PHY_INTERFACE_MODE_QSGMII &&
+           phy_mode != PHY_INTERFACE_MODE_SGMII &&
+           !phy_interface_mode_is_8023z(phy_mode) &&
+           !phy_interface_mode_is_rgmii(phy_mode))
                return -EINVAL;
 
        return 0;
@@ -5176,10 +5241,10 @@ static int mvneta_probe(struct platform_device *pdev)
        if (err < 0)
                goto err_netdev;
 
-       err = mvneta_port_power_up(pp, phy_mode);
+       err = mvneta_port_power_up(pp, pp->phy_interface);
        if (err < 0) {
                dev_err(&pdev->dev, "can't power up port\n");
-               goto err_netdev;
+               return err;
        }
 
        /* Armada3700 network controller does not support per-cpu
index 543a310..32753cc 100644 (file)
 #include <linux/phy.h>
 #include <linux/phylink.h>
 #include <net/flow_offload.h>
+#include <net/page_pool.h>
+#include <linux/bpf.h>
+#include <net/xdp.h>
+
+/* The PacketOffset field is measured in units of 32 bytes and is 3 bits wide,
+ * so the maximum offset is 7 * 32 = 224
+ */
+#define MVPP2_SKB_HEADROOM     min(max(XDP_PACKET_HEADROOM, NET_SKB_PAD), 224)
+
+#define MVPP2_XDP_PASS         0
+#define MVPP2_XDP_DROPPED      BIT(0)
+#define MVPP2_XDP_TX           BIT(1)
+#define MVPP2_XDP_REDIR                BIT(2)
 
 /* Fifo Registers */
 #define MVPP2_RX_DATA_FIFO_SIZE_REG(port)      (0x00 + 4 * (port))
        ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
              ETH_HLEN + ETH_FCS_LEN, cache_line_size())
 
-#define MVPP2_RX_BUF_SIZE(pkt_size)    ((pkt_size) + NET_SKB_PAD)
+#define MVPP2_RX_BUF_SIZE(pkt_size)    ((pkt_size) + MVPP2_SKB_HEADROOM)
 #define MVPP2_RX_TOTAL_SIZE(buf_size)  ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
 #define MVPP2_RX_MAX_PKT_SIZE(total_size) \
-       ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
+       ((total_size) - MVPP2_SKB_HEADROOM - MVPP2_SKB_SHINFO_SIZE)
+
+#define MVPP2_MAX_RX_BUF_SIZE  (PAGE_SIZE - MVPP2_SKB_SHINFO_SIZE - MVPP2_SKB_HEADROOM)
 
 #define MVPP2_BIT_TO_BYTE(bit)         ((bit) / 8)
 #define MVPP2_BIT_TO_WORD(bit)         ((bit) / 32)
@@ -689,9 +704,9 @@ enum mvpp2_prs_l3_cast {
 #define MVPP2_BM_COOKIE_POOL_OFFS      8
 #define MVPP2_BM_COOKIE_CPU_OFFS       24
 
-#define MVPP2_BM_SHORT_FRAME_SIZE              512
-#define MVPP2_BM_LONG_FRAME_SIZE               2048
-#define MVPP2_BM_JUMBO_FRAME_SIZE              10240
+#define MVPP2_BM_SHORT_FRAME_SIZE      704     /* frame size 128 */
+#define MVPP2_BM_LONG_FRAME_SIZE       2240    /* frame size 1664 */
+#define MVPP2_BM_JUMBO_FRAME_SIZE      10432   /* frame size 9856 */
 /* BM short pool packet size
  * These value assure that for SWF the total number
  * of bytes allocated for each buffer will be 512
@@ -820,6 +835,9 @@ struct mvpp2 {
 
        /* RSS Indirection tables */
        struct mvpp2_rss_table *rss_tables[MVPP22_N_RSS_TABLES];
+
+       /* page_pool allocator */
+       struct page_pool *page_pool[MVPP2_PORT_MAX_RXQ];
 };
 
 struct mvpp2_pcpu_stats {
@@ -828,6 +846,14 @@ struct mvpp2_pcpu_stats {
        u64     rx_bytes;
        u64     tx_packets;
        u64     tx_bytes;
+       /* XDP */
+       u64     xdp_redirect;
+       u64     xdp_pass;
+       u64     xdp_drop;
+       u64     xdp_xmit;
+       u64     xdp_xmit_err;
+       u64     xdp_tx;
+       u64     xdp_tx_err;
 };
 
 /* Per-CPU port control */
@@ -909,6 +935,8 @@ struct mvpp2_port {
        unsigned int ntxqs;
        struct net_device *dev;
 
+       struct bpf_prog *xdp_prog;
+
        int pkt_size;
 
        /* Per-CPU port control */
@@ -928,6 +956,8 @@ struct mvpp2_port {
        struct mvpp2_pcpu_stats __percpu *stats;
        u64 *ethtool_stats;
 
+       unsigned long state;
+
        /* Per-port work and its lock to gather hardware statistics */
        struct mutex gather_stats_lock;
        struct delayed_work stats_work;
@@ -1060,9 +1090,20 @@ struct mvpp2_rx_desc {
        };
 };
 
+enum mvpp2_tx_buf_type {
+       MVPP2_TYPE_SKB,
+       MVPP2_TYPE_XDP_TX,
+       MVPP2_TYPE_XDP_NDO,
+};
+
 struct mvpp2_txq_pcpu_buf {
+       enum mvpp2_tx_buf_type type;
+
        /* Transmitted SKB */
-       struct sk_buff *skb;
+       union {
+               struct xdp_frame *xdpf;
+               struct sk_buff *skb;
+       };
 
        /* Physical address of transmitted buffer */
        dma_addr_t dma;
@@ -1161,6 +1202,10 @@ struct mvpp2_rx_queue {
 
        /* Port's logic RXQ number to which physical RXQ is mapped */
        int logic_rxq;
+
+       /* XDP memory accounting */
+       struct xdp_rxq_info xdp_rxq_short;
+       struct xdp_rxq_info xdp_rxq_long;
 };
 
 struct mvpp2_bm_pool {
index 2b5dad2..6a3f356 100644 (file)
@@ -36,6 +36,7 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/tso.h>
+#include <linux/bpf_trace.h>
 
 #include "mvpp2.h"
 #include "mvpp2_prs.h"
@@ -95,6 +96,24 @@ static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
        return cpu % priv->nthreads;
 }
 
+static struct page_pool *
+mvpp2_create_page_pool(struct device *dev, int num, int len,
+                      enum dma_data_direction dma_dir)
+{
+       struct page_pool_params pp_params = {
+               /* internal DMA mapping in page_pool */
+               .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+               .pool_size = num,
+               .nid = NUMA_NO_NODE,
+               .dev = dev,
+               .dma_dir = dma_dir,
+               .offset = MVPP2_SKB_HEADROOM,
+               .max_len = len,
+       };
+
+       return page_pool_create(&pp_params);
+}
+
 /* These accessors should be used to access:
  *
  * - per-thread registers, where each thread has its own copy of the
@@ -281,12 +300,17 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
 
 static void mvpp2_txq_inc_put(struct mvpp2_port *port,
                              struct mvpp2_txq_pcpu *txq_pcpu,
-                             struct sk_buff *skb,
-                             struct mvpp2_tx_desc *tx_desc)
+                             void *data,
+                             struct mvpp2_tx_desc *tx_desc,
+                             enum mvpp2_tx_buf_type buf_type)
 {
        struct mvpp2_txq_pcpu_buf *tx_buf =
                txq_pcpu->buffs + txq_pcpu->txq_put_index;
-       tx_buf->skb = skb;
+       tx_buf->type = buf_type;
+       if (buf_type == MVPP2_TYPE_SKB)
+               tx_buf->skb = data;
+       else
+               tx_buf->xdpf = data;
        tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
        tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
                mvpp2_txdesc_offset_get(port, tx_desc);
@@ -327,17 +351,25 @@ static inline int mvpp2_txq_phys(int port, int txq)
        return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
 }
 
-static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
+/* Returns a struct page if page_pool is set, otherwise a buffer */
+static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool,
+                             struct page_pool *page_pool)
 {
+       if (page_pool)
+               return page_pool_dev_alloc_pages(page_pool);
+
        if (likely(pool->frag_size <= PAGE_SIZE))
                return netdev_alloc_frag(pool->frag_size);
-       else
-               return kmalloc(pool->frag_size, GFP_ATOMIC);
+
+       return kmalloc(pool->frag_size, GFP_ATOMIC);
 }
 
-static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
+static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
+                           struct page_pool *page_pool, void *data)
 {
-       if (likely(pool->frag_size <= PAGE_SIZE))
+       if (page_pool)
+               page_pool_put_full_page(page_pool, virt_to_head_page(data), false);
+       else if (likely(pool->frag_size <= PAGE_SIZE))
                skb_free_frag(data);
        else
                kfree(data);
@@ -442,6 +474,7 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
 static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
                               struct mvpp2_bm_pool *bm_pool, int buf_num)
 {
+       struct page_pool *pp = NULL;
        int i;
 
        if (buf_num > bm_pool->buf_num) {
@@ -450,6 +483,9 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
                buf_num = bm_pool->buf_num;
        }
 
+       if (priv->percpu_pools)
+               pp = priv->page_pool[bm_pool->id];
+
        for (i = 0; i < buf_num; i++) {
                dma_addr_t buf_dma_addr;
                phys_addr_t buf_phys_addr;
@@ -458,14 +494,15 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
                mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
                                        &buf_dma_addr, &buf_phys_addr);
 
-               dma_unmap_single(dev, buf_dma_addr,
-                                bm_pool->buf_size, DMA_FROM_DEVICE);
+               if (!pp)
+                       dma_unmap_single(dev, buf_dma_addr,
+                                        bm_pool->buf_size, DMA_FROM_DEVICE);
 
                data = (void *)phys_to_virt(buf_phys_addr);
                if (!data)
                        break;
 
-               mvpp2_frag_free(bm_pool, data);
+               mvpp2_frag_free(bm_pool, pp, data);
        }
 
        /* Update BM driver with number of buffers removed from pool */
@@ -511,6 +548,11 @@ static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
        val |= MVPP2_BM_STOP_MASK;
        mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
 
+       if (priv->percpu_pools) {
+               page_pool_destroy(priv->page_pool[bm_pool->id]);
+               priv->page_pool[bm_pool->id] = NULL;
+       }
+
        dma_free_coherent(dev, bm_pool->size_bytes,
                          bm_pool->virt_addr,
                          bm_pool->dma_addr);
@@ -546,10 +588,40 @@ err_unroll_pools:
 
 static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
 {
+       enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
        int i, err, poolnum = MVPP2_BM_POOLS_NUM;
+       struct mvpp2_port *port;
+
+       if (priv->percpu_pools) {
+               for (i = 0; i < priv->port_count; i++) {
+                       port = priv->port_list[i];
+                       if (port->xdp_prog) {
+                               dma_dir = DMA_BIDIRECTIONAL;
+                               break;
+                       }
+               }
 
-       if (priv->percpu_pools)
                poolnum = mvpp2_get_nrxqs(priv) * 2;
+               for (i = 0; i < poolnum; i++) {
+                       /* the pool in use */
+                       int pn = i / (poolnum / 2);
+
+                       priv->page_pool[i] =
+                               mvpp2_create_page_pool(dev,
+                                                      mvpp2_pools[pn].buf_num,
+                                                      mvpp2_pools[pn].pkt_size,
+                                                      dma_dir);
+                       if (IS_ERR(priv->page_pool[i])) {
+                               int j;
+
+                               for (j = 0; j < i; j++) {
+                                       page_pool_destroy(priv->page_pool[j]);
+                                       priv->page_pool[j] = NULL;
+                               }
+                               return PTR_ERR(priv->page_pool[i]);
+                       }
+               }
+       }
 
        dev_info(dev, "using %d %s buffers\n", poolnum,
                 priv->percpu_pools ? "per-cpu" : "shared");
@@ -632,23 +704,31 @@ static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
 
 static void *mvpp2_buf_alloc(struct mvpp2_port *port,
                             struct mvpp2_bm_pool *bm_pool,
+                            struct page_pool *page_pool,
                             dma_addr_t *buf_dma_addr,
                             phys_addr_t *buf_phys_addr,
                             gfp_t gfp_mask)
 {
        dma_addr_t dma_addr;
+       struct page *page;
        void *data;
 
-       data = mvpp2_frag_alloc(bm_pool);
+       data = mvpp2_frag_alloc(bm_pool, page_pool);
        if (!data)
                return NULL;
 
-       dma_addr = dma_map_single(port->dev->dev.parent, data,
-                                 MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
-                                 DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
-               mvpp2_frag_free(bm_pool, data);
-               return NULL;
+       if (page_pool) {
+               page = (struct page *)data;
+               dma_addr = page_pool_get_dma_addr(page);
+               data = page_to_virt(page);
+       } else {
+               dma_addr = dma_map_single(port->dev->dev.parent, data,
+                                         MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
+                                         DMA_FROM_DEVICE);
+               if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
+                       mvpp2_frag_free(bm_pool, NULL, data);
+                       return NULL;
+               }
        }
        *buf_dma_addr = dma_addr;
        *buf_phys_addr = virt_to_phys(data);
@@ -706,6 +786,7 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
        int i, buf_size, total_size;
        dma_addr_t dma_addr;
        phys_addr_t phys_addr;
+       struct page_pool *pp = NULL;
        void *buf;
 
        if (port->priv->percpu_pools &&
@@ -726,8 +807,10 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
                return 0;
        }
 
+       if (port->priv->percpu_pools)
+               pp = port->priv->page_pool[bm_pool->id];
        for (i = 0; i < buf_num; i++) {
-               buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
+               buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
                                      &phys_addr, GFP_KERNEL);
                if (!buf)
                        break;
@@ -907,28 +990,27 @@ static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
 /* Initialize pools for swf, percpu buffers variant */
 static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
 {
-       struct mvpp2_bm_pool *p;
+       struct mvpp2_bm_pool *bm_pool;
        int i;
 
        for (i = 0; i < port->nrxqs; i++) {
-               p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
-                                            mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
-               if (!p)
+               bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
+                                                  mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
+               if (!bm_pool)
                        return -ENOMEM;
 
-               port->priv->bm_pools[i].port_map |= BIT(port->id);
-               mvpp2_rxq_short_pool_set(port, i, port->priv->bm_pools[i].id);
+               bm_pool->port_map |= BIT(port->id);
+               mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
        }
 
        for (i = 0; i < port->nrxqs; i++) {
-               p = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
-                                            mvpp2_pools[MVPP2_BM_LONG].pkt_size);
-               if (!p)
+               bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
+                                                  mvpp2_pools[MVPP2_BM_LONG].pkt_size);
+               if (!bm_pool)
                        return -ENOMEM;
 
-               port->priv->bm_pools[i + port->nrxqs].port_map |= BIT(port->id);
-               mvpp2_rxq_long_pool_set(port, i,
-                                       port->priv->bm_pools[i + port->nrxqs].id);
+               bm_pool->port_map |= BIT(port->id);
+               mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
        }
 
        port->pool_long = NULL;
@@ -1114,6 +1196,17 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
        }
 }
 
+/* Only GOP port 0 has an XLG MAC */
+static bool mvpp2_port_supports_xlg(struct mvpp2_port *port)
+{
+       return port->gop_id == 0;
+}
+
+static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
+{
+       return !(port->priv->hw_version == MVPP22 && port->gop_id == 0);
+}
+
 /* Port configuration routines */
 static bool mvpp2_is_xlg(phy_interface_t interface)
 {
@@ -1121,6 +1214,17 @@ static bool mvpp2_is_xlg(phy_interface_t interface)
               interface == PHY_INTERFACE_MODE_XAUI;
 }
 
+static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set)
+{
+       u32 old, val;
+
+       old = val = readl(ptr);
+       val &= ~mask;
+       val |= set;
+       if (old != val)
+               writel(val, ptr);
+}
+
 static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
 {
        struct mvpp2 *priv = port->priv;
@@ -1194,7 +1298,7 @@ static int mvpp22_gop_init(struct mvpp2_port *port)
        case PHY_INTERFACE_MODE_RGMII_ID:
        case PHY_INTERFACE_MODE_RGMII_RXID:
        case PHY_INTERFACE_MODE_RGMII_TXID:
-               if (port->gop_id == 0)
+               if (!mvpp2_port_supports_rgmii(port))
                        goto invalid_conf;
                mvpp22_gop_init_rgmii(port);
                break;
@@ -1204,7 +1308,7 @@ static int mvpp22_gop_init(struct mvpp2_port *port)
                mvpp22_gop_init_sgmii(port);
                break;
        case PHY_INTERFACE_MODE_10GBASER:
-               if (port->gop_id != 0)
+               if (!mvpp2_port_supports_xlg(port))
                        goto invalid_conf;
                mvpp22_gop_init_10gkr(port);
                break;
@@ -1246,7 +1350,7 @@ static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
                writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
        }
 
-       if (port->gop_id == 0) {
+       if (mvpp2_port_supports_xlg(port)) {
                /* Enable the XLG/GIG irqs for this port */
                val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
                if (mvpp2_is_xlg(port->phy_interface))
@@ -1261,7 +1365,7 @@ static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
 {
        u32 val;
 
-       if (port->gop_id == 0) {
+       if (mvpp2_port_supports_xlg(port)) {
                val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
                val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
                         MVPP22_XLG_EXT_INT_MASK_GIG);
@@ -1290,7 +1394,7 @@ static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
                writel(val, port->base + MVPP22_GMAC_INT_MASK);
        }
 
-       if (port->gop_id == 0) {
+       if (mvpp2_port_supports_xlg(port)) {
                val = readl(port->base + MVPP22_XLG_INT_MASK);
                val |= MVPP22_XLG_INT_MASK_LINK;
                writel(val, port->base + MVPP22_XLG_INT_MASK);
@@ -1328,8 +1432,8 @@ static void mvpp2_port_enable(struct mvpp2_port *port)
 {
        u32 val;
 
-       /* Only GOP port 0 has an XLG MAC */
-       if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
+       if (mvpp2_port_supports_xlg(port) &&
+           mvpp2_is_xlg(port->phy_interface)) {
                val = readl(port->base + MVPP22_XLG_CTRL0_REG);
                val |= MVPP22_XLG_CTRL0_PORT_EN;
                val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
@@ -1346,8 +1450,8 @@ static void mvpp2_port_disable(struct mvpp2_port *port)
 {
        u32 val;
 
-       /* Only GOP port 0 has an XLG MAC */
-       if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
+       if (mvpp2_port_supports_xlg(port) &&
+           mvpp2_is_xlg(port->phy_interface)) {
                val = readl(port->base + MVPP22_XLG_CTRL0_REG);
                val &= ~MVPP22_XLG_CTRL0_PORT_EN;
                writel(val, port->base + MVPP22_XLG_CTRL0_REG);
@@ -1390,6 +1494,16 @@ static void mvpp2_port_loopback_set(struct mvpp2_port *port,
        writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
 }
 
+enum {
+       ETHTOOL_XDP_REDIRECT,
+       ETHTOOL_XDP_PASS,
+       ETHTOOL_XDP_DROP,
+       ETHTOOL_XDP_TX,
+       ETHTOOL_XDP_TX_ERR,
+       ETHTOOL_XDP_XMIT,
+       ETHTOOL_XDP_XMIT_ERR,
+};
+
 struct mvpp2_ethtool_counter {
        unsigned int offset;
        const char string[ETH_GSTRING_LEN];
@@ -1482,10 +1596,21 @@ static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
        { MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
 };
 
+static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = {
+       { ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", },
+       { ETHTOOL_XDP_PASS, "rx_xdp_pass", },
+       { ETHTOOL_XDP_DROP, "rx_xdp_drop", },
+       { ETHTOOL_XDP_TX, "rx_xdp_tx", },
+       { ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", },
+       { ETHTOOL_XDP_XMIT, "tx_xdp_xmit", },
+       { ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", },
+};
+
 #define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs)    (ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
                                                 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
                                                 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
-                                                (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)))
+                                                (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \
+                                                ARRAY_SIZE(mvpp2_ethtool_xdp))
 
 static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
                                      u8 *data)
@@ -1524,10 +1649,57 @@ static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
                        data += ETH_GSTRING_LEN;
                }
        }
+
+       for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) {
+               strscpy(data, mvpp2_ethtool_xdp[i].string,
+                       ETH_GSTRING_LEN);
+               data += ETH_GSTRING_LEN;
+       }
+}
+
+static void
+mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
+{
+       unsigned int start;
+       unsigned int cpu;
+
+       /* Gather XDP Statistics */
+       for_each_possible_cpu(cpu) {
+               struct mvpp2_pcpu_stats *cpu_stats;
+               u64     xdp_redirect;
+               u64     xdp_pass;
+               u64     xdp_drop;
+               u64     xdp_xmit;
+               u64     xdp_xmit_err;
+               u64     xdp_tx;
+               u64     xdp_tx_err;
+
+               cpu_stats = per_cpu_ptr(port->stats, cpu);
+               do {
+                       start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+                       xdp_redirect = cpu_stats->xdp_redirect;
+                       xdp_pass   = cpu_stats->xdp_pass;
+                       xdp_drop = cpu_stats->xdp_drop;
+                       xdp_xmit   = cpu_stats->xdp_xmit;
+                       xdp_xmit_err   = cpu_stats->xdp_xmit_err;
+                       xdp_tx   = cpu_stats->xdp_tx;
+                       xdp_tx_err   = cpu_stats->xdp_tx_err;
+               } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+
+               xdp_stats->xdp_redirect += xdp_redirect;
+               xdp_stats->xdp_pass   += xdp_pass;
+               xdp_stats->xdp_drop += xdp_drop;
+               xdp_stats->xdp_xmit   += xdp_xmit;
+               xdp_stats->xdp_xmit_err   += xdp_xmit_err;
+               xdp_stats->xdp_tx   += xdp_tx;
+               xdp_stats->xdp_tx_err   += xdp_tx_err;
+       }
 }
 
 static void mvpp2_read_stats(struct mvpp2_port *port)
 {
+       struct mvpp2_pcpu_stats xdp_stats = {};
+       const struct mvpp2_ethtool_counter *s;
        u64 *pstats;
        int i, q;
 
@@ -1544,7 +1716,7 @@ static void mvpp2_read_stats(struct mvpp2_port *port)
        for (q = 0; q < port->ntxqs; q++)
                for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
                        *pstats++ += mvpp2_read_index(port->priv,
-                                                     MVPP22_CTRS_TX_CTR(port->id, i),
+                                                     MVPP22_CTRS_TX_CTR(port->id, q),
                                                      mvpp2_ethtool_txq_regs[i].offset);
 
        /* Rxqs are numbered from 0 from the user standpoint, but not from the
@@ -1553,8 +1725,39 @@ static void mvpp2_read_stats(struct mvpp2_port *port)
        for (q = 0; q < port->nrxqs; q++)
                for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
                        *pstats++ += mvpp2_read_index(port->priv,
-                                                     port->first_rxq + i,
+                                                     port->first_rxq + q,
                                                      mvpp2_ethtool_rxq_regs[i].offset);
+
+       /* Gather XDP Statistics */
+       mvpp2_get_xdp_stats(port, &xdp_stats);
+
+       for (i = 0, s = mvpp2_ethtool_xdp;
+                s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp);
+            s++, i++) {
+               switch (s->offset) {
+               case ETHTOOL_XDP_REDIRECT:
+                       *pstats++ = xdp_stats.xdp_redirect;
+                       break;
+               case ETHTOOL_XDP_PASS:
+                       *pstats++ = xdp_stats.xdp_pass;
+                       break;
+               case ETHTOOL_XDP_DROP:
+                       *pstats++ = xdp_stats.xdp_drop;
+                       break;
+               case ETHTOOL_XDP_TX:
+                       *pstats++ = xdp_stats.xdp_tx;
+                       break;
+               case ETHTOOL_XDP_TX_ERR:
+                       *pstats++ = xdp_stats.xdp_tx_err;
+                       break;
+               case ETHTOOL_XDP_XMIT:
+                       *pstats++ = xdp_stats.xdp_xmit;
+                       break;
+               case ETHTOOL_XDP_XMIT_ERR:
+                       *pstats++ = xdp_stats.xdp_xmit_err;
+                       break;
+               }
+       }
 }
 
 static void mvpp2_gather_hw_statistics(struct work_struct *work)
@@ -2240,11 +2443,15 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
                struct mvpp2_txq_pcpu_buf *tx_buf =
                        txq_pcpu->buffs + txq_pcpu->txq_get_index;
 
-               if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
+               if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) &&
+                   tx_buf->type != MVPP2_TYPE_XDP_TX)
                        dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
                                         tx_buf->size, DMA_TO_DEVICE);
-               if (tx_buf->skb)
+               if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb)
                        dev_kfree_skb_any(tx_buf->skb);
+               else if (tx_buf->type == MVPP2_TYPE_XDP_TX ||
+                        tx_buf->type == MVPP2_TYPE_XDP_NDO)
+                       xdp_return_frame(tx_buf->xdpf);
 
                mvpp2_txq_inc_get(txq_pcpu);
        }
@@ -2353,10 +2560,11 @@ static int mvpp2_aggr_txq_init(struct platform_device *pdev,
 /* Create a specified Rx queue */
 static int mvpp2_rxq_init(struct mvpp2_port *port,
                          struct mvpp2_rx_queue *rxq)
-
 {
+       struct mvpp2 *priv = port->priv;
        unsigned int thread;
        u32 rxq_dma;
+       int err;
 
        rxq->size = port->rx_ring_size;
 
@@ -2385,7 +2593,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
        put_cpu();
 
        /* Set Offset */
-       mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
+       mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM);
 
        /* Set coalescing pkts and time */
        mvpp2_rx_pkts_coal_set(port, rxq);
@@ -2394,7 +2602,43 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
        /* Add number of descriptors ready for receiving packets */
        mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
 
+       if (priv->percpu_pools) {
+               err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id);
+               if (err < 0)
+                       goto err_free_dma;
+
+               err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id);
+               if (err < 0)
+                       goto err_unregister_rxq_short;
+
+               /* Every RXQ has a pool for short and another for long packets */
+               err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short,
+                                                MEM_TYPE_PAGE_POOL,
+                                                priv->page_pool[rxq->logic_rxq]);
+               if (err < 0)
+                       goto err_unregister_rxq_long;
+
+               err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long,
+                                                MEM_TYPE_PAGE_POOL,
+                                                priv->page_pool[rxq->logic_rxq +
+                                                                port->nrxqs]);
+               if (err < 0)
+                       goto err_unregister_mem_rxq_short;
+       }
+
        return 0;
+
+err_unregister_mem_rxq_short:
+       xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short);
+err_unregister_rxq_long:
+       xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
+err_unregister_rxq_short:
+       xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
+err_free_dma:
+       dma_free_coherent(port->dev->dev.parent,
+                         rxq->size * MVPP2_DESC_ALIGNED_SIZE,
+                         rxq->descs, rxq->descs_dma);
+       return err;
 }
 
 /* Push packets received by the RXQ to BM pool */
@@ -2428,6 +2672,12 @@ static void mvpp2_rxq_deinit(struct mvpp2_port *port,
 {
        unsigned int thread;
 
+       if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short))
+               xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
+
+       if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long))
+               xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
+
        mvpp2_rxq_drop_pkts(port, rxq);
 
        if (rxq->descs)
@@ -2689,7 +2939,7 @@ err_cleanup:
 static int mvpp2_setup_txqs(struct mvpp2_port *port)
 {
        struct mvpp2_tx_queue *txq;
-       int queue, err, cpu;
+       int queue, err;
 
        for (queue = 0; queue < port->ntxqs; queue++) {
                txq = port->txqs[queue];
@@ -2698,8 +2948,8 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port)
                        goto err_cleanup;
 
                /* Assign this queue to a CPU */
-               cpu = queue % num_present_cpus();
-               netif_set_xps_queue(port->dev, cpumask_of(cpu), queue);
+               if (queue < num_possible_cpus())
+                       netif_set_xps_queue(port->dev, cpumask_of(queue), queue);
        }
 
        if (port->has_tx_irqs) {
@@ -2740,7 +2990,8 @@ static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
 
        mvpp22_gop_mask_irq(port);
 
-       if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface)) {
+       if (mvpp2_port_supports_xlg(port) &&
+           mvpp2_is_xlg(port->phy_interface)) {
                val = readl(port->base + MVPP22_XLG_INT_STAT);
                if (val & MVPP22_XLG_INT_STAT_LINK) {
                        event = true;
@@ -2868,14 +3119,15 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
 
 /* Allocate a new skb and add it to BM pool */
 static int mvpp2_rx_refill(struct mvpp2_port *port,
-                          struct mvpp2_bm_pool *bm_pool, int pool)
+                          struct mvpp2_bm_pool *bm_pool,
+                          struct page_pool *page_pool, int pool)
 {
        dma_addr_t dma_addr;
        phys_addr_t phys_addr;
        void *buf;
 
-       buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
-                             GFP_ATOMIC);
+       buf = mvpp2_buf_alloc(port, bm_pool, page_pool,
+                             &dma_addr, &phys_addr, GFP_ATOMIC);
        if (!buf)
                return -ENOMEM;
 
@@ -2916,15 +3168,251 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
        return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
 }
 
+static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte)
+{
+       unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+       struct mvpp2_tx_queue *aggr_txq;
+       struct mvpp2_txq_pcpu *txq_pcpu;
+       struct mvpp2_tx_queue *txq;
+       struct netdev_queue *nq;
+
+       txq = port->txqs[txq_id];
+       txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
+       nq = netdev_get_tx_queue(port->dev, txq_id);
+       aggr_txq = &port->priv->aggr_txqs[thread];
+
+       txq_pcpu->reserved_num -= nxmit;
+       txq_pcpu->count += nxmit;
+       aggr_txq->count += nxmit;
+
+       /* Enable transmit */
+       wmb();
+       mvpp2_aggr_txq_pend_desc_add(port, nxmit);
+
+       if (txq_pcpu->count >= txq_pcpu->stop_threshold)
+               netif_tx_stop_queue(nq);
+
+       /* Finalize TX processing */
+       if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
+               mvpp2_txq_done(port, txq, txq_pcpu);
+}
+
+static int
+mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id,
+                      struct xdp_frame *xdpf, bool dma_map)
+{
+       unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+       u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE |
+                    MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
+       enum mvpp2_tx_buf_type buf_type;
+       struct mvpp2_txq_pcpu *txq_pcpu;
+       struct mvpp2_tx_queue *aggr_txq;
+       struct mvpp2_tx_desc *tx_desc;
+       struct mvpp2_tx_queue *txq;
+       int ret = MVPP2_XDP_TX;
+       dma_addr_t dma_addr;
+
+       txq = port->txqs[txq_id];
+       txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
+       aggr_txq = &port->priv->aggr_txqs[thread];
+
+       /* Check number of available descriptors */
+       if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) ||
+           mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) {
+               ret = MVPP2_XDP_DROPPED;
+               goto out;
+       }
+
+       /* Get a descriptor for the first part of the packet */
+       tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+       mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
+       mvpp2_txdesc_size_set(port, tx_desc, xdpf->len);
+
+       if (dma_map) {
+               /* XDP_REDIRECT or AF_XDP */
+               dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data,
+                                         xdpf->len, DMA_TO_DEVICE);
+
+               if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
+                       mvpp2_txq_desc_put(txq);
+                       ret = MVPP2_XDP_DROPPED;
+                       goto out;
+               }
+
+               buf_type = MVPP2_TYPE_XDP_NDO;
+       } else {
+               /* XDP_TX */
+               struct page *page = virt_to_page(xdpf->data);
+
+               dma_addr = page_pool_get_dma_addr(page) +
+                          sizeof(*xdpf) + xdpf->headroom;
+               dma_sync_single_for_device(port->dev->dev.parent, dma_addr,
+                                          xdpf->len, DMA_BIDIRECTIONAL);
+
+               buf_type = MVPP2_TYPE_XDP_TX;
+       }
+
+       mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr);
+
+       mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
+       mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type);
+
+out:
+       return ret;
+}
+
+static int
+mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp)
+{
+       struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
+       struct xdp_frame *xdpf;
+       u16 txq_id;
+       int ret;
+
+       xdpf = xdp_convert_buff_to_frame(xdp);
+       if (unlikely(!xdpf))
+               return MVPP2_XDP_DROPPED;
+
+       /* The first of the TX queues are used for XPS,
+        * the second half for XDP_TX
+        */
+       txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
+
+       ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false);
+       if (ret == MVPP2_XDP_TX) {
+               u64_stats_update_begin(&stats->syncp);
+               stats->tx_bytes += xdpf->len;
+               stats->tx_packets++;
+               stats->xdp_tx++;
+               u64_stats_update_end(&stats->syncp);
+
+               mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len);
+       } else {
+               u64_stats_update_begin(&stats->syncp);
+               stats->xdp_tx_err++;
+               u64_stats_update_end(&stats->syncp);
+       }
+
+       return ret;
+}
+
+static int
+mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
+              struct xdp_frame **frames, u32 flags)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       int i, nxmit_byte = 0, nxmit = num_frame;
+       struct mvpp2_pcpu_stats *stats;
+       u16 txq_id;
+       u32 ret;
+
+       if (unlikely(test_bit(0, &port->state)))
+               return -ENETDOWN;
+
+       if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+               return -EINVAL;
+
+       /* The first of the TX queues are used for XPS,
+        * the second half for XDP_TX
+        */
+       txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
+
+       for (i = 0; i < num_frame; i++) {
+               ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true);
+               if (ret == MVPP2_XDP_TX) {
+                       nxmit_byte += frames[i]->len;
+               } else {
+                       xdp_return_frame_rx_napi(frames[i]);
+                       nxmit--;
+               }
+       }
+
+       if (likely(nxmit > 0))
+               mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte);
+
+       stats = this_cpu_ptr(port->stats);
+       u64_stats_update_begin(&stats->syncp);
+       stats->tx_bytes += nxmit_byte;
+       stats->tx_packets += nxmit;
+       stats->xdp_xmit += nxmit;
+       stats->xdp_xmit_err += num_frame - nxmit;
+       u64_stats_update_end(&stats->syncp);
+
+       return nxmit;
+}
+
+static int
+mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
+             struct bpf_prog *prog, struct xdp_buff *xdp,
+             struct page_pool *pp, struct mvpp2_pcpu_stats *stats)
+{
+       unsigned int len, sync, err;
+       struct page *page;
+       u32 ret, act;
+
+       len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
+       act = bpf_prog_run_xdp(prog, xdp);
+
+       /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
+       sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
+       sync = max(sync, len);
+
+       switch (act) {
+       case XDP_PASS:
+               stats->xdp_pass++;
+               ret = MVPP2_XDP_PASS;
+               break;
+       case XDP_REDIRECT:
+               err = xdp_do_redirect(port->dev, xdp, prog);
+               if (unlikely(err)) {
+                       ret = MVPP2_XDP_DROPPED;
+                       page = virt_to_head_page(xdp->data);
+                       page_pool_put_page(pp, page, sync, true);
+               } else {
+                       ret = MVPP2_XDP_REDIR;
+                       stats->xdp_redirect++;
+               }
+               break;
+       case XDP_TX:
+               ret = mvpp2_xdp_xmit_back(port, xdp);
+               if (ret != MVPP2_XDP_TX) {
+                       page = virt_to_head_page(xdp->data);
+                       page_pool_put_page(pp, page, sync, true);
+               }
+               break;
+       default:
+               bpf_warn_invalid_xdp_action(act);
+               fallthrough;
+       case XDP_ABORTED:
+               trace_xdp_exception(port->dev, prog, act);
+               fallthrough;
+       case XDP_DROP:
+               page = virt_to_head_page(xdp->data);
+               page_pool_put_page(pp, page, sync, true);
+               ret = MVPP2_XDP_DROPPED;
+               stats->xdp_drop++;
+               break;
+       }
+
+       return ret;
+}
+
 /* Main rx processing */
 static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
                    int rx_todo, struct mvpp2_rx_queue *rxq)
 {
        struct net_device *dev = port->dev;
+       struct mvpp2_pcpu_stats ps = {};
+       enum dma_data_direction dma_dir;
+       struct bpf_prog *xdp_prog;
+       struct xdp_buff xdp;
        int rx_received;
        int rx_done = 0;
-       u32 rcvd_pkts = 0;
-       u32 rcvd_bytes = 0;
+       u32 xdp_ret = 0;
+
+       rcu_read_lock();
+
+       xdp_prog = READ_ONCE(port->xdp_prog);
 
        /* Get number of received packets and clamp the to-do */
        rx_received = mvpp2_rxq_received(port, rxq->id);
@@ -2934,12 +3422,13 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
        while (rx_done < rx_todo) {
                struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
                struct mvpp2_bm_pool *bm_pool;
+               struct page_pool *pp = NULL;
                struct sk_buff *skb;
                unsigned int frag_size;
                dma_addr_t dma_addr;
                phys_addr_t phys_addr;
                u32 rx_status;
-               int pool, rx_bytes, err;
+               int pool, rx_bytes, err, ret;
                void *data;
 
                rx_done++;
@@ -2962,9 +3451,18 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
                if (rx_status & MVPP2_RXD_ERR_SUMMARY)
                        goto err_drop_frame;
 
+               if (port->priv->percpu_pools) {
+                       pp = port->priv->page_pool[pool];
+                       dma_dir = page_pool_get_dma_dir(pp);
+               } else {
+                       dma_dir = DMA_FROM_DEVICE;
+               }
+
                dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
                                        rx_bytes + MVPP2_MH_SIZE,
-                                       DMA_FROM_DEVICE);
+                                       dma_dir);
+
+               /* Prefetch header */
                prefetch(data);
 
                if (bm_pool->frag_size > PAGE_SIZE)
@@ -2972,26 +3470,58 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
                else
                        frag_size = bm_pool->frag_size;
 
+               if (xdp_prog) {
+                       xdp.data_hard_start = data;
+                       xdp.data = data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM;
+                       xdp.data_end = xdp.data + rx_bytes;
+                       xdp.frame_sz = PAGE_SIZE;
+
+                       if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
+                               xdp.rxq = &rxq->xdp_rxq_short;
+                       else
+                               xdp.rxq = &rxq->xdp_rxq_long;
+
+                       xdp_set_data_meta_invalid(&xdp);
+
+                       ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp, &ps);
+
+                       if (ret) {
+                               xdp_ret |= ret;
+                               err = mvpp2_rx_refill(port, bm_pool, pp, pool);
+                               if (err) {
+                                       netdev_err(port->dev, "failed to refill BM pools\n");
+                                       goto err_drop_frame;
+                               }
+
+                               ps.rx_packets++;
+                               ps.rx_bytes += rx_bytes;
+                               continue;
+                       }
+               }
+
                skb = build_skb(data, frag_size);
                if (!skb) {
                        netdev_warn(port->dev, "skb build failed\n");
                        goto err_drop_frame;
                }
 
-               err = mvpp2_rx_refill(port, bm_pool, pool);
+               err = mvpp2_rx_refill(port, bm_pool, pp, pool);
                if (err) {
                        netdev_err(port->dev, "failed to refill BM pools\n");
                        goto err_drop_frame;
                }
 
-               dma_unmap_single_attrs(dev->dev.parent, dma_addr,
-                                      bm_pool->buf_size, DMA_FROM_DEVICE,
-                                      DMA_ATTR_SKIP_CPU_SYNC);
+               if (pp)
+                       page_pool_release_page(pp, virt_to_page(data));
+               else
+                       dma_unmap_single_attrs(dev->dev.parent, dma_addr,
+                                              bm_pool->buf_size, DMA_FROM_DEVICE,
+                                              DMA_ATTR_SKIP_CPU_SYNC);
 
-               rcvd_pkts++;
-               rcvd_bytes += rx_bytes;
+               ps.rx_packets++;
+               ps.rx_bytes += rx_bytes;
 
-               skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
+               skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
                skb_put(skb, rx_bytes);
                skb->protocol = eth_type_trans(skb, dev);
                mvpp2_rx_csum(port, rx_status, skb);
@@ -3006,12 +3536,21 @@ err_drop_frame:
                mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
        }
 
-       if (rcvd_pkts) {
+       rcu_read_unlock();
+
+       if (xdp_ret & MVPP2_XDP_REDIR)
+               xdp_do_flush_map();
+
+       if (ps.rx_packets) {
                struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
 
                u64_stats_update_begin(&stats->syncp);
-               stats->rx_packets += rcvd_pkts;
-               stats->rx_bytes   += rcvd_bytes;
+               stats->rx_packets += ps.rx_packets;
+               stats->rx_bytes   += ps.rx_bytes;
+               /* xdp */
+               stats->xdp_redirect += ps.xdp_redirect;
+               stats->xdp_pass += ps.xdp_pass;
+               stats->xdp_drop += ps.xdp_drop;
                u64_stats_update_end(&stats->syncp);
        }
 
@@ -3072,11 +3611,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
                        /* Last descriptor */
                        mvpp2_txdesc_cmd_set(port, tx_desc,
                                             MVPP2_TXD_L_DESC);
-                       mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
+                       mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
                } else {
                        /* Descriptor in the middle: Not First, Not Last */
                        mvpp2_txdesc_cmd_set(port, tx_desc, 0);
-                       mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
+                       mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
                }
        }
 
@@ -3114,7 +3653,7 @@ static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
        mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
                                            MVPP2_TXD_F_DESC |
                                            MVPP2_TXD_PADDING_DISABLE);
-       mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
+       mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
 }
 
 static inline int mvpp2_tso_put_data(struct sk_buff *skb,
@@ -3143,14 +3682,14 @@ static inline int mvpp2_tso_put_data(struct sk_buff *skb,
        if (!left) {
                mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
                if (last) {
-                       mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
+                       mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
                        return 0;
                }
        } else {
                mvpp2_txdesc_cmd_set(port, tx_desc, 0);
        }
 
-       mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
+       mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
        return 0;
 }
 
@@ -3160,9 +3699,8 @@ static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
                        struct mvpp2_txq_pcpu *txq_pcpu)
 {
        struct mvpp2_port *port = netdev_priv(dev);
+       int hdr_sz, i, len, descs = 0;
        struct tso_t tso;
-       int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
-       int i, len, descs = 0;
 
        /* Check number of available descriptors */
        if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
@@ -3170,7 +3708,8 @@ static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
                                             tso_count_descs(skb)))
                return 0;
 
-       tso_start(skb, &tso);
+       hdr_sz = tso_start(skb, &tso);
+
        len = skb->len - hdr_sz;
        while (len > 0) {
                int left = min_t(int, skb_shinfo(skb)->gso_size, len);
@@ -3263,12 +3802,12 @@ static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
                /* First and Last descriptor */
                tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
                mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
-               mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
+               mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
        } else {
                /* First but not Last */
                tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
                mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
-               mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
+               mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
 
                /* Continue with other skb fragments */
                if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
@@ -3430,8 +3969,7 @@ static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
 
        mvpp22_pcs_reset_deassert(port);
 
-       /* Only GOP port 0 has an XLG MAC */
-       if (port->gop_id == 0) {
+       if (mvpp2_port_supports_xlg(port)) {
                ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
                ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
 
@@ -3443,7 +3981,7 @@ static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
                writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
        }
 
-       if (port->gop_id == 0 && mvpp2_is_xlg(port->phy_interface))
+       if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(port->phy_interface))
                mvpp2_xlg_max_rx_size_set(port);
        else
                mvpp2_gmac_max_rx_size_set(port);
@@ -3482,6 +4020,8 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
        }
 
        netif_tx_start_all_queues(port->dev);
+
+       clear_bit(0, &port->state);
 }
 
 /* Set hw internals when stopping port */
@@ -3489,6 +4029,8 @@ static void mvpp2_stop_dev(struct mvpp2_port *port)
 {
        int i;
 
+       set_bit(0, &port->state);
+
        /* Disable interrupts on all threads */
        mvpp2_interrupts_disable(port);
 
@@ -3895,6 +4437,10 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
        }
 
        if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
+               if (port->xdp_prog) {
+                       netdev_err(dev, "Jumbo frames are not supported with XDP\n");
+                       return -EINVAL;
+               }
                if (priv->percpu_pools) {
                        netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
                        mvpp2_bm_switch_buffers(priv, false);
@@ -3940,6 +4486,33 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
        return err;
 }
 
+static int mvpp2_check_pagepool_dma(struct mvpp2_port *port)
+{
+       enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
+       struct mvpp2 *priv = port->priv;
+       int err = -1, i;
+
+       if (!priv->percpu_pools)
+               return err;
+
+       if (!priv->page_pool[0])
+               return -ENOMEM;
+
+       for (i = 0; i < priv->port_count; i++) {
+               port = priv->port_list[i];
+               if (port->xdp_prog) {
+                       dma_dir = DMA_BIDIRECTIONAL;
+                       break;
+               }
+       }
+
+       /* All pools are equal in terms of DMA direction */
+       if (priv->page_pool[0]->p.dma_dir != dma_dir)
+               err = mvpp2_bm_switch_buffers(priv, true);
+
+       return err;
+}
+
 static void
 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
@@ -4033,6 +4606,64 @@ static int mvpp2_set_features(struct net_device *dev,
        return 0;
 }
 
+static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf)
+{
+       struct bpf_prog *prog = bpf->prog, *old_prog;
+       bool running = netif_running(port->dev);
+       bool reset = !prog != !port->xdp_prog;
+
+       if (port->dev->mtu > ETH_DATA_LEN) {
+               NL_SET_ERR_MSG_MOD(bpf->extack, "XDP is not supported with jumbo frames enabled");
+               return -EOPNOTSUPP;
+       }
+
+       if (!port->priv->percpu_pools) {
+               NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP");
+               return -EOPNOTSUPP;
+       }
+
+       if (port->ntxqs < num_possible_cpus() * 2) {
+               NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU");
+               return -EOPNOTSUPP;
+       }
+
+       /* device is up and bpf is added/removed, must setup the RX queues */
+       if (running && reset)
+               mvpp2_stop(port->dev);
+
+       old_prog = xchg(&port->xdp_prog, prog);
+       if (old_prog)
+               bpf_prog_put(old_prog);
+
+       /* bpf is just replaced, RXQ and MTU are already setup */
+       if (!reset)
+               return 0;
+
+       /* device was up, restore the link */
+       if (running)
+               mvpp2_open(port->dev);
+
+       /* Check Page Pool DMA Direction */
+       mvpp2_check_pagepool_dma(port);
+
+       return 0;
+}
+
+static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+
+       switch (xdp->command) {
+       case XDP_SETUP_PROG:
+               return mvpp2_xdp_setup(port, xdp);
+       case XDP_QUERY_PROG:
+               xdp->prog_id = port->xdp_prog ? port->xdp_prog->aux->id : 0;
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
 /* Ethtool methods */
 
 static int mvpp2_ethtool_nway_reset(struct net_device *dev)
@@ -4383,6 +5014,8 @@ static const struct net_device_ops mvpp2_netdev_ops = {
        .ndo_vlan_rx_add_vid    = mvpp2_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = mvpp2_vlan_rx_kill_vid,
        .ndo_set_features       = mvpp2_set_features,
+       .ndo_bpf                = mvpp2_xdp,
+       .ndo_xdp_xmit           = mvpp2_xdp_xmit,
 };
 
 static const struct ethtool_ops mvpp2_eth_tool_ops = {
@@ -4756,26 +5389,30 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
        eth_hw_addr_random(dev);
 }
 
+static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
+{
+       return container_of(config, struct mvpp2_port, phylink_config);
+}
+
 static void mvpp2_phylink_validate(struct phylink_config *config,
                                   unsigned long *supported,
                                   struct phylink_link_state *state)
 {
-       struct mvpp2_port *port = container_of(config, struct mvpp2_port,
-                                              phylink_config);
+       struct mvpp2_port *port = mvpp2_phylink_to_port(config);
        __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 
        /* Invalid combinations */
        switch (state->interface) {
        case PHY_INTERFACE_MODE_10GBASER:
        case PHY_INTERFACE_MODE_XAUI:
-               if (port->gop_id != 0)
+               if (!mvpp2_port_supports_xlg(port))
                        goto empty_set;
                break;
        case PHY_INTERFACE_MODE_RGMII:
        case PHY_INTERFACE_MODE_RGMII_ID:
        case PHY_INTERFACE_MODE_RGMII_RXID:
        case PHY_INTERFACE_MODE_RGMII_TXID:
-               if (port->priv->hw_version == MVPP22 && port->gop_id == 0)
+               if (!mvpp2_port_supports_rgmii(port))
                        goto empty_set;
                break;
        default:
@@ -4791,7 +5428,7 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
        case PHY_INTERFACE_MODE_10GBASER:
        case PHY_INTERFACE_MODE_XAUI:
        case PHY_INTERFACE_MODE_NA:
-               if (port->gop_id == 0) {
+               if (mvpp2_port_supports_xlg(port)) {
                        phylink_set(mask, 10000baseT_Full);
                        phylink_set(mask, 10000baseCR_Full);
                        phylink_set(mask, 10000baseSR_Full);
@@ -4902,8 +5539,7 @@ static void mvpp2_gmac_pcs_get_state(struct mvpp2_port *port,
 static void mvpp2_phylink_mac_pcs_get_state(struct phylink_config *config,
                                            struct phylink_link_state *state)
 {
-       struct mvpp2_port *port = container_of(config, struct mvpp2_port,
-                                              phylink_config);
+       struct mvpp2_port *port = mvpp2_phylink_to_port(config);
 
        if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
                u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);
@@ -4920,8 +5556,7 @@ static void mvpp2_phylink_mac_pcs_get_state(struct phylink_config *config,
 
 static void mvpp2_mac_an_restart(struct phylink_config *config)
 {
-       struct mvpp2_port *port = container_of(config, struct mvpp2_port,
-                                              phylink_config);
+       struct mvpp2_port *port = mvpp2_phylink_to_port(config);
        u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
 
        writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
@@ -4933,38 +5568,21 @@ static void mvpp2_mac_an_restart(struct phylink_config *config)
 static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
                             const struct phylink_link_state *state)
 {
-       u32 old_ctrl0, ctrl0;
-       u32 old_ctrl4, ctrl4;
-
-       old_ctrl0 = ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG);
-       old_ctrl4 = ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG);
-
-       ctrl0 |= MVPP22_XLG_CTRL0_MAC_RESET_DIS;
-
-       if (state->pause & MLO_PAUSE_TX)
-               ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
-       else
-               ctrl0 &= ~MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
-
-       if (state->pause & MLO_PAUSE_RX)
-               ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
-       else
-               ctrl0 &= ~MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
+       u32 val;
 
-       ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
-                  MVPP22_XLG_CTRL4_EN_IDLE_CHECK);
-       ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
+       mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
+                    MVPP22_XLG_CTRL0_MAC_RESET_DIS,
+                    MVPP22_XLG_CTRL0_MAC_RESET_DIS);
+       mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG,
+                    MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
+                    MVPP22_XLG_CTRL4_EN_IDLE_CHECK |
+                    MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC,
+                    MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC);
 
-       if (old_ctrl0 != ctrl0)
-               writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
-       if (old_ctrl4 != ctrl4)
-               writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
-
-       if (!(old_ctrl0 & MVPP22_XLG_CTRL0_MAC_RESET_DIS)) {
-               while (!(readl(port->base + MVPP22_XLG_CTRL0_REG) &
-                        MVPP22_XLG_CTRL0_MAC_RESET_DIS))
-                       continue;
-       }
+       /* Wait for reset to deassert */
+       do {
+               val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+       } while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS));
 }
 
 static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
@@ -5094,13 +5712,12 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
 static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
                             const struct phylink_link_state *state)
 {
-       struct net_device *dev = to_net_dev(config->dev);
-       struct mvpp2_port *port = netdev_priv(dev);
+       struct mvpp2_port *port = mvpp2_phylink_to_port(config);
        bool change_interface = port->phy_interface != state->interface;
 
        /* Check for invalid configuration */
        if (mvpp2_is_xlg(state->interface) && port->gop_id != 0) {
-               netdev_err(dev, "Invalid mode on %s\n", dev->name);
+               netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name);
                return;
        }
 
@@ -5140,25 +5757,26 @@ static void mvpp2_mac_link_up(struct phylink_config *config,
                              int speed, int duplex,
                              bool tx_pause, bool rx_pause)
 {
-       struct net_device *dev = to_net_dev(config->dev);
-       struct mvpp2_port *port = netdev_priv(dev);
+       struct mvpp2_port *port = mvpp2_phylink_to_port(config);
        u32 val;
 
        if (mvpp2_is_xlg(interface)) {
                if (!phylink_autoneg_inband(mode)) {
-                       val = readl(port->base + MVPP22_XLG_CTRL0_REG);
-                       val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
-                       val |= MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
-                       writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+                       val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
+                       if (tx_pause)
+                               val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
+                       if (rx_pause)
+                               val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
+
+                       mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
+                                    MVPP22_XLG_CTRL0_FORCE_LINK_DOWN |
+                                    MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
+                                    MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN |
+                                    MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val);
                }
        } else {
                if (!phylink_autoneg_inband(mode)) {
-                       val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-                       val &= ~(MVPP2_GMAC_FORCE_LINK_DOWN |
-                                MVPP2_GMAC_CONFIG_MII_SPEED |
-                                MVPP2_GMAC_CONFIG_GMII_SPEED |
-                                MVPP2_GMAC_CONFIG_FULL_DUPLEX);
-                       val |= MVPP2_GMAC_FORCE_LINK_PASS;
+                       val = MVPP2_GMAC_FORCE_LINK_PASS;
 
                        if (speed == SPEED_1000 || speed == SPEED_2500)
                                val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
@@ -5168,34 +5786,40 @@ static void mvpp2_mac_link_up(struct phylink_config *config,
                        if (duplex == DUPLEX_FULL)
                                val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
 
-                       writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+                       mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
+                                    MVPP2_GMAC_FORCE_LINK_DOWN |
+                                    MVPP2_GMAC_FORCE_LINK_PASS |
+                                    MVPP2_GMAC_CONFIG_MII_SPEED |
+                                    MVPP2_GMAC_CONFIG_GMII_SPEED |
+                                    MVPP2_GMAC_CONFIG_FULL_DUPLEX, val);
                }
 
                /* We can always update the flow control enable bits;
                 * these will only be effective if flow control AN
                 * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled.
                 */
-               val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
-               val &= ~(MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
+               val = 0;
                if (tx_pause)
                        val |= MVPP22_CTRL4_TX_FC_EN;
                if (rx_pause)
                        val |= MVPP22_CTRL4_RX_FC_EN;
-               writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
+
+               mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG,
+                            MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN,
+                            val);
        }
 
        mvpp2_port_enable(port);
 
        mvpp2_egress_enable(port);
        mvpp2_ingress_enable(port);
-       netif_tx_wake_all_queues(dev);
+       netif_tx_wake_all_queues(port->dev);
 }
 
 static void mvpp2_mac_link_down(struct phylink_config *config,
                                unsigned int mode, phy_interface_t interface)
 {
-       struct net_device *dev = to_net_dev(config->dev);
-       struct mvpp2_port *port = netdev_priv(dev);
+       struct mvpp2_port *port = mvpp2_phylink_to_port(config);
        u32 val;
 
        if (!phylink_autoneg_inband(mode)) {
@@ -5212,7 +5836,7 @@ static void mvpp2_mac_link_down(struct phylink_config *config,
                }
        }
 
-       netif_tx_stop_all_queues(dev);
+       netif_tx_stop_all_queues(port->dev);
        mvpp2_egress_disable(port);
        mvpp2_ingress_disable(port);
 
@@ -5983,8 +6607,8 @@ static int mvpp2_remove(struct platform_device *pdev)
 {
        struct mvpp2 *priv = platform_get_drvdata(pdev);
        struct fwnode_handle *fwnode = pdev->dev.fwnode;
+       int i = 0, poolnum = MVPP2_BM_POOLS_NUM;
        struct fwnode_handle *port_fwnode;
-       int i = 0;
 
        mvpp2_dbgfs_cleanup(priv);
 
@@ -5998,7 +6622,10 @@ static int mvpp2_remove(struct platform_device *pdev)
 
        destroy_workqueue(priv->stats_queue);
 
-       for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+       if (priv->percpu_pools)
+               poolnum = mvpp2_get_nrxqs(priv) * 2;
+
+       for (i = 0; i < poolnum; i++) {
                struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
 
                mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
index cd33c2e..f48eb66 100644 (file)
@@ -43,7 +43,7 @@ struct qmem {
        void            *base;
        dma_addr_t      iova;
        int             alloc_sz;
-       u8              entry_sz;
+       u16             entry_sz;
        u8              align;
        u32             qsize;
 };
index b04f542..3a5b34a 100644 (file)
@@ -619,13 +619,14 @@ static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
                               struct sk_buff *skb, u16 qidx)
 {
        struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx);
-       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
-       int tcp_data, seg_len, pkt_len, offset;
+       int hdr_len, tcp_data, seg_len, pkt_len, offset;
        struct nix_sqe_hdr_s *sqe_hdr;
        int first_sqe = sq->head;
        struct sg_list list;
        struct tso_t tso;
 
+       hdr_len = tso_start(skb, &tso);
+
        /* Map SKB's fragments to DMA.
         * It's done here to avoid mapping for every TSO segment's packet.
         */
@@ -636,7 +637,6 @@ static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
 
        netdev_tx_sent_queue(txq, skb->len);
 
-       tso_start(skb, &tso);
        tcp_data = skb->len - hdr_len;
        while (tcp_data > 0) {
                char *hdr;
index 3c89206..b792f63 100644 (file)
@@ -939,10 +939,10 @@ static int skge_rx_setup(struct skge_port *skge, struct skge_element *e,
        struct skge_rx_desc *rd = e->desc;
        dma_addr_t map;
 
-       map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
-                            PCI_DMA_FROMDEVICE);
+       map = dma_map_single(&skge->hw->pdev->dev, skb->data, bufsize,
+                            DMA_FROM_DEVICE);
 
-       if (pci_dma_mapping_error(skge->hw->pdev, map))
+       if (dma_mapping_error(&skge->hw->pdev->dev, map))
                return -1;
 
        rd->dma_lo = lower_32_bits(map);
@@ -990,10 +990,10 @@ static void skge_rx_clean(struct skge_port *skge)
                struct skge_rx_desc *rd = e->desc;
                rd->control = 0;
                if (e->skb) {
-                       pci_unmap_single(hw->pdev,
+                       dma_unmap_single(&hw->pdev->dev,
                                         dma_unmap_addr(e, mapaddr),
                                         dma_unmap_len(e, maplen),
-                                        PCI_DMA_FROMDEVICE);
+                                        DMA_FROM_DEVICE);
                        dev_kfree_skb(e->skb);
                        e->skb = NULL;
                }
@@ -2547,14 +2547,15 @@ static int skge_up(struct net_device *dev)
        rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
        tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
        skge->mem_size = tx_size + rx_size;
-       skge->mem = pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma);
+       skge->mem = dma_alloc_coherent(&hw->pdev->dev, skge->mem_size,
+                                      &skge->dma, GFP_KERNEL);
        if (!skge->mem)
                return -ENOMEM;
 
        BUG_ON(skge->dma & 7);
 
        if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) {
-               dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
+               dev_err(&hw->pdev->dev, "dma_alloc_coherent region crosses 4G boundary\n");
                err = -EINVAL;
                goto free_pci_mem;
        }
@@ -2625,7 +2626,8 @@ static int skge_up(struct net_device *dev)
        skge_rx_clean(skge);
        kfree(skge->rx_ring.start);
  free_pci_mem:
-       pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
+       dma_free_coherent(&hw->pdev->dev, skge->mem_size, skge->mem,
+                         skge->dma);
        skge->mem = NULL;
 
        return err;
@@ -2715,7 +2717,8 @@ static int skge_down(struct net_device *dev)
 
        kfree(skge->rx_ring.start);
        kfree(skge->tx_ring.start);
-       pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
+       dma_free_coherent(&hw->pdev->dev, skge->mem_size, skge->mem,
+                         skge->dma);
        skge->mem = NULL;
        return 0;
 }
@@ -2749,8 +2752,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
        BUG_ON(td->control & BMU_OWN);
        e->skb = skb;
        len = skb_headlen(skb);
-       map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(hw->pdev, map))
+       map = dma_map_single(&hw->pdev->dev, skb->data, len, DMA_TO_DEVICE);
+       if (dma_mapping_error(&hw->pdev->dev, map))
                goto mapping_error;
 
        dma_unmap_addr_set(e, mapaddr, map);
@@ -2830,16 +2833,12 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 
 mapping_unwind:
        e = skge->tx_ring.to_use;
-       pci_unmap_single(hw->pdev,
-                        dma_unmap_addr(e, mapaddr),
-                        dma_unmap_len(e, maplen),
-                        PCI_DMA_TODEVICE);
+       dma_unmap_single(&hw->pdev->dev, dma_unmap_addr(e, mapaddr),
+                        dma_unmap_len(e, maplen), DMA_TO_DEVICE);
        while (i-- > 0) {
                e = e->next;
-               pci_unmap_page(hw->pdev,
-                              dma_unmap_addr(e, mapaddr),
-                              dma_unmap_len(e, maplen),
-                              PCI_DMA_TODEVICE);
+               dma_unmap_page(&hw->pdev->dev, dma_unmap_addr(e, mapaddr),
+                              dma_unmap_len(e, maplen), DMA_TO_DEVICE);
        }
 
 mapping_error:
@@ -2856,13 +2855,11 @@ static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e,
 {
        /* skb header vs. fragment */
        if (control & BMU_STF)
-               pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
-                                dma_unmap_len(e, maplen),
-                                PCI_DMA_TODEVICE);
+               dma_unmap_single(&pdev->dev, dma_unmap_addr(e, mapaddr),
+                                dma_unmap_len(e, maplen), DMA_TO_DEVICE);
        else
-               pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
-                              dma_unmap_len(e, maplen),
-                              PCI_DMA_TODEVICE);
+               dma_unmap_page(&pdev->dev, dma_unmap_addr(e, mapaddr),
+                              dma_unmap_len(e, maplen), DMA_TO_DEVICE);
 }
 
 /* Free all buffers in transmit ring */
@@ -3072,15 +3069,15 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
                if (!skb)
                        goto resubmit;
 
-               pci_dma_sync_single_for_cpu(skge->hw->pdev,
-                                           dma_unmap_addr(e, mapaddr),
-                                           dma_unmap_len(e, maplen),
-                                           PCI_DMA_FROMDEVICE);
+               dma_sync_single_for_cpu(&skge->hw->pdev->dev,
+                                       dma_unmap_addr(e, mapaddr),
+                                       dma_unmap_len(e, maplen),
+                                       DMA_FROM_DEVICE);
                skb_copy_from_linear_data(e->skb, skb->data, len);
-               pci_dma_sync_single_for_device(skge->hw->pdev,
-                                              dma_unmap_addr(e, mapaddr),
-                                              dma_unmap_len(e, maplen),
-                                              PCI_DMA_FROMDEVICE);
+               dma_sync_single_for_device(&skge->hw->pdev->dev,
+                                          dma_unmap_addr(e, mapaddr),
+                                          dma_unmap_len(e, maplen),
+                                          DMA_FROM_DEVICE);
                skge_rx_reuse(e, skge->rx_buf_size);
        } else {
                struct skge_element ee;
@@ -3100,10 +3097,9 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
                        goto resubmit;
                }
 
-               pci_unmap_single(skge->hw->pdev,
+               dma_unmap_single(&skge->hw->pdev->dev,
                                 dma_unmap_addr(&ee, mapaddr),
-                                dma_unmap_len(&ee, maplen),
-                                PCI_DMA_FROMDEVICE);
+                                dma_unmap_len(&ee, maplen), DMA_FROM_DEVICE);
        }
 
        skb_put(skb, len);
@@ -3895,12 +3891,12 @@ static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        pci_set_master(pdev);
 
-       if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+       if (!only_32bit_dma && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                using_dac = 1;
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-       } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+               err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+       } else if (!(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) {
                using_dac = 0;
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+               err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
        }
 
        if (err) {
index 241f007..cec8124 100644 (file)
@@ -203,7 +203,7 @@ io_error:
 
 static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
 {
-       u16 v;
+       u16 v = 0;
        __gm_phy_read(hw, port, reg, &v);
        return v;
 }
@@ -1209,8 +1209,9 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
        struct sk_buff *skb = re->skb;
        int i;
 
-       re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
-       if (pci_dma_mapping_error(pdev, re->data_addr))
+       re->data_addr = dma_map_single(&pdev->dev, skb->data, size,
+                                      DMA_FROM_DEVICE);
+       if (dma_mapping_error(&pdev->dev, re->data_addr))
                goto mapping_error;
 
        dma_unmap_len_set(re, data_size, size);
@@ -1229,13 +1230,13 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
 
 map_page_error:
        while (--i >= 0) {
-               pci_unmap_page(pdev, re->frag_addr[i],
+               dma_unmap_page(&pdev->dev, re->frag_addr[i],
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
-                              PCI_DMA_FROMDEVICE);
+                              DMA_FROM_DEVICE);
        }
 
-       pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
-                        PCI_DMA_FROMDEVICE);
+       dma_unmap_single(&pdev->dev, re->data_addr,
+                        dma_unmap_len(re, data_size), DMA_FROM_DEVICE);
 
 mapping_error:
        if (net_ratelimit())
@@ -1249,13 +1250,13 @@ static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
        struct sk_buff *skb = re->skb;
        int i;
 
-       pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
-                        PCI_DMA_FROMDEVICE);
+       dma_unmap_single(&pdev->dev, re->data_addr,
+                        dma_unmap_len(re, data_size), DMA_FROM_DEVICE);
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-               pci_unmap_page(pdev, re->frag_addr[i],
+               dma_unmap_page(&pdev->dev, re->frag_addr[i],
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
-                              PCI_DMA_FROMDEVICE);
+                              DMA_FROM_DEVICE);
 }
 
 /* Tell chip where to start receive checksum.
@@ -1592,10 +1593,9 @@ static int sky2_alloc_buffers(struct sky2_port *sky2)
        struct sky2_hw *hw = sky2->hw;
 
        /* must be power of 2 */
-       sky2->tx_le = pci_alloc_consistent(hw->pdev,
-                                          sky2->tx_ring_size *
-                                          sizeof(struct sky2_tx_le),
-                                          &sky2->tx_le_map);
+       sky2->tx_le = dma_alloc_coherent(&hw->pdev->dev,
+                                        sky2->tx_ring_size * sizeof(struct sky2_tx_le),
+                                        &sky2->tx_le_map, GFP_KERNEL);
        if (!sky2->tx_le)
                goto nomem;
 
@@ -1604,8 +1604,8 @@ static int sky2_alloc_buffers(struct sky2_port *sky2)
        if (!sky2->tx_ring)
                goto nomem;
 
-       sky2->rx_le = pci_zalloc_consistent(hw->pdev, RX_LE_BYTES,
-                                           &sky2->rx_le_map);
+       sky2->rx_le = dma_alloc_coherent(&hw->pdev->dev, RX_LE_BYTES,
+                                        &sky2->rx_le_map, GFP_KERNEL);
        if (!sky2->rx_le)
                goto nomem;
 
@@ -1626,14 +1626,14 @@ static void sky2_free_buffers(struct sky2_port *sky2)
        sky2_rx_clean(sky2);
 
        if (sky2->rx_le) {
-               pci_free_consistent(hw->pdev, RX_LE_BYTES,
-                                   sky2->rx_le, sky2->rx_le_map);
+               dma_free_coherent(&hw->pdev->dev, RX_LE_BYTES, sky2->rx_le,
+                                 sky2->rx_le_map);
                sky2->rx_le = NULL;
        }
        if (sky2->tx_le) {
-               pci_free_consistent(hw->pdev,
-                                   sky2->tx_ring_size * sizeof(struct sky2_tx_le),
-                                   sky2->tx_le, sky2->tx_le_map);
+               dma_free_coherent(&hw->pdev->dev,
+                                 sky2->tx_ring_size * sizeof(struct sky2_tx_le),
+                                 sky2->tx_le, sky2->tx_le_map);
                sky2->tx_le = NULL;
        }
        kfree(sky2->tx_ring);
@@ -1806,13 +1806,11 @@ static unsigned tx_le_req(const struct sk_buff *skb)
 static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
 {
        if (re->flags & TX_MAP_SINGLE)
-               pci_unmap_single(pdev, dma_unmap_addr(re, mapaddr),
-                                dma_unmap_len(re, maplen),
-                                PCI_DMA_TODEVICE);
+               dma_unmap_single(&pdev->dev, dma_unmap_addr(re, mapaddr),
+                                dma_unmap_len(re, maplen), DMA_TO_DEVICE);
        else if (re->flags & TX_MAP_PAGE)
-               pci_unmap_page(pdev, dma_unmap_addr(re, mapaddr),
-                              dma_unmap_len(re, maplen),
-                              PCI_DMA_TODEVICE);
+               dma_unmap_page(&pdev->dev, dma_unmap_addr(re, mapaddr),
+                              dma_unmap_len(re, maplen), DMA_TO_DEVICE);
        re->flags = 0;
 }
 
@@ -1840,9 +1838,10 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
                return NETDEV_TX_BUSY;
 
        len = skb_headlen(skb);
-       mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
+       mapping = dma_map_single(&hw->pdev->dev, skb->data, len,
+                                DMA_TO_DEVICE);
 
-       if (pci_dma_mapping_error(hw->pdev, mapping))
+       if (dma_mapping_error(&hw->pdev->dev, mapping))
                goto mapping_error;
 
        slot = sky2->tx_prod;
@@ -2464,16 +2463,17 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
 
        skb = netdev_alloc_skb_ip_align(sky2->netdev, length);
        if (likely(skb)) {
-               pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
-                                           length, PCI_DMA_FROMDEVICE);
+               dma_sync_single_for_cpu(&sky2->hw->pdev->dev, re->data_addr,
+                                       length, DMA_FROM_DEVICE);
                skb_copy_from_linear_data(re->skb, skb->data, length);
                skb->ip_summed = re->skb->ip_summed;
                skb->csum = re->skb->csum;
                skb_copy_hash(skb, re->skb);
                __vlan_hwaccel_copy_tag(skb, re->skb);
 
-               pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
-                                              length, PCI_DMA_FROMDEVICE);
+               dma_sync_single_for_device(&sky2->hw->pdev->dev,
+                                          re->data_addr, length,
+                                          DMA_FROM_DEVICE);
                __vlan_hwaccel_clear_tag(re->skb);
                skb_clear_hash(re->skb);
                re->skb->ip_summed = CHECKSUM_NONE;
@@ -4985,16 +4985,16 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_set_master(pdev);
 
        if (sizeof(dma_addr_t) > sizeof(u32) &&
-           !(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))) {
+           !(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))) {
                using_dac = 1;
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+               err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
                if (err < 0) {
                        dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
                                "for consistent allocations\n");
                        goto err_out_free_regions;
                }
        } else {
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "no usable DMA configuration\n");
                        goto err_out_free_regions;
@@ -5038,8 +5038,9 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        /* ring for status responses */
        hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING);
-       hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
-                                        &hw->st_dma);
+       hw->st_le = dma_alloc_coherent(&pdev->dev,
+                                      hw->st_size * sizeof(struct sky2_status_le),
+                                      &hw->st_dma, GFP_KERNEL);
        if (!hw->st_le) {
                err = -ENOMEM;
                goto err_out_reset;
@@ -5119,8 +5120,9 @@ err_out_free_netdev:
                pci_disable_msi(pdev);
        free_netdev(dev);
 err_out_free_pci:
-       pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
-                           hw->st_le, hw->st_dma);
+       dma_free_coherent(&pdev->dev,
+                         hw->st_size * sizeof(struct sky2_status_le),
+                         hw->st_le, hw->st_dma);
 err_out_reset:
        sky2_write8(hw, B0_CTST, CS_RST_SET);
 err_out_iounmap:
@@ -5164,8 +5166,9 @@ static void sky2_remove(struct pci_dev *pdev)
 
        if (hw->flags & SKY2_HW_USE_MSI)
                pci_disable_msi(pdev);
-       pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
-                           hw->st_le, hw->st_dma);
+       dma_free_coherent(&pdev->dev,
+                         hw->st_size * sizeof(struct sky2_status_le),
+                         hw->st_le, hw->st_dma);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
 
index f6a1f86..20db302 100644 (file)
@@ -344,29 +344,9 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
        /* Setup gmac */
        mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
        mcr_new = mcr_cur;
-       mcr_new &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
-                    MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
-                    MAC_MCR_FORCE_RX_FC);
        mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
                   MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
 
-       switch (state->speed) {
-       case SPEED_2500:
-       case SPEED_1000:
-               mcr_new |= MAC_MCR_SPEED_1000;
-               break;
-       case SPEED_100:
-               mcr_new |= MAC_MCR_SPEED_100;
-               break;
-       }
-       if (state->duplex == DUPLEX_FULL) {
-               mcr_new |= MAC_MCR_FORCE_DPX;
-               if (state->pause & MLO_PAUSE_TX)
-                       mcr_new |= MAC_MCR_FORCE_TX_FC;
-               if (state->pause & MLO_PAUSE_RX)
-                       mcr_new |= MAC_MCR_FORCE_RX_FC;
-       }
-
        /* Only update control register when needed! */
        if (mcr_new != mcr_cur)
                mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
@@ -443,6 +423,31 @@ static void mtk_mac_link_up(struct phylink_config *config,
                                           phylink_config);
        u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
 
+       mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
+                MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
+                MAC_MCR_FORCE_RX_FC);
+
+       /* Configure speed */
+       switch (speed) {
+       case SPEED_2500:
+       case SPEED_1000:
+               mcr |= MAC_MCR_SPEED_1000;
+               break;
+       case SPEED_100:
+               mcr |= MAC_MCR_SPEED_100;
+               break;
+       }
+
+       /* Configure duplex */
+       if (duplex == DUPLEX_FULL)
+               mcr |= MAC_MCR_FORCE_DPX;
+
+       /* Configure pause modes - phylink will avoid these for half duplex */
+       if (tx_pause)
+               mcr |= MAC_MCR_FORCE_TX_FC;
+       if (rx_pause)
+               mcr |= MAC_MCR_FORCE_RX_FC;
+
        mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
        mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
 }
index f1ace4f..1325055 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/regmap.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
-#include <linux/workqueue.h>
 
 #define MTK_STAR_DRVNAME                       "mtk_star_emac"
 
@@ -262,7 +261,6 @@ struct mtk_star_priv {
        spinlock_t lock;
 
        struct rtnl_link_stats64 stats;
-       struct work_struct stats_work;
 };
 
 static struct device *mtk_star_get_dev(struct mtk_star_priv *priv)
@@ -432,42 +430,6 @@ static void mtk_star_intr_disable(struct mtk_star_priv *priv)
        regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
 }
 
-static void mtk_star_intr_enable_tx(struct mtk_star_priv *priv)
-{
-       regmap_clear_bits(priv->regs, MTK_STAR_REG_INT_MASK,
-                         MTK_STAR_BIT_INT_STS_TNTC);
-}
-
-static void mtk_star_intr_enable_rx(struct mtk_star_priv *priv)
-{
-       regmap_clear_bits(priv->regs, MTK_STAR_REG_INT_MASK,
-                         MTK_STAR_BIT_INT_STS_FNRC);
-}
-
-static void mtk_star_intr_enable_stats(struct mtk_star_priv *priv)
-{
-       regmap_clear_bits(priv->regs, MTK_STAR_REG_INT_MASK,
-                         MTK_STAR_REG_INT_STS_MIB_CNT_TH);
-}
-
-static void mtk_star_intr_disable_tx(struct mtk_star_priv *priv)
-{
-       regmap_set_bits(priv->regs, MTK_STAR_REG_INT_MASK,
-                       MTK_STAR_BIT_INT_STS_TNTC);
-}
-
-static void mtk_star_intr_disable_rx(struct mtk_star_priv *priv)
-{
-       regmap_set_bits(priv->regs, MTK_STAR_REG_INT_MASK,
-                       MTK_STAR_BIT_INT_STS_FNRC);
-}
-
-static void mtk_star_intr_disable_stats(struct mtk_star_priv *priv)
-{
-       regmap_set_bits(priv->regs, MTK_STAR_REG_INT_MASK,
-                       MTK_STAR_REG_INT_STS_MIB_CNT_TH);
-}
-
 static unsigned int mtk_star_intr_read(struct mtk_star_priv *priv)
 {
        unsigned int val;
@@ -663,20 +625,6 @@ static void mtk_star_update_stats(struct mtk_star_priv *priv)
        stats->rx_errors += stats->rx_fifo_errors;
 }
 
-/* This runs in process context and parallel TX and RX paths executing in
- * napi context may result in losing some stats data but this should happen
- * seldom enough to be acceptable.
- */
-static void mtk_star_update_stats_work(struct work_struct *work)
-{
-       struct mtk_star_priv *priv = container_of(work, struct mtk_star_priv,
-                                                stats_work);
-
-       mtk_star_update_stats(priv);
-       mtk_star_reset_counters(priv);
-       mtk_star_intr_enable_stats(priv);
-}
-
 static struct sk_buff *mtk_star_alloc_skb(struct net_device *ndev)
 {
        uintptr_t tail, offset;
@@ -767,42 +715,25 @@ static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
        mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
 }
 
-/* All processing for TX and RX happens in the napi poll callback. */
+/* All processing for TX and RX happens in the napi poll callback.
+ *
+ * FIXME: The interrupt handling should be more fine-grained with each
+ * interrupt enabled/disabled independently when needed. Unfortunatly this
+ * turned out to impact the driver's stability and until we have something
+ * working properly, we're disabling all interrupts during TX & RX processing
+ * or when resetting the counter registers.
+ */
 static irqreturn_t mtk_star_handle_irq(int irq, void *data)
 {
        struct mtk_star_priv *priv;
        struct net_device *ndev;
-       bool need_napi = false;
-       unsigned int status;
 
        ndev = data;
        priv = netdev_priv(ndev);
 
        if (netif_running(ndev)) {
-               status = mtk_star_intr_read(priv);
-
-               if (status & MTK_STAR_BIT_INT_STS_TNTC) {
-                       mtk_star_intr_disable_tx(priv);
-                       need_napi = true;
-               }
-
-               if (status & MTK_STAR_BIT_INT_STS_FNRC) {
-                       mtk_star_intr_disable_rx(priv);
-                       need_napi = true;
-               }
-
-               if (need_napi)
-                       napi_schedule(&priv->napi);
-
-               /* One of the counters reached 0x8000000 - update stats and
-                * reset all counters.
-                */
-               if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
-                       mtk_star_intr_disable_stats(priv);
-                       schedule_work(&priv->stats_work);
-               }
-
-               mtk_star_intr_ack_all(priv);
+               mtk_star_intr_disable(priv);
+               napi_schedule(&priv->napi);
        }
 
        return IRQ_HANDLED;
@@ -1169,8 +1100,6 @@ static void mtk_star_tx_complete_all(struct mtk_star_priv *priv)
        if (wake && netif_queue_stopped(ndev))
                netif_wake_queue(ndev);
 
-       mtk_star_intr_enable_tx(priv);
-
        spin_unlock(&priv->lock);
 }
 
@@ -1332,20 +1261,32 @@ static int mtk_star_process_rx(struct mtk_star_priv *priv, int budget)
 static int mtk_star_poll(struct napi_struct *napi, int budget)
 {
        struct mtk_star_priv *priv;
+       unsigned int status;
        int received = 0;
 
        priv = container_of(napi, struct mtk_star_priv, napi);
 
-       /* Clean-up all TX descriptors. */
-       mtk_star_tx_complete_all(priv);
-       /* Receive up to $budget packets. */
-       received = mtk_star_process_rx(priv, budget);
+       status = mtk_star_intr_read(priv);
+       mtk_star_intr_ack_all(priv);
 
-       if (received < budget) {
-               napi_complete_done(napi, received);
-               mtk_star_intr_enable_rx(priv);
+       if (status & MTK_STAR_BIT_INT_STS_TNTC)
+               /* Clean-up all TX descriptors. */
+               mtk_star_tx_complete_all(priv);
+
+       if (status & MTK_STAR_BIT_INT_STS_FNRC)
+               /* Receive up to $budget packets. */
+               received = mtk_star_process_rx(priv, budget);
+
+       if (unlikely(status & MTK_STAR_REG_INT_STS_MIB_CNT_TH)) {
+               mtk_star_update_stats(priv);
+               mtk_star_reset_counters(priv);
        }
 
+       if (received < budget)
+               napi_complete_done(napi, received);
+
+       mtk_star_intr_enable(priv);
+
        return received;
 }
 
@@ -1448,7 +1389,7 @@ static int mtk_star_mdio_init(struct net_device *ndev)
        priv->mii->write = mtk_star_mdio_write;
        priv->mii->priv = priv;
 
-       ret = of_mdiobus_register(priv->mii, mdio_node);
+       ret = devm_of_mdiobus_register(dev, priv->mii, mdio_node);
 
 out_put_node:
        of_node_put(mdio_node);
@@ -1500,13 +1441,6 @@ static void mtk_star_clk_disable_unprepare(void *data)
        clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
 }
 
-static void mtk_star_mdiobus_unregister(void *data)
-{
-       struct mtk_star_priv *priv = data;
-
-       mdiobus_unregister(priv->mii);
-}
-
 static int mtk_star_probe(struct platform_device *pdev)
 {
        struct device_node *of_node;
@@ -1532,7 +1466,6 @@ static int mtk_star_probe(struct platform_device *pdev)
        ndev->max_mtu = MTK_STAR_MAX_FRAME_SIZE;
 
        spin_lock_init(&priv->lock);
-       INIT_WORK(&priv->stats_work, mtk_star_update_stats_work);
 
        base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
@@ -1609,10 +1542,6 @@ static int mtk_star_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       ret = devm_add_action_or_reset(dev, mtk_star_mdiobus_unregister, priv);
-       if (ret)
-               return ret;
-
        ret = eth_platform_get_mac_address(dev, ndev->dev_addr);
        if (ret || !is_valid_ether_addr(ndev->dev_addr))
                eth_hw_addr_random(ndev);
index 5bd3cd3..2b8608f 100644 (file)
@@ -1816,7 +1816,7 @@ int mlx4_en_start_port(struct net_device *dev)
        queue_work(mdev->workqueue, &priv->rx_mode_task);
 
        if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
-               udp_tunnel_get_rx_info(dev);
+               udp_tunnel_nic_reset_ntf(dev);
 
        priv->port_up = true;
 
@@ -2628,89 +2628,32 @@ static int mlx4_en_get_phys_port_id(struct net_device *dev,
        return 0;
 }
 
-static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
+static int mlx4_udp_tunnel_sync(struct net_device *dev, unsigned int table)
 {
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct udp_tunnel_info ti;
        int ret;
-       struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
-                                                vxlan_add_task);
 
-       ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
-       if (ret)
-               goto out;
+       udp_tunnel_nic_get_port(dev, table, 0, &ti);
+       priv->vxlan_port = ti.port;
 
-       ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
-                                 VXLAN_STEER_BY_OUTER_MAC, 1);
-out:
-       if (ret) {
-               en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
-               return;
-       }
-}
-
-static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
-{
-       int ret;
-       struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
-                                                vxlan_del_task);
-       ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
-                                 VXLAN_STEER_BY_OUTER_MAC, 0);
+       ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
        if (ret)
-               en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
+               return ret;
 
-       priv->vxlan_port = 0;
+       return mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
+                                  VXLAN_STEER_BY_OUTER_MAC,
+                                  !!priv->vxlan_port);
 }
 
-static void mlx4_en_add_vxlan_port(struct  net_device *dev,
-                                  struct udp_tunnel_info *ti)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       __be16 port = ti->port;
-       __be16 current_port;
-
-       if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-               return;
-
-       if (ti->sa_family != AF_INET)
-               return;
-
-       if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
-               return;
-
-       current_port = priv->vxlan_port;
-       if (current_port && current_port != port) {
-               en_warn(priv, "vxlan port %d configured, can't add port %d\n",
-                       ntohs(current_port), ntohs(port));
-               return;
-       }
-
-       priv->vxlan_port = port;
-       queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
-}
-
-static void mlx4_en_del_vxlan_port(struct  net_device *dev,
-                                  struct udp_tunnel_info *ti)
-{
-       struct mlx4_en_priv *priv = netdev_priv(dev);
-       __be16 port = ti->port;
-       __be16 current_port;
-
-       if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-               return;
-
-       if (ti->sa_family != AF_INET)
-               return;
-
-       if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
-               return;
-
-       current_port = priv->vxlan_port;
-       if (current_port != port) {
-               en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
-               return;
-       }
-
-       queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
-}
+static const struct udp_tunnel_nic_info mlx4_udp_tunnels = {
+       .sync_table     = mlx4_udp_tunnel_sync,
+       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+                         UDP_TUNNEL_NIC_INFO_IPV4_ONLY,
+       .tables         = {
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+       },
+};
 
 static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
                                                struct net_device *dev,
@@ -2914,8 +2857,8 @@ static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
 #endif
        .ndo_get_phys_port_id   = mlx4_en_get_phys_port_id,
-       .ndo_udp_tunnel_add     = mlx4_en_add_vxlan_port,
-       .ndo_udp_tunnel_del     = mlx4_en_del_vxlan_port,
+       .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+       .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
        .ndo_features_check     = mlx4_en_features_check,
        .ndo_set_tx_maxrate     = mlx4_en_set_tx_maxrate,
        .ndo_bpf                = mlx4_xdp,
@@ -2948,8 +2891,8 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
        .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
 #endif
        .ndo_get_phys_port_id   = mlx4_en_get_phys_port_id,
-       .ndo_udp_tunnel_add     = mlx4_en_add_vxlan_port,
-       .ndo_udp_tunnel_del     = mlx4_en_del_vxlan_port,
+       .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+       .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
        .ndo_features_check     = mlx4_en_features_check,
        .ndo_set_tx_maxrate     = mlx4_en_set_tx_maxrate,
        .ndo_bpf                = mlx4_xdp,
@@ -3250,8 +3193,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
        INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
        INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
-       INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
-       INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
 #ifdef CONFIG_RFS_ACCEL
        INIT_LIST_HEAD(&priv->filters);
        spin_lock_init(&priv->filters_lock);
@@ -3406,6 +3347,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                                       NETIF_F_GSO_UDP_TUNNEL |
                                       NETIF_F_GSO_UDP_TUNNEL_CSUM |
                                       NETIF_F_GSO_PARTIAL;
+
+               dev->udp_tunnel_nic_info = &mlx4_udp_tunnels;
        }
 
        dev->vlan_features = dev->hw_features;
index 3d9aa7d..954c22c 100644 (file)
@@ -4370,8 +4370,9 @@ static const struct pci_error_handlers mlx4_err_handler = {
        .resume         = mlx4_pci_resume,
 };
 
-static int mlx4_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused mlx4_suspend(struct device *dev_d)
 {
+       struct pci_dev *pdev = to_pci_dev(dev_d);
        struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
        struct mlx4_dev *dev = persist->dev;
 
@@ -4384,8 +4385,9 @@ static int mlx4_suspend(struct pci_dev *pdev, pm_message_t state)
        return 0;
 }
 
-static int mlx4_resume(struct pci_dev *pdev)
+static int __maybe_unused mlx4_resume(struct device *dev_d)
 {
+       struct pci_dev *pdev = to_pci_dev(dev_d);
        struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
        struct mlx4_dev *dev = persist->dev;
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -4414,14 +4416,15 @@ static int mlx4_resume(struct pci_dev *pdev)
        return ret;
 }
 
+static SIMPLE_DEV_PM_OPS(mlx4_pm_ops, mlx4_suspend, mlx4_resume);
+
 static struct pci_driver mlx4_driver = {
        .name           = DRV_NAME,
        .id_table       = mlx4_pci_table,
        .probe          = mlx4_init_one,
        .shutdown       = mlx4_shutdown,
        .remove         = mlx4_remove_one,
-       .suspend        = mlx4_suspend,
-       .resume         = mlx4_resume,
+       .driver.pm      = &mlx4_pm_ops,
        .err_handler    = &mlx4_err_handler,
 };
 
index 9f56036..a46efe3 100644 (file)
@@ -599,8 +599,6 @@ struct mlx4_en_priv {
        struct work_struct linkstate_task;
        struct delayed_work stats_task;
        struct delayed_work service_task;
-       struct work_struct vxlan_add_task;
-       struct work_struct vxlan_del_task;
        struct mlx4_en_perf_stats pstats;
        struct mlx4_en_pkt_stats pkstats;
        struct mlx4_en_counter_stats pf_stats;
index 4dfdbb8..99f1ec3 100644 (file)
@@ -134,12 +134,25 @@ config MLX5_FPGA_IPSEC
        mlx5_core driver will include the Innova FPGA core and allow building
        sandbox-specific client drivers.
 
+config MLX5_IPSEC
+       bool "Mellanox Technologies IPsec Connect-X support"
+       depends on MLX5_CORE_EN
+       depends on XFRM_OFFLOAD
+       depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+       select MLX5_ACCEL
+       default n
+       help
+       Build IPsec support for the Connect-X family of network cards by Mellanox
+       Technologies.
+       Note: If you select this option, the mlx5_core driver will include
+       IPsec support for the Connect-X family.
+
 config MLX5_EN_IPSEC
        bool "IPSec XFRM cryptography-offload accelaration"
        depends on MLX5_CORE_EN
        depends on XFRM_OFFLOAD
        depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
-       depends on MLX5_FPGA_IPSEC
+       depends on MLX5_FPGA_IPSEC || MLX5_IPSEC
        default n
        help
          Build support for IPsec cryptography-offload accelaration in the NIC.
@@ -150,7 +163,10 @@ config MLX5_FPGA_TLS
        bool "Mellanox Technologies TLS Innova support"
        depends on TLS_DEVICE
        depends on TLS=y || MLX5_CORE=m
+       depends on MLX5_CORE_EN
        depends on MLX5_FPGA
+       depends on XPS
+       select MLX5_EN_TLS
        default n
        help
        Build TLS support for the Innova family of network cards by Mellanox
@@ -161,20 +177,19 @@ config MLX5_FPGA_TLS
 
 config MLX5_TLS
        bool "Mellanox Technologies TLS Connect-X support"
-       depends on MLX5_CORE_EN
        depends on TLS_DEVICE
        depends on TLS=y || MLX5_CORE=m
+       depends on MLX5_CORE_EN
+       depends on XPS
        select MLX5_ACCEL
+       select MLX5_EN_TLS
        default n
        help
        Build TLS support for the Connect-X family of network cards by Mellanox
        Technologies.
 
 config MLX5_EN_TLS
-       bool "TLS cryptography-offload accelaration"
-       depends on MLX5_CORE_EN
-       depends on MLX5_FPGA_TLS || MLX5_TLS
-       default y
+       bool
        help
        Build support for TLS cryptography-offload accelaration in the NIC.
        Note: Support for hardware with this capability needs to be selected
index b61e47b..10e6886 100644 (file)
@@ -35,7 +35,7 @@ mlx5_core-$(CONFIG_MLX5_EN_RXNFC)    += en_fs_ethtool.o
 mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
 mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
 mlx5_core-$(CONFIG_MLX5_ESWITCH)     += lag_mp.o lib/geneve.o lib/port_tun.o \
-                                       en_rep.o en/rep/bond.o
+                                       en_rep.o en/rep/bond.o en/mod_hdr.o
 mlx5_core-$(CONFIG_MLX5_CLS_ACT)     += en_tc.o en/rep/tc.o en/rep/neigh.o \
                                        en/mapping.o esw/chains.o en/tc_tun.o \
                                        en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
@@ -64,6 +64,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib
 #
 # Accelerations & FPGA
 #
+mlx5_core-$(CONFIG_MLX5_IPSEC) += accel/ipsec_offload.o
 mlx5_core-$(CONFIG_MLX5_FPGA_IPSEC) += fpga/ipsec.o
 mlx5_core-$(CONFIG_MLX5_FPGA_TLS)   += fpga/tls.o
 mlx5_core-$(CONFIG_MLX5_ACCEL)      += lib/crypto.o accel/tls.o accel/ipsec.o
@@ -71,10 +72,11 @@ mlx5_core-$(CONFIG_MLX5_ACCEL)      += lib/crypto.o accel/tls.o accel/ipsec.o
 mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o
 
 mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
-                                    en_accel/ipsec_stats.o
+                                    en_accel/ipsec_stats.o en_accel/ipsec_fs.o
 
 mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o \
-                                  en_accel/ktls.o en_accel/ktls_tx.o
+                                  en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
+                                  en_accel/ktls_tx.o en_accel/ktls_rx.o
 
 mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \
                                        steering/dr_matcher.o steering/dr_rule.o \
index 8a4985d..09f5ce9 100644 (file)
  *
  */
 
-#ifdef CONFIG_MLX5_FPGA_IPSEC
-
 #include <linux/mlx5/device.h>
 
 #include "accel/ipsec.h"
 #include "mlx5_core.h"
 #include "fpga/ipsec.h"
+#include "accel/ipsec_offload.h"
+
+void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
+{
+       const struct mlx5_accel_ipsec_ops *ipsec_ops;
+       int err = 0;
+
+       ipsec_ops = (mlx5_ipsec_offload_ops(mdev)) ?
+                    mlx5_ipsec_offload_ops(mdev) :
+                    mlx5_fpga_ipsec_ops(mdev);
+
+       if (!ipsec_ops || !ipsec_ops->init) {
+               mlx5_core_dbg(mdev, "IPsec ops is not supported\n");
+               return;
+       }
+
+       err = ipsec_ops->init(mdev);
+       if (err) {
+               mlx5_core_warn_once(mdev, "Failed to start IPsec device, err = %d\n", err);
+               return;
+       }
+
+       mdev->ipsec_ops = ipsec_ops;
+}
+
+void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
+{
+       const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
+
+       if (!ipsec_ops || !ipsec_ops->cleanup)
+               return;
+
+       ipsec_ops->cleanup(mdev);
+}
 
 u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev)
 {
-       return mlx5_fpga_ipsec_device_caps(mdev);
+       const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
+
+       if (!ipsec_ops || !ipsec_ops->device_caps)
+               return 0;
+
+       return ipsec_ops->device_caps(mdev);
 }
 EXPORT_SYMBOL_GPL(mlx5_accel_ipsec_device_caps);
 
 unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev)
 {
-       return mlx5_fpga_ipsec_counters_count(mdev);
+       const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
+
+       if (!ipsec_ops || !ipsec_ops->counters_count)
+               return -EOPNOTSUPP;
+
+       return ipsec_ops->counters_count(mdev);
 }
 
 int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
                                   unsigned int count)
 {
-       return mlx5_fpga_ipsec_counters_read(mdev, counters, count);
+       const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
+
+       if (!ipsec_ops || !ipsec_ops->counters_read)
+               return -EOPNOTSUPP;
+
+       return ipsec_ops->counters_read(mdev, counters, count);
 }
 
 void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
                                       struct mlx5_accel_esp_xfrm *xfrm,
                                       u32 *sa_handle)
 {
+       const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
        __be32 saddr[4] = {}, daddr[4] = {};
 
+       if (!ipsec_ops || !ipsec_ops->create_hw_context)
+               return  ERR_PTR(-EOPNOTSUPP);
+
        if (!xfrm->attrs.is_ipv6) {
                saddr[3] = xfrm->attrs.saddr.a4;
                daddr[3] = xfrm->attrs.daddr.a4;
@@ -70,29 +121,18 @@ void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
                memcpy(daddr, xfrm->attrs.daddr.a6, sizeof(daddr));
        }
 
-       return mlx5_fpga_ipsec_create_sa_ctx(mdev, xfrm, saddr,
-                                            daddr, xfrm->attrs.spi,
-                                            xfrm->attrs.is_ipv6, sa_handle);
+       return ipsec_ops->create_hw_context(mdev, xfrm, saddr, daddr, xfrm->attrs.spi,
+                                           xfrm->attrs.is_ipv6, sa_handle);
 }
 
-void mlx5_accel_esp_free_hw_context(void *context)
+void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context)
 {
-       mlx5_fpga_ipsec_delete_sa_ctx(context);
-}
+       const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
 
-int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
-{
-       return mlx5_fpga_ipsec_init(mdev);
-}
-
-void mlx5_accel_ipsec_build_fs_cmds(void)
-{
-       mlx5_fpga_ipsec_build_fs_cmds();
-}
+       if (!ipsec_ops || !ipsec_ops->free_hw_context)
+               return;
 
-void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
-{
-       mlx5_fpga_ipsec_cleanup(mdev);
+       ipsec_ops->free_hw_context(context);
 }
 
 struct mlx5_accel_esp_xfrm *
@@ -100,9 +140,13 @@ mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
                           const struct mlx5_accel_esp_xfrm_attrs *attrs,
                           u32 flags)
 {
+       const struct mlx5_accel_ipsec_ops *ipsec_ops = mdev->ipsec_ops;
        struct mlx5_accel_esp_xfrm *xfrm;
 
-       xfrm = mlx5_fpga_esp_create_xfrm(mdev, attrs, flags);
+       if (!ipsec_ops || !ipsec_ops->esp_create_xfrm)
+               return ERR_PTR(-EOPNOTSUPP);
+
+       xfrm = ipsec_ops->esp_create_xfrm(mdev, attrs, flags);
        if (IS_ERR(xfrm))
                return xfrm;
 
@@ -113,15 +157,23 @@ EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm);
 
 void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
 {
-       mlx5_fpga_esp_destroy_xfrm(xfrm);
+       const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops;
+
+       if (!ipsec_ops || !ipsec_ops->esp_destroy_xfrm)
+               return;
+
+       ipsec_ops->esp_destroy_xfrm(xfrm);
 }
 EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm);
 
 int mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
                               const struct mlx5_accel_esp_xfrm_attrs *attrs)
 {
-       return mlx5_fpga_esp_modify_xfrm(xfrm, attrs);
+       const struct mlx5_accel_ipsec_ops *ipsec_ops = xfrm->mdev->ipsec_ops;
+
+       if (!ipsec_ops || !ipsec_ops->esp_modify_xfrm)
+               return -EOPNOTSUPP;
+
+       return ipsec_ops->esp_modify_xfrm(xfrm, attrs);
 }
 EXPORT_SYMBOL_GPL(mlx5_accel_esp_modify_xfrm);
-
-#endif
index e897476..fbb9c54 100644 (file)
@@ -37,7 +37,7 @@
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/accel.h>
 
-#ifdef CONFIG_MLX5_FPGA_IPSEC
+#ifdef CONFIG_MLX5_ACCEL
 
 #define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
                              MLX5_ACCEL_IPSEC_CAP_DEVICE)
@@ -49,12 +49,30 @@ int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
 void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
                                       struct mlx5_accel_esp_xfrm *xfrm,
                                       u32 *sa_handle);
-void mlx5_accel_esp_free_hw_context(void *context);
+void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context);
 
-int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
-void mlx5_accel_ipsec_build_fs_cmds(void);
+void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
 void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
 
+struct mlx5_accel_ipsec_ops {
+       u32 (*device_caps)(struct mlx5_core_dev *mdev);
+       unsigned int (*counters_count)(struct mlx5_core_dev *mdev);
+       int (*counters_read)(struct mlx5_core_dev *mdev, u64 *counters, unsigned int count);
+       void* (*create_hw_context)(struct mlx5_core_dev *mdev,
+                                  struct mlx5_accel_esp_xfrm *xfrm,
+                                  const __be32 saddr[4], const __be32 daddr[4],
+                                  const __be32 spi, bool is_ipv6, u32 *sa_handle);
+       void (*free_hw_context)(void *context);
+       int (*init)(struct mlx5_core_dev *mdev);
+       void (*cleanup)(struct mlx5_core_dev *mdev);
+       struct mlx5_accel_esp_xfrm* (*esp_create_xfrm)(struct mlx5_core_dev *mdev,
+                                                      const struct mlx5_accel_esp_xfrm_attrs *attrs,
+                                                      u32 flags);
+       int (*esp_modify_xfrm)(struct mlx5_accel_esp_xfrm *xfrm,
+                              const struct mlx5_accel_esp_xfrm_attrs *attrs);
+       void (*esp_destroy_xfrm)(struct mlx5_accel_esp_xfrm *xfrm);
+};
+
 #else
 
 #define MLX5_IPSEC_DEV(mdev) false
@@ -67,23 +85,12 @@ mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
        return NULL;
 }
 
-static inline void mlx5_accel_esp_free_hw_context(void *context)
-{
-}
-
-static inline int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
-{
-       return 0;
-}
+static inline void mlx5_accel_esp_free_hw_context(struct mlx5_core_dev *mdev, void *context) {}
 
-static inline void mlx5_accel_ipsec_build_fs_cmds(void)
-{
-}
+static inline void mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev) {}
 
-static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
-{
-}
+static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev) {}
 
-#endif
+#endif /* CONFIG_MLX5_ACCEL */
 
 #endif /* __MLX5_ACCEL_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.c
new file mode 100644 (file)
index 0000000..2f13a25
--- /dev/null
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#include "mlx5_core.h"
+#include "ipsec_offload.h"
+#include "lib/mlx5.h"
+#include "en_accel/ipsec_fs.h"
+
+#define MLX5_IPSEC_DEV_BASIC_CAPS (MLX5_ACCEL_IPSEC_CAP_DEVICE | MLX5_ACCEL_IPSEC_CAP_IPV6 | \
+                                  MLX5_ACCEL_IPSEC_CAP_LSO)
+
+struct mlx5_ipsec_sa_ctx {
+       struct rhash_head hash;
+       u32 enc_key_id;
+       u32 ipsec_obj_id;
+       /* hw ctx */
+       struct mlx5_core_dev *dev;
+       struct mlx5_ipsec_esp_xfrm *mxfrm;
+};
+
+struct mlx5_ipsec_esp_xfrm {
+       /* reference counter of SA ctx */
+       struct mlx5_ipsec_sa_ctx *sa_ctx;
+       struct mutex lock; /* protects mlx5_ipsec_esp_xfrm */
+       struct mlx5_accel_esp_xfrm accel_xfrm;
+};
+
+static u32 mlx5_ipsec_offload_device_caps(struct mlx5_core_dev *mdev)
+{
+       u32 caps = MLX5_IPSEC_DEV_BASIC_CAPS;
+
+       if (!mlx5_is_ipsec_device(mdev))
+               return 0;
+
+       if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
+           !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
+               return 0;
+
+       if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) &&
+           MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
+               caps |= MLX5_ACCEL_IPSEC_CAP_ESP;
+
+       if (MLX5_CAP_IPSEC(mdev, ipsec_esn)) {
+               caps |= MLX5_ACCEL_IPSEC_CAP_ESN;
+               caps |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
+       }
+
+       /* We can accommodate up to 2^24 different IPsec objects
+        * because we use up to 24 bit in flow table metadata
+        * to hold the IPsec Object unique handle.
+        */
+       WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
+       return caps;
+}
+
+static int
+mlx5_ipsec_offload_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
+                                          const struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+       if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
+               mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay (replay_type = %d)\n",
+                             attrs->replay_type);
+               return -EOPNOTSUPP;
+       }
+
+       if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
+               mlx5_core_err(mdev, "Only aes gcm keymat is supported (keymat_type = %d)\n",
+                             attrs->keymat_type);
+               return -EOPNOTSUPP;
+       }
+
+       if (attrs->keymat.aes_gcm.iv_algo !=
+           MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
+               mlx5_core_err(mdev, "Only iv sequence algo is supported (iv_algo = %d)\n",
+                             attrs->keymat.aes_gcm.iv_algo);
+               return -EOPNOTSUPP;
+       }
+
+       if (attrs->keymat.aes_gcm.key_len != 128 &&
+           attrs->keymat.aes_gcm.key_len != 256) {
+               mlx5_core_err(mdev, "Cannot offload xfrm states with key length other than 128/256 bit (key length = %d)\n",
+                             attrs->keymat.aes_gcm.key_len);
+               return -EOPNOTSUPP;
+       }
+
+       if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
+           !MLX5_CAP_IPSEC(mdev, ipsec_esn)) {
+               mlx5_core_err(mdev, "Cannot offload xfrm states with ESN triggered\n");
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static struct mlx5_accel_esp_xfrm *
+mlx5_ipsec_offload_esp_create_xfrm(struct mlx5_core_dev *mdev,
+                                  const struct mlx5_accel_esp_xfrm_attrs *attrs,
+                                  u32 flags)
+{
+       struct mlx5_ipsec_esp_xfrm *mxfrm;
+       int err = 0;
+
+       err = mlx5_ipsec_offload_esp_validate_xfrm_attrs(mdev, attrs);
+       if (err)
+               return ERR_PTR(err);
+
+       mxfrm = kzalloc(sizeof(*mxfrm), GFP_KERNEL);
+       if (!mxfrm)
+               return ERR_PTR(-ENOMEM);
+
+       mutex_init(&mxfrm->lock);
+       memcpy(&mxfrm->accel_xfrm.attrs, attrs,
+              sizeof(mxfrm->accel_xfrm.attrs));
+
+       return &mxfrm->accel_xfrm;
+}
+
+static void mlx5_ipsec_offload_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
+{
+       struct mlx5_ipsec_esp_xfrm *mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm,
+                                                        accel_xfrm);
+
+       /* assuming no sa_ctx are connected to this xfrm_ctx */
+       WARN_ON(mxfrm->sa_ctx);
+       kfree(mxfrm);
+}
+
+struct mlx5_ipsec_obj_attrs {
+       const struct aes_gcm_keymat *aes_gcm;
+       u32 accel_flags;
+       u32 esn_msb;
+       u32 enc_key_id;
+};
+
+static int mlx5_create_ipsec_obj(struct mlx5_core_dev *mdev,
+                                struct mlx5_ipsec_obj_attrs *attrs,
+                                u32 *ipsec_id)
+{
+       const struct aes_gcm_keymat *aes_gcm = attrs->aes_gcm;
+       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+       u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
+       void *obj, *salt_p, *salt_iv_p;
+       int err;
+
+       obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);
+
+       /* salt and seq_iv */
+       salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
+       memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));
+
+       switch (aes_gcm->icv_len) {
+       case 64:
+               MLX5_SET(ipsec_obj, obj, icv_length,
+                        MLX5_IPSEC_OBJECT_ICV_LEN_8B);
+               break;
+       case 96:
+               MLX5_SET(ipsec_obj, obj, icv_length,
+                        MLX5_IPSEC_OBJECT_ICV_LEN_12B);
+               break;
+       case 128:
+               MLX5_SET(ipsec_obj, obj, icv_length,
+                        MLX5_IPSEC_OBJECT_ICV_LEN_16B);
+               break;
+       default:
+               return -EINVAL;
+       }
+       salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
+       memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
+       /* esn */
+       if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
+               MLX5_SET(ipsec_obj, obj, esn_en, 1);
+               MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn_msb);
+               if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
+                       MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
+       }
+
+       MLX5_SET(ipsec_obj, obj, dekn, attrs->enc_key_id);
+
+       /* general object fields set */
+       MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+                MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+                MLX5_GENERAL_OBJECT_TYPES_IPSEC);
+
+       err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+       if (!err)
+               *ipsec_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+       return err;
+}
+
+static void mlx5_destroy_ipsec_obj(struct mlx5_core_dev *mdev, u32 ipsec_id)
+{
+       u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+       MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+                MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+                MLX5_GENERAL_OBJECT_TYPES_IPSEC);
+       MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, ipsec_id);
+
+       mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static void *mlx5_ipsec_offload_create_sa_ctx(struct mlx5_core_dev *mdev,
+                                             struct mlx5_accel_esp_xfrm *accel_xfrm,
+                                             const __be32 saddr[4], const __be32 daddr[4],
+                                             const __be32 spi, bool is_ipv6, u32 *hw_handle)
+{
+       struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs = &accel_xfrm->attrs;
+       struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;
+       struct mlx5_ipsec_obj_attrs ipsec_attrs = {};
+       struct mlx5_ipsec_esp_xfrm *mxfrm;
+       struct mlx5_ipsec_sa_ctx *sa_ctx;
+       int err;
+
+       /* alloc SA context */
+       sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
+       if (!sa_ctx)
+               return ERR_PTR(-ENOMEM);
+
+       sa_ctx->dev = mdev;
+
+       mxfrm = container_of(accel_xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm);
+       mutex_lock(&mxfrm->lock);
+       sa_ctx->mxfrm = mxfrm;
+
+       /* key */
+       err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key,
+                                        aes_gcm->key_len / BITS_PER_BYTE,
+                                        MLX5_ACCEL_OBJ_IPSEC_KEY,
+                                        &sa_ctx->enc_key_id);
+       if (err) {
+               mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
+               goto err_sa_ctx;
+       }
+
+       ipsec_attrs.aes_gcm = aes_gcm;
+       ipsec_attrs.accel_flags = accel_xfrm->attrs.flags;
+       ipsec_attrs.esn_msb = accel_xfrm->attrs.esn;
+       ipsec_attrs.enc_key_id = sa_ctx->enc_key_id;
+       err = mlx5_create_ipsec_obj(mdev, &ipsec_attrs,
+                                   &sa_ctx->ipsec_obj_id);
+       if (err) {
+               mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
+               goto err_enc_key;
+       }
+
+       *hw_handle = sa_ctx->ipsec_obj_id;
+       mxfrm->sa_ctx = sa_ctx;
+       mutex_unlock(&mxfrm->lock);
+
+       return sa_ctx;
+
+err_enc_key:
+       mlx5_destroy_encryption_key(mdev, sa_ctx->enc_key_id);
+err_sa_ctx:
+       mutex_unlock(&mxfrm->lock);
+       kfree(sa_ctx);
+       return ERR_PTR(err);
+}
+
+static void mlx5_ipsec_offload_delete_sa_ctx(void *context)
+{
+       struct mlx5_ipsec_sa_ctx *sa_ctx = (struct mlx5_ipsec_sa_ctx *)context;
+       struct mlx5_ipsec_esp_xfrm *mxfrm = sa_ctx->mxfrm;
+
+       mutex_lock(&mxfrm->lock);
+       mlx5_destroy_ipsec_obj(sa_ctx->dev, sa_ctx->ipsec_obj_id);
+       mlx5_destroy_encryption_key(sa_ctx->dev, sa_ctx->enc_key_id);
+       kfree(sa_ctx);
+       mxfrm->sa_ctx = NULL;
+       mutex_unlock(&mxfrm->lock);
+}
+
+static int mlx5_ipsec_offload_init(struct mlx5_core_dev *mdev)
+{
+       return 0;
+}
+
+static int mlx5_modify_ipsec_obj(struct mlx5_core_dev *mdev,
+                                struct mlx5_ipsec_obj_attrs *attrs,
+                                u32 ipsec_id)
+{
+       u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
+       u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
+       u64 modify_field_select = 0;
+       u64 general_obj_types;
+       void *obj;
+       int err;
+
+       if (!(attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED))
+               return 0;
+
+       general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
+       if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
+               return -EINVAL;
+
+       /* general object fields set */
+       MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
+       MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, ipsec_id);
+       err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+       if (err) {
+               mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
+                             ipsec_id, err);
+               return err;
+       }
+
+       obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
+       modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);
+
+       /* esn */
+       if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
+           !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
+               return -EOPNOTSUPP;
+
+       obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
+       MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn_msb);
+       if (attrs->accel_flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
+               MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
+
+       /* general object fields set */
+       MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+
+       return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_ipsec_offload_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
+                                             const struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+       struct mlx5_ipsec_obj_attrs ipsec_attrs = {};
+       struct mlx5_core_dev *mdev = xfrm->mdev;
+       struct mlx5_ipsec_esp_xfrm *mxfrm;
+
+       int err = 0;
+
+       if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
+               return 0;
+
+       if (mlx5_ipsec_offload_esp_validate_xfrm_attrs(mdev, attrs))
+               return -EOPNOTSUPP;
+
+       mxfrm = container_of(xfrm, struct mlx5_ipsec_esp_xfrm, accel_xfrm);
+
+       mutex_lock(&mxfrm->lock);
+
+       if (!mxfrm->sa_ctx)
+               /* Not bound xfrm, change only sw attrs */
+               goto change_sw_xfrm_attrs;
+
+       /* TODO: find and replace the sa_ctx entry in ipsec_rhash_sa */
+       /* modify device with new hw_sa */
+       ipsec_attrs.accel_flags = attrs->flags;
+       ipsec_attrs.esn_msb = attrs->esn;
+       err = mlx5_modify_ipsec_obj(mdev,
+                                   &ipsec_attrs,
+                                   mxfrm->sa_ctx->ipsec_obj_id);
+
+change_sw_xfrm_attrs:
+       if (!err)
+               memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
+
+       mutex_unlock(&mxfrm->lock);
+       return err;
+}
+
+static const struct mlx5_accel_ipsec_ops ipsec_offload_ops = {
+       .device_caps = mlx5_ipsec_offload_device_caps,
+       .create_hw_context = mlx5_ipsec_offload_create_sa_ctx,
+       .free_hw_context = mlx5_ipsec_offload_delete_sa_ctx,
+       .init = mlx5_ipsec_offload_init,
+       .esp_create_xfrm = mlx5_ipsec_offload_esp_create_xfrm,
+       .esp_destroy_xfrm = mlx5_ipsec_offload_esp_destroy_xfrm,
+       .esp_modify_xfrm = mlx5_ipsec_offload_esp_modify_xfrm,
+};
+
+const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev)
+{
+       if (!mlx5_ipsec_offload_device_caps(mdev))
+               return NULL;
+
+       return &ipsec_offload_ops;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h b/drivers/net/ethernet/mellanox/mlx5/core/accel/ipsec_offload.h
new file mode 100644 (file)
index 0000000..970c66d
--- /dev/null
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_IPSEC_OFFLOAD_H__
+#define __MLX5_IPSEC_OFFLOAD_H__
+
+#include <linux/mlx5/driver.h>
+#include "accel/ipsec.h"
+
+#ifdef CONFIG_MLX5_IPSEC
+
+const struct mlx5_accel_ipsec_ops *mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev);
+static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
+{
+       if (!MLX5_CAP_GEN(mdev, ipsec_offload))
+               return false;
+
+       if (!MLX5_CAP_GEN(mdev, log_max_dek))
+               return false;
+
+       if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+           MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
+               return false;
+
+       return MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
+               MLX5_CAP_ETH(mdev, insert_trailer);
+}
+
+#else
+static inline const struct mlx5_accel_ipsec_ops *
+mlx5_ipsec_offload_ops(struct mlx5_core_dev *mdev) { return NULL; }
+static inline bool mlx5_is_ipsec_device(struct mlx5_core_dev *mdev)
+{
+       return false;
+}
+
+#endif /* CONFIG_MLX5_IPSEC */
+#endif /* __MLX5_IPSEC_OFFLOAD_H__ */
index cbf3d76..6c2b86a 100644 (file)
@@ -113,7 +113,9 @@ int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
                return -EINVAL;
        }
 
-       return mlx5_create_encryption_key(mdev, key, sz_bytes, p_key_id);
+       return mlx5_create_encryption_key(mdev, key, sz_bytes,
+                                         MLX5_ACCEL_OBJ_TLS_KEY,
+                                         p_key_id);
 }
 
 void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
index aefea46..fd874f0 100644 (file)
@@ -43,9 +43,20 @@ int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
                         u32 *p_key_id);
 void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
 
+static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
+{
+       return MLX5_CAP_GEN(mdev, tls_tx);
+}
+
+static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
+{
+       return MLX5_CAP_GEN(mdev, tls_rx);
+}
+
 static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
 {
-       if (!MLX5_CAP_GEN(mdev, tls_tx))
+       if (!mlx5_accel_is_ktls_tx(mdev) &&
+           !mlx5_accel_is_ktls_rx(mdev))
                return false;
 
        if (!MLX5_CAP_GEN(mdev, log_max_dek))
@@ -67,6 +78,12 @@ static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
        return false;
 }
 #else
+static inline bool mlx5_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
+{ return false; }
+
+static inline bool mlx5_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
+{ return false; }
+
 static inline int
 mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
                     struct tls_crypto_info *crypto_info,
index a99fe4b..c709e9a 100644 (file)
@@ -113,6 +113,8 @@ static const struct devlink_ops mlx5_devlink_ops = {
        .eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
        .eswitch_encap_mode_set = mlx5_devlink_eswitch_encap_mode_set,
        .eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get,
+       .port_function_hw_addr_get = mlx5_devlink_port_function_hw_addr_get,
+       .port_function_hw_addr_set = mlx5_devlink_port_function_hw_addr_set,
 #endif
        .flash_update = mlx5_devlink_flash_update,
        .info_get = mlx5_devlink_info_get,
index a755127..ad3594c 100644 (file)
@@ -676,7 +676,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
        block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE;
        start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;
 
-       /* Copy the block to local buffer to avoid HW override while being processed*/
+       /* Copy the block to local buffer to avoid HW override while being processed */
        memcpy(tmp_trace_block, tracer->buff.log_buf + start_offset,
               TRACER_BLOCK_SIZE_BYTE);
 
index 17ab7ef..4924a56 100644 (file)
@@ -23,6 +23,9 @@ static const char *const mlx5_rsc_sgmt_name[] = {
        MLX5_SGMT_STR_ASSING(SX_SLICE_ALL),
        MLX5_SGMT_STR_ASSING(RDB),
        MLX5_SGMT_STR_ASSING(RX_SLICE_ALL),
+       MLX5_SGMT_STR_ASSING(PRM_QUERY_QP),
+       MLX5_SGMT_STR_ASSING(PRM_QUERY_CQ),
+       MLX5_SGMT_STR_ASSING(PRM_QUERY_MKEY),
 };
 
 struct mlx5_rsc_dump {
@@ -130,11 +133,13 @@ struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev,
        cmd->mem_size = key->size;
        return cmd;
 }
+EXPORT_SYMBOL(mlx5_rsc_dump_cmd_create);
 
 void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd)
 {
        kfree(cmd);
 }
+EXPORT_SYMBOL(mlx5_rsc_dump_cmd_destroy);
 
 int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
                       struct page *page, int *size)
@@ -155,6 +160,7 @@ int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
 
        return more_dump;
 }
+EXPORT_SYMBOL(mlx5_rsc_dump_next);
 
 #define MLX5_RSC_DUMP_MENU_SEGMENT 0xffff
 static int mlx5_rsc_dump_menu(struct mlx5_core_dev *dev)
index 1482700..64c4956 100644 (file)
@@ -4,41 +4,10 @@
 #ifndef __MLX5_RSC_DUMP_H
 #define __MLX5_RSC_DUMP_H
 
+#include <linux/mlx5/rsc_dump.h>
 #include <linux/mlx5/driver.h>
 #include "mlx5_core.h"
 
-enum mlx5_sgmt_type {
-       MLX5_SGMT_TYPE_HW_CQPC,
-       MLX5_SGMT_TYPE_HW_SQPC,
-       MLX5_SGMT_TYPE_HW_RQPC,
-       MLX5_SGMT_TYPE_FULL_SRQC,
-       MLX5_SGMT_TYPE_FULL_CQC,
-       MLX5_SGMT_TYPE_FULL_EQC,
-       MLX5_SGMT_TYPE_FULL_QPC,
-       MLX5_SGMT_TYPE_SND_BUFF,
-       MLX5_SGMT_TYPE_RCV_BUFF,
-       MLX5_SGMT_TYPE_SRQ_BUFF,
-       MLX5_SGMT_TYPE_CQ_BUFF,
-       MLX5_SGMT_TYPE_EQ_BUFF,
-       MLX5_SGMT_TYPE_SX_SLICE,
-       MLX5_SGMT_TYPE_SX_SLICE_ALL,
-       MLX5_SGMT_TYPE_RDB,
-       MLX5_SGMT_TYPE_RX_SLICE_ALL,
-       MLX5_SGMT_TYPE_MENU,
-       MLX5_SGMT_TYPE_TERMINATE,
-
-       MLX5_SGMT_TYPE_NUM, /* Keep last */
-};
-
-struct mlx5_rsc_key {
-       enum mlx5_sgmt_type rsc;
-       int index1;
-       int index2;
-       int num_of_obj1;
-       int num_of_obj2;
-       int size;
-};
-
 #define MLX5_RSC_DUMP_ALL 0xFFFF
 struct mlx5_rsc_dump_cmd;
 struct mlx5_rsc_dump;
index 842db20..c446691 100644 (file)
@@ -191,13 +191,8 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 
 struct mlx5e_tx_wqe {
        struct mlx5_wqe_ctrl_seg ctrl;
-       union {
-               struct {
-                       struct mlx5_wqe_eth_seg  eth;
-                       struct mlx5_wqe_data_seg data[0];
-               };
-               u8 tls_progress_params_ctx[0];
-       };
+       struct mlx5_wqe_eth_seg  eth;
+       struct mlx5_wqe_data_seg data[0];
 };
 
 struct mlx5e_rx_wqe_ll {
@@ -213,10 +208,7 @@ struct mlx5e_umr_wqe {
        struct mlx5_wqe_ctrl_seg       ctrl;
        struct mlx5_wqe_umr_ctrl_seg   uctrl;
        struct mlx5_mkey_seg           mkc;
-       union {
-               struct mlx5_mtt        inline_mtts[0];
-               u8                     tls_static_params_ctx[0];
-       };
+       struct mlx5_mtt                inline_mtts[0];
 };
 
 extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
@@ -271,6 +263,7 @@ enum {
        MLX5E_RQ_STATE_AM,
        MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
        MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
+       MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
 };
 
 struct mlx5e_cq {
@@ -651,9 +644,11 @@ struct mlx5e_channel {
        /* AF_XDP zero-copy */
        struct mlx5e_rq            xskrq;
        struct mlx5e_xdpsq         xsksq;
-       struct mlx5e_icosq         xskicosq;
-       /* xskicosq can be accessed from any CPU - the spinlock protects it. */
-       spinlock_t                 xskicosq_lock;
+
+       /* Async ICOSQ */
+       struct mlx5e_icosq         async_icosq;
+       /* async_icosq can be accessed from any CPU - the spinlock protects it. */
+       spinlock_t                 async_icosq_lock;
 
        /* data path - accessed per napi poll */
        struct irq_desc *irq_desc;
@@ -857,26 +852,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
 void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
 
-static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
-{
-       switch (rq->wq_type) {
-       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-               return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
-       default:
-               return mlx5_wq_cyc_get_size(&rq->wqe.wq);
-       }
-}
-
-static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
-{
-       switch (rq->wq_type) {
-       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-               return rq->mpwqe.wq.cur_sz;
-       default:
-               return rq->wqe.wq.cur_sz;
-       }
-}
-
 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
 bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
                                struct mlx5e_params *params);
index 7be6b2d..9976de8 100644 (file)
@@ -29,6 +29,7 @@ struct mlx5e_dcbx {
        bool                       manual_buffer;
        u32                        cable_len;
        u32                        xoff;
+       u16                        port_buff_cell_sz;
 };
 
 #define MLX5E_MAX_DSCP (64)
index f8b2de4..a69c62d 100644 (file)
@@ -6,17 +6,16 @@
 int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
 {
        struct devlink *devlink = priv_to_devlink(priv->mdev);
+       struct devlink_port_attrs attrs = {};
 
-       if (mlx5_core_is_pf(priv->mdev))
-               devlink_port_attrs_set(&priv->dl_port,
-                                      DEVLINK_PORT_FLAVOUR_PHYSICAL,
-                                      PCI_FUNC(priv->mdev->pdev->devfn),
-                                      false, 0,
-                                      NULL, 0);
-       else
-               devlink_port_attrs_set(&priv->dl_port,
-                                      DEVLINK_PORT_FLAVOUR_VIRTUAL,
-                                      0, false, 0, NULL, 0);
+       if (mlx5_core_is_pf(priv->mdev)) {
+               attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+               attrs.phys.port_number = PCI_FUNC(priv->mdev->pdev->devfn);
+       } else {
+               attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL;
+       }
+
+       devlink_port_attrs_set(&priv->dl_port, &attrs);
 
        return devlink_port_register(devlink, &priv->dl_port, 1);
 }
index 0416f77..6fdcd5e 100644 (file)
@@ -4,6 +4,8 @@
 #ifndef __MLX5E_FLOW_STEER_H__
 #define __MLX5E_FLOW_STEER_H__
 
+#include "mod_hdr.h"
+
 enum {
        MLX5E_TC_FT_LEVEL = 0,
        MLX5E_TC_TTC_FT_LEVEL,
@@ -105,11 +107,16 @@ enum mlx5e_tunnel_types {
 
 bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev);
 
+struct mlx5e_ttc_rule {
+       struct mlx5_flow_handle *rule;
+       struct mlx5_flow_destination default_dest;
+};
+
 /* L3/L4 traffic type classifier */
 struct mlx5e_ttc_table {
-       struct mlx5e_flow_table  ft;
-       struct mlx5_flow_handle  *rules[MLX5E_NUM_TT];
-       struct mlx5_flow_handle  *tunnel_rules[MLX5E_NUM_TUNNEL_TT];
+       struct mlx5e_flow_table ft;
+       struct mlx5e_ttc_rule rules[MLX5E_NUM_TT];
+       struct mlx5_flow_handle *tunnel_rules[MLX5E_NUM_TUNNEL_TT];
 };
 
 /* NIC prio FTS */
@@ -118,8 +125,15 @@ enum {
        MLX5E_L2_FT_LEVEL,
        MLX5E_TTC_FT_LEVEL,
        MLX5E_INNER_TTC_FT_LEVEL,
+#ifdef CONFIG_MLX5_EN_TLS
+       MLX5E_ACCEL_FS_TCP_FT_LEVEL,
+#endif
 #ifdef CONFIG_MLX5_EN_ARFS
-       MLX5E_ARFS_FT_LEVEL
+       MLX5E_ARFS_FT_LEVEL,
+#endif
+#ifdef CONFIG_MLX5_EN_IPSEC
+       MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
+       MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
 #endif
 };
 
@@ -211,6 +225,10 @@ static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv) { return -EOPNOTSUP
 static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) {        return -EOPNOTSUPP; }
 #endif
 
+#ifdef CONFIG_MLX5_EN_TLS
+struct mlx5e_accel_fs_tcp;
+#endif
+
 struct mlx5e_flow_steering {
        struct mlx5_flow_namespace      *ns;
 #ifdef CONFIG_MLX5_EN_RXNFC
@@ -224,6 +242,9 @@ struct mlx5e_flow_steering {
 #ifdef CONFIG_MLX5_EN_ARFS
        struct mlx5e_arfs_tables        arfs;
 #endif
+#ifdef CONFIG_MLX5_EN_TLS
+       struct mlx5e_accel_fs_tcp      *accel_tcp;
+#endif
 };
 
 struct ttc_params {
@@ -248,6 +269,11 @@ void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
                                   struct mlx5e_ttc_table *ttc);
 
 void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
+int mlx5e_ttc_fwd_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type,
+                      struct mlx5_flow_destination *new_dest);
+struct mlx5_flow_destination
+mlx5e_ttc_get_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type);
+int mlx5e_ttc_fwd_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type);
 
 void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
 void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
index 7283443..3dc200b 100644 (file)
@@ -5,7 +5,7 @@
 #include "lib/eq.h"
 #include "lib/mlx5.h"
 
-int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name)
+int mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name)
 {
        int err;
 
@@ -20,7 +20,7 @@ int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name)
        return 0;
 }
 
-int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg)
+int mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg)
 {
        int err;
 
@@ -35,7 +35,7 @@ int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg)
        return 0;
 }
 
-int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
+int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
 {
        struct mlx5e_priv *priv = cq->channel->priv;
        u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {};
@@ -50,7 +50,7 @@ int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
        cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context);
        hw_status = MLX5_GET(cqc, cqc, status);
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, "CQ");
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ");
        if (err)
                return err;
 
@@ -62,14 +62,22 @@ int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_end(fmsg);
+       err = devlink_fmsg_u32_pair_put(fmsg, "ci", mlx5_cqwq_get_ci(&cq->wq));
+       if (err)
+               return err;
+
+       err = devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&cq->wq));
+       if (err)
+               return err;
+
+       err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
        if (err)
                return err;
 
        return 0;
 }
 
-int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
+int mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
 {
        u8 cq_log_stride;
        u32 cq_sz;
@@ -78,7 +86,7 @@ int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *
        cq_sz = mlx5_cqwq_get_size(&cq->wq);
        cq_log_stride = mlx5_cqwq_get_log_stride_size(&cq->wq);
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, "CQ");
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ");
        if (err)
                return err;
 
@@ -90,26 +98,48 @@ int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_end(fmsg);
+       err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
        if (err)
                return err;
 
        return 0;
 }
 
-int mlx5e_health_create_reporters(struct mlx5e_priv *priv)
+int mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg)
 {
        int err;
 
-       err = mlx5e_reporter_tx_create(priv);
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "EQ");
        if (err)
                return err;
 
-       err = mlx5e_reporter_rx_create(priv);
+       err = devlink_fmsg_u8_pair_put(fmsg, "eqn", eq->core.eqn);
        if (err)
                return err;
 
-       return 0;
+       err = devlink_fmsg_u32_pair_put(fmsg, "irqn", eq->core.irqn);
+       if (err)
+               return err;
+
+       err = devlink_fmsg_u32_pair_put(fmsg, "vecidx", eq->core.vecidx);
+       if (err)
+               return err;
+
+       err = devlink_fmsg_u32_pair_put(fmsg, "ci", eq->core.cons_index);
+       if (err)
+               return err;
+
+       err = devlink_fmsg_u32_pair_put(fmsg, "size", eq->core.nent);
+       if (err)
+               return err;
+
+       return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+}
+
+void mlx5e_health_create_reporters(struct mlx5e_priv *priv)
+{
+       mlx5e_reporter_tx_create(priv);
+       mlx5e_reporter_rx_create(priv);
 }
 
 void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv)
@@ -291,7 +321,7 @@ int mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, lbl);
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, lbl);
        if (err)
                return err;
 
@@ -303,7 +333,7 @@ int mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_end(fmsg);
+       err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
        if (err)
                return err;
 
index 38f97f7..b9aaddd 100644 (file)
@@ -16,23 +16,25 @@ static inline bool cqe_syndrome_needs_recover(u8 syndrome)
               syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
 }
 
-int mlx5e_reporter_tx_create(struct mlx5e_priv *priv);
+void mlx5e_reporter_tx_create(struct mlx5e_priv *priv);
 void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv);
 void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq);
 int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq);
 
-int mlx5e_reporter_cq_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
-int mlx5e_reporter_cq_common_diagnose(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
-int mlx5e_reporter_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name);
-int mlx5e_reporter_named_obj_nest_end(struct devlink_fmsg *fmsg);
+int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
+int mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
+int mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg);
+int mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name);
+int mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg);
 
-int mlx5e_reporter_rx_create(struct mlx5e_priv *priv);
+void mlx5e_reporter_rx_create(struct mlx5e_priv *priv);
 void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv);
 void mlx5e_reporter_icosq_cqe_err(struct mlx5e_icosq *icosq);
 void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq);
 void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
 
 #define MLX5E_REPORTER_PER_Q_MAX_LEN 256
+#define MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC 2000
 
 struct mlx5e_err_ctx {
        int (*recover)(void *ctx);
@@ -46,7 +48,7 @@ int mlx5e_health_recover_channels(struct mlx5e_priv *priv);
 int mlx5e_health_report(struct mlx5e_priv *priv,
                        struct devlink_health_reporter *reporter, char *err_str,
                        struct mlx5e_err_ctx *err_ctx);
-int mlx5e_health_create_reporters(struct mlx5e_priv *priv);
+void mlx5e_health_create_reporters(struct mlx5e_priv *priv);
 void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv);
 void mlx5e_health_channels_update(struct mlx5e_priv *priv);
 int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c
new file mode 100644 (file)
index 0000000..7edde4d
--- /dev/null
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2020 Mellanox Technologies
+
+#include <linux/jhash.h>
+#include "mod_hdr.h"
+
+#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
+
+struct mod_hdr_key {
+       int num_actions;
+       void *actions;
+};
+
+struct mlx5e_mod_hdr_handle {
+       /* a node of a hash table which keeps all the mod_hdr entries */
+       struct hlist_node mod_hdr_hlist;
+
+       struct mod_hdr_key key;
+
+       struct mlx5_modify_hdr *modify_hdr;
+
+       refcount_t refcnt;
+       struct completion res_ready;
+       int compl_result;
+};
+
+static u32 hash_mod_hdr_info(struct mod_hdr_key *key)
+{
+       return jhash(key->actions,
+                    key->num_actions * MLX5_MH_ACT_SZ, 0);
+}
+
+static int cmp_mod_hdr_info(struct mod_hdr_key *a, struct mod_hdr_key *b)
+{
+       if (a->num_actions != b->num_actions)
+               return 1;
+
+       return memcmp(a->actions, b->actions,
+                     a->num_actions * MLX5_MH_ACT_SZ);
+}
+
+void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl)
+{
+       mutex_init(&tbl->lock);
+       hash_init(tbl->hlist);
+}
+
+void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl)
+{
+       mutex_destroy(&tbl->lock);
+}
+
+static struct mlx5e_mod_hdr_handle *mod_hdr_get(struct mod_hdr_tbl *tbl,
+                                               struct mod_hdr_key *key,
+                                               u32 hash_key)
+{
+       struct mlx5e_mod_hdr_handle *mh, *found = NULL;
+
+       hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {
+               if (!cmp_mod_hdr_info(&mh->key, key)) {
+                       refcount_inc(&mh->refcnt);
+                       found = mh;
+                       break;
+               }
+       }
+
+       return found;
+}
+
+struct mlx5e_mod_hdr_handle *
+mlx5e_mod_hdr_attach(struct mlx5_core_dev *mdev,
+                    struct mod_hdr_tbl *tbl,
+                    enum mlx5_flow_namespace_type namespace,
+                    struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
+{
+       int num_actions, actions_size, err;
+       struct mlx5e_mod_hdr_handle *mh;
+       struct mod_hdr_key key;
+       u32 hash_key;
+
+       num_actions  = mod_hdr_acts->num_actions;
+       actions_size = MLX5_MH_ACT_SZ * num_actions;
+
+       key.actions = mod_hdr_acts->actions;
+       key.num_actions = num_actions;
+
+       hash_key = hash_mod_hdr_info(&key);
+
+       mutex_lock(&tbl->lock);
+       mh = mod_hdr_get(tbl, &key, hash_key);
+       if (mh) {
+               mutex_unlock(&tbl->lock);
+               wait_for_completion(&mh->res_ready);
+
+               if (mh->compl_result < 0) {
+                       err = -EREMOTEIO;
+                       goto attach_header_err;
+               }
+               goto attach_header;
+       }
+
+       mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
+       if (!mh) {
+               mutex_unlock(&tbl->lock);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       mh->key.actions = (void *)mh + sizeof(*mh);
+       memcpy(mh->key.actions, key.actions, actions_size);
+       mh->key.num_actions = num_actions;
+       refcount_set(&mh->refcnt, 1);
+       init_completion(&mh->res_ready);
+
+       hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
+       mutex_unlock(&tbl->lock);
+
+       mh->modify_hdr = mlx5_modify_header_alloc(mdev, namespace,
+                                                 mh->key.num_actions,
+                                                 mh->key.actions);
+       if (IS_ERR(mh->modify_hdr)) {
+               err = PTR_ERR(mh->modify_hdr);
+               mh->compl_result = err;
+               goto alloc_header_err;
+       }
+       mh->compl_result = 1;
+       complete_all(&mh->res_ready);
+
+attach_header:
+       return mh;
+
+alloc_header_err:
+       complete_all(&mh->res_ready);
+attach_header_err:
+       mlx5e_mod_hdr_detach(mdev, tbl, mh);
+       return ERR_PTR(err);
+}
+
+void mlx5e_mod_hdr_detach(struct mlx5_core_dev *mdev,
+                         struct mod_hdr_tbl *tbl,
+                         struct mlx5e_mod_hdr_handle *mh)
+{
+       if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock))
+               return;
+       hash_del(&mh->mod_hdr_hlist);
+       mutex_unlock(&tbl->lock);
+
+       if (mh->compl_result > 0)
+               mlx5_modify_header_dealloc(mdev, mh->modify_hdr);
+
+       kfree(mh);
+}
+
+struct mlx5_modify_hdr *mlx5e_mod_hdr_get(struct mlx5e_mod_hdr_handle *mh)
+{
+       return mh->modify_hdr;
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.h
new file mode 100644 (file)
index 0000000..33b23d8
--- /dev/null
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies */
+
+#ifndef __MLX5E_EN_MOD_HDR_H__
+#define __MLX5E_EN_MOD_HDR_H__
+
+#include <linux/hashtable.h>
+#include <linux/mlx5/fs.h>
+
+struct mlx5e_mod_hdr_handle;
+
+struct mlx5e_tc_mod_hdr_acts {
+       int num_actions;
+       int max_actions;
+       void *actions;
+};
+
+struct mlx5e_mod_hdr_handle *
+mlx5e_mod_hdr_attach(struct mlx5_core_dev *mdev,
+                    struct mod_hdr_tbl *tbl,
+                    enum mlx5_flow_namespace_type namespace,
+                    struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
+void mlx5e_mod_hdr_detach(struct mlx5_core_dev *mdev,
+                         struct mod_hdr_tbl *tbl,
+                         struct mlx5e_mod_hdr_handle *mh);
+struct mlx5_modify_hdr *mlx5e_mod_hdr_get(struct mlx5e_mod_hdr_handle *mh);
+
+void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl);
+void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl);
+
+#endif /* __MLX5E_EN_MOD_HDR_H__ */
index 989d8f4..a87273e 100644 (file)
@@ -11,33 +11,33 @@ struct mlx5e_xsk_param {
        u16 chunk_size;
 };
 
+struct mlx5e_cq_param {
+       u32                        cqc[MLX5_ST_SZ_DW(cqc)];
+       struct mlx5_wq_param       wq;
+       u16                        eq_ix;
+       u8                         cq_period_mode;
+};
+
 struct mlx5e_rq_param {
+       struct mlx5e_cq_param      cqp;
        u32                        rqc[MLX5_ST_SZ_DW(rqc)];
        struct mlx5_wq_param       wq;
        struct mlx5e_rq_frags_info frags_info;
 };
 
 struct mlx5e_sq_param {
+       struct mlx5e_cq_param      cqp;
        u32                        sqc[MLX5_ST_SZ_DW(sqc)];
        struct mlx5_wq_param       wq;
        bool                       is_mpw;
 };
 
-struct mlx5e_cq_param {
-       u32                        cqc[MLX5_ST_SZ_DW(cqc)];
-       struct mlx5_wq_param       wq;
-       u16                        eq_ix;
-       u8                         cq_period_mode;
-};
-
 struct mlx5e_channel_param {
        struct mlx5e_rq_param      rq;
-       struct mlx5e_sq_param      sq;
+       struct mlx5e_sq_param      txq_sq;
        struct mlx5e_sq_param      xdp_sq;
        struct mlx5e_sq_param      icosq;
-       struct mlx5e_cq_param      rx_cq;
-       struct mlx5e_cq_param      tx_cq;
-       struct mlx5e_cq_param      icosq_cq;
+       struct mlx5e_sq_param      async_icosq;
 };
 
 static inline bool mlx5e_qid_get_ch_if_in_group(struct mlx5e_params *params,
index 2a8950b..5de1cb9 100644 (file)
@@ -76,13 +76,31 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
        [MLX5E_100GAUI_2_100GBASE_CR2_KR2]      = 100000,
        [MLX5E_200GAUI_4_200GBASE_CR4_KR4]      = 200000,
        [MLX5E_400GAUI_8]                       = 400000,
+       [MLX5E_100GAUI_1_100GBASE_CR_KR]        = 100000,
+       [MLX5E_200GAUI_2_200GBASE_CR2_KR2]      = 200000,
+       [MLX5E_400GAUI_4_400GBASE_CR4_KR4]      = 400000,
 };
 
+bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev)
+{
+       struct mlx5e_port_eth_proto eproto;
+       int err;
+
+       if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet))
+               return true;
+
+       err = mlx5_port_query_eth_proto(mdev, 1, true, &eproto);
+       if (err)
+               return false;
+
+       return !!eproto.cap;
+}
+
 static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
                                     const u32 **arr, u32 *size,
                                     bool force_legacy)
 {
-       bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       bool ext = force_legacy ? false : mlx5e_ptys_ext_supported(mdev);
 
        *size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
                      ARRAY_SIZE(mlx5e_link_speed);
@@ -177,7 +195,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
        bool ext;
        int err;
 
-       ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       ext = mlx5e_ptys_ext_supported(mdev);
        err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
        if (err)
                goto out;
@@ -205,7 +223,7 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
        int err;
        int i;
 
-       ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       ext = mlx5e_ptys_ext_supported(mdev);
        err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
        if (err)
                return err;
index a2ddd44..7a7defe 100644 (file)
@@ -54,7 +54,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
 int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
 u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
                               bool force_legacy);
-
+bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev);
 int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
 int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
 int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
index ae99fac..673f1c8 100644 (file)
@@ -34,6 +34,7 @@
 int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
                            struct mlx5e_port_buffer *port_buffer)
 {
+       u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
        struct mlx5_core_dev *mdev = priv->mdev;
        int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
        u32 total_used = 0;
@@ -57,11 +58,11 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
                port_buffer->buffer[i].epsb =
                        MLX5_GET(bufferx_reg, buffer, epsb);
                port_buffer->buffer[i].size =
-                       MLX5_GET(bufferx_reg, buffer, size) << MLX5E_BUFFER_CELL_SHIFT;
+                       MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz;
                port_buffer->buffer[i].xon =
-                       MLX5_GET(bufferx_reg, buffer, xon_threshold) << MLX5E_BUFFER_CELL_SHIFT;
+                       MLX5_GET(bufferx_reg, buffer, xon_threshold) * port_buff_cell_sz;
                port_buffer->buffer[i].xoff =
-                       MLX5_GET(bufferx_reg, buffer, xoff_threshold) << MLX5E_BUFFER_CELL_SHIFT;
+                       MLX5_GET(bufferx_reg, buffer, xoff_threshold) * port_buff_cell_sz;
                total_used += port_buffer->buffer[i].size;
 
                mlx5e_dbg(HW, priv, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n", i,
@@ -73,7 +74,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
        }
 
        port_buffer->port_buffer_size =
-               MLX5_GET(pbmc_reg, out, port_buffer_size) << MLX5E_BUFFER_CELL_SHIFT;
+               MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz;
        port_buffer->spare_buffer_size =
                port_buffer->port_buffer_size - total_used;
 
@@ -88,9 +89,9 @@ out:
 static int port_set_buffer(struct mlx5e_priv *priv,
                           struct mlx5e_port_buffer *port_buffer)
 {
+       u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
        struct mlx5_core_dev *mdev = priv->mdev;
        int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
-       void *buffer;
        void *in;
        int err;
        int i;
@@ -104,16 +105,18 @@ static int port_set_buffer(struct mlx5e_priv *priv,
                goto out;
 
        for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
-               buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
-
-               MLX5_SET(bufferx_reg, buffer, size,
-                        port_buffer->buffer[i].size >> MLX5E_BUFFER_CELL_SHIFT);
-               MLX5_SET(bufferx_reg, buffer, lossy,
-                        port_buffer->buffer[i].lossy);
-               MLX5_SET(bufferx_reg, buffer, xoff_threshold,
-                        port_buffer->buffer[i].xoff >> MLX5E_BUFFER_CELL_SHIFT);
-               MLX5_SET(bufferx_reg, buffer, xon_threshold,
-                        port_buffer->buffer[i].xon >> MLX5E_BUFFER_CELL_SHIFT);
+               void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
+               u64 size = port_buffer->buffer[i].size;
+               u64 xoff = port_buffer->buffer[i].xoff;
+               u64 xon = port_buffer->buffer[i].xon;
+
+               do_div(size, port_buff_cell_sz);
+               do_div(xoff, port_buff_cell_sz);
+               do_div(xon, port_buff_cell_sz);
+               MLX5_SET(bufferx_reg, buffer, size, size);
+               MLX5_SET(bufferx_reg, buffer, lossy, port_buffer->buffer[i].lossy);
+               MLX5_SET(bufferx_reg, buffer, xoff_threshold, xoff);
+               MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
        }
 
        err = mlx5e_port_set_pbmc(mdev, in);
@@ -143,7 +146,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
 }
 
 static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
-                                u32 xoff, unsigned int max_mtu)
+                                u32 xoff, unsigned int max_mtu, u16 port_buff_cell_sz)
 {
        int i;
 
@@ -155,7 +158,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
                }
 
                if (port_buffer->buffer[i].size <
-                   (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) {
+                   (xoff + max_mtu + port_buff_cell_sz)) {
                        pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n",
                               i, port_buffer->buffer[i].size);
                        return -ENOMEM;
@@ -175,6 +178,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
  *     @pfc_en: <input> current pfc configuration
  *     @buffer: <input> current prio to buffer mapping
  *     @xoff:   <input> xoff value
+ *     @port_buff_cell_sz: <input> port buffer cell_size
  *     @port_buffer: <output> port receive buffer configuration
  *     @change: <output>
  *
@@ -189,7 +193,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
  *     sets change to true if buffer configuration was modified.
  */
 static int update_buffer_lossy(unsigned int max_mtu,
-                              u8 pfc_en, u8 *buffer, u32 xoff,
+                              u8 pfc_en, u8 *buffer, u32 xoff, u16 port_buff_cell_sz,
                               struct mlx5e_port_buffer *port_buffer,
                               bool *change)
 {
@@ -225,7 +229,7 @@ static int update_buffer_lossy(unsigned int max_mtu,
        }
 
        if (changed) {
-               err = update_xoff_threshold(port_buffer, xoff, max_mtu);
+               err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz);
                if (err)
                        return err;
 
@@ -262,6 +266,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                                    u32 *buffer_size,
                                    u8 *prio2buffer)
 {
+       u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
        struct mlx5e_port_buffer port_buffer;
        u32 xoff = calculate_xoff(priv, mtu);
        bool update_prio2buffer = false;
@@ -282,7 +287,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 
        if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
                if (err)
                        return err;
        }
@@ -292,7 +297,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                if (err)
                        return err;
 
-               err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
+               err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, port_buff_cell_sz,
                                          &port_buffer, &update_buffer);
                if (err)
                        return err;
@@ -304,7 +309,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                if (err)
                        return err;
 
-               err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
+               err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, port_buff_cell_sz,
                                          xoff, &port_buffer, &update_buffer);
                if (err)
                        return err;
@@ -329,7 +334,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
                        return -EINVAL;
 
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
                if (err)
                        return err;
        }
@@ -337,7 +342,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
        /* Need to update buffer configuration if xoff value is changed */
        if (!update_buffer && xoff != priv->dcbx.xoff) {
                update_buffer = true;
-               err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+               err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
                if (err)
                        return err;
        }
index 34f55b8..80af7a5 100644 (file)
@@ -36,7 +36,6 @@
 #include "port.h"
 
 #define MLX5E_MAX_BUFFER 8
-#define MLX5E_BUFFER_CELL_SHIFT 7
 #define MLX5E_DEFAULT_CABLE_LEN 7 /* 7 meters */
 
 #define MLX5_BUFFER_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, pcam_reg) && \
index baa1624..9062920 100644 (file)
@@ -6,10 +6,10 @@
 #include <linux/rculist.h>
 #include <linux/rtnetlink.h>
 #include <linux/workqueue.h>
-#include <linux/rwlock.h>
 #include <linux/spinlock.h>
 #include <linux/notifier.h>
 #include <net/netevent.h>
+#include <net/arp.h>
 #include "neigh.h"
 #include "tc.h"
 #include "en_rep.h"
index 8071312..f8af109 100644 (file)
@@ -404,10 +404,12 @@ static void mlx5e_rep_indr_block_unbind(void *cb_priv)
 static LIST_HEAD(mlx5e_block_cb_list);
 
 static int
-mlx5e_rep_indr_setup_block(struct net_device *netdev,
+mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
                           struct mlx5e_rep_priv *rpriv,
                           struct flow_block_offload *f,
-                          flow_setup_cb_t *setup_cb)
+                          flow_setup_cb_t *setup_cb,
+                          void *data,
+                          void (*cleanup)(struct flow_block_cb *block_cb))
 {
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
        struct mlx5e_rep_indr_block_priv *indr_priv;
@@ -438,8 +440,10 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev,
                list_add(&indr_priv->list,
                         &rpriv->uplink_priv.tc_indr_block_priv_list);
 
-               block_cb = flow_block_cb_alloc(setup_cb, indr_priv, indr_priv,
-                                              mlx5e_rep_indr_block_unbind);
+               block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv,
+                                                   mlx5e_rep_indr_block_unbind,
+                                                   f, netdev, sch, data, rpriv,
+                                                   cleanup);
                if (IS_ERR(block_cb)) {
                        list_del(&indr_priv->list);
                        kfree(indr_priv);
@@ -458,7 +462,7 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev,
                if (!block_cb)
                        return -ENOENT;
 
-               flow_block_cb_remove(block_cb, f);
+               flow_indr_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
                return 0;
        default:
@@ -468,16 +472,20 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev,
 }
 
 static
-int mlx5e_rep_indr_setup_cb(struct net_device *netdev, void *cb_priv,
-                           enum tc_setup_type type, void *type_data)
+int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
+                           enum tc_setup_type type, void *type_data,
+                           void *data,
+                           void (*cleanup)(struct flow_block_cb *block_cb))
 {
        switch (type) {
        case TC_SETUP_BLOCK:
-               return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
-                                                 mlx5e_rep_indr_setup_tc_cb);
+               return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
+                                                 mlx5e_rep_indr_setup_tc_cb,
+                                                 data, cleanup);
        case TC_SETUP_FT:
-               return mlx5e_rep_indr_setup_block(netdev, cb_priv, type_data,
-                                                 mlx5e_rep_indr_setup_ft_cb);
+               return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
+                                                 mlx5e_rep_indr_setup_ft_cb,
+                                                 data, cleanup);
        default:
                return -EOPNOTSUPP;
        }
@@ -496,7 +504,7 @@ int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv)
 void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv)
 {
        flow_indr_dev_unregister(mlx5e_rep_indr_setup_cb, rpriv,
-                                mlx5e_rep_indr_setup_tc_cb);
+                                mlx5e_rep_indr_block_unbind);
 }
 
 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
@@ -586,7 +594,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
                             struct mlx5e_tc_update_priv *tc_priv)
 {
 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
-       u32 chain = 0, reg_c0, reg_c1, tunnel_id, tuple_id;
+       u32 chain = 0, reg_c0, reg_c1, tunnel_id, zone_restore_id;
        struct mlx5_rep_uplink_priv *uplink_priv;
        struct mlx5e_rep_priv *uplink_rpriv;
        struct tc_skb_ext *tc_skb_ext;
@@ -623,11 +631,12 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
 
                tc_skb_ext->chain = chain;
 
-               tuple_id = reg_c1 & TUPLE_ID_MAX;
+               zone_restore_id = reg_c1 & ZONE_RESTORE_MAX;
 
                uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
                uplink_priv = &uplink_rpriv->uplink_priv;
-               if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb, tuple_id))
+               if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb,
+                                             zone_restore_id))
                        return false;
        }
 
index c209579..9913647 100644 (file)
@@ -3,6 +3,7 @@
 
 #include "health.h"
 #include "params.h"
+#include "txrx.h"
 
 static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state)
 {
@@ -29,7 +30,8 @@ out:
 
 static int mlx5e_wait_for_icosq_flush(struct mlx5e_icosq *icosq)
 {
-       unsigned long exp_time = jiffies + msecs_to_jiffies(2000);
+       unsigned long exp_time = jiffies +
+                                msecs_to_jiffies(MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC);
 
        while (time_before(jiffies, exp_time)) {
                if (icosq->cc == icosq->pc)
@@ -123,25 +125,9 @@ static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
 
 static int mlx5e_rx_reporter_err_rq_cqe_recover(void *ctx)
 {
-       struct mlx5_core_dev *mdev;
-       struct net_device *dev;
-       struct mlx5e_rq *rq;
-       u8 state;
+       struct mlx5e_rq *rq = ctx;
        int err;
 
-       rq = ctx;
-       mdev = rq->mdev;
-       dev = rq->netdev;
-       err = mlx5e_query_rq_state(mdev, rq->rqn, &state);
-       if (err) {
-               netdev_err(dev, "Failed to query RQ 0x%x state. err = %d\n",
-                          rq->rqn, err);
-               goto out;
-       }
-
-       if (state != MLX5_RQC_STATE_ERR)
-               goto out;
-
        mlx5e_deactivate_rq(rq);
        mlx5e_free_rx_descs(rq);
 
@@ -191,19 +177,71 @@ static int mlx5e_rx_reporter_recover(struct devlink_health_reporter *reporter,
                         mlx5e_health_recover_channels(priv);
 }
 
+static int mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state,
+                                        struct devlink_fmsg *fmsg)
+{
+       int err;
+
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ");
+       if (err)
+               return err;
+
+       err = devlink_fmsg_u32_pair_put(fmsg, "sqn", icosq->sqn);
+       if (err)
+               return err;
+
+       err = devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state);
+       if (err)
+               return err;
+
+       err = devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cc);
+       if (err)
+               return err;
+
+       err = devlink_fmsg_u32_pair_put(fmsg, "pc", icosq->pc);
+       if (err)
+               return err;
+
+       err = devlink_fmsg_u32_pair_put(fmsg, "WQE size",
+                                       mlx5_wq_cyc_get_size(&icosq->wq));
+       if (err)
+               return err;
+
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ");
+       if (err)
+               return err;
+
+       err = devlink_fmsg_u32_pair_put(fmsg, "cqn", icosq->cq.mcq.cqn);
+       if (err)
+               return err;
+
+       err = devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cq.wq.cc);
+       if (err)
+               return err;
+
+       err = devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&icosq->cq.wq));
+       if (err)
+               return err;
+
+       err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+       if (err)
+               return err;
+
+       return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+}
+
 static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
                                                   struct devlink_fmsg *fmsg)
 {
        struct mlx5e_priv *priv = rq->channel->priv;
-       struct mlx5e_params *params;
        struct mlx5e_icosq *icosq;
        u8 icosq_hw_state;
+       u16 wqe_counter;
        int wqes_sz;
        u8 hw_state;
        u16 wq_head;
        int err;
 
-       params = &priv->channels.params;
        icosq = &rq->channel->icosq;
        err = mlx5e_query_rq_state(priv->mdev, rq->rqn, &hw_state);
        if (err)
@@ -214,8 +252,8 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
                return err;
 
        wqes_sz = mlx5e_rqwq_get_cur_sz(rq);
-       wq_head = params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
-                 rq->mpwqe.wq.head : mlx5_wq_cyc_get_head(&rq->wqe.wq);
+       wq_head = mlx5e_rqwq_get_head(rq);
+       wqe_counter = mlx5e_rqwq_get_wqe_counter(rq);
 
        err = devlink_fmsg_obj_nest_start(fmsg);
        if (err)
@@ -237,6 +275,10 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
        if (err)
                return err;
 
+       err = devlink_fmsg_u32_pair_put(fmsg, "WQE counter", wqe_counter);
+       if (err)
+               return err;
+
        err = devlink_fmsg_u32_pair_put(fmsg, "posted WQEs", wqes_sz);
        if (err)
                return err;
@@ -245,11 +287,15 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
        if (err)
                return err;
 
-       err = devlink_fmsg_u8_pair_put(fmsg, "ICOSQ HW state", icosq_hw_state);
+       err = mlx5e_health_cq_diag_fmsg(&rq->cq, fmsg);
        if (err)
                return err;
 
-       err = mlx5e_reporter_cq_diagnose(&rq->cq, fmsg);
+       err = mlx5e_health_eq_diag_fmsg(rq->cq.mcq.eq, fmsg);
+       if (err)
+               return err;
+
+       err = mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg);
        if (err)
                return err;
 
@@ -279,11 +325,11 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
        rq_sz = mlx5e_rqwq_get_size(generic_rq);
        rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL));
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, "Common config");
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common config");
        if (err)
                goto unlock;
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, "RQ");
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
        if (err)
                goto unlock;
 
@@ -299,15 +345,15 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
        if (err)
                goto unlock;
 
-       err = mlx5e_reporter_named_obj_nest_end(fmsg);
+       err = mlx5e_health_cq_common_diag_fmsg(&generic_rq->cq, fmsg);
        if (err)
                goto unlock;
 
-       err = mlx5e_reporter_cq_common_diagnose(&generic_rq->cq, fmsg);
+       err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
        if (err)
                goto unlock;
 
-       err = mlx5e_reporter_named_obj_nest_end(fmsg);
+       err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
        if (err)
                goto unlock;
 
@@ -340,7 +386,7 @@ static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                return 0;
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, "SX Slice");
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
        if (err)
                return err;
 
@@ -350,15 +396,15 @@ static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_end(fmsg);
+       err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, "ICOSQ");
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ");
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, "QPC");
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
        if (err)
                return err;
 
@@ -370,11 +416,11 @@ static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_end(fmsg);
+       err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, "send_buff");
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff");
        if (err)
                return err;
 
@@ -385,11 +431,11 @@ static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_end(fmsg);
+       err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
        if (err)
                return err;
 
-       return mlx5e_reporter_named_obj_nest_end(fmsg);
+       return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
 }
 
 static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
@@ -402,7 +448,7 @@ static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fms
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                return 0;
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, "RX Slice");
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice");
        if (err)
                return err;
 
@@ -412,15 +458,15 @@ static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fms
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_end(fmsg);
+       err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, "RQ");
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, "QPC");
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
        if (err)
                return err;
 
@@ -432,11 +478,11 @@ static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fms
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_end(fmsg);
+       err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, "receive_buff");
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "receive_buff");
        if (err)
                return err;
 
@@ -446,11 +492,11 @@ static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fms
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_end(fmsg);
+       err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
        if (err)
                return err;
 
-       return mlx5e_reporter_named_obj_nest_end(fmsg);
+       return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
 }
 
 static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv,
@@ -462,7 +508,7 @@ static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv,
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                return 0;
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, "RX Slice");
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice");
        if (err)
                return err;
 
@@ -472,7 +518,7 @@ static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv,
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_end(fmsg);
+       err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
        if (err)
                return err;
 
@@ -563,22 +609,18 @@ static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
 
 #define MLX5E_REPORTER_RX_GRACEFUL_PERIOD 500
 
-int mlx5e_reporter_rx_create(struct mlx5e_priv *priv)
+void mlx5e_reporter_rx_create(struct mlx5e_priv *priv)
 {
-       struct devlink *devlink = priv_to_devlink(priv->mdev);
        struct devlink_health_reporter *reporter;
 
-       reporter = devlink_health_reporter_create(devlink,
-                                                 &mlx5_rx_reporter_ops,
-                                                 MLX5E_REPORTER_RX_GRACEFUL_PERIOD,
-                                                 priv);
+       reporter = devlink_port_health_reporter_create(&priv->dl_port, &mlx5_rx_reporter_ops,
+                                                      MLX5E_REPORTER_RX_GRACEFUL_PERIOD, priv);
        if (IS_ERR(reporter)) {
                netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n",
                            PTR_ERR(reporter));
-               return PTR_ERR(reporter);
+               return;
        }
        priv->rx_reporter = reporter;
-       return 0;
 }
 
 void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv)
@@ -586,5 +628,5 @@ void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv)
        if (!priv->rx_reporter)
                return;
 
-       devlink_health_reporter_destroy(priv->rx_reporter);
+       devlink_port_health_reporter_destroy(priv->rx_reporter);
 }
index 9805fc0..8be6eaa 100644 (file)
@@ -5,7 +5,8 @@
 
 static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
 {
-       unsigned long exp_time = jiffies + msecs_to_jiffies(2000);
+       unsigned long exp_time = jiffies +
+                                msecs_to_jiffies(MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC);
 
        while (time_before(jiffies, exp_time)) {
                if (sq->cc == sq->pc)
@@ -82,17 +83,40 @@ out:
        return err;
 }
 
+struct mlx5e_tx_timeout_ctx {
+       struct mlx5e_txqsq *sq;
+       signed int status;
+};
+
 static int mlx5e_tx_reporter_timeout_recover(void *ctx)
 {
+       struct mlx5e_tx_timeout_ctx *to_ctx;
+       struct mlx5e_priv *priv;
        struct mlx5_eq_comp *eq;
        struct mlx5e_txqsq *sq;
        int err;
 
-       sq = ctx;
+       to_ctx = ctx;
+       sq = to_ctx->sq;
        eq = sq->cq.mcq.eq;
+       priv = sq->channel->priv;
        err = mlx5e_health_channel_eq_recover(eq, sq->channel);
-       if (err)
-               clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+       if (!err) {
+               to_ctx->status = 0; /* this sq recovered */
+               return err;
+       }
+
+       err = mlx5e_safe_reopen_channels(priv);
+       if (!err) {
+               to_ctx->status = 1; /* all channels recovered */
+               return err;
+       }
+
+       to_ctx->status = err;
+       clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+       netdev_err(priv->netdev,
+                  "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
+                  err);
 
        return err;
 }
@@ -165,7 +189,11 @@ mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
        if (err)
                return err;
 
-       err = mlx5e_reporter_cq_diagnose(&sq->cq, fmsg);
+       err = mlx5e_health_cq_diag_fmsg(&sq->cq, fmsg);
+       if (err)
+               return err;
+
+       err = mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg);
        if (err)
                return err;
 
@@ -194,11 +222,11 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
        sq_sz = mlx5_wq_cyc_get_size(&generic_sq->wq);
        sq_stride = MLX5_SEND_WQE_BB;
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, "Common Config");
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config");
        if (err)
                goto unlock;
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, "SQ");
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
        if (err)
                goto unlock;
 
@@ -210,15 +238,15 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
        if (err)
                goto unlock;
 
-       err = mlx5e_reporter_cq_common_diagnose(&generic_sq->cq, fmsg);
+       err = mlx5e_health_cq_common_diag_fmsg(&generic_sq->cq, fmsg);
        if (err)
                goto unlock;
 
-       err = mlx5e_reporter_named_obj_nest_end(fmsg);
+       err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
        if (err)
                goto unlock;
 
-       err = mlx5e_reporter_named_obj_nest_end(fmsg);
+       err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
        if (err)
                goto unlock;
 
@@ -256,7 +284,7 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                return 0;
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, "SX Slice");
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
        if (err)
                return err;
 
@@ -266,15 +294,15 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_end(fmsg);
+       err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, "SQ");
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, "QPC");
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
        if (err)
                return err;
 
@@ -286,11 +314,11 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_end(fmsg);
+       err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, "send_buff");
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff");
        if (err)
                return err;
 
@@ -300,11 +328,11 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_end(fmsg);
+       err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
        if (err)
                return err;
 
-       return mlx5e_reporter_named_obj_nest_end(fmsg);
+       return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
 }
 
 static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
@@ -316,7 +344,7 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                return 0;
 
-       err = mlx5e_reporter_named_obj_nest_start(fmsg, "SX Slice");
+       err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
        if (err)
                return err;
 
@@ -326,7 +354,7 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
        if (err)
                return err;
 
-       err = mlx5e_reporter_named_obj_nest_end(fmsg);
+       err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
        if (err)
                return err;
 
@@ -384,9 +412,11 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
 {
        struct mlx5e_priv *priv = sq->channel->priv;
        char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
+       struct mlx5e_tx_timeout_ctx to_ctx = {};
        struct mlx5e_err_ctx err_ctx = {};
 
-       err_ctx.ctx = sq;
+       to_ctx.sq = sq;
+       err_ctx.ctx = &to_ctx;
        err_ctx.recover = mlx5e_tx_reporter_timeout_recover;
        err_ctx.dump = mlx5e_tx_reporter_dump_sq;
        snprintf(err_str, sizeof(err_str),
@@ -394,7 +424,8 @@ int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
                 sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
                 jiffies_to_usecs(jiffies - sq->txq->trans_start));
 
-       return mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
+       mlx5e_health_report(priv, priv->tx_reporter, err_str, &err_ctx);
+       return to_ctx.status;
 }
 
 static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
@@ -406,25 +437,19 @@ static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
 
 #define MLX5_REPORTER_TX_GRACEFUL_PERIOD 500
 
-int mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
+void mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
 {
        struct devlink_health_reporter *reporter;
-       struct mlx5_core_dev *mdev = priv->mdev;
-       struct devlink *devlink;
-
-       devlink = priv_to_devlink(mdev);
-       reporter =
-               devlink_health_reporter_create(devlink, &mlx5_tx_reporter_ops,
-                                              MLX5_REPORTER_TX_GRACEFUL_PERIOD,
-                                              priv);
+
+       reporter = devlink_port_health_reporter_create(&priv->dl_port, &mlx5_tx_reporter_ops,
+                                                      MLX5_REPORTER_TX_GRACEFUL_PERIOD, priv);
        if (IS_ERR(reporter)) {
                netdev_warn(priv->netdev,
                            "Failed to create tx reporter, err = %ld\n",
                            PTR_ERR(reporter));
-               return PTR_ERR(reporter);
+               return;
        }
        priv->tx_reporter = reporter;
-       return 0;
 }
 
 void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv)
@@ -432,5 +457,5 @@ void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv)
        if (!priv->tx_reporter)
                return;
 
-       devlink_health_reporter_destroy(priv->tx_reporter);
+       devlink_port_health_reporter_destroy(priv->tx_reporter);
 }
index 4300255..c6bc922 100644 (file)
@@ -16,6 +16,8 @@
 
 #include "esw/chains.h"
 #include "en/tc_ct.h"
+#include "en/mod_hdr.h"
+#include "en/mapping.h"
 #include "en.h"
 #include "en_tc.h"
 #include "en_rep.h"
@@ -30,6 +32,9 @@
 #define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0)
 #define MLX5_FTE_ID_MASK MLX5_FTE_ID_MAX
 
+#define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen * 8)
+#define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0)
+
 #define ct_dbg(fmt, args...)\
        netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args)
 
@@ -39,10 +44,14 @@ struct mlx5_tc_ct_priv {
        struct idr fte_ids;
        struct xarray tuple_ids;
        struct rhashtable zone_ht;
+       struct rhashtable ct_tuples_ht;
+       struct rhashtable ct_tuples_nat_ht;
        struct mlx5_flow_table *ct;
        struct mlx5_flow_table *ct_nat;
        struct mlx5_flow_table *post_ct;
        struct mutex control_lock; /* guards parallel adds/dels */
+       struct mapping_ctx *zone_mapping;
+       struct mapping_ctx *labels_mapping;
 };
 
 struct mlx5_ct_flow {
@@ -57,8 +66,8 @@ struct mlx5_ct_flow {
 
 struct mlx5_ct_zone_rule {
        struct mlx5_flow_handle *rule;
+       struct mlx5e_mod_hdr_handle *mh;
        struct mlx5_esw_flow_attr attr;
-       int tupleid;
        bool nat;
 };
 
@@ -74,6 +83,7 @@ struct mlx5_tc_ct_pre {
 struct mlx5_ct_ft {
        struct rhash_head node;
        u16 zone;
+       u32 zone_restore_id;
        refcount_t refcount;
        struct nf_flowtable *nf_ft;
        struct mlx5_tc_ct_priv *ct_priv;
@@ -82,12 +92,37 @@ struct mlx5_ct_ft {
        struct mlx5_tc_ct_pre pre_ct_nat;
 };
 
-struct mlx5_ct_entry {
+struct mlx5_ct_tuple {
+       u16 addr_type;
+       __be16 n_proto;
+       u8 ip_proto;
+       struct {
+               union {
+                       __be32 src_v4;
+                       struct in6_addr src_v6;
+               };
+               union {
+                       __be32 dst_v4;
+                       struct in6_addr dst_v6;
+               };
+       } ip;
+       struct {
+               __be16 src;
+               __be16 dst;
+       } port;
+
        u16 zone;
+};
+
+struct mlx5_ct_entry {
        struct rhash_head node;
+       struct rhash_head tuple_node;
+       struct rhash_head tuple_nat_node;
        struct mlx5_fc *counter;
        unsigned long cookie;
        unsigned long restore_cookie;
+       struct mlx5_ct_tuple tuple;
+       struct mlx5_ct_tuple tuple_nat;
        struct mlx5_ct_zone_rule zone_rules[2];
 };
 
@@ -106,6 +141,22 @@ static const struct rhashtable_params zone_params = {
        .automatic_shrinking = true,
 };
 
+static const struct rhashtable_params tuples_ht_params = {
+       .head_offset = offsetof(struct mlx5_ct_entry, tuple_node),
+       .key_offset = offsetof(struct mlx5_ct_entry, tuple),
+       .key_len = sizeof(((struct mlx5_ct_entry *)0)->tuple),
+       .automatic_shrinking = true,
+       .min_size = 16 * 1024,
+};
+
+static const struct rhashtable_params tuples_nat_ht_params = {
+       .head_offset = offsetof(struct mlx5_ct_entry, tuple_nat_node),
+       .key_offset = offsetof(struct mlx5_ct_entry, tuple_nat),
+       .key_len = sizeof(((struct mlx5_ct_entry *)0)->tuple_nat),
+       .automatic_shrinking = true,
+       .min_size = 16 * 1024,
+};
+
 static struct mlx5_tc_ct_priv *
 mlx5_tc_ct_get_ct_priv(struct mlx5e_priv *priv)
 {
@@ -119,6 +170,115 @@ mlx5_tc_ct_get_ct_priv(struct mlx5e_priv *priv)
 }
 
 static int
+mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
+{
+       struct flow_match_control control;
+       struct flow_match_basic basic;
+
+       flow_rule_match_basic(rule, &basic);
+       flow_rule_match_control(rule, &control);
+
+       tuple->n_proto = basic.key->n_proto;
+       tuple->ip_proto = basic.key->ip_proto;
+       tuple->addr_type = control.key->addr_type;
+
+       if (tuple->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+               struct flow_match_ipv4_addrs match;
+
+               flow_rule_match_ipv4_addrs(rule, &match);
+               tuple->ip.src_v4 = match.key->src;
+               tuple->ip.dst_v4 = match.key->dst;
+       } else if (tuple->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+               struct flow_match_ipv6_addrs match;
+
+               flow_rule_match_ipv6_addrs(rule, &match);
+               tuple->ip.src_v6 = match.key->src;
+               tuple->ip.dst_v6 = match.key->dst;
+       } else {
+               return -EOPNOTSUPP;
+       }
+
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+               struct flow_match_ports match;
+
+               flow_rule_match_ports(rule, &match);
+               switch (tuple->ip_proto) {
+               case IPPROTO_TCP:
+               case IPPROTO_UDP:
+                       tuple->port.src = match.key->src;
+                       tuple->port.dst = match.key->dst;
+                       break;
+               default:
+                       return -EOPNOTSUPP;
+               }
+       } else {
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+static int
+mlx5_tc_ct_rule_to_tuple_nat(struct mlx5_ct_tuple *tuple,
+                            struct flow_rule *rule)
+{
+       struct flow_action *flow_action = &rule->action;
+       struct flow_action_entry *act;
+       u32 offset, val, ip6_offset;
+       int i;
+
+       flow_action_for_each(i, act, flow_action) {
+               if (act->id != FLOW_ACTION_MANGLE)
+                       continue;
+
+               offset = act->mangle.offset;
+               val = act->mangle.val;
+               switch (act->mangle.htype) {
+               case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+                       if (offset == offsetof(struct iphdr, saddr))
+                               tuple->ip.src_v4 = cpu_to_be32(val);
+                       else if (offset == offsetof(struct iphdr, daddr))
+                               tuple->ip.dst_v4 = cpu_to_be32(val);
+                       else
+                               return -EOPNOTSUPP;
+                       break;
+
+               case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+                       ip6_offset = (offset - offsetof(struct ipv6hdr, saddr));
+                       ip6_offset /= 4;
+                       if (ip6_offset < 8)
+                               tuple->ip.src_v6.s6_addr32[ip6_offset] = cpu_to_be32(val);
+                       else
+                               return -EOPNOTSUPP;
+                       break;
+
+               case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+                       if (offset == offsetof(struct tcphdr, source))
+                               tuple->port.src = cpu_to_be16(val);
+                       else if (offset == offsetof(struct tcphdr, dest))
+                               tuple->port.dst = cpu_to_be16(val);
+                       else
+                               return -EOPNOTSUPP;
+                       break;
+
+               case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+                       if (offset == offsetof(struct udphdr, source))
+                               tuple->port.src = cpu_to_be16(val);
+                       else if (offset == offsetof(struct udphdr, dest))
+                               tuple->port.dst = cpu_to_be16(val);
+                       else
+                               return -EOPNOTSUPP;
+                       break;
+
+               default:
+                       return -EOPNOTSUPP;
+               }
+       }
+
+       return 0;
+}
+
+static int
 mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
                           struct flow_rule *rule)
 {
@@ -243,11 +403,12 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
        struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
        struct mlx5_eswitch *esw = ct_priv->esw;
 
-       ct_dbg("Deleting ct entry rule in zone %d", entry->zone);
+       ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone);
 
        mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr);
-       mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
-       xa_erase(&ct_priv->tuple_ids, zone_rule->tupleid);
+       mlx5e_mod_hdr_detach(ct_priv->esw->dev,
+                            &esw->offloads.mod_hdr, zone_rule->mh);
+       mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
 }
 
 static void
@@ -280,8 +441,8 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
                               struct mlx5e_tc_mod_hdr_acts *mod_acts,
                               u8 ct_state,
                               u32 mark,
-                              u32 label,
-                              u32 tupleid)
+                              u32 labels_id,
+                              u8 zone_restore_id)
 {
        struct mlx5_eswitch *esw = ct_priv->esw;
        int err;
@@ -297,12 +458,12 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
                return err;
 
        err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
-                                       LABELS_TO_REG, label);
+                                       LABELS_TO_REG, labels_id);
        if (err)
                return err;
 
        err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
-                                       TUPLEID_TO_REG, tupleid);
+                                       ZONE_RESTORE_TO_REG, zone_restore_id);
        if (err)
                return err;
 
@@ -429,12 +590,10 @@ static int
 mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
                                struct mlx5_esw_flow_attr *attr,
                                struct flow_rule *flow_rule,
-                               u32 tupleid,
-                               bool nat)
+                               struct mlx5e_mod_hdr_handle **mh,
+                               u8 zone_restore_id, bool nat)
 {
        struct mlx5e_tc_mod_hdr_acts mod_acts = {};
-       struct mlx5_eswitch *esw = ct_priv->esw;
-       struct mlx5_modify_hdr *mod_hdr;
        struct flow_action_entry *meta;
        u16 ct_state = 0;
        int err;
@@ -443,13 +602,10 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
        if (!meta)
                return -EOPNOTSUPP;
 
-       if (meta->ct_metadata.labels[1] ||
-           meta->ct_metadata.labels[2] ||
-           meta->ct_metadata.labels[3]) {
-               ct_dbg("Failed to offload ct entry due to unsupported label");
+       err = mapping_add(ct_priv->labels_mapping, meta->ct_metadata.labels,
+                         &attr->ct_attr.ct_labels_id);
+       if (err)
                return -EOPNOTSUPP;
-       }
-
        if (nat) {
                err = mlx5_tc_ct_entry_create_nat(ct_priv, flow_rule,
                                                  &mod_acts);
@@ -463,25 +619,27 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
        err = mlx5_tc_ct_entry_set_registers(ct_priv, &mod_acts,
                                             ct_state,
                                             meta->ct_metadata.mark,
-                                            meta->ct_metadata.labels[0],
-                                            tupleid);
+                                            attr->ct_attr.ct_labels_id,
+                                            zone_restore_id);
        if (err)
                goto err_mapping;
 
-       mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB,
-                                          mod_acts.num_actions,
-                                          mod_acts.actions);
-       if (IS_ERR(mod_hdr)) {
-               err = PTR_ERR(mod_hdr);
+       *mh = mlx5e_mod_hdr_attach(ct_priv->esw->dev,
+                                  &ct_priv->esw->offloads.mod_hdr,
+                                  MLX5_FLOW_NAMESPACE_FDB,
+                                  &mod_acts);
+       if (IS_ERR(*mh)) {
+               err = PTR_ERR(*mh);
                goto err_mapping;
        }
-       attr->modify_hdr = mod_hdr;
+       attr->modify_hdr = mlx5e_mod_hdr_get(*mh);
 
        dealloc_mod_hdr_actions(&mod_acts);
        return 0;
 
 err_mapping:
        dealloc_mod_hdr_actions(&mod_acts);
+       mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
        return err;
 }
 
@@ -489,13 +647,12 @@ static int
 mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
                          struct flow_rule *flow_rule,
                          struct mlx5_ct_entry *entry,
-                         bool nat)
+                         bool nat, u8 zone_restore_id)
 {
        struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
        struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
        struct mlx5_eswitch *esw = ct_priv->esw;
        struct mlx5_flow_spec *spec = NULL;
-       u32 tupleid;
        int err;
 
        zone_rule->nat = nat;
@@ -504,18 +661,9 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
        if (!spec)
                return -ENOMEM;
 
-       /* Get tuple unique id */
-       err = xa_alloc(&ct_priv->tuple_ids, &tupleid, zone_rule,
-                      XA_LIMIT(1, TUPLE_ID_MAX), GFP_KERNEL);
-       if (err) {
-               netdev_warn(ct_priv->netdev,
-                           "Failed to allocate tuple id, err: %d\n", err);
-               goto err_xa_alloc;
-       }
-       zone_rule->tupleid = tupleid;
-
        err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule,
-                                             tupleid, nat);
+                                             &zone_rule->mh,
+                                             zone_restore_id, nat);
        if (err) {
                ct_dbg("Failed to create ct entry mod hdr");
                goto err_mod_hdr;
@@ -533,7 +681,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 
        mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
        mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
-                                   entry->zone & MLX5_CT_ZONE_MASK,
+                                   entry->tuple.zone & MLX5_CT_ZONE_MASK,
                                    MLX5_CT_ZONE_MASK);
 
        zone_rule->rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
@@ -544,15 +692,14 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
        }
 
        kfree(spec);
-       ct_dbg("Offloaded ct entry rule in zone %d", entry->zone);
+       ct_dbg("Offloaded ct entry rule in zone %d", entry->tuple.zone);
 
        return 0;
 
 err_rule:
-       mlx5_modify_header_dealloc(esw->dev, attr->modify_hdr);
+       mlx5e_mod_hdr_detach(ct_priv->esw->dev,
+                            &esw->offloads.mod_hdr, zone_rule->mh);
 err_mod_hdr:
-       xa_erase(&ct_priv->tuple_ids, zone_rule->tupleid);
-err_xa_alloc:
        kfree(spec);
        return err;
 }
@@ -560,7 +707,8 @@ err_xa_alloc:
 static int
 mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
                           struct flow_rule *flow_rule,
-                          struct mlx5_ct_entry *entry)
+                          struct mlx5_ct_entry *entry,
+                          u8 zone_restore_id)
 {
        struct mlx5_eswitch *esw = ct_priv->esw;
        int err;
@@ -572,11 +720,13 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
                return err;
        }
 
-       err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false);
+       err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false,
+                                       zone_restore_id);
        if (err)
                goto err_orig;
 
-       err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true);
+       err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true,
+                                       zone_restore_id);
        if (err)
                goto err_nat;
 
@@ -613,11 +763,35 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
        if (!entry)
                return -ENOMEM;
 
-       entry->zone = ft->zone;
+       entry->tuple.zone = ft->zone;
        entry->cookie = flow->cookie;
        entry->restore_cookie = meta_action->ct_metadata.cookie;
 
-       err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry);
+       err = mlx5_tc_ct_rule_to_tuple(&entry->tuple, flow_rule);
+       if (err)
+               goto err_set;
+
+       memcpy(&entry->tuple_nat, &entry->tuple, sizeof(entry->tuple));
+       err = mlx5_tc_ct_rule_to_tuple_nat(&entry->tuple_nat, flow_rule);
+       if (err)
+               goto err_set;
+
+       err = rhashtable_insert_fast(&ct_priv->ct_tuples_ht,
+                                    &entry->tuple_node,
+                                    tuples_ht_params);
+       if (err)
+               goto err_tuple;
+
+       if (memcmp(&entry->tuple, &entry->tuple_nat, sizeof(entry->tuple))) {
+               err = rhashtable_insert_fast(&ct_priv->ct_tuples_nat_ht,
+                                            &entry->tuple_nat_node,
+                                            tuples_nat_ht_params);
+               if (err)
+                       goto err_tuple_nat;
+       }
+
+       err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry,
+                                        ft->zone_restore_id);
        if (err)
                goto err_rules;
 
@@ -631,12 +805,34 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
 err_insert:
        mlx5_tc_ct_entry_del_rules(ct_priv, entry);
 err_rules:
+       rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+                              &entry->tuple_nat_node, tuples_nat_ht_params);
+err_tuple_nat:
+       if (entry->tuple_node.next)
+               rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
+                                      &entry->tuple_node,
+                                      tuples_ht_params);
+err_tuple:
+err_set:
        kfree(entry);
        netdev_warn(ct_priv->netdev,
                    "Failed to offload ct entry, err: %d\n", err);
        return err;
 }
 
+static void
+mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
+                       struct mlx5_ct_entry *entry)
+{
+       mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+       if (entry->tuple_node.next)
+               rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+                                      &entry->tuple_nat_node,
+                                      tuples_nat_ht_params);
+       rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
+                              tuples_ht_params);
+}
+
 static int
 mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
                                  struct flow_cls_offload *flow)
@@ -649,7 +845,7 @@ mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
        if (!entry)
                return -ENOENT;
 
-       mlx5_tc_ct_entry_del_rules(ft->ct_priv, entry);
+       mlx5_tc_ct_del_ft_entry(ft->ct_priv, entry);
        WARN_ON(rhashtable_remove_fast(&ft->ct_entries_ht,
                                       &entry->node,
                                       cts_ht_params));
@@ -672,7 +868,7 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
                return -ENOENT;
 
        mlx5_fc_query_cached(entry->counter, &bytes, &packets, &lastuse);
-       flow_stats_update(&f->stats, bytes, packets, lastuse,
+       flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
                          FLOW_ACTION_HW_STATS_DELAYED);
 
        return 0;
@@ -702,10 +898,71 @@ mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
        return -EOPNOTSUPP;
 }
 
+static bool
+mlx5_tc_ct_skb_to_tuple(struct sk_buff *skb, struct mlx5_ct_tuple *tuple,
+                       u16 zone)
+{
+       struct flow_keys flow_keys;
+
+       skb_reset_network_header(skb);
+       skb_flow_dissect_flow_keys(skb, &flow_keys, 0);
+
+       tuple->zone = zone;
+
+       if (flow_keys.basic.ip_proto != IPPROTO_TCP &&
+           flow_keys.basic.ip_proto != IPPROTO_UDP)
+               return false;
+
+       tuple->port.src = flow_keys.ports.src;
+       tuple->port.dst = flow_keys.ports.dst;
+       tuple->n_proto = flow_keys.basic.n_proto;
+       tuple->ip_proto = flow_keys.basic.ip_proto;
+
+       switch (flow_keys.basic.n_proto) {
+       case htons(ETH_P_IP):
+               tuple->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+               tuple->ip.src_v4 = flow_keys.addrs.v4addrs.src;
+               tuple->ip.dst_v4 = flow_keys.addrs.v4addrs.dst;
+               break;
+
+       case htons(ETH_P_IPV6):
+               tuple->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+               tuple->ip.src_v6 = flow_keys.addrs.v6addrs.src;
+               tuple->ip.dst_v6 = flow_keys.addrs.v6addrs.dst;
+               break;
+       default:
+               goto out;
+       }
+
+       return true;
+
+out:
+       return false;
+}
+
+int
+mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
+                           struct mlx5_flow_spec *spec)
+{
+       u32 ctstate = 0, ctstate_mask = 0;
+
+       mlx5e_tc_match_to_reg_get_match(spec, CTSTATE_TO_REG,
+                                       &ctstate, &ctstate_mask);
+       if (ctstate_mask)
+               return -EOPNOTSUPP;
+
+       ctstate_mask |= MLX5_CT_STATE_TRK_BIT;
+       mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG,
+                                   ctstate, ctstate_mask);
+
+       return 0;
+}
+
 int
 mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
                       struct mlx5_flow_spec *spec,
                       struct flow_cls_offload *f,
+                      struct mlx5_ct_attr *ct_attr,
                       struct netlink_ext_ack *extack)
 {
        struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
@@ -716,6 +973,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
        u16 ct_state_on, ct_state_off;
        u16 ct_state, ct_state_mask;
        struct flow_match_ct match;
+       u32 ct_labels[4];
 
        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT))
                return 0;
@@ -742,12 +1000,6 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
                return -EOPNOTSUPP;
        }
 
-       if (mask->ct_labels[1] || mask->ct_labels[2] || mask->ct_labels[3]) {
-               NL_SET_ERR_MSG_MOD(extack,
-                                  "only lower 32bits of ct_labels are supported for offload");
-               return -EOPNOTSUPP;
-       }
-
        ct_state_on = ct_state & ct_state_mask;
        ct_state_off = (ct_state & ct_state_mask) ^ ct_state_mask;
        trk = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
@@ -776,10 +1028,17 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
        if (mask->ct_mark)
                mlx5e_tc_match_to_reg_match(spec, MARK_TO_REG,
                                            key->ct_mark, mask->ct_mark);
-       if (mask->ct_labels[0])
-               mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG,
-                                           key->ct_labels[0],
-                                           mask->ct_labels[0]);
+       if (mask->ct_labels[0] || mask->ct_labels[1] || mask->ct_labels[2] ||
+           mask->ct_labels[3]) {
+               ct_labels[0] = key->ct_labels[0] & mask->ct_labels[0];
+               ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1];
+               ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2];
+               ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3];
+               if (mapping_add(ct_priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id))
+                       return -EOPNOTSUPP;
+               mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id,
+                                           MLX5_CT_LABELS_MASK);
+       }
 
        return 0;
 }
@@ -1054,6 +1313,10 @@ mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
        if (!ft)
                return ERR_PTR(-ENOMEM);
 
+       err = mapping_add(ct_priv->zone_mapping, &zone, &ft->zone_restore_id);
+       if (err)
+               goto err_mapping;
+
        ft->zone = zone;
        ft->nf_ft = nf_ft;
        ft->ct_priv = ct_priv;
@@ -1086,6 +1349,8 @@ err_insert:
 err_init:
        mlx5_tc_ct_free_pre_ct_tables(ft);
 err_alloc_pre_ct:
+       mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);
+err_mapping:
        kfree(ft);
        return ERR_PTR(err);
 }
@@ -1096,7 +1361,8 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
        struct mlx5_tc_ct_priv *ct_priv = arg;
        struct mlx5_ct_entry *entry = ptr;
 
-       mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+       mlx5_tc_ct_del_ft_entry(ct_priv, entry);
+       kfree(entry);
 }
 
 static void
@@ -1112,6 +1378,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
                                    mlx5_tc_ct_flush_ft_entry,
                                    ct_priv);
        mlx5_tc_ct_free_pre_ct_tables(ft);
+       mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);
        kfree(ft);
 }
 
@@ -1137,8 +1404,9 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
  * + tuple + zone match +
  * +--------------------+
  *      | set mark
- *      | set label
+ *      | set labels_id
  *      | set established
+ *     | set zone_restore
  *      | do nat (if needed)
  *      v
  * +--------------+
@@ -1146,12 +1414,11 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
  * + fte_id match +------------------------>
  * +--------------+
  */
-static int
+static struct mlx5_flow_handle *
 __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
                          struct mlx5e_tc_flow *flow,
                          struct mlx5_flow_spec *orig_spec,
-                         struct mlx5_esw_flow_attr *attr,
-                         struct mlx5_flow_handle **flow_rule)
+                         struct mlx5_esw_flow_attr *attr)
 {
        struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
        bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
@@ -1171,7 +1438,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
        if (!post_ct_spec || !ct_flow) {
                kfree(post_ct_spec);
                kfree(ct_flow);
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
        }
 
        /* Register for CT established events */
@@ -1292,11 +1559,10 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
        }
 
        attr->ct_attr.ct_flow = ct_flow;
-       *flow_rule = ct_flow->post_ct_rule;
        dealloc_mod_hdr_actions(&pre_mod_acts);
        kfree(post_ct_spec);
 
-       return 0;
+       return rule;
 
 err_insert_orig:
        mlx5_eswitch_del_offloaded_rule(ct_priv->esw, ct_flow->post_ct_rule,
@@ -1314,16 +1580,14 @@ err_ft:
        kfree(post_ct_spec);
        kfree(ct_flow);
        netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err);
-       return err;
+       return ERR_PTR(err);
 }
 
-static int
+static struct mlx5_flow_handle *
 __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
-                               struct mlx5e_tc_flow *flow,
                                struct mlx5_flow_spec *orig_spec,
                                struct mlx5_esw_flow_attr *attr,
-                               struct mlx5e_tc_mod_hdr_acts *mod_acts,
-                               struct mlx5_flow_handle **flow_rule)
+                               struct mlx5e_tc_mod_hdr_acts *mod_acts)
 {
        struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
        struct mlx5_eswitch *esw = ct_priv->esw;
@@ -1335,7 +1599,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
 
        ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL);
        if (!ct_flow)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
        /* Base esw attributes on original rule attribute */
        pre_ct_attr = &ct_flow->pre_ct_attr;
@@ -1370,16 +1634,14 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
 
        attr->ct_attr.ct_flow = ct_flow;
        ct_flow->pre_ct_rule = rule;
-       *flow_rule = rule;
-
-       return 0;
+       return rule;
 
 err_insert:
        mlx5_modify_header_dealloc(priv->mdev, mod_hdr);
 err_set_registers:
        netdev_warn(priv->netdev,
                    "Failed to offload ct clear flow, err %d\n", err);
-       return err;
+       return ERR_PTR(err);
 }
 
 struct mlx5_flow_handle *
@@ -1391,22 +1653,18 @@ mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
 {
        bool clear_action = attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
        struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
-       struct mlx5_flow_handle *rule = ERR_PTR(-EINVAL);
-       int err;
+       struct mlx5_flow_handle *rule;
 
        if (!ct_priv)
                return ERR_PTR(-EOPNOTSUPP);
 
        mutex_lock(&ct_priv->control_lock);
+
        if (clear_action)
-               err = __mlx5_tc_ct_flow_offload_clear(priv, flow, spec, attr,
-                                                     mod_hdr_acts, &rule);
+               rule = __mlx5_tc_ct_flow_offload_clear(priv, spec, attr, mod_hdr_acts);
        else
-               err = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr,
-                                               &rule);
+               rule = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr);
        mutex_unlock(&ct_priv->control_lock);
-       if (err)
-               return ERR_PTR(err);
 
        return rule;
 }
@@ -1534,6 +1792,18 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
                goto err_alloc;
        }
 
+       ct_priv->zone_mapping = mapping_create(sizeof(u16), 0, true);
+       if (IS_ERR(ct_priv->zone_mapping)) {
+               err = PTR_ERR(ct_priv->zone_mapping);
+               goto err_mapping_zone;
+       }
+
+       ct_priv->labels_mapping = mapping_create(sizeof(u32) * 4, 0, true);
+       if (IS_ERR(ct_priv->labels_mapping)) {
+               err = PTR_ERR(ct_priv->labels_mapping);
+               goto err_mapping_labels;
+       }
+
        ct_priv->esw = esw;
        ct_priv->netdev = rpriv->netdev;
        ct_priv->ct = mlx5_esw_chains_create_global_table(esw);
@@ -1560,9 +1830,10 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
        }
 
        idr_init(&ct_priv->fte_ids);
-       xa_init_flags(&ct_priv->tuple_ids, XA_FLAGS_ALLOC1);
        mutex_init(&ct_priv->control_lock);
        rhashtable_init(&ct_priv->zone_ht, &zone_params);
+       rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params);
+       rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params);
 
        /* Done, set ct_priv to know it initializted */
        uplink_priv->ct_priv = ct_priv;
@@ -1574,6 +1845,10 @@ err_post_ct_tbl:
 err_ct_nat_tbl:
        mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct);
 err_ct_tbl:
+       mapping_destroy(ct_priv->labels_mapping);
+err_mapping_labels:
+       mapping_destroy(ct_priv->zone_mapping);
+err_mapping_zone:
        kfree(ct_priv);
 err_alloc:
 err_support:
@@ -1592,10 +1867,13 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
        mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->post_ct);
        mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct_nat);
        mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct);
+       mapping_destroy(ct_priv->zone_mapping);
+       mapping_destroy(ct_priv->labels_mapping);
 
+       rhashtable_destroy(&ct_priv->ct_tuples_ht);
+       rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
        rhashtable_destroy(&ct_priv->zone_ht);
        mutex_destroy(&ct_priv->control_lock);
-       xa_destroy(&ct_priv->tuple_ids);
        idr_destroy(&ct_priv->fte_ids);
        kfree(ct_priv);
 
@@ -1604,22 +1882,30 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
 
 bool
 mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
-                        struct sk_buff *skb, u32 tupleid)
+                        struct sk_buff *skb, u8 zone_restore_id)
 {
        struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv;
-       struct mlx5_ct_zone_rule *zone_rule;
+       struct mlx5_ct_tuple tuple = {};
        struct mlx5_ct_entry *entry;
+       u16 zone;
 
-       if (!ct_priv || !tupleid)
+       if (!ct_priv || !zone_restore_id)
                return true;
 
-       zone_rule = xa_load(&ct_priv->tuple_ids, tupleid);
-       if (!zone_rule)
+       if (mapping_find(ct_priv->zone_mapping, zone_restore_id, &zone))
                return false;
 
-       entry = container_of(zone_rule, struct mlx5_ct_entry,
-                            zone_rules[zone_rule->nat]);
-       tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie);
+       if (!mlx5_tc_ct_skb_to_tuple(skb, &tuple, zone))
+               return false;
 
+       entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &tuple,
+                                      tuples_ht_params);
+       if (!entry)
+               entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_nat_ht,
+                                              &tuple, tuples_nat_ht_params);
+       if (!entry)
+               return false;
+
+       tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie);
        return true;
 }
index 626f6c0..3baef91 100644 (file)
@@ -25,6 +25,7 @@ struct mlx5_ct_attr {
        u16 ct_action;
        struct mlx5_ct_flow *ct_flow;
        struct nf_flowtable *nf_ft;
+       u32 ct_labels_id;
 };
 
 #define zone_to_reg_ct {\
@@ -67,16 +68,17 @@ struct mlx5_ct_attr {
                                 misc_parameters_2.metadata_reg_c_5),\
 }
 
-#define tupleid_to_reg_ct {\
+#define zone_restore_to_reg_ct {\
        .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,\
        .moffset = 0,\
-       .mlen = 3,\
+       .mlen = 1,\
        .soffset = MLX5_BYTE_OFF(fte_match_param,\
-                                misc_parameters_2.metadata_reg_c_1),\
+                                misc_parameters_2.metadata_reg_c_1) + 3,\
 }
 
-#define TUPLE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[TUPLEID_TO_REG].mlen * 8)
-#define TUPLE_ID_MAX GENMASK(TUPLE_ID_BITS - 1, 0)
+#define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen)
+#define ZONE_RESTORE_BITS (REG_MAPPING_MLEN(ZONE_RESTORE_TO_REG) * 8)
+#define ZONE_RESTORE_MAX GENMASK(ZONE_RESTORE_BITS - 1, 0)
 
 #if IS_ENABLED(CONFIG_MLX5_TC_CT)
 
@@ -89,8 +91,12 @@ int
 mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
                       struct mlx5_flow_spec *spec,
                       struct flow_cls_offload *f,
+                      struct mlx5_ct_attr *ct_attr,
                       struct netlink_ext_ack *extack);
 int
+mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
+                           struct mlx5_flow_spec *spec);
+int
 mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
                        struct mlx5_esw_flow_attr *attr,
                        const struct flow_action_entry *act,
@@ -109,7 +115,7 @@ mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv,
 
 bool
 mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
-                        struct sk_buff *skb, u32 tupleid);
+                        struct sk_buff *skb, u8 zone_restore_id);
 
 #else /* CONFIG_MLX5_TC_CT */
 
@@ -128,6 +134,7 @@ static inline int
 mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
                       struct mlx5_flow_spec *spec,
                       struct flow_cls_offload *f,
+                      struct mlx5_ct_attr *ct_attr,
                       struct netlink_ext_ack *extack)
 {
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
@@ -141,6 +148,13 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
 }
 
 static inline int
+mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
+                           struct mlx5_flow_spec *spec)
+{
+       return 0;
+}
+
+static inline int
 mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
                        struct mlx5_esw_flow_attr *attr,
                        const struct flow_action_entry *act,
@@ -170,10 +184,10 @@ mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv,
 
 static inline bool
 mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
-                        struct sk_buff *skb, u32 tupleid)
+                        struct sk_buff *skb, u8 zone_restore_id)
 {
-       if  (!tupleid)
-               return  true;
+       if (!zone_restore_id)
+               return true;
 
        return false;
 }
index bfd3e11..cf425a6 100644 (file)
 enum mlx5e_icosq_wqe_type {
        MLX5E_ICOSQ_WQE_NOP,
        MLX5E_ICOSQ_WQE_UMR_RX,
+#ifdef CONFIG_MLX5_EN_TLS
+       MLX5E_ICOSQ_WQE_UMR_TLS,
+       MLX5E_ICOSQ_WQE_SET_PSV_TLS,
+       MLX5E_ICOSQ_WQE_GET_PSV_TLS,
+#endif
 };
 
 static inline bool
@@ -114,9 +119,19 @@ struct mlx5e_icosq_wqe_info {
                struct {
                        struct mlx5e_rq *rq;
                } umr;
+#ifdef CONFIG_MLX5_EN_TLS
+               struct {
+                       struct mlx5e_ktls_offload_context_rx *priv_rx;
+               } tls_set_params;
+               struct {
+                       struct mlx5e_ktls_rx_resync_buf *buf;
+               } tls_get_params;
+#endif
        };
 };
 
+void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq);
+
 static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
 {
        struct mlx5_wq_cyc *wq = &sq->wq;
@@ -182,7 +197,7 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
 
 static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg)
 {
-       return cseg && !!cseg->tisn;
+       return cseg && !!cseg->tis_tir_num;
 }
 
 static inline u8
@@ -253,7 +268,7 @@ static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
        }
 }
 
-static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 sqn,
+static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 qn,
                                        struct mlx5_err_cqe *err_cqe)
 {
        struct mlx5_cqwq *wq = &cq->wq;
@@ -262,13 +277,53 @@ static inline void mlx5e_dump_error_cqe(struct mlx5e_cq *cq, u32 sqn,
        ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
 
        netdev_err(cq->channel->netdev,
-                  "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
-                  cq->mcq.cqn, ci, sqn,
+                  "Error cqe on cqn 0x%x, ci 0x%x, qn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
+                  cq->mcq.cqn, ci, qn,
                   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
                   err_cqe->syndrome, err_cqe->vendor_err_synd);
        mlx5_dump_err_cqe(cq->mdev, err_cqe);
 }
 
+static inline u32 mlx5e_rqwq_get_size(struct mlx5e_rq *rq)
+{
+       switch (rq->wq_type) {
+       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+               return mlx5_wq_ll_get_size(&rq->mpwqe.wq);
+       default:
+               return mlx5_wq_cyc_get_size(&rq->wqe.wq);
+       }
+}
+
+static inline u32 mlx5e_rqwq_get_cur_sz(struct mlx5e_rq *rq)
+{
+       switch (rq->wq_type) {
+       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+               return rq->mpwqe.wq.cur_sz;
+       default:
+               return rq->wqe.wq.cur_sz;
+       }
+}
+
+static inline u16 mlx5e_rqwq_get_head(struct mlx5e_rq *rq)
+{
+       switch (rq->wq_type) {
+       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+               return mlx5_wq_ll_get_head(&rq->mpwqe.wq);
+       default:
+               return mlx5_wq_cyc_get_head(&rq->wqe.wq);
+       }
+}
+
+static inline u16 mlx5e_rqwq_get_wqe_counter(struct mlx5e_rq *rq)
+{
+       switch (rq->wq_type) {
+       case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+               return mlx5_wq_ll_get_counter(&rq->mpwqe.wq);
+       default:
+               return mlx5_wq_cyc_get_counter(&rq->wqe.wq);
+       }
+}
+
 /* SW parser related functions */
 
 struct mlx5e_swp_spec {
index c9d308e..e0c1b01 100644 (file)
@@ -34,6 +34,7 @@
 #include <net/xdp_sock_drv.h>
 #include "en/xdp.h"
 #include "en/params.h"
+#include <linux/indirect_call_wrapper.h>
 
 int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
 {
@@ -114,7 +115,8 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
                xdpi.page.di    = *di;
        }
 
-       return sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, 0);
+       return INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
+                              mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);
 }
 
 /* returns true if packet was consumed by xdp */
@@ -237,7 +239,7 @@ enum {
        MLX5E_XDP_CHECK_START_MPWQE = 2,
 };
 
-static int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
+INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
 {
        if (unlikely(!sq->mpwqe.wqe)) {
                const u16 stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
@@ -256,10 +258,9 @@ static int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
        return MLX5E_XDP_CHECK_OK;
 }
 
-static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
-                                      struct mlx5e_xdp_xmit_data *xdptxd,
-                                      struct mlx5e_xdp_info *xdpi,
-                                      int check_result)
+INDIRECT_CALLABLE_SCOPE bool
+mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd,
+                          struct mlx5e_xdp_info *xdpi, int check_result)
 {
        struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
        struct mlx5e_xdpsq_stats *stats = sq->stats;
@@ -293,7 +294,7 @@ static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
        return true;
 }
 
-static int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
+INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
 {
        if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
                /* SQ is full, ring doorbell */
@@ -305,10 +306,9 @@ static int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
        return MLX5E_XDP_CHECK_OK;
 }
 
-static bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
-                                struct mlx5e_xdp_xmit_data *xdptxd,
-                                struct mlx5e_xdp_info *xdpi,
-                                int check_result)
+INDIRECT_CALLABLE_SCOPE bool
+mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd,
+                    struct mlx5e_xdp_info *xdpi, int check_result)
 {
        struct mlx5_wq_cyc       *wq   = &sq->wq;
        u16                       pi   = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
@@ -506,6 +506,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
                struct xdp_frame *xdpf = frames[i];
                struct mlx5e_xdp_xmit_data xdptxd;
                struct mlx5e_xdp_info xdpi;
+               bool ret;
 
                xdptxd.data = xdpf->data;
                xdptxd.len = xdpf->len;
@@ -522,7 +523,9 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
                xdpi.frame.xdpf     = xdpf;
                xdpi.frame.dma_addr = xdptxd.dma_addr;
 
-               if (unlikely(!sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, 0))) {
+               ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
+                                     mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);
+               if (unlikely(!ret)) {
                        dma_unmap_single(sq->pdev, xdptxd.dma_addr,
                                         xdptxd.len, DMA_TO_DEVICE);
                        xdp_return_frame_rx_napi(xdpf);
index ca48c29..e806c13 100644 (file)
@@ -32,6 +32,8 @@
 #ifndef __MLX5_EN_XDP_H__
 #define __MLX5_EN_XDP_H__
 
+#include <linux/indirect_call_wrapper.h>
+
 #include "en.h"
 #include "en/txrx.h"
 
@@ -70,6 +72,17 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
 int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
                   u32 flags);
 
+INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
+                                                         struct mlx5e_xdp_xmit_data *xdptxd,
+                                                         struct mlx5e_xdp_info *xdpi,
+                                                         int check_result));
+INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
+                                                   struct mlx5e_xdp_xmit_data *xdptxd,
+                                                   struct mlx5e_xdp_info *xdpi,
+                                                   int check_result));
+INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
+INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
+
 static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
 {
        set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
index 2c80205..cc46414 100644 (file)
@@ -34,31 +34,13 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
        }
 }
 
-static void mlx5e_build_xskicosq_param(struct mlx5e_priv *priv,
-                                      u8 log_wq_size,
-                                      struct mlx5e_sq_param *param)
-{
-       void *sqc = param->sqc;
-       void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
-
-       mlx5e_build_sq_param_common(priv, param);
-
-       MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
-}
-
 static void mlx5e_build_xsk_cparam(struct mlx5e_priv *priv,
                                   struct mlx5e_params *params,
                                   struct mlx5e_xsk_param *xsk,
                                   struct mlx5e_channel_param *cparam)
 {
-       const u8 xskicosq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
-
        mlx5e_build_rq_param(priv, params, xsk, &cparam->rq);
        mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
-       mlx5e_build_xskicosq_param(priv, xskicosq_size, &cparam->icosq);
-       mlx5e_build_rx_cq_param(priv, params, xsk, &cparam->rx_cq);
-       mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
-       mlx5e_build_ico_cq_param(priv, xskicosq_size, &cparam->icosq_cq);
 }
 
 int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
@@ -66,7 +48,6 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
                   struct mlx5e_channel *c)
 {
        struct mlx5e_channel_param *cparam;
-       struct dim_cq_moder icocq_moder = {};
        int err;
 
        if (!mlx5e_validate_xsk_param(params, xsk, priv->mdev))
@@ -78,7 +59,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
 
        mlx5e_build_xsk_cparam(priv, params, xsk, cparam);
 
-       err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->xskrq.cq);
+       err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rq.cqp, &c->xskrq.cq);
        if (unlikely(err))
                goto err_free_cparam;
 
@@ -86,7 +67,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
        if (unlikely(err))
                goto err_close_rx_cq;
 
-       err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xsksq.cq);
+       err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &c->xsksq.cq);
        if (unlikely(err))
                goto err_close_rq;
 
@@ -100,31 +81,12 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
        if (unlikely(err))
                goto err_close_tx_cq;
 
-       err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->xskicosq.cq);
-       if (unlikely(err))
-               goto err_close_sq;
-
-       /* Create a dedicated SQ for posting NOPs whenever we need an IRQ to be
-        * triggered and NAPI to be called on the correct CPU.
-        */
-       err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->xskicosq);
-       if (unlikely(err))
-               goto err_close_icocq;
-
        kvfree(cparam);
 
-       spin_lock_init(&c->xskicosq_lock);
-
        set_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
 
        return 0;
 
-err_close_icocq:
-       mlx5e_close_cq(&c->xskicosq.cq);
-
-err_close_sq:
-       mlx5e_close_xdpsq(&c->xsksq);
-
 err_close_tx_cq:
        mlx5e_close_cq(&c->xsksq.cq);
 
@@ -148,32 +110,27 @@ void mlx5e_close_xsk(struct mlx5e_channel *c)
 
        mlx5e_close_rq(&c->xskrq);
        mlx5e_close_cq(&c->xskrq.cq);
-       mlx5e_close_icosq(&c->xskicosq);
-       mlx5e_close_cq(&c->xskicosq.cq);
        mlx5e_close_xdpsq(&c->xsksq);
        mlx5e_close_cq(&c->xsksq.cq);
 
        memset(&c->xskrq, 0, sizeof(c->xskrq));
        memset(&c->xsksq, 0, sizeof(c->xsksq));
-       memset(&c->xskicosq, 0, sizeof(c->xskicosq));
 }
 
 void mlx5e_activate_xsk(struct mlx5e_channel *c)
 {
-       mlx5e_activate_icosq(&c->xskicosq);
        set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
        /* TX queue is created active. */
 
-       spin_lock(&c->xskicosq_lock);
-       mlx5e_trigger_irq(&c->xskicosq);
-       spin_unlock(&c->xskicosq_lock);
+       spin_lock(&c->async_icosq_lock);
+       mlx5e_trigger_irq(&c->async_icosq);
+       spin_unlock(&c->async_icosq_lock);
 }
 
 void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
 {
        mlx5e_deactivate_rq(&c->xskrq);
        /* TX queue is disabled on close. */
-       mlx5e_deactivate_icosq(&c->xskicosq);
 }
 
 static int mlx5e_redirect_xsk_rqt(struct mlx5e_priv *priv, u16 ix, u32 rqn)
index 83dce9c..0dfbc96 100644 (file)
@@ -6,6 +6,7 @@
 #include "en/xdp.h"
 #include "en/params.h"
 #include <net/xdp_sock_drv.h>
+#include <linux/indirect_call_wrapper.h>
 
 int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
 {
@@ -26,19 +27,19 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
                return -ENXIO;
 
        if (!napi_if_scheduled_mark_missed(&c->napi)) {
-               /* To avoid WQE overrun, don't post a NOP if XSKICOSQ is not
+               /* To avoid WQE overrun, don't post a NOP if async_icosq is not
                 * active and not polled by NAPI. Return 0, because the upcoming
                 * activate will trigger the IRQ for us.
                 */
-               if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->xskicosq.state)))
+               if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->async_icosq.state)))
                        return 0;
 
-               if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state))
+               if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state))
                        return 0;
 
-               spin_lock(&c->xskicosq_lock);
-               mlx5e_trigger_irq(&c->xskicosq);
-               spin_unlock(&c->xskicosq_lock);
+               spin_lock(&c->async_icosq_lock);
+               mlx5e_trigger_irq(&c->async_icosq);
+               spin_unlock(&c->async_icosq_lock);
        }
 
        return 0;
@@ -75,8 +76,12 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
        xdpi.mode = MLX5E_XDP_XMIT_MODE_XSK;
 
        for (; budget; budget--) {
-               int check_result = sq->xmit_xdp_frame_check(sq);
+               int check_result = INDIRECT_CALL_2(sq->xmit_xdp_frame_check,
+                                                  mlx5e_xmit_xdp_frame_check_mpwqe,
+                                                  mlx5e_xmit_xdp_frame_check,
+                                                  sq);
                struct xdp_desc desc;
+               bool ret;
 
                if (unlikely(check_result < 0)) {
                        work_done = false;
@@ -98,7 +103,9 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
 
                xsk_buff_raw_dma_sync_for_device(umem, xdptxd.dma_addr, xdptxd.len);
 
-               if (unlikely(!sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, check_result))) {
+               ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
+                                     mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, check_result);
+               if (unlikely(!ret)) {
                        if (sq->mpwqe.wqe)
                                mlx5e_xdp_mpwqe_complete(sq);
 
index 7b17fcd..331ca2b 100644 (file)
@@ -215,16 +215,3 @@ int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid)
        return umem ? mlx5e_xsk_enable_umem(priv, umem, ix) :
                      mlx5e_xsk_disable_umem(priv, ix);
 }
-
-u16 mlx5e_xsk_first_unused_channel(struct mlx5e_params *params, struct mlx5e_xsk *xsk)
-{
-       u16 res = xsk->refcnt ? params->num_channels : 0;
-
-       while (res) {
-               if (mlx5e_xsk_get_umem(params, xsk, res - 1))
-                       break;
-               --res;
-       }
-
-       return res;
-}
index 25b4cbe..bada949 100644 (file)
@@ -26,6 +26,4 @@ int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid)
 
 int mlx5e_xsk_resize_reuseq(struct xdp_umem *umem, u32 nentries);
 
-u16 mlx5e_xsk_first_unused_channel(struct mlx5e_params *params, struct mlx5e_xsk *xsk);
-
 #endif /* __MLX5_EN_XSK_UMEM_H__ */
index fac145d..110476b 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include "en_accel/ipsec_rxtx.h"
+#include "en_accel/tls.h"
 #include "en_accel/tls_rxtx.h"
 #include "en.h"
 #include "en/txrx.h"
@@ -147,4 +148,13 @@ static inline bool mlx5e_accel_tx_finish(struct mlx5e_priv *priv,
        return true;
 }
 
+static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
+{
+       return mlx5e_ktls_init_rx(priv);
+}
+
+static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
+{
+       mlx5e_ktls_cleanup_rx(priv);
+}
 #endif /* __MLX5E_EN_ACCEL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
new file mode 100644 (file)
index 0000000..4cdd9ea
--- /dev/null
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#include <linux/netdevice.h>
+#include "en_accel/fs_tcp.h"
+#include "fs_core.h"
+
+enum accel_fs_tcp_type {
+       ACCEL_FS_IPV4_TCP,
+       ACCEL_FS_IPV6_TCP,
+       ACCEL_FS_TCP_NUM_TYPES,
+};
+
+struct mlx5e_accel_fs_tcp {
+       struct mlx5e_flow_table tables[ACCEL_FS_TCP_NUM_TYPES];
+       struct mlx5_flow_handle *default_rules[ACCEL_FS_TCP_NUM_TYPES];
+};
+
+static enum mlx5e_traffic_types fs_accel2tt(enum accel_fs_tcp_type i)
+{
+       switch (i) {
+       case ACCEL_FS_IPV4_TCP:
+               return MLX5E_TT_IPV4_TCP;
+       default: /* ACCEL_FS_IPV6_TCP */
+               return MLX5E_TT_IPV6_TCP;
+       }
+}
+
+static void accel_fs_tcp_set_ipv4_flow(struct mlx5_flow_spec *spec, struct sock *sk)
+{
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
+       MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_TCP);
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
+       MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);
+       memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+                           outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
+              &inet_sk(sk)->inet_daddr, 4);
+       memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+                           outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+              &inet_sk(sk)->inet_rcv_saddr, 4);
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+                        outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+                        outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+}
+
+static void accel_fs_tcp_set_ipv6_flow(struct mlx5_flow_spec *spec, struct sock *sk)
+{
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
+       MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_TCP);
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
+       MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);
+       memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+                           outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+              &sk->sk_v6_daddr, 16);
+       memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+                           outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+              &inet6_sk(sk)->saddr, 16);
+       memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+                           outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+              0xff, 16);
+       memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+                           outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+              0xff, 16);
+}
+
+void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule)
+{
+       mlx5_del_flow_rules(rule);
+}
+
+struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
+                                              struct sock *sk, u32 tirn,
+                                              uint32_t flow_tag)
+{
+       struct mlx5_flow_destination dest = {};
+       struct mlx5e_flow_table *ft = NULL;
+       struct mlx5e_accel_fs_tcp *fs_tcp;
+       MLX5_DECLARE_FLOW_ACT(flow_act);
+       struct mlx5_flow_handle *flow;
+       struct mlx5_flow_spec *spec;
+
+       spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+       if (!spec)
+               return ERR_PTR(-ENOMEM);
+
+       fs_tcp = priv->fs.accel_tcp;
+
+       spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+       switch (sk->sk_family) {
+       case AF_INET:
+               accel_fs_tcp_set_ipv4_flow(spec, sk);
+               ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP];
+               mlx5e_dbg(HW, priv, "%s flow is %pI4:%d -> %pI4:%d\n", __func__,
+                         &inet_sk(sk)->inet_rcv_saddr,
+                         inet_sk(sk)->inet_sport,
+                         &inet_sk(sk)->inet_daddr,
+                         inet_sk(sk)->inet_dport);
+               break;
+#if IS_ENABLED(CONFIG_IPV6)
+       case AF_INET6:
+               if (!sk->sk_ipv6only &&
+                   ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
+                       accel_fs_tcp_set_ipv4_flow(spec, sk);
+                       ft = &fs_tcp->tables[ACCEL_FS_IPV4_TCP];
+               } else {
+                       accel_fs_tcp_set_ipv6_flow(spec, sk);
+                       ft = &fs_tcp->tables[ACCEL_FS_IPV6_TCP];
+               }
+               break;
+#endif
+       default:
+               break;
+       }
+
+       if (!ft) {
+               flow = ERR_PTR(-EINVAL);
+               goto out;
+       }
+
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+                        outer_headers.tcp_dport);
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+                        outer_headers.tcp_sport);
+       MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
+                ntohs(inet_sk(sk)->inet_sport));
+       MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
+                ntohs(inet_sk(sk)->inet_dport));
+
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+       dest.tir_num = tirn;
+       if (flow_tag != MLX5_FS_DEFAULT_FLOW_TAG) {
+               spec->flow_context.flow_tag = flow_tag;
+               spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
+       }
+
+       flow = mlx5_add_flow_rules(ft->t, spec, &flow_act, &dest, 1);
+
+       if (IS_ERR(flow))
+               netdev_err(priv->netdev, "mlx5_add_flow_rules() failed, flow is %ld\n",
+                          PTR_ERR(flow));
+
+out:
+       kvfree(spec);
+       return flow;
+}
+
+static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv,
+                                        enum accel_fs_tcp_type type)
+{
+       struct mlx5e_flow_table *accel_fs_t;
+       struct mlx5_flow_destination dest;
+       struct mlx5e_accel_fs_tcp *fs_tcp;
+       MLX5_DECLARE_FLOW_ACT(flow_act);
+       struct mlx5_flow_handle *rule;
+       int err = 0;
+
+       fs_tcp = priv->fs.accel_tcp;
+       accel_fs_t = &fs_tcp->tables[type];
+
+       dest = mlx5e_ttc_get_default_dest(priv, fs_accel2tt(type));
+       rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1);
+       if (IS_ERR(rule)) {
+               err = PTR_ERR(rule);
+               netdev_err(priv->netdev,
+                          "%s: add default rule failed, accel_fs type=%d, err %d\n",
+                          __func__, type, err);
+               return err;
+       }
+
+       fs_tcp->default_rules[type] = rule;
+       return 0;
+}
+
+#define MLX5E_ACCEL_FS_TCP_NUM_GROUPS  (2)
+#define MLX5E_ACCEL_FS_TCP_GROUP1_SIZE (BIT(16) - 1)
+#define MLX5E_ACCEL_FS_TCP_GROUP2_SIZE (BIT(0))
+#define MLX5E_ACCEL_FS_TCP_TABLE_SIZE  (MLX5E_ACCEL_FS_TCP_GROUP1_SIZE +\
+                                        MLX5E_ACCEL_FS_TCP_GROUP2_SIZE)
+static int accel_fs_tcp_create_groups(struct mlx5e_flow_table *ft,
+                                     enum accel_fs_tcp_type type)
+{
+       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+       void *outer_headers_c;
+       int ix = 0;
+       u32 *in;
+       int err;
+       u8 *mc;
+
+       ft->g = kcalloc(MLX5E_ACCEL_FS_TCP_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if  (!in || !ft->g) {
+               kvfree(ft->g);
+               kvfree(in);
+               return -ENOMEM;
+       }
+
+       mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+       outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
+       MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
+       MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_version);
+
+       switch (type) {
+       case ACCEL_FS_IPV4_TCP:
+       case ACCEL_FS_IPV6_TCP:
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
+               break;
+       default:
+               err = -EINVAL;
+               goto out;
+       }
+
+       switch (type) {
+       case ACCEL_FS_IPV4_TCP:
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
+                                src_ipv4_src_ipv6.ipv4_layout.ipv4);
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
+                                dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+               break;
+       case ACCEL_FS_IPV6_TCP:
+               memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+                                   src_ipv4_src_ipv6.ipv6_layout.ipv6),
+                      0xff, 16);
+               memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+                                   dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+                      0xff, 16);
+               break;
+       default:
+               err = -EINVAL;
+               goto out;
+       }
+
+       MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+       MLX5_SET_CFG(in, start_flow_index, ix);
+       ix += MLX5E_ACCEL_FS_TCP_GROUP1_SIZE;
+       MLX5_SET_CFG(in, end_flow_index, ix - 1);
+       ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+       if (IS_ERR(ft->g[ft->num_groups]))
+               goto err;
+       ft->num_groups++;
+
+       /* Default Flow Group */
+       memset(in, 0, inlen);
+       MLX5_SET_CFG(in, start_flow_index, ix);
+       ix += MLX5E_ACCEL_FS_TCP_GROUP2_SIZE;
+       MLX5_SET_CFG(in, end_flow_index, ix - 1);
+       ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+       if (IS_ERR(ft->g[ft->num_groups]))
+               goto err;
+       ft->num_groups++;
+
+       kvfree(in);
+       return 0;
+
+err:
+       err = PTR_ERR(ft->g[ft->num_groups]);
+       ft->g[ft->num_groups] = NULL;
+out:
+       kvfree(in);
+
+       return err;
+}
+
+static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_type type)
+{
+       struct mlx5e_flow_table *ft = &priv->fs.accel_tcp->tables[type];
+       struct mlx5_flow_table_attr ft_attr = {};
+       int err;
+
+       ft->num_groups = 0;
+
+       ft_attr.max_fte = MLX5E_ACCEL_FS_TCP_TABLE_SIZE;
+       ft_attr.level = MLX5E_ACCEL_FS_TCP_FT_LEVEL;
+       ft_attr.prio = MLX5E_NIC_PRIO;
+
+       ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+       if (IS_ERR(ft->t)) {
+               err = PTR_ERR(ft->t);
+               ft->t = NULL;
+               return err;
+       }
+
+       netdev_dbg(priv->netdev, "Created fs accel table id %u level %u\n",
+                  ft->t->id, ft->t->level);
+
+       err = accel_fs_tcp_create_groups(ft, type);
+       if (err)
+               goto err;
+
+       err = accel_fs_tcp_add_default_rule(priv, type);
+       if (err)
+               goto err;
+
+       return 0;
+err:
+       mlx5e_destroy_flow_table(ft);
+       return err;
+}
+
+static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
+{
+       int err, i;
+
+       for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
+               /* Modify ttc rules destination to point back to the indir TIRs */
+               err = mlx5e_ttc_fwd_default_dest(priv, fs_accel2tt(i));
+               if (err) {
+                       netdev_err(priv->netdev,
+                                  "%s: modify ttc[%d] default destination failed, err(%d)\n",
+                                  __func__, fs_accel2tt(i), err);
+                       return err;
+               }
+       }
+
+       return 0;
+}
+
+static int accel_fs_tcp_enable(struct mlx5e_priv *priv)
+{
+       struct mlx5_flow_destination dest = {};
+       int err, i;
+
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+       for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
+               dest.ft = priv->fs.accel_tcp->tables[i].t;
+
+               /* Modify ttc rules destination to point on the accel_fs FTs */
+               err = mlx5e_ttc_fwd_dest(priv, fs_accel2tt(i), &dest);
+               if (err) {
+                       netdev_err(priv->netdev,
+                                  "%s: modify ttc[%d] destination to accel failed, err(%d)\n",
+                                  __func__, fs_accel2tt(i), err);
+                       return err;
+               }
+       }
+       return 0;
+}
+
+static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
+{
+       struct mlx5e_accel_fs_tcp *fs_tcp;
+
+       fs_tcp = priv->fs.accel_tcp;
+       if (IS_ERR_OR_NULL(fs_tcp->tables[i].t))
+               return;
+
+       mlx5_del_flow_rules(fs_tcp->default_rules[i]);
+       mlx5e_destroy_flow_table(&fs_tcp->tables[i]);
+       fs_tcp->tables[i].t = NULL;
+}
+
+void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
+{
+       int i;
+
+       if (!priv->fs.accel_tcp)
+               return;
+
+       accel_fs_tcp_disable(priv);
+
+       for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
+               accel_fs_tcp_destroy_table(priv, i);
+
+       kfree(priv->fs.accel_tcp);
+       priv->fs.accel_tcp = NULL;
+}
+
+int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
+{
+       int i, err;
+
+       if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version))
+               return -EOPNOTSUPP;
+
+       priv->fs.accel_tcp = kzalloc(sizeof(*priv->fs.accel_tcp), GFP_KERNEL);
+       if (!priv->fs.accel_tcp)
+               return -ENOMEM;
+
+       for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
+               err = accel_fs_tcp_create_table(priv, i);
+               if (err)
+                       goto err_destroy_tables;
+       }
+
+       err = accel_fs_tcp_enable(priv);
+       if (err)
+               goto err_destroy_tables;
+
+       return 0;
+
+err_destroy_tables:
+       while (--i >= 0)
+               accel_fs_tcp_destroy_table(priv, i);
+
+       kfree(priv->fs.accel_tcp);
+       priv->fs.accel_tcp = NULL;
+       return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
new file mode 100644 (file)
index 0000000..5892358
--- /dev/null
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5E_ACCEL_FS_TCP_H__
+#define __MLX5E_ACCEL_FS_TCP_H__
+
+#include "en.h"
+
+#ifdef CONFIG_MLX5_EN_TLS
+int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv);
+void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv);
+struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
+                                              struct sock *sk, u32 tirn,
+                                              uint32_t flow_tag);
+void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule);
+#else
+static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv) { return 0; }
+static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv) {}
+static inline struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
+                                                            struct sock *sk, u32 tirn,
+                                                            uint32_t flow_tag)
+{ return ERR_PTR(-EOPNOTSUPP); }
+static inline void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule) {}
+#endif
+
+#endif /* __MLX5E_ACCEL_FS_TCP_H__ */
+
index 92eb3ba..d39989c 100644 (file)
@@ -40,7 +40,7 @@
 #include "en.h"
 #include "en_accel/ipsec.h"
 #include "en_accel/ipsec_rxtx.h"
-
+#include "en_accel/ipsec_fs.h"
 
 static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
 {
@@ -111,7 +111,7 @@ static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
 static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
        struct xfrm_replay_state_esn *replay_esn;
-       u32 seq_bottom;
+       u32 seq_bottom = 0;
        u8 overlap;
        u32 *esn;
 
@@ -121,7 +121,9 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
        }
 
        replay_esn = sa_entry->x->replay_esn;
-       seq_bottom = replay_esn->seq - replay_esn->replay_window + 1;
+       if (replay_esn->seq >= replay_esn->replay_window)
+               seq_bottom = replay_esn->seq - replay_esn->replay_window + 1;
+
        overlap = sa_entry->esn_state.overlap;
 
        sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
@@ -207,7 +209,7 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
 
 static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
 {
-       struct net_device *netdev = x->xso.dev;
+       struct net_device *netdev = x->xso.real_dev;
        struct mlx5e_priv *priv;
 
        priv = netdev_priv(netdev);
@@ -282,10 +284,31 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
        return 0;
 }
 
+static int mlx5e_xfrm_fs_add_rule(struct mlx5e_priv *priv,
+                                 struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+       if (!mlx5_is_ipsec_device(priv->mdev))
+               return 0;
+
+       return mlx5e_accel_ipsec_fs_add_rule(priv, &sa_entry->xfrm->attrs,
+                                            sa_entry->ipsec_obj_id,
+                                            &sa_entry->ipsec_rule);
+}
+
+static void mlx5e_xfrm_fs_del_rule(struct mlx5e_priv *priv,
+                                  struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+       if (!mlx5_is_ipsec_device(priv->mdev))
+               return;
+
+       mlx5e_accel_ipsec_fs_del_rule(priv, &sa_entry->xfrm->attrs,
+                                     &sa_entry->ipsec_rule);
+}
+
 static int mlx5e_xfrm_add_state(struct xfrm_state *x)
 {
        struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
-       struct net_device *netdev = x->xso.dev;
+       struct net_device *netdev = x->xso.real_dev;
        struct mlx5_accel_esp_xfrm_attrs attrs;
        struct mlx5e_priv *priv;
        unsigned int sa_handle;
@@ -329,10 +352,15 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
                goto err_xfrm;
        }
 
+       sa_entry->ipsec_obj_id = sa_handle;
+       err = mlx5e_xfrm_fs_add_rule(priv, sa_entry);
+       if (err)
+               goto err_hw_ctx;
+
        if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
                err = mlx5e_ipsec_sadb_rx_add(sa_entry, sa_handle);
                if (err)
-                       goto err_hw_ctx;
+                       goto err_add_rule;
        } else {
                sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
                                mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
@@ -341,8 +369,10 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
        x->xso.offload_handle = (unsigned long)sa_entry;
        goto out;
 
+err_add_rule:
+       mlx5e_xfrm_fs_del_rule(priv, sa_entry);
 err_hw_ctx:
-       mlx5_accel_esp_free_hw_context(sa_entry->hw_context);
+       mlx5_accel_esp_free_hw_context(priv->mdev, sa_entry->hw_context);
 err_xfrm:
        mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
 err_sa_entry:
@@ -366,13 +396,15 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
 static void mlx5e_xfrm_free_state(struct xfrm_state *x)
 {
        struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+       struct mlx5e_priv *priv = netdev_priv(x->xso.dev);
 
        if (!sa_entry)
                return;
 
        if (sa_entry->hw_context) {
                flush_workqueue(sa_entry->ipsec->wq);
-               mlx5_accel_esp_free_hw_context(sa_entry->hw_context);
+               mlx5e_xfrm_fs_del_rule(priv, sa_entry);
+               mlx5_accel_esp_free_hw_context(sa_entry->xfrm->mdev, sa_entry->hw_context);
                mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
        }
 
@@ -405,6 +437,8 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
                kfree(ipsec);
                return -ENOMEM;
        }
+
+       mlx5e_accel_ipsec_fs_init(priv);
        netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
        return 0;
 }
@@ -416,6 +450,7 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
        if (!ipsec)
                return;
 
+       mlx5e_accel_ipsec_fs_cleanup(priv);
        destroy_workqueue(ipsec->wq);
 
        ida_destroy(&ipsec->halloc);
index c85151a..0fc8b4d 100644 (file)
@@ -75,6 +75,8 @@ struct mlx5e_ipsec_stats {
        u64 ipsec_cmd_drop;
 };
 
+struct mlx5e_accel_fs_esp;
+
 struct mlx5e_ipsec {
        struct mlx5e_priv *en_priv;
        DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
@@ -84,6 +86,7 @@ struct mlx5e_ipsec {
        struct mlx5e_ipsec_sw_stats sw_stats;
        struct mlx5e_ipsec_stats stats;
        struct workqueue_struct *wq;
+       struct mlx5e_accel_fs_esp *rx_fs;
 };
 
 struct mlx5e_ipsec_esn_state {
@@ -92,6 +95,11 @@ struct mlx5e_ipsec_esn_state {
        u8 overlap: 1;
 };
 
+struct mlx5e_ipsec_rule {
+       struct mlx5_flow_handle *rule;
+       struct mlx5_modify_hdr *set_modify_hdr;
+};
+
 struct mlx5e_ipsec_sa_entry {
        struct hlist_node hlist; /* Item in SADB_RX hashtable */
        struct mlx5e_ipsec_esn_state esn_state;
@@ -102,6 +110,8 @@ struct mlx5e_ipsec_sa_entry {
        void *hw_context;
        void (*set_iv_op)(struct sk_buff *skb, struct xfrm_state *x,
                          struct xfrm_offload *xo);
+       u32 ipsec_obj_id;
+       struct mlx5e_ipsec_rule ipsec_rule;
 };
 
 void mlx5e_ipsec_build_inverse_table(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
new file mode 100644 (file)
index 0000000..429428b
--- /dev/null
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#include <linux/netdevice.h>
+#include "accel/ipsec_offload.h"
+#include "ipsec_fs.h"
+#include "fs_core.h"
+
+#define NUM_IPSEC_FTE BIT(15)
+
+enum accel_fs_esp_type {
+       ACCEL_FS_ESP4,
+       ACCEL_FS_ESP6,
+       ACCEL_FS_ESP_NUM_TYPES,
+};
+
+struct mlx5e_ipsec_rx_err {
+       struct mlx5_flow_table *ft;
+       struct mlx5_flow_handle *rule;
+       struct mlx5_modify_hdr *copy_modify_hdr;
+};
+
+struct mlx5e_accel_fs_esp_prot {
+       struct mlx5_flow_table *ft;
+       struct mlx5_flow_group *miss_group;
+       struct mlx5_flow_handle *miss_rule;
+       struct mlx5_flow_destination default_dest;
+       struct mlx5e_ipsec_rx_err rx_err;
+       u32 refcnt;
+       struct mutex prot_mutex; /* protect ESP4/ESP6 protocol */
+};
+
+struct mlx5e_accel_fs_esp {
+       struct mlx5e_accel_fs_esp_prot fs_prot[ACCEL_FS_ESP_NUM_TYPES];
+};
+
+/* IPsec RX flow steering */
+static enum mlx5e_traffic_types fs_esp2tt(enum accel_fs_esp_type i)
+{
+       if (i == ACCEL_FS_ESP4)
+               return MLX5E_TT_IPV4_IPSEC_ESP;
+       return MLX5E_TT_IPV6_IPSEC_ESP;
+}
+
+static int rx_err_add_rule(struct mlx5e_priv *priv,
+                          struct mlx5e_accel_fs_esp_prot *fs_prot,
+                          struct mlx5e_ipsec_rx_err *rx_err)
+{
+       u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5_flow_act flow_act = {};
+       struct mlx5_modify_hdr *modify_hdr;
+       struct mlx5_flow_handle *fte;
+       struct mlx5_flow_spec *spec;
+       int err = 0;
+
+       spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+       if (!spec)
+               return -ENOMEM;
+
+       /* Action to copy 7 bit ipsec_syndrome to regB[0:6] */
+       MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
+       MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
+       MLX5_SET(copy_action_in, action, src_offset, 0);
+       MLX5_SET(copy_action_in, action, length, 7);
+       MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+       MLX5_SET(copy_action_in, action, dst_offset, 0);
+
+       modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
+                                             1, action);
+
+       if (IS_ERR(modify_hdr)) {
+               err = PTR_ERR(modify_hdr);
+               netdev_err(priv->netdev,
+                          "fail to alloc ipsec copy modify_header_id err=%d\n", err);
+               goto out_spec;
+       }
+
+       /* create fte */
+       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+                         MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+       flow_act.modify_hdr = modify_hdr;
+       fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act,
+                                 &fs_prot->default_dest, 1);
+       if (IS_ERR(fte)) {
+               err = PTR_ERR(fte);
+               netdev_err(priv->netdev, "fail to add ipsec rx err copy rule err=%d\n", err);
+               goto out;
+       }
+
+       rx_err->rule = fte;
+       rx_err->copy_modify_hdr = modify_hdr;
+
+out:
+       if (err)
+               mlx5_modify_header_dealloc(mdev, modify_hdr);
+out_spec:
+       kfree(spec);
+       return err;
+}
+
+static void rx_err_del_rule(struct mlx5e_priv *priv,
+                           struct mlx5e_ipsec_rx_err *rx_err)
+{
+       if (rx_err->rule) {
+               mlx5_del_flow_rules(rx_err->rule);
+               rx_err->rule = NULL;
+       }
+
+       if (rx_err->copy_modify_hdr) {
+               mlx5_modify_header_dealloc(priv->mdev, rx_err->copy_modify_hdr);
+               rx_err->copy_modify_hdr = NULL;
+       }
+}
+
+static void rx_err_destroy_ft(struct mlx5e_priv *priv, struct mlx5e_ipsec_rx_err *rx_err)
+{
+       rx_err_del_rule(priv, rx_err);
+
+       if (rx_err->ft) {
+               mlx5_destroy_flow_table(rx_err->ft);
+               rx_err->ft = NULL;
+       }
+}
+
+static int rx_err_create_ft(struct mlx5e_priv *priv,
+                           struct mlx5e_accel_fs_esp_prot *fs_prot,
+                           struct mlx5e_ipsec_rx_err *rx_err)
+{
+       struct mlx5_flow_table_attr ft_attr = {};
+       struct mlx5_flow_table *ft;
+       int err;
+
+       ft_attr.max_fte = 1;
+       ft_attr.autogroup.max_num_groups = 1;
+       ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
+       ft_attr.prio = MLX5E_NIC_PRIO;
+       ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+       if (IS_ERR(ft)) {
+               err = PTR_ERR(ft);
+               netdev_err(priv->netdev, "fail to create ipsec rx inline ft err=%d\n", err);
+               return err;
+       }
+
+       rx_err->ft = ft;
+       err = rx_err_add_rule(priv, fs_prot, rx_err);
+       if (err)
+               goto out_err;
+
+       return 0;
+
+out_err:
+       mlx5_destroy_flow_table(ft);
+       rx_err->ft = NULL;
+       return err;
+}
+
+static void rx_fs_destroy(struct mlx5e_accel_fs_esp_prot *fs_prot)
+{
+       if (fs_prot->miss_rule) {
+               mlx5_del_flow_rules(fs_prot->miss_rule);
+               fs_prot->miss_rule = NULL;
+       }
+
+       if (fs_prot->miss_group) {
+               mlx5_destroy_flow_group(fs_prot->miss_group);
+               fs_prot->miss_group = NULL;
+       }
+
+       if (fs_prot->ft) {
+               mlx5_destroy_flow_table(fs_prot->ft);
+               fs_prot->ft = NULL;
+       }
+}
+
+static int rx_fs_create(struct mlx5e_priv *priv,
+                       struct mlx5e_accel_fs_esp_prot *fs_prot)
+{
+       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+       struct mlx5_flow_table_attr ft_attr = {};
+       struct mlx5_flow_group *miss_group;
+       struct mlx5_flow_handle *miss_rule;
+       MLX5_DECLARE_FLOW_ACT(flow_act);
+       struct mlx5_flow_spec *spec;
+       struct mlx5_flow_table *ft;
+       u32 *flow_group_in;
+       int err = 0;
+
+       flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+       spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+       if (!flow_group_in || !spec) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       /* Create FT */
+       ft_attr.max_fte = NUM_IPSEC_FTE;
+       ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
+       ft_attr.prio = MLX5E_NIC_PRIO;
+       ft_attr.autogroup.num_reserved_entries = 1;
+       ft_attr.autogroup.max_num_groups = 1;
+       ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+       if (IS_ERR(ft)) {
+               err = PTR_ERR(ft);
+               netdev_err(priv->netdev, "fail to create ipsec rx ft err=%d\n", err);
+               goto out;
+       }
+       fs_prot->ft = ft;
+
+       /* Create miss_group */
+       MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
+       MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
+       miss_group = mlx5_create_flow_group(ft, flow_group_in);
+       if (IS_ERR(miss_group)) {
+               err = PTR_ERR(miss_group);
+               netdev_err(priv->netdev, "fail to create ipsec rx miss_group err=%d\n", err);
+               goto out;
+       }
+       fs_prot->miss_group = miss_group;
+
+       /* Create miss rule */
+       miss_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1);
+       if (IS_ERR(miss_rule)) {
+               err = PTR_ERR(miss_rule);
+               netdev_err(priv->netdev, "fail to create ipsec rx miss_rule err=%d\n", err);
+               goto out;
+       }
+       fs_prot->miss_rule = miss_rule;
+
+out:
+       kfree(flow_group_in);
+       kfree(spec);
+       return err;
+}
+
+static int rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+{
+       struct mlx5e_accel_fs_esp_prot *fs_prot;
+       struct mlx5e_accel_fs_esp *accel_esp;
+
+       accel_esp = priv->ipsec->rx_fs;
+
+       /* The netdev unreg already happened, so all offloaded rule are already removed */
+       fs_prot = &accel_esp->fs_prot[type];
+
+       rx_fs_destroy(fs_prot);
+
+       rx_err_destroy_ft(priv, &fs_prot->rx_err);
+
+       return 0;
+}
+
+static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+{
+       struct mlx5e_accel_fs_esp_prot *fs_prot;
+       struct mlx5e_accel_fs_esp *accel_esp;
+       int err;
+
+       accel_esp = priv->ipsec->rx_fs;
+       fs_prot = &accel_esp->fs_prot[type];
+
+       fs_prot->default_dest = mlx5e_ttc_get_default_dest(priv, fs_esp2tt(type));
+
+       err = rx_err_create_ft(priv, fs_prot, &fs_prot->rx_err);
+       if (err)
+               return err;
+
+       err = rx_fs_create(priv, fs_prot);
+       if (err)
+               rx_destroy(priv, type);
+
+       return err;
+}
+
+static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+{
+       struct mlx5e_accel_fs_esp_prot *fs_prot;
+       struct mlx5_flow_destination dest = {};
+       struct mlx5e_accel_fs_esp *accel_esp;
+       int err = 0;
+
+       accel_esp = priv->ipsec->rx_fs;
+       fs_prot = &accel_esp->fs_prot[type];
+       mutex_lock(&fs_prot->prot_mutex);
+       if (fs_prot->refcnt++)
+               goto out;
+
+       /* create FT */
+       err = rx_create(priv, type);
+       if (err) {
+               fs_prot->refcnt--;
+               goto out;
+       }
+
+       /* connect */
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+       dest.ft = fs_prot->ft;
+       mlx5e_ttc_fwd_dest(priv, fs_esp2tt(type), &dest);
+
+out:
+       mutex_unlock(&fs_prot->prot_mutex);
+       return err;
+}
+
+static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+{
+       struct mlx5e_accel_fs_esp_prot *fs_prot;
+       struct mlx5e_accel_fs_esp *accel_esp;
+
+       accel_esp = priv->ipsec->rx_fs;
+       fs_prot = &accel_esp->fs_prot[type];
+       mutex_lock(&fs_prot->prot_mutex);
+       if (--fs_prot->refcnt)
+               goto out;
+
+       /* disconnect */
+       mlx5e_ttc_fwd_default_dest(priv, fs_esp2tt(type));
+
+       /* remove FT */
+       rx_destroy(priv, type);
+
+out:
+       mutex_unlock(&fs_prot->prot_mutex);
+}
+
+static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
+                            u32 ipsec_obj_id,
+                            struct mlx5_flow_spec *spec,
+                            struct mlx5_flow_act *flow_act)
+{
+       u8 ip_version = attrs->is_ipv6 ? 6 : 4;
+
+       spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS;
+
+       /* ip_version */
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
+       MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ip_version);
+
+       /* Non fragmented */
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
+       MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
+
+       /* ESP header */
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
+       MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
+
+       /* SPI number */
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
+       MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi,
+                be32_to_cpu(attrs->spi));
+
+       if (ip_version == 4) {
+               memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+                                   outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
+                      &attrs->saddr.a4, 4);
+               memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+                                   outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+                      &attrs->daddr.a4, 4);
+               MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+                                outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+               MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+                                outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+       } else {
+               memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+                                   outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+                      &attrs->saddr.a6, 16);
+               memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+                                   outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+                      &attrs->daddr.a6, 16);
+               memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+                                   outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+                      0xff, 16);
+               memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+                                   outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+                      0xff, 16);
+       }
+
+       flow_act->ipsec_obj_id = ipsec_obj_id;
+       flow_act->flags |= FLOW_ACT_NO_APPEND;
+}
+
+static int rx_add_rule(struct mlx5e_priv *priv,
+                      struct mlx5_accel_esp_xfrm_attrs *attrs,
+                      u32 ipsec_obj_id,
+                      struct mlx5e_ipsec_rule *ipsec_rule)
+{
+       u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+       struct mlx5_modify_hdr *modify_hdr = NULL;
+       struct mlx5e_accel_fs_esp_prot *fs_prot;
+       struct mlx5_flow_destination dest = {};
+       struct mlx5e_accel_fs_esp *accel_esp;
+       struct mlx5_flow_act flow_act = {};
+       struct mlx5_flow_handle *rule;
+       enum accel_fs_esp_type type;
+       struct mlx5_flow_spec *spec;
+       int err = 0;
+
+       accel_esp = priv->ipsec->rx_fs;
+       type = attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4;
+       fs_prot = &accel_esp->fs_prot[type];
+
+       err = rx_ft_get(priv, type);
+       if (err)
+               return err;
+
+       spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+       if (!spec) {
+               err = -ENOMEM;
+               goto out_err;
+       }
+
+       setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);
+
+       /* Set 1  bit ipsec marker */
+       /* Set 24 bit ipsec_obj_id */
+       MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+       MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+       MLX5_SET(set_action_in, action, data, (ipsec_obj_id << 1) | 0x1);
+       MLX5_SET(set_action_in, action, offset, 7);
+       MLX5_SET(set_action_in, action, length, 25);
+
+       modify_hdr = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL,
+                                             1, action);
+       if (IS_ERR(modify_hdr)) {
+               err = PTR_ERR(modify_hdr);
+               netdev_err(priv->netdev,
+                          "fail to alloc ipsec set modify_header_id err=%d\n", err);
+               modify_hdr = NULL;
+               goto out_err;
+       }
+
+       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+                         MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT |
+                         MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+       flow_act.modify_hdr = modify_hdr;
+       dest.ft = fs_prot->rx_err.ft;
+       rule = mlx5_add_flow_rules(fs_prot->ft, spec, &flow_act, &dest, 1);
+       if (IS_ERR(rule)) {
+               err = PTR_ERR(rule);
+               netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
+                          attrs->action, err);
+               goto out_err;
+       }
+
+       ipsec_rule->rule = rule;
+       ipsec_rule->set_modify_hdr = modify_hdr;
+       goto out;
+
+out_err:
+       if (modify_hdr)
+               mlx5_modify_header_dealloc(priv->mdev, modify_hdr);
+       rx_ft_put(priv, type);
+
+out:
+       kvfree(spec);
+       return err;
+}
+
+static void rx_del_rule(struct mlx5e_priv *priv,
+                       struct mlx5_accel_esp_xfrm_attrs *attrs,
+                       struct mlx5e_ipsec_rule *ipsec_rule)
+{
+       mlx5_del_flow_rules(ipsec_rule->rule);
+       ipsec_rule->rule = NULL;
+
+       mlx5_modify_header_dealloc(priv->mdev, ipsec_rule->set_modify_hdr);
+       ipsec_rule->set_modify_hdr = NULL;
+
+       rx_ft_put(priv, attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
+}
+
+int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
+                                 struct mlx5_accel_esp_xfrm_attrs *attrs,
+                                 u32 ipsec_obj_id,
+                                 struct mlx5e_ipsec_rule *ipsec_rule)
+{
+       if (!priv->ipsec->rx_fs || attrs->action != MLX5_ACCEL_ESP_ACTION_DECRYPT)
+               return -EOPNOTSUPP;
+
+       return rx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
+}
+
+void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
+                                  struct mlx5_accel_esp_xfrm_attrs *attrs,
+                                  struct mlx5e_ipsec_rule *ipsec_rule)
+{
+       if (!priv->ipsec->rx_fs)
+               return;
+
+       rx_del_rule(priv, attrs, ipsec_rule);
+}
+
+static void fs_cleanup_rx(struct mlx5e_priv *priv)
+{
+       struct mlx5e_accel_fs_esp_prot *fs_prot;
+       struct mlx5e_accel_fs_esp *accel_esp;
+       enum accel_fs_esp_type i;
+
+       accel_esp = priv->ipsec->rx_fs;
+       for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
+               fs_prot = &accel_esp->fs_prot[i];
+               mutex_destroy(&fs_prot->prot_mutex);
+               WARN_ON(fs_prot->refcnt);
+       }
+       kfree(priv->ipsec->rx_fs);
+       priv->ipsec->rx_fs = NULL;
+}
+
+static int fs_init_rx(struct mlx5e_priv *priv)
+{
+       struct mlx5e_accel_fs_esp_prot *fs_prot;
+       struct mlx5e_accel_fs_esp *accel_esp;
+       enum accel_fs_esp_type i;
+
+       priv->ipsec->rx_fs =
+               kzalloc(sizeof(struct mlx5e_accel_fs_esp), GFP_KERNEL);
+       if (!priv->ipsec->rx_fs)
+               return -ENOMEM;
+
+       accel_esp = priv->ipsec->rx_fs;
+       for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
+               fs_prot = &accel_esp->fs_prot[i];
+               mutex_init(&fs_prot->prot_mutex);
+       }
+
+       return 0;
+}
+
+void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv)
+{
+       if (!priv->ipsec->rx_fs)
+               return;
+
+       fs_cleanup_rx(priv);
+}
+
+int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv)
+{
+       if (!mlx5_is_ipsec_device(priv->mdev) || !priv->ipsec)
+               return -EOPNOTSUPP;
+
+       return fs_init_rx(priv);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.h
new file mode 100644 (file)
index 0000000..3389b3b
--- /dev/null
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_IPSEC_STEERING_H__
+#define __MLX5_IPSEC_STEERING_H__
+
+#include "en.h"
+#include "ipsec.h"
+#include "accel/ipsec_offload.h"
+#include "en/fs.h"
+
+#ifdef CONFIG_MLX5_EN_IPSEC
+void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv);
+int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv);
+int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
+                                 struct mlx5_accel_esp_xfrm_attrs *attrs,
+                                 u32 ipsec_obj_id,
+                                 struct mlx5e_ipsec_rule *ipsec_rule);
+void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
+                                  struct mlx5_accel_esp_xfrm_attrs *attrs,
+                                  struct mlx5e_ipsec_rule *ipsec_rule);
+#else
+static inline void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv) {}
+static inline int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv) { return 0; }
+#endif
+#endif /* __MLX5_IPSEC_STEERING_H__ */
index 824b87a..93a8d68 100644 (file)
@@ -360,6 +360,62 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
        return skb;
 }
 
+enum {
+       MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
+       MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
+       MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
+};
+
+void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
+                                      struct sk_buff *skb,
+                                      struct mlx5_cqe64 *cqe)
+{
+       u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
+       u8 ipsec_syndrome = ipsec_meta_data & 0xFF;
+       struct mlx5e_priv *priv;
+       struct xfrm_offload *xo;
+       struct xfrm_state *xs;
+       struct sec_path *sp;
+       u32  sa_handle;
+
+       sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data);
+       priv = netdev_priv(netdev);
+       sp = secpath_set(skb);
+       if (unlikely(!sp)) {
+               atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
+               return;
+       }
+
+       xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
+       if (unlikely(!xs)) {
+               atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
+               return;
+       }
+
+       sp = skb_sec_path(skb);
+       sp->xvec[sp->len++] = xs;
+       sp->olen++;
+
+       xo = xfrm_offload(skb);
+       xo->flags = CRYPTO_DONE;
+
+       switch (ipsec_syndrome & MLX5_IPSEC_METADATA_SYNDROM_MASK) {
+       case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
+               xo->status = CRYPTO_SUCCESS;
+               if (WARN_ON_ONCE(priv->ipsec->no_trailer))
+                       xo->flags |= XFRM_ESP_NO_TRAILER;
+               break;
+       case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
+               xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
+               break;
+       case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
+               xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
+               break;
+       default:
+               atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
+       }
+}
+
 bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
                               netdev_features_t features)
 {
index ba02643..2a47673 100644 (file)
 #ifndef __MLX5E_IPSEC_RXTX_H__
 #define __MLX5E_IPSEC_RXTX_H__
 
-#ifdef CONFIG_MLX5_EN_IPSEC
-
 #include <linux/skbuff.h>
 #include <net/xfrm.h>
 #include "en.h"
 #include "en/txrx.h"
 
+#define MLX5_IPSEC_METADATA_MARKER_MASK      (0x80)
+#define MLX5_IPSEC_METADATA_SYNDROM_MASK     (0x7F)
+#define MLX5_IPSEC_METADATA_HANDLE(metadata) (((metadata) >> 8) & 0xFF)
+
+#ifdef CONFIG_MLX5_EN_IPSEC
+
 struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
                                          struct sk_buff *skb, u32 *cqe_bcnt);
 void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -55,7 +59,21 @@ void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
 bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
                               struct mlx5_wqe_eth_seg *eseg,
                               struct sk_buff *skb);
+void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
+                                      struct sk_buff *skb,
+                                      struct mlx5_cqe64 *cqe);
+static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe)
+{
+       return !!(MLX5_IPSEC_METADATA_MARKER_MASK & be32_to_cpu(cqe->ft_metadata));
+}
+#else
+static inline
+void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
+                                      struct sk_buff *skb,
+                                      struct mlx5_cqe64 *cqe)
+{}
 
+static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
 #endif /* CONFIG_MLX5_EN_IPSEC */
 
 #endif /* __MLX5E_IPSEC_RXTX_H__ */
index 452fcf5..1b39269 100644 (file)
@@ -3,31 +3,8 @@
 
 #include "en.h"
 #include "en_accel/ktls.h"
-
-u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq)
-{
-       u16 num_dumps, stop_room = 0;
-
-       num_dumps = mlx5e_ktls_dumps_num_wqes(sq, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
-
-       stop_room += mlx5e_stop_room_for_wqe(MLX5E_KTLS_STATIC_WQEBBS);
-       stop_room += mlx5e_stop_room_for_wqe(MLX5E_KTLS_PROGRESS_WQEBBS);
-       stop_room += num_dumps * mlx5e_stop_room_for_wqe(MLX5E_KTLS_DUMP_WQEBBS);
-
-       return stop_room;
-}
-
-static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
-{
-       u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
-       void *tisc;
-
-       tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
-
-       MLX5_SET(tisc, tisc, tls_en, 1);
-
-       return mlx5e_create_tis(mdev, in, tisn);
-}
+#include "en_accel/ktls_utils.h"
+#include "en_accel/fs_tcp.h"
 
 static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
                          enum tls_offload_ctx_dir direction,
@@ -35,42 +12,17 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
                          u32 start_offload_tcp_sn)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
-       struct mlx5e_ktls_offload_context_tx *tx_priv;
-       struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;
 
-       if (WARN_ON(direction != TLS_OFFLOAD_CTX_DIR_TX))
-               return -EINVAL;
-
        if (WARN_ON(!mlx5e_ktls_type_check(mdev, crypto_info)))
                return -EOPNOTSUPP;
 
-       tx_priv = kvzalloc(sizeof(*tx_priv), GFP_KERNEL);
-       if (!tx_priv)
-               return -ENOMEM;
-
-       tx_priv->expected_seq = start_offload_tcp_sn;
-       tx_priv->crypto_info  = *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
-       mlx5e_set_ktls_tx_priv_ctx(tls_ctx, tx_priv);
-
-       /* tc and underlay_qpn values are not in use for tls tis */
-       err = mlx5e_ktls_create_tis(mdev, &tx_priv->tisn);
-       if (err)
-               goto create_tis_fail;
-
-       err = mlx5_ktls_create_key(mdev, crypto_info, &tx_priv->key_id);
-       if (err)
-               goto encryption_key_create_fail;
+       if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+               err = mlx5e_ktls_add_tx(netdev, sk, crypto_info, start_offload_tcp_sn);
+       else
+               err = mlx5e_ktls_add_rx(netdev, sk, crypto_info, start_offload_tcp_sn);
 
-       mlx5e_ktls_tx_offload_set_pending(tx_priv);
-
-       return 0;
-
-encryption_key_create_fail:
-       mlx5e_destroy_tis(priv->mdev, tx_priv->tisn);
-create_tis_fail:
-       kvfree(tx_priv);
        return err;
 }
 
@@ -78,29 +30,72 @@ static void mlx5e_ktls_del(struct net_device *netdev,
                           struct tls_context *tls_ctx,
                           enum tls_offload_ctx_dir direction)
 {
-       struct mlx5e_priv *priv = netdev_priv(netdev);
-       struct mlx5e_ktls_offload_context_tx *tx_priv =
-               mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
+       if (direction == TLS_OFFLOAD_CTX_DIR_TX)
+               mlx5e_ktls_del_tx(netdev, tls_ctx);
+       else
+               mlx5e_ktls_del_rx(netdev, tls_ctx);
+}
 
-       mlx5e_destroy_tis(priv->mdev, tx_priv->tisn);
-       mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id);
-       kvfree(tx_priv);
+static int mlx5e_ktls_resync(struct net_device *netdev,
+                            struct sock *sk, u32 seq, u8 *rcd_sn,
+                            enum tls_offload_ctx_dir direction)
+{
+       if (unlikely(direction != TLS_OFFLOAD_CTX_DIR_RX))
+               return -EOPNOTSUPP;
+
+       mlx5e_ktls_rx_resync(netdev, sk, seq, rcd_sn);
+       return 0;
 }
 
 static const struct tlsdev_ops mlx5e_ktls_ops = {
        .tls_dev_add = mlx5e_ktls_add,
        .tls_dev_del = mlx5e_ktls_del,
+       .tls_dev_resync = mlx5e_ktls_resync,
 };
 
 void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
 {
        struct net_device *netdev = priv->netdev;
+       struct mlx5_core_dev *mdev = priv->mdev;
 
-       if (!mlx5_accel_is_ktls_device(priv->mdev))
-               return;
+       if (mlx5_accel_is_ktls_tx(mdev)) {
+               netdev->hw_features |= NETIF_F_HW_TLS_TX;
+               netdev->features    |= NETIF_F_HW_TLS_TX;
+       }
 
-       netdev->hw_features |= NETIF_F_HW_TLS_TX;
-       netdev->features    |= NETIF_F_HW_TLS_TX;
+       if (mlx5_accel_is_ktls_rx(mdev))
+               netdev->hw_features |= NETIF_F_HW_TLS_RX;
 
        netdev->tlsdev_ops = &mlx5e_ktls_ops;
 }
+
+int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       int err = 0;
+
+       mutex_lock(&priv->state_lock);
+       if (enable)
+               err = mlx5e_accel_fs_tcp_create(priv);
+       else
+               mlx5e_accel_fs_tcp_destroy(priv);
+       mutex_unlock(&priv->state_lock);
+
+       return err;
+}
+
+int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
+{
+       int err = 0;
+
+       if (priv->netdev->features & NETIF_F_HW_TLS_RX)
+               err = mlx5e_accel_fs_tcp_create(priv);
+
+       return err;
+}
+
+void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
+{
+       if (priv->netdev->features & NETIF_F_HW_TLS_RX)
+               mlx5e_accel_fs_tcp_destroy(priv);
+}
index c618089..baa58b6 100644 (file)
 #include "en.h"
 
 #ifdef CONFIG_MLX5_EN_TLS
-#include <net/tls.h>
-#include "accel/tls.h"
-#include "en_accel/tls_rxtx.h"
 
-#define MLX5E_KTLS_STATIC_UMR_WQE_SZ \
-       (offsetof(struct mlx5e_umr_wqe, tls_static_params_ctx) + \
-        MLX5_ST_SZ_BYTES(tls_static_params))
-#define MLX5E_KTLS_STATIC_WQEBBS \
-       (DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_BB))
-
-#define MLX5E_KTLS_PROGRESS_WQE_SZ \
-       (offsetof(struct mlx5e_tx_wqe, tls_progress_params_ctx) + \
-        MLX5_ST_SZ_BYTES(tls_progress_params))
-#define MLX5E_KTLS_PROGRESS_WQEBBS \
-       (DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
-
-struct mlx5e_dump_wqe {
-       struct mlx5_wqe_ctrl_seg ctrl;
-       struct mlx5_wqe_data_seg data;
-};
-
-#define MLX5E_TLS_FETCH_UMR_WQE(sq, pi) \
-       ((struct mlx5e_umr_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, MLX5E_KTLS_STATIC_UMR_WQE_SZ))
-#define MLX5E_TLS_FETCH_PROGRESS_WQE(sq, pi) \
-       ((struct mlx5e_tx_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, MLX5E_KTLS_PROGRESS_WQE_SZ))
-#define MLX5E_TLS_FETCH_DUMP_WQE(sq, pi) \
-       ((struct mlx5e_dump_wqe *)mlx5e_fetch_wqe(&(sq)->wq, pi, \
-                                                 sizeof(struct mlx5e_dump_wqe)))
-
-#define MLX5E_KTLS_DUMP_WQEBBS \
-       (DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))
-
-enum {
-       MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD     = 0,
-       MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_OFFLOAD        = 1,
-       MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_AUTHENTICATION = 2,
-};
-
-enum {
-       MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START     = 0,
-       MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING  = 1,
-       MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 2,
-};
-
-struct mlx5e_ktls_offload_context_tx {
-       struct tls_offload_context_tx *tx_ctx;
-       struct tls12_crypto_info_aes_gcm_128 crypto_info;
-       u32 expected_seq;
-       u32 tisn;
-       u32 key_id;
-       bool ctx_post_pending;
-};
-
-struct mlx5e_ktls_offload_context_tx_shadow {
-       struct tls_offload_context_tx         tx_ctx;
-       struct mlx5e_ktls_offload_context_tx *priv_tx;
-};
+void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
+int mlx5e_ktls_init_rx(struct mlx5e_priv *priv);
+void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv);
+int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable);
+#else
 
-static inline void
-mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
-                          struct mlx5e_ktls_offload_context_tx *priv_tx)
+static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
 {
-       struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx);
-       struct mlx5e_ktls_offload_context_tx_shadow *shadow;
-
-       BUILD_BUG_ON(sizeof(*shadow) > TLS_OFFLOAD_CONTEXT_SIZE_TX);
-
-       shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx;
-
-       shadow->priv_tx = priv_tx;
-       priv_tx->tx_ctx = tx_ctx;
 }
 
-static inline struct mlx5e_ktls_offload_context_tx *
-mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
+static inline int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
 {
-       struct tls_offload_context_tx *tx_ctx = tls_offload_ctx_tx(tls_ctx);
-       struct mlx5e_ktls_offload_context_tx_shadow *shadow;
-
-       BUILD_BUG_ON(sizeof(*shadow) > TLS_OFFLOAD_CONTEXT_SIZE_TX);
-
-       shadow = (struct mlx5e_ktls_offload_context_tx_shadow *)tx_ctx;
-
-       return shadow->priv_tx;
+       return 0;
 }
 
-void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv);
-void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx);
-
-bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
-                             struct sk_buff *skb, int datalen,
-                             struct mlx5e_accel_tx_tls_state *state);
-void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
-                                          struct mlx5e_tx_wqe_info *wi,
-                                          u32 *dma_fifo_cc);
-u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq);
-
-static inline u8
-mlx5e_ktls_dumps_num_wqes(struct mlx5e_txqsq *sq, unsigned int nfrags,
-                         unsigned int sync_len)
+static inline void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
 {
-       /* Given the MTU and sync_len, calculates an upper bound for the
-        * number of DUMP WQEs needed for the TX resync of a record.
-        */
-       return nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu);
 }
-#else
 
-static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
+static inline int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable)
 {
+       netdev_warn(netdev, "kTLS is not supported\n");
+       return -EOPNOTSUPP;
 }
 
-static inline void
-mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
-                                     struct mlx5e_tx_wqe_info *wi,
-                                     u32 *dma_fifo_cc) {}
 #endif
 
 #endif /* __MLX5E_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
new file mode 100644 (file)
index 0000000..acf6d80
--- /dev/null
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2019 Mellanox Technologies.
+
+#include <net/inet6_hashtables.h>
+#include "en_accel/en_accel.h"
+#include "en_accel/tls.h"
+#include "en_accel/ktls_txrx.h"
+#include "en_accel/ktls_utils.h"
+#include "en_accel/fs_tcp.h"
+
+/* Flow-steering rule that redirects the offloaded socket's RX traffic to
+ * the kTLS-enabled TIR; installed from a workqueue (sleeping context).
+ */
+struct accel_rule {
+       struct work_struct work;
+       struct mlx5e_priv *priv;
+       struct mlx5_flow_handle *rule;
+};
+
+/* Pad the progress-params buffer to the HW write unit (64B). */
+#define PROGRESS_PARAMS_WRITE_UNIT     64
+#define PROGRESS_PARAMS_PADDED_SIZE    \
+               (ALIGN(sizeof(struct mlx5_wqe_tls_progress_params_seg), \
+                      PROGRESS_PARAMS_WRITE_UNIT))
+
+/* DMA-able buffer carrying the response of one GET_PSV (progress params)
+ * query; allocated per request, freed in the completion handler.
+ */
+struct mlx5e_ktls_rx_resync_buf {
+       union {
+               struct mlx5_wqe_tls_progress_params_seg progress;
+               u8 pad[PROGRESS_PARAMS_PADDED_SIZE];
+       } ____cacheline_aligned_in_smp;
+       dma_addr_t dma_addr;
+       struct mlx5e_ktls_offload_context_rx *priv_rx;
+};
+
+enum {
+       /* Set at teardown; async paths that observe it must bail out. */
+       MLX5E_PRIV_RX_FLAG_DELETING,
+       MLX5E_NUM_PRIV_RX_FLAGS,
+};
+
+/* Per-connection RX resync state. refcnt counts outstanding GET_PSV
+ * operations that may still dereference this object (starts at 1).
+ */
+struct mlx5e_ktls_rx_resync_ctx {
+       struct tls_offload_resync_async core;
+       struct work_struct work;
+       struct mlx5e_priv *priv;
+       refcount_t refcnt;
+       __be64 sw_rcd_sn_be;
+       u32 seq;
+};
+
+/* Per-connection kTLS RX offload context, stored in the TLS driver ctx. */
+struct mlx5e_ktls_offload_context_rx {
+       struct tls12_crypto_info_aes_gcm_128 crypto_info;
+       struct accel_rule rule;
+       struct sock *sk;
+       struct mlx5e_rq_stats *stats;
+       struct completion add_ctx;
+       u32 tirn;
+       u32 key_id;
+       u32 rxq;
+       DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS);
+
+       /* resync */
+       struct mlx5e_ktls_rx_resync_ctx resync;
+};
+
+/* Create a dedicated TLS-enabled TIR on top of the RX queue's RQT.
+ * Returns 0 on success, negative errno otherwise; *tirn receives the
+ * TIR number on success.
+ */
+static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn)
+{
+       int err, inlen;
+       void *tirc;
+       u32 *in;
+
+       inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+       MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.td.tdn);
+       MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+       MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
+       MLX5_SET(tirc, tirc, indirect_table, rqtn);
+       /* The tls_en bit is what distinguishes this TIR from regular ones. */
+       MLX5_SET(tirc, tirc, tls_en, 1);
+       MLX5_SET(tirc, tirc, self_lb_block,
+                MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST |
+                MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST);
+
+       err = mlx5_core_create_tir(mdev, in, tirn);
+
+       kvfree(in);
+       return err;
+}
+
+/* Work handler: install the flow-steering rule that steers the socket's
+ * traffic to the TLS TIR. Runs in process context (may sleep).
+ * Always signals add_ctx so mlx5e_ktls_del_rx() can wait for the add flow.
+ */
+static void accel_rule_handle_work(struct work_struct *work)
+{
+       struct mlx5e_ktls_offload_context_rx *priv_rx;
+       struct accel_rule *accel_rule;
+       struct mlx5_flow_handle *rule;
+
+       accel_rule = container_of(work, struct accel_rule, work);
+       priv_rx = container_of(accel_rule, struct mlx5e_ktls_offload_context_rx, rule);
+       if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
+               goto out;
+
+       rule = mlx5e_accel_fs_add_sk(accel_rule->priv, priv_rx->sk,
+                                    priv_rx->tirn, MLX5_FS_DEFAULT_FLOW_TAG);
+       /* On failure, accel_rule->rule stays NULL and del_rx skips removal. */
+       if (!IS_ERR_OR_NULL(rule))
+               accel_rule->rule = rule;
+out:
+       complete(&priv_rx->add_ctx);
+}
+
+/* Prepare the accel_rule work item. NOTE(review): the sk parameter is
+ * currently unused — the socket is taken from priv_rx in the work handler.
+ */
+static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv,
+                           struct sock *sk)
+{
+       INIT_WORK(&rule->work, accel_rule_handle_work);
+       rule->priv = priv;
+}
+
+/* Record the WQE metadata at slot @pi for later completion handling. */
+static void icosq_fill_wi(struct mlx5e_icosq *sq, u16 pi,
+                         struct mlx5e_icosq_wqe_info *wi)
+{
+       memcpy(&sq->db.wqe_info[pi], wi, sizeof(*wi));
+}
+
+/* Post a SET static-params UMR WQE carrying the crypto material for the
+ * connection. Called under the channel's async_icosq_lock.
+ * Returns the control segment to ring the doorbell with, or
+ * ERR_PTR(-ENOSPC) when the SQ has no room (SQ state is left untouched).
+ */
+static struct mlx5_wqe_ctrl_seg *
+post_static_params(struct mlx5e_icosq *sq,
+                  struct mlx5e_ktls_offload_context_rx *priv_rx)
+{
+       struct mlx5e_set_tls_static_params_wqe *wqe;
+       struct mlx5e_icosq_wqe_info wi;
+       u16 pi, num_wqebbs, room;
+
+       num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
+       room = mlx5e_stop_room_for_wqe(num_wqebbs);
+       if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
+               return ERR_PTR(-ENOSPC);
+
+       pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
+       wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
+       mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info,
+                                      priv_rx->tirn, priv_rx->key_id,
+                                      priv_rx->resync.seq, false,
+                                      TLS_OFFLOAD_CTX_DIR_RX);
+       /* Keep priv_rx reachable from the completion path. */
+       wi = (struct mlx5e_icosq_wqe_info) {
+               .wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS,
+               .num_wqebbs = num_wqebbs,
+               .tls_set_params.priv_rx = priv_rx,
+       };
+       icosq_fill_wi(sq, pi, &wi);
+       sq->pc += num_wqebbs;
+
+       return &wqe->ctrl;
+}
+
+/* Post a SET progress-params (PSV) WQE initializing the HW record tracker
+ * at @next_record_tcp_sn. Called under the channel's async_icosq_lock.
+ * Returns the control segment for the doorbell, or ERR_PTR(-ENOSPC).
+ */
+static struct mlx5_wqe_ctrl_seg *
+post_progress_params(struct mlx5e_icosq *sq,
+                    struct mlx5e_ktls_offload_context_rx *priv_rx,
+                    u32 next_record_tcp_sn)
+{
+       struct mlx5e_set_tls_progress_params_wqe *wqe;
+       struct mlx5e_icosq_wqe_info wi;
+       u16 pi, num_wqebbs, room;
+
+       num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
+       room = mlx5e_stop_room_for_wqe(num_wqebbs);
+       if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
+               return ERR_PTR(-ENOSPC);
+
+       pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
+       wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
+       mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_rx->tirn, false,
+                                        next_record_tcp_sn,
+                                        TLS_OFFLOAD_CTX_DIR_RX);
+       wi = (struct mlx5e_icosq_wqe_info) {
+               .wqe_type = MLX5E_ICOSQ_WQE_SET_PSV_TLS,
+               .num_wqebbs = num_wqebbs,
+               .tls_set_params.priv_rx = priv_rx,
+       };
+
+       icosq_fill_wi(sq, pi, &wi);
+       sq->pc += num_wqebbs;
+
+       return &wqe->ctrl;
+}
+
+/* Post both static and progress params WQEs on the channel's async ICOSQ,
+ * then ring the doorbell once for the pair.
+ * On failure, completes add_ctx so mlx5e_ktls_del_rx() won't block waiting
+ * for a context-setup completion that will never arrive.
+ */
+static int post_rx_param_wqes(struct mlx5e_channel *c,
+                             struct mlx5e_ktls_offload_context_rx *priv_rx,
+                             u32 next_record_tcp_sn)
+{
+       struct mlx5_wqe_ctrl_seg *cseg;
+       struct mlx5e_icosq *sq;
+       int err;
+
+       err = 0;
+       sq = &c->async_icosq;
+       spin_lock(&c->async_icosq_lock);
+
+       cseg = post_static_params(sq, priv_rx);
+       if (IS_ERR(cseg))
+               goto err_out;
+       cseg = post_progress_params(sq, priv_rx, next_record_tcp_sn);
+       if (IS_ERR(cseg))
+               goto err_out;
+
+       /* Single doorbell covers both posted WQEs. */
+       mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
+unlock:
+       spin_unlock(&c->async_icosq_lock);
+
+       return err;
+
+err_out:
+       priv_rx->stats->tls_resync_req_skip++;
+       err = PTR_ERR(cseg);
+       complete(&priv_rx->add_ctx);
+       goto unlock;
+}
+
+/* Store the driver's RX offload context pointer inside the TLS RX driver
+ * context area; the BUILD_BUG_ON guards the area being large enough.
+ */
+static void
+mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx,
+                          struct mlx5e_ktls_offload_context_rx *priv_rx)
+{
+       struct mlx5e_ktls_offload_context_rx **ctx =
+               __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);
+
+       BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_rx *) >
+                    TLS_OFFLOAD_CONTEXT_SIZE_RX);
+
+       *ctx = priv_rx;
+}
+
+/* Fetch the driver's RX offload context stored in the TLS driver ctx. */
+static struct mlx5e_ktls_offload_context_rx *
+mlx5e_get_ktls_rx_priv_ctx(struct tls_context *tls_ctx)
+{
+       void *ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);
+
+       return *(struct mlx5e_ktls_offload_context_rx **)ctx;
+}
+
+/* Re-sync */
+/* Runs in work context */
+static struct mlx5_wqe_ctrl_seg *
+resync_post_get_progress_params(struct mlx5e_icosq *sq,
+                               struct mlx5e_ktls_offload_context_rx *priv_rx)
+{
+       struct mlx5e_get_tls_progress_params_wqe *wqe;
+       struct mlx5e_ktls_rx_resync_buf *buf;
+       struct mlx5e_icosq_wqe_info wi;
+       struct mlx5_wqe_ctrl_seg *cseg;
+       struct mlx5_seg_get_psv *psv;
+       struct device *pdev;
+       int err;
+       u16 pi;
+
+       buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+       if (unlikely(!buf)) {
+               err = -ENOMEM;
+               goto err_out;
+       }
+
+       pdev = sq->channel->priv->mdev->device;
+       buf->dma_addr = dma_map_single(pdev, &buf->progress,
+                                      PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) {
+               err = -ENOMEM;
+               goto err_out;
+       }
+
+       buf->priv_rx = priv_rx;
+
+       BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);
+       if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
+               err = -ENOSPC;
+               goto err_out;
+       }
+
+       pi = mlx5e_icosq_get_next_pi(sq, 1);
+       wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi);
+
+#define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS))
+
+       cseg = &wqe->ctrl;
+       cseg->opmod_idx_opcode =
+               cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_GET_PSV |
+                           (MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS << 24));
+       cseg->qpn_ds =
+               cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | GET_PSV_DS_CNT);
+
+       psv = &wqe->psv;
+       psv->num_psv      = 1 << 4;
+       psv->l_key        = sq->channel->mkey_be;
+       psv->psv_index[0] = cpu_to_be32(priv_rx->tirn);
+       psv->va           = cpu_to_be64(buf->dma_addr);
+
+       wi = (struct mlx5e_icosq_wqe_info) {
+               .wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS,
+               .num_wqebbs = 1,
+               .tls_get_params.buf = buf,
+       };
+       icosq_fill_wi(sq, pi, &wi);
+       sq->pc++;
+
+       return cseg;
+
+err_out:
+       priv_rx->stats->tls_resync_req_skip++;
+       return ERR_PTR(err);
+}
+
+/* Function is called with elevated refcount.
+ * It decreases it only if no WQE is posted.
+ *
+ * Work handler that posts the GET_PSV WQE; the refcount is released by the
+ * completion handler once the posted WQE completes.
+ */
+static void resync_handle_work(struct work_struct *work)
+{
+       struct mlx5e_ktls_offload_context_rx *priv_rx;
+       struct mlx5e_ktls_rx_resync_ctx *resync;
+       struct mlx5_wqe_ctrl_seg *cseg;
+       struct mlx5e_channel *c;
+       struct mlx5e_icosq *sq;
+       struct mlx5_wq_cyc *wq;
+
+       resync = container_of(work, struct mlx5e_ktls_rx_resync_ctx, work);
+       priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync);
+
+       if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
+               refcount_dec(&resync->refcnt);
+               return;
+       }
+
+       c = resync->priv->channels.c[priv_rx->rxq];
+       sq = &c->async_icosq;
+       wq = &sq->wq;
+
+       spin_lock(&c->async_icosq_lock);
+
+       cseg = resync_post_get_progress_params(sq, priv_rx);
+       if (IS_ERR(cseg)) {
+               /* Nothing posted: no completion will drop the ref; do it here. */
+               refcount_dec(&resync->refcnt);
+               goto unlock;
+       }
+       mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
+unlock:
+       spin_unlock(&c->async_icosq_lock);
+}
+
+/* Initialize the RX resync context: owning priv, work item, and the
+ * refcount tracking outstanding GET_PSV operations (starts at 1).
+ */
+static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
+                       struct mlx5e_priv *priv)
+{
+       resync->priv = priv;
+       refcount_set(&resync->refcnt, 1);
+       INIT_WORK(&resync->work, resync_handle_work);
+}
+
+/* Function can be called with the refcount being either elevated or not.
+ * It does not affect the refcount.
+ *
+ * SW record sequence matched HW's resync request: push the updated record
+ * number to HW via a new static-params WQE.
+ */
+static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
+                                  struct mlx5e_channel *c)
+{
+       struct tls12_crypto_info_aes_gcm_128 *info = &priv_rx->crypto_info;
+       struct mlx5_wqe_ctrl_seg *cseg;
+       struct mlx5e_icosq *sq;
+       int err;
+
+       /* Refresh the record sequence number HW will use from SW state. */
+       memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
+       err = 0;
+
+       sq = &c->async_icosq;
+       spin_lock(&c->async_icosq_lock);
+
+       cseg = post_static_params(sq, priv_rx);
+       if (IS_ERR(cseg)) {
+               priv_rx->stats->tls_resync_res_skip++;
+               err = PTR_ERR(cseg);
+               goto unlock;
+       }
+       /* Do not increment priv_rx refcnt, CQE handling is empty */
+       mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
+       priv_rx->stats->tls_resync_res_ok++;
+unlock:
+       spin_unlock(&c->async_icosq_lock);
+
+       return err;
+}
+
+/* Function is called with elevated refcount, it decreases it.
+ *
+ * Completion handler of a GET_PSV WQE: validates HW tracker/auth state and,
+ * if HW is still tracking without offload, completes the async resync
+ * request with the HW TCP sequence number.
+ *
+ * Fix vs. original: the buffer mapped with dma_map_single() in
+ * resync_post_get_progress_params() was freed without dma_unmap_single(),
+ * leaking the DMA mapping on every completion. Unmap on all exit paths.
+ */
+void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
+                                         struct mlx5e_icosq *sq)
+{
+       struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf;
+       struct mlx5e_ktls_offload_context_rx *priv_rx;
+       struct mlx5e_ktls_rx_resync_ctx *resync;
+       u8 tracker_state, auth_state, *ctx;
+       struct device *dev;
+       u32 hw_seq;
+
+       priv_rx = buf->priv_rx;
+       resync = &priv_rx->resync;
+       dev = resync->priv->mdev->device;
+       if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
+               goto out;
+
+       dma_sync_single_for_cpu(dev, buf->dma_addr,
+                               PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
+
+       ctx = buf->progress.ctx;
+       tracker_state = MLX5_GET(tls_progress_params, ctx, record_tracker_state);
+       auth_state = MLX5_GET(tls_progress_params, ctx, auth_state);
+       /* Resync response is only usable while HW tracks the stream but does
+        * not offload decryption; otherwise skip it.
+        */
+       if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
+           auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
+               priv_rx->stats->tls_resync_req_skip++;
+               goto out;
+       }
+
+       hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
+       tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
+       priv_rx->stats->tls_resync_req_end++;
+out:
+       refcount_dec(&resync->refcnt);
+       dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE,
+                        DMA_FROM_DEVICE);
+       kfree(buf);
+}
+
+/* Runs in NAPI.
+ * Function elevates the refcount, unless no work is queued.
+ *
+ * Queue a GET_PSV work item for the socket's offload context. Returns true
+ * if the context exists and is not being deleted (regardless of whether the
+ * work was newly queued).
+ */
+static bool resync_queue_get_psv(struct sock *sk)
+{
+       struct mlx5e_ktls_offload_context_rx *priv_rx;
+       struct mlx5e_ktls_rx_resync_ctx *resync;
+
+       priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk));
+       if (unlikely(!priv_rx))
+               return false;
+
+       if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
+               return false;
+
+       resync = &priv_rx->resync;
+       refcount_inc(&resync->refcnt);
+       /* Work already pending: drop the extra ref we just took. */
+       if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))
+               refcount_dec(&resync->refcnt);
+
+       return true;
+}
+
+/* Runs in NAPI */
+static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
+{
+       struct ethhdr *eth = (struct ethhdr *)(skb->data);
+       struct net_device *netdev = rq->netdev;
+       struct sock *sk = NULL;
+       unsigned int datalen;
+       struct iphdr *iph;
+       struct tcphdr *th;
+       __be32 seq;
+       int depth = 0;
+
+       __vlan_get_protocol(skb, eth->h_proto, &depth);
+       iph = (struct iphdr *)(skb->data + depth);
+
+       if (iph->version == 4) {
+               depth += sizeof(struct iphdr);
+               th = (void *)iph + sizeof(struct iphdr);
+
+               sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
+                                            iph->saddr, th->source, iph->daddr,
+                                            th->dest, netdev->ifindex);
+#if IS_ENABLED(CONFIG_IPV6)
+       } else {
+               struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;
+
+               depth += sizeof(struct ipv6hdr);
+               th = (void *)ipv6h + sizeof(struct ipv6hdr);
+
+               sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
+                                               &ipv6h->saddr, th->source,
+                                               &ipv6h->daddr, ntohs(th->dest),
+                                               netdev->ifindex, 0);
+#endif
+       }
+
+       depth += sizeof(struct tcphdr);
+
+       if (unlikely(!sk || sk->sk_state == TCP_TIME_WAIT))
+               return;
+
+       if (unlikely(!resync_queue_get_psv(sk)))
+               return;
+
+       skb->sk = sk;
+       skb->destructor = sock_edemux;
+
+       seq = th->seq;
+       datalen = skb->len - depth;
+       tls_offload_rx_resync_async_request_start(sk, seq, datalen);
+       rq->stats->tls_resync_req_start++;
+}
+
+/* TLS-stack callback: SW found the record boundary for the sequence HW
+ * asked about; store it and push the updated state to HW.
+ */
+void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
+                         u32 seq, u8 *rcd_sn)
+{
+       struct mlx5e_ktls_offload_context_rx *priv_rx;
+       struct mlx5e_ktls_rx_resync_ctx *resync;
+       struct mlx5e_priv *priv;
+       struct mlx5e_channel *c;
+
+       priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk));
+       if (unlikely(!priv_rx))
+               return;
+
+       resync = &priv_rx->resync;
+       resync->sw_rcd_sn_be = *(__be64 *)rcd_sn;
+       resync->seq = seq;
+
+       priv = netdev_priv(netdev);
+       c = priv->channels.c[priv_rx->rxq];
+
+       resync_handle_seq_match(priv_rx, c);
+}
+
+/* End of resync section */
+
+/* Per-packet RX hook: act on the CQE's TLS offload state — mark decrypted
+ * payloads, kick off resync when HW requests it, or count errors.
+ */
+void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
+                             struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
+{
+       struct mlx5e_rq_stats *stats = rq->stats;
+
+       switch (get_cqe_tls_offload(cqe)) {
+       case CQE_TLS_OFFLOAD_DECRYPTED:
+               skb->decrypted = 1;
+               stats->tls_decrypted_packets++;
+               stats->tls_decrypted_bytes += *cqe_bcnt;
+               break;
+       case CQE_TLS_OFFLOAD_RESYNC:
+               stats->tls_resync_req_pkt++;
+               resync_update_sn(rq, skb);
+               break;
+       default: /* CQE_TLS_OFFLOAD_ERROR: */
+               stats->tls_err++;
+               break;
+       }
+}
+
+/* Completion of the context-setup WQEs: queue flow-steering rule install,
+ * or just signal add_ctx when the context is being torn down.
+ */
+void mlx5e_ktls_handle_ctx_completion(struct mlx5e_icosq_wqe_info *wi)
+{
+       struct mlx5e_ktls_offload_context_rx *priv_rx = wi->tls_set_params.priv_rx;
+       struct accel_rule *rule = &priv_rx->rule;
+
+       if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
+               complete(&priv_rx->add_ctx);
+               return;
+       }
+       queue_work(rule->priv->tls->rx_wq, &rule->work);
+}
+
+/* Resolve the socket's RX queue index; fall back to queue 0 when the
+ * socket has no recorded RX queue.
+ */
+static int mlx5e_ktls_sk_get_rxq(struct sock *sk)
+{
+       int rxq = sk_rx_queue_get(sk);
+
+       return rxq == -1 ? 0 : rxq;
+}
+
+/* Enable kTLS RX offload for @sk: create the HW key and TLS TIR, register
+ * the driver context in the TLS stack, set up async resync, and post the
+ * static/progress params WQEs.
+ * Returns 0 on success; unwinds TIR/key/context allocations on failure.
+ */
+int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
+                     struct tls_crypto_info *crypto_info,
+                     u32 start_offload_tcp_sn)
+{
+       struct mlx5e_ktls_offload_context_rx *priv_rx;
+       struct mlx5e_ktls_rx_resync_ctx *resync;
+       struct tls_context *tls_ctx;
+       struct mlx5_core_dev *mdev;
+       struct mlx5e_priv *priv;
+       int rxq, err;
+       u32 rqtn;
+
+       tls_ctx = tls_get_ctx(sk);
+       priv = netdev_priv(netdev);
+       mdev = priv->mdev;
+       priv_rx = kzalloc(sizeof(*priv_rx), GFP_KERNEL);
+       if (unlikely(!priv_rx))
+               return -ENOMEM;
+
+       err = mlx5_ktls_create_key(mdev, crypto_info, &priv_rx->key_id);
+       if (err)
+               goto err_create_key;
+
+       priv_rx->crypto_info  =
+               *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+
+       rxq = mlx5e_ktls_sk_get_rxq(sk);
+       priv_rx->rxq = rxq;
+       priv_rx->sk = sk;
+
+       priv_rx->stats = &priv->channel_stats[rxq].rq;
+       mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
+
+       /* TIR is built on top of the direct RQT of the socket's RX queue. */
+       rqtn = priv->direct_tir[rxq].rqt.rqtn;
+
+       err = mlx5e_ktls_create_tir(mdev, &priv_rx->tirn, rqtn);
+       if (err)
+               goto err_create_tir;
+
+       init_completion(&priv_rx->add_ctx);
+
+       accel_rule_init(&priv_rx->rule, priv, sk);
+       resync = &priv_rx->resync;
+       resync_init(resync, priv);
+       tls_offload_ctx_rx(tls_ctx)->resync_async = &resync->core;
+       tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC);
+
+       err = post_rx_param_wqes(priv->channels.c[rxq], priv_rx, start_offload_tcp_sn);
+       if (err)
+               goto err_post_wqes;
+
+       priv_rx->stats->tls_ctx++;
+
+       return 0;
+
+err_post_wqes:
+       mlx5_core_destroy_tir(mdev, priv_rx->tirn);
+err_create_tir:
+       mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
+err_create_key:
+       kfree(priv_rx);
+       return err;
+}
+
+/* Elevated refcount on the resync object means there are
+ * outstanding operations (uncompleted GET_PSV WQEs) that
+ * will read the resync / priv_rx objects once completed.
+ * Wait for them to avoid use-after-free.
+ *
+ * Polls (20ms steps, up to 20s) for the refcount to return to its base
+ * value of 1; only warns on timeout — the caller frees anyway.
+ */
+static void wait_for_resync(struct net_device *netdev,
+                           struct mlx5e_ktls_rx_resync_ctx *resync)
+{
+#define MLX5E_KTLS_RX_RESYNC_TIMEOUT 20000 /* msecs */
+       unsigned long exp_time = jiffies + msecs_to_jiffies(MLX5E_KTLS_RX_RESYNC_TIMEOUT);
+       unsigned int refcnt;
+
+       do {
+               refcnt = refcount_read(&resync->refcnt);
+               if (refcnt == 1)
+                       return;
+
+               msleep(20);
+       } while (time_before(jiffies, exp_time));
+
+       netdev_warn(netdev,
+                   "Failed waiting for kTLS RX resync refcnt to be released (%u).\n",
+                   refcnt);
+}
+
+/* Tear down kTLS RX offload for a connection. Ordering matters:
+ * 1. mark DELETING and clear the TLS driver ctx, then napi_synchronize()
+ *    so NAPI can no longer start new async work on this context;
+ * 2. wait for an in-flight add flow (rule work or its pending completion);
+ * 3. drain outstanding GET_PSV operations via the resync refcount;
+ * 4. only then remove the steering rule and destroy TIR/key/context.
+ */
+void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
+{
+       struct mlx5e_ktls_offload_context_rx *priv_rx;
+       struct mlx5e_ktls_rx_resync_ctx *resync;
+       struct mlx5_core_dev *mdev;
+       struct mlx5e_priv *priv;
+
+       priv = netdev_priv(netdev);
+       mdev = priv->mdev;
+
+       priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
+       set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
+       mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL);
+       napi_synchronize(&priv->channels.c[priv_rx->rxq]->napi);
+       if (!cancel_work_sync(&priv_rx->rule.work))
+               /* completion is needed, as the priv_rx in the add flow
+                * is maintained on the wqe info (wi), not on the socket.
+                */
+               wait_for_completion(&priv_rx->add_ctx);
+       resync = &priv_rx->resync;
+       /* A cancelled (not-yet-run) resync work still holds a ref. */
+       if (cancel_work_sync(&resync->work))
+               refcount_dec(&resync->refcnt);
+       wait_for_resync(netdev, resync);
+
+       priv_rx->stats->tls_del++;
+       if (priv_rx->rule.rule)
+               mlx5e_accel_fs_del_sk(priv_rx->rule.rule);
+
+       mlx5_core_destroy_tir(mdev, priv_rx->tirn);
+       mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
+       kfree(priv_rx);
+}
index 3cd78d9..0e6698d 100644 (file)
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 // Copyright (c) 2019 Mellanox Technologies.
 
-#include <linux/tls.h>
-#include "en.h"
-#include "en/txrx.h"
-#include "en_accel/ktls.h"
+#include "en_accel/ktls_txrx.h"
+#include "en_accel/ktls_utils.h"
 
-enum {
-       MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2 = 0x2,
+struct mlx5e_dump_wqe {
+       struct mlx5_wqe_ctrl_seg ctrl;
+       struct mlx5_wqe_data_seg data;
 };
 
-enum {
-       MLX5E_ENCRYPTION_STANDARD_TLS = 0x1,
-};
+#define MLX5E_KTLS_DUMP_WQEBBS \
+       (DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))
 
-#define EXTRACT_INFO_FIELDS do { \
-       salt    = info->salt;    \
-       rec_seq = info->rec_seq; \
-       salt_sz    = sizeof(info->salt);    \
-       rec_seq_sz = sizeof(info->rec_seq); \
-} while (0)
+/* @nfrags: number of skb frags in the resynced record
+ * @sync_len: number of payload bytes to re-DUMP
+ */
+static u8
+mlx5e_ktls_dumps_num_wqes(struct mlx5e_txqsq *sq, unsigned int nfrags,
+                         unsigned int sync_len)
+{
+       /* Given the MTU and sync_len, calculates an upper bound for the
+        * number of DUMP WQEs needed for the TX resync of a record.
+        */
+       return nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu);
+}
 
-static void
-fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
+u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq)
 {
-       struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
-       char *initial_rn, *gcm_iv;
-       u16 salt_sz, rec_seq_sz;
-       char *salt, *rec_seq;
-       u8 tls_version;
+       u16 num_dumps, stop_room = 0;
+
+       num_dumps = mlx5e_ktls_dumps_num_wqes(sq, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
 
-       EXTRACT_INFO_FIELDS;
+       stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
+       stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
+       stop_room += num_dumps * mlx5e_stop_room_for_wqe(MLX5E_KTLS_DUMP_WQEBBS);
 
-       gcm_iv      = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
-       initial_rn  = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number);
+       return stop_room;
+}
+
+static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
+{
+       u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
+       void *tisc;
 
-       memcpy(gcm_iv,      salt,    salt_sz);
-       memcpy(initial_rn,  rec_seq, rec_seq_sz);
+       tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
 
-       tls_version = MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2;
+       MLX5_SET(tisc, tisc, tls_en, 1);
 
-       MLX5_SET(tls_static_params, ctx, tls_version, tls_version);
-       MLX5_SET(tls_static_params, ctx, const_1, 1);
-       MLX5_SET(tls_static_params, ctx, const_2, 2);
-       MLX5_SET(tls_static_params, ctx, encryption_standard,
-                MLX5E_ENCRYPTION_STANDARD_TLS);
-       MLX5_SET(tls_static_params, ctx, dek_index, priv_tx->key_id);
+       return mlx5e_create_tis(mdev, in, tisn);
 }
 
+/* Per-connection kTLS TX offload context, stored in the TLS driver ctx.
+ * expected_seq starts at start_offload_tcp_sn; ctx_post_pending defers
+ * posting the HW context params until the first transmitted skb.
+ */
+struct mlx5e_ktls_offload_context_tx {
+       struct tls_offload_context_tx *tx_ctx;
+       struct tls12_crypto_info_aes_gcm_128 crypto_info;
+       u32 expected_seq;
+       u32 tisn;
+       u32 key_id;
+       bool ctx_post_pending;
+};
+
 static void
-build_static_params(struct mlx5e_umr_wqe *wqe, u16 pc, u32 sqn,
-                   struct mlx5e_ktls_offload_context_tx *priv_tx,
-                   bool fence)
+mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
+                          struct mlx5e_ktls_offload_context_tx *priv_tx)
 {
-       struct mlx5_wqe_ctrl_seg     *cseg  = &wqe->ctrl;
-       struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+       struct mlx5e_ktls_offload_context_tx **ctx =
+               __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);
 
-#define STATIC_PARAMS_DS_CNT \
-       DIV_ROUND_UP(MLX5E_KTLS_STATIC_UMR_WQE_SZ, MLX5_SEND_WQE_DS)
+       BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_tx *) >
+                    TLS_OFFLOAD_CONTEXT_SIZE_TX);
 
-       cseg->opmod_idx_opcode = cpu_to_be32((pc << 8) | MLX5_OPCODE_UMR |
-                                            (MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS << 24));
-       cseg->qpn_ds           = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
-                                            STATIC_PARAMS_DS_CNT);
-       cseg->fm_ce_se         = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
-       cseg->tisn             = cpu_to_be32(priv_tx->tisn << 8);
+       *ctx = priv_tx;
+}
 
-       ucseg->flags = MLX5_UMR_INLINE;
-       ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);
+static struct mlx5e_ktls_offload_context_tx *
+mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
+{
+       struct mlx5e_ktls_offload_context_tx **ctx =
+               __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);
 
-       fill_static_params_ctx(wqe->tls_static_params_ctx, priv_tx);
+       return *ctx;
 }
 
-static void
-fill_progress_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
+int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
+                     struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn)
 {
-       MLX5_SET(tls_progress_params, ctx, tisn, priv_tx->tisn);
-       MLX5_SET(tls_progress_params, ctx, record_tracker_state,
-                MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
-       MLX5_SET(tls_progress_params, ctx, auth_state,
-                MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD);
+       struct mlx5e_ktls_offload_context_tx *priv_tx;
+       struct tls_context *tls_ctx;
+       struct mlx5_core_dev *mdev;
+       struct mlx5e_priv *priv;
+       int err;
+
+       tls_ctx = tls_get_ctx(sk);
+       priv = netdev_priv(netdev);
+       mdev = priv->mdev;
+
+       priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
+       if (!priv_tx)
+               return -ENOMEM;
+
+       err = mlx5_ktls_create_key(mdev, crypto_info, &priv_tx->key_id);
+       if (err)
+               goto err_create_key;
+
+       priv_tx->expected_seq = start_offload_tcp_sn;
+       priv_tx->crypto_info  =
+               *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
+       priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);
+
+       mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);
+
+       err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
+       if (err)
+               goto err_create_tis;
+
+       priv_tx->ctx_post_pending = true;
+
+       return 0;
+
+err_create_tis:
+       mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
+err_create_key:
+       kfree(priv_tx);
+       return err;
 }
 
-static void
-build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
-                     struct mlx5e_ktls_offload_context_tx *priv_tx,
-                     bool fence)
+void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
 {
-       struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
-
-#define PROGRESS_PARAMS_DS_CNT \
-       DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_DS)
+       struct mlx5e_ktls_offload_context_tx *priv_tx;
+       struct mlx5_core_dev *mdev;
+       struct mlx5e_priv *priv;
 
-       cseg->opmod_idx_opcode =
-               cpu_to_be32((pc << 8) | MLX5_OPCODE_SET_PSV |
-                           (MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS << 24));
-       cseg->qpn_ds           = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
-                                            PROGRESS_PARAMS_DS_CNT);
-       cseg->fm_ce_se         = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
+       priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
+       priv = netdev_priv(netdev);
+       mdev = priv->mdev;
 
-       fill_progress_params_ctx(wqe->tls_progress_params_ctx, priv_tx);
+       mlx5e_destroy_tis(mdev, priv_tx->tisn);
+       mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
+       kfree(priv_tx);
 }
 
 static void tx_fill_wi(struct mlx5e_txqsq *sq,
@@ -115,11 +149,6 @@ static void tx_fill_wi(struct mlx5e_txqsq *sq,
        };
 }
 
-void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
-{
-       priv_tx->ctx_post_pending = true;
-}
-
 static bool
 mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
 {
@@ -135,12 +164,15 @@ post_static_params(struct mlx5e_txqsq *sq,
                   struct mlx5e_ktls_offload_context_tx *priv_tx,
                   bool fence)
 {
-       u16 pi, num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS;
-       struct mlx5e_umr_wqe *umr_wqe;
+       struct mlx5e_set_tls_static_params_wqe *wqe;
+       u16 pi, num_wqebbs;
 
+       num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
        pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
-       umr_wqe = MLX5E_TLS_FETCH_UMR_WQE(sq, pi);
-       build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
+       wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
+       mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info,
+                                      priv_tx->tisn, priv_tx->key_id, 0, fence,
+                                      TLS_OFFLOAD_CTX_DIR_TX);
        tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
        sq->pc += num_wqebbs;
 }
@@ -150,12 +182,14 @@ post_progress_params(struct mlx5e_txqsq *sq,
                     struct mlx5e_ktls_offload_context_tx *priv_tx,
                     bool fence)
 {
-       u16 pi, num_wqebbs = MLX5E_KTLS_PROGRESS_WQEBBS;
-       struct mlx5e_tx_wqe *wqe;
+       struct mlx5e_set_tls_progress_params_wqe *wqe;
+       u16 pi, num_wqebbs;
 
+       num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
        pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
-       wqe = MLX5E_TLS_FETCH_PROGRESS_WQE(sq, pi);
-       build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
+       wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
+       mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_tx->tisn, fence, 0,
+                                        TLS_OFFLOAD_CTX_DIR_TX);
        tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
        sq->pc += num_wqebbs;
 }
@@ -284,7 +318,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
 
        cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8)  | MLX5_OPCODE_DUMP);
        cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
-       cseg->tisn             = cpu_to_be32(tisn << 8);
+       cseg->tis_tir_num      = cpu_to_be32(tisn << 8);
        cseg->fm_ce_se         = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
 
        fsz = skb_frag_size(frag);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.c
new file mode 100644 (file)
index 0000000..ac29aeb
--- /dev/null
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#include "en_accel/ktls_txrx.h"
+#include "en_accel/ktls_utils.h"
+
+enum {
+       MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2 = 0x2,
+};
+
+enum {
+       MLX5E_ENCRYPTION_STANDARD_TLS = 0x1,
+};
+
+#define EXTRACT_INFO_FIELDS do { \
+       salt    = info->salt;    \
+       rec_seq = info->rec_seq; \
+       salt_sz    = sizeof(info->salt);    \
+       rec_seq_sz = sizeof(info->rec_seq); \
+} while (0)
+
+static void
+fill_static_params(struct mlx5_wqe_tls_static_params_seg *params,
+                  struct tls12_crypto_info_aes_gcm_128 *info,
+                  u32 key_id, u32 resync_tcp_sn)
+{
+       char *initial_rn, *gcm_iv;
+       u16 salt_sz, rec_seq_sz;
+       char *salt, *rec_seq;
+       u8 tls_version;
+       u8 *ctx;
+
+       ctx = params->ctx;
+
+       EXTRACT_INFO_FIELDS;
+
+       gcm_iv      = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
+       initial_rn  = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number);
+
+       memcpy(gcm_iv,      salt,    salt_sz);
+       memcpy(initial_rn,  rec_seq, rec_seq_sz);
+
+       tls_version = MLX5E_STATIC_PARAMS_CONTEXT_TLS_1_2;
+
+       MLX5_SET(tls_static_params, ctx, tls_version, tls_version);
+       MLX5_SET(tls_static_params, ctx, const_1, 1);
+       MLX5_SET(tls_static_params, ctx, const_2, 2);
+       MLX5_SET(tls_static_params, ctx, encryption_standard,
+                MLX5E_ENCRYPTION_STANDARD_TLS);
+       MLX5_SET(tls_static_params, ctx, resync_tcp_sn, resync_tcp_sn);
+       MLX5_SET(tls_static_params, ctx, dek_index, key_id);
+}
+
+void
+mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
+                              u16 pc, u32 sqn,
+                              struct tls12_crypto_info_aes_gcm_128 *info,
+                              u32 tis_tir_num, u32 key_id, u32 resync_tcp_sn,
+                              bool fence, enum tls_offload_ctx_dir direction)
+{
+       struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+       struct mlx5_wqe_ctrl_seg     *cseg  = &wqe->ctrl;
+       u8 opmod = direction == TLS_OFFLOAD_CTX_DIR_TX ?
+               MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS :
+               MLX5_OPC_MOD_TLS_TIR_STATIC_PARAMS;
+
+#define STATIC_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS)
+
+       cseg->opmod_idx_opcode = cpu_to_be32((pc << 8) | MLX5_OPCODE_UMR | (opmod << 24));
+       cseg->qpn_ds           = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
+                                            STATIC_PARAMS_DS_CNT);
+       cseg->fm_ce_se         = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
+       cseg->tis_tir_num      = cpu_to_be32(tis_tir_num << 8);
+
+       ucseg->flags = MLX5_UMR_INLINE;
+       ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);
+
+       fill_static_params(&wqe->params, info, key_id, resync_tcp_sn);
+}
+
+static void
+fill_progress_params(struct mlx5_wqe_tls_progress_params_seg *params, u32 tis_tir_num,
+                    u32 next_record_tcp_sn)
+{
+       u8 *ctx = params->ctx;
+
+       params->tis_tir_num = cpu_to_be32(tis_tir_num);
+
+       MLX5_SET(tls_progress_params, ctx, next_record_tcp_sn,
+                next_record_tcp_sn);
+       MLX5_SET(tls_progress_params, ctx, record_tracker_state,
+                MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);
+       MLX5_SET(tls_progress_params, ctx, auth_state,
+                MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD);
+}
+
+void
+mlx5e_ktls_build_progress_params(struct mlx5e_set_tls_progress_params_wqe *wqe,
+                                u16 pc, u32 sqn,
+                                u32 tis_tir_num, bool fence,
+                                u32 next_record_tcp_sn,
+                                enum tls_offload_ctx_dir direction)
+{
+       struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+       u8 opmod = direction == TLS_OFFLOAD_CTX_DIR_TX ?
+               MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS :
+               MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS;
+
+#define PROGRESS_PARAMS_DS_CNT DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS)
+
+       cseg->opmod_idx_opcode =
+               cpu_to_be32((pc << 8) | MLX5_OPCODE_SET_PSV | (opmod << 24));
+       cseg->qpn_ds           = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
+                                            PROGRESS_PARAMS_DS_CNT);
+       cseg->fm_ce_se         = fence ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
+
+       fill_progress_params(&wqe->params, tis_tir_num, next_record_tcp_sn);
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
new file mode 100644 (file)
index 0000000..ff4c740
--- /dev/null
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5E_KTLS_TXRX_H__
+#define __MLX5E_KTLS_TXRX_H__
+
+#ifdef CONFIG_MLX5_EN_TLS
+
+#include <net/tls.h>
+#include "en.h"
+#include "en/txrx.h"
+
+struct mlx5e_accel_tx_tls_state {
+       u32 tls_tisn;
+};
+
+u16 mlx5e_ktls_get_stop_room(struct mlx5e_txqsq *sq);
+
+bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
+                             struct sk_buff *skb, int datalen,
+                             struct mlx5e_accel_tx_tls_state *state);
+void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
+                             struct mlx5_cqe64 *cqe, u32 *cqe_bcnt);
+
+void mlx5e_ktls_handle_ctx_completion(struct mlx5e_icosq_wqe_info *wi);
+void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
+                                         struct mlx5e_icosq *sq);
+
+void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+                                          struct mlx5e_tx_wqe_info *wi,
+                                          u32 *dma_fifo_cc);
+#else
+static inline void
+mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+                                     struct mlx5e_tx_wqe_info *wi,
+                                     u32 *dma_fifo_cc)
+{
+}
+
+#endif /* CONFIG_MLX5_EN_TLS */
+
+#endif /* __MLX5E_KTLS_TXRX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_utils.h
new file mode 100644 (file)
index 0000000..e5c180f
--- /dev/null
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5E_KTLS_UTILS_H__
+#define __MLX5E_KTLS_UTILS_H__
+
+#include <net/tls.h>
+#include "en.h"
+#include "accel/tls.h"
+
+enum {
+       MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD     = 0,
+       MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_OFFLOAD        = 1,
+       MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_AUTHENTICATION = 2,
+};
+
+enum {
+       MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START     = 0,
+       MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING  = 1,
+       MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING = 2,
+};
+
+int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
+                     struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn);
+void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx);
+int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
+                     struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn);
+void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx);
+void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk, u32 seq, u8 *rcd_sn);
+
+struct mlx5e_set_tls_static_params_wqe {
+       struct mlx5_wqe_ctrl_seg ctrl;
+       struct mlx5_wqe_umr_ctrl_seg uctrl;
+       struct mlx5_mkey_seg mkc;
+       struct mlx5_wqe_tls_static_params_seg params;
+};
+
+struct mlx5e_set_tls_progress_params_wqe {
+       struct mlx5_wqe_ctrl_seg ctrl;
+       struct mlx5_wqe_tls_progress_params_seg params;
+};
+
+struct mlx5e_get_tls_progress_params_wqe {
+       struct mlx5_wqe_ctrl_seg ctrl;
+       struct mlx5_seg_get_psv  psv;
+};
+
+#define MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS \
+       (DIV_ROUND_UP(sizeof(struct mlx5e_set_tls_static_params_wqe), MLX5_SEND_WQE_BB))
+
+#define MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS \
+       (DIV_ROUND_UP(sizeof(struct mlx5e_set_tls_progress_params_wqe), MLX5_SEND_WQE_BB))
+
+#define MLX5E_KTLS_GET_PROGRESS_WQEBBS \
+       (DIV_ROUND_UP(sizeof(struct mlx5e_get_tls_progress_params_wqe), MLX5_SEND_WQE_BB))
+
+#define MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi) \
+       ((struct mlx5e_set_tls_static_params_wqe *)\
+        mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_set_tls_static_params_wqe)))
+
+#define MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi) \
+       ((struct mlx5e_set_tls_progress_params_wqe *)\
+        mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_set_tls_progress_params_wqe)))
+
+#define MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi) \
+       ((struct mlx5e_get_tls_progress_params_wqe *)\
+        mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_get_tls_progress_params_wqe)))
+
+#define MLX5E_TLS_FETCH_DUMP_WQE(sq, pi) \
+       ((struct mlx5e_dump_wqe *)\
+        mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_dump_wqe)))
+
+void
+mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
+                              u16 pc, u32 sqn,
+                              struct tls12_crypto_info_aes_gcm_128 *info,
+                              u32 tis_tir_num, u32 key_id, u32 resync_tcp_sn,
+                              bool fence, enum tls_offload_ctx_dir direction);
+void
+mlx5e_ktls_build_progress_params(struct mlx5e_set_tls_progress_params_wqe *wqe,
+                                u16 pc, u32 sqn,
+                                u32 tis_tir_num, bool fence,
+                                u32 next_record_tcp_sn,
+                                enum tls_offload_ctx_dir direction);
+
+#endif /* __MLX5E_KTLS_UTILS_H__ */
index 1fbb5a9..fee991f 100644 (file)
@@ -197,6 +197,7 @@ void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
                return;
        }
 
+       /* FPGA */
        if (!mlx5_accel_is_tls_device(priv->mdev))
                return;
 
@@ -221,11 +222,21 @@ void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
 
 int mlx5e_tls_init(struct mlx5e_priv *priv)
 {
-       struct mlx5e_tls *tls = kzalloc(sizeof(*tls), GFP_KERNEL);
+       struct mlx5e_tls *tls;
 
+       if (!mlx5_accel_is_tls_device(priv->mdev))
+               return 0;
+
+       tls = kzalloc(sizeof(*tls), GFP_KERNEL);
        if (!tls)
                return -ENOMEM;
 
+       tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");
+       if (!tls->rx_wq) {
+               kfree(tls);
+               return -ENOMEM;
+       }
+
        priv->tls = tls;
        return 0;
 }
@@ -237,20 +248,7 @@ void mlx5e_tls_cleanup(struct mlx5e_priv *priv)
        if (!tls)
                return;
 
+       destroy_workqueue(tls->rx_wq);
        kfree(tls);
        priv->tls = NULL;
 }
-
-u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq)
-{
-       struct mlx5_core_dev *mdev = sq->channel->mdev;
-
-       if (!mlx5_accel_is_tls_device(mdev))
-               return 0;
-
-       if (MLX5_CAP_GEN(mdev, tls_tx))
-               return mlx5e_ktls_get_stop_room(sq);
-
-       /* Resync SKB. */
-       return mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
-}
index 9219bdb..bd270a8 100644 (file)
@@ -53,6 +53,7 @@ struct mlx5e_tls_sw_stats {
 
 struct mlx5e_tls {
        struct mlx5e_tls_sw_stats sw_stats;
+       struct workqueue_struct *rx_wq;
 };
 
 struct mlx5e_tls_offload_context_tx {
@@ -86,6 +87,11 @@ mlx5e_get_tls_rx_context(struct tls_context *tls_ctx)
                            base);
 }
 
+static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv)
+{
+       return priv->tls;
+}
+
 void mlx5e_tls_build_netdev(struct mlx5e_priv *priv);
 int mlx5e_tls_init(struct mlx5e_priv *priv);
 void mlx5e_tls_cleanup(struct mlx5e_priv *priv);
@@ -94,8 +100,6 @@ int mlx5e_tls_get_count(struct mlx5e_priv *priv);
 int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
 int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data);
 
-u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq);
-
 #else
 
 static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
@@ -104,17 +108,13 @@ static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
                mlx5e_ktls_build_netdev(priv);
 }
 
+static inline bool mlx5e_is_tls_on(struct mlx5e_priv *priv) { return false; }
 static inline int mlx5e_tls_init(struct mlx5e_priv *priv) { return 0; }
 static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { }
 static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; }
 static inline int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { return 0; }
 static inline int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; }
 
-static inline u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq)
-{
-       return 0;
-}
-
 #endif
 
 #endif /* __MLX5E_TLS_H__ */
index 05454a8..b0c31d4 100644 (file)
@@ -278,9 +278,10 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
        if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
                goto err_out;
 
-       if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx))
+       if (mlx5_accel_is_ktls_tx(sq->channel->mdev))
                return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);
 
+       /* FPGA */
        skb_seq = ntohl(tcp_hdr(skb)->seq);
        context = mlx5e_get_tls_tx_context(tls_ctx);
        expected_seq = context->expected_seq;
@@ -305,7 +306,7 @@ err_out:
 void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
                             struct mlx5e_accel_tx_tls_state *state)
 {
-       cseg->tisn = cpu_to_be32(state->tls_tisn << 8);
+       cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8);
 }
 
 static int tls_update_resync_sn(struct net_device *netdev,
@@ -354,15 +355,13 @@ out:
        return 0;
 }
 
-void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
-                            u32 *cqe_bcnt)
+/* FPGA tls rx handler */
+void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
+                                     u32 *cqe_bcnt)
 {
        struct mlx5e_tls_metadata *mdata;
        struct mlx5e_priv *priv;
 
-       if (!is_metadata_hdr_valid(skb))
-               return;
-
        /* Use the metadata */
        mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN);
        switch (mdata->content.recv.syndrome) {
@@ -370,13 +369,13 @@ void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
                skb->decrypted = 1;
                break;
        case SYNDROM_RESYNC_REQUEST:
-               tls_update_resync_sn(netdev, skb, mdata);
-               priv = netdev_priv(netdev);
+               tls_update_resync_sn(rq->netdev, skb, mdata);
+               priv = netdev_priv(rq->netdev);
                atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request);
                break;
        case SYNDROM_AUTH_FAILED:
                /* Authentication failure will be observed and verified by kTLS */
-               priv = netdev_priv(netdev);
+               priv = netdev_priv(rq->netdev);
                atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail);
                break;
        default:
@@ -387,3 +386,18 @@ void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
        remove_metadata_hdr(skb);
        *cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
 }
+
+u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq)
+{
+       struct mlx5_core_dev *mdev = sq->channel->mdev;
+
+       if (!mlx5_accel_is_tls_device(mdev))
+               return 0;
+
+       if (mlx5_accel_is_ktls_device(mdev))
+               return mlx5e_ktls_get_stop_room(sq);
+
+       /* FPGA */
+       /* Resync SKB. */
+       return mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+}
index a50d039..5f162ad 100644 (file)
 #ifndef __MLX5E_TLS_RXTX_H__
 #define __MLX5E_TLS_RXTX_H__
 
+#include "accel/accel.h"
+#include "en_accel/ktls_txrx.h"
+
 #ifdef CONFIG_MLX5_EN_TLS
 
 #include <linux/skbuff.h>
 #include "en.h"
 #include "en/txrx.h"
 
-struct mlx5e_accel_tx_tls_state {
-       u32 tls_tisn;
-};
+u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq);
 
 bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
                             struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state);
 void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
                             struct mlx5e_accel_tx_tls_state *state);
 
-void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
-                            u32 *cqe_bcnt);
+void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
+                                     u32 *cqe_bcnt);
+
+static inline void
+mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
+                       struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
+{
+       if (unlikely(get_cqe_tls_offload(cqe))) /* cqe bit indicates a TLS device */
+               return mlx5e_ktls_handle_rx_skb(rq, skb, cqe, cqe_bcnt);
+
+       if (unlikely(test_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state) && is_metadata_hdr_valid(skb)))
+               return mlx5e_tls_handle_rx_skb_metadata(rq, skb, cqe_bcnt);
+}
+
+#else
+
+static inline bool
+mlx5e_accel_is_tls(struct mlx5_cqe64 *cqe, struct sk_buff *skb) { return false; }
+static inline void
+mlx5e_tls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
+                       struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) {}
+static inline u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq)
+{
+       return 0;
+}
 
 #endif /* CONFIG_MLX5_EN_TLS */
 
index 014639e..39475f6 100644 (file)
@@ -90,23 +90,15 @@ static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type)
 
 static int arfs_disable(struct mlx5e_priv *priv)
 {
-       struct mlx5_flow_destination dest = {};
-       struct mlx5e_tir *tir = priv->indir_tir;
-       int err = 0;
-       int tt;
-       int i;
+       int err, i;
 
-       dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        for (i = 0; i < ARFS_NUM_TYPES; i++) {
-               dest.tir_num = tir[i].tirn;
-               tt = arfs_get_tt(i);
-               /* Modify ttc rules destination to bypass the aRFS tables*/
-               err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
-                                                  &dest, NULL);
+               /* Modify ttc rules destination back to their default */
+               err = mlx5e_ttc_fwd_default_dest(priv, arfs_get_tt(i));
                if (err) {
                        netdev_err(priv->netdev,
-                                  "%s: modify ttc destination failed\n",
-                                  __func__);
+                                  "%s: modify ttc[%d] default destination failed, err(%d)\n",
+                                  __func__, arfs_get_tt(i), err);
                        return err;
                }
        }
@@ -125,21 +117,17 @@ int mlx5e_arfs_disable(struct mlx5e_priv *priv)
 int mlx5e_arfs_enable(struct mlx5e_priv *priv)
 {
        struct mlx5_flow_destination dest = {};
-       int err = 0;
-       int tt;
-       int i;
+       int err, i;
 
        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        for (i = 0; i < ARFS_NUM_TYPES; i++) {
                dest.ft = priv->fs.arfs.arfs_tables[i].ft.t;
-               tt = arfs_get_tt(i);
                /* Modify ttc rules destination to point on the aRFS FTs */
-               err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
-                                                  &dest, NULL);
+               err = mlx5e_ttc_fwd_dest(priv, arfs_get_tt(i), &dest);
                if (err) {
                        netdev_err(priv->netdev,
-                                  "%s: modify ttc destination failed err=%d\n",
-                                  __func__, err);
+                                  "%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
+                                  __func__, arfs_get_tt(i), err);
                        arfs_disable(priv);
                        return err;
                }
@@ -186,8 +174,10 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
                return -EINVAL;
        }
 
+       /* FIXME: Must use mlx5e_ttc_get_default_dest(),
+        * but can't since TTC default is not setup yet !
+        */
        dest.tir_num = tir[tt].tirn;
-
        arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, NULL,
                                                   &flow_act,
                                                   &dest, 1);
@@ -220,7 +210,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
                        sizeof(*ft->g), GFP_KERNEL);
        in = kvzalloc(inlen, GFP_KERNEL);
        if  (!in || !ft->g) {
-               kvfree(ft->g);
+               kfree(ft->g);
                kvfree(in);
                return -ENOMEM;
        }
index bc102d0..d20243d 100644 (file)
@@ -1217,6 +1217,24 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
        return 0;
 }
 
+#define MLX5E_BUFFER_CELL_SHIFT 7
+
+static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       u32 out[MLX5_ST_SZ_DW(sbcam_reg)] = {};
+       u32 in[MLX5_ST_SZ_DW(sbcam_reg)] = {};
+
+       if (!MLX5_CAP_GEN(mdev, sbcam_reg))
+               return (1 << MLX5E_BUFFER_CELL_SHIFT);
+
+       if (mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+                                MLX5_REG_SBCAM, 0, 0))
+               return (1 << MLX5E_BUFFER_CELL_SHIFT);
+
+       return MLX5_GET(sbcam_reg, out, cap_cell_size);
+}
+
 void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
 {
        struct mlx5e_dcbx *dcbx = &priv->dcbx;
@@ -1234,6 +1252,7 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
        if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
                priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
 
+       priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
        priv->dcbx.manual_buffer = false;
        priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
 
index ec5658b..af849bc 100644 (file)
@@ -194,13 +194,31 @@ void mlx5e_build_ptys2ethtool_map(void)
                                       ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
                                       ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
                                       ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT);
+       MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GAUI_1_100GBASE_CR_KR, ext,
+                                      ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
+                                      ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
+                                      ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
+                                      ETHTOOL_LINK_MODE_100000baseDR_Full_BIT,
+                                      ETHTOOL_LINK_MODE_100000baseCR_Full_BIT);
+       MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_200GAUI_2_200GBASE_CR2_KR2, ext,
+                                      ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT,
+                                      ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT,
+                                      ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
+                                      ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT,
+                                      ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT);
+       MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_400GAUI_4_400GBASE_CR4_KR4, ext,
+                                      ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT,
+                                      ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT,
+                                      ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
+                                      ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT,
+                                      ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT);
 }
 
 static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev,
                                        struct ptys2ethtool_config **arr,
                                        u32 *size)
 {
-       bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       bool ext = mlx5e_ptys_ext_supported(mdev);
 
        *arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
        *size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
@@ -883,7 +901,7 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
                               struct ethtool_link_ksettings *link_ksettings)
 {
        unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
-       bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       bool ext = mlx5e_ptys_ext_supported(mdev);
 
        ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
 }
@@ -913,7 +931,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
                           __func__, err);
                goto err_query_regs;
        }
-       ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
        eth_proto_cap    = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
                                              eth_proto_capability);
        eth_proto_admin  = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
@@ -1012,7 +1030,8 @@ static u32 mlx5e_ethtool2ptys_ext_adver_link(const unsigned long *link_modes)
        unsigned long modes[2];
 
        for (i = 0; i < MLX5E_EXT_LINK_MODES_NUMBER; ++i) {
-               if (*ptys2ext_ethtool_table[i].advertised == 0)
+               if (ptys2ext_ethtool_table[i].advertised[0] == 0 &&
+                   ptys2ext_ethtool_table[i].advertised[1] == 0)
                        continue;
                memset(modes, 0, sizeof(modes));
                bitmap_and(modes, ptys2ext_ethtool_table[i].advertised,
@@ -1066,7 +1085,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
        autoneg = link_ksettings->base.autoneg;
        speed = link_ksettings->base.speed;
 
-       ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+       ext_supported = mlx5e_ptys_ext_supported(mdev);
        ext = ext_requested(autoneg, adver, ext_supported);
        if (!ext_supported && ext)
                return -EOPNOTSUPP;
index 73d3dc0..64d002d 100644 (file)
@@ -672,9 +672,9 @@ static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
        int i;
 
        for (i = 0; i < MLX5E_NUM_TT; i++) {
-               if (!IS_ERR_OR_NULL(ttc->rules[i])) {
-                       mlx5_del_flow_rules(ttc->rules[i]);
-                       ttc->rules[i] = NULL;
+               if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) {
+                       mlx5_del_flow_rules(ttc->rules[i].rule);
+                       ttc->rules[i].rule = NULL;
                }
        }
 
@@ -857,7 +857,8 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
                                          struct mlx5e_ttc_table *ttc)
 {
        struct mlx5_flow_destination dest = {};
-       struct mlx5_flow_handle **rules;
+       struct mlx5_flow_handle **trules;
+       struct mlx5e_ttc_rule *rules;
        struct mlx5_flow_table *ft;
        int tt;
        int err;
@@ -867,39 +868,47 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
 
        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
+               struct mlx5e_ttc_rule *rule = &rules[tt];
+
                if (tt == MLX5E_TT_ANY)
                        dest.tir_num = params->any_tt_tirn;
                else
                        dest.tir_num = params->indir_tirn[tt];
-               rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
-                                                   ttc_rules[tt].etype,
-                                                   ttc_rules[tt].proto);
-               if (IS_ERR(rules[tt]))
+
+               rule->rule = mlx5e_generate_ttc_rule(priv, ft, &dest,
+                                                    ttc_rules[tt].etype,
+                                                    ttc_rules[tt].proto);
+               if (IS_ERR(rule->rule)) {
+                       err = PTR_ERR(rule->rule);
+                       rule->rule = NULL;
                        goto del_rules;
+               }
+               rule->default_dest = dest;
        }
 
        if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
                return 0;
 
-       rules     = ttc->tunnel_rules;
+       trules    = ttc->tunnel_rules;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest.ft   = params->inner_ttc->ft.t;
        for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
                if (!mlx5e_tunnel_proto_supported(priv->mdev,
                                                  ttc_tunnel_rules[tt].proto))
                        continue;
-               rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
-                                                   ttc_tunnel_rules[tt].etype,
-                                                   ttc_tunnel_rules[tt].proto);
-               if (IS_ERR(rules[tt]))
+               trules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
+                                                    ttc_tunnel_rules[tt].etype,
+                                                    ttc_tunnel_rules[tt].proto);
+               if (IS_ERR(trules[tt])) {
+                       err = PTR_ERR(trules[tt]);
+                       trules[tt] = NULL;
                        goto del_rules;
+               }
        }
 
        return 0;
 
 del_rules:
-       err = PTR_ERR(rules[tt]);
-       rules[tt] = NULL;
        mlx5e_cleanup_ttc_rules(ttc);
        return err;
 }
@@ -1015,33 +1024,38 @@ static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv,
                                                struct mlx5e_ttc_table *ttc)
 {
        struct mlx5_flow_destination dest = {};
-       struct mlx5_flow_handle **rules;
+       struct mlx5e_ttc_rule *rules;
        struct mlx5_flow_table *ft;
        int err;
        int tt;
 
        ft = ttc->ft.t;
        rules = ttc->rules;
-
        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+
        for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
+               struct mlx5e_ttc_rule *rule = &rules[tt];
+
                if (tt == MLX5E_TT_ANY)
                        dest.tir_num = params->any_tt_tirn;
                else
                        dest.tir_num = params->indir_tirn[tt];
 
-               rules[tt] = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
-                                                         ttc_rules[tt].etype,
-                                                         ttc_rules[tt].proto);
-               if (IS_ERR(rules[tt]))
+               rule->rule = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
+                                                          ttc_rules[tt].etype,
+                                                          ttc_rules[tt].proto);
+               if (IS_ERR(rule->rule)) {
+                       err = PTR_ERR(rule->rule);
+                       rule->rule = NULL;
                        goto del_rules;
+               }
+               rule->default_dest = dest;
        }
 
        return 0;
 
 del_rules:
-       err = PTR_ERR(rules[tt]);
-       rules[tt] = NULL;
+
        mlx5e_cleanup_ttc_rules(ttc);
        return err;
 }
@@ -1210,6 +1224,30 @@ err:
        return err;
 }
 
+int mlx5e_ttc_fwd_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type,
+                      struct mlx5_flow_destination *new_dest)
+{
+       return mlx5_modify_rule_destination(priv->fs.ttc.rules[type].rule, new_dest, NULL);
+}
+
+struct mlx5_flow_destination
+mlx5e_ttc_get_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type)
+{
+       struct mlx5_flow_destination *dest = &priv->fs.ttc.rules[type].default_dest;
+
+       WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR,
+                 "TTC[%d] default dest is not setup yet", type);
+
+       return *dest;
+}
+
+int mlx5e_ttc_fwd_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type)
+{
+       struct mlx5_flow_destination dest = mlx5e_ttc_get_default_dest(priv, type);
+
+       return mlx5e_ttc_fwd_dest(priv, type, &dest);
+}
+
 static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
                                   struct mlx5e_l2_rule *ai)
 {
index a836a02..9d5d8b2 100644 (file)
@@ -65,6 +65,7 @@
 #include "en/hv_vhca_stats.h"
 #include "en/devlink.h"
 #include "lib/mlx5.h"
+#include "fpga/ipsec.h"
 
 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
 {
@@ -231,7 +232,6 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
 
        cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
                                      ds_cnt);
-       cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
        cseg->umr_mkey  = rq->mkey_be;
 
        ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
@@ -496,7 +496,8 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
 
 #ifdef CONFIG_MLX5_EN_IPSEC
-               if (c->priv->ipsec)
+               if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) &&
+                   c->priv->ipsec)
                        rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
                else
 #endif
@@ -873,6 +874,9 @@ int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
        if (err)
                goto err_destroy_rq;
 
+       if (mlx5e_is_tls_on(c->priv) && !mlx5_accel_is_ktls_device(c->mdev))
+               __set_bit(MLX5E_RQ_STATE_FPGA_TLS, &c->rq.state); /* must be FPGA */
+
        if (MLX5_CAP_ETH(c->mdev, cqe_checksum_full))
                __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &c->rq.state);
 
@@ -1441,6 +1445,7 @@ void mlx5e_close_icosq(struct mlx5e_icosq *sq)
        struct mlx5e_channel *c = sq->channel;
 
        mlx5e_destroy_sq(c->mdev, sq->sqn);
+       mlx5e_free_icosq_descs(sq);
        mlx5e_free_icosq(sq);
 }
 
@@ -1675,7 +1680,7 @@ static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
 
        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_cq(c, params->tx_cq_moderation,
-                                   &cparam->tx_cq, &c->sq[tc].cq);
+                                   &cparam->txq_sq.cqp, &c->sq[tc].cq);
                if (err)
                        goto err_close_tx_cqs;
        }
@@ -1707,7 +1712,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
                int txq_ix = c->ix + tc * params->num_channels;
 
                err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
-                                      params, &cparam->sq, &c->sq[tc], tc);
+                                      params, &cparam->txq_sq, &c->sq[tc], tc);
                if (err)
                        goto err_close_sqs;
        }
@@ -1817,34 +1822,43 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
        struct dim_cq_moder icocq_moder = {0, 0};
        int err;
 
-       err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
+       err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq.cqp, &c->async_icosq.cq);
        if (err)
                return err;
 
+       err = mlx5e_open_cq(c, icocq_moder, &cparam->async_icosq.cqp, &c->icosq.cq);
+       if (err)
+               goto err_close_async_icosq_cq;
+
        err = mlx5e_open_tx_cqs(c, params, cparam);
        if (err)
                goto err_close_icosq_cq;
 
-       err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xdpsq.cq);
+       err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &c->xdpsq.cq);
        if (err)
                goto err_close_tx_cqs;
 
-       err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
+       err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rq.cqp, &c->rq.cq);
        if (err)
                goto err_close_xdp_tx_cqs;
 
-       /* XDP SQ CQ params are same as normal TXQ sq CQ params */
        err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
-                                    &cparam->tx_cq, &c->rq_xdpsq.cq) : 0;
+                                    &cparam->xdp_sq.cqp, &c->rq_xdpsq.cq) : 0;
        if (err)
                goto err_close_rx_cq;
 
        napi_enable(&c->napi);
 
-       err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
+       spin_lock_init(&c->async_icosq_lock);
+
+       err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq);
        if (err)
                goto err_disable_napi;
 
+       err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
+       if (err)
+               goto err_close_async_icosq;
+
        err = mlx5e_open_sqs(c, params, cparam);
        if (err)
                goto err_close_icosq;
@@ -1879,6 +1893,9 @@ err_close_sqs:
 err_close_icosq:
        mlx5e_close_icosq(&c->icosq);
 
+err_close_async_icosq:
+       mlx5e_close_icosq(&c->async_icosq);
+
 err_disable_napi:
        napi_disable(&c->napi);
 
@@ -1897,6 +1914,9 @@ err_close_tx_cqs:
 err_close_icosq_cq:
        mlx5e_close_cq(&c->icosq.cq);
 
+err_close_async_icosq_cq:
+       mlx5e_close_cq(&c->async_icosq.cq);
+
        return err;
 }
 
@@ -1908,6 +1928,7 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
                mlx5e_close_xdpsq(&c->rq_xdpsq);
        mlx5e_close_sqs(c);
        mlx5e_close_icosq(&c->icosq);
+       mlx5e_close_icosq(&c->async_icosq);
        napi_disable(&c->napi);
        if (c->xdp)
                mlx5e_close_cq(&c->rq_xdpsq.cq);
@@ -1915,6 +1936,7 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
        mlx5e_close_cq(&c->xdpsq.cq);
        mlx5e_close_tx_cqs(c);
        mlx5e_close_cq(&c->icosq.cq);
+       mlx5e_close_cq(&c->async_icosq.cq);
 }
 
 static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
@@ -1995,6 +2017,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_activate_txqsq(&c->sq[tc]);
        mlx5e_activate_icosq(&c->icosq);
+       mlx5e_activate_icosq(&c->async_icosq);
        mlx5e_activate_rq(&c->rq);
 
        if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
@@ -2009,6 +2032,7 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
                mlx5e_deactivate_xsk(c);
 
        mlx5e_deactivate_rq(&c->rq);
+       mlx5e_deactivate_icosq(&c->async_icosq);
        mlx5e_deactivate_icosq(&c->icosq);
        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_deactivate_txqsq(&c->sq[tc]);
@@ -2138,6 +2162,7 @@ void mlx5e_build_rq_param(struct mlx5e_priv *priv,
        MLX5_SET(rqc, rqc, scatter_fcs,    params->scatter_fcs_en);
 
        param->wq.buf_numa_node = dev_to_node(mdev->device);
+       mlx5e_build_rx_cq_param(priv, params, xsk, &param->cqp);
 }
 
 static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
@@ -2180,6 +2205,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
        mlx5e_build_sq_param_common(priv, param);
        MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
        MLX5_SET(sqc, sqc, allow_swp, allow_swp);
+       mlx5e_build_tx_cq_param(priv, params, &param->cqp);
 }
 
 static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -2256,6 +2282,7 @@ void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
 
        MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
        MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
+       mlx5e_build_ico_cq_param(priv, log_wq_size, &param->cqp);
 }
 
 void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
@@ -2268,6 +2295,7 @@ void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
        mlx5e_build_sq_param_common(priv, param);
        MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
        param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
+       mlx5e_build_tx_cq_param(priv, params, &param->cqp);
 }
 
 static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
@@ -2282,22 +2310,29 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
        }
 }
 
+static u8 mlx5e_build_async_icosq_log_wq_sz(struct net_device *netdev)
+{
+       if (netdev->hw_features & NETIF_F_HW_TLS_RX)
+               return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+
+       return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+}
+
 static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
                                      struct mlx5e_params *params,
                                      struct mlx5e_channel_param *cparam)
 {
-       u8 icosq_log_wq_sz;
+       u8 icosq_log_wq_sz, async_icosq_log_wq_sz;
 
        mlx5e_build_rq_param(priv, params, NULL, &cparam->rq);
 
        icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
+       async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(priv->netdev);
 
-       mlx5e_build_sq_param(priv, params, &cparam->sq);
+       mlx5e_build_sq_param(priv, params, &cparam->txq_sq);
        mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
        mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
-       mlx5e_build_rx_cq_param(priv, params, NULL, &cparam->rx_cq);
-       mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
-       mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
+       mlx5e_build_icosq_param(priv, async_icosq_log_wq_sz, &cparam->async_icosq);
 }
 
 int mlx5e_open_channels(struct mlx5e_priv *priv,
@@ -3104,9 +3139,6 @@ int mlx5e_open(struct net_device *netdev)
                mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
        mutex_unlock(&priv->state_lock);
 
-       if (mlx5_vxlan_allowed(priv->mdev->vxlan))
-               udp_tunnel_get_rx_info(netdev);
-
        return err;
 }
 
@@ -3831,6 +3863,7 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
 #ifdef CONFIG_MLX5_EN_ARFS
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
 #endif
+       err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TLS_RX, mlx5e_ktls_set_feature_rx);
 
        if (err) {
                netdev->features = oper_features;
@@ -4332,8 +4365,6 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
 {
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               tx_timeout_work);
-       bool report_failed = false;
-       int err;
        int i;
 
        rtnl_lock();
@@ -4351,18 +4382,10 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
                        continue;
 
                if (mlx5e_reporter_tx_timeout(sq))
-                       report_failed = true;
+               /* break if tried to reopened channels */
+                       break;
        }
 
-       if (!report_failed)
-               goto unlock;
-
-       err = mlx5e_safe_reopen_channels(priv);
-       if (err)
-               netdev_err(priv->netdev,
-                          "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
-                          err);
-
 unlock:
        mutex_unlock(&priv->state_lock);
        rtnl_unlock();
@@ -5061,6 +5084,9 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
        if (err)
                mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
        mlx5e_build_nic_netdev(netdev);
+       err = mlx5e_devlink_port_register(priv);
+       if (err)
+               mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
        mlx5e_health_create_reporters(priv);
 
        return 0;
@@ -5069,6 +5095,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
 static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
 {
        mlx5e_health_destroy_reporters(priv);
+       mlx5e_devlink_port_unregister(priv);
        mlx5e_tls_cleanup(priv);
        mlx5e_ipsec_cleanup(priv);
        mlx5e_netdev_cleanup(priv->netdev, priv);
@@ -5121,8 +5148,18 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
        if (err)
                goto err_destroy_flow_steering;
 
+       err = mlx5e_accel_init_rx(priv);
+       if (err)
+               goto err_tc_nic_cleanup;
+
+#ifdef CONFIG_MLX5_EN_ARFS
+       priv->netdev->rx_cpu_rmap =  mlx5_eq_table_get_rmap(priv->mdev);
+#endif
+
        return 0;
 
+err_tc_nic_cleanup:
+       mlx5e_tc_nic_cleanup(priv);
 err_destroy_flow_steering:
        mlx5e_destroy_flow_steering(priv);
 err_destroy_xsk_tirs:
@@ -5146,6 +5183,7 @@ err_destroy_q_counters:
 
 static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
 {
+       mlx5e_accel_cleanup_rx(priv);
        mlx5e_tc_nic_cleanup(priv);
        mlx5e_destroy_flow_steering(priv);
        mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
@@ -5202,6 +5240,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
        rtnl_lock();
        if (netif_running(netdev))
                mlx5e_open(netdev);
+       if (mlx5_vxlan_allowed(priv->mdev->vxlan))
+               udp_tunnel_get_rx_info(netdev);
        netif_device_attach(netdev);
        rtnl_unlock();
 }
@@ -5216,6 +5256,8 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
        rtnl_lock();
        if (netif_running(priv->netdev))
                mlx5e_close(priv->netdev);
+       if (mlx5_vxlan_allowed(priv->mdev->vxlan))
+               udp_tunnel_drop_rx_info(priv->netdev);
        netif_device_detach(priv->netdev);
        rtnl_unlock();
 
@@ -5288,10 +5330,6 @@ int mlx5e_netdev_init(struct net_device *netdev,
        /* netdev init */
        netif_carrier_off(netdev);
 
-#ifdef CONFIG_MLX5_EN_ARFS
-       netdev->rx_cpu_rmap =  mlx5_eq_table_get_rmap(mdev);
-#endif
-
        return 0;
 
 err_free_cpumask:
@@ -5494,16 +5532,10 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
                goto err_destroy_netdev;
        }
 
-       err = mlx5e_devlink_port_register(priv);
-       if (err) {
-               mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
-               goto err_detach;
-       }
-
        err = register_netdev(netdev);
        if (err) {
                mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
-               goto err_devlink_port_unregister;
+               goto err_detach;
        }
 
        mlx5e_devlink_port_type_eth_set(priv);
@@ -5511,8 +5543,6 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
        mlx5e_dcbnl_init_app(priv);
        return priv;
 
-err_devlink_port_unregister:
-       mlx5e_devlink_port_unregister(priv);
 err_detach:
        mlx5e_detach(mdev, priv);
 err_destroy_netdev:
@@ -5533,7 +5563,6 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
        priv = vpriv;
        mlx5e_dcbnl_delete_app(priv);
        unregister_netdev(priv->netdev);
-       mlx5e_devlink_port_unregister(priv);
        mlx5e_detach(mdev, vpriv);
        mlx5e_destroy_netdev(priv);
 }
index 006807e..c300729 100644 (file)
@@ -35,7 +35,6 @@
 #include <net/switchdev.h>
 #include <net/pkt_cls.h>
 #include <net/act_api.h>
-#include <net/arp.h>
 #include <net/devlink.h>
 #include <net/ipv6_stubs.h>
 
@@ -1181,17 +1180,12 @@ is_devlink_port_supported(const struct mlx5_core_dev *dev,
               mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport);
 }
 
-static unsigned int
-vport_to_devlink_port_index(const struct mlx5_core_dev *dev, u16 vport_num)
-{
-       return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
-}
-
 static int register_devlink_port(struct mlx5_core_dev *dev,
                                 struct mlx5e_rep_priv *rpriv)
 {
        struct devlink *devlink = priv_to_devlink(dev);
        struct mlx5_eswitch_rep *rep = rpriv->rep;
+       struct devlink_port_attrs attrs = {};
        struct netdev_phys_item_id ppid = {};
        unsigned int dl_port_index = 0;
        u16 pfnum;
@@ -1200,23 +1194,24 @@ static int register_devlink_port(struct mlx5_core_dev *dev,
                return 0;
 
        mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
-       dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
+       dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, rep->vport);
        pfnum = PCI_FUNC(dev->pdev->devfn);
-
-       if (rep->vport == MLX5_VPORT_UPLINK)
-               devlink_port_attrs_set(&rpriv->dl_port,
-                                      DEVLINK_PORT_FLAVOUR_PHYSICAL,
-                                      pfnum, false, 0,
-                                      &ppid.id[0], ppid.id_len);
-       else if (rep->vport == MLX5_VPORT_PF)
-               devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
-                                             &ppid.id[0], ppid.id_len,
-                                             pfnum);
-       else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport))
+       if (rep->vport == MLX5_VPORT_UPLINK) {
+               attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+               attrs.phys.port_number = pfnum;
+               memcpy(attrs.switch_id.id, &ppid.id[0], ppid.id_len);
+               attrs.switch_id.id_len = ppid.id_len;
+               devlink_port_attrs_set(&rpriv->dl_port, &attrs);
+       } else if (rep->vport == MLX5_VPORT_PF) {
+               memcpy(rpriv->dl_port.attrs.switch_id.id, &ppid.id[0], ppid.id_len);
+               rpriv->dl_port.attrs.switch_id.id_len = ppid.id_len;
+               devlink_port_attrs_pci_pf_set(&rpriv->dl_port, pfnum);
+       } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport)) {
+               memcpy(rpriv->dl_port.attrs.switch_id.id, &ppid.id[0], ppid.id_len);
+               rpriv->dl_port.attrs.switch_id.id_len = ppid.id_len;
                devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
-                                             &ppid.id[0], ppid.id_len,
                                              pfnum, rep->vport - 1);
-
+       }
        return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index);
 }
 
index dbb1c63..74860f3 100644 (file)
@@ -578,6 +578,33 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
        return !!err;
 }
 
+void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
+{
+       u16 sqcc;
+
+       sqcc = sq->cc;
+
+       while (sqcc != sq->pc) {
+               struct mlx5e_icosq_wqe_info *wi;
+               u16 ci;
+
+               ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
+               wi = &sq->db.wqe_info[ci];
+               sqcc += wi->num_wqebbs;
+#ifdef CONFIG_MLX5_EN_TLS
+               switch (wi->wqe_type) {
+               case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
+                       mlx5e_ktls_handle_ctx_completion(wi);
+                       break;
+               case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
+                       mlx5e_ktls_handle_get_psv_completion(wi, sq);
+                       break;
+               }
+#endif
+       }
+       sq->cc = sqcc;
+}
+
 int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 {
        struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
@@ -633,6 +660,16 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
                                break;
                        case MLX5E_ICOSQ_WQE_NOP:
                                break;
+#ifdef CONFIG_MLX5_EN_TLS
+                       case MLX5E_ICOSQ_WQE_UMR_TLS:
+                               break;
+                       case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
+                               mlx5e_ktls_handle_ctx_completion(wi);
+                               break;
+                       case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
+                               mlx5e_ktls_handle_get_psv_completion(wi, sq);
+                               break;
+#endif
                        default:
                                netdev_WARN_ONCE(cq->channel->netdev,
                                                 "Bad WQE type in ICOSQ WQE info: 0x%x\n",
@@ -936,9 +973,14 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                goto csum_unnecessary;
 
        if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
-               if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
+               u8 ipproto = get_ip_proto(skb, network_depth, proto);
+
+               if (unlikely(ipproto == IPPROTO_SCTP))
                        goto csum_unnecessary;
 
+               if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
+                       goto csum_none;
+
                stats->csum_complete++;
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
@@ -982,9 +1024,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 
        skb->mac_len = ETH_HLEN;
 
-#ifdef CONFIG_MLX5_EN_TLS
-       mlx5e_tls_handle_rx_skb(netdev, skb, &cqe_bcnt);
-#endif
+       mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
+
+       if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
+               mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
 
        if (lro_num_seg > 1) {
                mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
@@ -1152,8 +1195,10 @@ static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
        struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
 
        if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
-           !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state))
+           !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) {
+               mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe);
                queue_work(rq->channel->priv->wq, &rq->recover_work);
+       }
 }
 
 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
@@ -1221,7 +1266,10 @@ void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
                goto free_wqe;
        }
 
-       skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+       skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
+                             mlx5e_skb_from_cqe_linear,
+                             mlx5e_skb_from_cqe_nonlinear,
+                             rq, cqe, wi, cqe_bcnt);
        if (!skb) {
                /* probably for XDP */
                if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
index f009fe0..e3b2f59 100644 (file)
@@ -163,6 +163,19 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_congst_umr) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_arfs_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_recover) },
+#ifdef CONFIG_MLX5_EN_TLS
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_decrypted_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_ctx) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_del) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_pkt) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_start) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_end) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_req_skip) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_ok) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_resync_res_skip) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_tls_err) },
+#endif
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_events) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_poll) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, ch_arm) },
@@ -275,6 +288,19 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
                s->rx_congst_umr  += rq_stats->congst_umr;
                s->rx_arfs_err    += rq_stats->arfs_err;
                s->rx_recover     += rq_stats->recover;
+#ifdef CONFIG_MLX5_EN_TLS
+               s->rx_tls_decrypted_packets += rq_stats->tls_decrypted_packets;
+               s->rx_tls_decrypted_bytes   += rq_stats->tls_decrypted_bytes;
+               s->rx_tls_ctx               += rq_stats->tls_ctx;
+               s->rx_tls_del               += rq_stats->tls_del;
+               s->rx_tls_resync_req_pkt    += rq_stats->tls_resync_req_pkt;
+               s->rx_tls_resync_req_start  += rq_stats->tls_resync_req_start;
+               s->rx_tls_resync_req_end    += rq_stats->tls_resync_req_end;
+               s->rx_tls_resync_req_skip   += rq_stats->tls_resync_req_skip;
+               s->rx_tls_resync_res_ok     += rq_stats->tls_resync_res_ok;
+               s->rx_tls_resync_res_skip   += rq_stats->tls_resync_res_skip;
+               s->rx_tls_err               += rq_stats->tls_err;
+#endif
                s->ch_events      += ch_stats->events;
                s->ch_poll        += ch_stats->poll;
                s->ch_arm         += ch_stats->arm;
@@ -1475,6 +1501,19 @@ static const struct counter_desc rq_stats_desc[] = {
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, congst_umr) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, arfs_err) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, recover) },
+#ifdef CONFIG_MLX5_EN_TLS
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_packets) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_decrypted_bytes) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_ctx) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_del) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_pkt) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_start) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_end) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_req_skip) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_ok) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_resync_res_skip) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, tls_err) },
+#endif
 };
 
 static const struct counter_desc sq_stats_desc[] = {
index 2b83ba9..2e1cca1 100644 (file)
@@ -186,6 +186,18 @@ struct mlx5e_sw_stats {
        u64 tx_tls_skip_no_sync_data;
        u64 tx_tls_drop_no_sync_data;
        u64 tx_tls_drop_bypass_req;
+
+       u64 rx_tls_decrypted_packets;
+       u64 rx_tls_decrypted_bytes;
+       u64 rx_tls_ctx;
+       u64 rx_tls_del;
+       u64 rx_tls_resync_req_pkt;
+       u64 rx_tls_resync_req_start;
+       u64 rx_tls_resync_req_end;
+       u64 rx_tls_resync_req_skip;
+       u64 rx_tls_resync_res_ok;
+       u64 rx_tls_resync_res_skip;
+       u64 rx_tls_err;
 #endif
 
        u64 rx_xsk_packets;
@@ -305,6 +317,19 @@ struct mlx5e_rq_stats {
        u64 congst_umr;
        u64 arfs_err;
        u64 recover;
+#ifdef CONFIG_MLX5_EN_TLS
+       u64 tls_decrypted_packets;
+       u64 tls_decrypted_bytes;
+       u64 tls_ctx;
+       u64 tls_del;
+       u64 tls_resync_req_pkt;
+       u64 tls_resync_req_start;
+       u64 tls_resync_req_end;
+       u64 tls_resync_req_skip;
+       u64 tls_resync_res_ok;
+       u64 tls_resync_res_skip;
+       u64 tls_err;
+#endif
 };
 
 struct mlx5e_sq_stats {
index 7fc84f5..7a0c22d 100644 (file)
@@ -63,6 +63,7 @@
 #include "en/tc_tun.h"
 #include "en/mapping.h"
 #include "en/tc_ct.h"
+#include "en/mod_hdr.h"
 #include "lib/devcom.h"
 #include "lib/geneve.h"
 #include "diag/en_tc_tracepoint.h"
@@ -140,8 +141,7 @@ struct mlx5e_tc_flow {
         */
        struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
        struct mlx5e_tc_flow    *peer_flow;
-       struct mlx5e_mod_hdr_entry *mh; /* attached mod header instance */
-       struct list_head        mod_hdr; /* flows sharing the same mod hdr ID */
+       struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
        struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
        struct list_head        hairpin; /* flows sharing the same hairpin */
        struct list_head        peer;    /* flows with peer flow */
@@ -180,17 +180,17 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
        },
        [TUNNEL_TO_REG] = {
                .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
-               .moffset = 3,
-               .mlen = 1,
+               .moffset = 1,
+               .mlen = 3,
                .soffset = MLX5_BYTE_OFF(fte_match_param,
                                         misc_parameters_2.metadata_reg_c_1),
        },
        [ZONE_TO_REG] = zone_to_reg_ct,
+       [ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
        [CTSTATE_TO_REG] = ctstate_to_reg_ct,
        [MARK_TO_REG] = mark_to_reg_ct,
        [LABELS_TO_REG] = labels_to_reg_ct,
        [FTEID_TO_REG] = fteid_to_reg_ct,
-       [TUPLEID_TO_REG] = tupleid_to_reg_ct,
 };
 
 static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
@@ -219,6 +219,28 @@ mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
        spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
 }
 
+void
+mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
+                               enum mlx5e_tc_attr_to_reg type,
+                               u32 *data,
+                               u32 *mask)
+{
+       int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
+       int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
+       void *headers_c = spec->match_criteria;
+       void *headers_v = spec->match_value;
+       void *fmask, *fval;
+
+       fmask = headers_c + soffset;
+       fval = headers_v + soffset;
+
+       memcpy(mask, fmask, match_len);
+       memcpy(data, fval, match_len);
+
+       *mask = be32_to_cpu((__force __be32)(*mask << (32 - (match_len * 8))));
+       *data = be32_to_cpu((__force __be32)(*data << (32 - (match_len * 8))));
+}
+
 int
 mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
                          struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
@@ -287,29 +309,6 @@ struct mlx5e_hairpin_entry {
        struct completion res_ready;
 };
 
-struct mod_hdr_key {
-       int num_actions;
-       void *actions;
-};
-
-struct mlx5e_mod_hdr_entry {
-       /* a node of a hash table which keeps all the mod_hdr entries */
-       struct hlist_node mod_hdr_hlist;
-
-       /* protects flows list */
-       spinlock_t flows_lock;
-       /* flows sharing the same mod_hdr entry */
-       struct list_head flows;
-
-       struct mod_hdr_key key;
-
-       struct mlx5_modify_hdr *modify_hdr;
-
-       refcount_t refcnt;
-       struct completion res_ready;
-       int compl_result;
-};
-
 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow);
 
@@ -386,148 +385,43 @@ static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
        return flow_flag_test(flow, OFFLOADED);
 }
 
-static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
-{
-       return jhash(key->actions,
-                    key->num_actions * MLX5_MH_ACT_SZ, 0);
-}
-
-static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
-                                  struct mod_hdr_key *b)
+static int get_flow_name_space(struct mlx5e_tc_flow *flow)
 {
-       if (a->num_actions != b->num_actions)
-               return 1;
-
-       return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
+       return mlx5e_is_eswitch_flow(flow) ?
+               MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
 }
 
 static struct mod_hdr_tbl *
-get_mod_hdr_table(struct mlx5e_priv *priv, int namespace)
+get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 
-       return namespace == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr :
+       return get_flow_name_space(flow) == MLX5_FLOW_NAMESPACE_FDB ?
+               &esw->offloads.mod_hdr :
                &priv->fs.tc.mod_hdr;
 }
 
-static struct mlx5e_mod_hdr_entry *
-mlx5e_mod_hdr_get(struct mod_hdr_tbl *tbl, struct mod_hdr_key *key, u32 hash_key)
-{
-       struct mlx5e_mod_hdr_entry *mh, *found = NULL;
-
-       hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {
-               if (!cmp_mod_hdr_info(&mh->key, key)) {
-                       refcount_inc(&mh->refcnt);
-                       found = mh;
-                       break;
-               }
-       }
-
-       return found;
-}
-
-static void mlx5e_mod_hdr_put(struct mlx5e_priv *priv,
-                             struct mlx5e_mod_hdr_entry *mh,
-                             int namespace)
-{
-       struct mod_hdr_tbl *tbl = get_mod_hdr_table(priv, namespace);
-
-       if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock))
-               return;
-       hash_del(&mh->mod_hdr_hlist);
-       mutex_unlock(&tbl->lock);
-
-       WARN_ON(!list_empty(&mh->flows));
-       if (mh->compl_result > 0)
-               mlx5_modify_header_dealloc(priv->mdev, mh->modify_hdr);
-
-       kfree(mh);
-}
-
-static int get_flow_name_space(struct mlx5e_tc_flow *flow)
-{
-       return mlx5e_is_eswitch_flow(flow) ?
-               MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
-}
 static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
                                struct mlx5e_tc_flow *flow,
                                struct mlx5e_tc_flow_parse_attr *parse_attr)
 {
-       int num_actions, actions_size, namespace, err;
-       struct mlx5e_mod_hdr_entry *mh;
-       struct mod_hdr_tbl *tbl;
-       struct mod_hdr_key key;
-       u32 hash_key;
-
-       num_actions  = parse_attr->mod_hdr_acts.num_actions;
-       actions_size = MLX5_MH_ACT_SZ * num_actions;
-
-       key.actions = parse_attr->mod_hdr_acts.actions;
-       key.num_actions = num_actions;
-
-       hash_key = hash_mod_hdr_info(&key);
-
-       namespace = get_flow_name_space(flow);
-       tbl = get_mod_hdr_table(priv, namespace);
-
-       mutex_lock(&tbl->lock);
-       mh = mlx5e_mod_hdr_get(tbl, &key, hash_key);
-       if (mh) {
-               mutex_unlock(&tbl->lock);
-               wait_for_completion(&mh->res_ready);
-
-               if (mh->compl_result < 0) {
-                       err = -EREMOTEIO;
-                       goto attach_header_err;
-               }
-               goto attach_flow;
-       }
-
-       mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
-       if (!mh) {
-               mutex_unlock(&tbl->lock);
-               return -ENOMEM;
-       }
-
-       mh->key.actions = (void *)mh + sizeof(*mh);
-       memcpy(mh->key.actions, key.actions, actions_size);
-       mh->key.num_actions = num_actions;
-       spin_lock_init(&mh->flows_lock);
-       INIT_LIST_HEAD(&mh->flows);
-       refcount_set(&mh->refcnt, 1);
-       init_completion(&mh->res_ready);
-
-       hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
-       mutex_unlock(&tbl->lock);
+       struct mlx5_modify_hdr *modify_hdr;
+       struct mlx5e_mod_hdr_handle *mh;
 
-       mh->modify_hdr = mlx5_modify_header_alloc(priv->mdev, namespace,
-                                                 mh->key.num_actions,
-                                                 mh->key.actions);
-       if (IS_ERR(mh->modify_hdr)) {
-               err = PTR_ERR(mh->modify_hdr);
-               mh->compl_result = err;
-               goto alloc_header_err;
-       }
-       mh->compl_result = 1;
-       complete_all(&mh->res_ready);
+       mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
+                                 get_flow_name_space(flow),
+                                 &parse_attr->mod_hdr_acts);
+       if (IS_ERR(mh))
+               return PTR_ERR(mh);
 
-attach_flow:
-       flow->mh = mh;
-       spin_lock(&mh->flows_lock);
-       list_add(&flow->mod_hdr, &mh->flows);
-       spin_unlock(&mh->flows_lock);
+       modify_hdr = mlx5e_mod_hdr_get(mh);
        if (mlx5e_is_eswitch_flow(flow))
-               flow->esw_attr->modify_hdr = mh->modify_hdr;
+               flow->esw_attr->modify_hdr = modify_hdr;
        else
-               flow->nic_attr->modify_hdr = mh->modify_hdr;
+               flow->nic_attr->modify_hdr = modify_hdr;
+       flow->mh = mh;
 
        return 0;
-
-alloc_header_err:
-       complete_all(&mh->res_ready);
-attach_header_err:
-       mlx5e_mod_hdr_put(priv, mh, namespace);
-       return err;
 }
 
 static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
@@ -537,11 +431,8 @@ static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
        if (!flow->mh)
                return;
 
-       spin_lock(&flow->mh->flows_lock);
-       list_del(&flow->mod_hdr);
-       spin_unlock(&flow->mh->flows_lock);
-
-       mlx5e_mod_hdr_put(priv, flow->mh, get_flow_name_space(flow));
+       mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
+                            flow->mh);
        flow->mh = NULL;
 }
 
@@ -3086,6 +2977,7 @@ struct ipv6_hoplimit_word {
 
 static int is_action_keys_supported(const struct flow_action_entry *act,
                                    bool ct_flow, bool *modify_ip_header,
+                                   bool *modify_tuple,
                                    struct netlink_ext_ack *extack)
 {
        u32 mask, offset;
@@ -3108,7 +3000,10 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
                        *modify_ip_header = true;
                }
 
-               if (ct_flow && offset >= offsetof(struct iphdr, saddr)) {
+               if (offset >= offsetof(struct iphdr, saddr))
+                       *modify_tuple = true;
+
+               if (ct_flow && *modify_tuple) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "can't offload re-write of ipv4 address with action ct");
                        return -EOPNOTSUPP;
@@ -3123,28 +3018,36 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
                        *modify_ip_header = true;
                }
 
-               if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr)) {
+               if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr))
+                       *modify_tuple = true;
+
+               if (ct_flow && *modify_tuple) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "can't offload re-write of ipv6 address with action ct");
                        return -EOPNOTSUPP;
                }
-       } else if (ct_flow && (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
-                              htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP)) {
-               NL_SET_ERR_MSG_MOD(extack,
-                                  "can't offload re-write of transport header ports with action ct");
-               return -EOPNOTSUPP;
+       } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
+                  htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
+               *modify_tuple = true;
+               if (ct_flow) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "can't offload re-write of transport header ports with action ct");
+                       return -EOPNOTSUPP;
+               }
        }
 
        return 0;
 }
 
-static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
+static bool modify_header_match_supported(struct mlx5e_priv *priv,
+                                         struct mlx5_flow_spec *spec,
                                          struct flow_action *flow_action,
                                          u32 actions, bool ct_flow,
+                                         bool ct_clear,
                                          struct netlink_ext_ack *extack)
 {
        const struct flow_action_entry *act;
-       bool modify_ip_header;
+       bool modify_ip_header, modify_tuple;
        void *headers_c;
        void *headers_v;
        u16 ethertype;
@@ -3161,23 +3064,39 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
                goto out_ok;
 
        modify_ip_header = false;
+       modify_tuple = false;
        flow_action_for_each(i, act, flow_action) {
                if (act->id != FLOW_ACTION_MANGLE &&
                    act->id != FLOW_ACTION_ADD)
                        continue;
 
                err = is_action_keys_supported(act, ct_flow,
-                                              &modify_ip_header, extack);
+                                              &modify_ip_header,
+                                              &modify_tuple, extack);
                if (err)
                        return err;
        }
 
+       /* Add ct_state=-trk match so it will be offloaded for non ct flows
+        * (or after clear action), as otherwise, since the tuple is changed,
+        *  we can't restore ct state
+        */
+       if (!ct_clear && modify_tuple &&
+           mlx5_tc_ct_add_no_trk_match(priv, spec)) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "can't offload tuple modify header with ct matches");
+               netdev_info(priv->netdev,
+                           "can't offload tuple modify header with ct matches");
+               return false;
+       }
+
        ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
        if (modify_ip_header && ip_proto != IPPROTO_TCP &&
            ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "can't offload re-write of non TCP/UDP");
-               pr_info("can't offload re-write of ip proto %d\n", ip_proto);
+               netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
+                           ip_proto);
                return false;
        }
 
@@ -3191,13 +3110,14 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
                                    struct mlx5e_tc_flow *flow,
                                    struct netlink_ext_ack *extack)
 {
-       bool ct_flow;
+       bool ct_flow = false, ct_clear = false;
        u32 actions;
 
-       ct_flow = flow_flag_test(flow, CT);
        if (mlx5e_is_eswitch_flow(flow)) {
                actions = flow->esw_attr->action;
-
+               ct_clear = flow->esw_attr->ct_attr.ct_action &
+                          TCA_CT_ACT_CLEAR;
+               ct_flow = flow_flag_test(flow, CT) && !ct_clear;
                if (flow->esw_attr->split_count && ct_flow) {
                        /* All registers used by ct are cleared when using
                         * split rules.
@@ -3211,9 +3131,10 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
        }
 
        if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
-               return modify_header_match_supported(&parse_attr->spec,
+               return modify_header_match_supported(priv, &parse_attr->spec,
                                                     flow_action, actions,
-                                                    ct_flow, extack);
+                                                    ct_flow, ct_clear,
+                                                    extack);
 
        return true;
 }
@@ -4408,7 +4329,6 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
        flow->priv = priv;
        for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
                INIT_LIST_HEAD(&flow->encaps[out_index].list);
-       INIT_LIST_HEAD(&flow->mod_hdr);
        INIT_LIST_HEAD(&flow->hairpin);
        INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
        refcount_set(&flow->refcnt, 1);
@@ -4480,11 +4400,13 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
        if (err)
                goto err_free;
 
-       err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev);
+       /* actions validation depends on parsing the ct matches first */
+       err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f,
+                                    &flow->esw_attr->ct_attr, extack);
        if (err)
                goto err_free;
 
-       err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, extack);
+       err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev);
        if (err)
                goto err_free;
 
@@ -4670,9 +4592,10 @@ static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
                                           struct mlx5e_rep_priv *rpriv)
 {
        /* Offloaded flow rule is allowed to duplicate on non-uplink representor
-        * sharing tc block with other slaves of a lag device.
+        * sharing tc block with other slaves of a lag device. Rpriv can be NULL if this
+        * function is called from NIC mode.
         */
-       return netif_is_lag_port(dev) && rpriv->rep->vport != MLX5_VPORT_UPLINK;
+       return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
 }
 
 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
@@ -4686,13 +4609,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
 
        rcu_read_lock();
        flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
-       rcu_read_unlock();
        if (flow) {
                /* Same flow rule offloaded to non-uplink representor sharing tc block,
                 * just return 0.
                 */
                if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
-                       goto out;
+                       goto rcu_unlock;
 
                NL_SET_ERR_MSG_MOD(extack,
                                   "flow cookie already exists, ignoring");
@@ -4700,8 +4622,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
                                 "flow cookie %lx already exists, ignoring\n",
                                 f->cookie);
                err = -EEXIST;
-               goto out;
+               goto rcu_unlock;
        }
+rcu_unlock:
+       rcu_read_unlock();
+       if (flow)
+               goto out;
 
        trace_mlx5e_configure_flower(f);
        err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
@@ -4828,7 +4754,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
 no_peer_counter:
        mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
 out:
-       flow_stats_update(&f->stats, bytes, packets, lastuse,
+       flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
                          FLOW_ACTION_HW_STATS_DELAYED);
        trace_mlx5e_stats_flower(f);
 errout:
@@ -4946,7 +4872,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
        dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
        dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
        rpriv->prev_vf_vport_stats = cur_stats;
-       flow_stats_update(&ma->stats, dbytes, dpkts, jiffies,
+       flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
                          FLOW_ACTION_HW_STATS_DELAYED);
 }
 
@@ -5011,9 +4937,8 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
        struct mlx5e_tc_table *tc = &priv->fs.tc;
        int err;
 
+       mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
        mutex_init(&tc->t_lock);
-       mutex_init(&tc->mod_hdr.lock);
-       hash_init(tc->mod_hdr.hlist);
        mutex_init(&tc->hairpin_tbl_lock);
        hash_init(tc->hairpin_tbl);
 
@@ -5051,7 +4976,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
                                                      &tc->netdevice_nb,
                                                      &tc->netdevice_nn);
 
-       mutex_destroy(&tc->mod_hdr.lock);
+       mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
        mutex_destroy(&tc->hairpin_tbl_lock);
 
        rhashtable_destroy(&tc->ht);
index 5c330b0..437f680 100644 (file)
 
 #ifdef CONFIG_MLX5_ESWITCH
 
+int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
+
+struct mlx5e_tc_update_priv {
+       struct net_device *tun_dev;
+};
+
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
 struct tunnel_match_key {
        struct flow_dissector_key_control enc_control;
        struct flow_dissector_key_keyid enc_key_id;
@@ -62,9 +70,9 @@ struct tunnel_match_enc_opts {
  * Upper TUNNEL_INFO_BITS for general tunnel info.
  * Lower ENC_OPTS_BITS bits for enc_opts.
  */
-#define TUNNEL_INFO_BITS 6
+#define TUNNEL_INFO_BITS 12
 #define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
-#define ENC_OPTS_BITS 2
+#define ENC_OPTS_BITS 12
 #define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
 #define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
 #define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
@@ -114,8 +122,6 @@ void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_l
 struct mlx5e_neigh_hash_entry;
 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
 
-int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
-
 void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
 
 enum mlx5e_tc_attr_to_reg {
@@ -123,10 +129,10 @@ enum mlx5e_tc_attr_to_reg {
        TUNNEL_TO_REG,
        CTSTATE_TO_REG,
        ZONE_TO_REG,
+       ZONE_RESTORE_TO_REG,
        MARK_TO_REG,
        LABELS_TO_REG,
        FTEID_TO_REG,
-       TUPLEID_TO_REG,
 };
 
 struct mlx5e_tc_attr_to_reg_mapping {
@@ -142,16 +148,6 @@ extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];
 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
                                    struct net_device *out_dev);
 
-struct mlx5e_tc_update_priv {
-       struct net_device *tun_dev;
-};
-
-struct mlx5e_tc_mod_hdr_acts {
-       int num_actions;
-       int max_actions;
-       void *actions;
-};
-
 int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
                              struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
                              enum mlx5e_tc_attr_to_reg type,
@@ -162,6 +158,11 @@ void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
                                 u32 data,
                                 u32 mask);
 
+void mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
+                                    enum mlx5e_tc_attr_to_reg type,
+                                    u32 *data,
+                                    u32 *mask);
+
 int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
                          int namespace,
                          struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
@@ -174,8 +175,6 @@ void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
                            struct flow_match_basic *match, bool outer,
                            void *headers_c, void *headers_v);
 
-#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
-
 int mlx5e_tc_nic_init(struct mlx5e_priv *priv);
 void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
 
index 6d40606..da596de 100644 (file)
@@ -38,7 +38,6 @@
 #include "en/txrx.h"
 #include "ipoib/ipoib.h"
 #include "en_accel/en_accel.h"
-#include "en_accel/ktls.h"
 #include "lib/clock.h"
 
 static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
index 8480278..e3dbab2 100644 (file)
@@ -149,17 +149,17 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
        }
 
        mlx5e_poll_ico_cq(&c->icosq.cq);
+       if (mlx5e_poll_ico_cq(&c->async_icosq.cq))
+               /* Don't clear the flag if nothing was polled to prevent
+                * queueing more WQEs and overflowing the async ICOSQ.
+                */
+               clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state);
 
        busy |= INDIRECT_CALL_2(rq->post_wqes,
                                mlx5e_post_rx_mpwqes,
                                mlx5e_post_rx_wqes,
                                rq);
        if (xsk_open) {
-               if (mlx5e_poll_ico_cq(&c->xskicosq.cq))
-                       /* Don't clear the flag if nothing was polled to prevent
-                        * queueing more WQEs and overflowing XSKICOSQ.
-                        */
-                       clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state);
                busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq);
                busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq);
        }
@@ -189,11 +189,11 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
 
        mlx5e_cq_arm(&rq->cq);
        mlx5e_cq_arm(&c->icosq.cq);
+       mlx5e_cq_arm(&c->async_icosq.cq);
        mlx5e_cq_arm(&c->xdpsq.cq);
 
        if (xsk_open) {
                mlx5e_handle_rx_dim(xskrq);
-               mlx5e_cq_arm(&c->xskicosq.cq);
                mlx5e_cq_arm(&xsksq->cq);
                mlx5e_cq_arm(&xskrq->cq);
        }
index 5dc335e..b68976b 100644 (file)
@@ -217,7 +217,6 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
        }
 
        /* Create ingress allow rule */
-       memset(spec, 0, sizeof(*spec));
        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
        vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
index 1116ab9..e8f900e 100644 (file)
@@ -42,6 +42,7 @@
 #include "fs_core.h"
 #include "devlink.h"
 #include "ecpf.h"
+#include "en/mod_hdr.h"
 
 enum {
        MLX5_ACTION_NONE = 0,
@@ -63,6 +64,29 @@ struct vport_addr {
 static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
 static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
 
+static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
+{
+       if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+               return -EOPNOTSUPP;
+
+       if (!MLX5_ESWITCH_MANAGER(dev))
+               return -EOPNOTSUPP;
+
+       return 0;
+}
+
+struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink)
+{
+       struct mlx5_core_dev *dev = devlink_priv(devlink);
+       int err;
+
+       err = mlx5_eswitch_check(dev);
+       if (err)
+               return ERR_PTR(err);
+
+       return dev->priv.eswitch;
+}
+
 struct mlx5_vport *__must_check
 mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
 {
@@ -1127,7 +1151,7 @@ int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
                                                  MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW);
 }
 
-static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
+static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac)
 {
        ((u8 *)node_guid)[7] = mac[0];
        ((u8 *)node_guid)[6] = mac[1];
@@ -1628,7 +1652,17 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
                return 0;
 
        mutex_lock(&esw->mode_lock);
-       ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
+       if (esw->mode == MLX5_ESWITCH_NONE) {
+               ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
+       } else {
+               enum mlx5_eswitch_vport_event vport_events;
+
+               vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ?
+                                       MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE;
+               ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events);
+               if (!ret)
+                       esw->esw_funcs.num_vfs = num_vfs;
+       }
        mutex_unlock(&esw->mode_lock);
        return ret;
 }
@@ -1675,6 +1709,7 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
 
        mutex_lock(&esw->mode_lock);
        mlx5_eswitch_disable_locked(esw, clear_vf);
+       esw->esw_funcs.num_vfs = 0;
        mutex_unlock(&esw->mode_lock);
 }
 
@@ -1725,10 +1760,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 
        mutex_init(&esw->offloads.encap_tbl_lock);
        hash_init(esw->offloads.encap_tbl);
-       mutex_init(&esw->offloads.mod_hdr.lock);
-       hash_init(esw->offloads.mod_hdr.hlist);
        mutex_init(&esw->offloads.decap_tbl_lock);
        hash_init(esw->offloads.decap_tbl);
+       mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr);
        atomic64_set(&esw->offloads.num_flows, 0);
        ida_init(&esw->offloads.vport_metadata_ida);
        mutex_init(&esw->state_lock);
@@ -1770,7 +1804,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
        mutex_destroy(&esw->mode_lock);
        mutex_destroy(&esw->state_lock);
        ida_destroy(&esw->offloads.vport_metadata_ida);
-       mutex_destroy(&esw->offloads.mod_hdr.lock);
+       mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
        mutex_destroy(&esw->offloads.encap_tbl_lock);
        mutex_destroy(&esw->offloads.decap_tbl_lock);
        kfree(esw->vports);
@@ -1778,46 +1812,135 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 }
 
 /* Vport Administration */
-int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
-                              u16 vport, u8 mac[ETH_ALEN])
+static int
+mlx5_esw_set_vport_mac_locked(struct mlx5_eswitch *esw,
+                             struct mlx5_vport *evport, const u8 *mac)
 {
-       struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+       u16 vport_num = evport->vport;
        u64 node_guid;
        int err = 0;
 
-       if (IS_ERR(evport))
-               return PTR_ERR(evport);
        if (is_multicast_ether_addr(mac))
                return -EINVAL;
 
-       mutex_lock(&esw->state_lock);
-
        if (evport->info.spoofchk && !is_valid_ether_addr(mac))
                mlx5_core_warn(esw->dev,
                               "Set invalid MAC while spoofchk is on, vport(%d)\n",
-                              vport);
+                              vport_num);
 
-       err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
+       err = mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, mac);
        if (err) {
                mlx5_core_warn(esw->dev,
                               "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
-                              vport, err);
-               goto unlock;
+                              vport_num, err);
+               return err;
        }
 
        node_guid_gen_from_mac(&node_guid, mac);
-       err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
+       err = mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, node_guid);
        if (err)
                mlx5_core_warn(esw->dev,
                               "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
-                              vport, err);
+                              vport_num, err);
 
        ether_addr_copy(evport->info.mac, mac);
        evport->info.node_guid = node_guid;
        if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
                err = esw_acl_ingress_lgcy_setup(esw, evport);
 
-unlock:
+       return err;
+}
+
+int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
+                              u16 vport, const u8 *mac)
+{
+       struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+       int err = 0;
+
+       if (IS_ERR(evport))
+               return PTR_ERR(evport);
+
+       mutex_lock(&esw->state_lock);
+       err = mlx5_esw_set_vport_mac_locked(esw, evport, mac);
+       mutex_unlock(&esw->state_lock);
+       return err;
+}
+
+static bool
+is_port_function_supported(const struct mlx5_eswitch *esw, u16 vport_num)
+{
+       return vport_num == MLX5_VPORT_PF ||
+              mlx5_eswitch_is_vf_vport(esw, vport_num);
+}
+
+int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
+                                          struct devlink_port *port,
+                                          u8 *hw_addr, int *hw_addr_len,
+                                          struct netlink_ext_ack *extack)
+{
+       struct mlx5_eswitch *esw;
+       struct mlx5_vport *vport;
+       int err = -EOPNOTSUPP;
+       u16 vport_num;
+
+       esw = mlx5_devlink_eswitch_get(devlink);
+       if (IS_ERR(esw))
+               return PTR_ERR(esw);
+
+       vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+       if (!is_port_function_supported(esw, vport_num))
+               return -EOPNOTSUPP;
+
+       vport = mlx5_eswitch_get_vport(esw, vport_num);
+       if (IS_ERR(vport)) {
+               NL_SET_ERR_MSG_MOD(extack, "Invalid port");
+               return PTR_ERR(vport);
+       }
+
+       mutex_lock(&esw->state_lock);
+       if (vport->enabled) {
+               ether_addr_copy(hw_addr, vport->info.mac);
+               *hw_addr_len = ETH_ALEN;
+               err = 0;
+       } else {
+               NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
+       }
+       mutex_unlock(&esw->state_lock);
+       return err;
+}
+
+int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink,
+                                          struct devlink_port *port,
+                                          const u8 *hw_addr, int hw_addr_len,
+                                          struct netlink_ext_ack *extack)
+{
+       struct mlx5_eswitch *esw;
+       struct mlx5_vport *vport;
+       int err = -EOPNOTSUPP;
+       u16 vport_num;
+
+       esw = mlx5_devlink_eswitch_get(devlink);
+       if (IS_ERR(esw)) {
+               NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
+               return PTR_ERR(esw);
+       }
+
+       vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
+       if (!is_port_function_supported(esw, vport_num)) {
+               NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
+               return -EINVAL;
+       }
+       vport = mlx5_eswitch_get_vport(esw, vport_num);
+       if (IS_ERR(vport)) {
+               NL_SET_ERR_MSG_MOD(extack, "Invalid port");
+               return PTR_ERR(vport);
+       }
+
+       mutex_lock(&esw->state_lock);
+       if (vport->enabled)
+               err = mlx5_esw_set_vport_mac_locked(esw, vport, hw_addr);
+       else
+               NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
        mutex_unlock(&esw->state_lock);
        return err;
 }
index a5175e9..b68e02a 100644 (file)
 #include "lib/mpfs.h"
 #include "en/tc_ct.h"
 
-#define FDB_TC_MAX_CHAIN 3
-#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
-#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1)
-
-/* The index of the last real chain (FT) + 1 as chain zero is valid as well */
-#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1)
-
-#define FDB_TC_MAX_PRIO 16
-#define FDB_TC_LEVELS_PER_PRIO 2
-
 #ifdef CONFIG_MLX5_ESWITCH
 
 #define ESW_OFFLOADS_DEFAULT_NUM_GROUPS 15
@@ -311,7 +301,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
 void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf);
 void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf);
 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
-                              u16 vport, u8 mac[ETH_ALEN]);
+                              u16 vport, const u8 *mac);
 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
                                 u16 vport, int link_state);
 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
@@ -450,6 +440,15 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
                                        struct netlink_ext_ack *extack);
 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
                                        enum devlink_eswitch_encap_mode *encap);
+int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
+                                          struct devlink_port *port,
+                                          u8 *hw_addr, int *hw_addr_len,
+                                          struct netlink_ext_ack *extack);
+int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink,
+                                          struct devlink_port *port,
+                                          const u8 *hw_addr, int hw_addr_len,
+                                          struct netlink_ext_ack *extack);
+
 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
 
 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
@@ -514,16 +513,9 @@ static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
                MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
 }
 
-static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev)
+static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
 {
-       /* Ideally device should have the functions changed supported
-        * capability regardless of it being ECPF or PF wherever such
-        * event should be processed such as on eswitch manager device.
-        * However, some ECPF based device might not have this capability
-        * set. Hence OR for ECPF check to cover such device.
-        */
-       return MLX5_CAP_ESW(dev, esw_functions_changed) ||
-              mlx5_core_is_ecpf_esw_manager(dev);
+       return mlx5_core_is_ecpf_esw_manager(dev);
 }
 
 static inline int mlx5_eswitch_uplink_idx(struct mlx5_eswitch *esw)
@@ -565,6 +557,19 @@ static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
        return index;
 }
 
+static inline unsigned int
+mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
+                                    u16 vport_num)
+{
+       return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
+}
+
+static inline u16
+mlx5_esw_devlink_port_index_to_vport_num(unsigned int dl_port_index)
+{
+       return dl_port_index & 0xffff;
+}
+
 /* TODO: This mlx5e_tc function shouldn't be called by eswitch */
 void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
 
@@ -634,6 +639,7 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
        for ((vport) = (nvfs);                                          \
             (vport) >= (esw)->first_host_vport; (vport)--)
 
+struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
 struct mlx5_vport *__must_check
 mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
 
index 060354b..db856d7 100644 (file)
@@ -1578,13 +1578,6 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
 {
        int err, err1;
 
-       if (esw->mode != MLX5_ESWITCH_LEGACY &&
-           !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
-               NL_SET_ERR_MSG_MOD(extack,
-                                  "Can't set offloads mode, SRIOV legacy not enabled");
-               return -EINVAL;
-       }
-
        mlx5_eswitch_disable_locked(esw, false);
        err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
                                         esw->dev->priv.sriov.num_vfs);
@@ -2279,17 +2272,6 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
        return 0;
 }
 
-static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
-{
-       if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-               return -EOPNOTSUPP;
-
-       if(!MLX5_ESWITCH_MANAGER(dev))
-               return -EPERM;
-
-       return 0;
-}
-
 static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
 {
        /* devlink commands in NONE eswitch mode are currently supported only
@@ -2302,25 +2284,19 @@ static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
                                  struct netlink_ext_ack *extack)
 {
-       struct mlx5_core_dev *dev = devlink_priv(devlink);
-       struct mlx5_eswitch *esw = dev->priv.eswitch;
        u16 cur_mlx5_mode, mlx5_mode = 0;
-       int err;
+       struct mlx5_eswitch *esw;
+       int err = 0;
 
-       err = mlx5_eswitch_check(dev);
-       if (err)
-               return err;
+       esw = mlx5_devlink_eswitch_get(devlink);
+       if (IS_ERR(esw))
+               return PTR_ERR(esw);
 
        if (esw_mode_from_devlink(mode, &mlx5_mode))
                return -EINVAL;
 
        mutex_lock(&esw->mode_lock);
-       err = eswitch_devlink_esw_mode_check(esw);
-       if (err)
-               goto unlock;
-
        cur_mlx5_mode = esw->mode;
-
        if (cur_mlx5_mode == mlx5_mode)
                goto unlock;
 
@@ -2338,16 +2314,15 @@ unlock:
 
 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 {
-       struct mlx5_core_dev *dev = devlink_priv(devlink);
-       struct mlx5_eswitch *esw = dev->priv.eswitch;
+       struct mlx5_eswitch *esw;
        int err;
 
-       err = mlx5_eswitch_check(dev);
-       if (err)
-               return err;
+       esw = mlx5_devlink_eswitch_get(devlink);
+       if (IS_ERR(esw))
+               return PTR_ERR(esw);
 
        mutex_lock(&esw->mode_lock);
-       err = eswitch_devlink_esw_mode_check(dev->priv.eswitch);
+       err = eswitch_devlink_esw_mode_check(esw);
        if (err)
                goto unlock;
 
@@ -2361,13 +2336,13 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
                                         struct netlink_ext_ack *extack)
 {
        struct mlx5_core_dev *dev = devlink_priv(devlink);
-       struct mlx5_eswitch *esw = dev->priv.eswitch;
        int err, vport, num_vport;
+       struct mlx5_eswitch *esw;
        u8 mlx5_mode;
 
-       err = mlx5_eswitch_check(dev);
-       if (err)
-               return err;
+       esw = mlx5_devlink_eswitch_get(devlink);
+       if (IS_ERR(esw))
+               return PTR_ERR(esw);
 
        mutex_lock(&esw->mode_lock);
        err = eswitch_devlink_esw_mode_check(esw);
@@ -2424,13 +2399,12 @@ out:
 
 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
 {
-       struct mlx5_core_dev *dev = devlink_priv(devlink);
-       struct mlx5_eswitch *esw = dev->priv.eswitch;
+       struct mlx5_eswitch *esw;
        int err;
 
-       err = mlx5_eswitch_check(dev);
-       if (err)
-               return err;
+       esw = mlx5_devlink_eswitch_get(devlink);
+       if (IS_ERR(esw))
+               return PTR_ERR(esw);
 
        mutex_lock(&esw->mode_lock);
        err = eswitch_devlink_esw_mode_check(esw);
@@ -2448,12 +2422,12 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
                                        struct netlink_ext_ack *extack)
 {
        struct mlx5_core_dev *dev = devlink_priv(devlink);
-       struct mlx5_eswitch *esw = dev->priv.eswitch;
+       struct mlx5_eswitch *esw;
        int err;
 
-       err = mlx5_eswitch_check(dev);
-       if (err)
-               return err;
+       esw = mlx5_devlink_eswitch_get(devlink);
+       if (IS_ERR(esw))
+               return PTR_ERR(esw);
 
        mutex_lock(&esw->mode_lock);
        err = eswitch_devlink_esw_mode_check(esw);
@@ -2508,13 +2482,13 @@ unlock:
 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
                                        enum devlink_eswitch_encap_mode *encap)
 {
-       struct mlx5_core_dev *dev = devlink_priv(devlink);
-       struct mlx5_eswitch *esw = dev->priv.eswitch;
+       struct mlx5_eswitch *esw;
        int err;
 
-       err = mlx5_eswitch_check(dev);
-       if (err)
-               return err;
+       esw = mlx5_devlink_eswitch_get(devlink);
+       if (IS_ERR(esw))
+               return PTR_ERR(esw);
+
 
        mutex_lock(&esw->mode_lock);
        err = eswitch_devlink_esw_mode_check(esw);
index b463787..cc67366 100644 (file)
@@ -359,7 +359,7 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
        return ret;
 }
 
-unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
+static unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
 {
        struct mlx5_fpga_device *fdev = mdev->fpga;
 
@@ -370,8 +370,8 @@ unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
                        number_of_ipsec_counters);
 }
 
-int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
-                                 unsigned int counters_count)
+static int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
+                                        unsigned int counters_count)
 {
        struct mlx5_fpga_device *fdev = mdev->fpga;
        unsigned int i;
@@ -665,12 +665,10 @@ static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
        return true;
 }
 
-void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
-                                   struct mlx5_accel_esp_xfrm *accel_xfrm,
-                                   const __be32 saddr[4],
-                                   const __be32 daddr[4],
-                                   const __be32 spi, bool is_ipv6,
-                                   u32 *sa_handle)
+static void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
+                                          struct mlx5_accel_esp_xfrm *accel_xfrm,
+                                          const __be32 saddr[4], const __be32 daddr[4],
+                                          const __be32 spi, bool is_ipv6, u32 *sa_handle)
 {
        struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
        struct mlx5_fpga_esp_xfrm *fpga_xfrm =
@@ -862,7 +860,7 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
        mutex_unlock(&fipsec->sa_hash_lock);
 }
 
-void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
+static void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
 {
        struct mlx5_fpga_esp_xfrm *fpga_xfrm =
                        ((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm;
@@ -1264,7 +1262,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flo
        }
 }
 
-int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
+static int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
 {
        struct mlx5_fpga_conn_attr init_attr = {0};
        struct mlx5_fpga_device *fdev = mdev->fpga;
@@ -1346,7 +1344,7 @@ static void destroy_rules_rb(struct rb_root *root)
        }
 }
 
-void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
+static void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
 {
        struct mlx5_fpga_device *fdev = mdev->fpga;
 
@@ -1451,7 +1449,7 @@ mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
        return 0;
 }
 
-struct mlx5_accel_esp_xfrm *
+static struct mlx5_accel_esp_xfrm *
 mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
                          const struct mlx5_accel_esp_xfrm_attrs *attrs,
                          u32 flags)
@@ -1479,7 +1477,7 @@ mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
        return &fpga_xfrm->accel_xfrm;
 }
 
-void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
+static void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
 {
        struct mlx5_fpga_esp_xfrm *fpga_xfrm =
                        container_of(xfrm, struct mlx5_fpga_esp_xfrm,
@@ -1488,8 +1486,8 @@ void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
        kfree(fpga_xfrm);
 }
 
-int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
-                             const struct mlx5_accel_esp_xfrm_attrs *attrs)
+static int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
+                                    const struct mlx5_accel_esp_xfrm_attrs *attrs)
 {
        struct mlx5_core_dev *mdev = xfrm->mdev;
        struct mlx5_fpga_device *fdev = mdev->fpga;
@@ -1560,3 +1558,24 @@ change_sw_xfrm_attrs:
        mutex_unlock(&fpga_xfrm->lock);
        return err;
 }
+
+static const struct mlx5_accel_ipsec_ops fpga_ipsec_ops = {
+       .device_caps = mlx5_fpga_ipsec_device_caps,
+       .counters_count = mlx5_fpga_ipsec_counters_count,
+       .counters_read = mlx5_fpga_ipsec_counters_read,
+       .create_hw_context = mlx5_fpga_ipsec_create_sa_ctx,
+       .free_hw_context = mlx5_fpga_ipsec_delete_sa_ctx,
+       .init = mlx5_fpga_ipsec_init,
+       .cleanup = mlx5_fpga_ipsec_cleanup,
+       .esp_create_xfrm = mlx5_fpga_esp_create_xfrm,
+       .esp_modify_xfrm = mlx5_fpga_esp_modify_xfrm,
+       .esp_destroy_xfrm = mlx5_fpga_esp_destroy_xfrm,
+};
+
+const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
+{
+       if (!mlx5_fpga_is_ipsec_device(mdev))
+               return NULL;
+
+       return &fpga_ipsec_ops;
+}
index 9ba637f..db88eb4 100644 (file)
 #include "fs_cmd.h"
 
 #ifdef CONFIG_MLX5_FPGA_IPSEC
+const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev);
 u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
-unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev);
-int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
-                                 unsigned int counters_count);
-
-void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
-                                   struct mlx5_accel_esp_xfrm *accel_xfrm,
-                                   const __be32 saddr[4],
-                                   const __be32 daddr[4],
-                                   const __be32 spi, bool is_ipv6,
-                                   u32 *sa_handle);
-void mlx5_fpga_ipsec_delete_sa_ctx(void *context);
-
-int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev);
-void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev);
-void mlx5_fpga_ipsec_build_fs_cmds(void);
-
-struct mlx5_accel_esp_xfrm *
-mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
-                         const struct mlx5_accel_esp_xfrm_attrs *attrs,
-                         u32 flags);
-void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm);
-int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
-                             const struct mlx5_accel_esp_xfrm_attrs *attrs);
-
 const struct mlx5_flow_cmds *
 mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
+void mlx5_fpga_ipsec_build_fs_cmds(void);
 #else
-static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
-{
-       return 0;
-}
-
+static inline
+const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
+{ return NULL; }
+static inline u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev) { return 0; }
 static inline const struct mlx5_flow_cmds *
 mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
 {
        return mlx5_fs_cmd_get_default(type);
 }
 
+static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {};
+
 #endif /* CONFIG_MLX5_FPGA_IPSEC */
 #endif /* __MLX5_FPGA_IPSEC_H__ */
index 465a107..fee1697 100644 (file)
@@ -459,6 +459,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
                MLX5_SET(flow_context, in_flow_context, modify_header_id,
                         fte->action.modify_hdr->id);
 
+       MLX5_SET(flow_context, in_flow_context, ipsec_obj_id, fte->action.ipsec_obj_id);
+
        vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
 
        MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
index 13e2fb7..6904ad9 100644 (file)
@@ -41,7 +41,6 @@
 #include "diag/fs_tracepoint.h"
 #include "accel/ipsec.h"
 #include "fpga/ipsec.h"
-#include "eswitch.h"
 
 #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
                                         sizeof(struct init_tree_node))
 #define ETHTOOL_PRIO_NUM_LEVELS 1
 #define ETHTOOL_NUM_PRIOS 11
 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
-/* Vlan, mac, ttc, inner ttc, aRFS */
-#define KERNEL_NIC_PRIO_NUM_LEVELS 5
+/* Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */
+#define KERNEL_NIC_PRIO_NUM_LEVELS 6
 #define KERNEL_NIC_NUM_PRIOS 1
 /* One more level for tc */
 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
index 825b662..afe7f0b 100644 (file)
 #include <linux/llist.h>
 #include <steering/fs_dr.h>
 
+#define FDB_TC_MAX_CHAIN 3
+#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
+#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1)
+
+/* The index of the last real chain (FT) + 1 as chain zero is valid as well */
+#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1)
+
+#define FDB_TC_MAX_PRIO 16
+#define FDB_TC_LEVELS_PER_PRIO 2
+
 struct mlx5_modify_hdr {
        enum mlx5_flow_namespace_type ns_type;
        union {
index a5fbe73..02558ac 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/module.h>
 #include "mlx5_core.h"
 #include "../../mlxfw/mlxfw.h"
+#include "accel/tls.h"
 
 enum {
        MCQS_IDENTIFIER_BOOT_IMG        = 0x1,
@@ -236,7 +237,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
                        return err;
        }
 
-       if (MLX5_CAP_GEN(dev, tls_tx)) {
+       if (mlx5_accel_is_ktls_tx(dev) || mlx5_accel_is_ktls_rx(dev)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
                if (err)
                        return err;
@@ -249,6 +250,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
                        return err;
        }
 
+       if (MLX5_CAP_GEN(dev, ipsec_offload)) {
+               err = mlx5_core_get_caps(dev, MLX5_CAP_IPSEC);
+               if (err)
+                       return err;
+       }
+
        return 0;
 }
 
index dcea87e..57eb91b 100644 (file)
@@ -6,7 +6,7 @@
 
 int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
                               void *key, u32 sz_bytes,
-                              u32 *p_key_id)
+                              u32 key_type, u32 *p_key_id)
 {
        u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
        u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
@@ -41,8 +41,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
        memcpy(key_p, key, sz_bytes);
 
        MLX5_SET(encryption_key_obj, obj, key_size, general_obj_key_size);
-       MLX5_SET(encryption_key_obj, obj, key_type,
-                MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS);
+       MLX5_SET(encryption_key_obj, obj, key_type, key_type);
        MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
                 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
        MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
index 2495392..d046db7 100644 (file)
@@ -80,8 +80,14 @@ void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats)
 int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data);
 
 /* Crypto */
+enum {
+       MLX5_ACCEL_OBJ_TLS_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS,
+       MLX5_ACCEL_OBJ_IPSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC,
+};
+
 int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
-                              void *key, u32 sz_bytes, u32 *p_key_id);
+                              void *key, u32 sz_bytes,
+                              u32 key_type, u32 *p_key_id);
 void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
 
 static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
index 82c766a..be34330 100644 (file)
@@ -40,7 +40,6 @@
 
 struct mlx5_vxlan {
        struct mlx5_core_dev            *mdev;
-       spinlock_t                      lock; /* protect vxlan table */
        /* max_num_ports is usuallly 4, 16 buckets is more than enough */
        DECLARE_HASHTABLE(htable, 4);
        int                             num_ports;
@@ -78,45 +77,47 @@ static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
        return mlx5_cmd_exec_in(mdev, delete_vxlan_udp_dport, in);
 }
 
-static struct mlx5_vxlan_port*
-mlx5_vxlan_lookup_port_locked(struct mlx5_vxlan *vxlan, u16 port)
+bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port)
 {
        struct mlx5_vxlan_port *vxlanp;
+       bool found = false;
 
-       hash_for_each_possible(vxlan->htable, vxlanp, hlist, port) {
-               if (vxlanp->udp_port == port)
-                       return vxlanp;
-       }
+       if (!mlx5_vxlan_allowed(vxlan))
+               return NULL;
 
-       return NULL;
+       rcu_read_lock();
+       hash_for_each_possible_rcu(vxlan->htable, vxlanp, hlist, port)
+               if (vxlanp->udp_port == port) {
+                       found = true;
+                       break;
+               }
+       rcu_read_unlock();
+
+       return found;
 }
 
-struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port)
+static struct mlx5_vxlan_port *vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port)
 {
        struct mlx5_vxlan_port *vxlanp;
 
-       if (!mlx5_vxlan_allowed(vxlan))
-               return NULL;
-
-       spin_lock_bh(&vxlan->lock);
-       vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port);
-       spin_unlock_bh(&vxlan->lock);
-
-       return vxlanp;
+       hash_for_each_possible(vxlan->htable, vxlanp, hlist, port)
+               if (vxlanp->udp_port == port)
+                       return vxlanp;
+       return NULL;
 }
 
 int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
 {
        struct mlx5_vxlan_port *vxlanp;
-       int ret = -ENOSPC;
+       int ret = 0;
 
-       vxlanp = mlx5_vxlan_lookup_port(vxlan, port);
+       mutex_lock(&vxlan->sync_lock);
+       vxlanp = vxlan_lookup_port(vxlan, port);
        if (vxlanp) {
                refcount_inc(&vxlanp->refcount);
-               return 0;
+               goto unlock;
        }
 
-       mutex_lock(&vxlan->sync_lock);
        if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) {
                mlx5_core_info(vxlan->mdev,
                               "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n",
@@ -138,9 +139,7 @@ int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
        vxlanp->udp_port = port;
        refcount_set(&vxlanp->refcount, 1);
 
-       spin_lock_bh(&vxlan->lock);
-       hash_add(vxlan->htable, &vxlanp->hlist, port);
-       spin_unlock_bh(&vxlan->lock);
+       hash_add_rcu(vxlan->htable, &vxlanp->hlist, port);
 
        vxlan->num_ports++;
        mutex_unlock(&vxlan->sync_lock);
@@ -157,34 +156,26 @@ unlock:
 int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
 {
        struct mlx5_vxlan_port *vxlanp;
-       bool remove = false;
        int ret = 0;
 
        mutex_lock(&vxlan->sync_lock);
 
-       spin_lock_bh(&vxlan->lock);
-       vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port);
+       vxlanp = vxlan_lookup_port(vxlan, port);
        if (!vxlanp) {
                ret = -ENOENT;
                goto out_unlock;
        }
 
        if (refcount_dec_and_test(&vxlanp->refcount)) {
-               hash_del(&vxlanp->hlist);
-               remove = true;
-       }
-
-out_unlock:
-       spin_unlock_bh(&vxlan->lock);
-
-       if (remove) {
+               hash_del_rcu(&vxlanp->hlist);
+               synchronize_rcu();
                mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
                kfree(vxlanp);
                vxlan->num_ports--;
        }
 
+out_unlock:
        mutex_unlock(&vxlan->sync_lock);
-
        return ret;
 }
 
@@ -201,7 +192,6 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
 
        vxlan->mdev = mdev;
        mutex_init(&vxlan->sync_lock);
-       spin_lock_init(&vxlan->lock);
        hash_init(vxlan->htable);
 
        /* Hardware adds 4789 (IANA_VXLAN_UDP_PORT) by default */
index 8fb0eb0..6d599f4 100644 (file)
@@ -50,15 +50,14 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev);
 void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan);
 int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port);
 int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port);
-struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port);
+bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port);
 #else
 static inline struct mlx5_vxlan*
 mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-EOPNOTSUPP); }
 static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; }
 static inline int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
 static inline int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
-static inline struct mx5_vxlan_port*
-mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { return NULL; }
+static inline bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { return false; }
 #endif
 
 #endif /* __MLX5_VXLAN_H__ */
index 8b65890..e32d46c 100644 (file)
@@ -1089,11 +1089,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
                goto err_fpga_start;
        }
 
-       err = mlx5_accel_ipsec_init(dev);
-       if (err) {
-               mlx5_core_err(dev, "IPSec device start failed %d\n", err);
-               goto err_ipsec_start;
-       }
+       mlx5_accel_ipsec_init(dev);
 
        err = mlx5_accel_tls_init(dev);
        if (err) {
@@ -1135,7 +1131,6 @@ err_fs:
        mlx5_accel_tls_cleanup(dev);
 err_tls_start:
        mlx5_accel_ipsec_cleanup(dev);
-err_ipsec_start:
        mlx5_fpga_device_stop(dev);
 err_fpga_start:
        mlx5_rsc_dump_cleanup(dev);
@@ -1628,7 +1623,7 @@ static int __init init(void)
        get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));
 
        mlx5_core_verify_params();
-       mlx5_accel_ipsec_build_fs_cmds();
+       mlx5_fpga_ipsec_build_fs_cmds();
        mlx5_register_debugfs();
 
        err = pci_register_driver(&mlx5_core_driver);
index 9f829e6..e4186e8 100644 (file)
@@ -293,7 +293,40 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
        return 0;
 }
 
-static int mlx5_eeprom_page(int offset)
+static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num,
+                               u8 *module_id)
+{
+       u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
+       u32 out[MLX5_ST_SZ_DW(mcia_reg)];
+       int err, status;
+       u8 *ptr;
+
+       MLX5_SET(mcia_reg, in, i2c_device_address, MLX5_I2C_ADDR_LOW);
+       MLX5_SET(mcia_reg, in, module, module_num);
+       MLX5_SET(mcia_reg, in, device_address, 0);
+       MLX5_SET(mcia_reg, in, page_number, 0);
+       MLX5_SET(mcia_reg, in, size, 1);
+       MLX5_SET(mcia_reg, in, l, 0);
+
+       err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+                                  sizeof(out), MLX5_REG_MCIA, 0, 0);
+       if (err)
+               return err;
+
+       status = MLX5_GET(mcia_reg, out, status);
+       if (status) {
+               mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
+                             status);
+               return -EIO;
+       }
+       ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+
+       *module_id = ptr[0];
+
+       return 0;
+}
+
+static int mlx5_qsfp_eeprom_page(u16 offset)
 {
        if (offset < MLX5_EEPROM_PAGE_LENGTH)
                /* Addresses between 0-255 - page 00 */
@@ -307,7 +340,7 @@ static int mlx5_eeprom_page(int offset)
                    MLX5_EEPROM_HIGH_PAGE_LENGTH);
 }
 
-static int mlx5_eeprom_high_page_offset(int page_num)
+static int mlx5_qsfp_eeprom_high_page_offset(int page_num)
 {
        if (!page_num) /* Page 0 always start from low page */
                return 0;
@@ -316,35 +349,62 @@ static int mlx5_eeprom_high_page_offset(int page_num)
        return page_num * MLX5_EEPROM_HIGH_PAGE_LENGTH;
 }
 
+static void mlx5_qsfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset)
+{
+       *i2c_addr = MLX5_I2C_ADDR_LOW;
+       *page_num = mlx5_qsfp_eeprom_page(*offset);
+       *offset -=  mlx5_qsfp_eeprom_high_page_offset(*page_num);
+}
+
+static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset)
+{
+       *i2c_addr = MLX5_I2C_ADDR_LOW;
+       *page_num = 0;
+
+       if (*offset < MLX5_EEPROM_PAGE_LENGTH)
+               return;
+
+       *i2c_addr = MLX5_I2C_ADDR_HIGH;
+       *offset -= MLX5_EEPROM_PAGE_LENGTH;
+}
+
 int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
                             u16 offset, u16 size, u8 *data)
 {
-       int module_num, page_num, status, err;
+       int module_num, status, err, page_num = 0;
+       u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
        u32 out[MLX5_ST_SZ_DW(mcia_reg)];
-       u32 in[MLX5_ST_SZ_DW(mcia_reg)];
-       u16 i2c_addr;
-       void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+       u16 i2c_addr = 0;
+       u8 module_id;
+       void *ptr;
 
        err = mlx5_query_module_num(dev, &module_num);
        if (err)
                return err;
 
-       memset(in, 0, sizeof(in));
-       size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
-
-       /* Get the page number related to the given offset */
-       page_num = mlx5_eeprom_page(offset);
+       err = mlx5_query_module_id(dev, module_num, &module_id);
+       if (err)
+               return err;
 
-       /* Set the right offset according to the page number,
-        * For page_num > 0, relative offset is always >= 128 (high page).
-        */
-       offset -= mlx5_eeprom_high_page_offset(page_num);
+       switch (module_id) {
+       case MLX5_MODULE_ID_SFP:
+               mlx5_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+               break;
+       case MLX5_MODULE_ID_QSFP:
+       case MLX5_MODULE_ID_QSFP_PLUS:
+       case MLX5_MODULE_ID_QSFP28:
+               mlx5_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+               break;
+       default:
+               mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
+               return -EINVAL;
+       }
 
        if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
                /* Cross pages read, read until offset 256 in low page */
                size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
 
-       i2c_addr = MLX5_I2C_ADDR_LOW;
+       size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
 
        MLX5_SET(mcia_reg, in, l, 0);
        MLX5_SET(mcia_reg, in, module, module_num);
@@ -365,6 +425,7 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
                return -EIO;
        }
 
+       ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
        memcpy(data, ptr, size);
 
        return size;
index c107d92..88cdb9b 100644 (file)
@@ -173,7 +173,7 @@ int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
 EXPORT_SYMBOL_GPL(mlx5_query_mac_address);
 
 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
-                                     u16 vport, u8 *addr)
+                                     u16 vport, const u8 *addr)
 {
        void *in;
        int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
index 4cadc33..e5c4dcd 100644 (file)
@@ -172,6 +172,11 @@ static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
        return !equal && !smaller;
 }
 
+static inline u16 mlx5_wq_cyc_get_counter(struct mlx5_wq_cyc *wq)
+{
+       return wq->wqe_ctr;
+}
+
 static inline u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
 {
        return wq->fbc.sz_m1 + 1;
@@ -290,4 +295,14 @@ static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq)
        *wq->db = cpu_to_be32(wq->wqe_ctr);
 }
 
+static inline u16 mlx5_wq_ll_get_head(struct mlx5_wq_ll *wq)
+{
+       return wq->head;
+}
+
+static inline u16 mlx5_wq_ll_get_counter(struct mlx5_wq_ll *wq)
+{
+       return wq->wqe_ctr;
+}
+
 #endif /* __MLX5_WQ_H__ */
index 4aeabb3..8927243 100644 (file)
@@ -30,7 +30,8 @@ mlxsw_spectrum-objs           := spectrum.o spectrum_buffers.o \
                                   spectrum_mr_tcam.o spectrum_mr.o \
                                   spectrum_qdisc.o spectrum_span.o \
                                   spectrum_nve.o spectrum_nve_vxlan.o \
-                                  spectrum_dpipe.o spectrum_trap.o
+                                  spectrum_dpipe.o spectrum_trap.o \
+                                  spectrum_ethtool.o spectrum_policer.o
 mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB)    += spectrum_dcb.o
 mlxsw_spectrum-$(CONFIG_PTP_1588_CLOCK)                += spectrum_ptp.o
 obj-$(CONFIG_MLXSW_MINIMAL)    += mlxsw_minimal.o
index e9ccd33..1363168 100644 (file)
@@ -1524,7 +1524,8 @@ static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
 {
        return (rxl_a->func == rxl_b->func &&
                rxl_a->local_port == rxl_b->local_port &&
-               rxl_a->trap_id == rxl_b->trap_id);
+               rxl_a->trap_id == rxl_b->trap_id &&
+               rxl_a->mirror_reason == rxl_b->mirror_reason);
 }
 
 static struct mlxsw_rx_listener_item *
@@ -2044,7 +2045,8 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
                rxl = &rxl_item->rxl;
                if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
                     rxl->local_port == local_port) &&
-                   rxl->trap_id == rx_info->trap_id) {
+                   rxl->trap_id == rx_info->trap_id &&
+                   rxl->mirror_reason == rx_info->mirror_reason) {
                        if (rxl_item->enabled)
                                found = true;
                        break;
@@ -2122,6 +2124,7 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
                                  enum devlink_port_flavour flavour,
                                  u32 port_number, bool split,
                                  u32 split_port_subnumber,
+                                 bool splittable, u32 lanes,
                                  const unsigned char *switch_id,
                                  unsigned char switch_id_len)
 {
@@ -2129,12 +2132,19 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
        struct mlxsw_core_port *mlxsw_core_port =
                                        &mlxsw_core->ports[local_port];
        struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+       struct devlink_port_attrs attrs = {};
        int err;
 
+       attrs.split = split;
+       attrs.lanes = lanes;
+       attrs.splittable = splittable;
+       attrs.flavour = flavour;
+       attrs.phys.port_number = port_number;
+       attrs.phys.split_subport_number = split_port_subnumber;
+       memcpy(attrs.switch_id.id, switch_id, switch_id_len);
+       attrs.switch_id.id_len = switch_id_len;
        mlxsw_core_port->local_port = local_port;
-       devlink_port_attrs_set(devlink_port, flavour, port_number,
-                              split, split_port_subnumber,
-                              switch_id, switch_id_len);
+       devlink_port_attrs_set(devlink_port, &attrs);
        err = devlink_port_register(devlink, devlink_port, local_port);
        if (err)
                memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
@@ -2154,12 +2164,14 @@ static void __mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
 int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
                         u32 port_number, bool split,
                         u32 split_port_subnumber,
+                        bool splittable, u32 lanes,
                         const unsigned char *switch_id,
                         unsigned char switch_id_len)
 {
        return __mlxsw_core_port_init(mlxsw_core, local_port,
                                      DEVLINK_PORT_FLAVOUR_PHYSICAL,
                                      port_number, split, split_port_subnumber,
+                                     splittable, lanes,
                                      switch_id, switch_id_len);
 }
 EXPORT_SYMBOL(mlxsw_core_port_init);
@@ -2181,7 +2193,7 @@ int mlxsw_core_cpu_port_init(struct mlxsw_core *mlxsw_core,
 
        err = __mlxsw_core_port_init(mlxsw_core, MLXSW_PORT_CPU_PORT,
                                     DEVLINK_PORT_FLAVOUR_CPU,
-                                    0, false, 0,
+                                    0, false, 0, false, 0,
                                     switch_id, switch_id_len);
        if (err)
                return err;
index 22b0dfa..c1c1e03 100644 (file)
@@ -61,6 +61,7 @@ void mlxsw_core_ptp_transmitted(struct mlxsw_core *mlxsw_core,
 struct mlxsw_rx_listener {
        void (*func)(struct sk_buff *skb, u8 local_port, void *priv);
        u8 local_port;
+       u8 mirror_reason;
        u16 trap_id;
 };
 
@@ -176,6 +177,7 @@ struct mlxsw_rx_info {
                u16 lag_id;
        } u;
        u8 lag_port_index;
+       u8 mirror_reason;
        int trap_id;
 };
 
@@ -191,8 +193,8 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
 
 void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port);
 int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
-                        u32 port_number, bool split,
-                        u32 split_port_subnumber,
+                        u32 port_number, bool split, u32 split_port_subnumber,
+                        bool splittable, u32 lanes,
                         const unsigned char *switch_id,
                         unsigned char switch_id_len);
 void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port);
index c3d0431..4d699fe 100644 (file)
@@ -67,7 +67,9 @@ struct mlxsw_afa {
        struct rhashtable set_ht;
        struct rhashtable fwd_entry_ht;
        struct rhashtable cookie_ht;
+       struct rhashtable policer_ht;
        struct idr cookie_idr;
+       struct list_head policer_list;
 };
 
 #define MLXSW_AFA_SET_LEN 0xA8
@@ -88,9 +90,11 @@ struct mlxsw_afa_set {
        struct rhash_head ht_node;
        struct mlxsw_afa_set_ht_key ht_key;
        u32 kvdl_index;
-       bool shared; /* Inserted in hashtable (doesn't mean that
+       u8 shared:1, /* Inserted in hashtable (doesn't mean that
                      * kvdl_index is valid).
                      */
+          has_trap:1,
+          has_police:1;
        unsigned int ref_count;
        struct mlxsw_afa_set *next; /* Pointer to the next set. */
        struct mlxsw_afa_set *prev; /* Pointer to the previous set,
@@ -175,6 +179,21 @@ static const struct rhashtable_params mlxsw_afa_cookie_ht_params = {
        .automatic_shrinking = true,
 };
 
+struct mlxsw_afa_policer {
+       struct rhash_head ht_node;
+       struct list_head list; /* Member of policer_list */
+       refcount_t ref_count;
+       u32 fa_index;
+       u16 policer_index;
+};
+
+static const struct rhashtable_params mlxsw_afa_policer_ht_params = {
+       .key_len = sizeof(u32),
+       .key_offset = offsetof(struct mlxsw_afa_policer, fa_index),
+       .head_offset = offsetof(struct mlxsw_afa_policer, ht_node),
+       .automatic_shrinking = true,
+};
+
 struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
                                   const struct mlxsw_afa_ops *ops,
                                   void *ops_priv)
@@ -196,12 +215,19 @@ struct mlxsw_afa *mlxsw_afa_create(unsigned int max_acts_per_set,
                              &mlxsw_afa_cookie_ht_params);
        if (err)
                goto err_cookie_rhashtable_init;
+       err = rhashtable_init(&mlxsw_afa->policer_ht,
+                             &mlxsw_afa_policer_ht_params);
+       if (err)
+               goto err_policer_rhashtable_init;
        idr_init(&mlxsw_afa->cookie_idr);
+       INIT_LIST_HEAD(&mlxsw_afa->policer_list);
        mlxsw_afa->max_acts_per_set = max_acts_per_set;
        mlxsw_afa->ops = ops;
        mlxsw_afa->ops_priv = ops_priv;
        return mlxsw_afa;
 
+err_policer_rhashtable_init:
+       rhashtable_destroy(&mlxsw_afa->cookie_ht);
 err_cookie_rhashtable_init:
        rhashtable_destroy(&mlxsw_afa->fwd_entry_ht);
 err_fwd_entry_rhashtable_init:
@@ -214,8 +240,10 @@ EXPORT_SYMBOL(mlxsw_afa_create);
 
 void mlxsw_afa_destroy(struct mlxsw_afa *mlxsw_afa)
 {
+       WARN_ON(!list_empty(&mlxsw_afa->policer_list));
        WARN_ON(!idr_is_empty(&mlxsw_afa->cookie_idr));
        idr_destroy(&mlxsw_afa->cookie_idr);
+       rhashtable_destroy(&mlxsw_afa->policer_ht);
        rhashtable_destroy(&mlxsw_afa->cookie_ht);
        rhashtable_destroy(&mlxsw_afa->fwd_entry_ht);
        rhashtable_destroy(&mlxsw_afa->set_ht);
@@ -836,19 +864,172 @@ err_cookie_get:
        return ERR_PTR(err);
 }
 
+static struct mlxsw_afa_policer *
+mlxsw_afa_policer_create(struct mlxsw_afa *mlxsw_afa, u32 fa_index,
+                        u64 rate_bytes_ps, u32 burst,
+                        struct netlink_ext_ack *extack)
+{
+       struct mlxsw_afa_policer *policer;
+       int err;
+
+       policer = kzalloc(sizeof(*policer), GFP_KERNEL);
+       if (!policer)
+               return ERR_PTR(-ENOMEM);
+
+       err = mlxsw_afa->ops->policer_add(mlxsw_afa->ops_priv, rate_bytes_ps,
+                                         burst, &policer->policer_index,
+                                         extack);
+       if (err)
+               goto err_policer_add;
+
+       refcount_set(&policer->ref_count, 1);
+       policer->fa_index = fa_index;
+
+       err = rhashtable_insert_fast(&mlxsw_afa->policer_ht, &policer->ht_node,
+                                    mlxsw_afa_policer_ht_params);
+       if (err)
+               goto err_rhashtable_insert;
+
+       list_add_tail(&policer->list, &mlxsw_afa->policer_list);
+
+       return policer;
+
+err_rhashtable_insert:
+       mlxsw_afa->ops->policer_del(mlxsw_afa->ops_priv,
+                                   policer->policer_index);
+err_policer_add:
+       kfree(policer);
+       return ERR_PTR(err);
+}
+
+static void mlxsw_afa_policer_destroy(struct mlxsw_afa *mlxsw_afa,
+                                     struct mlxsw_afa_policer *policer)
+{
+       list_del(&policer->list);
+       rhashtable_remove_fast(&mlxsw_afa->policer_ht, &policer->ht_node,
+                              mlxsw_afa_policer_ht_params);
+       mlxsw_afa->ops->policer_del(mlxsw_afa->ops_priv,
+                                   policer->policer_index);
+       kfree(policer);
+}
+
+static struct mlxsw_afa_policer *
+mlxsw_afa_policer_get(struct mlxsw_afa *mlxsw_afa, u32 fa_index,
+                     u64 rate_bytes_ps, u32 burst,
+                     struct netlink_ext_ack *extack)
+{
+       struct mlxsw_afa_policer *policer;
+
+       policer = rhashtable_lookup_fast(&mlxsw_afa->policer_ht, &fa_index,
+                                        mlxsw_afa_policer_ht_params);
+       if (policer) {
+               refcount_inc(&policer->ref_count);
+               return policer;
+       }
+
+       return mlxsw_afa_policer_create(mlxsw_afa, fa_index, rate_bytes_ps,
+                                       burst, extack);
+}
+
+static void mlxsw_afa_policer_put(struct mlxsw_afa *mlxsw_afa,
+                                 struct mlxsw_afa_policer *policer)
+{
+       if (!refcount_dec_and_test(&policer->ref_count))
+               return;
+       mlxsw_afa_policer_destroy(mlxsw_afa, policer);
+}
+
+struct mlxsw_afa_policer_ref {
+       struct mlxsw_afa_resource resource;
+       struct mlxsw_afa_policer *policer;
+};
+
+static void
+mlxsw_afa_policer_ref_destroy(struct mlxsw_afa_block *block,
+                             struct mlxsw_afa_policer_ref *policer_ref)
+{
+       mlxsw_afa_resource_del(&policer_ref->resource);
+       mlxsw_afa_policer_put(block->afa, policer_ref->policer);
+       kfree(policer_ref);
+}
+
+static void
+mlxsw_afa_policer_ref_destructor(struct mlxsw_afa_block *block,
+                                struct mlxsw_afa_resource *resource)
+{
+       struct mlxsw_afa_policer_ref *policer_ref;
+
+       policer_ref = container_of(resource, struct mlxsw_afa_policer_ref,
+                                  resource);
+       mlxsw_afa_policer_ref_destroy(block, policer_ref);
+}
+
+static struct mlxsw_afa_policer_ref *
+mlxsw_afa_policer_ref_create(struct mlxsw_afa_block *block, u32 fa_index,
+                            u64 rate_bytes_ps, u32 burst,
+                            struct netlink_ext_ack *extack)
+{
+       struct mlxsw_afa_policer_ref *policer_ref;
+       struct mlxsw_afa_policer *policer;
+       int err;
+
+       policer_ref = kzalloc(sizeof(*policer_ref), GFP_KERNEL);
+       if (!policer_ref)
+               return ERR_PTR(-ENOMEM);
+
+       policer = mlxsw_afa_policer_get(block->afa, fa_index, rate_bytes_ps,
+                                       burst, extack);
+       if (IS_ERR(policer)) {
+               err = PTR_ERR(policer);
+               goto err_policer_get;
+       }
+
+       policer_ref->policer = policer;
+       policer_ref->resource.destructor = mlxsw_afa_policer_ref_destructor;
+       mlxsw_afa_resource_add(block, &policer_ref->resource);
+
+       return policer_ref;
+
+err_policer_get:
+       kfree(policer_ref);
+       return ERR_PTR(err);
+}
+
 #define MLXSW_AFA_ONE_ACTION_LEN 32
 #define MLXSW_AFA_PAYLOAD_OFFSET 4
 
-static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
-                                          u8 action_code, u8 action_size)
+enum mlxsw_afa_action_type {
+       MLXSW_AFA_ACTION_TYPE_TRAP,
+       MLXSW_AFA_ACTION_TYPE_POLICE,
+       MLXSW_AFA_ACTION_TYPE_OTHER,
+};
+
+static bool
+mlxsw_afa_block_need_split(const struct mlxsw_afa_block *block,
+                          enum mlxsw_afa_action_type type)
+{
+       struct mlxsw_afa_set *cur_set = block->cur_set;
+
+       /* Due to a hardware limitation, police action cannot be in the same
+        * action set with MLXSW_AFA_TRAP_CODE or MLXSW_AFA_TRAPWU_CODE
+        * actions. Work around this limitation by creating a new action set
+        * and place the new action there.
+        */
+       return (cur_set->has_trap && type == MLXSW_AFA_ACTION_TYPE_POLICE) ||
+              (cur_set->has_police && type == MLXSW_AFA_ACTION_TYPE_TRAP);
+}
+
+static char *mlxsw_afa_block_append_action_ext(struct mlxsw_afa_block *block,
+                                              u8 action_code, u8 action_size,
+                                              enum mlxsw_afa_action_type type)
 {
        char *oneact;
        char *actions;
 
        if (block->finished)
                return ERR_PTR(-EINVAL);
-       if (block->cur_act_index + action_size >
-           block->afa->max_acts_per_set) {
+       if (block->cur_act_index + action_size > block->afa->max_acts_per_set ||
+           mlxsw_afa_block_need_split(block, type)) {
                struct mlxsw_afa_set *set;
 
                /* The appended action won't fit into the current action set,
@@ -863,6 +1044,17 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
                block->cur_set = set;
        }
 
+       switch (type) {
+       case MLXSW_AFA_ACTION_TYPE_TRAP:
+               block->cur_set->has_trap = true;
+               break;
+       case MLXSW_AFA_ACTION_TYPE_POLICE:
+               block->cur_set->has_police = true;
+               break;
+       default:
+               break;
+       }
+
        actions = block->cur_set->ht_key.enc_actions;
        oneact = actions + block->cur_act_index * MLXSW_AFA_ONE_ACTION_LEN;
        block->cur_act_index += action_size;
@@ -870,6 +1062,14 @@ static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
        return oneact + MLXSW_AFA_PAYLOAD_OFFSET;
 }
 
+static char *mlxsw_afa_block_append_action(struct mlxsw_afa_block *block,
+                                          u8 action_code, u8 action_size)
+{
+       return mlxsw_afa_block_append_action_ext(block, action_code,
+                                                action_size,
+                                                MLXSW_AFA_ACTION_TYPE_OTHER);
+}
+
 /* VLAN Action
  * -----------
  * VLAN action is used for manipulating VLANs. It can be used to implement QinQ,
@@ -1048,11 +1248,20 @@ mlxsw_afa_trap_mirror_pack(char *payload, bool mirror_enable,
        mlxsw_afa_trap_mirror_agent_set(payload, mirror_agent);
 }
 
+static char *mlxsw_afa_block_append_action_trap(struct mlxsw_afa_block *block,
+                                               u8 action_code, u8 action_size)
+{
+       return mlxsw_afa_block_append_action_ext(block, action_code,
+                                                action_size,
+                                                MLXSW_AFA_ACTION_TYPE_TRAP);
+}
+
 static int mlxsw_afa_block_append_drop_plain(struct mlxsw_afa_block *block,
                                             bool ingress)
 {
-       char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAP_CODE,
-                                                 MLXSW_AFA_TRAP_SIZE);
+       char *act = mlxsw_afa_block_append_action_trap(block,
+                                                      MLXSW_AFA_TRAP_CODE,
+                                                      MLXSW_AFA_TRAP_SIZE);
 
        if (IS_ERR(act))
                return PTR_ERR(act);
@@ -1081,8 +1290,8 @@ mlxsw_afa_block_append_drop_with_cookie(struct mlxsw_afa_block *block,
        }
        cookie_index = cookie_ref->cookie->cookie_index;
 
-       act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAPWU_CODE,
-                                           MLXSW_AFA_TRAPWU_SIZE);
+       act = mlxsw_afa_block_append_action_trap(block, MLXSW_AFA_TRAPWU_CODE,
+                                                MLXSW_AFA_TRAPWU_SIZE);
        if (IS_ERR(act)) {
                NL_SET_ERR_MSG_MOD(extack, "Cannot append drop with cookie action");
                err = PTR_ERR(act);
@@ -1113,8 +1322,9 @@ EXPORT_SYMBOL(mlxsw_afa_block_append_drop);
 
 int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id)
 {
-       char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAP_CODE,
-                                                 MLXSW_AFA_TRAP_SIZE);
+       char *act = mlxsw_afa_block_append_action_trap(block,
+                                                      MLXSW_AFA_TRAP_CODE,
+                                                      MLXSW_AFA_TRAP_SIZE);
 
        if (IS_ERR(act))
                return PTR_ERR(act);
@@ -1127,8 +1337,9 @@ EXPORT_SYMBOL(mlxsw_afa_block_append_trap);
 int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
                                            u16 trap_id)
 {
-       char *act = mlxsw_afa_block_append_action(block, MLXSW_AFA_TRAP_CODE,
-                                                 MLXSW_AFA_TRAP_SIZE);
+       char *act = mlxsw_afa_block_append_action_trap(block,
+                                                      MLXSW_AFA_TRAP_CODE,
+                                                      MLXSW_AFA_TRAP_SIZE);
 
        if (IS_ERR(act))
                return PTR_ERR(act);
@@ -1199,9 +1410,10 @@ static int
 mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
                                        u8 mirror_agent)
 {
-       char *act = mlxsw_afa_block_append_action(block,
-                                                 MLXSW_AFA_TRAP_CODE,
-                                                 MLXSW_AFA_TRAP_SIZE);
+       char *act = mlxsw_afa_block_append_action_trap(block,
+                                                      MLXSW_AFA_TRAP_CODE,
+                                                      MLXSW_AFA_TRAP_SIZE);
+
        if (IS_ERR(act))
                return PTR_ERR(act);
        mlxsw_afa_trap_pack(act, MLXSW_AFA_TRAP_TRAP_ACTION_NOP,
@@ -1496,6 +1708,19 @@ EXPORT_SYMBOL(mlxsw_afa_block_append_fwd);
 #define MLXSW_AFA_POLCNT_CODE 0x08
 #define MLXSW_AFA_POLCNT_SIZE 1
 
+enum {
+       MLXSW_AFA_POLCNT_COUNTER,
+       MLXSW_AFA_POLCNT_POLICER,
+};
+
+/* afa_polcnt_c_p
+ * Counter or policer.
+ * Indicates whether the action binds a policer or a counter to the flow.
+ * 0: Counter
+ * 1: Policer
+ */
+MLXSW_ITEM32(afa, polcnt, c_p, 0x00, 31, 1);
+
 enum mlxsw_afa_polcnt_counter_set_type {
        /* No count */
        MLXSW_AFA_POLCNT_COUNTER_SET_TYPE_NO_COUNT = 0x00,
@@ -1515,15 +1740,28 @@ MLXSW_ITEM32(afa, polcnt, counter_set_type, 0x04, 24, 8);
  */
 MLXSW_ITEM32(afa, polcnt, counter_index, 0x04, 0, 24);
 
+/* afa_polcnt_pid
+ * Policer ID.
+ * Reserved when c_p = 0
+ */
+MLXSW_ITEM32(afa, polcnt, pid, 0x08, 0, 14);
+
 static inline void
 mlxsw_afa_polcnt_pack(char *payload,
                      enum mlxsw_afa_polcnt_counter_set_type set_type,
                      u32 counter_index)
 {
+       mlxsw_afa_polcnt_c_p_set(payload, MLXSW_AFA_POLCNT_COUNTER);
        mlxsw_afa_polcnt_counter_set_type_set(payload, set_type);
        mlxsw_afa_polcnt_counter_index_set(payload, counter_index);
 }
 
+static void mlxsw_afa_polcnt_policer_pack(char *payload, u16 policer_index)
+{
+       mlxsw_afa_polcnt_c_p_set(payload, MLXSW_AFA_POLCNT_POLICER);
+       mlxsw_afa_polcnt_pid_set(payload, policer_index);
+}
+
 int mlxsw_afa_block_append_allocated_counter(struct mlxsw_afa_block *block,
                                             u32 counter_index)
 {
@@ -1567,6 +1805,40 @@ err_append_allocated_counter:
 }
 EXPORT_SYMBOL(mlxsw_afa_block_append_counter);
 
+int mlxsw_afa_block_append_police(struct mlxsw_afa_block *block,
+                                 u32 fa_index, u64 rate_bytes_ps, u32 burst,
+                                 u16 *p_policer_index,
+                                 struct netlink_ext_ack *extack)
+{
+       struct mlxsw_afa_policer_ref *policer_ref;
+       char *act;
+       int err;
+
+       policer_ref = mlxsw_afa_policer_ref_create(block, fa_index,
+                                                  rate_bytes_ps,
+                                                  burst, extack);
+       if (IS_ERR(policer_ref))
+               return PTR_ERR(policer_ref);
+       *p_policer_index = policer_ref->policer->policer_index;
+
+       act = mlxsw_afa_block_append_action_ext(block, MLXSW_AFA_POLCNT_CODE,
+                                               MLXSW_AFA_POLCNT_SIZE,
+                                               MLXSW_AFA_ACTION_TYPE_POLICE);
+       if (IS_ERR(act)) {
+               NL_SET_ERR_MSG_MOD(extack, "Cannot append police action");
+               err = PTR_ERR(act);
+               goto err_append_action;
+       }
+       mlxsw_afa_polcnt_policer_pack(act, *p_policer_index);
+
+       return 0;
+
+err_append_action:
+       mlxsw_afa_policer_ref_destroy(block, policer_ref);
+       return err;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_police);
+
 /* Virtual Router and Forwarding Domain Action
  * -------------------------------------------
  * Virtual Switch action is used for manipulate the Virtual Router (VR),
@@ -1684,3 +1956,54 @@ int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
        return 0;
 }
 EXPORT_SYMBOL(mlxsw_afa_block_append_mcrouter);
+
+/* L4 Port Action
+ * --------------
+ * The L4_PORT_ACTION is used for modifying the sport and dport fields of the packet, e.g. for NAT.
+ * If (the L4 is TCP) or if (the L4 is UDP and checksum field!=0) then the L4 checksum is updated.
+ */
+
+#define MLXSW_AFA_L4PORT_CODE 0x12
+#define MLXSW_AFA_L4PORT_SIZE 1
+
+enum mlxsw_afa_l4port_s_d {
+       /* configure src_l4_port */
+       MLXSW_AFA_L4PORT_S_D_SRC,
+       /* configure dst_l4_port */
+       MLXSW_AFA_L4PORT_S_D_DST,
+};
+
+/* afa_l4port_s_d
+ * Source or destination.
+ */
+MLXSW_ITEM32(afa, l4port, s_d, 0x00, 31, 1);
+
+/* afa_l4port_l4_port
+ * Number of port to change to.
+ */
+MLXSW_ITEM32(afa, l4port, l4_port, 0x08, 0, 16);
+
+static void mlxsw_afa_l4port_pack(char *payload, enum mlxsw_afa_l4port_s_d s_d, u16 l4_port)
+{
+       mlxsw_afa_l4port_s_d_set(payload, s_d);
+       mlxsw_afa_l4port_l4_port_set(payload, l4_port);
+}
+
+int mlxsw_afa_block_append_l4port(struct mlxsw_afa_block *block, bool is_dport, u16 l4_port,
+                                 struct netlink_ext_ack *extack)
+{
+       enum mlxsw_afa_l4port_s_d s_d = is_dport ? MLXSW_AFA_L4PORT_S_D_DST :
+                                                  MLXSW_AFA_L4PORT_S_D_SRC;
+       char *act = mlxsw_afa_block_append_action(block,
+                                                 MLXSW_AFA_L4PORT_CODE,
+                                                 MLXSW_AFA_L4PORT_SIZE);
+
+       if (IS_ERR(act)) {
+               NL_SET_ERR_MSG_MOD(extack, "Cannot append L4_PORT action");
+               return PTR_ERR(act);
+       }
+
+       mlxsw_afa_l4port_pack(act, s_d, l4_port);
+       return 0;
+}
+EXPORT_SYMBOL(mlxsw_afa_block_append_l4port);
index 8c2705e..b652497 100644 (file)
@@ -26,6 +26,10 @@ struct mlxsw_afa_ops {
                          bool ingress, int *p_span_id);
        void (*mirror_del)(void *priv, u8 local_in_port, int span_id,
                           bool ingress);
+       int (*policer_add)(void *priv, u64 rate_bytes_ps, u32 burst,
+                          u16 *p_policer_index,
+                          struct netlink_ext_ack *extack);
+       void (*policer_del)(void *priv, u16 policer_index);
        bool dummy_first_set;
 };
 
@@ -82,5 +86,11 @@ int mlxsw_afa_block_append_fid_set(struct mlxsw_afa_block *block, u16 fid,
 int mlxsw_afa_block_append_mcrouter(struct mlxsw_afa_block *block,
                                    u16 expected_irif, u16 min_mtu,
                                    bool rmid_valid, u32 kvdl_index);
+int mlxsw_afa_block_append_l4port(struct mlxsw_afa_block *block, bool is_dport, u16 l4_port,
+                                 struct netlink_ext_ack *extack);
+int mlxsw_afa_block_append_police(struct mlxsw_afa_block *block,
+                                 u32 fa_index, u64 rate_bytes_ps, u32 burst,
+                                 u16 *p_policer_index,
+                                 struct netlink_ext_ack *extack);
 
 #endif
index c4caeea..c010db2 100644 (file)
@@ -164,8 +164,8 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module)
        int err;
 
        err = mlxsw_core_port_init(mlxsw_m->core, local_port,
-                                  module + 1, false, 0,
-                                  mlxsw_m->base_mac,
+                                  module + 1, false, 0, false,
+                                  0, mlxsw_m->base_mac,
                                   sizeof(mlxsw_m->base_mac));
        if (err) {
                dev_err(mlxsw_m->bus_info->dev, "Port %d: Failed to init core port\n",
index fd0e97d..1c64b03 100644 (file)
@@ -547,9 +547,9 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
 {
        struct pci_dev *pdev = mlxsw_pci->pdev;
        struct mlxsw_pci_queue_elem_info *elem_info;
+       struct mlxsw_rx_info rx_info = {};
        char *wqe;
        struct sk_buff *skb;
-       struct mlxsw_rx_info rx_info;
        u16 byte_count;
        int err;
 
@@ -582,6 +582,10 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
                if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2)
                        cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe);
                mlxsw_skb_cb(skb)->cookie_index = cookie_index;
+       } else if (rx_info.trap_id >= MLXSW_TRAP_ID_MIRROR_SESSION0 &&
+                  rx_info.trap_id <= MLXSW_TRAP_ID_MIRROR_SESSION7 &&
+                  mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
+               rx_info.mirror_reason = mlxsw_pci_cqe2_mirror_reason_get(cqe);
        }
 
        byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
@@ -1414,23 +1418,12 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
        u16 num_pages;
        int err;
 
-       mutex_init(&mlxsw_pci->cmd.lock);
-       init_waitqueue_head(&mlxsw_pci->cmd.wait);
-
        mlxsw_pci->core = mlxsw_core;
 
        mbox = mlxsw_cmd_mbox_alloc();
        if (!mbox)
                return -ENOMEM;
 
-       err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
-       if (err)
-               goto mbox_put;
-
-       err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
-       if (err)
-               goto err_out_mbox_alloc;
-
        err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
        if (err)
                goto err_sw_reset;
@@ -1537,9 +1530,6 @@ err_query_fw:
        mlxsw_pci_free_irq_vectors(mlxsw_pci);
 err_alloc_irq:
 err_sw_reset:
-       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
-err_out_mbox_alloc:
-       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
 mbox_put:
        mlxsw_cmd_mbox_free(mbox);
        return err;
@@ -1553,8 +1543,6 @@ static void mlxsw_pci_fini(void *bus_priv)
        mlxsw_pci_aqs_fini(mlxsw_pci);
        mlxsw_pci_fw_area_fini(mlxsw_pci);
        mlxsw_pci_free_irq_vectors(mlxsw_pci);
-       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
-       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
 }
 
 static struct mlxsw_pci_queue *
@@ -1776,6 +1764,37 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
        .features               = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
 };
 
+static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
+{
+       int err;
+
+       mutex_init(&mlxsw_pci->cmd.lock);
+       init_waitqueue_head(&mlxsw_pci->cmd.wait);
+
+       err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+       if (err)
+               goto err_in_mbox_alloc;
+
+       err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+       if (err)
+               goto err_out_mbox_alloc;
+
+       return 0;
+
+err_out_mbox_alloc:
+       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+err_in_mbox_alloc:
+       mutex_destroy(&mlxsw_pci->cmd.lock);
+       return err;
+}
+
+static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci)
+{
+       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+       mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+       mutex_destroy(&mlxsw_pci->cmd.lock);
+}
+
 static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        const char *driver_name = pdev->driver->name;
@@ -1831,6 +1850,10 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        mlxsw_pci->pdev = pdev;
        pci_set_drvdata(pdev, mlxsw_pci);
 
+       err = mlxsw_pci_cmd_init(mlxsw_pci);
+       if (err)
+               goto err_pci_cmd_init;
+
        mlxsw_pci->bus_info.device_kind = driver_name;
        mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
        mlxsw_pci->bus_info.dev = &pdev->dev;
@@ -1848,6 +1871,8 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        return 0;
 
 err_bus_device_register:
+       mlxsw_pci_cmd_fini(mlxsw_pci);
+err_pci_cmd_init:
        iounmap(mlxsw_pci->hw_addr);
 err_ioremap:
 err_pci_resource_len_check:
@@ -1865,6 +1890,7 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
        struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
 
        mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
+       mlxsw_pci_cmd_fini(mlxsw_pci);
        iounmap(mlxsw_pci->hw_addr);
        pci_release_regions(mlxsw_pci->pdev);
        pci_disable_device(mlxsw_pci->pdev);
index 32c7cab..a2c1fbd 100644 (file)
@@ -176,7 +176,7 @@ MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
 /* pci_cqe_trap_id
  * Trap ID that captured the packet.
  */
-MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 9);
+MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 10);
 
 /* pci_cqe_crc
  * Length include CRC. Indicates the length field includes
@@ -213,6 +213,11 @@ mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12);
  */
 MLXSW_ITEM32(pci, cqe2, user_def_val_orig_pkt_len, 0x14, 0, 20);
 
+/* pci_cqe_mirror_reason
+ * Mirror reason.
+ */
+MLXSW_ITEM32(pci, cqe2, mirror_reason, 0x18, 24, 8);
+
 /* pci_cqe_owner
  * Ownership bit.
  */
index fcb88d4..3c5b254 100644 (file)
@@ -3405,11 +3405,20 @@ MLXSW_ITEM32(reg, qpcr, violate_action, 0x18, 0, 4);
  */
 MLXSW_ITEM64(reg, qpcr, violate_count, 0x20, 0, 64);
 
+/* Packets */
 #define MLXSW_REG_QPCR_LOWEST_CIR      1
 #define MLXSW_REG_QPCR_HIGHEST_CIR     (2 * 1000 * 1000 * 1000) /* 2Gpps */
 #define MLXSW_REG_QPCR_LOWEST_CBS      4
 #define MLXSW_REG_QPCR_HIGHEST_CBS     24
 
+/* Bandwidth */
+#define MLXSW_REG_QPCR_LOWEST_CIR_BITS         1024 /* bps */
+#define MLXSW_REG_QPCR_HIGHEST_CIR_BITS                2000000000000ULL /* 2Tbps */
+#define MLXSW_REG_QPCR_LOWEST_CBS_BITS_SP1     4
+#define MLXSW_REG_QPCR_LOWEST_CBS_BITS_SP2     4
+#define MLXSW_REG_QPCR_HIGHEST_CBS_BITS_SP1    25
+#define MLXSW_REG_QPCR_HIGHEST_CBS_BITS_SP2    31
+
 static inline void mlxsw_reg_qpcr_pack(char *payload, u16 pid,
                                       enum mlxsw_reg_qpcr_ir_units ir_units,
                                       bool bytes, u32 cir, u16 cbs)
@@ -5438,6 +5447,56 @@ static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port,
                                 MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL : 0);
 }
 
+/* PDDR - Port Diagnostics Database Register
+ * -----------------------------------------
+ * The PDDR enables to read the Phy debug database
+ */
+#define MLXSW_REG_PDDR_ID 0x5031
+#define MLXSW_REG_PDDR_LEN 0x100
+
+MLXSW_REG_DEFINE(pddr, MLXSW_REG_PDDR_ID, MLXSW_REG_PDDR_LEN);
+
+/* reg_pddr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pddr, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_pddr_page_select {
+       MLXSW_REG_PDDR_PAGE_SELECT_TROUBLESHOOTING_INFO = 1,
+};
+
+/* reg_pddr_page_select
+ * Page select index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pddr, page_select, 0x04, 0, 8);
+
+enum mlxsw_reg_pddr_trblsh_group_opcode {
+       /* Monitor opcodes */
+       MLXSW_REG_PDDR_TRBLSH_GROUP_OPCODE_MONITOR,
+};
+
+/* reg_pddr_group_opcode
+ * Group selector.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pddr, trblsh_group_opcode, 0x08, 0, 16);
+
+/* reg_pddr_status_opcode
+ * Group selector.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pddr, trblsh_status_opcode, 0x0C, 0, 16);
+
+static inline void mlxsw_reg_pddr_pack(char *payload, u8 local_port,
+                                      u8 page_select)
+{
+       MLXSW_REG_ZERO(pddr, payload);
+       mlxsw_reg_pddr_local_port_set(payload, local_port);
+       mlxsw_reg_pddr_page_select_set(payload, page_select);
+}
+
 /* PMTM - Port Module Type Mapping Register
  * ----------------------------------------
  * The PMTM allows query or configuration of module types.
@@ -5728,7 +5787,7 @@ MLXSW_ITEM32(reg, hpkt, trap_group, 0x00, 12, 6);
  * Note: A trap ID can only be associated with a single trap group. The device
  * will associate the trap ID with the last trap group configured.
  */
-MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 9);
+MLXSW_ITEM32(reg, hpkt, trap_id, 0x00, 0, 10);
 
 enum {
        MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT,
@@ -8612,6 +8671,13 @@ MLXSW_REG_DEFINE(mpat, MLXSW_REG_MPAT_ID, MLXSW_REG_MPAT_LEN);
  */
 MLXSW_ITEM32(reg, mpat, pa_id, 0x00, 28, 4);
 
+/* reg_mpat_session_id
+ * Mirror Session ID.
+ * Used for MIRROR_SESSION<i> trap.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, session_id, 0x00, 24, 4);
+
 /* reg_mpat_system_port
  * A unique port identifier for the final destination of the packet.
  * Access: RW
@@ -8669,6 +8735,18 @@ enum mlxsw_reg_mpat_span_type {
  */
 MLXSW_ITEM32(reg, mpat, span_type, 0x04, 0, 4);
 
+/* reg_mpat_pide
+ * Policer enable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, pide, 0x0C, 15, 1);
+
+/* reg_mpat_pid
+ * Policer ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, pid, 0x0C, 0, 14);
+
 /* Remote SPAN - Ethernet VLAN
  * - - - - - - - - - - - - - -
  */
@@ -9452,6 +9530,114 @@ MLXSW_ITEM32(reg, mogcr, ptp_iftc, 0x00, 1, 1);
  */
 MLXSW_ITEM32(reg, mogcr, ptp_eftc, 0x00, 0, 1);
 
+/* reg_mogcr_mirroring_pid_base
+ * Base policer id for mirroring policers.
+ * Must have an even value (e.g. 1000, not 1001).
+ * Reserved when SwitchX/-2, Switch-IB/2, Spectrum-1 and Quantum.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mogcr, mirroring_pid_base, 0x0C, 0, 14);
+
+/* MPAGR - Monitoring Port Analyzer Global Register
+ * ------------------------------------------------
+ * This register is used for global port analyzer configurations.
+ * Note: This register is not supported by current FW versions for Spectrum-1.
+ */
+#define MLXSW_REG_MPAGR_ID 0x9089
+#define MLXSW_REG_MPAGR_LEN 0x0C
+
+MLXSW_REG_DEFINE(mpagr, MLXSW_REG_MPAGR_ID, MLXSW_REG_MPAGR_LEN);
+
+enum mlxsw_reg_mpagr_trigger {
+       MLXSW_REG_MPAGR_TRIGGER_EGRESS,
+       MLXSW_REG_MPAGR_TRIGGER_INGRESS,
+       MLXSW_REG_MPAGR_TRIGGER_INGRESS_WRED,
+       MLXSW_REG_MPAGR_TRIGGER_INGRESS_SHARED_BUFFER,
+       MLXSW_REG_MPAGR_TRIGGER_INGRESS_ING_CONG,
+       MLXSW_REG_MPAGR_TRIGGER_INGRESS_EGR_CONG,
+       MLXSW_REG_MPAGR_TRIGGER_EGRESS_ECN,
+       MLXSW_REG_MPAGR_TRIGGER_EGRESS_HIGH_LATENCY,
+};
+
+/* reg_mpagr_trigger
+ * Mirror trigger.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mpagr, trigger, 0x00, 0, 4);
+
+/* reg_mpagr_pa_id
+ * Port analyzer ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpagr, pa_id, 0x04, 0, 4);
+
+/* reg_mpagr_probability_rate
+ * Sampling rate.
+ * Valid values are: 1 to 3.5*10^9
+ * Value of 1 means "sample all". Default is 1.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpagr, probability_rate, 0x08, 0, 32);
+
+static inline void mlxsw_reg_mpagr_pack(char *payload,
+                                       enum mlxsw_reg_mpagr_trigger trigger,
+                                       u8 pa_id, u32 probability_rate)
+{
+       MLXSW_REG_ZERO(mpagr, payload);
+       mlxsw_reg_mpagr_trigger_set(payload, trigger);
+       mlxsw_reg_mpagr_pa_id_set(payload, pa_id);
+       mlxsw_reg_mpagr_probability_rate_set(payload, probability_rate);
+}
+
+/* MOMTE - Monitoring Mirror Trigger Enable Register
+ * -------------------------------------------------
+ * This register is used to configure the mirror enable for different mirror
+ * reasons.
+ */
+#define MLXSW_REG_MOMTE_ID 0x908D
+#define MLXSW_REG_MOMTE_LEN 0x10
+
+MLXSW_REG_DEFINE(momte, MLXSW_REG_MOMTE_ID, MLXSW_REG_MOMTE_LEN);
+
+/* reg_momte_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, momte, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_momte_type {
+       MLXSW_REG_MOMTE_TYPE_WRED = 0x20,
+       MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS = 0x31,
+       MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS_DESCRIPTORS = 0x32,
+       MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_EGRESS_PORT = 0x33,
+       MLXSW_REG_MOMTE_TYPE_ING_CONG = 0x40,
+       MLXSW_REG_MOMTE_TYPE_EGR_CONG = 0x50,
+       MLXSW_REG_MOMTE_TYPE_ECN = 0x60,
+       MLXSW_REG_MOMTE_TYPE_HIGH_LATENCY = 0x70,
+};
+
+/* reg_momte_type
+ * Type of mirroring.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, momte, type, 0x04, 0, 8);
+
+/* reg_momte_tclass_en
+ * TClass/PG mirror enable. Each bit represents corresponding tclass.
+ * 0: disable (default)
+ * 1: enable
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, momte, tclass_en, 0x08, 0x08, 1);
+
+static inline void mlxsw_reg_momte_pack(char *payload, u8 local_port,
+                                       enum mlxsw_reg_momte_type type)
+{
+       MLXSW_REG_ZERO(momte, payload);
+       mlxsw_reg_momte_local_port_set(payload, local_port);
+       mlxsw_reg_momte_type_set(payload, type);
+}
+
 /* MTPPPC - Time Precision Packet Port Configuration
  * -------------------------------------------------
  * This register serves for configuration of which PTP messages should be
@@ -10758,6 +10944,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
        MLXSW_REG(pbmc),
        MLXSW_REG(pspa),
        MLXSW_REG(pplr),
+       MLXSW_REG(pddr),
        MLXSW_REG(pmtm),
        MLXSW_REG(htgt),
        MLXSW_REG(hpkt),
@@ -10802,6 +10989,8 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
        MLXSW_REG(mgpc),
        MLXSW_REG(mprs),
        MLXSW_REG(mogcr),
+       MLXSW_REG(mpagr),
+       MLXSW_REG(momte),
        MLXSW_REG(mtpppc),
        MLXSW_REG(mtpptr),
        MLXSW_REG(mtptpt),
index d62496e..a56c9e1 100644 (file)
@@ -47,6 +47,7 @@ enum mlxsw_res_id {
        MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB,
        MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB,
        MLXSW_RES_ID_ACL_MAX_BF_LOG,
+       MLXSW_RES_ID_MAX_GLOBAL_POLICERS,
        MLXSW_RES_ID_MAX_CPU_POLICERS,
        MLXSW_RES_ID_MAX_VRS,
        MLXSW_RES_ID_MAX_RIFS,
@@ -105,6 +106,7 @@ static u16 mlxsw_res_ids[] = {
        [MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB] = 0x2952,
        [MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB] = 0x2953,
        [MLXSW_RES_ID_ACL_MAX_BF_LOG] = 0x2960,
+       [MLXSW_RES_ID_MAX_GLOBAL_POLICERS] = 0x2A10,
        [MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13,
        [MLXSW_RES_ID_MAX_VRS] = 0x2C01,
        [MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
index 5ffa32b..519eb44 100644 (file)
@@ -45,8 +45,8 @@
 #include "../mlxfw/mlxfw.h"
 
 #define MLXSW_SP1_FWREV_MAJOR 13
-#define MLXSW_SP1_FWREV_MINOR 2000
-#define MLXSW_SP1_FWREV_SUBMINOR 2714
+#define MLXSW_SP1_FWREV_MINOR 2007
+#define MLXSW_SP1_FWREV_SUBMINOR 1168
 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
 
 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -62,8 +62,8 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
        "." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
 
 #define MLXSW_SP2_FWREV_MAJOR 29
-#define MLXSW_SP2_FWREV_MINOR 2000
-#define MLXSW_SP2_FWREV_SUBMINOR 2714
+#define MLXSW_SP2_FWREV_MINOR 2007
+#define MLXSW_SP2_FWREV_SUBMINOR 1168
 
 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
        .major = MLXSW_SP2_FWREV_MAJOR,
@@ -76,10 +76,24 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
        "." __stringify(MLXSW_SP2_FWREV_MINOR) \
        "." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"
 
+#define MLXSW_SP3_FWREV_MAJOR 30
+#define MLXSW_SP3_FWREV_MINOR 2007
+#define MLXSW_SP3_FWREV_SUBMINOR 1168
+
+static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
+       .major = MLXSW_SP3_FWREV_MAJOR,
+       .minor = MLXSW_SP3_FWREV_MINOR,
+       .subminor = MLXSW_SP3_FWREV_SUBMINOR,
+};
+
+#define MLXSW_SP3_FW_FILENAME \
+       "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
+       "." __stringify(MLXSW_SP3_FWREV_MINOR) \
+       "." __stringify(MLXSW_SP3_FWREV_SUBMINOR) ".mfa2"
+
 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
 static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
-static const char mlxsw_sp_driver_version[] = "1.0";
 
 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
        0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
@@ -161,43 +175,6 @@ struct mlxsw_sp_mlxfw_dev {
        struct mlxsw_sp *mlxsw_sp;
 };
 
-struct mlxsw_sp_ptp_ops {
-       struct mlxsw_sp_ptp_clock *
-               (*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev);
-       void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock);
-
-       struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp);
-       void (*fini)(struct mlxsw_sp_ptp_state *ptp_state);
-
-       /* Notify a driver that a packet that might be PTP was received. Driver
-        * is responsible for freeing the passed-in SKB.
-        */
-       void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
-                       u8 local_port);
-
-       /* Notify a driver that a timestamped packet was transmitted. Driver
-        * is responsible for freeing the passed-in SKB.
-        */
-       void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
-                           u8 local_port);
-
-       int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
-                           struct hwtstamp_config *config);
-       int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port,
-                           struct hwtstamp_config *config);
-       void (*shaper_work)(struct work_struct *work);
-       int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
-                          struct ethtool_ts_info *info);
-       int (*get_stats_count)(void);
-       void (*get_stats_strings)(u8 **p);
-       void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
-                         u64 *data, int data_index);
-};
-
-struct mlxsw_sp_span_ops {
-       u32 (*buffsize_get)(int mtu, u32 speed);
-};
-
 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
                                    u16 component_index, u32 *p_max_size,
                                    u8 *p_align_bits, u16 *p_max_write_size)
@@ -580,8 +557,8 @@ static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
        return 0;
 }
 
-static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
-                                         bool is_up)
+int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                  bool is_up)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char paos_pl[MLXSW_REG_PAOS_LEN];
@@ -978,8 +955,10 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
 
                lossy = !(pfc || pause_en);
                thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
+               thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
                delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
                                                        pfc, pause_en);
+               delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);
                total_cells = thres_cells + delay_cells;
 
                taken_headroom_cells += total_cells;
@@ -993,8 +972,8 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
 }
 
-static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
-                                     int mtu, bool pause_en)
+int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                              int mtu, bool pause_en)
 {
        u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
        bool dcb_en = !!mlxsw_sp_port->dcb.ets;
@@ -1086,8 +1065,8 @@ static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device
        return -EINVAL;
 }
 
-static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
-                                      int prio, char *ppcnt_pl)
+int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
+                               int prio, char *ppcnt_pl)
 {
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -1344,1699 +1323,220 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
 
        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
        if (!mlxsw_sp_port_vlan)
-               return 0;
-       mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
-
-       return 0;
-}
-
-static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
-                            void *type_data)
-{
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-
-       switch (type) {
-       case TC_SETUP_BLOCK:
-               return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
-       case TC_SETUP_QDISC_RED:
-               return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
-       case TC_SETUP_QDISC_PRIO:
-               return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
-       case TC_SETUP_QDISC_ETS:
-               return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
-       case TC_SETUP_QDISC_TBF:
-               return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
-       case TC_SETUP_QDISC_FIFO:
-               return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
-       default:
-               return -EOPNOTSUPP;
-       }
-}
-
-static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
-{
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-
-       if (!enable) {
-               if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
-                   mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
-                       netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
-                       return -EINVAL;
-               }
-               mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
-               mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
-       } else {
-               mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
-               mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
-       }
-       return 0;
-}
-
-static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
-{
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-       char pplr_pl[MLXSW_REG_PPLR_LEN];
-       int err;
-
-       if (netif_running(dev))
-               mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
-
-       mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
-       err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
-                             pplr_pl);
-
-       if (netif_running(dev))
-               mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
-
-       return err;
-}
-
-typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
-
-static int mlxsw_sp_handle_feature(struct net_device *dev,
-                                  netdev_features_t wanted_features,
-                                  netdev_features_t feature,
-                                  mlxsw_sp_feature_handler feature_handler)
-{
-       netdev_features_t changes = wanted_features ^ dev->features;
-       bool enable = !!(wanted_features & feature);
-       int err;
-
-       if (!(changes & feature))
-               return 0;
-
-       err = feature_handler(dev, enable);
-       if (err) {
-               netdev_err(dev, "%s feature %pNF failed, err %d\n",
-                          enable ? "Enable" : "Disable", &feature, err);
-               return err;
-       }
-
-       if (enable)
-               dev->features |= feature;
-       else
-               dev->features &= ~feature;
-
-       return 0;
-}
-static int mlxsw_sp_set_features(struct net_device *dev,
-                                netdev_features_t features)
-{
-       netdev_features_t oper_features = dev->features;
-       int err = 0;
-
-       err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
-                                      mlxsw_sp_feature_hw_tc);
-       err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
-                                      mlxsw_sp_feature_loopback);
-
-       if (err) {
-               dev->features = oper_features;
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static struct devlink_port *
-mlxsw_sp_port_get_devlink_port(struct net_device *dev)
-{
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-
-       return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
-                                               mlxsw_sp_port->local_port);
-}
-
-static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
-                                     struct ifreq *ifr)
-{
-       struct hwtstamp_config config;
-       int err;
-
-       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
-               return -EFAULT;
-
-       err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
-                                                            &config);
-       if (err)
-               return err;
-
-       if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
-               return -EFAULT;
-
-       return 0;
-}
-
-static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
-                                     struct ifreq *ifr)
-{
-       struct hwtstamp_config config;
-       int err;
-
-       err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
-                                                            &config);
-       if (err)
-               return err;
-
-       if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
-               return -EFAULT;
-
-       return 0;
-}
-
-static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
-{
-       struct hwtstamp_config config = {0};
-
-       mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
-}
-
-static int
-mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-
-       switch (cmd) {
-       case SIOCSHWTSTAMP:
-               return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
-       case SIOCGHWTSTAMP:
-               return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
-       default:
-               return -EOPNOTSUPP;
-       }
-}
-
-static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
-       .ndo_open               = mlxsw_sp_port_open,
-       .ndo_stop               = mlxsw_sp_port_stop,
-       .ndo_start_xmit         = mlxsw_sp_port_xmit,
-       .ndo_setup_tc           = mlxsw_sp_setup_tc,
-       .ndo_set_rx_mode        = mlxsw_sp_set_rx_mode,
-       .ndo_set_mac_address    = mlxsw_sp_port_set_mac_address,
-       .ndo_change_mtu         = mlxsw_sp_port_change_mtu,
-       .ndo_get_stats64        = mlxsw_sp_port_get_stats64,
-       .ndo_has_offload_stats  = mlxsw_sp_port_has_offload_stats,
-       .ndo_get_offload_stats  = mlxsw_sp_port_get_offload_stats,
-       .ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
-       .ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
-       .ndo_set_features       = mlxsw_sp_set_features,
-       .ndo_get_devlink_port   = mlxsw_sp_port_get_devlink_port,
-       .ndo_do_ioctl           = mlxsw_sp_port_ioctl,
-};
-
-static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
-                                     struct ethtool_drvinfo *drvinfo)
-{
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-
-       strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
-               sizeof(drvinfo->driver));
-       strlcpy(drvinfo->version, mlxsw_sp_driver_version,
-               sizeof(drvinfo->version));
-       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-                "%d.%d.%d",
-                mlxsw_sp->bus_info->fw_rev.major,
-                mlxsw_sp->bus_info->fw_rev.minor,
-                mlxsw_sp->bus_info->fw_rev.subminor);
-       strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
-               sizeof(drvinfo->bus_info));
-}
-
-static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
-                                        struct ethtool_pauseparam *pause)
-{
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-
-       pause->rx_pause = mlxsw_sp_port->link.rx_pause;
-       pause->tx_pause = mlxsw_sp_port->link.tx_pause;
-}
-
-static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
-                                  struct ethtool_pauseparam *pause)
-{
-       char pfcc_pl[MLXSW_REG_PFCC_LEN];
-
-       mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
-       mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
-       mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
-
-       return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
-                              pfcc_pl);
-}
-
-static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
-                                       struct ethtool_pauseparam *pause)
-{
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-       bool pause_en = pause->tx_pause || pause->rx_pause;
-       int err;
-
-       if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
-               netdev_err(dev, "PFC already enabled on port\n");
-               return -EINVAL;
-       }
-
-       if (pause->autoneg) {
-               netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
-               return -EINVAL;
-       }
-
-       err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
-       if (err) {
-               netdev_err(dev, "Failed to configure port's headroom\n");
-               return err;
-       }
-
-       err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
-       if (err) {
-               netdev_err(dev, "Failed to set PAUSE parameters\n");
-               goto err_port_pause_configure;
-       }
-
-       mlxsw_sp_port->link.rx_pause = pause->rx_pause;
-       mlxsw_sp_port->link.tx_pause = pause->tx_pause;
-
-       return 0;
-
-err_port_pause_configure:
-       pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
-       mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
-       return err;
-}
-
-struct mlxsw_sp_port_hw_stats {
-       char str[ETH_GSTRING_LEN];
-       u64 (*getter)(const char *payload);
-       bool cells_bytes;
-};
-
-static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
-       {
-               .str = "a_frames_transmitted_ok",
-               .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
-       },
-       {
-               .str = "a_frames_received_ok",
-               .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
-       },
-       {
-               .str = "a_frame_check_sequence_errors",
-               .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
-       },
-       {
-               .str = "a_alignment_errors",
-               .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
-       },
-       {
-               .str = "a_octets_transmitted_ok",
-               .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
-       },
-       {
-               .str = "a_octets_received_ok",
-               .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
-       },
-       {
-               .str = "a_multicast_frames_xmitted_ok",
-               .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
-       },
-       {
-               .str = "a_broadcast_frames_xmitted_ok",
-               .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
-       },
-       {
-               .str = "a_multicast_frames_received_ok",
-               .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
-       },
-       {
-               .str = "a_broadcast_frames_received_ok",
-               .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
-       },
-       {
-               .str = "a_in_range_length_errors",
-               .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
-       },
-       {
-               .str = "a_out_of_range_length_field",
-               .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
-       },
-       {
-               .str = "a_frame_too_long_errors",
-               .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
-       },
-       {
-               .str = "a_symbol_error_during_carrier",
-               .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
-       },
-       {
-               .str = "a_mac_control_frames_transmitted",
-               .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
-       },
-       {
-               .str = "a_mac_control_frames_received",
-               .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
-       },
-       {
-               .str = "a_unsupported_opcodes_received",
-               .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
-       },
-       {
-               .str = "a_pause_mac_ctrl_frames_received",
-               .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
-       },
-       {
-               .str = "a_pause_mac_ctrl_frames_xmitted",
-               .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
-       },
-};
-
-#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
-
-static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
-       {
-               .str = "if_in_discards",
-               .getter = mlxsw_reg_ppcnt_if_in_discards_get,
-       },
-       {
-               .str = "if_out_discards",
-               .getter = mlxsw_reg_ppcnt_if_out_discards_get,
-       },
-       {
-               .str = "if_out_errors",
-               .getter = mlxsw_reg_ppcnt_if_out_errors_get,
-       },
-};
-
-#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
-       ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)
-
-static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
-       {
-               .str = "ether_stats_undersize_pkts",
-               .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
-       },
-       {
-               .str = "ether_stats_oversize_pkts",
-               .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
-       },
-       {
-               .str = "ether_stats_fragments",
-               .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
-       },
-       {
-               .str = "ether_pkts64octets",
-               .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
-       },
-       {
-               .str = "ether_pkts65to127octets",
-               .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
-       },
-       {
-               .str = "ether_pkts128to255octets",
-               .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
-       },
-       {
-               .str = "ether_pkts256to511octets",
-               .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
-       },
-       {
-               .str = "ether_pkts512to1023octets",
-               .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
-       },
-       {
-               .str = "ether_pkts1024to1518octets",
-               .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
-       },
-       {
-               .str = "ether_pkts1519to2047octets",
-               .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
-       },
-       {
-               .str = "ether_pkts2048to4095octets",
-               .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
-       },
-       {
-               .str = "ether_pkts4096to8191octets",
-               .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
-       },
-       {
-               .str = "ether_pkts8192to10239octets",
-               .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
-       },
-};
-
-#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
-       ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)
-
-static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
-       {
-               .str = "dot3stats_fcs_errors",
-               .getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
-       },
-       {
-               .str = "dot3stats_symbol_errors",
-               .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
-       },
-       {
-               .str = "dot3control_in_unknown_opcodes",
-               .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
-       },
-       {
-               .str = "dot3in_pause_frames",
-               .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
-       },
-};
-
-#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
-       ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)
-
-static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_ext_stats[] = {
-       {
-               .str = "ecn_marked",
-               .getter = mlxsw_reg_ppcnt_ecn_marked_get,
-       },
-};
-
-#define MLXSW_SP_PORT_HW_EXT_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_ext_stats)
-
-static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
-       {
-               .str = "discard_ingress_general",
-               .getter = mlxsw_reg_ppcnt_ingress_general_get,
-       },
-       {
-               .str = "discard_ingress_policy_engine",
-               .getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
-       },
-       {
-               .str = "discard_ingress_vlan_membership",
-               .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
-       },
-       {
-               .str = "discard_ingress_tag_frame_type",
-               .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
-       },
-       {
-               .str = "discard_egress_vlan_membership",
-               .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
-       },
-       {
-               .str = "discard_loopback_filter",
-               .getter = mlxsw_reg_ppcnt_loopback_filter_get,
-       },
-       {
-               .str = "discard_egress_general",
-               .getter = mlxsw_reg_ppcnt_egress_general_get,
-       },
-       {
-               .str = "discard_egress_hoq",
-               .getter = mlxsw_reg_ppcnt_egress_hoq_get,
-       },
-       {
-               .str = "discard_egress_policy_engine",
-               .getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
-       },
-       {
-               .str = "discard_ingress_tx_link_down",
-               .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
-       },
-       {
-               .str = "discard_egress_stp_filter",
-               .getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
-       },
-       {
-               .str = "discard_egress_sll",
-               .getter = mlxsw_reg_ppcnt_egress_sll_get,
-       },
-};
-
-#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
-       ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)
-
-static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
-       {
-               .str = "rx_octets_prio",
-               .getter = mlxsw_reg_ppcnt_rx_octets_get,
-       },
-       {
-               .str = "rx_frames_prio",
-               .getter = mlxsw_reg_ppcnt_rx_frames_get,
-       },
-       {
-               .str = "tx_octets_prio",
-               .getter = mlxsw_reg_ppcnt_tx_octets_get,
-       },
-       {
-               .str = "tx_frames_prio",
-               .getter = mlxsw_reg_ppcnt_tx_frames_get,
-       },
-       {
-               .str = "rx_pause_prio",
-               .getter = mlxsw_reg_ppcnt_rx_pause_get,
-       },
-       {
-               .str = "rx_pause_duration_prio",
-               .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
-       },
-       {
-               .str = "tx_pause_prio",
-               .getter = mlxsw_reg_ppcnt_tx_pause_get,
-       },
-       {
-               .str = "tx_pause_duration_prio",
-               .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
-       },
-};
-
-#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
-
-static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
-       {
-               .str = "tc_transmit_queue_tc",
-               .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
-               .cells_bytes = true,
-       },
-       {
-               .str = "tc_no_buffer_discard_uc_tc",
-               .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
-       },
-};
-
-#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
-
-#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
-                                        MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
-                                        MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
-                                        MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
-                                        MLXSW_SP_PORT_HW_EXT_STATS_LEN + \
-                                        MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
-                                        (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
-                                         IEEE_8021QAZ_MAX_TCS) + \
-                                        (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
-                                         TC_MAX_QUEUE))
-
-static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
-{
-       int i;
-
-       for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
-               snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
-                        mlxsw_sp_port_hw_prio_stats[i].str, prio);
-               *p += ETH_GSTRING_LEN;
-       }
-}
-
-static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
-{
-       int i;
-
-       for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
-               snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
-                        mlxsw_sp_port_hw_tc_stats[i].str, tc);
-               *p += ETH_GSTRING_LEN;
-       }
-}
-
-static void mlxsw_sp_port_get_strings(struct net_device *dev,
-                                     u32 stringset, u8 *data)
-{
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-       u8 *p = data;
-       int i;
-
-       switch (stringset) {
-       case ETH_SS_STATS:
-               for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
-                       memcpy(p, mlxsw_sp_port_hw_stats[i].str,
-                              ETH_GSTRING_LEN);
-                       p += ETH_GSTRING_LEN;
-               }
-
-               for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
-                       memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
-                              ETH_GSTRING_LEN);
-                       p += ETH_GSTRING_LEN;
-               }
-
-               for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
-                       memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
-                              ETH_GSTRING_LEN);
-                       p += ETH_GSTRING_LEN;
-               }
-
-               for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
-                       memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
-                              ETH_GSTRING_LEN);
-                       p += ETH_GSTRING_LEN;
-               }
-
-               for (i = 0; i < MLXSW_SP_PORT_HW_EXT_STATS_LEN; i++) {
-                       memcpy(p, mlxsw_sp_port_hw_ext_stats[i].str,
-                              ETH_GSTRING_LEN);
-                       p += ETH_GSTRING_LEN;
-               }
-
-               for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
-                       memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
-                              ETH_GSTRING_LEN);
-                       p += ETH_GSTRING_LEN;
-               }
-
-               for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-                       mlxsw_sp_port_get_prio_strings(&p, i);
-
-               for (i = 0; i < TC_MAX_QUEUE; i++)
-                       mlxsw_sp_port_get_tc_strings(&p, i);
-
-               mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p);
-               break;
-       }
-}
-
-static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
-                                    enum ethtool_phys_id_state state)
-{
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       char mlcr_pl[MLXSW_REG_MLCR_LEN];
-       bool active;
-
-       switch (state) {
-       case ETHTOOL_ID_ACTIVE:
-               active = true;
-               break;
-       case ETHTOOL_ID_INACTIVE:
-               active = false;
-               break;
-       default:
-               return -EOPNOTSUPP;
-       }
-
-       mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
-       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
-}
-
-static int
-mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
-                              int *p_len, enum mlxsw_reg_ppcnt_grp grp)
-{
-       switch (grp) {
-       case MLXSW_REG_PPCNT_IEEE_8023_CNT:
-               *p_hw_stats = mlxsw_sp_port_hw_stats;
-               *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
-               break;
-       case MLXSW_REG_PPCNT_RFC_2863_CNT:
-               *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
-               *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
-               break;
-       case MLXSW_REG_PPCNT_RFC_2819_CNT:
-               *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
-               *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
-               break;
-       case MLXSW_REG_PPCNT_RFC_3635_CNT:
-               *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
-               *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
-               break;
-       case MLXSW_REG_PPCNT_EXT_CNT:
-               *p_hw_stats = mlxsw_sp_port_hw_ext_stats;
-               *p_len = MLXSW_SP_PORT_HW_EXT_STATS_LEN;
-               break;
-       case MLXSW_REG_PPCNT_DISCARD_CNT:
-               *p_hw_stats = mlxsw_sp_port_hw_discard_stats;
-               *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
-               break;
-       case MLXSW_REG_PPCNT_PRIO_CNT:
-               *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
-               *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
-               break;
-       case MLXSW_REG_PPCNT_TC_CNT:
-               *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
-               *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
-               break;
-       default:
-               WARN_ON(1);
-               return -EOPNOTSUPP;
-       }
-       return 0;
-}
-
-static void __mlxsw_sp_port_get_stats(struct net_device *dev,
-                                     enum mlxsw_reg_ppcnt_grp grp, int prio,
-                                     u64 *data, int data_index)
-{
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       struct mlxsw_sp_port_hw_stats *hw_stats;
-       char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
-       int i, len;
-       int err;
-
-       err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
-       if (err)
-               return;
-       mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
-       for (i = 0; i < len; i++) {
-               data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
-               if (!hw_stats[i].cells_bytes)
-                       continue;
-               data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
-                                                           data[data_index + i]);
-       }
-}
-
-static void mlxsw_sp_port_get_stats(struct net_device *dev,
-                                   struct ethtool_stats *stats, u64 *data)
-{
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-       int i, data_index = 0;
-
-       /* IEEE 802.3 Counters */
-       __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
-                                 data, data_index);
-       data_index = MLXSW_SP_PORT_HW_STATS_LEN;
-
-       /* RFC 2863 Counters */
-       __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
-                                 data, data_index);
-       data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
-
-       /* RFC 2819 Counters */
-       __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
-                                 data, data_index);
-       data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
-
-       /* RFC 3635 Counters */
-       __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
-                                 data, data_index);
-       data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
-
-       /* Extended Counters */
-       __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
-                                 data, data_index);
-       data_index += MLXSW_SP_PORT_HW_EXT_STATS_LEN;
-
-       /* Discard Counters */
-       __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
-                                 data, data_index);
-       data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
-
-       /* Per-Priority Counters */
-       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-               __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
-                                         data, data_index);
-               data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
-       }
-
-       /* Per-TC Counters */
-       for (i = 0; i < TC_MAX_QUEUE; i++) {
-               __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
-                                         data, data_index);
-               data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
-       }
-
-       /* PTP counters */
-       mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port,
-                                                   data, data_index);
-       data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
-}
-
-static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
-{
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-
-       switch (sset) {
-       case ETH_SS_STATS:
-               return MLXSW_SP_PORT_ETHTOOL_STATS_LEN +
-                      mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
-       default:
-               return -EOPNOTSUPP;
-       }
-}
-
-struct mlxsw_sp1_port_link_mode {
-       enum ethtool_link_mode_bit_indices mask_ethtool;
-       u32 mask;
-       u32 speed;
-};
-
-static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
-               .mask_ethtool   = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
-               .speed          = SPEED_100,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
-                                 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
-               .mask_ethtool   = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
-               .speed          = SPEED_1000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
-               .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
-               .speed          = SPEED_10000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
-                                 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
-               .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
-               .speed          = SPEED_10000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
-                                 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
-                                 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
-                                 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
-               .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
-               .speed          = SPEED_10000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
-               .mask_ethtool   = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
-               .speed          = SPEED_20000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
-               .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
-               .speed          = SPEED_40000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
-               .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
-               .speed          = SPEED_40000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
-               .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
-               .speed          = SPEED_40000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
-               .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
-               .speed          = SPEED_40000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
-               .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
-               .speed          = SPEED_25000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
-               .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
-               .speed          = SPEED_25000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
-               .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
-               .speed          = SPEED_25000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
-               .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
-               .speed          = SPEED_50000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
-               .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
-               .speed          = SPEED_50000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
-               .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
-               .speed          = SPEED_50000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
-               .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
-               .speed          = SPEED_100000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
-               .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
-               .speed          = SPEED_100000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
-               .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
-               .speed          = SPEED_100000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
-               .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
-               .speed          = SPEED_100000,
-       },
-};
-
-#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)
-
-static void
-mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
-                                  u32 ptys_eth_proto,
-                                  struct ethtool_link_ksettings *cmd)
-{
-       if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
-                             MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
-                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
-                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
-                             MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
-                             MLXSW_REG_PTYS_ETH_SPEED_SGMII))
-               ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
-
-       if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
-                             MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
-                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
-                             MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
-                             MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
-               ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
-}
-
-static void
-mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
-                        u8 width, unsigned long *mode)
-{
-       int i;
-
-       for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
-               if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
-                       __set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
-                                 mode);
-       }
-}
-
-static u32
-mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
-{
-       int i;
-
-       for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
-               if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
-                       return mlxsw_sp1_port_link_mode[i].speed;
-       }
-
-       return SPEED_UNKNOWN;
-}
-
-static void
-mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
-                                u32 ptys_eth_proto,
-                                struct ethtool_link_ksettings *cmd)
-{
-       cmd->base.speed = SPEED_UNKNOWN;
-       cmd->base.duplex = DUPLEX_UNKNOWN;
-
-       if (!carrier_ok)
-               return;
-
-       cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
-       if (cmd->base.speed != SPEED_UNKNOWN)
-               cmd->base.duplex = DUPLEX_FULL;
-}
-
-static u32
-mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
-                             const struct ethtool_link_ksettings *cmd)
-{
-       u32 ptys_proto = 0;
-       int i;
-
-       for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
-               if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
-                            cmd->link_modes.advertising))
-                       ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
-       }
-       return ptys_proto;
-}
-
-static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width,
-                                  u32 speed)
-{
-       u32 ptys_proto = 0;
-       int i;
-
-       for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
-               if (speed == mlxsw_sp1_port_link_mode[i].speed)
-                       ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
-       }
-       return ptys_proto;
-}
-
-static void
-mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
-                           u8 local_port, u32 proto_admin, bool autoneg)
-{
-       mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
-}
-
-static void
-mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
-                             u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
-                             u32 *p_eth_proto_oper)
-{
-       mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin,
-                                 p_eth_proto_oper);
-}
-
-static const struct mlxsw_sp_port_type_speed_ops
-mlxsw_sp1_port_type_speed_ops = {
-       .from_ptys_supported_port       = mlxsw_sp1_from_ptys_supported_port,
-       .from_ptys_link                 = mlxsw_sp1_from_ptys_link,
-       .from_ptys_speed                = mlxsw_sp1_from_ptys_speed,
-       .from_ptys_speed_duplex         = mlxsw_sp1_from_ptys_speed_duplex,
-       .to_ptys_advert_link            = mlxsw_sp1_to_ptys_advert_link,
-       .to_ptys_speed                  = mlxsw_sp1_to_ptys_speed,
-       .reg_ptys_eth_pack              = mlxsw_sp1_reg_ptys_eth_pack,
-       .reg_ptys_eth_unpack            = mlxsw_sp1_reg_ptys_eth_unpack,
-};
-
-static const enum ethtool_link_mode_bit_indices
-mlxsw_sp2_mask_ethtool_sgmii_100m[] = {
-       ETHTOOL_LINK_MODE_100baseT_Full_BIT,
-};
-
-#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \
-       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m)
-
-static const enum ethtool_link_mode_bit_indices
-mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
-       ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
-       ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
-};
-
-#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \
-       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)
-
-static const enum ethtool_link_mode_bit_indices
-mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
-       ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
-};
-
-#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
-       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)
-
-static const enum ethtool_link_mode_bit_indices
-mlxsw_sp2_mask_ethtool_5gbase_r[] = {
-       ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
-};
-
-#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \
-       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r)
-
-static const enum ethtool_link_mode_bit_indices
-mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = {
-       ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
-       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
-       ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
-       ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
-       ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
-       ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
-       ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
-};
-
-#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \
-       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g)
-
-static const enum ethtool_link_mode_bit_indices
-mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = {
-       ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
-       ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
-       ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
-       ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
-};
-
-#define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \
-       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g)
-
-static const enum ethtool_link_mode_bit_indices
-mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = {
-       ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
-       ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
-       ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
-};
-
-#define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \
-       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr)
-
-static const enum ethtool_link_mode_bit_indices
-mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = {
-       ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
-       ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
-       ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
-};
-
-#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \
-       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2)
-
-static const enum ethtool_link_mode_bit_indices
-mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = {
-       ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
-       ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
-       ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
-       ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
-       ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
-};
-
-#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \
-       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr)
-
-static const enum ethtool_link_mode_bit_indices
-mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = {
-       ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
-       ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
-       ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
-       ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
-};
-
-#define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \
-       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4)
-
-static const enum ethtool_link_mode_bit_indices
-mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
-       ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
-       ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
-       ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
-       ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
-       ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
-};
-
-#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \
-       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)
-
-static const enum ethtool_link_mode_bit_indices
-mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
-       ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
-       ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
-       ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
-       ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
-       ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
-};
-
-#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
-       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)
-
-static const enum ethtool_link_mode_bit_indices
-mlxsw_sp2_mask_ethtool_400gaui_8[] = {
-       ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
-       ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
-       ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
-       ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT,
-       ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
-};
-
-#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN \
-       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8)
-
-#define MLXSW_SP_PORT_MASK_WIDTH_1X    BIT(0)
-#define MLXSW_SP_PORT_MASK_WIDTH_2X    BIT(1)
-#define MLXSW_SP_PORT_MASK_WIDTH_4X    BIT(2)
-#define MLXSW_SP_PORT_MASK_WIDTH_8X    BIT(3)
-
-static u8 mlxsw_sp_port_mask_width_get(u8 width)
-{
-       switch (width) {
-       case 1:
-               return MLXSW_SP_PORT_MASK_WIDTH_1X;
-       case 2:
-               return MLXSW_SP_PORT_MASK_WIDTH_2X;
-       case 4:
-               return MLXSW_SP_PORT_MASK_WIDTH_4X;
-       case 8:
-               return MLXSW_SP_PORT_MASK_WIDTH_8X;
-       default:
-               WARN_ON_ONCE(1);
-               return 0;
-       }
-}
-
-struct mlxsw_sp2_port_link_mode {
-       const enum ethtool_link_mode_bit_indices *mask_ethtool;
-       int m_ethtool_len;
-       u32 mask;
-       u32 speed;
-       u8 mask_width;
-};
-
-static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
-       {
-               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
-               .mask_ethtool   = mlxsw_sp2_mask_ethtool_sgmii_100m,
-               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
-               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_1X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_2X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_4X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_8X,
-               .speed          = SPEED_100,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
-               .mask_ethtool   = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
-               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
-               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_1X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_2X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_4X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_8X,
-               .speed          = SPEED_1000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
-               .mask_ethtool   = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
-               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
-               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_1X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_2X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_4X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_8X,
-               .speed          = SPEED_2500,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
-               .mask_ethtool   = mlxsw_sp2_mask_ethtool_5gbase_r,
-               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
-               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_1X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_2X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_4X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_8X,
-               .speed          = SPEED_5000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
-               .mask_ethtool   = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
-               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
-               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_1X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_2X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_4X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_8X,
-               .speed          = SPEED_10000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
-               .mask_ethtool   = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
-               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
-               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_4X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_8X,
-               .speed          = SPEED_40000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
-               .mask_ethtool   = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
-               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
-               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_1X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_2X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_4X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_8X,
-               .speed          = SPEED_25000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
-               .mask_ethtool   = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
-               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
-               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_2X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_4X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_8X,
-               .speed          = SPEED_50000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
-               .mask_ethtool   = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
-               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
-               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_1X,
-               .speed          = SPEED_50000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
-               .mask_ethtool   = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
-               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
-               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_4X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_8X,
-               .speed          = SPEED_100000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
-               .mask_ethtool   = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
-               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN,
-               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_2X,
-               .speed          = SPEED_100000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
-               .mask_ethtool   = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
-               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
-               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_4X |
-                                 MLXSW_SP_PORT_MASK_WIDTH_8X,
-               .speed          = SPEED_200000,
-       },
-       {
-               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8,
-               .mask_ethtool   = mlxsw_sp2_mask_ethtool_400gaui_8,
-               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN,
-               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_8X,
-               .speed          = SPEED_400000,
-       },
-};
-
-#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode)
-
-static void
-mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
-                                  u32 ptys_eth_proto,
-                                  struct ethtool_link_ksettings *cmd)
-{
-       ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
-       ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
-}
-
-static void
-mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
-                         unsigned long *mode)
-{
-       int i;
+               return 0;
+       mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
 
-       for (i = 0; i < link_mode->m_ethtool_len; i++)
-               __set_bit(link_mode->mask_ethtool[i], mode);
+       return 0;
 }
 
-static void
-mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
-                        u8 width, unsigned long *mode)
+static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
+                                  struct flow_block_offload *f)
 {
-       u8 mask_width = mlxsw_sp_port_mask_width_get(width);
-       int i;
-
-       for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
-               if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) &&
-                   (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
-                       mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
-                                                 mode);
+       switch (f->binder_type) {
+       case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
+               return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
+       case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
+               return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
+       case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
+               return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
+       default:
+               return -EOPNOTSUPP;
        }
 }
 
-static u32
-mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
+static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
+                            void *type_data)
 {
-       int i;
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 
-       for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
-               if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
-                       return mlxsw_sp2_port_link_mode[i].speed;
+       switch (type) {
+       case TC_SETUP_BLOCK:
+               return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
+       case TC_SETUP_QDISC_RED:
+               return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
+       case TC_SETUP_QDISC_PRIO:
+               return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
+       case TC_SETUP_QDISC_ETS:
+               return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
+       case TC_SETUP_QDISC_TBF:
+               return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
+       case TC_SETUP_QDISC_FIFO:
+               return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
+       default:
+               return -EOPNOTSUPP;
        }
-
-       return SPEED_UNKNOWN;
-}
-
-static void
-mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
-                                u32 ptys_eth_proto,
-                                struct ethtool_link_ksettings *cmd)
-{
-       cmd->base.speed = SPEED_UNKNOWN;
-       cmd->base.duplex = DUPLEX_UNKNOWN;
-
-       if (!carrier_ok)
-               return;
-
-       cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
-       if (cmd->base.speed != SPEED_UNKNOWN)
-               cmd->base.duplex = DUPLEX_FULL;
 }
 
-static bool
-mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
-                          const unsigned long *mode)
+static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
 {
-       int cnt = 0;
-       int i;
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 
-       for (i = 0; i < link_mode->m_ethtool_len; i++) {
-               if (test_bit(link_mode->mask_ethtool[i], mode))
-                       cnt++;
+       if (!enable) {
+               if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
+                   mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
+                       netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
+                       return -EINVAL;
+               }
+               mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
+               mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
+       } else {
+               mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
+               mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
        }
-
-       return cnt == link_mode->m_ethtool_len;
+       return 0;
 }
 
-static u32
-mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
-                             const struct ethtool_link_ksettings *cmd)
+static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
 {
-       u8 mask_width = mlxsw_sp_port_mask_width_get(width);
-       u32 ptys_proto = 0;
-       int i;
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       char pplr_pl[MLXSW_REG_PPLR_LEN];
+       int err;
 
-       for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
-               if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) &&
-                   mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
-                                              cmd->link_modes.advertising))
-                       ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
-       }
-       return ptys_proto;
-}
+       if (netif_running(dev))
+               mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
 
-static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp,
-                                  u8 width, u32 speed)
-{
-       u8 mask_width = mlxsw_sp_port_mask_width_get(width);
-       u32 ptys_proto = 0;
-       int i;
+       mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
+       err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
+                             pplr_pl);
 
-       for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
-               if ((speed == mlxsw_sp2_port_link_mode[i].speed) &&
-                   (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
-                       ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
-       }
-       return ptys_proto;
-}
+       if (netif_running(dev))
+               mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
 
-static void
-mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
-                           u8 local_port, u32 proto_admin,
-                           bool autoneg)
-{
-       mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg);
+       return err;
 }
 
-static void
-mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
-                             u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
-                             u32 *p_eth_proto_oper)
-{
-       mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap,
-                                     p_eth_proto_admin, p_eth_proto_oper);
-}
-
-static const struct mlxsw_sp_port_type_speed_ops
-mlxsw_sp2_port_type_speed_ops = {
-       .from_ptys_supported_port       = mlxsw_sp2_from_ptys_supported_port,
-       .from_ptys_link                 = mlxsw_sp2_from_ptys_link,
-       .from_ptys_speed                = mlxsw_sp2_from_ptys_speed,
-       .from_ptys_speed_duplex         = mlxsw_sp2_from_ptys_speed_duplex,
-       .to_ptys_advert_link            = mlxsw_sp2_to_ptys_advert_link,
-       .to_ptys_speed                  = mlxsw_sp2_to_ptys_speed,
-       .reg_ptys_eth_pack              = mlxsw_sp2_reg_ptys_eth_pack,
-       .reg_ptys_eth_unpack            = mlxsw_sp2_reg_ptys_eth_unpack,
-};
+typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
 
-static void
-mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap,
-                                u8 width, struct ethtool_link_ksettings *cmd)
+static int mlxsw_sp_handle_feature(struct net_device *dev,
+                                  netdev_features_t wanted_features,
+                                  netdev_features_t feature,
+                                  mlxsw_sp_feature_handler feature_handler)
 {
-       const struct mlxsw_sp_port_type_speed_ops *ops;
+       netdev_features_t changes = wanted_features ^ dev->features;
+       bool enable = !!(wanted_features & feature);
+       int err;
 
-       ops = mlxsw_sp->port_type_speed_ops;
+       if (!(changes & feature))
+               return 0;
 
-       ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
-       ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
-       ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+       err = feature_handler(dev, enable);
+       if (err) {
+               netdev_err(dev, "%s feature %pNF failed, err %d\n",
+                          enable ? "Enable" : "Disable", &feature, err);
+               return err;
+       }
 
-       ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd);
-       ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width,
-                           cmd->link_modes.supported);
-}
+       if (enable)
+               dev->features |= feature;
+       else
+               dev->features &= ~feature;
 
-static void
-mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp,
-                                u32 eth_proto_admin, bool autoneg, u8 width,
-                                struct ethtool_link_ksettings *cmd)
+       return 0;
+}
+static int mlxsw_sp_set_features(struct net_device *dev,
+                                netdev_features_t features)
 {
-       const struct mlxsw_sp_port_type_speed_ops *ops;
-
-       ops = mlxsw_sp->port_type_speed_ops;
+       netdev_features_t oper_features = dev->features;
+       int err = 0;
 
-       if (!autoneg)
-               return;
+       err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
+                                      mlxsw_sp_feature_hw_tc);
+       err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
+                                      mlxsw_sp_feature_loopback);
 
-       ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
-       ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width,
-                           cmd->link_modes.advertising);
-}
-
-static u8
-mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type)
-{
-       switch (connector_type) {
-       case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR:
-               return PORT_OTHER;
-       case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE:
-               return PORT_NONE;
-       case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP:
-               return PORT_TP;
-       case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI:
-               return PORT_AUI;
-       case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC:
-               return PORT_BNC;
-       case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII:
-               return PORT_MII;
-       case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE:
-               return PORT_FIBRE;
-       case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA:
-               return PORT_DA;
-       case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER:
-               return PORT_OTHER;
-       default:
-               WARN_ON_ONCE(1);
-               return PORT_OTHER;
+       if (err) {
+               dev->features = oper_features;
+               return -EINVAL;
        }
+
+       return 0;
 }
 
-static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
-                                           struct ethtool_link_ksettings *cmd)
+static struct devlink_port *
+mlxsw_sp_port_get_devlink_port(struct net_device *dev)
 {
-       u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       const struct mlxsw_sp_port_type_speed_ops *ops;
-       char ptys_pl[MLXSW_REG_PTYS_LEN];
-       u8 connector_type;
-       bool autoneg;
+
+       return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+                                               mlxsw_sp_port->local_port);
+}
+
+static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                     struct ifreq *ifr)
+{
+       struct hwtstamp_config config;
        int err;
 
-       ops = mlxsw_sp->port_type_speed_ops;
+       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+               return -EFAULT;
 
-       autoneg = mlxsw_sp_port->link.autoneg;
-       ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
-                              0, false);
-       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+       err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
+                                                            &config);
        if (err)
                return err;
-       ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
-                                &eth_proto_admin, &eth_proto_oper);
 
-       mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap,
-                                        mlxsw_sp_port->mapping.width, cmd);
-
-       mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg,
-                                        mlxsw_sp_port->mapping.width, cmd);
-
-       cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
-       connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl);
-       cmd->base.port = mlxsw_sp_port_connector_port(connector_type);
-       ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev),
-                                   eth_proto_oper, cmd);
+       if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+               return -EFAULT;
 
        return 0;
 }
 
-static int
-mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
-                                const struct ethtool_link_ksettings *cmd)
+static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
+                                     struct ifreq *ifr)
 {
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       const struct mlxsw_sp_port_type_speed_ops *ops;
-       char ptys_pl[MLXSW_REG_PTYS_LEN];
-       u32 eth_proto_cap, eth_proto_new;
-       bool autoneg;
+       struct hwtstamp_config config;
        int err;
 
-       ops = mlxsw_sp->port_type_speed_ops;
-
-       ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
-                              0, false);
-       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
-       if (err)
-               return err;
-       ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap, NULL, NULL);
-
-       autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
-       eth_proto_new = autoneg ?
-               ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width,
-                                        cmd) :
-               ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width,
-                                  cmd->base.speed);
-
-       eth_proto_new = eth_proto_new & eth_proto_cap;
-       if (!eth_proto_new) {
-               netdev_err(dev, "No supported speed requested\n");
-               return -EINVAL;
-       }
-
-       ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
-                              eth_proto_new, autoneg);
-       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+       err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
+                                                            &config);
        if (err)
                return err;
 
-       mlxsw_sp_port->link.autoneg = autoneg;
-
-       if (!netif_running(dev))
-               return 0;
-
-       mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
-       mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+       if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+               return -EFAULT;
 
        return 0;
 }
 
-static int mlxsw_sp_get_module_info(struct net_device *netdev,
-                                   struct ethtool_modinfo *modinfo)
+static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
 {
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       int err;
-
-       err = mlxsw_env_get_module_info(mlxsw_sp->core,
-                                       mlxsw_sp_port->mapping.module,
-                                       modinfo);
+       struct hwtstamp_config config = {0};
 
-       return err;
+       mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
 }
 
-static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
-                                     struct ethtool_eeprom *ee,
-                                     u8 *data)
+static int
+mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       int err;
-
-       err = mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core,
-                                         mlxsw_sp_port->mapping.module, ee,
-                                         data);
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 
-       return err;
+       switch (cmd) {
+       case SIOCSHWTSTAMP:
+               return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
+       case SIOCGHWTSTAMP:
+               return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
+       default:
+               return -EOPNOTSUPP;
+       }
 }
 
-static int
-mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info)
-{
-       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-
-       return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info);
-}
-
-static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
-       .get_drvinfo            = mlxsw_sp_port_get_drvinfo,
-       .get_link               = ethtool_op_get_link,
-       .get_pauseparam         = mlxsw_sp_port_get_pauseparam,
-       .set_pauseparam         = mlxsw_sp_port_set_pauseparam,
-       .get_strings            = mlxsw_sp_port_get_strings,
-       .set_phys_id            = mlxsw_sp_port_set_phys_id,
-       .get_ethtool_stats      = mlxsw_sp_port_get_stats,
-       .get_sset_count         = mlxsw_sp_port_get_sset_count,
-       .get_link_ksettings     = mlxsw_sp_port_get_link_ksettings,
-       .set_link_ksettings     = mlxsw_sp_port_set_link_ksettings,
-       .get_module_info        = mlxsw_sp_get_module_info,
-       .get_module_eeprom      = mlxsw_sp_get_module_eeprom,
-       .get_ts_info            = mlxsw_sp_get_ts_info,
+static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
+       .ndo_open               = mlxsw_sp_port_open,
+       .ndo_stop               = mlxsw_sp_port_stop,
+       .ndo_start_xmit         = mlxsw_sp_port_xmit,
+       .ndo_setup_tc           = mlxsw_sp_setup_tc,
+       .ndo_set_rx_mode        = mlxsw_sp_set_rx_mode,
+       .ndo_set_mac_address    = mlxsw_sp_port_set_mac_address,
+       .ndo_change_mtu         = mlxsw_sp_port_change_mtu,
+       .ndo_get_stats64        = mlxsw_sp_port_get_stats64,
+       .ndo_has_offload_stats  = mlxsw_sp_port_has_offload_stats,
+       .ndo_get_offload_stats  = mlxsw_sp_port_get_offload_stats,
+       .ndo_vlan_rx_add_vid    = mlxsw_sp_port_add_vid,
+       .ndo_vlan_rx_kill_vid   = mlxsw_sp_port_kill_vid,
+       .ndo_set_features       = mlxsw_sp_set_features,
+       .ndo_get_devlink_port   = mlxsw_sp_port_get_devlink_port,
+       .ndo_do_ioctl           = mlxsw_sp_port_ioctl,
 };
 
 static int
@@ -3244,12 +1744,16 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
        struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
        bool split = !!split_base_local_port;
        struct mlxsw_sp_port *mlxsw_sp_port;
+       u32 lanes = port_mapping->width;
        struct net_device *dev;
+       bool splittable;
        int err;
 
+       splittable = lanes > 1 && !split;
        err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
                                   port_mapping->module + 1, split,
-                                  port_mapping->lane / port_mapping->width,
+                                  port_mapping->lane / lanes,
+                                  splittable, lanes,
                                   mlxsw_sp->base_mac,
                                   sizeof(mlxsw_sp->base_mac));
        if (err) {
@@ -3743,13 +2247,6 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
                return -EINVAL;
        }
 
-       /* Split ports cannot be split. */
-       if (mlxsw_sp_port->split) {
-               netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
-               NL_SET_ERR_MSG_MOD(extack, "Port cannot be split further");
-               return -EINVAL;
-       }
-
        max_width = mlxsw_core_module_max_width(mlxsw_core,
                                                mlxsw_sp_port->mapping.module);
        if (max_width < 0) {
@@ -3758,19 +2255,13 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
                return max_width;
        }
 
-       /* Split port with non-max and 1 module width cannot be split. */
-       if (mlxsw_sp_port->mapping.width != max_width || max_width == 1) {
+       /* Split port with non-max cannot be split. */
+       if (mlxsw_sp_port->mapping.width != max_width) {
                netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
                NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
                return -EINVAL;
        }
 
-       if (count == 1 || !is_power_of_2(count) || count > max_width) {
-               netdev_err(mlxsw_sp_port->dev, "Invalid split count\n");
-               NL_SET_ERR_MSG_MOD(extack, "Invalid split count");
-               return -EINVAL;
-       }
-
        offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
        if (offset < 0) {
                netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
@@ -4332,52 +2823,6 @@ static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
        .get_stats      = mlxsw_sp2_get_stats,
 };
 
-static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed)
-{
-       return mtu * 5 / 2;
-}
-
-static const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
-       .buffsize_get = mlxsw_sp1_span_buffsize_get,
-};
-
-#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
-#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
-
-static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor)
-{
-       return 3 * mtu + buffer_factor * speed / 1000;
-}
-
-static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed)
-{
-       int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;
-
-       return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
-}
-
-static const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
-       .buffsize_get = mlxsw_sp2_span_buffsize_get,
-};
-
-static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed)
-{
-       int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;
-
-       return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
-}
-
-static const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
-       .buffsize_get = mlxsw_sp3_span_buffsize_get,
-};
-
-u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed)
-{
-       u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu);
-
-       return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
-}
-
 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
                                    unsigned long event, void *ptr);
 
@@ -4415,6 +2860,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
                goto err_fids_init;
        }
 
+       err = mlxsw_sp_policers_init(mlxsw_sp);
+       if (err) {
+               dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
+               goto err_policers_init;
+       }
+
        err = mlxsw_sp_traps_init(mlxsw_sp);
        if (err) {
                dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
@@ -4574,6 +3025,8 @@ err_buffers_init:
 err_devlink_traps_init:
        mlxsw_sp_traps_fini(mlxsw_sp);
 err_traps_init:
+       mlxsw_sp_policers_fini(mlxsw_sp);
+err_policers_init:
        mlxsw_sp_fids_fini(mlxsw_sp);
 err_fids_init:
        mlxsw_sp_kvdl_fini(mlxsw_sp);
@@ -4592,6 +3045,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
        mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
        mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
        mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
+       mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
        mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
        mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
        mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
@@ -4600,6 +3054,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
        mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
        mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
        mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
+       mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
        mlxsw_sp->listeners = mlxsw_sp1_listener;
        mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
        mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
@@ -4619,6 +3074,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
        mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
        mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
        mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
+       mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
        mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
        mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
        mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
@@ -4627,6 +3083,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
        mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
        mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
        mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
+       mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
        mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
 
        return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -4638,10 +3095,13 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
 
+       mlxsw_sp->req_rev = &mlxsw_sp3_fw_rev;
+       mlxsw_sp->fw_filename = MLXSW_SP3_FW_FILENAME;
        mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
        mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
        mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
        mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
+       mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
        mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
        mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
        mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
@@ -4650,6 +3110,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
        mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
        mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
        mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
+       mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
        mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
 
        return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
@@ -4679,6 +3140,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
        mlxsw_sp_buffers_fini(mlxsw_sp);
        mlxsw_sp_devlink_traps_fini(mlxsw_sp);
        mlxsw_sp_traps_fini(mlxsw_sp);
+       mlxsw_sp_policers_fini(mlxsw_sp);
        mlxsw_sp_fids_fini(mlxsw_sp);
        mlxsw_sp_kvdl_fini(mlxsw_sp);
 }
@@ -4890,6 +3352,10 @@ static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
        if (err)
                goto err_resources_counter_register;
 
+       err = mlxsw_sp_policer_resources_register(mlxsw_core);
+       if (err)
+               goto err_resources_counter_register;
+
        return 0;
 
 err_resources_counter_register:
@@ -4914,6 +3380,10 @@ static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
        if (err)
                goto err_resources_counter_register;
 
+       err = mlxsw_sp_policer_resources_register(mlxsw_core);
+       if (err)
+               goto err_resources_counter_register;
+
        return 0;
 
 err_resources_counter_register:
@@ -6328,3 +4798,4 @@ MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
 MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
 MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
 MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
+MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
index 6f96ca5..6ab1b6d 100644 (file)
@@ -62,6 +62,8 @@ enum mlxsw_sp_resource_id {
        MLXSW_SP_RESOURCE_COUNTERS,
        MLXSW_SP_RESOURCE_COUNTERS_FLOW,
        MLXSW_SP_RESOURCE_COUNTERS_RIF,
+       MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
+       MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
 };
 
 struct mlxsw_sp_port;
@@ -120,6 +122,7 @@ struct mlxsw_sp_kvdl;
 struct mlxsw_sp_nve;
 struct mlxsw_sp_kvdl_ops;
 struct mlxsw_sp_mr_tcam_ops;
+struct mlxsw_sp_acl_rulei_ops;
 struct mlxsw_sp_acl_tcam_ops;
 struct mlxsw_sp_nve_ops;
 struct mlxsw_sp_sb_vals;
@@ -150,6 +153,7 @@ struct mlxsw_sp {
        struct mlxsw_afa *afa;
        struct mlxsw_sp_acl *acl;
        struct mlxsw_sp_fid_core *fid_core;
+       struct mlxsw_sp_policer_core *policer_core;
        struct mlxsw_sp_kvdl *kvdl;
        struct mlxsw_sp_nve *nve;
        struct notifier_block netdevice_nb;
@@ -164,6 +168,7 @@ struct mlxsw_sp {
        const struct mlxsw_afa_ops *afa_ops;
        const struct mlxsw_afk_ops *afk_ops;
        const struct mlxsw_sp_mr_tcam_ops *mr_tcam_ops;
+       const struct mlxsw_sp_acl_rulei_ops *acl_rulei_ops;
        const struct mlxsw_sp_acl_tcam_ops *acl_tcam_ops;
        const struct mlxsw_sp_nve_ops **nve_ops_arr;
        const struct mlxsw_sp_rif_ops **rif_ops_arr;
@@ -171,11 +176,45 @@ struct mlxsw_sp {
        const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
        const struct mlxsw_sp_ptp_ops *ptp_ops;
        const struct mlxsw_sp_span_ops *span_ops;
+       const struct mlxsw_sp_policer_core_ops *policer_core_ops;
        const struct mlxsw_listener *listeners;
        size_t listeners_count;
        u32 lowest_shaper_bs;
 };
 
+struct mlxsw_sp_ptp_ops {
+       struct mlxsw_sp_ptp_clock *
+               (*clock_init)(struct mlxsw_sp *mlxsw_sp, struct device *dev);
+       void (*clock_fini)(struct mlxsw_sp_ptp_clock *clock);
+
+       struct mlxsw_sp_ptp_state *(*init)(struct mlxsw_sp *mlxsw_sp);
+       void (*fini)(struct mlxsw_sp_ptp_state *ptp_state);
+
+       /* Notify a driver that a packet that might be PTP was received. Driver
+        * is responsible for freeing the passed-in SKB.
+        */
+       void (*receive)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+                       u8 local_port);
+
+       /* Notify a driver that a timestamped packet was transmitted. Driver
+        * is responsible for freeing the passed-in SKB.
+        */
+       void (*transmitted)(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
+                           u8 local_port);
+
+       int (*hwtstamp_get)(struct mlxsw_sp_port *mlxsw_sp_port,
+                           struct hwtstamp_config *config);
+       int (*hwtstamp_set)(struct mlxsw_sp_port *mlxsw_sp_port,
+                           struct hwtstamp_config *config);
+       void (*shaper_work)(struct work_struct *work);
+       int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
+                          struct ethtool_ts_info *info);
+       int (*get_stats_count)(void);
+       void (*get_stats_strings)(u8 **p);
+       void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
+                         u64 *data, int data_index);
+};
+
 static inline struct mlxsw_sp_upper *
 mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
 {
@@ -374,12 +413,30 @@ mlxsw_sp_port_vlan_find_by_vid(const struct mlxsw_sp_port *mlxsw_sp_port,
        return NULL;
 }
 
+static inline u32
+mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
+                                u32 size_cells)
+{
+       /* Ports with eight lanes use two headroom buffers between which the
+        * configured headroom size is split. Therefore, multiply the calculated
+        * headroom size by two.
+        */
+       return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
+}
+
 enum mlxsw_sp_flood_type {
        MLXSW_SP_FLOOD_TYPE_UC,
        MLXSW_SP_FLOOD_TYPE_BC,
        MLXSW_SP_FLOOD_TYPE_MC,
 };
 
+int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                              int mtu, bool pause_en);
+int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
+                               int prio, char *ppcnt_pl);
+int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                  bool is_up);
+
 /* spectrum_buffers.c */
 int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
 void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
@@ -486,7 +543,6 @@ int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
                                unsigned int *p_counter_index);
 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
                                unsigned int counter_index);
-u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed);
 bool mlxsw_sp_port_dev_check(const struct net_device *dev);
 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev);
 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
@@ -633,8 +689,10 @@ struct mlxsw_sp_acl_rule_info {
        u8 action_created:1,
           ingress_bind_blocker:1,
           egress_bind_blocker:1,
-          counter_valid:1;
+          counter_valid:1,
+          policer_index_valid:1;
        unsigned int counter_index;
+       u16 policer_index;
 };
 
 /* spectrum_flow.c */
@@ -658,7 +716,6 @@ struct mlxsw_sp_flow_block {
 
 struct mlxsw_sp_flow_block_binding {
        struct list_head list;
-       struct net_device *dev;
        struct mlxsw_sp_port *mlxsw_sp_port;
        bool ingress;
 };
@@ -716,8 +773,9 @@ mlxsw_sp_flow_block_is_mixed_bound(const struct mlxsw_sp_flow_block *block)
 struct mlxsw_sp_flow_block *mlxsw_sp_flow_block_create(struct mlxsw_sp *mlxsw_sp,
                                                       struct net *net);
 void mlxsw_sp_flow_block_destroy(struct mlxsw_sp_flow_block *block);
-int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
-                           struct flow_block_offload *f);
+int mlxsw_sp_setup_tc_block_clsact(struct mlxsw_sp_port *mlxsw_sp_port,
+                                  struct flow_block_offload *f,
+                                  bool ingress);
 
 /* spectrum_acl.c */
 struct mlxsw_sp_acl_ruleset;
@@ -795,6 +853,10 @@ int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
                                  enum flow_action_mangle_base htype,
                                  u32 offset, u32 mask, u32 val,
                                  struct netlink_ext_ack *extack);
+int mlxsw_sp_acl_rulei_act_police(struct mlxsw_sp *mlxsw_sp,
+                                 struct mlxsw_sp_acl_rule_info *rulei,
+                                 u32 index, u64 rate_bytes_ps,
+                                 u32 burst, struct netlink_ext_ack *extack);
 int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_rule_info *rulei,
                                 struct netlink_ext_ack *extack);
@@ -827,7 +889,8 @@ struct mlxsw_sp_acl_rule_info *
 mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule);
 int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_rule *rule,
-                               u64 *packets, u64 *bytes, u64 *last_use,
+                               u64 *packets, u64 *bytes, u64 *drops,
+                               u64 *last_use,
                                enum flow_action_hw_stats *used_hw_stats);
 
 struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp);
@@ -843,6 +906,17 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
 u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp);
 int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val);
 
+struct mlxsw_sp_acl_mangle_action;
+
+struct mlxsw_sp_acl_rulei_ops {
+       int (*act_mangle_field)(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei,
+                               struct mlxsw_sp_acl_mangle_action *mact, u32 val,
+                               struct netlink_ext_ack *extack);
+};
+
+extern struct mlxsw_sp_acl_rulei_ops mlxsw_sp1_acl_rulei_ops;
+extern struct mlxsw_sp_acl_rulei_ops mlxsw_sp2_acl_rulei_ops;
+
 /* spectrum_acl_tcam.c */
 struct mlxsw_sp_acl_tcam;
 struct mlxsw_sp_acl_tcam_region;
@@ -898,6 +972,30 @@ extern const struct mlxsw_afk_ops mlxsw_sp1_afk_ops;
 extern const struct mlxsw_afk_ops mlxsw_sp2_afk_ops;
 
 /* spectrum_matchall.c */
+enum mlxsw_sp_mall_action_type {
+       MLXSW_SP_MALL_ACTION_TYPE_MIRROR,
+       MLXSW_SP_MALL_ACTION_TYPE_SAMPLE,
+       MLXSW_SP_MALL_ACTION_TYPE_TRAP,
+};
+
+struct mlxsw_sp_mall_mirror_entry {
+       const struct net_device *to_dev;
+       int span_id;
+};
+
+struct mlxsw_sp_mall_entry {
+       struct list_head list;
+       unsigned long cookie;
+       unsigned int priority;
+       enum mlxsw_sp_mall_action_type type;
+       bool ingress;
+       union {
+               struct mlxsw_sp_mall_mirror_entry mirror;
+               struct mlxsw_sp_port_sample sample;
+       };
+       struct rcu_head rcu;
+};
+
 int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
                          struct mlxsw_sp_flow_block *block,
                          struct tc_cls_matchall_offload *f);
@@ -944,6 +1042,8 @@ int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
                          struct tc_tbf_qopt_offload *p);
 int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
                           struct tc_fifo_qopt_offload *p);
+int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
+                                             struct flow_block_offload *f);
 
 /* spectrum_fid.c */
 bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
@@ -1102,4 +1202,40 @@ static inline struct net *mlxsw_sp_net(struct mlxsw_sp *mlxsw_sp)
        return mlxsw_core_net(mlxsw_sp->core);
 }
 
+/* spectrum_ethtool.c */
+extern const struct ethtool_ops mlxsw_sp_port_ethtool_ops;
+extern const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops;
+extern const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops;
+
+/* spectrum_policer.c */
+extern const struct mlxsw_sp_policer_core_ops mlxsw_sp1_policer_core_ops;
+extern const struct mlxsw_sp_policer_core_ops mlxsw_sp2_policer_core_ops;
+
+enum mlxsw_sp_policer_type {
+       MLXSW_SP_POLICER_TYPE_SINGLE_RATE,
+
+       __MLXSW_SP_POLICER_TYPE_MAX,
+       MLXSW_SP_POLICER_TYPE_MAX = __MLXSW_SP_POLICER_TYPE_MAX - 1,
+};
+
+struct mlxsw_sp_policer_params {
+       u64 rate;
+       u64 burst;
+       bool bytes;
+};
+
+int mlxsw_sp_policer_add(struct mlxsw_sp *mlxsw_sp,
+                        enum mlxsw_sp_policer_type type,
+                        const struct mlxsw_sp_policer_params *params,
+                        struct netlink_ext_ack *extack, u16 *p_policer_index);
+void mlxsw_sp_policer_del(struct mlxsw_sp *mlxsw_sp,
+                         enum mlxsw_sp_policer_type type,
+                         u16 policer_index);
+int mlxsw_sp_policer_drops_counter_get(struct mlxsw_sp *mlxsw_sp,
+                                      enum mlxsw_sp_policer_type type,
+                                      u16 policer_index, u64 *p_drops);
+int mlxsw_sp_policers_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_policers_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_policer_resources_register(struct mlxsw_core *mlxsw_core);
+
 #endif
index 47da9ee..8cfa03a 100644 (file)
@@ -66,6 +66,7 @@ struct mlxsw_sp_acl_rule {
        u64 last_used;
        u64 last_packets;
        u64 last_bytes;
+       u64 last_drops;
        unsigned long priv[];
        /* priv has to be always the last item */
 };
@@ -508,6 +509,8 @@ enum mlxsw_sp_acl_mangle_field {
        MLXSW_SP_ACL_MANGLE_FIELD_IP_DSFIELD,
        MLXSW_SP_ACL_MANGLE_FIELD_IP_DSCP,
        MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN,
+       MLXSW_SP_ACL_MANGLE_FIELD_IP_SPORT,
+       MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT,
 };
 
 struct mlxsw_sp_acl_mangle_action {
@@ -538,13 +541,26 @@ struct mlxsw_sp_acl_mangle_action {
        MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_IP6,       \
                                   _offset, _mask, _shift, _field)
 
+#define MLXSW_SP_ACL_MANGLE_ACTION_TCP(_offset, _mask, _shift, _field) \
+       MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_TCP, _offset, _mask, _shift, _field)
+
+#define MLXSW_SP_ACL_MANGLE_ACTION_UDP(_offset, _mask, _shift, _field) \
+       MLXSW_SP_ACL_MANGLE_ACTION(FLOW_ACT_MANGLE_HDR_TYPE_UDP, _offset, _mask, _shift, _field)
+
 static struct mlxsw_sp_acl_mangle_action mlxsw_sp_acl_mangle_actions[] = {
        MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff00ffff, 16, IP_DSFIELD),
        MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xff03ffff, 18, IP_DSCP),
        MLXSW_SP_ACL_MANGLE_ACTION_IP4(0, 0xfffcffff, 16, IP_ECN),
+
        MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf00fffff, 20, IP_DSFIELD),
        MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xf03fffff, 22, IP_DSCP),
        MLXSW_SP_ACL_MANGLE_ACTION_IP6(0, 0xffcfffff, 20, IP_ECN),
+
+       MLXSW_SP_ACL_MANGLE_ACTION_TCP(0, 0x0000ffff, 16, IP_SPORT),
+       MLXSW_SP_ACL_MANGLE_ACTION_TCP(0, 0xffff0000, 0,  IP_DPORT),
+
+       MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0x0000ffff, 16, IP_SPORT),
+       MLXSW_SP_ACL_MANGLE_ACTION_UDP(0, 0xffff0000, 0,  IP_DPORT),
 };
 
 static int
@@ -563,11 +579,48 @@ mlxsw_sp_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
        case MLXSW_SP_ACL_MANGLE_FIELD_IP_ECN:
                return mlxsw_afa_block_append_qos_ecn(rulei->act_block,
                                                      val, extack);
+       default:
+               return -EOPNOTSUPP;
        }
+}
 
-       /* We shouldn't have gotten a match in the first place! */
-       WARN_ONCE(1, "Unhandled mangle field");
-       return -EINVAL;
+static int mlxsw_sp1_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
+                                               struct mlxsw_sp_acl_rule_info *rulei,
+                                               struct mlxsw_sp_acl_mangle_action *mact,
+                                               u32 val, struct netlink_ext_ack *extack)
+{
+       int err;
+
+       err = mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp, rulei, mact, val, extack);
+       if (err != -EOPNOTSUPP)
+               return err;
+
+       NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
+       return err;
+}
+
+static int mlxsw_sp2_acl_rulei_act_mangle_field(struct mlxsw_sp *mlxsw_sp,
+                                               struct mlxsw_sp_acl_rule_info *rulei,
+                                               struct mlxsw_sp_acl_mangle_action *mact,
+                                               u32 val, struct netlink_ext_ack *extack)
+{
+       int err;
+
+       err = mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp, rulei, mact, val, extack);
+       if (err != -EOPNOTSUPP)
+               return err;
+
+       switch (mact->field) {
+       case MLXSW_SP_ACL_MANGLE_FIELD_IP_SPORT:
+               return mlxsw_afa_block_append_l4port(rulei->act_block, false, val, extack);
+       case MLXSW_SP_ACL_MANGLE_FIELD_IP_DPORT:
+               return mlxsw_afa_block_append_l4port(rulei->act_block, true, val, extack);
+       default:
+               break;
+       }
+
+       NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
+       return err;
 }
 
 int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
@@ -576,6 +629,7 @@ int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
                                  u32 offset, u32 mask, u32 val,
                                  struct netlink_ext_ack *extack)
 {
+       const struct mlxsw_sp_acl_rulei_ops *acl_rulei_ops = mlxsw_sp->acl_rulei_ops;
        struct mlxsw_sp_acl_mangle_action *mact;
        size_t i;
 
@@ -585,16 +639,34 @@ int mlxsw_sp_acl_rulei_act_mangle(struct mlxsw_sp *mlxsw_sp,
                    mact->offset == offset &&
                    mact->mask == mask) {
                        val >>= mact->shift;
-                       return mlxsw_sp_acl_rulei_act_mangle_field(mlxsw_sp,
-                                                                  rulei, mact,
-                                                                  val, extack);
+                       return acl_rulei_ops->act_mangle_field(mlxsw_sp,
+                                                              rulei, mact,
+                                                              val, extack);
                }
        }
 
-       NL_SET_ERR_MSG_MOD(extack, "Unsupported mangle field");
+       NL_SET_ERR_MSG_MOD(extack, "Unknown mangle field");
        return -EINVAL;
 }
 
+int mlxsw_sp_acl_rulei_act_police(struct mlxsw_sp *mlxsw_sp,
+                                 struct mlxsw_sp_acl_rule_info *rulei,
+                                 u32 index, u64 rate_bytes_ps,
+                                 u32 burst, struct netlink_ext_ack *extack)
+{
+       int err;
+
+       err = mlxsw_afa_block_append_police(rulei->act_block, index,
+                                           rate_bytes_ps, burst,
+                                           &rulei->policer_index, extack);
+       if (err)
+               return err;
+
+       rulei->policer_index_valid = true;
+
+       return 0;
+}
+
 int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_rule_info *rulei,
                                 struct netlink_ext_ack *extack)
@@ -815,13 +887,16 @@ static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
 
 int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_rule *rule,
-                               u64 *packets, u64 *bytes, u64 *last_use,
+                               u64 *packets, u64 *bytes, u64 *drops,
+                               u64 *last_use,
                                enum flow_action_hw_stats *used_hw_stats)
 
 {
+       enum mlxsw_sp_policer_type type = MLXSW_SP_POLICER_TYPE_SINGLE_RATE;
        struct mlxsw_sp_acl_rule_info *rulei;
        u64 current_packets = 0;
        u64 current_bytes = 0;
+       u64 current_drops = 0;
        int err;
 
        rulei = mlxsw_sp_acl_rule_rulei(rule);
@@ -833,12 +908,21 @@ int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
                        return err;
                *used_hw_stats = FLOW_ACTION_HW_STATS_IMMEDIATE;
        }
+       if (rulei->policer_index_valid) {
+               err = mlxsw_sp_policer_drops_counter_get(mlxsw_sp, type,
+                                                        rulei->policer_index,
+                                                        &current_drops);
+               if (err)
+                       return err;
+       }
        *packets = current_packets - rule->last_packets;
        *bytes = current_bytes - rule->last_bytes;
+       *drops = current_drops - rule->last_drops;
        *last_use = rule->last_used;
 
        rule->last_bytes = current_bytes;
        rule->last_packets = current_packets;
+       rule->last_drops = current_drops;
 
        return 0;
 }
@@ -930,3 +1014,11 @@ int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val)
        return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp,
                                                           &acl->tcam, val);
 }
+
+struct mlxsw_sp_acl_rulei_ops mlxsw_sp1_acl_rulei_ops = {
+       .act_mangle_field = mlxsw_sp1_acl_rulei_act_mangle_field,
+};
+
+struct mlxsw_sp_acl_rulei_ops mlxsw_sp2_acl_rulei_ops = {
+       .act_mangle_field = mlxsw_sp2_acl_rulei_act_mangle_field,
+};
index 73d5601..90372d1 100644 (file)
@@ -136,11 +136,13 @@ mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port,
                        const struct net_device *out_dev,
                        bool ingress, int *p_span_id)
 {
+       struct mlxsw_sp_span_agent_parms agent_parms = {};
        struct mlxsw_sp_port *mlxsw_sp_port;
        struct mlxsw_sp *mlxsw_sp = priv;
        int err;
 
-       err = mlxsw_sp_span_agent_get(mlxsw_sp, out_dev, p_span_id);
+       agent_parms.to_dev = out_dev;
+       err = mlxsw_sp_span_agent_get(mlxsw_sp, p_span_id, &agent_parms);
        if (err)
                return err;
 
@@ -167,6 +169,29 @@ mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress)
        mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
 }
 
+static int mlxsw_sp_act_policer_add(void *priv, u64 rate_bytes_ps, u32 burst,
+                                   u16 *p_policer_index,
+                                   struct netlink_ext_ack *extack)
+{
+       struct mlxsw_sp_policer_params params;
+       struct mlxsw_sp *mlxsw_sp = priv;
+
+       params.rate = rate_bytes_ps;
+       params.burst = burst;
+       params.bytes = true;
+       return mlxsw_sp_policer_add(mlxsw_sp,
+                                   MLXSW_SP_POLICER_TYPE_SINGLE_RATE,
+                                   &params, extack, p_policer_index);
+}
+
+static void mlxsw_sp_act_policer_del(void *priv, u16 policer_index)
+{
+       struct mlxsw_sp *mlxsw_sp = priv;
+
+       mlxsw_sp_policer_del(mlxsw_sp, MLXSW_SP_POLICER_TYPE_SINGLE_RATE,
+                            policer_index);
+}
+
 const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = {
        .kvdl_set_add           = mlxsw_sp1_act_kvdl_set_add,
        .kvdl_set_del           = mlxsw_sp_act_kvdl_set_del,
@@ -177,6 +202,8 @@ const struct mlxsw_afa_ops mlxsw_sp1_act_afa_ops = {
        .counter_index_put      = mlxsw_sp_act_counter_index_put,
        .mirror_add             = mlxsw_sp_act_mirror_add,
        .mirror_del             = mlxsw_sp_act_mirror_del,
+       .policer_add            = mlxsw_sp_act_policer_add,
+       .policer_del            = mlxsw_sp_act_policer_del,
 };
 
 const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = {
@@ -189,6 +216,8 @@ const struct mlxsw_afa_ops mlxsw_sp2_act_afa_ops = {
        .counter_index_put      = mlxsw_sp_act_counter_index_put,
        .mirror_add             = mlxsw_sp_act_mirror_add,
        .mirror_del             = mlxsw_sp_act_mirror_del,
+       .policer_add            = mlxsw_sp_act_policer_add,
+       .policer_del            = mlxsw_sp_act_policer_del,
        .dummy_first_set        = true,
 };
 
index 21bfb2f..6f84557 100644 (file)
@@ -312,6 +312,7 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
 
                if (i == MLXSW_SP_PB_UNUSED)
                        continue;
+               size = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, size);
                mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
        }
        mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
index 49a72a8..0d3fb2e 100644 (file)
@@ -110,8 +110,8 @@ static int mlxsw_sp_port_pg_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
 }
 
-static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
-                                     struct ieee_ets *ets)
+static int mlxsw_sp_port_headroom_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                         struct ieee_ets *ets)
 {
        bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
        struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets;
@@ -138,7 +138,7 @@ static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
        err = mlxsw_sp_port_pg_destroy(mlxsw_sp_port, my_ets->prio_tc,
                                       ets->prio_tc);
        if (err)
-               netdev_warn(dev, "Failed to remove ununsed PGs\n");
+               netdev_warn(dev, "Failed to remove unused PGs\n");
 
        return 0;
 
@@ -180,7 +180,7 @@ static int __mlxsw_sp_dcbnl_ieee_setets(struct mlxsw_sp_port *mlxsw_sp_port,
        }
 
        /* Ingress configuration. */
-       err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, ets);
+       err = mlxsw_sp_port_headroom_ets_set(mlxsw_sp_port, ets);
        if (err)
                goto err_port_headroom_set;
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
new file mode 100644 (file)
index 0000000..14c78f7
--- /dev/null
@@ -0,0 +1,1644 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Mellanox Technologies. All rights reserved */
+
+#include "reg.h"
+#include "spectrum.h"
+#include "core_env.h"
+
+static const char mlxsw_sp_driver_version[] = "1.0";
+
+static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
+                                     struct ethtool_drvinfo *drvinfo)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+       strlcpy(drvinfo->driver, mlxsw_sp->bus_info->device_kind,
+               sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, mlxsw_sp_driver_version,
+               sizeof(drvinfo->version));
+       snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+                "%d.%d.%d",
+                mlxsw_sp->bus_info->fw_rev.major,
+                mlxsw_sp->bus_info->fw_rev.minor,
+                mlxsw_sp->bus_info->fw_rev.subminor);
+       strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
+               sizeof(drvinfo->bus_info));
+}
+
+struct mlxsw_sp_ethtool_link_ext_state_opcode_mapping {
+       u32 status_opcode;
+       enum ethtool_link_ext_state link_ext_state;
+       u8 link_ext_substate;
+};
+
+static const struct mlxsw_sp_ethtool_link_ext_state_opcode_mapping
+mlxsw_sp_link_ext_state_opcode_map[] = {
+       {2, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+               ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED},
+       {3, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+               ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED},
+       {4, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+               ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED},
+       {36, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+               ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE},
+       {38, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+               ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE},
+       {39, ETHTOOL_LINK_EXT_STATE_AUTONEG,
+               ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD},
+
+       {5, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+               ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED},
+       {6, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+               ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT},
+       {7, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+               ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY},
+       {8, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, 0},
+       {14, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+               ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT},
+
+       {9, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+               ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK},
+       {10, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+               ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK},
+       {11, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+               ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS},
+       {12, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+               ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED},
+       {13, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+               ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED},
+
+       {15, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, 0},
+       {17, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+               ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS},
+       {42, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+               ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE},
+
+       {1024, ETHTOOL_LINK_EXT_STATE_NO_CABLE, 0},
+
+       {16, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+               ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+       {20, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+               ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+       {29, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+               ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+       {1025, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+               ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+       {1029, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+               ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE},
+       {1031, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, 0},
+
+       {1027, ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE, 0},
+
+       {23, ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE, 0},
+
+       {1032, ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED, 0},
+
+       {1030, ETHTOOL_LINK_EXT_STATE_OVERHEAT, 0},
+};
+
+static void
+mlxsw_sp_port_set_link_ext_state(struct mlxsw_sp_ethtool_link_ext_state_opcode_mapping
+                                link_ext_state_mapping,
+                                struct ethtool_link_ext_state_info *link_ext_state_info)
+{
+       switch (link_ext_state_mapping.link_ext_state) {
+       case ETHTOOL_LINK_EXT_STATE_AUTONEG:
+               link_ext_state_info->autoneg =
+                       link_ext_state_mapping.link_ext_substate;
+               break;
+       case ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE:
+               link_ext_state_info->link_training =
+                       link_ext_state_mapping.link_ext_substate;
+               break;
+       case ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH:
+               link_ext_state_info->link_logical_mismatch =
+                       link_ext_state_mapping.link_ext_substate;
+               break;
+       case ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY:
+               link_ext_state_info->bad_signal_integrity =
+                       link_ext_state_mapping.link_ext_substate;
+               break;
+       case ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE:
+               link_ext_state_info->cable_issue =
+                       link_ext_state_mapping.link_ext_substate;
+               break;
+       default:
+               break;
+       }
+
+       link_ext_state_info->link_ext_state = link_ext_state_mapping.link_ext_state;
+}
+
+static int
+mlxsw_sp_port_get_link_ext_state(struct net_device *dev,
+                                struct ethtool_link_ext_state_info *link_ext_state_info)
+{
+       struct mlxsw_sp_ethtool_link_ext_state_opcode_mapping link_ext_state_mapping;
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       char pddr_pl[MLXSW_REG_PDDR_LEN];
+       int opcode, err, i;
+       u32 status_opcode;
+
+       if (netif_carrier_ok(dev))
+               return -ENODATA;
+
+       mlxsw_reg_pddr_pack(pddr_pl, mlxsw_sp_port->local_port,
+                           MLXSW_REG_PDDR_PAGE_SELECT_TROUBLESHOOTING_INFO);
+
+       opcode = MLXSW_REG_PDDR_TRBLSH_GROUP_OPCODE_MONITOR;
+       mlxsw_reg_pddr_trblsh_group_opcode_set(pddr_pl, opcode);
+
+       err = mlxsw_reg_query(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pddr),
+                             pddr_pl);
+       if (err)
+               return err;
+
+       status_opcode = mlxsw_reg_pddr_trblsh_status_opcode_get(pddr_pl);
+       if (!status_opcode)
+               return -ENODATA;
+
+       for (i = 0; i < ARRAY_SIZE(mlxsw_sp_link_ext_state_opcode_map); i++) {
+               link_ext_state_mapping = mlxsw_sp_link_ext_state_opcode_map[i];
+               if (link_ext_state_mapping.status_opcode == status_opcode) {
+                       mlxsw_sp_port_set_link_ext_state(link_ext_state_mapping,
+                                                        link_ext_state_info);
+                       return 0;
+               }
+       }
+
+       return -ENODATA;
+}
+
+static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
+                                        struct ethtool_pauseparam *pause)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+       pause->rx_pause = mlxsw_sp_port->link.rx_pause;
+       pause->tx_pause = mlxsw_sp_port->link.tx_pause;
+}
+
+static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                  struct ethtool_pauseparam *pause)
+{
+       char pfcc_pl[MLXSW_REG_PFCC_LEN];
+
+       mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
+       mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
+       mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
+
+       return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
+                              pfcc_pl);
+}
+
+static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
+                                       struct ethtool_pauseparam *pause)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       bool pause_en = pause->tx_pause || pause->rx_pause;
+       int err;
+
+       if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
+               netdev_err(dev, "PFC already enabled on port\n");
+               return -EINVAL;
+       }
+
+       if (pause->autoneg) {
+               netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
+               return -EINVAL;
+       }
+
+       err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+       if (err) {
+               netdev_err(dev, "Failed to configure port's headroom\n");
+               return err;
+       }
+
+       err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
+       if (err) {
+               netdev_err(dev, "Failed to set PAUSE parameters\n");
+               goto err_port_pause_configure;
+       }
+
+       mlxsw_sp_port->link.rx_pause = pause->rx_pause;
+       mlxsw_sp_port->link.tx_pause = pause->tx_pause;
+
+       return 0;
+
+err_port_pause_configure:
+       pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
+       mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+       return err;
+}
+
+struct mlxsw_sp_port_hw_stats {
+       char str[ETH_GSTRING_LEN];
+       u64 (*getter)(const char *payload);
+       bool cells_bytes;
+};
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
+       {
+               .str = "a_frames_transmitted_ok",
+               .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
+       },
+       {
+               .str = "a_frames_received_ok",
+               .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
+       },
+       {
+               .str = "a_frame_check_sequence_errors",
+               .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
+       },
+       {
+               .str = "a_alignment_errors",
+               .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
+       },
+       {
+               .str = "a_octets_transmitted_ok",
+               .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
+       },
+       {
+               .str = "a_octets_received_ok",
+               .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
+       },
+       {
+               .str = "a_multicast_frames_xmitted_ok",
+               .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
+       },
+       {
+               .str = "a_broadcast_frames_xmitted_ok",
+               .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
+       },
+       {
+               .str = "a_multicast_frames_received_ok",
+               .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
+       },
+       {
+               .str = "a_broadcast_frames_received_ok",
+               .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
+       },
+       {
+               .str = "a_in_range_length_errors",
+               .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
+       },
+       {
+               .str = "a_out_of_range_length_field",
+               .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
+       },
+       {
+               .str = "a_frame_too_long_errors",
+               .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
+       },
+       {
+               .str = "a_symbol_error_during_carrier",
+               .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
+       },
+       {
+               .str = "a_mac_control_frames_transmitted",
+               .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
+       },
+       {
+               .str = "a_mac_control_frames_received",
+               .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
+       },
+       {
+               .str = "a_unsupported_opcodes_received",
+               .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
+       },
+       {
+               .str = "a_pause_mac_ctrl_frames_received",
+               .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
+       },
+       {
+               .str = "a_pause_mac_ctrl_frames_xmitted",
+               .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
+       },
+};
+
+#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
+       {
+               .str = "if_in_discards",
+               .getter = mlxsw_reg_ppcnt_if_in_discards_get,
+       },
+       {
+               .str = "if_out_discards",
+               .getter = mlxsw_reg_ppcnt_if_out_discards_get,
+       },
+       {
+               .str = "if_out_errors",
+               .getter = mlxsw_reg_ppcnt_if_out_errors_get,
+       },
+};
+
+#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
+       ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
+       {
+               .str = "ether_stats_undersize_pkts",
+               .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
+       },
+       {
+               .str = "ether_stats_oversize_pkts",
+               .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
+       },
+       {
+               .str = "ether_stats_fragments",
+               .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
+       },
+       {
+               .str = "ether_pkts64octets",
+               .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
+       },
+       {
+               .str = "ether_pkts65to127octets",
+               .getter = mlxsw_reg_ppcnt_ether_stats_pkts65to127octets_get,
+       },
+       {
+               .str = "ether_pkts128to255octets",
+               .getter = mlxsw_reg_ppcnt_ether_stats_pkts128to255octets_get,
+       },
+       {
+               .str = "ether_pkts256to511octets",
+               .getter = mlxsw_reg_ppcnt_ether_stats_pkts256to511octets_get,
+       },
+       {
+               .str = "ether_pkts512to1023octets",
+               .getter = mlxsw_reg_ppcnt_ether_stats_pkts512to1023octets_get,
+       },
+       {
+               .str = "ether_pkts1024to1518octets",
+               .getter = mlxsw_reg_ppcnt_ether_stats_pkts1024to1518octets_get,
+       },
+       {
+               .str = "ether_pkts1519to2047octets",
+               .getter = mlxsw_reg_ppcnt_ether_stats_pkts1519to2047octets_get,
+       },
+       {
+               .str = "ether_pkts2048to4095octets",
+               .getter = mlxsw_reg_ppcnt_ether_stats_pkts2048to4095octets_get,
+       },
+       {
+               .str = "ether_pkts4096to8191octets",
+               .getter = mlxsw_reg_ppcnt_ether_stats_pkts4096to8191octets_get,
+       },
+       {
+               .str = "ether_pkts8192to10239octets",
+               .getter = mlxsw_reg_ppcnt_ether_stats_pkts8192to10239octets_get,
+       },
+};
+
+#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
+       ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
+       {
+               .str = "dot3stats_fcs_errors",
+               .getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
+       },
+       {
+               .str = "dot3stats_symbol_errors",
+               .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
+       },
+       {
+               .str = "dot3control_in_unknown_opcodes",
+               .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
+       },
+       {
+               .str = "dot3in_pause_frames",
+               .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
+       },
+};
+
+#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
+       ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_ext_stats[] = {
+       {
+               .str = "ecn_marked",
+               .getter = mlxsw_reg_ppcnt_ecn_marked_get,
+       },
+};
+
+#define MLXSW_SP_PORT_HW_EXT_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_ext_stats)
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
+       {
+               .str = "discard_ingress_general",
+               .getter = mlxsw_reg_ppcnt_ingress_general_get,
+       },
+       {
+               .str = "discard_ingress_policy_engine",
+               .getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
+       },
+       {
+               .str = "discard_ingress_vlan_membership",
+               .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
+       },
+       {
+               .str = "discard_ingress_tag_frame_type",
+               .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
+       },
+       {
+               .str = "discard_egress_vlan_membership",
+               .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
+       },
+       {
+               .str = "discard_loopback_filter",
+               .getter = mlxsw_reg_ppcnt_loopback_filter_get,
+       },
+       {
+               .str = "discard_egress_general",
+               .getter = mlxsw_reg_ppcnt_egress_general_get,
+       },
+       {
+               .str = "discard_egress_hoq",
+               .getter = mlxsw_reg_ppcnt_egress_hoq_get,
+       },
+       {
+               .str = "discard_egress_policy_engine",
+               .getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
+       },
+       {
+               .str = "discard_ingress_tx_link_down",
+               .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
+       },
+       {
+               .str = "discard_egress_stp_filter",
+               .getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
+       },
+       {
+               .str = "discard_egress_sll",
+               .getter = mlxsw_reg_ppcnt_egress_sll_get,
+       },
+};
+
+#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
+       ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
+       {
+               .str = "rx_octets_prio",
+               .getter = mlxsw_reg_ppcnt_rx_octets_get,
+       },
+       {
+               .str = "rx_frames_prio",
+               .getter = mlxsw_reg_ppcnt_rx_frames_get,
+       },
+       {
+               .str = "tx_octets_prio",
+               .getter = mlxsw_reg_ppcnt_tx_octets_get,
+       },
+       {
+               .str = "tx_frames_prio",
+               .getter = mlxsw_reg_ppcnt_tx_frames_get,
+       },
+       {
+               .str = "rx_pause_prio",
+               .getter = mlxsw_reg_ppcnt_rx_pause_get,
+       },
+       {
+               .str = "rx_pause_duration_prio",
+               .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
+       },
+       {
+               .str = "tx_pause_prio",
+               .getter = mlxsw_reg_ppcnt_tx_pause_get,
+       },
+       {
+               .str = "tx_pause_duration_prio",
+               .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
+       },
+};
+
+#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
+       {
+               .str = "tc_transmit_queue_tc",
+               .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
+               .cells_bytes = true,
+       },
+       {
+               .str = "tc_no_buffer_discard_uc_tc",
+               .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
+       },
+};
+
+#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
+
+#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
+                                        MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
+                                        MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
+                                        MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
+                                        MLXSW_SP_PORT_HW_EXT_STATS_LEN + \
+                                        MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
+                                        (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
+                                         IEEE_8021QAZ_MAX_TCS) + \
+                                        (MLXSW_SP_PORT_HW_TC_STATS_LEN * \
+                                         TC_MAX_QUEUE))
+
+static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
+{
+       int i;
+
+       for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
+               snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
+                        mlxsw_sp_port_hw_prio_stats[i].str, prio);
+               *p += ETH_GSTRING_LEN;
+       }
+}
+
+static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
+{
+       int i;
+
+       for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
+               snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
+                        mlxsw_sp_port_hw_tc_stats[i].str, tc);
+               *p += ETH_GSTRING_LEN;
+       }
+}
+
+static void mlxsw_sp_port_get_strings(struct net_device *dev,
+                                     u32 stringset, u8 *data)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       u8 *p = data;
+       int i;
+
+       switch (stringset) {
+       case ETH_SS_STATS:
+               for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
+                       memcpy(p, mlxsw_sp_port_hw_stats[i].str,
+                              ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
+               }
+
+               for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
+                       memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
+                              ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
+               }
+
+               for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
+                       memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
+                              ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
+               }
+
+               for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
+                       memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
+                              ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
+               }
+
+               for (i = 0; i < MLXSW_SP_PORT_HW_EXT_STATS_LEN; i++) {
+                       memcpy(p, mlxsw_sp_port_hw_ext_stats[i].str,
+                              ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
+               }
+
+               for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
+                       memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
+                              ETH_GSTRING_LEN);
+                       p += ETH_GSTRING_LEN;
+               }
+
+               for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+                       mlxsw_sp_port_get_prio_strings(&p, i);
+
+               for (i = 0; i < TC_MAX_QUEUE; i++)
+                       mlxsw_sp_port_get_tc_strings(&p, i);
+
+               mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p);
+               break;
+       }
+}
+
+static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
+                                    enum ethtool_phys_id_state state)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char mlcr_pl[MLXSW_REG_MLCR_LEN];
+       bool active;
+
+       switch (state) {
+       case ETHTOOL_ID_ACTIVE:
+               active = true;
+               break;
+       case ETHTOOL_ID_INACTIVE:
+               active = false;
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
+}
+
+static int
+mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
+                              int *p_len, enum mlxsw_reg_ppcnt_grp grp)
+{
+       switch (grp) {
+       case MLXSW_REG_PPCNT_IEEE_8023_CNT:
+               *p_hw_stats = mlxsw_sp_port_hw_stats;
+               *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
+               break;
+       case MLXSW_REG_PPCNT_RFC_2863_CNT:
+               *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
+               *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
+               break;
+       case MLXSW_REG_PPCNT_RFC_2819_CNT:
+               *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
+               *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
+               break;
+       case MLXSW_REG_PPCNT_RFC_3635_CNT:
+               *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
+               *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
+               break;
+       case MLXSW_REG_PPCNT_EXT_CNT:
+               *p_hw_stats = mlxsw_sp_port_hw_ext_stats;
+               *p_len = MLXSW_SP_PORT_HW_EXT_STATS_LEN;
+               break;
+       case MLXSW_REG_PPCNT_DISCARD_CNT:
+               *p_hw_stats = mlxsw_sp_port_hw_discard_stats;
+               *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
+               break;
+       case MLXSW_REG_PPCNT_PRIO_CNT:
+               *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
+               *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
+               break;
+       case MLXSW_REG_PPCNT_TC_CNT:
+               *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
+               *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
+               break;
+       default:
+               WARN_ON(1);
+               return -EOPNOTSUPP;
+       }
+       return 0;
+}
+
+static void __mlxsw_sp_port_get_stats(struct net_device *dev,
+                                     enum mlxsw_reg_ppcnt_grp grp, int prio,
+                                     u64 *data, int data_index)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       struct mlxsw_sp_port_hw_stats *hw_stats;
+       char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+       int i, len;
+       int err;
+
+       err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
+       if (err)
+               return;
+       mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
+       for (i = 0; i < len; i++) {
+               data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
+               if (!hw_stats[i].cells_bytes)
+                       continue;
+               data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
+                                                           data[data_index + i]);
+       }
+}
+
+static void mlxsw_sp_port_get_stats(struct net_device *dev,
+                                   struct ethtool_stats *stats, u64 *data)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       int i, data_index = 0;
+
+       /* IEEE 802.3 Counters */
+       __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
+                                 data, data_index);
+       data_index = MLXSW_SP_PORT_HW_STATS_LEN;
+
+       /* RFC 2863 Counters */
+       __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
+                                 data, data_index);
+       data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
+
+       /* RFC 2819 Counters */
+       __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
+                                 data, data_index);
+       data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
+
+       /* RFC 3635 Counters */
+       __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
+                                 data, data_index);
+       data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
+
+       /* Extended Counters */
+       __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
+                                 data, data_index);
+       data_index += MLXSW_SP_PORT_HW_EXT_STATS_LEN;
+
+       /* Discard Counters */
+       __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
+                                 data, data_index);
+       data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
+
+       /* Per-Priority Counters */
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
+                                         data, data_index);
+               data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
+       }
+
+       /* Per-TC Counters */
+       for (i = 0; i < TC_MAX_QUEUE; i++) {
+               __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
+                                         data, data_index);
+               data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
+       }
+
+       /* PTP counters */
+       mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port,
+                                                   data, data_index);
+       data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
+}
+
+static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+       switch (sset) {
+       case ETH_SS_STATS:
+               return MLXSW_SP_PORT_ETHTOOL_STATS_LEN +
+                       mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static void
+mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap,
+                                u8 width, struct ethtool_link_ksettings *cmd)
+{
+       const struct mlxsw_sp_port_type_speed_ops *ops;
+
+       ops = mlxsw_sp->port_type_speed_ops;
+
+       ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+
+       ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd);
+       ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width,
+                           cmd->link_modes.supported);
+}
+
+static void
+mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp,
+                                u32 eth_proto_admin, bool autoneg, u8 width,
+                                struct ethtool_link_ksettings *cmd)
+{
+       const struct mlxsw_sp_port_type_speed_ops *ops;
+
+       ops = mlxsw_sp->port_type_speed_ops;
+
+       if (!autoneg)
+               return;
+
+       ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
+       ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width,
+                           cmd->link_modes.advertising);
+}
+
+/* Translate the PTYS register's connector_type field into the ethtool
+ * PORT_* constant reported to user space. Unknown values warn once and
+ * fall back to PORT_OTHER rather than failing the query.
+ */
+static u8
+mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type)
+{
+       switch (connector_type) {
+       case MLXSW_REG_PTYS_CONNECTOR_TYPE_UNKNOWN_OR_NO_CONNECTOR:
+               return PORT_OTHER;
+       case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_NONE:
+               return PORT_NONE;
+       case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_TP:
+               return PORT_TP;
+       case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_AUI:
+               return PORT_AUI;
+       case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_BNC:
+               return PORT_BNC;
+       case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_MII:
+               return PORT_MII;
+       case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_FIBRE:
+               return PORT_FIBRE;
+       case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_DA:
+               return PORT_DA;
+       case MLXSW_REG_PTYS_CONNECTOR_TYPE_PORT_OTHER:
+               return PORT_OTHER;
+       default:
+               WARN_ON_ONCE(1);
+               return PORT_OTHER;
+       }
+}
+
+/* ethtool .get_link_ksettings: query the PTYS register once and
+ * translate its capability/admin/operational protocol masks into the
+ * ethtool link settings (supported, advertising, autoneg, port
+ * connector and current speed/duplex).
+ */
+static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
+                                           struct ethtool_link_ksettings *cmd)
+{
+       u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       const struct mlxsw_sp_port_type_speed_ops *ops;
+       char ptys_pl[MLXSW_REG_PTYS_LEN];
+       u8 connector_type;
+       bool autoneg;
+       int err;
+
+       ops = mlxsw_sp->port_type_speed_ops;
+
+       autoneg = mlxsw_sp_port->link.autoneg;
+       /* Pack a query (proto_admin = 0, autoneg = false) and read the
+        * current PTYS state from the device.
+        */
+       ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
+                              0, false);
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+       if (err)
+               return err;
+       ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
+                                &eth_proto_admin, &eth_proto_oper);
+
+       mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap,
+                                        mlxsw_sp_port->mapping.width, cmd);
+
+       mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg,
+                                        mlxsw_sp_port->mapping.width, cmd);
+
+       cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+       connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl);
+       cmd->base.port = mlxsw_sp_port_connector_port(connector_type);
+       /* Speed/duplex are only meaningful with carrier up; the op
+        * reports SPEED_UNKNOWN/DUPLEX_UNKNOWN otherwise.
+        */
+       ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev),
+                                   eth_proto_oper, cmd);
+
+       return 0;
+}
+
+/* ethtool .set_link_ksettings: translate the requested settings into a
+ * PTYS admin protocol mask (advertised modes when autoneg is on, a
+ * forced speed otherwise), intersect it with the device capabilities
+ * and write it back. A running port is toggled down/up so the new
+ * configuration takes effect.
+ */
+static int
+mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
+                                const struct ethtool_link_ksettings *cmd)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       const struct mlxsw_sp_port_type_speed_ops *ops;
+       char ptys_pl[MLXSW_REG_PTYS_LEN];
+       u32 eth_proto_cap, eth_proto_new;
+       bool autoneg;
+       int err;
+
+       ops = mlxsw_sp->port_type_speed_ops;
+
+       /* Query only the capability mask; admin/oper are not needed. */
+       ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
+                              0, false);
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+       if (err)
+               return err;
+       ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap, NULL, NULL);
+
+       autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
+       eth_proto_new = autoneg ?
+               ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width,
+                                        cmd) :
+               ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width,
+                                  cmd->base.speed);
+
+       /* Reject requests the hardware cannot satisfy. */
+       eth_proto_new = eth_proto_new & eth_proto_cap;
+       if (!eth_proto_new) {
+               netdev_err(dev, "No supported speed requested\n");
+               return -EINVAL;
+       }
+
+       ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
+                              eth_proto_new, autoneg);
+       err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+       if (err)
+               return err;
+
+       mlxsw_sp_port->link.autoneg = autoneg;
+
+       if (!netif_running(dev))
+               return 0;
+
+       /* Bounce the port so the new protocol mask is renegotiated. */
+       mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+       mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+
+       return 0;
+}
+
+/* ethtool .get_module_info: report the plugged module's EEPROM layout
+ * (type and length) for the module backing this port.
+ */
+static int mlxsw_sp_get_module_info(struct net_device *netdev,
+                                   struct ethtool_modinfo *modinfo)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+       /* No post-processing of the result is done, so return the call
+        * directly instead of routing it through a redundant 'err' local.
+        */
+       return mlxsw_env_get_module_info(mlxsw_sp->core,
+                                        mlxsw_sp_port->mapping.module,
+                                        modinfo);
+}
+
+/* ethtool .get_module_eeprom: read the requested range of the module
+ * EEPROM into 'data' for the module backing this port.
+ */
+static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
+                                     struct ethtool_eeprom *ee, u8 *data)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+       /* No post-processing of the result is done, so return the call
+        * directly instead of routing it through a redundant 'err' local.
+        */
+       return mlxsw_env_get_module_eeprom(netdev, mlxsw_sp->core,
+                                          mlxsw_sp_port->mapping.module, ee,
+                                          data);
+}
+
+/* ethtool .get_ts_info: delegate timestamping capability reporting to
+ * the per-ASIC PTP ops.
+ */
+static int
+mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+       return mlxsw_sp->ptp_ops->get_ts_info(mlxsw_sp, info);
+}
+
+/* ethtool operations shared by all Spectrum port netdevs; ASIC
+ * differences are handled behind mlxsw_sp->port_type_speed_ops.
+ */
+const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
+       .get_drvinfo            = mlxsw_sp_port_get_drvinfo,
+       .get_link               = ethtool_op_get_link,
+       .get_link_ext_state     = mlxsw_sp_port_get_link_ext_state,
+       .get_pauseparam         = mlxsw_sp_port_get_pauseparam,
+       .set_pauseparam         = mlxsw_sp_port_set_pauseparam,
+       .get_strings            = mlxsw_sp_port_get_strings,
+       .set_phys_id            = mlxsw_sp_port_set_phys_id,
+       .get_ethtool_stats      = mlxsw_sp_port_get_stats,
+       .get_sset_count         = mlxsw_sp_port_get_sset_count,
+       .get_link_ksettings     = mlxsw_sp_port_get_link_ksettings,
+       .set_link_ksettings     = mlxsw_sp_port_set_link_ksettings,
+       .get_module_info        = mlxsw_sp_get_module_info,
+       .get_module_eeprom      = mlxsw_sp_get_module_eeprom,
+       .get_ts_info            = mlxsw_sp_get_ts_info,
+};
+
+/* Spectrum-1 link-mode translation table: each entry maps one or more
+ * PTYS protocol bits ('mask') to a single ethtool link-mode bit and
+ * its nominal speed. Several PTYS bits may share one ethtool bit
+ * (e.g. SGMII and 1000BASE-KX) because legacy ethtool has no separate
+ * mode for them.
+ */
+struct mlxsw_sp1_port_link_mode {
+       enum ethtool_link_mode_bit_indices mask_ethtool;
+       u32 mask;
+       u32 speed;
+};
+
+static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+               .speed          = SPEED_100,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
+                                 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+               .speed          = SPEED_1000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+               .speed          = SPEED_10000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
+                                 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+               .speed          = SPEED_10000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+                                 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+                                 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+                                 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+               .speed          = SPEED_10000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+               .speed          = SPEED_20000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+               .speed          = SPEED_40000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+               .speed          = SPEED_40000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+               .speed          = SPEED_40000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+               .speed          = SPEED_40000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+               .speed          = SPEED_25000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+               .speed          = SPEED_25000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+               .speed          = SPEED_25000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+               .speed          = SPEED_50000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+               .speed          = SPEED_50000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+               .speed          = SPEED_50000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+               .speed          = SPEED_100000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+               .speed          = SPEED_100000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+               .speed          = SPEED_100000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+               .mask_ethtool   = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+               .speed          = SPEED_100000,
+       },
+};
+
+#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)
+
+/* Report the supported port media classes (FIBRE/Backplane) based on
+ * which PTYS capability bits are set. A port may report both.
+ */
+static void
+mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
+                                  u32 ptys_eth_proto,
+                                  struct ethtool_link_ksettings *cmd)
+{
+       if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
+                             MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_SGMII))
+               ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+
+       if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
+                             MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
+                             MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
+               ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
+}
+
+/* Set, in 'mode', the ethtool link-mode bit of every table entry whose
+ * PTYS mask intersects 'ptys_eth_proto'. 'width' is unused on
+ * Spectrum-1; it exists to match the Spectrum-2 op signature.
+ */
+static void
+mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
+                        u8 width, unsigned long *mode)
+{
+       int i;
+
+       for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+               if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
+                       __set_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
+                                 mode);
+       }
+}
+
+/* Return the speed of the first table entry matching a set PTYS bit,
+ * or SPEED_UNKNOWN if none matches. Relies on each entry mapping to a
+ * single speed, so the first hit is sufficient.
+ */
+static u32
+mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
+{
+       int i;
+
+       for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+               if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
+                       return mlxsw_sp1_port_link_mode[i].speed;
+       }
+
+       return SPEED_UNKNOWN;
+}
+
+/* Fill cmd->base.speed/duplex from the operational PTYS mask. Without
+ * carrier both stay UNKNOWN; with carrier, any resolved speed implies
+ * full duplex (the hardware has no half-duplex modes here).
+ */
+static void
+mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
+                                u32 ptys_eth_proto,
+                                struct ethtool_link_ksettings *cmd)
+{
+       cmd->base.speed = SPEED_UNKNOWN;
+       cmd->base.duplex = DUPLEX_UNKNOWN;
+
+       if (!carrier_ok)
+               return;
+
+       cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
+       if (cmd->base.speed != SPEED_UNKNOWN)
+               cmd->base.duplex = DUPLEX_FULL;
+}
+
+/* Build a PTYS protocol mask from the user's advertised ethtool link
+ * modes. 'width' is unused on Spectrum-1 (op-signature parity).
+ */
+static u32
+mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
+                             const struct ethtool_link_ksettings *cmd)
+{
+       u32 ptys_proto = 0;
+       int i;
+
+       for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+               if (test_bit(mlxsw_sp1_port_link_mode[i].mask_ethtool,
+                            cmd->link_modes.advertising))
+                       ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
+       }
+       return ptys_proto;
+}
+
+/* Build a PTYS protocol mask covering every link mode whose nominal
+ * speed equals the requested forced 'speed'. 'width' is unused on
+ * Spectrum-1 (op-signature parity).
+ */
+static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width,
+                                  u32 speed)
+{
+       u32 ptys_proto = 0;
+       int i;
+
+       for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+               if (speed == mlxsw_sp1_port_link_mode[i].speed)
+                       ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
+       }
+       return ptys_proto;
+}
+
+/* Spectrum-1 uses the legacy PTYS ethernet fields; these thin wrappers
+ * adapt the register pack/unpack helpers to the common op signature.
+ */
+static void
+mlxsw_sp1_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
+                           u8 local_port, u32 proto_admin, bool autoneg)
+{
+       mlxsw_reg_ptys_eth_pack(payload, local_port, proto_admin, autoneg);
+}
+
+static void
+mlxsw_sp1_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
+                             u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
+                             u32 *p_eth_proto_oper)
+{
+       mlxsw_reg_ptys_eth_unpack(payload, p_eth_proto_cap, p_eth_proto_admin,
+                                 p_eth_proto_oper);
+}
+
+/* Spectrum-1 implementation of the port type/speed ops consumed by the
+ * common ethtool handlers above.
+ */
+const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops = {
+       .from_ptys_supported_port       = mlxsw_sp1_from_ptys_supported_port,
+       .from_ptys_link                 = mlxsw_sp1_from_ptys_link,
+       .from_ptys_speed                = mlxsw_sp1_from_ptys_speed,
+       .from_ptys_speed_duplex         = mlxsw_sp1_from_ptys_speed_duplex,
+       .to_ptys_advert_link            = mlxsw_sp1_to_ptys_advert_link,
+       .to_ptys_speed                  = mlxsw_sp1_to_ptys_speed,
+       .reg_ptys_eth_pack              = mlxsw_sp1_reg_ptys_eth_pack,
+       .reg_ptys_eth_unpack            = mlxsw_sp1_reg_ptys_eth_unpack,
+};
+
+/* On Spectrum-2+ a single extended-PTYS protocol bit covers a family
+ * of ethtool link modes (all lane/medium variants of one speed), so
+ * each PTYS bit maps to an array of ethtool bits rather than one.
+ */
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_sgmii_100m[] = {
+       ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN \
+       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_sgmii_100m)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
+       ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+       ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN \
+       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
+       ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
+       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_5gbase_r[] = {
+       ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN \
+       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_5gbase_r)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g[] = {
+       ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+       ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN \
+       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g[] = {
+       ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+       ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+       ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+       ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN \
+       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr[] = {
+       ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+       ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+       ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN \
+       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2[] = {
+       ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+       ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+       ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN \
+       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr[] = {
+       ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+       ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+       ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+       ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+       ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN \
+       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4[] = {
+       ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+       ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+       ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+       ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN \
+       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
+       ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
+       ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
+       ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
+       ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
+       ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN \
+       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
+       ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
+       ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
+       ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
+       ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
+       ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN \
+       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)
+
+static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_400gaui_8[] = {
+       ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
+       ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
+       ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT,
+       ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT,
+       ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN \
+       ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8)
+
+/* Bitmask encoding of a port's lane width (1/2/4/8 lanes), used to
+ * filter link modes that a given width cannot run.
+ */
+#define MLXSW_SP_PORT_MASK_WIDTH_1X    BIT(0)
+#define MLXSW_SP_PORT_MASK_WIDTH_2X    BIT(1)
+#define MLXSW_SP_PORT_MASK_WIDTH_4X    BIT(2)
+#define MLXSW_SP_PORT_MASK_WIDTH_8X    BIT(3)
+
+/* Convert a lane count (1/2/4/8) into its WIDTH_* bitmask for matching
+ * against link-mode entries. Unexpected widths warn once and yield 0,
+ * which matches no link mode.
+ */
+static u8 mlxsw_sp_port_mask_width_get(u8 width)
+{
+       switch (width) {
+       case 1:
+               return MLXSW_SP_PORT_MASK_WIDTH_1X;
+       case 2:
+               return MLXSW_SP_PORT_MASK_WIDTH_2X;
+       case 4:
+               return MLXSW_SP_PORT_MASK_WIDTH_4X;
+       case 8:
+               return MLXSW_SP_PORT_MASK_WIDTH_8X;
+       default:
+               WARN_ON_ONCE(1);
+               return 0;
+       }
+}
+
+/* Spectrum-2+ link-mode translation table: each entry maps one
+ * extended-PTYS bit ('mask') to its set of ethtool link-mode bits, the
+ * nominal speed, and the lane widths ('mask_width') that can run it.
+ */
+struct mlxsw_sp2_port_link_mode {
+       const enum ethtool_link_mode_bit_indices *mask_ethtool;
+       int m_ethtool_len;
+       u32 mask;
+       u32 speed;
+       u8 mask_width;
+};
+
+static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
+       {
+               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
+               .mask_ethtool   = mlxsw_sp2_mask_ethtool_sgmii_100m,
+               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
+               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_1X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_2X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_4X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_8X,
+               .speed          = SPEED_100,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
+               .mask_ethtool   = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
+               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
+               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_1X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_2X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_4X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_8X,
+               .speed          = SPEED_1000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
+               .mask_ethtool   = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
+               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
+               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_1X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_2X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_4X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_8X,
+               .speed          = SPEED_2500,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
+               .mask_ethtool   = mlxsw_sp2_mask_ethtool_5gbase_r,
+               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
+               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_1X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_2X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_4X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_8X,
+               .speed          = SPEED_5000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
+               .mask_ethtool   = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
+               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
+               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_1X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_2X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_4X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_8X,
+               .speed          = SPEED_10000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
+               .mask_ethtool   = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
+               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
+               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_4X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_8X,
+               .speed          = SPEED_40000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
+               .mask_ethtool   = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
+               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
+               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_1X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_2X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_4X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_8X,
+               .speed          = SPEED_25000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
+               .mask_ethtool   = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
+               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
+               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_2X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_4X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_8X,
+               .speed          = SPEED_50000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
+               .mask_ethtool   = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
+               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
+               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_1X,
+               .speed          = SPEED_50000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
+               .mask_ethtool   = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
+               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
+               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_4X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_8X,
+               .speed          = SPEED_100000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
+               .mask_ethtool   = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
+               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN,
+               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_2X,
+               .speed          = SPEED_100000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
+               .mask_ethtool   = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
+               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
+               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_4X |
+                                 MLXSW_SP_PORT_MASK_WIDTH_8X,
+               .speed          = SPEED_200000,
+       },
+       {
+               .mask           = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8,
+               .mask_ethtool   = mlxsw_sp2_mask_ethtool_400gaui_8,
+               .m_ethtool_len  = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN,
+               .mask_width     = MLXSW_SP_PORT_MASK_WIDTH_8X,
+               .speed          = SPEED_400000,
+       },
+};
+
+#define MLXSW_SP2_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp2_port_link_mode)
+
+/* Spectrum-2+ ports always report both FIBRE and Backplane as
+ * supported media; the capability mask is not consulted here.
+ */
+static void
+mlxsw_sp2_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
+                                  u32 ptys_eth_proto,
+                                  struct ethtool_link_ksettings *cmd)
+{
+       ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
+       ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
+}
+
+/* Set every ethtool link-mode bit associated with one table entry in
+ * the given link-mode bitmap.
+ */
+static void
+mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
+                         unsigned long *mode)
+{
+       int i;
+
+       for (i = 0; i < link_mode->m_ethtool_len; i++)
+               __set_bit(link_mode->mask_ethtool[i], mode);
+}
+
+/* Set, in 'mode', the ethtool bits of every entry whose PTYS bit is in
+ * 'ptys_eth_proto' AND which the port's lane width can run.
+ */
+static void
+mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
+                        u8 width, unsigned long *mode)
+{
+       u8 mask_width = mlxsw_sp_port_mask_width_get(width);
+       int i;
+
+       for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+               if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) &&
+                   (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
+                       mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
+                                                 mode);
+       }
+}
+
+/* Return the speed of the first table entry matching a set PTYS bit,
+ * or SPEED_UNKNOWN if none matches.
+ */
+static u32
+mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
+{
+       int i;
+
+       for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+               if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
+                       return mlxsw_sp2_port_link_mode[i].speed;
+       }
+
+       return SPEED_UNKNOWN;
+}
+
+/* Fill cmd->base.speed/duplex from the operational PTYS mask. Without
+ * carrier both stay UNKNOWN; with carrier, any resolved speed implies
+ * full duplex (mirrors the Spectrum-1 variant).
+ */
+static void
+mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
+                                u32 ptys_eth_proto,
+                                struct ethtool_link_ksettings *cmd)
+{
+       cmd->base.speed = SPEED_UNKNOWN;
+       cmd->base.duplex = DUPLEX_UNKNOWN;
+
+       if (!carrier_ok)
+               return;
+
+       cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
+       if (cmd->base.speed != SPEED_UNKNOWN)
+               cmd->base.duplex = DUPLEX_FULL;
+}
+
+/* Return true only when ALL ethtool bits of a table entry are present
+ * in 'mode': a PTYS bit covers a whole mode family, so advertising it
+ * requires the user to have requested every mode in the family.
+ */
+static bool
+mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
+                          const unsigned long *mode)
+{
+       int cnt = 0;
+       int i;
+
+       for (i = 0; i < link_mode->m_ethtool_len; i++) {
+               if (test_bit(link_mode->mask_ethtool[i], mode))
+                       cnt++;
+       }
+
+       return cnt == link_mode->m_ethtool_len;
+}
+
+/* Build an extended-PTYS mask from the user's advertised ethtool link
+ * modes, restricted to modes the port's lane width supports.
+ */
+static u32
+mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
+                             const struct ethtool_link_ksettings *cmd)
+{
+       u8 mask_width = mlxsw_sp_port_mask_width_get(width);
+       u32 ptys_proto = 0;
+       int i;
+
+       for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+               if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) &&
+                   mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
+                                              cmd->link_modes.advertising))
+                       ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
+       }
+       return ptys_proto;
+}
+
+/* Build an extended-PTYS mask covering every link mode whose nominal
+ * speed equals the forced 'speed' and which the lane width supports.
+ */
+static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp,
+                                  u8 width, u32 speed)
+{
+       u8 mask_width = mlxsw_sp_port_mask_width_get(width);
+       u32 ptys_proto = 0;
+       int i;
+
+       for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+               if ((speed == mlxsw_sp2_port_link_mode[i].speed) &&
+                   (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
+                       ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
+       }
+       return ptys_proto;
+}
+
+/* Spectrum-2+ uses the extended-ethernet PTYS fields; these thin
+ * wrappers adapt the ext pack/unpack helpers to the common op
+ * signature.
+ */
+static void
+mlxsw_sp2_reg_ptys_eth_pack(struct mlxsw_sp *mlxsw_sp, char *payload,
+                           u8 local_port, u32 proto_admin,
+                           bool autoneg)
+{
+       mlxsw_reg_ptys_ext_eth_pack(payload, local_port, proto_admin, autoneg);
+}
+
+static void
+mlxsw_sp2_reg_ptys_eth_unpack(struct mlxsw_sp *mlxsw_sp, char *payload,
+                             u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
+                             u32 *p_eth_proto_oper)
+{
+       mlxsw_reg_ptys_ext_eth_unpack(payload, p_eth_proto_cap,
+                                     p_eth_proto_admin, p_eth_proto_oper);
+}
+
+/* Spectrum-2+ implementation of the port type/speed ops consumed by
+ * the common ethtool handlers above.
+ */
+const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops = {
+       .from_ptys_supported_port       = mlxsw_sp2_from_ptys_supported_port,
+       .from_ptys_link                 = mlxsw_sp2_from_ptys_link,
+       .from_ptys_speed                = mlxsw_sp2_from_ptys_speed,
+       .from_ptys_speed_duplex         = mlxsw_sp2_from_ptys_speed_duplex,
+       .to_ptys_advert_link            = mlxsw_sp2_to_ptys_advert_link,
+       .to_ptys_speed                  = mlxsw_sp2_to_ptys_speed,
+       .reg_ptys_eth_pack              = mlxsw_sp2_reg_ptys_eth_pack,
+       .reg_ptys_eth_unpack            = mlxsw_sp2_reg_ptys_eth_unpack,
+};
index 47b66f3..0456cda 100644 (file)
@@ -219,8 +219,7 @@ static int mlxsw_sp_setup_tc_block_bind(struct mlxsw_sp_port *mlxsw_sp_port,
                                               mlxsw_sp_tc_block_release);
                if (IS_ERR(block_cb)) {
                        mlxsw_sp_flow_block_destroy(flow_block);
-                       err = PTR_ERR(block_cb);
-                       goto err_cb_register;
+                       return PTR_ERR(block_cb);
                }
                register_block = true;
        } else {
@@ -247,7 +246,6 @@ static int mlxsw_sp_setup_tc_block_bind(struct mlxsw_sp_port *mlxsw_sp_port,
 err_block_bind:
        if (!flow_block_cb_decref(block_cb))
                flow_block_cb_free(block_cb);
-err_cb_register:
        return err;
 }
 
@@ -279,18 +277,10 @@ static void mlxsw_sp_setup_tc_block_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
        }
 }
 
-int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
-                           struct flow_block_offload *f)
+int mlxsw_sp_setup_tc_block_clsact(struct mlxsw_sp_port *mlxsw_sp_port,
+                                  struct flow_block_offload *f,
+                                  bool ingress)
 {
-       bool ingress;
-
-       if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
-               ingress = true;
-       else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
-               ingress = false;
-       else
-               return -EOPNOTSUPP;
-
        f->driver_block_list = &mlxsw_sp_block_cb_list;
 
        switch (f->command) {
index 51e1b39..41855e5 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
+#include <linux/log2.h>
 #include <net/net_namespace.h>
 #include <net/flow_dissector.h>
 #include <net/pkt_cls.h>
@@ -22,6 +23,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
 {
        const struct flow_action_entry *act;
        int mirror_act_count = 0;
+       int police_act_count = 0;
        int err, i;
 
        if (!flow_action_has_entries(flow_action))
@@ -180,6 +182,28 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
                                return err;
                        break;
                        }
+               case FLOW_ACTION_POLICE: {
+                       u32 burst;
+
+                       if (police_act_count++) {
+                               NL_SET_ERR_MSG_MOD(extack, "Multiple police actions per rule are not supported");
+                               return -EOPNOTSUPP;
+                       }
+
+                       /* The kernel might adjust the requested burst size so
+                        * that it is not exactly a power of two. Re-adjust it
+                        * here since the hardware only supports burst sizes
+                        * that are a power of two.
+                        */
+                       burst = roundup_pow_of_two(act->police.burst);
+                       err = mlxsw_sp_acl_rulei_act_police(mlxsw_sp, rulei,
+                                                           act->police.index,
+                                                           act->police.rate_bytes_ps,
+                                                           burst, extack);
+                       if (err)
+                               return err;
+                       break;
+                       }
                default:
                        NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
                        dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
@@ -616,6 +640,7 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
        u64 packets;
        u64 lastuse;
        u64 bytes;
+       u64 drops;
        int err;
 
        ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
@@ -629,11 +654,12 @@ int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
                return -EINVAL;
 
        err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
-                                         &lastuse, &used_hw_stats);
+                                         &drops, &lastuse, &used_hw_stats);
        if (err)
                goto err_rule_get_stats;
 
-       flow_stats_update(&f->stats, bytes, packets, lastuse, used_hw_stats);
+       flow_stats_update(&f->stats, bytes, packets, drops, lastuse,
+                         used_hw_stats);
 
        mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
        return 0;
index f1a44a8..f30599a 100644 (file)
 #include "spectrum_span.h"
 #include "reg.h"
 
-enum mlxsw_sp_mall_action_type {
-       MLXSW_SP_MALL_ACTION_TYPE_MIRROR,
-       MLXSW_SP_MALL_ACTION_TYPE_SAMPLE,
-};
-
-struct mlxsw_sp_mall_mirror_entry {
-       const struct net_device *to_dev;
-       int span_id;
-};
-
-struct mlxsw_sp_mall_entry {
-       struct list_head list;
-       unsigned long cookie;
-       unsigned int priority;
-       enum mlxsw_sp_mall_action_type type;
-       bool ingress;
-       union {
-               struct mlxsw_sp_mall_mirror_entry mirror;
-               struct mlxsw_sp_port_sample sample;
-       };
-       struct rcu_head rcu;
-};
-
 static struct mlxsw_sp_mall_entry *
 mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie)
 {
@@ -50,6 +27,7 @@ mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_mall_entry *mall_entry)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       struct mlxsw_sp_span_agent_parms agent_parms = {};
        struct mlxsw_sp_span_trigger_parms parms;
        enum mlxsw_sp_span_trigger trigger;
        int err;
@@ -59,8 +37,9 @@ mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
                return -EINVAL;
        }
 
-       err = mlxsw_sp_span_agent_get(mlxsw_sp, mall_entry->mirror.to_dev,
-                                     &mall_entry->mirror.span_id);
+       agent_parms.to_dev = mall_entry->mirror.to_dev;
+       err = mlxsw_sp_span_agent_get(mlxsw_sp, &mall_entry->mirror.span_id,
+                                     &agent_parms);
        if (err)
                return err;
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_policer.c
new file mode 100644 (file)
index 0000000..39052e5
--- /dev/null
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Mellanox Technologies. All rights reserved */
+
+#include <linux/idr.h>
+#include <linux/log2.h>
+#include <linux/mutex.h>
+#include <linux/netlink.h>
+#include <net/devlink.h>
+
+#include "spectrum.h"
+
+/* A policer family groups hardware policers of one type and owns the
+ * index range they are allocated from.
+ */
+struct mlxsw_sp_policer_family {
+       enum mlxsw_sp_policer_type type;
+       enum mlxsw_reg_qpcr_g qpcr_type;
+       struct mlxsw_sp *mlxsw_sp;
+       u16 start_index; /* Inclusive */
+       u16 end_index; /* Exclusive */
+       struct idr policer_idr;
+       struct mutex lock; /* Protects policer_idr */
+       atomic_t policers_count;
+       const struct mlxsw_sp_policer_family_ops *ops;
+};
+
+/* One allocated hardware policer: the user-supplied parameters plus the
+ * hardware index chosen for it.
+ */
+struct mlxsw_sp_policer {
+       struct mlxsw_sp_policer_params params;
+       u16 index;
+};
+
+/* Per-family operations: lifetime, index management, hardware
+ * programming and parameter validation.
+ */
+struct mlxsw_sp_policer_family_ops {
+       int (*init)(struct mlxsw_sp_policer_family *family);
+       void (*fini)(struct mlxsw_sp_policer_family *family);
+       int (*policer_index_alloc)(struct mlxsw_sp_policer_family *family,
+                                  struct mlxsw_sp_policer *policer);
+       struct mlxsw_sp_policer * (*policer_index_free)(struct mlxsw_sp_policer_family *family,
+                                                       u16 policer_index);
+       int (*policer_init)(struct mlxsw_sp_policer_family *family,
+                           const struct mlxsw_sp_policer *policer);
+       int (*policer_params_check)(const struct mlxsw_sp_policer_family *family,
+                                   const struct mlxsw_sp_policer_params *params,
+                                   struct netlink_ext_ack *extack);
+};
+
+/* Per-ASIC policer state: one family per policer type, plus the
+ * ASIC-specific burst-size limits (in log2 of 512-bit units).
+ */
+struct mlxsw_sp_policer_core {
+       struct mlxsw_sp_policer_family *family_arr[MLXSW_SP_POLICER_TYPE_MAX + 1];
+       const struct mlxsw_sp_policer_core_ops *ops;
+       u8 lowest_bs_bits;
+       u8 highest_bs_bits;
+};
+
+struct mlxsw_sp_policer_core_ops {
+       int (*init)(struct mlxsw_sp_policer_core *policer_core);
+};
+
+/* Convert a rate from bytes per second to kilobits per second, the unit
+ * programmed into the QPCR register (MLXSW_REG_QPCR_IR_UNITS_K).
+ */
+static u64 mlxsw_sp_policer_rate_bytes_ps_kbps(u64 rate_bytes_ps)
+{
+       return div_u64(rate_bytes_ps, 1000) * BITS_PER_BYTE;
+}
+
+/* Convert a burst size in bytes to the hardware's exponent encoding:
+ * the returned value 'bs' encodes a burst of (2 ^ bs) * 512 bits.
+ */
+static u8 mlxsw_sp_policer_burst_bytes_hw_units(u64 burst_bytes)
+{
+       /* Provided burst size is in bytes. The ASIC burst size value is
+        * (2 ^ bs) * 512 bits. Convert the provided size to 512-bit units.
+        */
+       u64 bs512 = div_u64(burst_bytes, 64);
+
+       if (!bs512)
+               return 0;
+
+       /* fls64() - 1 yields floor(log2(bs512)). */
+       return fls64(bs512) - 1;
+}
+
+/* devlink resource occupancy callback: report how many policers are
+ * currently allocated in this family.
+ */
+static u64 mlxsw_sp_policer_single_rate_occ_get(void *priv)
+{
+       struct mlxsw_sp_policer_family *family = priv;
+
+       return atomic_read(&family->policers_count);
+}
+
+/* Initialize the single-rate family: carve its index range out of the
+ * global policer range and register an occupancy callback with devlink.
+ * Returns -EIO if the required resources are not exposed by firmware.
+ */
+static int
+mlxsw_sp_policer_single_rate_family_init(struct mlxsw_sp_policer_family *family)
+{
+       struct mlxsw_core *core = family->mlxsw_sp->core;
+       struct devlink *devlink;
+
+       /* CPU policers are allocated from the first N policers in the global
+        * range, so skip them.
+        */
+       if (!MLXSW_CORE_RES_VALID(core, MAX_GLOBAL_POLICERS) ||
+           !MLXSW_CORE_RES_VALID(core, MAX_CPU_POLICERS))
+               return -EIO;
+
+       family->start_index = MLXSW_CORE_RES_GET(core, MAX_CPU_POLICERS);
+       family->end_index = MLXSW_CORE_RES_GET(core, MAX_GLOBAL_POLICERS);
+
+       atomic_set(&family->policers_count, 0);
+       devlink = priv_to_devlink(core);
+       devlink_resource_occ_get_register(devlink,
+                                         MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
+                                         mlxsw_sp_policer_single_rate_occ_get,
+                                         family);
+
+       return 0;
+}
+
+/* Tear down the single-rate family. All policers must have been freed
+ * by now; a non-zero count indicates a leak and triggers a warning.
+ */
+static void
+mlxsw_sp_policer_single_rate_family_fini(struct mlxsw_sp_policer_family *family)
+{
+       struct devlink *devlink = priv_to_devlink(family->mlxsw_sp->core);
+
+       devlink_resource_occ_get_unregister(devlink,
+                                           MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS);
+       WARN_ON(atomic_read(&family->policers_count) != 0);
+}
+
+/* Allocate a free hardware index from the family's range and bind the
+ * policer to it in the IDR (under family->lock). On success the chosen
+ * index is stored in policer->index and the occupancy count is bumped.
+ */
+static int
+mlxsw_sp_policer_single_rate_index_alloc(struct mlxsw_sp_policer_family *family,
+                                        struct mlxsw_sp_policer *policer)
+{
+       int id;
+
+       mutex_lock(&family->lock);
+       id = idr_alloc(&family->policer_idr, policer, family->start_index,
+                      family->end_index, GFP_KERNEL);
+       mutex_unlock(&family->lock);
+
+       if (id < 0)
+               return id;
+
+       atomic_inc(&family->policers_count);
+       policer->index = id;
+
+       return 0;
+}
+
+/* Release a hardware index and return the policer that was bound to it,
+ * so the caller can free the object. A missing IDR entry indicates a
+ * double free or bogus index and triggers a warning.
+ */
+static struct mlxsw_sp_policer *
+mlxsw_sp_policer_single_rate_index_free(struct mlxsw_sp_policer_family *family,
+                                       u16 policer_index)
+{
+       struct mlxsw_sp_policer *policer;
+
+       atomic_dec(&family->policers_count);
+
+       mutex_lock(&family->lock);
+       policer = idr_remove(&family->policer_idr, policer_index);
+       mutex_unlock(&family->lock);
+
+       WARN_ON(!policer);
+
+       return policer;
+}
+
+/* Program the policer into hardware via the QPCR register: rate in kbps
+ * units, burst in the (2 ^ bs) * 512-bit encoding, with the violate
+ * counter cleared so drop statistics start from zero.
+ */
+static int
+mlxsw_sp_policer_single_rate_init(struct mlxsw_sp_policer_family *family,
+                                 const struct mlxsw_sp_policer *policer)
+{
+       u64 rate_kbps = mlxsw_sp_policer_rate_bytes_ps_kbps(policer->params.rate);
+       u8 bs = mlxsw_sp_policer_burst_bytes_hw_units(policer->params.burst);
+       struct mlxsw_sp *mlxsw_sp = family->mlxsw_sp;
+       char qpcr_pl[MLXSW_REG_QPCR_LEN];
+
+       mlxsw_reg_qpcr_pack(qpcr_pl, policer->index, MLXSW_REG_QPCR_IR_UNITS_K,
+                           true, rate_kbps, bs);
+       mlxsw_reg_qpcr_clear_counter_set(qpcr_pl, true);
+
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl);
+}
+
+/* Validate policer parameters against what the hardware supports:
+ * byte-based policing only, power-of-two burst within the ASIC's
+ * burst-size window, and rate within the QPCR CIR limits. Each failure
+ * is reported to the user via extack.
+ */
+static int
+mlxsw_sp_policer_single_rate_params_check(const struct mlxsw_sp_policer_family *family,
+                                         const struct mlxsw_sp_policer_params *params,
+                                         struct netlink_ext_ack *extack)
+{
+       struct mlxsw_sp_policer_core *policer_core = family->mlxsw_sp->policer_core;
+       u64 rate_bps = params->rate * BITS_PER_BYTE;
+       u8 bs;
+
+       if (!params->bytes) {
+               NL_SET_ERR_MSG_MOD(extack, "Only bandwidth policing is currently supported by single rate policers");
+               return -EINVAL;
+       }
+
+       if (!is_power_of_2(params->burst)) {
+               NL_SET_ERR_MSG_MOD(extack, "Policer burst size is not power of two");
+               return -EINVAL;
+       }
+
+       bs = mlxsw_sp_policer_burst_bytes_hw_units(params->burst);
+
+       if (bs < policer_core->lowest_bs_bits) {
+               NL_SET_ERR_MSG_MOD(extack, "Policer burst size lower than limit");
+               return -EINVAL;
+       }
+
+       if (bs > policer_core->highest_bs_bits) {
+               NL_SET_ERR_MSG_MOD(extack, "Policer burst size higher than limit");
+               return -EINVAL;
+       }
+
+       if (rate_bps < MLXSW_REG_QPCR_LOWEST_CIR_BITS) {
+               NL_SET_ERR_MSG_MOD(extack, "Policer rate lower than limit");
+               return -EINVAL;
+       }
+
+       if (rate_bps > MLXSW_REG_QPCR_HIGHEST_CIR_BITS) {
+               NL_SET_ERR_MSG_MOD(extack, "Policer rate higher than limit");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static const struct mlxsw_sp_policer_family_ops mlxsw_sp_policer_single_rate_ops = {
+       .init                   = mlxsw_sp_policer_single_rate_family_init,
+       .fini                   = mlxsw_sp_policer_single_rate_family_fini,
+       .policer_index_alloc    = mlxsw_sp_policer_single_rate_index_alloc,
+       .policer_index_free     = mlxsw_sp_policer_single_rate_index_free,
+       .policer_init           = mlxsw_sp_policer_single_rate_init,
+       .policer_params_check   = mlxsw_sp_policer_single_rate_params_check,
+};
+
+/* Template for the single-rate family; kmemdup'ed per-device by
+ * mlxsw_sp_policer_family_register().
+ */
+static const struct mlxsw_sp_policer_family mlxsw_sp_policer_single_rate_family = {
+       .type           = MLXSW_SP_POLICER_TYPE_SINGLE_RATE,
+       .qpcr_type      = MLXSW_REG_QPCR_G_GLOBAL,
+       .ops            = &mlxsw_sp_policer_single_rate_ops,
+};
+
+/* One template per policer type; indexed by enum mlxsw_sp_policer_type. */
+static const struct mlxsw_sp_policer_family *mlxsw_sp_policer_family_arr[] = {
+       [MLXSW_SP_POLICER_TYPE_SINGLE_RATE]     = &mlxsw_sp_policer_single_rate_family,
+};
+
+/* Allocate and program a policer of the given type.
+ *
+ * Validates @params, reserves a hardware index and writes the policer to
+ * the ASIC. On success, returns 0 and stores the index in
+ * *@p_policer_index for later mlxsw_sp_policer_del(). On failure,
+ * returns a negative errno and reports the reason via @extack; partial
+ * allocations are unwound in reverse order.
+ */
+int mlxsw_sp_policer_add(struct mlxsw_sp *mlxsw_sp,
+                        enum mlxsw_sp_policer_type type,
+                        const struct mlxsw_sp_policer_params *params,
+                        struct netlink_ext_ack *extack, u16 *p_policer_index)
+{
+       struct mlxsw_sp_policer_family *family;
+       struct mlxsw_sp_policer *policer;
+       int err;
+
+       family = mlxsw_sp->policer_core->family_arr[type];
+
+       err = family->ops->policer_params_check(family, params, extack);
+       if (err)
+               return err;
+
+       policer = kmalloc(sizeof(*policer), GFP_KERNEL);
+       if (!policer)
+               return -ENOMEM;
+       policer->params = *params;
+
+       err = family->ops->policer_index_alloc(family, policer);
+       if (err) {
+               NL_SET_ERR_MSG_MOD(extack, "Failed to allocate policer index");
+               goto err_policer_index_alloc;
+       }
+
+       err = family->ops->policer_init(family, policer);
+       if (err) {
+               NL_SET_ERR_MSG_MOD(extack, "Failed to initialize policer");
+               goto err_policer_init;
+       }
+
+       *p_policer_index = policer->index;
+
+       return 0;
+
+err_policer_init:
+       family->ops->policer_index_free(family, policer->index);
+err_policer_index_alloc:
+       kfree(policer);
+       return err;
+}
+
+/* Free a policer previously created by mlxsw_sp_policer_add(). The
+ * index-free op returns the bound object, which is released here.
+ */
+void mlxsw_sp_policer_del(struct mlxsw_sp *mlxsw_sp,
+                         enum mlxsw_sp_policer_type type, u16 policer_index)
+{
+       struct mlxsw_sp_policer_family *family;
+       struct mlxsw_sp_policer *policer;
+
+       family = mlxsw_sp->policer_core->family_arr[type];
+       policer = family->ops->policer_index_free(family, policer_index);
+       kfree(policer);
+}
+
+/* Read a policer's violate (drop) counter from hardware by querying the
+ * QPCR register for the given index. Stores the count in *@p_drops;
+ * returns 0 on success or a negative errno on register-query failure.
+ */
+int mlxsw_sp_policer_drops_counter_get(struct mlxsw_sp *mlxsw_sp,
+                                      enum mlxsw_sp_policer_type type,
+                                      u16 policer_index, u64 *p_drops)
+{
+       struct mlxsw_sp_policer_family *family;
+       char qpcr_pl[MLXSW_REG_QPCR_LEN];
+       int err;
+
+       family = mlxsw_sp->policer_core->family_arr[type];
+
+       MLXSW_REG_ZERO(qpcr, qpcr_pl);
+       mlxsw_reg_qpcr_pid_set(qpcr_pl, policer_index);
+       mlxsw_reg_qpcr_g_set(qpcr_pl, family->qpcr_type);
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(qpcr), qpcr_pl);
+       if (err)
+               return err;
+
+       *p_drops = mlxsw_reg_qpcr_violate_count_get(qpcr_pl);
+
+       return 0;
+}
+
+/* Instantiate a policer family from its template: duplicate the
+ * template, set up the IDR and lock, run the family's init op and sanity
+ * check the index range it configured. On success the family is
+ * published in policer_core->family_arr; on failure everything is
+ * unwound in reverse order.
+ */
+static int
+mlxsw_sp_policer_family_register(struct mlxsw_sp *mlxsw_sp,
+                                const struct mlxsw_sp_policer_family *tmpl)
+{
+       struct mlxsw_sp_policer_family *family;
+       int err;
+
+       family = kmemdup(tmpl, sizeof(*family), GFP_KERNEL);
+       if (!family)
+               return -ENOMEM;
+
+       family->mlxsw_sp = mlxsw_sp;
+       idr_init(&family->policer_idr);
+       mutex_init(&family->lock);
+
+       err = family->ops->init(family);
+       if (err)
+               goto err_family_init;
+
+       /* An empty or inverted index range means the init op misbehaved. */
+       if (WARN_ON(family->start_index >= family->end_index)) {
+               err = -EINVAL;
+               goto err_index_check;
+       }
+
+       mlxsw_sp->policer_core->family_arr[tmpl->type] = family;
+
+       return 0;
+
+err_index_check:
+       family->ops->fini(family);
+err_family_init:
+       mutex_destroy(&family->lock);
+       idr_destroy(&family->policer_idr);
+       kfree(family);
+       return err;
+}
+
+/* Reverse of mlxsw_sp_policer_family_register(). The IDR must be empty
+ * by now; a leftover entry indicates leaked policers and warns.
+ */
+static void
+mlxsw_sp_policer_family_unregister(struct mlxsw_sp *mlxsw_sp,
+                                  struct mlxsw_sp_policer_family *family)
+{
+       family->ops->fini(family);
+       mutex_destroy(&family->lock);
+       WARN_ON(!idr_is_empty(&family->policer_idr));
+       idr_destroy(&family->policer_idr);
+       kfree(family);
+}
+
+/* Allocate the per-device policer core, run the ASIC-specific init op
+ * and register every policer family. On failure, families registered so
+ * far are unregistered in reverse order and the core is freed.
+ */
+int mlxsw_sp_policers_init(struct mlxsw_sp *mlxsw_sp)
+{
+       struct mlxsw_sp_policer_core *policer_core;
+       int i, err;
+
+       policer_core = kzalloc(sizeof(*policer_core), GFP_KERNEL);
+       if (!policer_core)
+               return -ENOMEM;
+       mlxsw_sp->policer_core = policer_core;
+       policer_core->ops = mlxsw_sp->policer_core_ops;
+
+       err = policer_core->ops->init(policer_core);
+       if (err)
+               goto err_init;
+
+       for (i = 0; i < MLXSW_SP_POLICER_TYPE_MAX + 1; i++) {
+               err = mlxsw_sp_policer_family_register(mlxsw_sp, mlxsw_sp_policer_family_arr[i]);
+               if (err)
+                       goto err_family_register;
+       }
+
+       return 0;
+
+err_family_register:
+       for (i--; i >= 0; i--) {
+               struct mlxsw_sp_policer_family *family;
+
+               family = mlxsw_sp->policer_core->family_arr[i];
+               mlxsw_sp_policer_family_unregister(mlxsw_sp, family);
+       }
+err_init:
+       kfree(mlxsw_sp->policer_core);
+       return err;
+}
+
+/* Reverse of mlxsw_sp_policers_init(): unregister families in reverse
+ * registration order, then free the policer core.
+ */
+void mlxsw_sp_policers_fini(struct mlxsw_sp *mlxsw_sp)
+{
+       int i;
+
+       for (i = MLXSW_SP_POLICER_TYPE_MAX; i >= 0; i--) {
+               struct mlxsw_sp_policer_family *family;
+
+               family = mlxsw_sp->policer_core->family_arr[i];
+               mlxsw_sp_policer_family_unregister(mlxsw_sp, family);
+       }
+
+       kfree(mlxsw_sp->policer_core);
+}
+
+/* Expose policer capacity via devlink resources: a top-level
+ * "global_policers" resource and a nested "single_rate_policers" child
+ * sized at global minus CPU-reserved policers. Returns -EIO if firmware
+ * does not report the required resource limits.
+ */
+int mlxsw_sp_policer_resources_register(struct mlxsw_core *mlxsw_core)
+{
+       u64 global_policers, cpu_policers, single_rate_policers;
+       struct devlink *devlink = priv_to_devlink(mlxsw_core);
+       struct devlink_resource_size_params size_params;
+       int err;
+
+       if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_GLOBAL_POLICERS) ||
+           !MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
+               return -EIO;
+
+       global_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_GLOBAL_POLICERS);
+       cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
+       single_rate_policers = global_policers - cpu_policers;
+
+       devlink_resource_size_params_init(&size_params, global_policers,
+                                         global_policers, 1,
+                                         DEVLINK_RESOURCE_UNIT_ENTRY);
+       err = devlink_resource_register(devlink, "global_policers",
+                                       global_policers,
+                                       MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
+                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
+                                       &size_params);
+       if (err)
+               return err;
+
+       devlink_resource_size_params_init(&size_params, single_rate_policers,
+                                         single_rate_policers, 1,
+                                         DEVLINK_RESOURCE_UNIT_ENTRY);
+       err = devlink_resource_register(devlink, "single_rate_policers",
+                                       single_rate_policers,
+                                       MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
+                                       MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
+                                       &size_params);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+/* Spectrum-1: set the ASIC's supported burst-size exponent range. */
+static int
+mlxsw_sp1_policer_core_init(struct mlxsw_sp_policer_core *policer_core)
+{
+       policer_core->lowest_bs_bits = MLXSW_REG_QPCR_LOWEST_CBS_BITS_SP1;
+       policer_core->highest_bs_bits = MLXSW_REG_QPCR_HIGHEST_CBS_BITS_SP1;
+
+       return 0;
+}
+
+const struct mlxsw_sp_policer_core_ops mlxsw_sp1_policer_core_ops = {
+       .init = mlxsw_sp1_policer_core_init,
+};
+
+/* Spectrum-2: set the ASIC's supported burst-size exponent range. */
+static int
+mlxsw_sp2_policer_core_init(struct mlxsw_sp_policer_core *policer_core)
+{
+       policer_core->lowest_bs_bits = MLXSW_REG_QPCR_LOWEST_CBS_BITS_SP2;
+       policer_core->highest_bs_bits = MLXSW_REG_QPCR_HIGHEST_CBS_BITS_SP2;
+
+       return 0;
+}
+
+const struct mlxsw_sp_policer_core_ops mlxsw_sp2_policer_core_ops = {
+       .init = mlxsw_sp2_policer_core_init,
+};
index 670a43f..a5ce1ee 100644 (file)
@@ -8,6 +8,7 @@
 #include <net/red.h>
 
 #include "spectrum.h"
+#include "spectrum_span.h"
 #include "reg.h"
 
 #define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
@@ -1272,6 +1273,480 @@ int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
        }
 }
 
+/* A qevent block holds the matchall entries installed on it and the list
+ * of port/trigger bindings the entries are applied to.
+ */
+struct mlxsw_sp_qevent_block {
+       struct list_head binding_list;
+       struct list_head mall_entry_list;
+       struct mlxsw_sp *mlxsw_sp;
+};
+
+/* One binding of a qevent block to a port, qdisc handle, traffic class
+ * and the SPAN trigger that fires the qevent.
+ */
+struct mlxsw_sp_qevent_binding {
+       struct list_head list;
+       struct mlxsw_sp_port *mlxsw_sp_port;
+       u32 handle;
+       int tclass_num;
+       enum mlxsw_sp_span_trigger span_trigger;
+};
+
+static LIST_HEAD(mlxsw_sp_qevent_block_cb_list);
+
+/* Install a mirror qevent on one binding: acquire a SPAN agent towards
+ * the target device, mark the port as analyzed, bind the agent to the
+ * binding's trigger and enable the trigger for its traffic class. On
+ * success the agent's span_id is recorded in the mall entry; on failure
+ * all acquired resources are released in reverse order.
+ */
+static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp,
+                                           struct mlxsw_sp_mall_entry *mall_entry,
+                                           struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
+       struct mlxsw_sp_span_trigger_parms trigger_parms = {};
+       struct mlxsw_sp_span_agent_parms agent_parms = {
+               .to_dev = mall_entry->mirror.to_dev,
+       };
+       int span_id;
+       int err;
+
+       err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, &agent_parms);
+       if (err)
+               return err;
+
+       err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, true);
+       if (err)
+               goto err_analyzed_port_get;
+
+       trigger_parms.span_id = span_id;
+       err = mlxsw_sp_span_agent_bind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
+                                      &trigger_parms);
+       if (err)
+               goto err_agent_bind;
+
+       err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, qevent_binding->span_trigger,
+                                          qevent_binding->tclass_num);
+       if (err)
+               goto err_trigger_enable;
+
+       mall_entry->mirror.span_id = span_id;
+       return 0;
+
+err_trigger_enable:
+       mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
+                                  &trigger_parms);
+err_agent_bind:
+       mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
+err_analyzed_port_get:
+       mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
+       return err;
+}
+
+/* Reverse of mlxsw_sp_qevent_mirror_configure(): disable the trigger,
+ * unbind the SPAN agent, drop the analyzed-port reference and release
+ * the agent.
+ */
+static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp,
+                                              struct mlxsw_sp_mall_entry *mall_entry,
+                                              struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+       struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
+       struct mlxsw_sp_span_trigger_parms trigger_parms = {
+               .span_id = mall_entry->mirror.span_id,
+       };
+
+       mlxsw_sp_span_trigger_disable(mlxsw_sp_port, qevent_binding->span_trigger,
+                                     qevent_binding->tclass_num);
+       mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
+                                  &trigger_parms);
+       mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
+       mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
+}
+
+/* Dispatch configuration of one mall entry on one binding by action
+ * type. Only mirror is implemented; anything else was supposed to be
+ * rejected at rule-install time, hence the WARN_ON.
+ */
+static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
+                                          struct mlxsw_sp_mall_entry *mall_entry,
+                                          struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+       switch (mall_entry->type) {
+       case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
+               return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding);
+       default:
+               /* This should have been validated away. */
+               WARN_ON(1);
+               return -EOPNOTSUPP;
+       }
+}
+
+/* Dispatch deconfiguration of one mall entry on one binding by action
+ * type; mirrors mlxsw_sp_qevent_entry_configure().
+ */
+static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp,
+                                             struct mlxsw_sp_mall_entry *mall_entry,
+                                             struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+       switch (mall_entry->type) {
+       case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
+               return mlxsw_sp_qevent_mirror_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
+       default:
+               WARN_ON(1);
+               return;
+       }
+}
+
+/* Apply every mall entry of the block to one binding. On failure,
+ * entries already configured are rolled back in reverse order.
+ */
+static int mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
+                                            struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+       struct mlxsw_sp_mall_entry *mall_entry;
+       int err;
+
+       list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) {
+               err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry,
+                                                     qevent_binding);
+               if (err)
+                       goto err_entry_configure;
+       }
+
+       return 0;
+
+err_entry_configure:
+       list_for_each_entry_continue_reverse(mall_entry, &qevent_block->mall_entry_list, list)
+               mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
+                                                 qevent_binding);
+       return err;
+}
+
+/* Remove every mall entry of the block from one binding. */
+static void mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qevent_block,
+                                               struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+       struct mlxsw_sp_mall_entry *mall_entry;
+
+       list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list)
+               mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
+                                                 qevent_binding);
+}
+
+/* Configure the block's entries on every binding; roll back already
+ * configured bindings in reverse order on failure.
+ */
+static int mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block)
+{
+       struct mlxsw_sp_qevent_binding *qevent_binding;
+       int err;
+
+       list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) {
+               err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
+               if (err)
+                       goto err_binding_configure;
+       }
+
+       return 0;
+
+err_binding_configure:
+       list_for_each_entry_continue_reverse(qevent_binding, &qevent_block->binding_list, list)
+               mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
+       return err;
+}
+
+/* Remove the block's entries from every binding. */
+static void mlxsw_sp_qevent_block_deconfigure(struct mlxsw_sp_qevent_block *qevent_block)
+{
+       struct mlxsw_sp_qevent_binding *qevent_binding;
+
+       list_for_each_entry(qevent_binding, &qevent_block->binding_list, list)
+               mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
+}
+
+/* Look up a mall entry in the block by its tc rule cookie; NULL if not
+ * found.
+ */
+static struct mlxsw_sp_mall_entry *
+mlxsw_sp_qevent_mall_entry_find(struct mlxsw_sp_qevent_block *block, unsigned long cookie)
+{
+       struct mlxsw_sp_mall_entry *mall_entry;
+
+       list_for_each_entry(mall_entry, &block->mall_entry_list, list)
+               if (mall_entry->cookie == cookie)
+                       return mall_entry;
+
+       return NULL;
+}
+
+/* Handle TC_CLSMATCHALL_REPLACE on a qevent block: validate the offload
+ * request (single filter per block, single action, chain 0, ETH_P_ALL,
+ * HW stats disabled), translate the action into a mall entry (only
+ * mirred is supported) and configure it on all bindings. Errors are
+ * reported via extack and partial state is unwound.
+ */
+static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp,
+                                       struct mlxsw_sp_qevent_block *qevent_block,
+                                       struct tc_cls_matchall_offload *f)
+{
+       struct mlxsw_sp_mall_entry *mall_entry;
+       struct flow_action_entry *act;
+       int err;
+
+       /* It should not currently be possible to replace a matchall rule. So
+        * this must be a new rule.
+        */
+       if (!list_empty(&qevent_block->mall_entry_list)) {
+               NL_SET_ERR_MSG(f->common.extack, "At most one filter supported");
+               return -EOPNOTSUPP;
+       }
+       if (f->rule->action.num_entries != 1) {
+               NL_SET_ERR_MSG(f->common.extack, "Only singular actions supported");
+               return -EOPNOTSUPP;
+       }
+       if (f->common.chain_index) {
+               NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
+               return -EOPNOTSUPP;
+       }
+       if (f->common.protocol != htons(ETH_P_ALL)) {
+               NL_SET_ERR_MSG(f->common.extack, "Protocol matching not supported");
+               return -EOPNOTSUPP;
+       }
+
+       act = &f->rule->action.entries[0];
+       if (!(act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED)) {
+               NL_SET_ERR_MSG(f->common.extack, "HW counters not supported on qevents");
+               return -EOPNOTSUPP;
+       }
+
+       mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
+       if (!mall_entry)
+               return -ENOMEM;
+       mall_entry->cookie = f->cookie;
+
+       if (act->id == FLOW_ACTION_MIRRED) {
+               mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
+               mall_entry->mirror.to_dev = act->dev;
+       } else {
+               NL_SET_ERR_MSG(f->common.extack, "Unsupported action");
+               err = -EOPNOTSUPP;
+               goto err_unsupported_action;
+       }
+
+       /* Entry must be on the list before configuration, since
+        * block_configure walks mall_entry_list.
+        */
+       list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list);
+
+       err = mlxsw_sp_qevent_block_configure(qevent_block);
+       if (err)
+               goto err_block_configure;
+
+       return 0;
+
+err_block_configure:
+       list_del(&mall_entry->list);
+err_unsupported_action:
+       kfree(mall_entry);
+       return err;
+}
+
+static void mlxsw_sp_qevent_mall_destroy(struct mlxsw_sp_qevent_block *qevent_block,
+                                        struct tc_cls_matchall_offload *f)
+{
+       struct mlxsw_sp_mall_entry *mall_entry;
+
+       mall_entry = mlxsw_sp_qevent_mall_entry_find(qevent_block, f->cookie);
+       if (!mall_entry)
+               return;
+
+       mlxsw_sp_qevent_block_deconfigure(qevent_block);
+
+       list_del(&mall_entry->list);
+       kfree(mall_entry);
+}
+
+static int mlxsw_sp_qevent_block_mall_cb(struct mlxsw_sp_qevent_block *qevent_block,
+                                        struct tc_cls_matchall_offload *f)
+{
+       struct mlxsw_sp *mlxsw_sp = qevent_block->mlxsw_sp;
+
+       switch (f->command) {
+       case TC_CLSMATCHALL_REPLACE:
+               return mlxsw_sp_qevent_mall_replace(mlxsw_sp, qevent_block, f);
+       case TC_CLSMATCHALL_DESTROY:
+               mlxsw_sp_qevent_mall_destroy(qevent_block, f);
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int mlxsw_sp_qevent_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+       struct mlxsw_sp_qevent_block *qevent_block = cb_priv;
+
+       switch (type) {
+       case TC_SETUP_CLSMATCHALL:
+               return mlxsw_sp_qevent_block_mall_cb(qevent_block, type_data);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static struct mlxsw_sp_qevent_block *mlxsw_sp_qevent_block_create(struct mlxsw_sp *mlxsw_sp,
+                                                                 struct net *net)
+{
+       struct mlxsw_sp_qevent_block *qevent_block;
+
+       qevent_block = kzalloc(sizeof(*qevent_block), GFP_KERNEL);
+       if (!qevent_block)
+               return NULL;
+
+       INIT_LIST_HEAD(&qevent_block->binding_list);
+       INIT_LIST_HEAD(&qevent_block->mall_entry_list);
+       qevent_block->mlxsw_sp = mlxsw_sp;
+       return qevent_block;
+}
+
+static void
+mlxsw_sp_qevent_block_destroy(struct mlxsw_sp_qevent_block *qevent_block)
+{
+       WARN_ON(!list_empty(&qevent_block->binding_list));
+       WARN_ON(!list_empty(&qevent_block->mall_entry_list));
+       kfree(qevent_block);
+}
+
+static void mlxsw_sp_qevent_block_release(void *cb_priv)
+{
+       struct mlxsw_sp_qevent_block *qevent_block = cb_priv;
+
+       mlxsw_sp_qevent_block_destroy(qevent_block);
+}
+
+static struct mlxsw_sp_qevent_binding *
+mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num,
+                              enum mlxsw_sp_span_trigger span_trigger)
+{
+       struct mlxsw_sp_qevent_binding *binding;
+
+       binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+       if (!binding)
+               return ERR_PTR(-ENOMEM);
+
+       binding->mlxsw_sp_port = mlxsw_sp_port;
+       binding->handle = handle;
+       binding->tclass_num = tclass_num;
+       binding->span_trigger = span_trigger;
+       return binding;
+}
+
+static void
+mlxsw_sp_qevent_binding_destroy(struct mlxsw_sp_qevent_binding *binding)
+{
+       kfree(binding);
+}
+
+static struct mlxsw_sp_qevent_binding *
+mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block,
+                              struct mlxsw_sp_port *mlxsw_sp_port,
+                              u32 handle,
+                              enum mlxsw_sp_span_trigger span_trigger)
+{
+       struct mlxsw_sp_qevent_binding *qevent_binding;
+
+       list_for_each_entry(qevent_binding, &block->binding_list, list)
+               if (qevent_binding->mlxsw_sp_port == mlxsw_sp_port &&
+                   qevent_binding->handle == handle &&
+                   qevent_binding->span_trigger == span_trigger)
+                       return qevent_binding;
+       return NULL;
+}
+
+static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
+                                              struct flow_block_offload *f,
+                                              enum mlxsw_sp_span_trigger span_trigger)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       struct mlxsw_sp_qevent_binding *qevent_binding;
+       struct mlxsw_sp_qevent_block *qevent_block;
+       struct flow_block_cb *block_cb;
+       struct mlxsw_sp_qdisc *qdisc;
+       bool register_block = false;
+       int err;
+
+       block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
+       if (!block_cb) {
+               qevent_block = mlxsw_sp_qevent_block_create(mlxsw_sp, f->net);
+               if (!qevent_block)
+                       return -ENOMEM;
+               block_cb = flow_block_cb_alloc(mlxsw_sp_qevent_block_cb, mlxsw_sp, qevent_block,
+                                              mlxsw_sp_qevent_block_release);
+               if (IS_ERR(block_cb)) {
+                       mlxsw_sp_qevent_block_destroy(qevent_block);
+                       return PTR_ERR(block_cb);
+               }
+               register_block = true;
+       } else {
+               qevent_block = flow_block_cb_priv(block_cb);
+       }
+       flow_block_cb_incref(block_cb);
+
+       qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port, f->sch->handle);
+       if (!qdisc) {
+               NL_SET_ERR_MSG(f->extack, "Qdisc not offloaded");
+               err = -ENOENT;
+               goto err_find_qdisc;
+       }
+
+       if (WARN_ON(mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
+                                                  span_trigger))) {
+               err = -EEXIST;
+               goto err_binding_exists;
+       }
+
+       qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port, f->sch->handle,
+                                                       qdisc->tclass_num, span_trigger);
+       if (IS_ERR(qevent_binding)) {
+               err = PTR_ERR(qevent_binding);
+               goto err_binding_create;
+       }
+
+       err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
+       if (err)
+               goto err_binding_configure;
+
+       list_add(&qevent_binding->list, &qevent_block->binding_list);
+
+       if (register_block) {
+               flow_block_cb_add(block_cb, f);
+               list_add_tail(&block_cb->driver_list, &mlxsw_sp_qevent_block_cb_list);
+       }
+
+       return 0;
+
+err_binding_configure:
+       mlxsw_sp_qevent_binding_destroy(qevent_binding);
+err_binding_create:
+err_binding_exists:
+err_find_qdisc:
+       if (!flow_block_cb_decref(block_cb))
+               flow_block_cb_free(block_cb);
+       return err;
+}
+
+static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
+                                                 struct flow_block_offload *f,
+                                                 enum mlxsw_sp_span_trigger span_trigger)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       struct mlxsw_sp_qevent_binding *qevent_binding;
+       struct mlxsw_sp_qevent_block *qevent_block;
+       struct flow_block_cb *block_cb;
+
+       block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
+       if (!block_cb)
+               return;
+       qevent_block = flow_block_cb_priv(block_cb);
+
+       qevent_binding = mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
+                                                       span_trigger);
+       if (!qevent_binding)
+               return;
+
+       list_del(&qevent_binding->list);
+       mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
+       mlxsw_sp_qevent_binding_destroy(qevent_binding);
+
+       if (!flow_block_cb_decref(block_cb)) {
+               flow_block_cb_remove(block_cb, f);
+               list_del(&block_cb->driver_list);
+       }
+}
+
+static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
+                                         struct flow_block_offload *f,
+                                         enum mlxsw_sp_span_trigger span_trigger)
+{
+       f->driver_block_list = &mlxsw_sp_qevent_block_cb_list;
+
+       switch (f->command) {
+       case FLOW_BLOCK_BIND:
+               return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f, span_trigger);
+       case FLOW_BLOCK_UNBIND:
+               mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f, span_trigger);
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
+                                             struct flow_block_offload *f)
+{
+       return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f, MLXSW_SP_SPAN_TRIGGER_EARLY_DROP);
+}
+
 int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
 {
        struct mlxsw_sp_qdisc_state *qdisc_state;
index 770de02..019ed50 100644 (file)
@@ -6262,7 +6262,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
        }
 
        fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
-       if (WARN_ON(!fib_work))
+       if (!fib_work)
                return NOTIFY_BAD;
 
        fib_work->mlxsw_sp = router->mlxsw_sp;
index 304eb8c..323eaf9 100644 (file)
 struct mlxsw_sp_span {
        struct work_struct work;
        struct mlxsw_sp *mlxsw_sp;
+       const struct mlxsw_sp_span_trigger_ops **span_trigger_ops_arr;
+       const struct mlxsw_sp_span_entry_ops **span_entry_ops_arr;
+       size_t span_entry_ops_arr_size;
        struct list_head analyzed_ports_list;
        struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */
        struct list_head trigger_entries_list;
+       u16 policer_id_base;
+       refcount_t policer_id_base_ref_count;
        atomic_t active_entries_count;
        int entries_count;
        struct mlxsw_sp_span_entry entries[];
@@ -38,12 +43,31 @@ struct mlxsw_sp_span_analyzed_port {
 
 struct mlxsw_sp_span_trigger_entry {
        struct list_head list; /* Member of trigger_entries_list */
+       struct mlxsw_sp_span *span;
+       const struct mlxsw_sp_span_trigger_ops *ops;
        refcount_t ref_count;
        u8 local_port;
        enum mlxsw_sp_span_trigger trigger;
        struct mlxsw_sp_span_trigger_parms parms;
 };
 
+enum mlxsw_sp_span_trigger_type {
+       MLXSW_SP_SPAN_TRIGGER_TYPE_PORT,
+       MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL,
+};
+
+struct mlxsw_sp_span_trigger_ops {
+       int (*bind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
+       void (*unbind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
+       bool (*matches)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
+                       enum mlxsw_sp_span_trigger trigger,
+                       struct mlxsw_sp_port *mlxsw_sp_port);
+       int (*enable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
+                     struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
+       void (*disable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
+                       struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
+};
+
 static void mlxsw_sp_span_respin_work(struct work_struct *work);
 
 static u64 mlxsw_sp_span_occ_get(void *priv)
@@ -57,7 +81,7 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
 {
        struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
        struct mlxsw_sp_span *span;
-       int i, entries_count;
+       int i, entries_count, err;
 
        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
                return -EIO;
@@ -66,6 +90,7 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
        span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
        if (!span)
                return -ENOMEM;
+       refcount_set(&span->policer_id_base_ref_count, 0);
        span->entries_count = entries_count;
        atomic_set(&span->active_entries_count, 0);
        mutex_init(&span->analyzed_ports_lock);
@@ -77,11 +102,20 @@ int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
        for (i = 0; i < mlxsw_sp->span->entries_count; i++)
                mlxsw_sp->span->entries[i].id = i;
 
+       err = mlxsw_sp->span_ops->init(mlxsw_sp);
+       if (err)
+               goto err_init;
+
        devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
                                          mlxsw_sp_span_occ_get, mlxsw_sp);
        INIT_WORK(&span->work, mlxsw_sp_span_respin_work);
 
        return 0;
+
+err_init:
+       mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
+       kfree(mlxsw_sp->span);
+       return err;
 }
 
 void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
@@ -97,8 +131,41 @@ void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
        kfree(mlxsw_sp->span);
 }
 
+static bool mlxsw_sp1_span_cpu_can_handle(const struct net_device *dev)
+{
+       return !dev;
+}
+
+static int mlxsw_sp1_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
+                                         const struct net_device *to_dev,
+                                         struct mlxsw_sp_span_parms *sparmsp)
+{
+       return -EOPNOTSUPP;
+}
+
+static int
+mlxsw_sp1_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
+                                  struct mlxsw_sp_span_parms sparms)
+{
+       return -EOPNOTSUPP;
+}
+
+static void
+mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+}
+
+static const
+struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = {
+       .can_handle = mlxsw_sp1_span_cpu_can_handle,
+       .parms_set = mlxsw_sp1_span_entry_cpu_parms,
+       .configure = mlxsw_sp1_span_entry_cpu_configure,
+       .deconfigure = mlxsw_sp1_span_entry_cpu_deconfigure,
+};
+
 static int
-mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev,
+mlxsw_sp_span_entry_phys_parms(struct mlxsw_sp *mlxsw_sp,
+                              const struct net_device *to_dev,
                               struct mlxsw_sp_span_parms *sparmsp)
 {
        sparmsp->dest_port = netdev_priv(to_dev);
@@ -118,6 +185,8 @@ mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
        /* Create a new port analayzer entry for local_port. */
        mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
                            MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
+       mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
+       mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
 
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
 }
@@ -374,7 +443,8 @@ out:
 }
 
 static int
-mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev,
+mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp,
+                                 const struct net_device *to_dev,
                                  struct mlxsw_sp_span_parms *sparmsp)
 {
        struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
@@ -413,6 +483,8 @@ mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
        /* Create a new port analayzer entry for local_port. */
        mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
                            MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+       mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
+       mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
        mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
        mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
                                    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
@@ -475,7 +547,8 @@ out:
 }
 
 static int
-mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev,
+mlxsw_sp_span_entry_gretap6_parms(struct mlxsw_sp *mlxsw_sp,
+                                 const struct net_device *to_dev,
                                  struct mlxsw_sp_span_parms *sparmsp)
 {
        struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
@@ -514,6 +587,8 @@ mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
        /* Create a new port analayzer entry for local_port. */
        mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
                            MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+       mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
+       mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
        mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
        mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
                                    MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
@@ -549,7 +624,8 @@ mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
 }
 
 static int
-mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev,
+mlxsw_sp_span_entry_vlan_parms(struct mlxsw_sp *mlxsw_sp,
+                              const struct net_device *to_dev,
                               struct mlxsw_sp_span_parms *sparmsp)
 {
        struct net_device *real_dev;
@@ -576,6 +652,8 @@ mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
 
        mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
                            MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
+       mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
+       mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
        mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
 
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
@@ -597,7 +675,61 @@ struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
 };
 
 static const
-struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
+struct mlxsw_sp_span_entry_ops *mlxsw_sp1_span_entry_ops_arr[] = {
+       &mlxsw_sp1_span_entry_ops_cpu,
+       &mlxsw_sp_span_entry_ops_phys,
+#if IS_ENABLED(CONFIG_NET_IPGRE)
+       &mlxsw_sp_span_entry_ops_gretap4,
+#endif
+#if IS_ENABLED(CONFIG_IPV6_GRE)
+       &mlxsw_sp_span_entry_ops_gretap6,
+#endif
+       &mlxsw_sp_span_entry_ops_vlan,
+};
+
+static bool mlxsw_sp2_span_cpu_can_handle(const struct net_device *dev)
+{
+       return !dev;
+}
+
+static int mlxsw_sp2_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
+                                         const struct net_device *to_dev,
+                                         struct mlxsw_sp_span_parms *sparmsp)
+{
+       sparmsp->dest_port = mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
+       return 0;
+}
+
+static int
+mlxsw_sp2_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
+                                  struct mlxsw_sp_span_parms sparms)
+{
+       /* Mirroring to the CPU port is like mirroring to any other physical
+        * port. Its local port is used instead of that of the physical port.
+        */
+       return mlxsw_sp_span_entry_phys_configure(span_entry, sparms);
+}
+
+static void
+mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+       enum mlxsw_reg_mpat_span_type span_type;
+
+       span_type = MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH;
+       mlxsw_sp_span_entry_deconfigure_common(span_entry, span_type);
+}
+
+static const
+struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = {
+       .can_handle = mlxsw_sp2_span_cpu_can_handle,
+       .parms_set = mlxsw_sp2_span_entry_cpu_parms,
+       .configure = mlxsw_sp2_span_entry_cpu_configure,
+       .deconfigure = mlxsw_sp2_span_entry_cpu_deconfigure,
+};
+
+static const
+struct mlxsw_sp_span_entry_ops *mlxsw_sp2_span_entry_ops_arr[] = {
+       &mlxsw_sp2_span_entry_ops_cpu,
        &mlxsw_sp_span_entry_ops_phys,
 #if IS_ENABLED(CONFIG_NET_IPGRE)
        &mlxsw_sp_span_entry_ops_gretap4,
@@ -609,7 +741,8 @@ struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
 };
 
 static int
-mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev,
+mlxsw_sp_span_entry_nop_parms(struct mlxsw_sp *mlxsw_sp,
+                             const struct net_device *to_dev,
                              struct mlxsw_sp_span_parms *sparmsp)
 {
        return mlxsw_sp_span_entry_unoffloadable(sparmsp);
@@ -644,16 +777,15 @@ mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
                goto set_parms;
 
        if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
-               netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
-                          sparms.dest_port->dev->name);
+               dev_err(mlxsw_sp->bus_info->dev,
+                       "Cannot mirror to a port which belongs to a different mlxsw instance\n");
                sparms.dest_port = NULL;
                goto set_parms;
        }
 
        err = span_entry->ops->configure(span_entry, sparms);
        if (err) {
-               netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
-                          sparms.dest_port->dev->name);
+               dev_err(mlxsw_sp->bus_info->dev, "Failed to offload mirror\n");
                sparms.dest_port = NULL;
                goto set_parms;
        }
@@ -669,6 +801,45 @@ mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
                span_entry->ops->deconfigure(span_entry);
 }
 
+static int mlxsw_sp_span_policer_id_base_set(struct mlxsw_sp_span *span,
+                                            u16 policer_id)
+{
+       struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
+       u16 policer_id_base;
+       int err;
+
+       /* Policers set on SPAN agents must be in the range of
+        * `policer_id_base .. policer_id_base + max_span_agents - 1`. If the
+        * base is set and the new policer is not within the range, then we
+        * must error out.
+        */
+       if (refcount_read(&span->policer_id_base_ref_count)) {
+               if (policer_id < span->policer_id_base ||
+                   policer_id >= span->policer_id_base + span->entries_count)
+                       return -EINVAL;
+
+               refcount_inc(&span->policer_id_base_ref_count);
+               return 0;
+       }
+
+       /* Base must be even. */
+       policer_id_base = policer_id % 2 == 0 ? policer_id : policer_id - 1;
+       err = mlxsw_sp->span_ops->policer_id_base_set(mlxsw_sp,
+                                                     policer_id_base);
+       if (err)
+               return err;
+
+       span->policer_id_base = policer_id_base;
+       refcount_set(&span->policer_id_base_ref_count, 1);
+
+       return 0;
+}
+
+static void mlxsw_sp_span_policer_id_base_unset(struct mlxsw_sp_span *span)
+{
+       refcount_dec(&span->policer_id_base_ref_count);
+}
+
 static struct mlxsw_sp_span_entry *
 mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
                           const struct net_device *to_dev,
@@ -688,6 +859,15 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
        if (!span_entry)
                return NULL;
 
+       if (sparms.policer_enable) {
+               int err;
+
+               err = mlxsw_sp_span_policer_id_base_set(mlxsw_sp->span,
+                                                       sparms.policer_id);
+               if (err)
+                       return NULL;
+       }
+
        atomic_inc(&mlxsw_sp->span->active_entries_count);
        span_entry->ops = ops;
        refcount_set(&span_entry->ref_count, 1);
@@ -702,6 +882,8 @@ static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
 {
        mlxsw_sp_span_entry_deconfigure(span_entry);
        atomic_dec(&mlxsw_sp->span->active_entries_count);
+       if (span_entry->parms.policer_enable)
+               mlxsw_sp_span_policer_id_base_unset(mlxsw_sp->span);
 }
 
 struct mlxsw_sp_span_entry *
@@ -741,6 +923,24 @@ mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
 }
 
 static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find_by_parms(struct mlxsw_sp *mlxsw_sp,
+                                 const struct net_device *to_dev,
+                                 const struct mlxsw_sp_span_parms *sparms)
+{
+       int i;
+
+       for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
+               struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
+
+               if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev &&
+                   curr->parms.policer_enable == sparms->policer_enable &&
+                   curr->parms.policer_id == sparms->policer_id)
+                       return curr;
+       }
+       return NULL;
+}
+
+static struct mlxsw_sp_span_entry *
 mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
                        const struct net_device *to_dev,
                        const struct mlxsw_sp_span_entry_ops *ops,
@@ -748,7 +948,8 @@ mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
 {
        struct mlxsw_sp_span_entry *span_entry;
 
-       span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
+       span_entry = mlxsw_sp_span_entry_find_by_parms(mlxsw_sp, to_dev,
+                                                      &sparms);
        if (span_entry) {
                /* Already exists, just take a reference */
                refcount_inc(&span_entry->ref_count);
@@ -766,6 +967,14 @@ static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
        return 0;
 }
 
+static u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu,
+                                     u32 speed)
+{
+       u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu);
+
+       return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
+}
+
 static int
 mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
 {
@@ -782,6 +991,7 @@ mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
                speed = 0;
 
        buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu);
+       buffsize = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, buffsize);
        mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
 }
@@ -856,11 +1066,12 @@ static const struct mlxsw_sp_span_entry_ops *
 mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
                        const struct net_device *to_dev)
 {
+       struct mlxsw_sp_span *span = mlxsw_sp->span;
        size_t i;
 
-       for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
-               if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
-                       return mlxsw_sp_span_entry_types[i];
+       for (i = 0; i < span->span_entry_ops_arr_size; ++i)
+               if (span->span_entry_ops_arr[i]->can_handle(to_dev))
+                       return span->span_entry_ops_arr[i];
 
        return NULL;
 }
@@ -882,7 +1093,7 @@ static void mlxsw_sp_span_respin_work(struct work_struct *work)
                if (!refcount_read(&curr->ref_count))
                        continue;
 
-               err = curr->ops->parms_set(curr->to_dev, &sparms);
+               err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
                if (err)
                        continue;
 
@@ -901,9 +1112,10 @@ void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
        mlxsw_core_schedule_work(&mlxsw_sp->span->work);
 }
 
-int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp,
-                           const struct net_device *to_dev, int *p_span_id)
+int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id,
+                           const struct mlxsw_sp_span_agent_parms *parms)
 {
+       const struct net_device *to_dev = parms->to_dev;
        const struct mlxsw_sp_span_entry_ops *ops;
        struct mlxsw_sp_span_entry *span_entry;
        struct mlxsw_sp_span_parms sparms;
@@ -918,10 +1130,12 @@ int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp,
        }
 
        memset(&sparms, 0, sizeof(sparms));
-       err = ops->parms_set(to_dev, &sparms);
+       err = ops->parms_set(mlxsw_sp, to_dev, &sparms);
        if (err)
                return err;
 
+       sparms.policer_id = parms->policer_id;
+       sparms.policer_enable = parms->policer_enable;
        span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
        if (!span_entry)
                return -ENOBUFS;
@@ -1050,9 +1264,9 @@ out_unlock:
 }
 
 static int
-__mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span,
-                                  struct mlxsw_sp_span_trigger_entry *
-                                  trigger_entry, bool enable)
+__mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span *span,
+                                 struct mlxsw_sp_span_trigger_entry *
+                                 trigger_entry, bool enable)
 {
        char mpar_pl[MLXSW_REG_MPAR_LEN];
        enum mlxsw_reg_mpar_i_e i_e;
@@ -1075,19 +1289,254 @@ __mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span,
 }
 
 static int
-mlxsw_sp_span_trigger_entry_bind(struct mlxsw_sp_span *span,
-                                struct mlxsw_sp_span_trigger_entry *
-                                trigger_entry)
+mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span_trigger_entry *
+                               trigger_entry)
+{
+       return __mlxsw_sp_span_trigger_port_bind(trigger_entry->span,
+                                                trigger_entry, true);
+}
+
+static void
+mlxsw_sp_span_trigger_port_unbind(struct mlxsw_sp_span_trigger_entry *
+                                 trigger_entry)
+{
+       __mlxsw_sp_span_trigger_port_bind(trigger_entry->span, trigger_entry,
+                                         false);
+}
+
+static bool
+mlxsw_sp_span_trigger_port_matches(struct mlxsw_sp_span_trigger_entry *
+                                  trigger_entry,
+                                  enum mlxsw_sp_span_trigger trigger,
+                                  struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       return trigger_entry->trigger == trigger &&
+              trigger_entry->local_port == mlxsw_sp_port->local_port;
+}
+
+static int
+mlxsw_sp_span_trigger_port_enable(struct mlxsw_sp_span_trigger_entry *
+                                 trigger_entry,
+                                 struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
 {
-       return __mlxsw_sp_span_trigger_entry_bind(span, trigger_entry, true);
+       /* Port trigger are enabled during binding. */
+       return 0;
 }
 
 static void
-mlxsw_sp_span_trigger_entry_unbind(struct mlxsw_sp_span *span,
-                                  struct mlxsw_sp_span_trigger_entry *
+mlxsw_sp_span_trigger_port_disable(struct mlxsw_sp_span_trigger_entry *
+                                  trigger_entry,
+                                  struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
+{
+}
+
+static const struct mlxsw_sp_span_trigger_ops
+mlxsw_sp_span_trigger_port_ops = {
+       .bind = mlxsw_sp_span_trigger_port_bind,
+       .unbind = mlxsw_sp_span_trigger_port_unbind,
+       .matches = mlxsw_sp_span_trigger_port_matches,
+       .enable = mlxsw_sp_span_trigger_port_enable,
+       .disable = mlxsw_sp_span_trigger_port_disable,
+};
+
+static int
+mlxsw_sp1_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
                                   trigger_entry)
 {
-       __mlxsw_sp_span_trigger_entry_bind(span, trigger_entry, false);
+       return -EOPNOTSUPP;
+}
+
+static void
+mlxsw_sp1_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
+                                    trigger_entry)
+{
+}
+
+static bool
+mlxsw_sp1_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
+                                     trigger_entry,
+                                     enum mlxsw_sp_span_trigger trigger,
+                                     struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       WARN_ON_ONCE(1);
+       return false;
+}
+
+static int
+mlxsw_sp1_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
+                                    trigger_entry,
+                                    struct mlxsw_sp_port *mlxsw_sp_port,
+                                    u8 tc)
+{
+       return -EOPNOTSUPP;
+}
+
+static void
+mlxsw_sp1_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
+                                     trigger_entry,
+                                     struct mlxsw_sp_port *mlxsw_sp_port,
+                                     u8 tc)
+{
+}
+
+static const struct mlxsw_sp_span_trigger_ops
+mlxsw_sp1_span_trigger_global_ops = {
+       .bind = mlxsw_sp1_span_trigger_global_bind,
+       .unbind = mlxsw_sp1_span_trigger_global_unbind,
+       .matches = mlxsw_sp1_span_trigger_global_matches,
+       .enable = mlxsw_sp1_span_trigger_global_enable,
+       .disable = mlxsw_sp1_span_trigger_global_disable,
+};
+
+static const struct mlxsw_sp_span_trigger_ops *
+mlxsw_sp1_span_trigger_ops_arr[] = {
+       [MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
+       [MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
+               &mlxsw_sp1_span_trigger_global_ops,
+};
+
+static int
+mlxsw_sp2_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
+                                  trigger_entry)
+{
+       struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
+       enum mlxsw_reg_mpagr_trigger trigger;
+       char mpagr_pl[MLXSW_REG_MPAGR_LEN];
+
+       switch (trigger_entry->trigger) {
+       case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
+               trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_SHARED_BUFFER;
+               break;
+       case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
+               trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_WRED;
+               break;
+       case MLXSW_SP_SPAN_TRIGGER_ECN:
+               trigger = MLXSW_REG_MPAGR_TRIGGER_EGRESS_ECN;
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               return -EINVAL;
+       }
+
+       mlxsw_reg_mpagr_pack(mpagr_pl, trigger, trigger_entry->parms.span_id,
+                            1);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpagr), mpagr_pl);
+}
+
+static void
+mlxsw_sp2_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
+                                    trigger_entry)
+{
+       /* There is no unbinding for global triggers. The trigger should be
+        * disabled on all ports by now.
+        */
+}
+
+static bool
+mlxsw_sp2_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
+                                     trigger_entry,
+                                     enum mlxsw_sp_span_trigger trigger,
+                                     struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       return trigger_entry->trigger == trigger;
+}
+
+static int
+__mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
+                                      trigger_entry,
+                                      struct mlxsw_sp_port *mlxsw_sp_port,
+                                      u8 tc, bool enable)
+{
+       struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
+       char momte_pl[MLXSW_REG_MOMTE_LEN];
+       enum mlxsw_reg_momte_type type;
+       int err;
+
+       switch (trigger_entry->trigger) {
+       case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
+               type = MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS;
+               break;
+       case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
+               type = MLXSW_REG_MOMTE_TYPE_WRED;
+               break;
+       case MLXSW_SP_SPAN_TRIGGER_ECN:
+               type = MLXSW_REG_MOMTE_TYPE_ECN;
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               return -EINVAL;
+       }
+
+       /* Query existing configuration in order to only change the state of
+        * the specified traffic class.
+        */
+       mlxsw_reg_momte_pack(momte_pl, mlxsw_sp_port->local_port, type);
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
+       if (err)
+               return err;
+
+       mlxsw_reg_momte_tclass_en_set(momte_pl, tc, enable);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
+}
+
+static int
+mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
+                                    trigger_entry,
+                                    struct mlxsw_sp_port *mlxsw_sp_port,
+                                    u8 tc)
+{
+       return __mlxsw_sp2_span_trigger_global_enable(trigger_entry,
+                                                     mlxsw_sp_port, tc, true);
+}
+
+static void
+mlxsw_sp2_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
+                                     trigger_entry,
+                                     struct mlxsw_sp_port *mlxsw_sp_port,
+                                     u8 tc)
+{
+       __mlxsw_sp2_span_trigger_global_enable(trigger_entry, mlxsw_sp_port, tc,
+                                              false);
+}
+
+static const struct mlxsw_sp_span_trigger_ops
+mlxsw_sp2_span_trigger_global_ops = {
+       .bind = mlxsw_sp2_span_trigger_global_bind,
+       .unbind = mlxsw_sp2_span_trigger_global_unbind,
+       .matches = mlxsw_sp2_span_trigger_global_matches,
+       .enable = mlxsw_sp2_span_trigger_global_enable,
+       .disable = mlxsw_sp2_span_trigger_global_disable,
+};
+
+static const struct mlxsw_sp_span_trigger_ops *
+mlxsw_sp2_span_trigger_ops_arr[] = {
+       [MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
+       [MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
+               &mlxsw_sp2_span_trigger_global_ops,
+};
+
+static void
+mlxsw_sp_span_trigger_ops_set(struct mlxsw_sp_span_trigger_entry *trigger_entry)
+{
+       struct mlxsw_sp_span *span = trigger_entry->span;
+       enum mlxsw_sp_span_trigger_type type;
+
+       switch (trigger_entry->trigger) {
+       case MLXSW_SP_SPAN_TRIGGER_INGRESS: /* fall-through */
+       case MLXSW_SP_SPAN_TRIGGER_EGRESS:
+               type = MLXSW_SP_SPAN_TRIGGER_TYPE_PORT;
+               break;
+       case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP: /* fall-through */
+       case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP: /* fall-through */
+       case MLXSW_SP_SPAN_TRIGGER_ECN:
+               type = MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL;
+               break;
+       default:
+               WARN_ON_ONCE(1);
+               return;
+       }
+
+       trigger_entry->ops = span->span_trigger_ops_arr[type];
 }
 
 static struct mlxsw_sp_span_trigger_entry *
@@ -1105,12 +1554,15 @@ mlxsw_sp_span_trigger_entry_create(struct mlxsw_sp_span *span,
                return ERR_PTR(-ENOMEM);
 
        refcount_set(&trigger_entry->ref_count, 1);
-       trigger_entry->local_port = mlxsw_sp_port->local_port;
+       trigger_entry->local_port = mlxsw_sp_port ? mlxsw_sp_port->local_port :
+                                                   0;
        trigger_entry->trigger = trigger;
        memcpy(&trigger_entry->parms, parms, sizeof(trigger_entry->parms));
+       trigger_entry->span = span;
+       mlxsw_sp_span_trigger_ops_set(trigger_entry);
        list_add_tail(&trigger_entry->list, &span->trigger_entries_list);
 
-       err = mlxsw_sp_span_trigger_entry_bind(span, trigger_entry);
+       err = trigger_entry->ops->bind(trigger_entry);
        if (err)
                goto err_trigger_entry_bind;
 
@@ -1127,7 +1579,7 @@ mlxsw_sp_span_trigger_entry_destroy(struct mlxsw_sp_span *span,
                                    struct mlxsw_sp_span_trigger_entry *
                                    trigger_entry)
 {
-       mlxsw_sp_span_trigger_entry_unbind(span, trigger_entry);
+       trigger_entry->ops->unbind(trigger_entry);
        list_del(&trigger_entry->list);
        kfree(trigger_entry);
 }
@@ -1140,8 +1592,8 @@ mlxsw_sp_span_trigger_entry_find(struct mlxsw_sp_span *span,
        struct mlxsw_sp_span_trigger_entry *trigger_entry;
 
        list_for_each_entry(trigger_entry, &span->trigger_entries_list, list) {
-               if (trigger_entry->trigger == trigger &&
-                   trigger_entry->local_port == mlxsw_sp_port->local_port)
+               if (trigger_entry->ops->matches(trigger_entry, trigger,
+                                               mlxsw_sp_port))
                        return trigger_entry;
        }
 
@@ -1206,3 +1658,138 @@ void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
 
        mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry);
 }
+
+int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port,
+                                enum mlxsw_sp_span_trigger trigger, u8 tc)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       struct mlxsw_sp_span_trigger_entry *trigger_entry;
+
+       ASSERT_RTNL();
+
+       trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
+                                                        trigger,
+                                                        mlxsw_sp_port);
+       if (WARN_ON_ONCE(!trigger_entry))
+               return -EINVAL;
+
+       return trigger_entry->ops->enable(trigger_entry, mlxsw_sp_port, tc);
+}
+
+void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
+                                  enum mlxsw_sp_span_trigger trigger, u8 tc)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       struct mlxsw_sp_span_trigger_entry *trigger_entry;
+
+       ASSERT_RTNL();
+
+       trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
+                                                        trigger,
+                                                        mlxsw_sp_port);
+       if (WARN_ON_ONCE(!trigger_entry))
+               return;
+
+       return trigger_entry->ops->disable(trigger_entry, mlxsw_sp_port, tc);
+}
+
+static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
+{
+       size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr);
+
+       /* Must be first to avoid NULL pointer dereference by subsequent
+        * can_handle() callbacks.
+        */
+       if (WARN_ON(mlxsw_sp1_span_entry_ops_arr[0] !=
+                   &mlxsw_sp1_span_entry_ops_cpu))
+               return -EINVAL;
+
+       mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp1_span_trigger_ops_arr;
+       mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp1_span_entry_ops_arr;
+       mlxsw_sp->span->span_entry_ops_arr_size = arr_size;
+
+       return 0;
+}
+
+static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed)
+{
+       return mtu * 5 / 2;
+}
+
+static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
+                                             u16 policer_id_base)
+{
+       return -EOPNOTSUPP;
+}
+
+const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
+       .init = mlxsw_sp1_span_init,
+       .buffsize_get = mlxsw_sp1_span_buffsize_get,
+       .policer_id_base_set = mlxsw_sp1_span_policer_id_base_set,
+};
+
+static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp)
+{
+       size_t arr_size = ARRAY_SIZE(mlxsw_sp2_span_entry_ops_arr);
+
+       /* Must be first to avoid NULL pointer dereference by subsequent
+        * can_handle() callbacks.
+        */
+       if (WARN_ON(mlxsw_sp2_span_entry_ops_arr[0] !=
+                   &mlxsw_sp2_span_entry_ops_cpu))
+               return -EINVAL;
+
+       mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp2_span_trigger_ops_arr;
+       mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp2_span_entry_ops_arr;
+       mlxsw_sp->span->span_entry_ops_arr_size = arr_size;
+
+       return 0;
+}
+
+#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
+#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
+
+static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor)
+{
+       return 3 * mtu + buffer_factor * speed / 1000;
+}
+
+static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed)
+{
+       int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;
+
+       return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
+}
+
+static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
+                                             u16 policer_id_base)
+{
+       char mogcr_pl[MLXSW_REG_MOGCR_LEN];
+       int err;
+
+       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
+       if (err)
+               return err;
+
+       mlxsw_reg_mogcr_mirroring_pid_base_set(mogcr_pl, policer_id_base);
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
+}
+
+const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
+       .init = mlxsw_sp2_span_init,
+       .buffsize_get = mlxsw_sp2_span_buffsize_get,
+       .policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
+};
+
+static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed)
+{
+       int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;
+
+       return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
+}
+
+const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
+       .init = mlxsw_sp2_span_init,
+       .buffsize_get = mlxsw_sp3_span_buffsize_get,
+       .policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
+};
index 9f6dd2d..1c746dd 100644 (file)
@@ -21,19 +21,37 @@ struct mlxsw_sp_span_parms {
        union mlxsw_sp_l3addr daddr;
        union mlxsw_sp_l3addr saddr;
        u16 vid;
+       u16 policer_id;
+       bool policer_enable;
 };
 
 enum mlxsw_sp_span_trigger {
        MLXSW_SP_SPAN_TRIGGER_INGRESS,
        MLXSW_SP_SPAN_TRIGGER_EGRESS,
+       MLXSW_SP_SPAN_TRIGGER_TAIL_DROP,
+       MLXSW_SP_SPAN_TRIGGER_EARLY_DROP,
+       MLXSW_SP_SPAN_TRIGGER_ECN,
 };
 
 struct mlxsw_sp_span_trigger_parms {
        int span_id;
 };
 
+struct mlxsw_sp_span_agent_parms {
+       const struct net_device *to_dev;
+       u16 policer_id;
+       bool policer_enable;
+};
+
 struct mlxsw_sp_span_entry_ops;
 
+struct mlxsw_sp_span_ops {
+       int (*init)(struct mlxsw_sp *mlxsw_sp);
+       u32 (*buffsize_get)(int mtu, u32 speed);
+       int (*policer_id_base_set)(struct mlxsw_sp *mlxsw_sp,
+                                  u16 policer_id_base);
+};
+
 struct mlxsw_sp_span_entry {
        const struct net_device *to_dev;
        const struct mlxsw_sp_span_entry_ops *ops;
@@ -44,7 +62,8 @@ struct mlxsw_sp_span_entry {
 
 struct mlxsw_sp_span_entry_ops {
        bool (*can_handle)(const struct net_device *to_dev);
-       int (*parms_set)(const struct net_device *to_dev,
+       int (*parms_set)(struct mlxsw_sp *mlxsw_sp,
+                        const struct net_device *to_dev,
                         struct mlxsw_sp_span_parms *sparmsp);
        int (*configure)(struct mlxsw_sp_span_entry *span_entry,
                         struct mlxsw_sp_span_parms sparms);
@@ -65,8 +84,8 @@ void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
 int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu);
 void mlxsw_sp_span_speed_update_work(struct work_struct *work);
 
-int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp,
-                           const struct net_device *to_dev, int *p_span_id);
+int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id,
+                           const struct mlxsw_sp_span_agent_parms *parms);
 void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id);
 int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
                                    bool ingress);
@@ -81,5 +100,13 @@ mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
                           enum mlxsw_sp_span_trigger trigger,
                           struct mlxsw_sp_port *mlxsw_sp_port,
                           const struct mlxsw_sp_span_trigger_parms *parms);
+int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port,
+                                enum mlxsw_sp_span_trigger trigger, u8 tc);
+void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
+                                  enum mlxsw_sp_span_trigger trigger, u8 tc);
+
+extern const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops;
+extern const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops;
+extern const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops;
 
 #endif
index 4ff1e62..1e56113 100644 (file)
@@ -281,7 +281,7 @@ static int mlxsw_sib_port_create(struct mlxsw_sib *mlxsw_sib, u8 local_port,
        int err;
 
        err = mlxsw_core_port_init(mlxsw_sib->core, local_port,
-                                  module + 1, false, 0,
+                                  module + 1, false, 0, false, 0,
                                   mlxsw_sib->hw_id, sizeof(mlxsw_sib->hw_id));
        if (err) {
                dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to init core port\n",
index b438f55..6f9a725 100644 (file)
@@ -1107,7 +1107,7 @@ static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
        int err;
 
        err = mlxsw_core_port_init(mlxsw_sx->core, local_port,
-                                  module + 1, false, 0,
+                                  module + 1, false, 0, false, 0,
                                   mlxsw_sx->hw_id, sizeof(mlxsw_sx->hw_id));
        if (err) {
                dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
index 28e6069..3390988 100644 (file)
@@ -107,8 +107,16 @@ enum {
        MLXSW_TRAP_ID_ACL2 = 0x1C2,
        MLXSW_TRAP_ID_DISCARD_INGRESS_ACL = 0x1C3,
        MLXSW_TRAP_ID_DISCARD_EGRESS_ACL = 0x1C4,
+       MLXSW_TRAP_ID_MIRROR_SESSION0 = 0x220,
+       MLXSW_TRAP_ID_MIRROR_SESSION1 = 0x221,
+       MLXSW_TRAP_ID_MIRROR_SESSION2 = 0x222,
+       MLXSW_TRAP_ID_MIRROR_SESSION3 = 0x223,
+       MLXSW_TRAP_ID_MIRROR_SESSION4 = 0x224,
+       MLXSW_TRAP_ID_MIRROR_SESSION5 = 0x225,
+       MLXSW_TRAP_ID_MIRROR_SESSION6 = 0x226,
+       MLXSW_TRAP_ID_MIRROR_SESSION7 = 0x227,
 
-       MLXSW_TRAP_ID_MAX = 0x1FF
+       MLXSW_TRAP_ID_MAX = 0x3FF,
 };
 
 enum mlxsw_event_trap_id {
index 4fe6aed..bb646b6 100644 (file)
@@ -4390,9 +4390,9 @@ static int ksz_alloc_desc(struct dev_info *adapter)
                DESC_ALIGNMENT;
 
        adapter->desc_pool.alloc_virt =
-               pci_zalloc_consistent(adapter->pdev,
-                                     adapter->desc_pool.alloc_size,
-                                     &adapter->desc_pool.dma_addr);
+               dma_alloc_coherent(&adapter->pdev->dev,
+                                  adapter->desc_pool.alloc_size,
+                                  &adapter->desc_pool.dma_addr, GFP_KERNEL);
        if (adapter->desc_pool.alloc_virt == NULL) {
                adapter->desc_pool.alloc_size = 0;
                return 1;
@@ -4431,7 +4431,8 @@ static int ksz_alloc_desc(struct dev_info *adapter)
 static void free_dma_buf(struct dev_info *adapter, struct ksz_dma_buf *dma_buf,
        int direction)
 {
-       pci_unmap_single(adapter->pdev, dma_buf->dma, dma_buf->len, direction);
+       dma_unmap_single(&adapter->pdev->dev, dma_buf->dma, dma_buf->len,
+                        direction);
        dev_kfree_skb(dma_buf->skb);
        dma_buf->skb = NULL;
        dma_buf->dma = 0;
@@ -4456,16 +4457,15 @@ static void ksz_init_rx_buffers(struct dev_info *adapter)
 
                dma_buf = DMA_BUFFER(desc);
                if (dma_buf->skb && dma_buf->len != adapter->mtu)
-                       free_dma_buf(adapter, dma_buf, PCI_DMA_FROMDEVICE);
+                       free_dma_buf(adapter, dma_buf, DMA_FROM_DEVICE);
                dma_buf->len = adapter->mtu;
                if (!dma_buf->skb)
                        dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
                if (dma_buf->skb && !dma_buf->dma)
-                       dma_buf->dma = pci_map_single(
-                               adapter->pdev,
-                               skb_tail_pointer(dma_buf->skb),
-                               dma_buf->len,
-                               PCI_DMA_FROMDEVICE);
+                       dma_buf->dma = dma_map_single(&adapter->pdev->dev,
+                                               skb_tail_pointer(dma_buf->skb),
+                                               dma_buf->len,
+                                               DMA_FROM_DEVICE);
 
                /* Set descriptor. */
                set_rx_buf(desc, dma_buf->dma);
@@ -4543,11 +4543,10 @@ static void ksz_free_desc(struct dev_info *adapter)
 
        /* Free memory. */
        if (adapter->desc_pool.alloc_virt)
-               pci_free_consistent(
-                       adapter->pdev,
-                       adapter->desc_pool.alloc_size,
-                       adapter->desc_pool.alloc_virt,
-                       adapter->desc_pool.dma_addr);
+               dma_free_coherent(&adapter->pdev->dev,
+                                 adapter->desc_pool.alloc_size,
+                                 adapter->desc_pool.alloc_virt,
+                                 adapter->desc_pool.dma_addr);
 
        /* Reset resource pool. */
        adapter->desc_pool.alloc_size = 0;
@@ -4590,12 +4589,10 @@ static void ksz_free_buffers(struct dev_info *adapter,
 static void ksz_free_mem(struct dev_info *adapter)
 {
        /* Free transmit buffers. */
-       ksz_free_buffers(adapter, &adapter->hw.tx_desc_info,
-               PCI_DMA_TODEVICE);
+       ksz_free_buffers(adapter, &adapter->hw.tx_desc_info, DMA_TO_DEVICE);
 
        /* Free receive buffers. */
-       ksz_free_buffers(adapter, &adapter->hw.rx_desc_info,
-               PCI_DMA_FROMDEVICE);
+       ksz_free_buffers(adapter, &adapter->hw.rx_desc_info, DMA_FROM_DEVICE);
 
        /* Free descriptors. */
        ksz_free_desc(adapter);
@@ -4657,9 +4654,8 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev)
 
                dma_buf->len = skb_headlen(skb);
 
-               dma_buf->dma = pci_map_single(
-                       hw_priv->pdev, skb->data, dma_buf->len,
-                       PCI_DMA_TODEVICE);
+               dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data,
+                                             dma_buf->len, DMA_TO_DEVICE);
                set_tx_buf(desc, dma_buf->dma);
                set_tx_len(desc, dma_buf->len);
 
@@ -4676,11 +4672,10 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev)
                        dma_buf = DMA_BUFFER(desc);
                        dma_buf->len = skb_frag_size(this_frag);
 
-                       dma_buf->dma = pci_map_single(
-                               hw_priv->pdev,
-                               skb_frag_address(this_frag),
-                               dma_buf->len,
-                               PCI_DMA_TODEVICE);
+                       dma_buf->dma = dma_map_single(&hw_priv->pdev->dev,
+                                                     skb_frag_address(this_frag),
+                                                     dma_buf->len,
+                                                     DMA_TO_DEVICE);
                        set_tx_buf(desc, dma_buf->dma);
                        set_tx_len(desc, dma_buf->len);
 
@@ -4700,9 +4695,8 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev)
        } else {
                dma_buf->len = len;
 
-               dma_buf->dma = pci_map_single(
-                       hw_priv->pdev, skb->data, dma_buf->len,
-                       PCI_DMA_TODEVICE);
+               dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data,
+                                             dma_buf->len, DMA_TO_DEVICE);
                set_tx_buf(desc, dma_buf->dma);
                set_tx_len(desc, dma_buf->len);
        }
@@ -4756,9 +4750,8 @@ static void transmit_cleanup(struct dev_info *hw_priv, int normal)
                }
 
                dma_buf = DMA_BUFFER(desc);
-               pci_unmap_single(
-                       hw_priv->pdev, dma_buf->dma, dma_buf->len,
-                       PCI_DMA_TODEVICE);
+               dma_unmap_single(&hw_priv->pdev->dev, dma_buf->dma,
+                                dma_buf->len, DMA_TO_DEVICE);
 
                /* This descriptor contains the last buffer in the packet. */
                if (dma_buf->skb) {
@@ -4991,9 +4984,8 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
        packet_len = status.rx.frame_len - 4;
 
        dma_buf = DMA_BUFFER(desc);
-       pci_dma_sync_single_for_cpu(
-               hw_priv->pdev, dma_buf->dma, packet_len + 4,
-               PCI_DMA_FROMDEVICE);
+       dma_sync_single_for_cpu(&hw_priv->pdev->dev, dma_buf->dma,
+                               packet_len + 4, DMA_FROM_DEVICE);
 
        do {
                /* skb->data != skb->head */
@@ -6935,8 +6927,8 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
 
        result = -ENODEV;
 
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
-                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+       if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
+           dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
                return result;
 
        reg_base = pci_resource_start(pdev, 0);
@@ -7155,17 +7147,14 @@ static void pcidev_exit(struct pci_dev *pdev)
        kfree(info);
 }
 
-#ifdef CONFIG_PM
-static int pcidev_resume(struct pci_dev *pdev)
+static int __maybe_unused pcidev_resume(struct device *dev_d)
 {
        int i;
-       struct platform_info *info = pci_get_drvdata(pdev);
+       struct platform_info *info = dev_get_drvdata(dev_d);
        struct dev_info *hw_priv = &info->dev_info;
        struct ksz_hw *hw = &hw_priv->hw;
 
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-       pci_enable_wake(pdev, PCI_D0, 0);
+       device_wakeup_disable(dev_d);
 
        if (hw_priv->wol_enable)
                hw_cfg_wol_pme(hw, 0);
@@ -7182,10 +7171,10 @@ static int pcidev_resume(struct pci_dev *pdev)
        return 0;
 }
 
-static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused pcidev_suspend(struct device *dev_d)
 {
        int i;
-       struct platform_info *info = pci_get_drvdata(pdev);
+       struct platform_info *info = dev_get_drvdata(dev_d);
        struct dev_info *hw_priv = &info->dev_info;
        struct ksz_hw *hw = &hw_priv->hw;
 
@@ -7207,12 +7196,9 @@ static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
                hw_cfg_wol_pme(hw, 1);
        }
 
-       pci_save_state(pdev);
-       pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
+       device_wakeup_enable(dev_d);
        return 0;
 }
-#endif
 
 static char pcidev_name[] = "ksz884xp";
 
@@ -7226,11 +7212,10 @@ static const struct pci_device_id pcidev_table[] = {
 
 MODULE_DEVICE_TABLE(pci, pcidev_table);
 
+static SIMPLE_DEV_PM_OPS(pcidev_pm_ops, pcidev_suspend, pcidev_resume);
+
 static struct pci_driver pci_device_driver = {
-#ifdef CONFIG_PM
-       .suspend        = pcidev_suspend,
-       .resume         = pcidev_resume,
-#endif
+       .driver.pm      = &pcidev_pm_ops,
        .name           = pcidev_name,
        .id_table       = pcidev_table,
        .probe          = pcidev_init,
index c5c5c68..de93cc6 100644 (file)
@@ -807,26 +807,29 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
        data |= MAC_CR_CNTR_RST_;
        lan743x_csr_write(adapter, MAC_CR, data);
 
-       mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH);
-       mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL);
-       adapter->mac_address[0] = mac_addr_lo & 0xFF;
-       adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF;
-       adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF;
-       adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF;
-       adapter->mac_address[4] = mac_addr_hi & 0xFF;
-       adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF;
-
-       if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) &&
-           mac_addr_lo == 0xFFFFFFFF) {
-               mac_address_valid = false;
-       } else if (!is_valid_ether_addr(adapter->mac_address)) {
-               mac_address_valid = false;
-       }
-
-       if (!mac_address_valid)
-               eth_random_addr(adapter->mac_address);
+       if (!is_valid_ether_addr(adapter->mac_address)) {
+               mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH);
+               mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL);
+               adapter->mac_address[0] = mac_addr_lo & 0xFF;
+               adapter->mac_address[1] = (mac_addr_lo >> 8) & 0xFF;
+               adapter->mac_address[2] = (mac_addr_lo >> 16) & 0xFF;
+               adapter->mac_address[3] = (mac_addr_lo >> 24) & 0xFF;
+               adapter->mac_address[4] = mac_addr_hi & 0xFF;
+               adapter->mac_address[5] = (mac_addr_hi >> 8) & 0xFF;
+
+               if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) &&
+                   mac_addr_lo == 0xFFFFFFFF) {
+                       mac_address_valid = false;
+               } else if (!is_valid_ether_addr(adapter->mac_address)) {
+                       mac_address_valid = false;
+               }
+
+               if (!mac_address_valid)
+                       eth_random_addr(adapter->mac_address);
+       }
        lan743x_mac_set_address(adapter, adapter->mac_address);
        ether_addr_copy(netdev->dev_addr, adapter->mac_address);
+
        return 0;
 }
 
@@ -1739,10 +1742,9 @@ done:
 static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx)
 {
        if (tx->head_cpu_ptr) {
-               pci_free_consistent(tx->adapter->pdev,
-                                   sizeof(*tx->head_cpu_ptr),
-                                   (void *)(tx->head_cpu_ptr),
-                                   tx->head_dma_ptr);
+               dma_free_coherent(&tx->adapter->pdev->dev,
+                                 sizeof(*tx->head_cpu_ptr), tx->head_cpu_ptr,
+                                 tx->head_dma_ptr);
                tx->head_cpu_ptr = NULL;
                tx->head_dma_ptr = 0;
        }
@@ -1750,10 +1752,9 @@ static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx)
        tx->buffer_info = NULL;
 
        if (tx->ring_cpu_ptr) {
-               pci_free_consistent(tx->adapter->pdev,
-                                   tx->ring_allocation_size,
-                                   tx->ring_cpu_ptr,
-                                   tx->ring_dma_ptr);
+               dma_free_coherent(&tx->adapter->pdev->dev,
+                                 tx->ring_allocation_size, tx->ring_cpu_ptr,
+                                 tx->ring_dma_ptr);
                tx->ring_allocation_size = 0;
                tx->ring_cpu_ptr = NULL;
                tx->ring_dma_ptr = 0;
@@ -1777,8 +1778,8 @@ static int lan743x_tx_ring_init(struct lan743x_tx *tx)
                                     sizeof(struct lan743x_tx_descriptor),
                                     PAGE_SIZE);
        dma_ptr = 0;
-       cpu_ptr = pci_zalloc_consistent(tx->adapter->pdev,
-                                       ring_allocation_size, &dma_ptr);
+       cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
+                                    ring_allocation_size, &dma_ptr, GFP_KERNEL);
        if (!cpu_ptr) {
                ret = -ENOMEM;
                goto cleanup;
@@ -1795,8 +1796,9 @@ static int lan743x_tx_ring_init(struct lan743x_tx *tx)
        }
        tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr;
        dma_ptr = 0;
-       cpu_ptr = pci_zalloc_consistent(tx->adapter->pdev,
-                                       sizeof(*tx->head_cpu_ptr), &dma_ptr);
+       cpu_ptr = dma_alloc_coherent(&tx->adapter->pdev->dev,
+                                    sizeof(*tx->head_cpu_ptr), &dma_ptr,
+                                    GFP_KERNEL);
        if (!cpu_ptr) {
                ret = -ENOMEM;
                goto cleanup;
@@ -2044,14 +2046,13 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
 {
        struct skb_shared_hwtstamps *hwtstamps = NULL;
        int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
+       int current_head_index = *rx->head_cpu_ptr;
        struct lan743x_rx_buffer_info *buffer_info;
        struct lan743x_rx_descriptor *descriptor;
-       int current_head_index = -1;
        int extension_index = -1;
        int first_index = -1;
        int last_index = -1;
 
-       current_head_index = *rx->head_cpu_ptr;
        if (current_head_index < 0 || current_head_index >= rx->ring_size)
                goto done;
 
@@ -2278,10 +2279,9 @@ static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx)
        }
 
        if (rx->head_cpu_ptr) {
-               pci_free_consistent(rx->adapter->pdev,
-                                   sizeof(*rx->head_cpu_ptr),
-                                   rx->head_cpu_ptr,
-                                   rx->head_dma_ptr);
+               dma_free_coherent(&rx->adapter->pdev->dev,
+                                 sizeof(*rx->head_cpu_ptr), rx->head_cpu_ptr,
+                                 rx->head_dma_ptr);
                rx->head_cpu_ptr = NULL;
                rx->head_dma_ptr = 0;
        }
@@ -2290,10 +2290,9 @@ static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx)
        rx->buffer_info = NULL;
 
        if (rx->ring_cpu_ptr) {
-               pci_free_consistent(rx->adapter->pdev,
-                                   rx->ring_allocation_size,
-                                   rx->ring_cpu_ptr,
-                                   rx->ring_dma_ptr);
+               dma_free_coherent(&rx->adapter->pdev->dev,
+                                 rx->ring_allocation_size, rx->ring_cpu_ptr,
+                                 rx->ring_dma_ptr);
                rx->ring_allocation_size = 0;
                rx->ring_cpu_ptr = NULL;
                rx->ring_dma_ptr = 0;
@@ -2324,8 +2323,8 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
                                     sizeof(struct lan743x_rx_descriptor),
                                     PAGE_SIZE);
        dma_ptr = 0;
-       cpu_ptr = pci_zalloc_consistent(rx->adapter->pdev,
-                                       ring_allocation_size, &dma_ptr);
+       cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
+                                    ring_allocation_size, &dma_ptr, GFP_KERNEL);
        if (!cpu_ptr) {
                ret = -ENOMEM;
                goto cleanup;
@@ -2342,8 +2341,9 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
        }
        rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr;
        dma_ptr = 0;
-       cpu_ptr = pci_zalloc_consistent(rx->adapter->pdev,
-                                       sizeof(*rx->head_cpu_ptr), &dma_ptr);
+       cpu_ptr = dma_alloc_coherent(&rx->adapter->pdev->dev,
+                                    sizeof(*rx->head_cpu_ptr), &dma_ptr,
+                                    GFP_KERNEL);
        if (!cpu_ptr) {
                ret = -ENOMEM;
                goto cleanup;
@@ -2817,6 +2817,7 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
 {
        struct lan743x_adapter *adapter = NULL;
        struct net_device *netdev = NULL;
+       const void *mac_addr;
        int ret = -ENODEV;
 
        netdev = devm_alloc_etherdev(&pdev->dev,
@@ -2833,6 +2834,10 @@ static int lan743x_pcidev_probe(struct pci_dev *pdev,
                              NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;
        netdev->max_mtu = LAN743X_MAX_FRAME_SIZE;
 
+       mac_addr = of_get_mac_address(pdev->dev.of_node);
+       if (!IS_ERR(mac_addr))
+               ether_addr_copy(adapter->mac_address, mac_addr);
+
        ret = lan743x_pci_init(adapter, pdev);
        if (ret)
                goto return_error;
@@ -3091,6 +3096,8 @@ static const struct pci_device_id lan743x_pcidev_tbl[] = {
        { 0, }
 };
 
+MODULE_DEVICE_TABLE(pci, lan743x_pcidev_tbl);
+
 static struct pci_driver lan743x_pcidev_driver = {
        .name     = DRIVER_NAME,
        .id_table = lan743x_pcidev_tbl,
index bcec058..ee7bb7e 100644 (file)
@@ -11,22 +11,24 @@ config NET_VENDOR_MICROSEMI
 
 if NET_VENDOR_MICROSEMI
 
+# Users should depend on NET_SWITCHDEV, HAS_IOMEM
+config MSCC_OCELOT_SWITCH_LIB
+       select REGMAP_MMIO
+       select PHYLIB
+       tristate
+       help
+         This is a hardware support library for Ocelot network switches. It is
+         used by switchdev as well as by DSA drivers.
+
 config MSCC_OCELOT_SWITCH
        tristate "Ocelot switch driver"
        depends on NET_SWITCHDEV
        depends on HAS_IOMEM
-       select PHYLIB
-       select REGMAP_MMIO
-       help
-         This driver supports the Ocelot network switch device.
-
-config MSCC_OCELOT_SWITCH_OCELOT
-       tristate "Ocelot switch driver on Ocelot"
-       depends on MSCC_OCELOT_SWITCH
-       depends on GENERIC_PHY
        depends on OF_NET
+       select MSCC_OCELOT_SWITCH_LIB
+       select GENERIC_PHY
        help
          This driver supports the Ocelot network switch device as present on
-         the Ocelot SoCs.
+         the Ocelot SoCs (VSC7514).
 
 endif # NET_VENDOR_MICROSEMI
index 91b33b5..58f94c3 100644 (file)
@@ -1,5 +1,13 @@
 # SPDX-License-Identifier: (GPL-2.0 OR MIT)
-obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot_common.o
-mscc_ocelot_common-y := ocelot.o ocelot_io.o
-mscc_ocelot_common-y += ocelot_regs.o ocelot_tc.o ocelot_police.o ocelot_ace.o ocelot_flower.o ocelot_ptp.o
-obj-$(CONFIG_MSCC_OCELOT_SWITCH_OCELOT) += ocelot_board.o
+obj-$(CONFIG_MSCC_OCELOT_SWITCH_LIB) += mscc_ocelot_switch_lib.o
+mscc_ocelot_switch_lib-y := \
+       ocelot.o \
+       ocelot_io.o \
+       ocelot_police.o \
+       ocelot_vcap.o \
+       ocelot_flower.o \
+       ocelot_ptp.o
+obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o
+mscc_ocelot-y := \
+       ocelot_vsc7514.o \
+       ocelot_net.o
index 9cfe1fd..f2d94b0 100644 (file)
@@ -4,42 +4,13 @@
  *
  * Copyright (c) 2017 Microsemi Corporation
  */
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
 #include <linux/if_bridge.h>
-#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/phy.h>
-#include <linux/skbuff.h>
-#include <linux/iopoll.h>
-#include <net/arp.h>
-#include <net/netevent.h>
-#include <net/rtnetlink.h>
-#include <net/switchdev.h>
-
 #include "ocelot.h"
-#include "ocelot_ace.h"
+#include "ocelot_vcap.h"
 
 #define TABLE_UPDATE_SLEEP_US 10
 #define TABLE_UPDATE_TIMEOUT_US 100000
 
-/* MAC table entry types.
- * ENTRYTYPE_NORMAL is subject to aging.
- * ENTRYTYPE_LOCKED is not subject to aging.
- * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast.
- * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast.
- */
-enum macaccess_entry_type {
-       ENTRYTYPE_NORMAL = 0,
-       ENTRYTYPE_LOCKED,
-       ENTRYTYPE_MACv4,
-       ENTRYTYPE_MACv6,
-};
-
 struct ocelot_mact_entry {
        u8 mac[ETH_ALEN];
        u16 vid;
@@ -84,10 +55,9 @@ static void ocelot_mact_select(struct ocelot *ocelot,
 
 }
 
-static int ocelot_mact_learn(struct ocelot *ocelot, int port,
-                            const unsigned char mac[ETH_ALEN],
-                            unsigned int vid,
-                            enum macaccess_entry_type type)
+int ocelot_mact_learn(struct ocelot *ocelot, int port,
+                     const unsigned char mac[ETH_ALEN],
+                     unsigned int vid, enum macaccess_entry_type type)
 {
        ocelot_mact_select(ocelot, mac, vid);
 
@@ -100,10 +70,10 @@ static int ocelot_mact_learn(struct ocelot *ocelot, int port,
 
        return ocelot_mact_wait_for_completion(ocelot);
 }
+EXPORT_SYMBOL(ocelot_mact_learn);
 
-static int ocelot_mact_forget(struct ocelot *ocelot,
-                             const unsigned char mac[ETH_ALEN],
-                             unsigned int vid)
+int ocelot_mact_forget(struct ocelot *ocelot,
+                      const unsigned char mac[ETH_ALEN], unsigned int vid)
 {
        ocelot_mact_select(ocelot, mac, vid);
 
@@ -114,6 +84,7 @@ static int ocelot_mact_forget(struct ocelot *ocelot,
 
        return ocelot_mact_wait_for_completion(ocelot);
 }
+EXPORT_SYMBOL(ocelot_mact_forget);
 
 static void ocelot_mact_init(struct ocelot *ocelot)
 {
@@ -168,20 +139,6 @@ static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
        return ocelot_vlant_wait_for_completion(ocelot);
 }
 
-static void ocelot_vlan_mode(struct ocelot *ocelot, int port,
-                            netdev_features_t features)
-{
-       u32 val;
-
-       /* Filtering */
-       val = ocelot_read(ocelot, ANA_VLANMASK);
-       if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
-               val |= BIT(port);
-       else
-               val &= ~BIT(port);
-       ocelot_write(ocelot, val, ANA_VLANMASK);
-}
-
 static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
                                       u16 vid)
 {
@@ -295,26 +252,6 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
 }
 EXPORT_SYMBOL(ocelot_vlan_add);
 
-static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
-                              bool untagged)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot_port *ocelot_port = &priv->port;
-       struct ocelot *ocelot = ocelot_port->ocelot;
-       int port = priv->chip_port;
-       int ret;
-
-       ret = ocelot_vlan_add(ocelot, port, vid, pvid, untagged);
-       if (ret)
-               return ret;
-
-       /* Add the port MAC address to with the right VLAN information */
-       ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid,
-                         ENTRYTYPE_LOCKED);
-
-       return 0;
-}
-
 int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
 {
        struct ocelot_port *ocelot_port = ocelot->ports[port];
@@ -338,30 +275,6 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
 }
 EXPORT_SYMBOL(ocelot_vlan_del);
 
-static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot *ocelot = priv->port.ocelot;
-       int port = priv->chip_port;
-       int ret;
-
-       /* 8021q removes VID 0 on module unload for all interfaces
-        * with VLAN filtering feature. We need to keep it to receive
-        * untagged traffic.
-        */
-       if (vid == 0)
-               return 0;
-
-       ret = ocelot_vlan_del(ocelot, port, vid);
-       if (ret)
-               return ret;
-
-       /* Del the port MAC address to with the right VLAN information */
-       ocelot_mact_forget(ocelot, dev->dev_addr, vid);
-
-       return 0;
-}
-
 static void ocelot_vlan_init(struct ocelot *ocelot)
 {
        u16 port, vid;
@@ -396,18 +309,6 @@ static void ocelot_vlan_init(struct ocelot *ocelot)
        }
 }
 
-/* Watermark encode
- * Bit 8:   Unit; 0:1, 1:16
- * Bit 7-0: Value to be multiplied with unit
- */
-static u16 ocelot_wm_enc(u16 value)
-{
-       if (value >= BIT(8))
-               return BIT(8) | (value / 16);
-
-       return value;
-}
-
 void ocelot_adjust_link(struct ocelot *ocelot, int port,
                        struct phy_device *phydev)
 {
@@ -476,10 +377,8 @@ void ocelot_adjust_link(struct ocelot *ocelot, int port,
                         ANA_PFC_PFC_CFG, port);
 
        /* Core: Enable port for frame transfer */
-       ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
-                        QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
-                        QSYS_SWITCH_PORT_MODE_PORT_ENA,
-                        QSYS_SWITCH_PORT_MODE, port);
+       ocelot_fields_write(ocelot, port,
+                           QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
 
        /* Flow control */
        ocelot_write_rix(ocelot, SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
@@ -492,15 +391,6 @@ void ocelot_adjust_link(struct ocelot *ocelot, int port,
 }
 EXPORT_SYMBOL(ocelot_adjust_link);
 
-static void ocelot_port_adjust_link(struct net_device *dev)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot *ocelot = priv->port.ocelot;
-       int port = priv->chip_port;
-
-       ocelot_adjust_link(ocelot, port, dev->phydev);
-}
-
 void ocelot_port_enable(struct ocelot *ocelot, int port,
                        struct phy_device *phy)
 {
@@ -514,85 +404,15 @@ void ocelot_port_enable(struct ocelot *ocelot, int port,
 }
 EXPORT_SYMBOL(ocelot_port_enable);
 
-static int ocelot_port_open(struct net_device *dev)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot_port *ocelot_port = &priv->port;
-       struct ocelot *ocelot = ocelot_port->ocelot;
-       int port = priv->chip_port;
-       int err;
-
-       if (priv->serdes) {
-               err = phy_set_mode_ext(priv->serdes, PHY_MODE_ETHERNET,
-                                      ocelot_port->phy_mode);
-               if (err) {
-                       netdev_err(dev, "Could not set mode of SerDes\n");
-                       return err;
-               }
-       }
-
-       err = phy_connect_direct(dev, priv->phy, &ocelot_port_adjust_link,
-                                ocelot_port->phy_mode);
-       if (err) {
-               netdev_err(dev, "Could not attach to PHY\n");
-               return err;
-       }
-
-       dev->phydev = priv->phy;
-
-       phy_attached_info(priv->phy);
-       phy_start(priv->phy);
-
-       ocelot_port_enable(ocelot, port, priv->phy);
-
-       return 0;
-}
-
 void ocelot_port_disable(struct ocelot *ocelot, int port)
 {
        struct ocelot_port *ocelot_port = ocelot->ports[port];
 
        ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG);
-       ocelot_rmw_rix(ocelot, 0, QSYS_SWITCH_PORT_MODE_PORT_ENA,
-                      QSYS_SWITCH_PORT_MODE, port);
+       ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);
 }
 EXPORT_SYMBOL(ocelot_port_disable);
 
-static int ocelot_port_stop(struct net_device *dev)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot *ocelot = priv->port.ocelot;
-       int port = priv->chip_port;
-
-       phy_disconnect(priv->phy);
-
-       dev->phydev = NULL;
-
-       ocelot_port_disable(ocelot, port);
-
-       return 0;
-}
-
-/* Generate the IFH for frame injection
- *
- * The IFH is a 128bit-value
- * bit 127: bypass the analyzer processing
- * bit 56-67: destination mask
- * bit 28-29: pop_cnt: 3 disables all rewriting of the frame
- * bit 20-27: cpu extraction queue mask
- * bit 16: tag type 0: C-tag, 1: S-tag
- * bit 0-11: VID
- */
-static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
-{
-       ifh[0] = IFH_INJ_BYPASS | ((0x1ff & info->rew_op) << 21);
-       ifh[1] = (0xf00 & info->port) >> 8;
-       ifh[2] = (0xff & info->port) << 24;
-       ifh[3] = (info->tag_type << 16) | info->vid;
-
-       return 0;
-}
-
 int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port,
                                 struct sk_buff *skb)
 {
@@ -611,77 +431,6 @@ int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port,
 }
 EXPORT_SYMBOL(ocelot_port_add_txtstamp_skb);
 
-static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct skb_shared_info *shinfo = skb_shinfo(skb);
-       struct ocelot_port *ocelot_port = &priv->port;
-       struct ocelot *ocelot = ocelot_port->ocelot;
-       u32 val, ifh[OCELOT_TAG_LEN / 4];
-       struct frame_info info = {};
-       u8 grp = 0; /* Send everything on CPU group 0 */
-       unsigned int i, count, last;
-       int port = priv->chip_port;
-
-       val = ocelot_read(ocelot, QS_INJ_STATUS);
-       if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))) ||
-           (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp))))
-               return NETDEV_TX_BUSY;
-
-       ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
-                        QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
-
-       info.port = BIT(port);
-       info.tag_type = IFH_TAG_TYPE_C;
-       info.vid = skb_vlan_tag_get(skb);
-
-       /* Check if timestamping is needed */
-       if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) {
-               info.rew_op = ocelot_port->ptp_cmd;
-               if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
-                       info.rew_op |= (ocelot_port->ts_id  % 4) << 3;
-       }
-
-       ocelot_gen_ifh(ifh, &info);
-
-       for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
-               ocelot_write_rix(ocelot, (__force u32)cpu_to_be32(ifh[i]),
-                                QS_INJ_WR, grp);
-
-       count = (skb->len + 3) / 4;
-       last = skb->len % 4;
-       for (i = 0; i < count; i++) {
-               ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp);
-       }
-
-       /* Add padding */
-       while (i < (OCELOT_BUFFER_CELL_SZ / 4)) {
-               ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
-               i++;
-       }
-
-       /* Indicate EOF and valid bytes in last word */
-       ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
-                        QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) |
-                        QS_INJ_CTRL_EOF,
-                        QS_INJ_CTRL, grp);
-
-       /* Add dummy CRC */
-       ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
-       skb_tx_timestamp(skb);
-
-       dev->stats.tx_packets++;
-       dev->stats.tx_bytes += skb->len;
-
-       if (!ocelot_port_add_txtstamp_skb(ocelot_port, skb)) {
-               ocelot_port->ts_id++;
-               return NETDEV_TX_OK;
-       }
-
-       dev_kfree_skb_any(skb);
-       return NETDEV_TX_OK;
-}
-
 static void ocelot_get_hwtimestamp(struct ocelot *ocelot,
                                   struct timespec64 *ts)
 {
@@ -767,117 +516,14 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
 }
 EXPORT_SYMBOL(ocelot_get_txtstamp);
 
-static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot_port *ocelot_port = &priv->port;
-       struct ocelot *ocelot = ocelot_port->ocelot;
-
-       return ocelot_mact_forget(ocelot, addr, ocelot_port->pvid);
-}
-
-static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot_port *ocelot_port = &priv->port;
-       struct ocelot *ocelot = ocelot_port->ocelot;
-
-       return ocelot_mact_learn(ocelot, PGID_CPU, addr, ocelot_port->pvid,
-                                ENTRYTYPE_LOCKED);
-}
-
-static void ocelot_set_rx_mode(struct net_device *dev)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot *ocelot = priv->port.ocelot;
-       u32 val;
-       int i;
-
-       /* This doesn't handle promiscuous mode because the bridge core is
-        * setting IFF_PROMISC on all slave interfaces and all frames would be
-        * forwarded to the CPU port.
-        */
-       val = GENMASK(ocelot->num_phys_ports - 1, 0);
-       for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++)
-               ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
-
-       __dev_mc_sync(dev, ocelot_mc_sync, ocelot_mc_unsync);
-}
-
-static int ocelot_port_get_phys_port_name(struct net_device *dev,
-                                         char *buf, size_t len)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       int port = priv->chip_port;
-       int ret;
-
-       ret = snprintf(buf, len, "p%d", port);
-       if (ret >= len)
-               return -EINVAL;
-
-       return 0;
-}
-
-static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot_port *ocelot_port = &priv->port;
-       struct ocelot *ocelot = ocelot_port->ocelot;
-       const struct sockaddr *addr = p;
-
-       /* Learn the new net device MAC address in the mac table. */
-       ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, ocelot_port->pvid,
-                         ENTRYTYPE_LOCKED);
-       /* Then forget the previous one. */
-       ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid);
-
-       ether_addr_copy(dev->dev_addr, addr->sa_data);
-       return 0;
-}
-
-static void ocelot_get_stats64(struct net_device *dev,
-                              struct rtnl_link_stats64 *stats)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot *ocelot = priv->port.ocelot;
-       int port = priv->chip_port;
-
-       /* Configure the port to read the stats from */
-       ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port),
-                    SYS_STAT_CFG);
-
-       /* Get Rx stats */
-       stats->rx_bytes = ocelot_read(ocelot, SYS_COUNT_RX_OCTETS);
-       stats->rx_packets = ocelot_read(ocelot, SYS_COUNT_RX_SHORTS) +
-                           ocelot_read(ocelot, SYS_COUNT_RX_FRAGMENTS) +
-                           ocelot_read(ocelot, SYS_COUNT_RX_JABBERS) +
-                           ocelot_read(ocelot, SYS_COUNT_RX_LONGS) +
-                           ocelot_read(ocelot, SYS_COUNT_RX_64) +
-                           ocelot_read(ocelot, SYS_COUNT_RX_65_127) +
-                           ocelot_read(ocelot, SYS_COUNT_RX_128_255) +
-                           ocelot_read(ocelot, SYS_COUNT_RX_256_1023) +
-                           ocelot_read(ocelot, SYS_COUNT_RX_1024_1526) +
-                           ocelot_read(ocelot, SYS_COUNT_RX_1527_MAX);
-       stats->multicast = ocelot_read(ocelot, SYS_COUNT_RX_MULTICAST);
-       stats->rx_dropped = dev->stats.rx_dropped;
-
-       /* Get Tx stats */
-       stats->tx_bytes = ocelot_read(ocelot, SYS_COUNT_TX_OCTETS);
-       stats->tx_packets = ocelot_read(ocelot, SYS_COUNT_TX_64) +
-                           ocelot_read(ocelot, SYS_COUNT_TX_65_127) +
-                           ocelot_read(ocelot, SYS_COUNT_TX_128_511) +
-                           ocelot_read(ocelot, SYS_COUNT_TX_512_1023) +
-                           ocelot_read(ocelot, SYS_COUNT_TX_1024_1526) +
-                           ocelot_read(ocelot, SYS_COUNT_TX_1527_MAX);
-       stats->tx_dropped = ocelot_read(ocelot, SYS_COUNT_TX_DROPS) +
-                           ocelot_read(ocelot, SYS_COUNT_TX_AGING);
-       stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION);
-}
-
 int ocelot_fdb_add(struct ocelot *ocelot, int port,
                   const unsigned char *addr, u16 vid)
 {
        struct ocelot_port *ocelot_port = ocelot->ports[port];
+       int pgid = port;
+
+       if (port == ocelot->npi)
+               pgid = PGID_CPU;
 
        if (!vid) {
                if (!ocelot_port->vlan_aware)
@@ -893,23 +539,10 @@ int ocelot_fdb_add(struct ocelot *ocelot, int port,
                        return -EINVAL;
        }
 
-       return ocelot_mact_learn(ocelot, port, addr, vid, ENTRYTYPE_LOCKED);
+       return ocelot_mact_learn(ocelot, pgid, addr, vid, ENTRYTYPE_LOCKED);
 }
 EXPORT_SYMBOL(ocelot_fdb_add);
 
-static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
-                              struct net_device *dev,
-                              const unsigned char *addr,
-                              u16 vid, u16 flags,
-                              struct netlink_ext_ack *extack)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot *ocelot = priv->port.ocelot;
-       int port = priv->chip_port;
-
-       return ocelot_fdb_add(ocelot, port, addr, vid);
-}
-
 int ocelot_fdb_del(struct ocelot *ocelot, int port,
                   const unsigned char *addr, u16 vid)
 {
@@ -917,26 +550,8 @@ int ocelot_fdb_del(struct ocelot *ocelot, int port,
 }
 EXPORT_SYMBOL(ocelot_fdb_del);
 
-static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
-                              struct net_device *dev,
-                              const unsigned char *addr, u16 vid)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot *ocelot = priv->port.ocelot;
-       int port = priv->chip_port;
-
-       return ocelot_fdb_del(ocelot, port, addr, vid);
-}
-
-struct ocelot_dump_ctx {
-       struct net_device *dev;
-       struct sk_buff *skb;
-       struct netlink_callback *cb;
-       int idx;
-};
-
-static int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
-                                  bool is_static, void *data)
+int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
+                           bool is_static, void *data)
 {
        struct ocelot_dump_ctx *dump = data;
        u32 portid = NETLINK_CB(dump->cb->skb).portid;
@@ -977,6 +592,7 @@ nla_put_failure:
        nlmsg_cancel(dump->skb, nlh);
        return -EMSGSIZE;
 }
+EXPORT_SYMBOL(ocelot_port_fdb_do_dump);
 
 static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
                            struct ocelot_mact_entry *entry)
@@ -1058,74 +674,6 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port,
 }
 EXPORT_SYMBOL(ocelot_fdb_dump);
 
-static int ocelot_port_fdb_dump(struct sk_buff *skb,
-                               struct netlink_callback *cb,
-                               struct net_device *dev,
-                               struct net_device *filter_dev, int *idx)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot *ocelot = priv->port.ocelot;
-       struct ocelot_dump_ctx dump = {
-               .dev = dev,
-               .skb = skb,
-               .cb = cb,
-               .idx = *idx,
-       };
-       int port = priv->chip_port;
-       int ret;
-
-       ret = ocelot_fdb_dump(ocelot, port, ocelot_port_fdb_do_dump, &dump);
-
-       *idx = dump.idx;
-
-       return ret;
-}
-
-static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
-                                 u16 vid)
-{
-       return ocelot_vlan_vid_add(dev, vid, false, false);
-}
-
-static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
-                                  u16 vid)
-{
-       return ocelot_vlan_vid_del(dev, vid);
-}
-
-static int ocelot_set_features(struct net_device *dev,
-                              netdev_features_t features)
-{
-       netdev_features_t changed = dev->features ^ features;
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot *ocelot = priv->port.ocelot;
-       int port = priv->chip_port;
-
-       if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
-           priv->tc.offload_cnt) {
-               netdev_err(dev,
-                          "Cannot disable HW TC offload while offloads active\n");
-               return -EBUSY;
-       }
-
-       if (changed & NETIF_F_HW_VLAN_CTAG_FILTER)
-               ocelot_vlan_mode(ocelot, port, features);
-
-       return 0;
-}
-
-static int ocelot_get_port_parent_id(struct net_device *dev,
-                                    struct netdev_phys_item_id *ppid)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot *ocelot = priv->port.ocelot;
-
-       ppid->id_len = sizeof(ocelot->base_mac);
-       memcpy(&ppid->id, &ocelot->base_mac, ppid->id_len);
-
-       return 0;
-}
-
 int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
 {
        return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
@@ -1198,46 +746,6 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
 }
 EXPORT_SYMBOL(ocelot_hwstamp_set);
 
-static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot *ocelot = priv->port.ocelot;
-       int port = priv->chip_port;
-
-       /* If the attached PHY device isn't capable of timestamping operations,
-        * use our own (when possible).
-        */
-       if (!phy_has_hwtstamp(dev->phydev) && ocelot->ptp) {
-               switch (cmd) {
-               case SIOCSHWTSTAMP:
-                       return ocelot_hwstamp_set(ocelot, port, ifr);
-               case SIOCGHWTSTAMP:
-                       return ocelot_hwstamp_get(ocelot, port, ifr);
-               }
-       }
-
-       return phy_mii_ioctl(dev->phydev, ifr, cmd);
-}
-
-static const struct net_device_ops ocelot_port_netdev_ops = {
-       .ndo_open                       = ocelot_port_open,
-       .ndo_stop                       = ocelot_port_stop,
-       .ndo_start_xmit                 = ocelot_port_xmit,
-       .ndo_set_rx_mode                = ocelot_set_rx_mode,
-       .ndo_get_phys_port_name         = ocelot_port_get_phys_port_name,
-       .ndo_set_mac_address            = ocelot_port_set_mac_address,
-       .ndo_get_stats64                = ocelot_get_stats64,
-       .ndo_fdb_add                    = ocelot_port_fdb_add,
-       .ndo_fdb_del                    = ocelot_port_fdb_del,
-       .ndo_fdb_dump                   = ocelot_port_fdb_dump,
-       .ndo_vlan_rx_add_vid            = ocelot_vlan_rx_add_vid,
-       .ndo_vlan_rx_kill_vid           = ocelot_vlan_rx_kill_vid,
-       .ndo_set_features               = ocelot_set_features,
-       .ndo_get_port_parent_id         = ocelot_get_port_parent_id,
-       .ndo_setup_tc                   = ocelot_setup_tc,
-       .ndo_do_ioctl                   = ocelot_ioctl,
-};
-
 void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
 {
        int i;
@@ -1251,16 +759,6 @@ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
 }
 EXPORT_SYMBOL(ocelot_get_strings);
 
-static void ocelot_port_get_strings(struct net_device *netdev, u32 sset,
-                                   u8 *data)
-{
-       struct ocelot_port_private *priv = netdev_priv(netdev);
-       struct ocelot *ocelot = priv->port.ocelot;
-       int port = priv->chip_port;
-
-       ocelot_get_strings(ocelot, port, sset, data);
-}
-
 static void ocelot_update_stats(struct ocelot *ocelot)
 {
        int i, j;
@@ -1314,17 +812,6 @@ void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
 }
 EXPORT_SYMBOL(ocelot_get_ethtool_stats);
 
-static void ocelot_port_get_ethtool_stats(struct net_device *dev,
-                                         struct ethtool_stats *stats,
-                                         u64 *data)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot *ocelot = priv->port.ocelot;
-       int port = priv->chip_port;
-
-       ocelot_get_ethtool_stats(ocelot, port, data);
-}
-
 int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
 {
        if (sset != ETH_SS_STATS)
@@ -1334,15 +821,6 @@ int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
 }
 EXPORT_SYMBOL(ocelot_get_sset_count);
 
-static int ocelot_port_get_sset_count(struct net_device *dev, int sset)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot *ocelot = priv->port.ocelot;
-       int port = priv->chip_port;
-
-       return ocelot_get_sset_count(ocelot, port, sset);
-}
-
 int ocelot_get_ts_info(struct ocelot *ocelot, int port,
                       struct ethtool_ts_info *info)
 {
@@ -1368,28 +846,6 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port,
 }
 EXPORT_SYMBOL(ocelot_get_ts_info);
 
-static int ocelot_port_get_ts_info(struct net_device *dev,
-                                  struct ethtool_ts_info *info)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot *ocelot = priv->port.ocelot;
-       int port = priv->chip_port;
-
-       if (!ocelot->ptp)
-               return ethtool_op_get_ts_info(dev, info);
-
-       return ocelot_get_ts_info(ocelot, port, info);
-}
-
-static const struct ethtool_ops ocelot_ethtool_ops = {
-       .get_strings            = ocelot_port_get_strings,
-       .get_ethtool_stats      = ocelot_port_get_ethtool_stats,
-       .get_sset_count         = ocelot_port_get_sset_count,
-       .get_link_ksettings     = phy_ethtool_get_link_ksettings,
-       .set_link_ksettings     = phy_ethtool_set_link_ksettings,
-       .get_ts_info            = ocelot_port_get_ts_info,
-};
-
 void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
 {
        u32 port_cfg;
@@ -1445,16 +901,6 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
 }
 EXPORT_SYMBOL(ocelot_bridge_stp_state_set);
 
-static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
-                                          struct switchdev_trans *trans,
-                                          u8 state)
-{
-       if (switchdev_trans_ph_prepare(trans))
-               return;
-
-       ocelot_bridge_stp_state_set(ocelot, port, state);
-}
-
 void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs)
 {
        unsigned int age_period = ANA_AUTOAGE_AGE_PERIOD(msecs / 2000);
@@ -1469,165 +915,142 @@ void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs)
 }
 EXPORT_SYMBOL(ocelot_set_ageing_time);
 
-static void ocelot_port_attr_ageing_set(struct ocelot *ocelot, int port,
-                                       unsigned long ageing_clock_t)
-{
-       unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
-       u32 ageing_time = jiffies_to_msecs(ageing_jiffies);
-
-       ocelot_set_ageing_time(ocelot, ageing_time);
-}
-
-static void ocelot_port_attr_mc_set(struct ocelot *ocelot, int port, bool mc)
+static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot,
+                                                    const unsigned char *addr,
+                                                    u16 vid)
 {
-       u32 cpu_fwd_mcast = ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA |
-                           ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA |
-                           ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA;
-       u32 val = 0;
+       struct ocelot_multicast *mc;
 
-       if (mc)
-               val = cpu_fwd_mcast;
+       list_for_each_entry(mc, &ocelot->multicast, list) {
+               if (ether_addr_equal(mc->addr, addr) && mc->vid == vid)
+                       return mc;
+       }
 
-       ocelot_rmw_gix(ocelot, val, cpu_fwd_mcast,
-                      ANA_PORT_CPU_FWD_CFG, port);
+       return NULL;
 }
 
-static int ocelot_port_attr_set(struct net_device *dev,
-                               const struct switchdev_attr *attr,
-                               struct switchdev_trans *trans)
+static enum macaccess_entry_type ocelot_classify_mdb(const unsigned char *addr)
 {
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot *ocelot = priv->port.ocelot;
-       int port = priv->chip_port;
-       int err = 0;
-
-       switch (attr->id) {
-       case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
-               ocelot_port_attr_stp_state_set(ocelot, port, trans,
-                                              attr->u.stp_state);
-               break;
-       case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
-               ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time);
-               break;
-       case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
-               ocelot_port_vlan_filtering(ocelot, port,
-                                          attr->u.vlan_filtering);
-               break;
-       case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
-               ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled);
-               break;
-       default:
-               err = -EOPNOTSUPP;
-               break;
-       }
-
-       return err;
+       if (addr[0] == 0x01 && addr[1] == 0x00 && addr[2] == 0x5e)
+               return ENTRYTYPE_MACv4;
+       if (addr[0] == 0x33 && addr[1] == 0x33)
+               return ENTRYTYPE_MACv6;
+       return ENTRYTYPE_NORMAL;
 }
 
-static int ocelot_port_obj_add_vlan(struct net_device *dev,
-                                   const struct switchdev_obj_port_vlan *vlan,
-                                   struct switchdev_trans *trans)
+static int ocelot_mdb_get_pgid(struct ocelot *ocelot,
+                              enum macaccess_entry_type entry_type)
 {
-       int ret;
-       u16 vid;
+       int pgid;
 
-       for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
-               ret = ocelot_vlan_vid_add(dev, vid,
-                                         vlan->flags & BRIDGE_VLAN_INFO_PVID,
-                                         vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
+       /* According to VSC7514 datasheet 3.9.1.5 IPv4 Multicast Entries and
+        * 3.9.1.6 IPv6 Multicast Entries, "Instead of a lookup in the
+        * destination mask table (PGID), the destination set is programmed as
+        * part of the entry MAC address.", and the DEST_IDX is set to 0.
+        */
+       if (entry_type == ENTRYTYPE_MACv4 ||
+           entry_type == ENTRYTYPE_MACv6)
+               return 0;
 
-static int ocelot_port_vlan_del_vlan(struct net_device *dev,
-                                    const struct switchdev_obj_port_vlan *vlan)
-{
-       int ret;
-       u16 vid;
+       for_each_nonreserved_multicast_dest_pgid(ocelot, pgid) {
+               struct ocelot_multicast *mc;
+               bool used = false;
 
-       for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
-               ret = ocelot_vlan_vid_del(dev, vid);
+               list_for_each_entry(mc, &ocelot->multicast, list) {
+                       if (mc->pgid == pgid) {
+                               used = true;
+                               break;
+                       }
+               }
 
-               if (ret)
-                       return ret;
+               if (!used)
+                       return pgid;
        }
 
-       return 0;
+       return -1;
 }
 
-static struct ocelot_multicast *ocelot_multicast_get(struct ocelot *ocelot,
-                                                    const unsigned char *addr,
-                                                    u16 vid)
+static void ocelot_encode_ports_to_mdb(unsigned char *addr,
+                                      struct ocelot_multicast *mc,
+                                      enum macaccess_entry_type entry_type)
 {
-       struct ocelot_multicast *mc;
+       memcpy(addr, mc->addr, ETH_ALEN);
 
-       list_for_each_entry(mc, &ocelot->multicast, list) {
-               if (ether_addr_equal(mc->addr, addr) && mc->vid == vid)
-                       return mc;
+       if (entry_type == ENTRYTYPE_MACv4) {
+               addr[0] = 0;
+               addr[1] = mc->ports >> 8;
+               addr[2] = mc->ports & 0xff;
+       } else if (entry_type == ENTRYTYPE_MACv6) {
+               addr[0] = mc->ports >> 8;
+               addr[1] = mc->ports & 0xff;
        }
-
-       return NULL;
 }
 
-static int ocelot_port_obj_add_mdb(struct net_device *dev,
-                                  const struct switchdev_obj_port_mdb *mdb,
-                                  struct switchdev_trans *trans)
+int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
+                       const struct switchdev_obj_port_mdb *mdb)
 {
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot_port *ocelot_port = &priv->port;
-       struct ocelot *ocelot = ocelot_port->ocelot;
+       struct ocelot_port *ocelot_port = ocelot->ports[port];
+       enum macaccess_entry_type entry_type;
        unsigned char addr[ETH_ALEN];
        struct ocelot_multicast *mc;
-       int port = priv->chip_port;
        u16 vid = mdb->vid;
        bool new = false;
 
+       if (port == ocelot->npi)
+               port = ocelot->num_phys_ports;
+
        if (!vid)
                vid = ocelot_port->pvid;
 
+       entry_type = ocelot_classify_mdb(mdb->addr);
+
        mc = ocelot_multicast_get(ocelot, mdb->addr, vid);
        if (!mc) {
+               int pgid = ocelot_mdb_get_pgid(ocelot, entry_type);
+
+               if (pgid < 0) {
+                       dev_err(ocelot->dev,
+                               "No more PGIDs available for mdb %pM vid %d\n",
+                               mdb->addr, vid);
+                       return -ENOSPC;
+               }
+
                mc = devm_kzalloc(ocelot->dev, sizeof(*mc), GFP_KERNEL);
                if (!mc)
                        return -ENOMEM;
 
                memcpy(mc->addr, mdb->addr, ETH_ALEN);
                mc->vid = vid;
+               mc->pgid = pgid;
 
                list_add_tail(&mc->list, &ocelot->multicast);
                new = true;
        }
 
-       memcpy(addr, mc->addr, ETH_ALEN);
-       addr[0] = 0;
-
        if (!new) {
-               addr[2] = mc->ports << 0;
-               addr[1] = mc->ports << 8;
+               ocelot_encode_ports_to_mdb(addr, mc, entry_type);
                ocelot_mact_forget(ocelot, addr, vid);
        }
 
        mc->ports |= BIT(port);
-       addr[2] = mc->ports << 0;
-       addr[1] = mc->ports << 8;
+       ocelot_encode_ports_to_mdb(addr, mc, entry_type);
 
-       return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4);
+       return ocelot_mact_learn(ocelot, mc->pgid, addr, vid, entry_type);
 }
+EXPORT_SYMBOL(ocelot_port_mdb_add);
 
-static int ocelot_port_obj_del_mdb(struct net_device *dev,
-                                  const struct switchdev_obj_port_mdb *mdb)
+int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
+                       const struct switchdev_obj_port_mdb *mdb)
 {
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot_port *ocelot_port = &priv->port;
-       struct ocelot *ocelot = ocelot_port->ocelot;
+       struct ocelot_port *ocelot_port = ocelot->ports[port];
+       enum macaccess_entry_type entry_type;
        unsigned char addr[ETH_ALEN];
        struct ocelot_multicast *mc;
-       int port = priv->chip_port;
        u16 vid = mdb->vid;
 
+       if (port == ocelot->npi)
+               port = ocelot->num_phys_ports;
+
        if (!vid)
                vid = ocelot_port->pvid;
 
@@ -1635,10 +1058,9 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev,
        if (!mc)
                return -ENOENT;
 
-       memcpy(addr, mc->addr, ETH_ALEN);
-       addr[2] = mc->ports << 0;
-       addr[1] = mc->ports << 8;
-       addr[0] = 0;
+       entry_type = ocelot_classify_mdb(mdb->addr);
+
+       ocelot_encode_ports_to_mdb(addr, mc, entry_type);
        ocelot_mact_forget(ocelot, addr, vid);
 
        mc->ports &= ~BIT(port);
@@ -1648,55 +1070,11 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev,
                return 0;
        }
 
-       addr[2] = mc->ports << 0;
-       addr[1] = mc->ports << 8;
-
-       return ocelot_mact_learn(ocelot, 0, addr, vid, ENTRYTYPE_MACv4);
-}
-
-static int ocelot_port_obj_add(struct net_device *dev,
-                              const struct switchdev_obj *obj,
-                              struct switchdev_trans *trans,
-                              struct netlink_ext_ack *extack)
-{
-       int ret = 0;
-
-       switch (obj->id) {
-       case SWITCHDEV_OBJ_ID_PORT_VLAN:
-               ret = ocelot_port_obj_add_vlan(dev,
-                                              SWITCHDEV_OBJ_PORT_VLAN(obj),
-                                              trans);
-               break;
-       case SWITCHDEV_OBJ_ID_PORT_MDB:
-               ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
-                                             trans);
-               break;
-       default:
-               return -EOPNOTSUPP;
-       }
+       ocelot_encode_ports_to_mdb(addr, mc, entry_type);
 
-       return ret;
-}
-
-static int ocelot_port_obj_del(struct net_device *dev,
-                              const struct switchdev_obj *obj)
-{
-       int ret = 0;
-
-       switch (obj->id) {
-       case SWITCHDEV_OBJ_ID_PORT_VLAN:
-               ret = ocelot_port_vlan_del_vlan(dev,
-                                               SWITCHDEV_OBJ_PORT_VLAN(obj));
-               break;
-       case SWITCHDEV_OBJ_ID_PORT_MDB:
-               ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
-               break;
-       default:
-               return -EOPNOTSUPP;
-       }
-
-       return ret;
+       return ocelot_mact_learn(ocelot, mc->pgid, addr, vid, entry_type);
 }
+EXPORT_SYMBOL(ocelot_port_mdb_del);
 
 int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
                            struct net_device *bridge)
@@ -1735,10 +1113,10 @@ static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
        int i, port, lag;
 
        /* Reset destination and aggregation PGIDS */
-       for (port = 0; port < ocelot->num_phys_ports; port++)
+       for_each_unicast_dest_pgid(ocelot, port)
                ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, port);
 
-       for (i = PGID_AGGR; i < PGID_SRC; i++)
+       for_each_aggr_pgid(ocelot, i)
                ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0),
                                 ANA_PGID_PGID, i);
 
@@ -1760,7 +1138,7 @@ static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
                        aggr_count++;
                }
 
-               for (i = PGID_AGGR; i < PGID_SRC; i++) {
+               for_each_aggr_pgid(ocelot, i) {
                        u32 ac;
 
                        ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i);
@@ -1788,8 +1166,8 @@ static void ocelot_setup_lag(struct ocelot *ocelot, int lag)
        }
 }
 
-static int ocelot_port_lag_join(struct ocelot *ocelot, int port,
-                               struct net_device *bond)
+int ocelot_port_lag_join(struct ocelot *ocelot, int port,
+                        struct net_device *bond)
 {
        struct net_device *ndev;
        u32 bond_mask = 0;
@@ -1826,9 +1204,10 @@ static int ocelot_port_lag_join(struct ocelot *ocelot, int port,
 
        return 0;
 }
+EXPORT_SYMBOL(ocelot_port_lag_join);
 
-static void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
-                                 struct net_device *bond)
+void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
+                          struct net_device *bond)
 {
        u32 port_cfg;
        int i;
@@ -1856,151 +1235,7 @@ static void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
 
        ocelot_set_aggr_pgids(ocelot);
 }
-
-/* Checks if the net_device instance given to us originate from our driver. */
-static bool ocelot_netdevice_dev_check(const struct net_device *dev)
-{
-       return dev->netdev_ops == &ocelot_port_netdev_ops;
-}
-
-static int ocelot_netdevice_port_event(struct net_device *dev,
-                                      unsigned long event,
-                                      struct netdev_notifier_changeupper_info *info)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-       struct ocelot_port *ocelot_port = &priv->port;
-       struct ocelot *ocelot = ocelot_port->ocelot;
-       int port = priv->chip_port;
-       int err = 0;
-
-       switch (event) {
-       case NETDEV_CHANGEUPPER:
-               if (netif_is_bridge_master(info->upper_dev)) {
-                       if (info->linking) {
-                               err = ocelot_port_bridge_join(ocelot, port,
-                                                             info->upper_dev);
-                       } else {
-                               err = ocelot_port_bridge_leave(ocelot, port,
-                                                              info->upper_dev);
-                       }
-               }
-               if (netif_is_lag_master(info->upper_dev)) {
-                       if (info->linking)
-                               err = ocelot_port_lag_join(ocelot, port,
-                                                          info->upper_dev);
-                       else
-                               ocelot_port_lag_leave(ocelot, port,
-                                                     info->upper_dev);
-               }
-               break;
-       default:
-               break;
-       }
-
-       return err;
-}
-
-static int ocelot_netdevice_event(struct notifier_block *unused,
-                                 unsigned long event, void *ptr)
-{
-       struct netdev_notifier_changeupper_info *info = ptr;
-       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-       int ret = 0;
-
-       if (!ocelot_netdevice_dev_check(dev))
-               return 0;
-
-       if (event == NETDEV_PRECHANGEUPPER &&
-           netif_is_lag_master(info->upper_dev)) {
-               struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
-               struct netlink_ext_ack *extack;
-
-               if (lag_upper_info &&
-                   lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
-                       extack = netdev_notifier_info_to_extack(&info->info);
-                       NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
-
-                       ret = -EINVAL;
-                       goto notify;
-               }
-       }
-
-       if (netif_is_lag_master(dev)) {
-               struct net_device *slave;
-               struct list_head *iter;
-
-               netdev_for_each_lower_dev(dev, slave, iter) {
-                       ret = ocelot_netdevice_port_event(slave, event, info);
-                       if (ret)
-                               goto notify;
-               }
-       } else {
-               ret = ocelot_netdevice_port_event(dev, event, info);
-       }
-
-notify:
-       return notifier_from_errno(ret);
-}
-
-struct notifier_block ocelot_netdevice_nb __read_mostly = {
-       .notifier_call = ocelot_netdevice_event,
-};
-EXPORT_SYMBOL(ocelot_netdevice_nb);
-
-static int ocelot_switchdev_event(struct notifier_block *unused,
-                                 unsigned long event, void *ptr)
-{
-       struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
-       int err;
-
-       switch (event) {
-       case SWITCHDEV_PORT_ATTR_SET:
-               err = switchdev_handle_port_attr_set(dev, ptr,
-                                                    ocelot_netdevice_dev_check,
-                                                    ocelot_port_attr_set);
-               return notifier_from_errno(err);
-       }
-
-       return NOTIFY_DONE;
-}
-
-struct notifier_block ocelot_switchdev_nb __read_mostly = {
-       .notifier_call = ocelot_switchdev_event,
-};
-EXPORT_SYMBOL(ocelot_switchdev_nb);
-
-static int ocelot_switchdev_blocking_event(struct notifier_block *unused,
-                                          unsigned long event, void *ptr)
-{
-       struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
-       int err;
-
-       switch (event) {
-               /* Blocking events. */
-       case SWITCHDEV_PORT_OBJ_ADD:
-               err = switchdev_handle_port_obj_add(dev, ptr,
-                                                   ocelot_netdevice_dev_check,
-                                                   ocelot_port_obj_add);
-               return notifier_from_errno(err);
-       case SWITCHDEV_PORT_OBJ_DEL:
-               err = switchdev_handle_port_obj_del(dev, ptr,
-                                                   ocelot_netdevice_dev_check,
-                                                   ocelot_port_obj_del);
-               return notifier_from_errno(err);
-       case SWITCHDEV_PORT_ATTR_SET:
-               err = switchdev_handle_port_attr_set(dev, ptr,
-                                                    ocelot_netdevice_dev_check,
-                                                    ocelot_port_attr_set);
-               return notifier_from_errno(err);
-       }
-
-       return NOTIFY_DONE;
-}
-
-struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = {
-       .notifier_call = ocelot_switchdev_blocking_event,
-};
-EXPORT_SYMBOL(ocelot_switchdev_blocking_nb);
+EXPORT_SYMBOL(ocelot_port_lag_leave);
 
 /* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
  * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
@@ -2012,6 +1247,7 @@ void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
 {
        struct ocelot_port *ocelot_port = ocelot->ports[port];
        int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN;
+       int pause_start, pause_stop;
        int atop_wm;
 
        if (port == ocelot->npi) {
@@ -2025,20 +1261,20 @@ void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
 
        ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG);
 
-       /* Set Pause WM hysteresis
-        * 152 = 6 * maxlen / OCELOT_BUFFER_CELL_SZ
-        * 101 = 4 * maxlen / OCELOT_BUFFER_CELL_SZ
-        */
-       ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA |
-                        SYS_PAUSE_CFG_PAUSE_STOP(101) |
-                        SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, port);
+       /* Set Pause watermark hysteresis */
+       pause_start = 6 * maxlen / OCELOT_BUFFER_CELL_SZ;
+       pause_stop = 4 * maxlen / OCELOT_BUFFER_CELL_SZ;
+       ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_START,
+                           pause_start);
+       ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_STOP,
+                           pause_stop);
 
        /* Tail dropping watermark */
        atop_wm = (ocelot->shared_queue_sz - 9 * maxlen) /
                   OCELOT_BUFFER_CELL_SZ;
-       ocelot_write_rix(ocelot, ocelot_wm_enc(9 * maxlen),
+       ocelot_write_rix(ocelot, ocelot->ops->wm_enc(9 * maxlen),
                         SYS_ATOP, port);
-       ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG);
+       ocelot_write(ocelot, ocelot->ops->wm_enc(atop_wm), SYS_ATOP_TOT_CFG);
 }
 EXPORT_SYMBOL(ocelot_port_set_maxlen);
 
@@ -2094,6 +1330,9 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
        ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_HIGH_CFG);
        ocelot_port_writel(ocelot_port, 0, DEV_MAC_FC_MAC_LOW_CFG);
 
+       /* Enable transmission of pause frames */
+       ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
+
        /* Drop frames with multicast source address */
        ocelot_rmw_gix(ocelot, ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
                       ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA,
@@ -2109,52 +1348,6 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
 }
 EXPORT_SYMBOL(ocelot_init_port);
 
-int ocelot_probe_port(struct ocelot *ocelot, u8 port,
-                     void __iomem *regs,
-                     struct phy_device *phy)
-{
-       struct ocelot_port_private *priv;
-       struct ocelot_port *ocelot_port;
-       struct net_device *dev;
-       int err;
-
-       dev = alloc_etherdev(sizeof(struct ocelot_port_private));
-       if (!dev)
-               return -ENOMEM;
-       SET_NETDEV_DEV(dev, ocelot->dev);
-       priv = netdev_priv(dev);
-       priv->dev = dev;
-       priv->phy = phy;
-       priv->chip_port = port;
-       ocelot_port = &priv->port;
-       ocelot_port->ocelot = ocelot;
-       ocelot_port->regs = regs;
-       ocelot->ports[port] = ocelot_port;
-
-       dev->netdev_ops = &ocelot_port_netdev_ops;
-       dev->ethtool_ops = &ocelot_ethtool_ops;
-
-       dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS |
-               NETIF_F_HW_TC;
-       dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
-
-       memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
-       dev->dev_addr[ETH_ALEN - 1] += port;
-       ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid,
-                         ENTRYTYPE_LOCKED);
-
-       ocelot_init_port(ocelot, port);
-
-       err = register_netdev(dev);
-       if (err) {
-               dev_err(ocelot->dev, "register_netdev failed\n");
-               free_netdev(dev);
-       }
-
-       return err;
-}
-EXPORT_SYMBOL(ocelot_probe_port);
-
 /* Configure and enable the CPU port module, which is a set of queues.
  * If @npi contains a valid port index, the CPU port module is connected
  * to the Node Processor Interface (NPI). This is the mode through which
@@ -2188,27 +1381,25 @@ void ocelot_configure_cpu(struct ocelot *ocelot, int npi,
                             QSYS_EXT_CPU_CFG);
 
                /* Enable NPI port */
-               ocelot_write_rix(ocelot,
-                                QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
-                                QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
-                                QSYS_SWITCH_PORT_MODE_PORT_ENA,
-                                QSYS_SWITCH_PORT_MODE, npi);
+               ocelot_fields_write(ocelot, npi,
+                                   QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
                /* NPI port Injection/Extraction configuration */
-               ocelot_write_rix(ocelot,
-                                SYS_PORT_MODE_INCL_XTR_HDR(extraction) |
-                                SYS_PORT_MODE_INCL_INJ_HDR(injection),
-                                SYS_PORT_MODE, npi);
+               ocelot_fields_write(ocelot, npi, SYS_PORT_MODE_INCL_XTR_HDR,
+                                   extraction);
+               ocelot_fields_write(ocelot, npi, SYS_PORT_MODE_INCL_INJ_HDR,
+                                   injection);
+
+               /* Disable transmission of pause frames */
+               ocelot_fields_write(ocelot, npi, SYS_PAUSE_CFG_PAUSE_ENA, 0);
        }
 
        /* Enable CPU port module */
-       ocelot_write_rix(ocelot, QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE |
-                        QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(1) |
-                        QSYS_SWITCH_PORT_MODE_PORT_ENA,
-                        QSYS_SWITCH_PORT_MODE, cpu);
+       ocelot_fields_write(ocelot, cpu, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
        /* CPU port Injection/Extraction configuration */
-       ocelot_write_rix(ocelot, SYS_PORT_MODE_INCL_XTR_HDR(extraction) |
-                        SYS_PORT_MODE_INCL_INJ_HDR(injection),
-                        SYS_PORT_MODE, cpu);
+       ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_XTR_HDR,
+                           extraction);
+       ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_INJ_HDR,
+                           injection);
 
        /* Configure the CPU port to be VLAN aware */
        ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
@@ -2255,7 +1446,7 @@ int ocelot_init(struct ocelot *ocelot)
        INIT_LIST_HEAD(&ocelot->multicast);
        ocelot_mact_init(ocelot);
        ocelot_vlan_init(ocelot);
-       ocelot_ace_init(ocelot);
+       ocelot_vcap_init(ocelot);
 
        for (port = 0; port < ocelot->num_phys_ports; port++) {
                /* Clear all counters (5 groups) */
@@ -2311,7 +1502,7 @@ int ocelot_init(struct ocelot *ocelot)
        }
 
        /* Allow broadcast MAC frames. */
-       for (i = ocelot->num_phys_ports + 1; i < PGID_CPU; i++) {
+       for_each_nonreserved_multicast_dest_pgid(ocelot, i) {
                u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0));
 
                ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
index f0a15aa..dc29e05 100644 (file)
@@ -25,7 +25,6 @@
 #include <soc/mscc/ocelot.h>
 #include "ocelot_rew.h"
 #include "ocelot_qs.h"
-#include "ocelot_tc.h"
 
 #define OCELOT_BUFFER_CELL_SZ 60
 
@@ -47,6 +46,14 @@ struct ocelot_multicast {
        unsigned char addr[ETH_ALEN];
        u16 vid;
        u16 ports;
+       int pgid;
+};
+
+struct ocelot_port_tc {
+       bool block_shared;
+       unsigned long offload_cnt;
+
+       unsigned long police_id;
 };
 
 struct ocelot_port_private {
@@ -60,15 +67,42 @@ struct ocelot_port_private {
        struct ocelot_port_tc tc;
 };
 
+struct ocelot_dump_ctx {
+       struct net_device *dev;
+       struct sk_buff *skb;
+       struct netlink_callback *cb;
+       int idx;
+};
+
+/* MAC table entry types.
+ * ENTRYTYPE_NORMAL is subject to aging.
+ * ENTRYTYPE_LOCKED is not subject to aging.
+ * ENTRYTYPE_MACv4 is not subject to aging. For IPv4 multicast.
+ * ENTRYTYPE_MACv6 is not subject to aging. For IPv6 multicast.
+ */
+enum macaccess_entry_type {
+       ENTRYTYPE_NORMAL = 0,
+       ENTRYTYPE_LOCKED,
+       ENTRYTYPE_MACv4,
+       ENTRYTYPE_MACv6,
+};
+
+int ocelot_port_fdb_do_dump(const unsigned char *addr, u16 vid,
+                           bool is_static, void *data);
+int ocelot_mact_learn(struct ocelot *ocelot, int port,
+                     const unsigned char mac[ETH_ALEN],
+                     unsigned int vid, enum macaccess_entry_type type);
+int ocelot_mact_forget(struct ocelot *ocelot,
+                      const unsigned char mac[ETH_ALEN], unsigned int vid);
+int ocelot_port_lag_join(struct ocelot *ocelot, int port,
+                        struct net_device *bond);
+void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
+                          struct net_device *bond);
+
 u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
 void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
 
-#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val))
-#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val))
-
-int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops);
-int ocelot_probe_port(struct ocelot *ocelot, u8 port,
-                     void __iomem *regs,
+int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
                      struct phy_device *phy);
 
 void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
@@ -79,7 +113,4 @@ extern struct notifier_block ocelot_netdevice_nb;
 extern struct notifier_block ocelot_switchdev_nb;
 extern struct notifier_block ocelot_switchdev_blocking_nb;
 
-#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val))
-#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val))
-
 #endif
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
deleted file mode 100644 (file)
index 4a15d2f..0000000
+++ /dev/null
@@ -1,626 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
-/*
- * Microsemi Ocelot Switch driver
- *
- * Copyright (c) 2017 Microsemi Corporation
- */
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/of_net.h>
-#include <linux/netdevice.h>
-#include <linux/of_mdio.h>
-#include <linux/of_platform.h>
-#include <linux/mfd/syscon.h>
-#include <linux/skbuff.h>
-#include <net/switchdev.h>
-
-#include <soc/mscc/ocelot_vcap.h>
-#include "ocelot.h"
-
-#define IFH_EXTRACT_BITFIELD64(x, o, w) (((x) >> (o)) & GENMASK_ULL((w) - 1, 0))
-#define VSC7514_VCAP_IS2_CNT 64
-#define VSC7514_VCAP_IS2_ENTRY_WIDTH 376
-#define VSC7514_VCAP_IS2_ACTION_WIDTH 99
-#define VSC7514_VCAP_PORT_CNT 11
-
-static int ocelot_parse_ifh(u32 *_ifh, struct frame_info *info)
-{
-       u8 llen, wlen;
-       u64 ifh[2];
-
-       ifh[0] = be64_to_cpu(((__force __be64 *)_ifh)[0]);
-       ifh[1] = be64_to_cpu(((__force __be64 *)_ifh)[1]);
-
-       wlen = IFH_EXTRACT_BITFIELD64(ifh[0], 7,  8);
-       llen = IFH_EXTRACT_BITFIELD64(ifh[0], 15,  6);
-
-       info->len = OCELOT_BUFFER_CELL_SZ * wlen + llen - 80;
-
-       info->timestamp = IFH_EXTRACT_BITFIELD64(ifh[0], 21, 32);
-
-       info->port = IFH_EXTRACT_BITFIELD64(ifh[1], 43, 4);
-
-       info->tag_type = IFH_EXTRACT_BITFIELD64(ifh[1], 16,  1);
-       info->vid = IFH_EXTRACT_BITFIELD64(ifh[1], 0,  12);
-
-       return 0;
-}
-
-static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh,
-                               u32 *rval)
-{
-       u32 val;
-       u32 bytes_valid;
-
-       val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
-       if (val == XTR_NOT_READY) {
-               if (ifh)
-                       return -EIO;
-
-               do {
-                       val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
-               } while (val == XTR_NOT_READY);
-       }
-
-       switch (val) {
-       case XTR_ABORT:
-               return -EIO;
-       case XTR_EOF_0:
-       case XTR_EOF_1:
-       case XTR_EOF_2:
-       case XTR_EOF_3:
-       case XTR_PRUNED:
-               bytes_valid = XTR_VALID_BYTES(val);
-               val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
-               if (val == XTR_ESCAPE)
-                       *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
-               else
-                       *rval = val;
-
-               return bytes_valid;
-       case XTR_ESCAPE:
-               *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
-
-               return 4;
-       default:
-               *rval = val;
-
-               return 4;
-       }
-}
-
-static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
-{
-       struct ocelot *ocelot = arg;
-       int i = 0, grp = 0;
-       int err = 0;
-
-       if (!(ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)))
-               return IRQ_NONE;
-
-       do {
-               struct skb_shared_hwtstamps *shhwtstamps;
-               struct ocelot_port_private *priv;
-               struct ocelot_port *ocelot_port;
-               u64 tod_in_ns, full_ts_in_ns;
-               struct frame_info info = {};
-               struct net_device *dev;
-               u32 ifh[4], val, *buf;
-               struct timespec64 ts;
-               int sz, len, buf_len;
-               struct sk_buff *skb;
-
-               for (i = 0; i < OCELOT_TAG_LEN / 4; i++) {
-                       err = ocelot_rx_frame_word(ocelot, grp, true, &ifh[i]);
-                       if (err != 4)
-                               break;
-               }
-
-               if (err != 4)
-                       break;
-
-               /* At this point the IFH was read correctly, so it is safe to
-                * presume that there is no error. The err needs to be reset
-                * otherwise a frame could come in CPU queue between the while
-                * condition and the check for error later on. And in that case
-                * the new frame is just removed and not processed.
-                */
-               err = 0;
-
-               ocelot_parse_ifh(ifh, &info);
-
-               ocelot_port = ocelot->ports[info.port];
-               priv = container_of(ocelot_port, struct ocelot_port_private,
-                                   port);
-               dev = priv->dev;
-
-               skb = netdev_alloc_skb(dev, info.len);
-
-               if (unlikely(!skb)) {
-                       netdev_err(dev, "Unable to allocate sk_buff\n");
-                       err = -ENOMEM;
-                       break;
-               }
-               buf_len = info.len - ETH_FCS_LEN;
-               buf = (u32 *)skb_put(skb, buf_len);
-
-               len = 0;
-               do {
-                       sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
-                       *buf++ = val;
-                       len += sz;
-               } while (len < buf_len);
-
-               /* Read the FCS */
-               sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
-               /* Update the statistics if part of the FCS was read before */
-               len -= ETH_FCS_LEN - sz;
-
-               if (unlikely(dev->features & NETIF_F_RXFCS)) {
-                       buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
-                       *buf = val;
-               }
-
-               if (sz < 0) {
-                       err = sz;
-                       break;
-               }
-
-               if (ocelot->ptp) {
-                       ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
-
-                       tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
-                       if ((tod_in_ns & 0xffffffff) < info.timestamp)
-                               full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) |
-                                               info.timestamp;
-                       else
-                               full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) |
-                                               info.timestamp;
-
-                       shhwtstamps = skb_hwtstamps(skb);
-                       memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
-                       shhwtstamps->hwtstamp = full_ts_in_ns;
-               }
-
-               /* Everything we see on an interface that is in the HW bridge
-                * has already been forwarded.
-                */
-               if (ocelot->bridge_mask & BIT(info.port))
-                       skb->offload_fwd_mark = 1;
-
-               skb->protocol = eth_type_trans(skb, dev);
-               if (!skb_defer_rx_timestamp(skb))
-                       netif_rx(skb);
-               dev->stats.rx_bytes += len;
-               dev->stats.rx_packets++;
-       } while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp));
-
-       if (err)
-               while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))
-                       ocelot_read_rix(ocelot, QS_XTR_RD, grp);
-
-       return IRQ_HANDLED;
-}
-
-static irqreturn_t ocelot_ptp_rdy_irq_handler(int irq, void *arg)
-{
-       struct ocelot *ocelot = arg;
-
-       ocelot_get_txtstamp(ocelot);
-
-       return IRQ_HANDLED;
-}
-
-static const struct of_device_id mscc_ocelot_match[] = {
-       { .compatible = "mscc,vsc7514-switch" },
-       { }
-};
-MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
-
-static int ocelot_reset(struct ocelot *ocelot)
-{
-       int retries = 100;
-       u32 val;
-
-       regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
-       regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
-
-       do {
-               msleep(1);
-               regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
-                                 &val);
-       } while (val && --retries);
-
-       if (!retries)
-               return -ETIMEDOUT;
-
-       regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
-       regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
-
-       return 0;
-}
-
-static const struct ocelot_ops ocelot_ops = {
-       .reset                  = ocelot_reset,
-};
-
-static const struct vcap_field vsc7514_vcap_is2_keys[] = {
-       /* Common: 46 bits */
-       [VCAP_IS2_TYPE]                         = {  0,   4},
-       [VCAP_IS2_HK_FIRST]                     = {  4,   1},
-       [VCAP_IS2_HK_PAG]                       = {  5,   8},
-       [VCAP_IS2_HK_IGR_PORT_MASK]             = { 13,  12},
-       [VCAP_IS2_HK_RSV2]                      = { 25,   1},
-       [VCAP_IS2_HK_HOST_MATCH]                = { 26,   1},
-       [VCAP_IS2_HK_L2_MC]                     = { 27,   1},
-       [VCAP_IS2_HK_L2_BC]                     = { 28,   1},
-       [VCAP_IS2_HK_VLAN_TAGGED]               = { 29,   1},
-       [VCAP_IS2_HK_VID]                       = { 30,  12},
-       [VCAP_IS2_HK_DEI]                       = { 42,   1},
-       [VCAP_IS2_HK_PCP]                       = { 43,   3},
-       /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
-       [VCAP_IS2_HK_L2_DMAC]                   = { 46,  48},
-       [VCAP_IS2_HK_L2_SMAC]                   = { 94,  48},
-       /* MAC_ETYPE (TYPE=000) */
-       [VCAP_IS2_HK_MAC_ETYPE_ETYPE]           = {142,  16},
-       [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0]     = {158,  16},
-       [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1]     = {174,   8},
-       [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2]     = {182,   3},
-       /* MAC_LLC (TYPE=001) */
-       [VCAP_IS2_HK_MAC_LLC_L2_LLC]            = {142,  40},
-       /* MAC_SNAP (TYPE=010) */
-       [VCAP_IS2_HK_MAC_SNAP_L2_SNAP]          = {142,  40},
-       /* MAC_ARP (TYPE=011) */
-       [VCAP_IS2_HK_MAC_ARP_SMAC]              = { 46,  48},
-       [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK]     = { 94,   1},
-       [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK]    = { 95,   1},
-       [VCAP_IS2_HK_MAC_ARP_LEN_OK]            = { 96,   1},
-       [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH]      = { 97,   1},
-       [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH]      = { 98,   1},
-       [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN]    = { 99,   1},
-       [VCAP_IS2_HK_MAC_ARP_OPCODE]            = {100,   2},
-       [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP]        = {102,  32},
-       [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP]        = {134,  32},
-       [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP]        = {166,   1},
-       /* IP4_TCP_UDP / IP4_OTHER common */
-       [VCAP_IS2_HK_IP4]                       = { 46,   1},
-       [VCAP_IS2_HK_L3_FRAGMENT]               = { 47,   1},
-       [VCAP_IS2_HK_L3_FRAG_OFS_GT0]           = { 48,   1},
-       [VCAP_IS2_HK_L3_OPTIONS]                = { 49,   1},
-       [VCAP_IS2_HK_IP4_L3_TTL_GT0]            = { 50,   1},
-       [VCAP_IS2_HK_L3_TOS]                    = { 51,   8},
-       [VCAP_IS2_HK_L3_IP4_DIP]                = { 59,  32},
-       [VCAP_IS2_HK_L3_IP4_SIP]                = { 91,  32},
-       [VCAP_IS2_HK_DIP_EQ_SIP]                = {123,   1},
-       /* IP4_TCP_UDP (TYPE=100) */
-       [VCAP_IS2_HK_TCP]                       = {124,   1},
-       [VCAP_IS2_HK_L4_SPORT]                  = {125,  16},
-       [VCAP_IS2_HK_L4_DPORT]                  = {141,  16},
-       [VCAP_IS2_HK_L4_RNG]                    = {157,   8},
-       [VCAP_IS2_HK_L4_SPORT_EQ_DPORT]         = {165,   1},
-       [VCAP_IS2_HK_L4_SEQUENCE_EQ0]           = {166,   1},
-       [VCAP_IS2_HK_L4_URG]                    = {167,   1},
-       [VCAP_IS2_HK_L4_ACK]                    = {168,   1},
-       [VCAP_IS2_HK_L4_PSH]                    = {169,   1},
-       [VCAP_IS2_HK_L4_RST]                    = {170,   1},
-       [VCAP_IS2_HK_L4_SYN]                    = {171,   1},
-       [VCAP_IS2_HK_L4_FIN]                    = {172,   1},
-       [VCAP_IS2_HK_L4_1588_DOM]               = {173,   8},
-       [VCAP_IS2_HK_L4_1588_VER]               = {181,   4},
-       /* IP4_OTHER (TYPE=101) */
-       [VCAP_IS2_HK_IP4_L3_PROTO]              = {124,   8},
-       [VCAP_IS2_HK_L3_PAYLOAD]                = {132,  56},
-       /* IP6_STD (TYPE=110) */
-       [VCAP_IS2_HK_IP6_L3_TTL_GT0]            = { 46,   1},
-       [VCAP_IS2_HK_L3_IP6_SIP]                = { 47, 128},
-       [VCAP_IS2_HK_IP6_L3_PROTO]              = {175,   8},
-       /* OAM (TYPE=111) */
-       [VCAP_IS2_HK_OAM_MEL_FLAGS]             = {142,   7},
-       [VCAP_IS2_HK_OAM_VER]                   = {149,   5},
-       [VCAP_IS2_HK_OAM_OPCODE]                = {154,   8},
-       [VCAP_IS2_HK_OAM_FLAGS]                 = {162,   8},
-       [VCAP_IS2_HK_OAM_MEPID]                 = {170,  16},
-       [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0]          = {186,   1},
-       [VCAP_IS2_HK_OAM_IS_Y1731]              = {187,   1},
-};
-
-static const struct vcap_field vsc7514_vcap_is2_actions[] = {
-       [VCAP_IS2_ACT_HIT_ME_ONCE]              = {  0,  1},
-       [VCAP_IS2_ACT_CPU_COPY_ENA]             = {  1,  1},
-       [VCAP_IS2_ACT_CPU_QU_NUM]               = {  2,  3},
-       [VCAP_IS2_ACT_MASK_MODE]                = {  5,  2},
-       [VCAP_IS2_ACT_MIRROR_ENA]               = {  7,  1},
-       [VCAP_IS2_ACT_LRN_DIS]                  = {  8,  1},
-       [VCAP_IS2_ACT_POLICE_ENA]               = {  9,  1},
-       [VCAP_IS2_ACT_POLICE_IDX]               = { 10,  9},
-       [VCAP_IS2_ACT_POLICE_VCAP_ONLY]         = { 19,  1},
-       [VCAP_IS2_ACT_PORT_MASK]                = { 20, 11},
-       [VCAP_IS2_ACT_REW_OP]                   = { 31,  9},
-       [VCAP_IS2_ACT_SMAC_REPLACE_ENA]         = { 40,  1},
-       [VCAP_IS2_ACT_RSV]                      = { 41,  2},
-       [VCAP_IS2_ACT_ACL_ID]                   = { 43,  6},
-       [VCAP_IS2_ACT_HIT_CNT]                  = { 49, 32},
-};
-
-static const struct vcap_props vsc7514_vcap_props[] = {
-       [VCAP_IS2] = {
-               .tg_width = 2,
-               .sw_count = 4,
-               .entry_count = VSC7514_VCAP_IS2_CNT,
-               .entry_width = VSC7514_VCAP_IS2_ENTRY_WIDTH,
-               .action_count = VSC7514_VCAP_IS2_CNT +
-                               VSC7514_VCAP_PORT_CNT + 2,
-               .action_width = 99,
-               .action_type_width = 1,
-               .action_table = {
-                       [IS2_ACTION_TYPE_NORMAL] = {
-                               .width = 49,
-                               .count = 2
-                       },
-                       [IS2_ACTION_TYPE_SMAC_SIP] = {
-                               .width = 6,
-                               .count = 4
-                       },
-               },
-               .counter_words = 4,
-               .counter_width = 32,
-       },
-};
-
-static struct ptp_clock_info ocelot_ptp_clock_info = {
-       .owner          = THIS_MODULE,
-       .name           = "ocelot ptp",
-       .max_adj        = 0x7fffffff,
-       .n_alarm        = 0,
-       .n_ext_ts       = 0,
-       .n_per_out      = OCELOT_PTP_PINS_NUM,
-       .n_pins         = OCELOT_PTP_PINS_NUM,
-       .pps            = 0,
-       .gettime64      = ocelot_ptp_gettime64,
-       .settime64      = ocelot_ptp_settime64,
-       .adjtime        = ocelot_ptp_adjtime,
-       .adjfine        = ocelot_ptp_adjfine,
-       .verify         = ocelot_ptp_verify,
-       .enable         = ocelot_ptp_enable,
-};
-
-static int mscc_ocelot_probe(struct platform_device *pdev)
-{
-       struct device_node *np = pdev->dev.of_node;
-       struct device_node *ports, *portnp;
-       int err, irq_xtr, irq_ptp_rdy;
-       struct ocelot *ocelot;
-       struct regmap *hsio;
-       unsigned int i;
-
-       struct {
-               enum ocelot_target id;
-               char *name;
-               u8 optional:1;
-       } io_target[] = {
-               { SYS, "sys" },
-               { REW, "rew" },
-               { QSYS, "qsys" },
-               { ANA, "ana" },
-               { QS, "qs" },
-               { S2, "s2" },
-               { PTP, "ptp", 1 },
-       };
-
-       if (!np && !pdev->dev.platform_data)
-               return -ENODEV;
-
-       ocelot = devm_kzalloc(&pdev->dev, sizeof(*ocelot), GFP_KERNEL);
-       if (!ocelot)
-               return -ENOMEM;
-
-       platform_set_drvdata(pdev, ocelot);
-       ocelot->dev = &pdev->dev;
-
-       for (i = 0; i < ARRAY_SIZE(io_target); i++) {
-               struct regmap *target;
-               struct resource *res;
-
-               res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-                                                  io_target[i].name);
-
-               target = ocelot_regmap_init(ocelot, res);
-               if (IS_ERR(target)) {
-                       if (io_target[i].optional) {
-                               ocelot->targets[io_target[i].id] = NULL;
-                               continue;
-                       }
-                       return PTR_ERR(target);
-               }
-
-               ocelot->targets[io_target[i].id] = target;
-       }
-
-       hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
-       if (IS_ERR(hsio)) {
-               dev_err(&pdev->dev, "missing hsio syscon\n");
-               return PTR_ERR(hsio);
-       }
-
-       ocelot->targets[HSIO] = hsio;
-
-       err = ocelot_chip_init(ocelot, &ocelot_ops);
-       if (err)
-               return err;
-
-       irq_xtr = platform_get_irq_byname(pdev, "xtr");
-       if (irq_xtr < 0)
-               return -ENODEV;
-
-       err = devm_request_threaded_irq(&pdev->dev, irq_xtr, NULL,
-                                       ocelot_xtr_irq_handler, IRQF_ONESHOT,
-                                       "frame extraction", ocelot);
-       if (err)
-               return err;
-
-       irq_ptp_rdy = platform_get_irq_byname(pdev, "ptp_rdy");
-       if (irq_ptp_rdy > 0 && ocelot->targets[PTP]) {
-               err = devm_request_threaded_irq(&pdev->dev, irq_ptp_rdy, NULL,
-                                               ocelot_ptp_rdy_irq_handler,
-                                               IRQF_ONESHOT, "ptp ready",
-                                               ocelot);
-               if (err)
-                       return err;
-
-               /* Both the PTP interrupt and the PTP bank are available */
-               ocelot->ptp = 1;
-       }
-
-       ports = of_get_child_by_name(np, "ethernet-ports");
-       if (!ports) {
-               dev_err(&pdev->dev, "no ethernet-ports child node found\n");
-               return -ENODEV;
-       }
-
-       ocelot->num_phys_ports = of_get_child_count(ports);
-
-       ocelot->ports = devm_kcalloc(&pdev->dev, ocelot->num_phys_ports,
-                                    sizeof(struct ocelot_port *), GFP_KERNEL);
-
-       ocelot->vcap_is2_keys = vsc7514_vcap_is2_keys;
-       ocelot->vcap_is2_actions = vsc7514_vcap_is2_actions;
-       ocelot->vcap = vsc7514_vcap_props;
-
-       ocelot_init(ocelot);
-       if (ocelot->ptp) {
-               err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
-               if (err) {
-                       dev_err(ocelot->dev,
-                               "Timestamp initialization failed\n");
-                       ocelot->ptp = 0;
-               }
-       }
-
-       /* No NPI port */
-       ocelot_configure_cpu(ocelot, -1, OCELOT_TAG_PREFIX_NONE,
-                            OCELOT_TAG_PREFIX_NONE);
-
-       for_each_available_child_of_node(ports, portnp) {
-               struct ocelot_port_private *priv;
-               struct ocelot_port *ocelot_port;
-               struct device_node *phy_node;
-               phy_interface_t phy_mode;
-               struct phy_device *phy;
-               struct resource *res;
-               struct phy *serdes;
-               void __iomem *regs;
-               char res_name[8];
-               u32 port;
-
-               if (of_property_read_u32(portnp, "reg", &port))
-                       continue;
-
-               snprintf(res_name, sizeof(res_name), "port%d", port);
-
-               res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-                                                  res_name);
-               regs = devm_ioremap_resource(&pdev->dev, res);
-               if (IS_ERR(regs))
-                       continue;
-
-               phy_node = of_parse_phandle(portnp, "phy-handle", 0);
-               if (!phy_node)
-                       continue;
-
-               phy = of_phy_find_device(phy_node);
-               of_node_put(phy_node);
-               if (!phy)
-                       continue;
-
-               err = ocelot_probe_port(ocelot, port, regs, phy);
-               if (err) {
-                       of_node_put(portnp);
-                       goto out_put_ports;
-               }
-
-               ocelot_port = ocelot->ports[port];
-               priv = container_of(ocelot_port, struct ocelot_port_private,
-                                   port);
-
-               of_get_phy_mode(portnp, &phy_mode);
-
-               ocelot_port->phy_mode = phy_mode;
-
-               switch (ocelot_port->phy_mode) {
-               case PHY_INTERFACE_MODE_NA:
-                       continue;
-               case PHY_INTERFACE_MODE_SGMII:
-                       break;
-               case PHY_INTERFACE_MODE_QSGMII:
-                       /* Ensure clock signals and speed is set on all
-                        * QSGMII links
-                        */
-                       ocelot_port_writel(ocelot_port,
-                                          DEV_CLOCK_CFG_LINK_SPEED
-                                          (OCELOT_SPEED_1000),
-                                          DEV_CLOCK_CFG);
-                       break;
-               default:
-                       dev_err(ocelot->dev,
-                               "invalid phy mode for port%d, (Q)SGMII only\n",
-                               port);
-                       of_node_put(portnp);
-                       err = -EINVAL;
-                       goto out_put_ports;
-               }
-
-               serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
-               if (IS_ERR(serdes)) {
-                       err = PTR_ERR(serdes);
-                       if (err == -EPROBE_DEFER)
-                               dev_dbg(ocelot->dev, "deferring probe\n");
-                       else
-                               dev_err(ocelot->dev,
-                                       "missing SerDes phys for port%d\n",
-                                       port);
-
-                       of_node_put(portnp);
-                       goto out_put_ports;
-               }
-
-               priv->serdes = serdes;
-       }
-
-       register_netdevice_notifier(&ocelot_netdevice_nb);
-       register_switchdev_notifier(&ocelot_switchdev_nb);
-       register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
-
-       dev_info(&pdev->dev, "Ocelot switch probed\n");
-
-out_put_ports:
-       of_node_put(ports);
-       return err;
-}
-
-static int mscc_ocelot_remove(struct platform_device *pdev)
-{
-       struct ocelot *ocelot = platform_get_drvdata(pdev);
-
-       ocelot_deinit_timestamp(ocelot);
-       ocelot_deinit(ocelot);
-       unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
-       unregister_switchdev_notifier(&ocelot_switchdev_nb);
-       unregister_netdevice_notifier(&ocelot_netdevice_nb);
-
-       return 0;
-}
-
-static struct platform_driver mscc_ocelot_driver = {
-       .probe = mscc_ocelot_probe,
-       .remove = mscc_ocelot_remove,
-       .driver = {
-               .name = "ocelot-switch",
-               .of_match_table = mscc_ocelot_match,
-       },
-};
-
-module_platform_driver(mscc_ocelot_driver);
-
-MODULE_DESCRIPTION("Microsemi Ocelot switch driver");
-MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@bootlin.com>");
-MODULE_LICENSE("Dual MIT/GPL");
index 5ce172e..ec1b6e2 100644 (file)
@@ -6,13 +6,12 @@
 #include <net/pkt_cls.h>
 #include <net/tc_act/tc_gact.h>
 
-#include "ocelot_ace.h"
+#include "ocelot_vcap.h"
 
 static int ocelot_flower_parse_action(struct flow_cls_offload *f,
-                                     struct ocelot_ace_rule *ace)
+                                     struct ocelot_vcap_filter *filter)
 {
        const struct flow_action_entry *a;
-       s64 burst;
        u64 rate;
        int i;
 
@@ -26,17 +25,16 @@ static int ocelot_flower_parse_action(struct flow_cls_offload *f,
        flow_action_for_each(i, a, &f->rule->action) {
                switch (a->id) {
                case FLOW_ACTION_DROP:
-                       ace->action = OCELOT_ACL_ACTION_DROP;
+                       filter->action = OCELOT_VCAP_ACTION_DROP;
                        break;
                case FLOW_ACTION_TRAP:
-                       ace->action = OCELOT_ACL_ACTION_TRAP;
+                       filter->action = OCELOT_VCAP_ACTION_TRAP;
                        break;
                case FLOW_ACTION_POLICE:
-                       ace->action = OCELOT_ACL_ACTION_POLICE;
+                       filter->action = OCELOT_VCAP_ACTION_POLICE;
                        rate = a->police.rate_bytes_ps;
-                       ace->pol.rate = div_u64(rate, 1000) * 8;
-                       burst = rate * PSCHED_NS2TICKS(a->police.burst);
-                       ace->pol.burst = div_u64(burst, PSCHED_TICKS_PER_SEC);
+                       filter->pol.rate = div_u64(rate, 1000) * 8;
+                       filter->pol.burst = a->police.burst;
                        break;
                default:
                        return -EOPNOTSUPP;
@@ -47,7 +45,7 @@ static int ocelot_flower_parse_action(struct flow_cls_offload *f,
 }
 
 static int ocelot_flower_parse(struct flow_cls_offload *f,
-                              struct ocelot_ace_rule *ace)
+                              struct ocelot_vcap_filter *filter)
 {
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
        struct flow_dissector *dissector = rule->match.dissector;
@@ -88,14 +86,14 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
                        return -EOPNOTSUPP;
 
                flow_rule_match_eth_addrs(rule, &match);
-               ace->type = OCELOT_ACE_TYPE_ETYPE;
-               ether_addr_copy(ace->frame.etype.dmac.value,
+               filter->key_type = OCELOT_VCAP_KEY_ETYPE;
+               ether_addr_copy(filter->key.etype.dmac.value,
                                match.key->dst);
-               ether_addr_copy(ace->frame.etype.smac.value,
+               ether_addr_copy(filter->key.etype.smac.value,
                                match.key->src);
-               ether_addr_copy(ace->frame.etype.dmac.mask,
+               ether_addr_copy(filter->key.etype.dmac.mask,
                                match.mask->dst);
-               ether_addr_copy(ace->frame.etype.smac.mask,
+               ether_addr_copy(filter->key.etype.smac.mask,
                                match.mask->src);
                goto finished_key_parsing;
        }
@@ -105,18 +103,18 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
 
                flow_rule_match_basic(rule, &match);
                if (ntohs(match.key->n_proto) == ETH_P_IP) {
-                       ace->type = OCELOT_ACE_TYPE_IPV4;
-                       ace->frame.ipv4.proto.value[0] =
+                       filter->key_type = OCELOT_VCAP_KEY_IPV4;
+                       filter->key.ipv4.proto.value[0] =
                                match.key->ip_proto;
-                       ace->frame.ipv4.proto.mask[0] =
+                       filter->key.ipv4.proto.mask[0] =
                                match.mask->ip_proto;
                        match_protocol = false;
                }
                if (ntohs(match.key->n_proto) == ETH_P_IPV6) {
-                       ace->type = OCELOT_ACE_TYPE_IPV6;
-                       ace->frame.ipv6.proto.value[0] =
+                       filter->key_type = OCELOT_VCAP_KEY_IPV6;
+                       filter->key.ipv6.proto.value[0] =
                                match.key->ip_proto;
-                       ace->frame.ipv6.proto.mask[0] =
+                       filter->key.ipv6.proto.mask[0] =
                                match.mask->ip_proto;
                        match_protocol = false;
                }
@@ -128,16 +126,16 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
                u8 *tmp;
 
                flow_rule_match_ipv4_addrs(rule, &match);
-               tmp = &ace->frame.ipv4.sip.value.addr[0];
+               tmp = &filter->key.ipv4.sip.value.addr[0];
                memcpy(tmp, &match.key->src, 4);
 
-               tmp = &ace->frame.ipv4.sip.mask.addr[0];
+               tmp = &filter->key.ipv4.sip.mask.addr[0];
                memcpy(tmp, &match.mask->src, 4);
 
-               tmp = &ace->frame.ipv4.dip.value.addr[0];
+               tmp = &filter->key.ipv4.dip.value.addr[0];
                memcpy(tmp, &match.key->dst, 4);
 
-               tmp = &ace->frame.ipv4.dip.mask.addr[0];
+               tmp = &filter->key.ipv4.dip.mask.addr[0];
                memcpy(tmp, &match.mask->dst, 4);
                match_protocol = false;
        }
@@ -151,10 +149,10 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
                struct flow_match_ports match;
 
                flow_rule_match_ports(rule, &match);
-               ace->frame.ipv4.sport.value = ntohs(match.key->src);
-               ace->frame.ipv4.sport.mask = ntohs(match.mask->src);
-               ace->frame.ipv4.dport.value = ntohs(match.key->dst);
-               ace->frame.ipv4.dport.mask = ntohs(match.mask->dst);
+               filter->key.ipv4.sport.value = ntohs(match.key->src);
+               filter->key.ipv4.sport.mask = ntohs(match.mask->src);
+               filter->key.ipv4.dport.value = ntohs(match.key->dst);
+               filter->key.ipv4.dport.mask = ntohs(match.mask->dst);
                match_protocol = false;
        }
 
@@ -162,11 +160,11 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
                struct flow_match_vlan match;
 
                flow_rule_match_vlan(rule, &match);
-               ace->type = OCELOT_ACE_TYPE_ANY;
-               ace->vlan.vid.value = match.key->vlan_id;
-               ace->vlan.vid.mask = match.mask->vlan_id;
-               ace->vlan.pcp.value[0] = match.key->vlan_priority;
-               ace->vlan.pcp.mask[0] = match.mask->vlan_priority;
+               filter->key_type = OCELOT_VCAP_KEY_ANY;
+               filter->vlan.vid.value = match.key->vlan_id;
+               filter->vlan.vid.mask = match.mask->vlan_id;
+               filter->vlan.pcp.value[0] = match.key->vlan_priority;
+               filter->vlan.pcp.mask[0] = match.mask->vlan_priority;
                match_protocol = false;
        }
 
@@ -175,99 +173,77 @@ finished_key_parsing:
                /* TODO: support SNAP, LLC etc */
                if (proto < ETH_P_802_3_MIN)
                        return -EOPNOTSUPP;
-               ace->type = OCELOT_ACE_TYPE_ETYPE;
-               *(u16 *)ace->frame.etype.etype.value = htons(proto);
-               *(u16 *)ace->frame.etype.etype.mask = 0xffff;
+               filter->key_type = OCELOT_VCAP_KEY_ETYPE;
+               *(__be16 *)filter->key.etype.etype.value = htons(proto);
+               *(__be16 *)filter->key.etype.etype.mask = htons(0xffff);
        }
-       /* else, a rule of type OCELOT_ACE_TYPE_ANY is implicitly added */
+       /* else, a filter of type OCELOT_VCAP_KEY_ANY is implicitly added */
 
-       ace->prio = f->common.prio;
-       ace->id = f->cookie;
-       return ocelot_flower_parse_action(f, ace);
+       filter->prio = f->common.prio;
+       filter->id = f->cookie;
+       return ocelot_flower_parse_action(f, filter);
 }
 
-static
-struct ocelot_ace_rule *ocelot_ace_rule_create(struct ocelot *ocelot, int port,
-                                              struct flow_cls_offload *f)
+static struct ocelot_vcap_filter
+*ocelot_vcap_filter_create(struct ocelot *ocelot, int port,
+                        struct flow_cls_offload *f)
 {
-       struct ocelot_ace_rule *ace;
+       struct ocelot_vcap_filter *filter;
 
-       ace = kzalloc(sizeof(*ace), GFP_KERNEL);
-       if (!ace)
+       filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+       if (!filter)
                return NULL;
 
-       ace->ingress_port_mask = BIT(port);
-       return ace;
+       filter->ingress_port_mask = BIT(port);
+       return filter;
 }
 
 int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
                              struct flow_cls_offload *f, bool ingress)
 {
-       struct ocelot_ace_rule *ace;
+       struct ocelot_vcap_filter *filter;
        int ret;
 
-       ace = ocelot_ace_rule_create(ocelot, port, f);
-       if (!ace)
+       filter = ocelot_vcap_filter_create(ocelot, port, f);
+       if (!filter)
                return -ENOMEM;
 
-       ret = ocelot_flower_parse(f, ace);
+       ret = ocelot_flower_parse(f, filter);
        if (ret) {
-               kfree(ace);
+               kfree(filter);
                return ret;
        }
 
-       return ocelot_ace_rule_offload_add(ocelot, ace, f->common.extack);
+       return ocelot_vcap_filter_add(ocelot, filter, f->common.extack);
 }
 EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace);
 
 int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
                              struct flow_cls_offload *f, bool ingress)
 {
-       struct ocelot_ace_rule ace;
+       struct ocelot_vcap_filter filter;
 
-       ace.prio = f->common.prio;
-       ace.id = f->cookie;
+       filter.prio = f->common.prio;
+       filter.id = f->cookie;
 
-       return ocelot_ace_rule_offload_del(ocelot, &ace);
+       return ocelot_vcap_filter_del(ocelot, &filter);
 }
 EXPORT_SYMBOL_GPL(ocelot_cls_flower_destroy);
 
 int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
                            struct flow_cls_offload *f, bool ingress)
 {
-       struct ocelot_ace_rule ace;
+       struct ocelot_vcap_filter filter;
        int ret;
 
-       ace.prio = f->common.prio;
-       ace.id = f->cookie;
-       ret = ocelot_ace_rule_stats_update(ocelot, &ace);
+       filter.prio = f->common.prio;
+       filter.id = f->cookie;
+       ret = ocelot_vcap_filter_stats_update(ocelot, &filter);
        if (ret)
                return ret;
 
-       flow_stats_update(&f->stats, 0x0, ace.stats.pkts, 0x0,
+       flow_stats_update(&f->stats, 0x0, filter.stats.pkts, 0, 0x0,
                          FLOW_ACTION_HW_STATS_IMMEDIATE);
        return 0;
 }
 EXPORT_SYMBOL_GPL(ocelot_cls_flower_stats);
-
-int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
-                              struct flow_cls_offload *f,
-                              bool ingress)
-{
-       struct ocelot *ocelot = priv->port.ocelot;
-       int port = priv->chip_port;
-
-       if (!ingress)
-               return -EOPNOTSUPP;
-
-       switch (f->command) {
-       case FLOW_CLS_REPLACE:
-               return ocelot_cls_flower_replace(ocelot, port, f, ingress);
-       case FLOW_CLS_DESTROY:
-               return ocelot_cls_flower_destroy(ocelot, port, f, ingress);
-       case FLOW_CLS_STATS:
-               return ocelot_cls_flower_stats(ocelot, port, f, ingress);
-       default:
-               return -EOPNOTSUPP;
-       }
-}
index b229b1c..d227112 100644 (file)
@@ -49,13 +49,25 @@ EXPORT_SYMBOL(__ocelot_rmw_ix);
 
 u32 ocelot_port_readl(struct ocelot_port *port, u32 reg)
 {
-       return readl(port->regs + reg);
+       struct ocelot *ocelot = port->ocelot;
+       u16 target = reg >> TARGET_OFFSET;
+       u32 val;
+
+       WARN_ON(!target);
+
+       regmap_read(port->target, ocelot->map[target][reg & REG_MASK], &val);
+       return val;
 }
 EXPORT_SYMBOL(ocelot_port_readl);
 
 void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
 {
-       writel(val, port->regs + reg);
+       struct ocelot *ocelot = port->ocelot;
+       u16 target = reg >> TARGET_OFFSET;
+
+       WARN_ON(!target);
+
+       regmap_write(port->target, ocelot->map[target][reg & REG_MASK], val);
 }
 EXPORT_SYMBOL(ocelot_port_writel);
 
@@ -77,6 +89,8 @@ int ocelot_regfields_init(struct ocelot *ocelot,
                regfield.reg = ocelot->map[target][reg & REG_MASK];
                regfield.lsb = regfields[i].lsb;
                regfield.msb = regfields[i].msb;
+               regfield.id_size = regfields[i].id_size;
+               regfield.id_offset = regfields[i].id_offset;
 
                ocelot->regfields[i] =
                devm_regmap_field_alloc(ocelot->dev,
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
new file mode 100644 (file)
index 0000000..0668d23
--- /dev/null
@@ -0,0 +1,1050 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017, 2019 Microsemi Corporation
+ */
+
+#include <linux/if_bridge.h>
+#include "ocelot.h"
+#include "ocelot_vcap.h"
+
+/* Dispatch a tc flower classifier offload command for this port to the
+ * common ocelot handlers. Only ingress classification is supported.
+ */
+int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
+                              struct flow_cls_offload *f,
+                              bool ingress)
+{
+       struct ocelot *ocelot = priv->port.ocelot;
+       int port = priv->chip_port;
+
+       if (!ingress)
+               return -EOPNOTSUPP;
+
+       switch (f->command) {
+       case FLOW_CLS_REPLACE:
+               return ocelot_cls_flower_replace(ocelot, port, f, ingress);
+       case FLOW_CLS_DESTROY:
+               return ocelot_cls_flower_destroy(ocelot, port, f, ingress);
+       case FLOW_CLS_STATS:
+               return ocelot_cls_flower_stats(ocelot, port, f, ingress);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+/* Offload a tc matchall rule as a single ingress port policer.
+ * Only one FLOW_ACTION_POLICE action per port is accepted; shared blocks
+ * and egress are rejected. The policer cookie is remembered in
+ * priv->tc.police_id so DESTROY can match it.
+ */
+static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
+                                       struct tc_cls_matchall_offload *f,
+                                       bool ingress)
+{
+       struct netlink_ext_ack *extack = f->common.extack;
+       struct ocelot *ocelot = priv->port.ocelot;
+       struct ocelot_policer pol = { 0 };
+       struct flow_action_entry *action;
+       int port = priv->chip_port;
+       int err;
+
+       if (!ingress) {
+               NL_SET_ERR_MSG_MOD(extack, "Only ingress is supported");
+               return -EOPNOTSUPP;
+       }
+
+       switch (f->command) {
+       case TC_CLSMATCHALL_REPLACE:
+               if (!flow_offload_has_one_action(&f->rule->action)) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Only one action is supported");
+                       return -EOPNOTSUPP;
+               }
+
+               if (priv->tc.block_shared) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Rate limit is not supported on shared blocks");
+                       return -EOPNOTSUPP;
+               }
+
+               action = &f->rule->action.entries[0];
+
+               if (action->id != FLOW_ACTION_POLICE) {
+                       NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
+                       return -EOPNOTSUPP;
+               }
+
+               if (priv->tc.police_id && priv->tc.police_id != f->cookie) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Only one policer per port is supported");
+                       return -EEXIST;
+               }
+
+               /* bytes/s -> kbit/s: divide by 1000, multiply by 8 bits */
+               pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8;
+               pol.burst = action->police.burst;
+
+               err = ocelot_port_policer_add(ocelot, port, &pol);
+               if (err) {
+                       NL_SET_ERR_MSG_MOD(extack, "Could not add policer");
+                       return err;
+               }
+
+               priv->tc.police_id = f->cookie;
+               priv->tc.offload_cnt++;
+               return 0;
+       case TC_CLSMATCHALL_DESTROY:
+               if (priv->tc.police_id != f->cookie)
+                       return -ENOENT;
+
+               err = ocelot_port_policer_del(ocelot, port);
+               if (err) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Could not delete policer");
+                       return err;
+               }
+               priv->tc.police_id = 0;
+               priv->tc.offload_cnt--;
+               return 0;
+       case TC_CLSMATCHALL_STATS:
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+/* Common flow block callback: route matchall and flower offloads to their
+ * handlers, after the chain-0/offloadability check.
+ */
+static int ocelot_setup_tc_block_cb(enum tc_setup_type type,
+                                   void *type_data,
+                                   void *cb_priv, bool ingress)
+{
+       struct ocelot_port_private *priv = cb_priv;
+
+       if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
+               return -EOPNOTSUPP;
+
+       switch (type) {
+       case TC_SETUP_CLSMATCHALL:
+               return ocelot_setup_tc_cls_matchall(priv, type_data, ingress);
+       case TC_SETUP_CLSFLOWER:
+               return ocelot_setup_tc_cls_flower(priv, type_data, ingress);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+/* Ingress-bound wrapper around ocelot_setup_tc_block_cb() */
+static int ocelot_setup_tc_block_cb_ig(enum tc_setup_type type,
+                                      void *type_data,
+                                      void *cb_priv)
+{
+       return ocelot_setup_tc_block_cb(type, type_data,
+                                       cb_priv, true);
+}
+
+/* Egress-bound wrapper around ocelot_setup_tc_block_cb() */
+static int ocelot_setup_tc_block_cb_eg(enum tc_setup_type type,
+                                      void *type_data,
+                                      void *cb_priv)
+{
+       return ocelot_setup_tc_block_cb(type, type_data,
+                                       cb_priv, false);
+}
+
+/* Driver-wide list of registered flow block callbacks, used for the
+ * busy check on FLOW_BLOCK_BIND.
+ */
+static LIST_HEAD(ocelot_block_cb_list);
+
+/* Bind or unbind a tc flow block on this port, choosing the ingress or
+ * egress callback based on the binder type.
+ */
+static int ocelot_setup_tc_block(struct ocelot_port_private *priv,
+                                struct flow_block_offload *f)
+{
+       struct flow_block_cb *block_cb;
+       flow_setup_cb_t *cb;
+
+       if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
+               cb = ocelot_setup_tc_block_cb_ig;
+               priv->tc.block_shared = f->block_shared;
+       } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
+               cb = ocelot_setup_tc_block_cb_eg;
+       } else {
+               return -EOPNOTSUPP;
+       }
+
+       f->driver_block_list = &ocelot_block_cb_list;
+
+       switch (f->command) {
+       case FLOW_BLOCK_BIND:
+               if (flow_block_cb_is_busy(cb, priv, &ocelot_block_cb_list))
+                       return -EBUSY;
+
+               block_cb = flow_block_cb_alloc(cb, priv, priv, NULL);
+               if (IS_ERR(block_cb))
+                       return PTR_ERR(block_cb);
+
+               flow_block_cb_add(block_cb, f);
+               list_add_tail(&block_cb->driver_list, f->driver_block_list);
+               return 0;
+       case FLOW_BLOCK_UNBIND:
+               block_cb = flow_block_cb_lookup(f->block, cb, priv);
+               if (!block_cb)
+                       return -ENOENT;
+
+               flow_block_cb_remove(block_cb, f);
+               list_del(&block_cb->driver_list);
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+/* .ndo_setup_tc entry point: only TC_SETUP_BLOCK is handled */
+static int ocelot_setup_tc(struct net_device *dev, enum tc_setup_type type,
+                          void *type_data)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+
+       switch (type) {
+       case TC_SETUP_BLOCK:
+               return ocelot_setup_tc_block(priv, type_data);
+       default:
+               return -EOPNOTSUPP;
+       }
+       return 0;
+}
+
+/* phylib adjust_link callback: forward to the common ocelot handler */
+static void ocelot_port_adjust_link(struct net_device *dev)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot *ocelot = priv->port.ocelot;
+       int port = priv->chip_port;
+
+       ocelot_adjust_link(ocelot, port, dev->phydev);
+}
+
+/* Install a VLAN on the port and learn the netdev MAC address on the CPU
+ * port group in that VLAN so host-directed traffic keeps flowing.
+ */
+static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
+                              bool untagged)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot_port *ocelot_port = &priv->port;
+       struct ocelot *ocelot = ocelot_port->ocelot;
+       int port = priv->chip_port;
+       int ret;
+
+       ret = ocelot_vlan_add(ocelot, port, vid, pvid, untagged);
+       if (ret)
+               return ret;
+
+       /* Add the port MAC address to with the right VLAN information */
+       ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, vid,
+                         ENTRYTYPE_LOCKED);
+
+       return 0;
+}
+
+/* Remove a VLAN from the port and forget the matching MAC table entry.
+ * VID 0 is deliberately kept (see comment below).
+ */
+static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot *ocelot = priv->port.ocelot;
+       int port = priv->chip_port;
+       int ret;
+
+       /* 8021q removes VID 0 on module unload for all interfaces
+        * with VLAN filtering feature. We need to keep it to receive
+        * untagged traffic.
+        */
+       if (vid == 0)
+               return 0;
+
+       ret = ocelot_vlan_del(ocelot, port, vid);
+       if (ret)
+               return ret;
+
+       /* Del the port MAC address to with the right VLAN information */
+       ocelot_mact_forget(ocelot, dev->dev_addr, vid);
+
+       return 0;
+}
+
+/* .ndo_open: configure the SerDes (if any), attach and start the PHY,
+ * then enable the switch port.
+ */
+static int ocelot_port_open(struct net_device *dev)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot_port *ocelot_port = &priv->port;
+       struct ocelot *ocelot = ocelot_port->ocelot;
+       int port = priv->chip_port;
+       int err;
+
+       if (priv->serdes) {
+               err = phy_set_mode_ext(priv->serdes, PHY_MODE_ETHERNET,
+                                      ocelot_port->phy_mode);
+               if (err) {
+                       netdev_err(dev, "Could not set mode of SerDes\n");
+                       return err;
+               }
+       }
+
+       err = phy_connect_direct(dev, priv->phy, &ocelot_port_adjust_link,
+                                ocelot_port->phy_mode);
+       if (err) {
+               netdev_err(dev, "Could not attach to PHY\n");
+               return err;
+       }
+
+       dev->phydev = priv->phy;
+
+       phy_attached_info(priv->phy);
+       phy_start(priv->phy);
+
+       ocelot_port_enable(ocelot, port, priv->phy);
+
+       return 0;
+}
+
+/* .ndo_stop: detach the PHY and disable the switch port */
+static int ocelot_port_stop(struct net_device *dev)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot *ocelot = priv->port.ocelot;
+       int port = priv->chip_port;
+
+       phy_disconnect(priv->phy);
+
+       dev->phydev = NULL;
+
+       ocelot_port_disable(ocelot, port);
+
+       return 0;
+}
+
+/* Generate the IFH for frame injection
+ *
+ * The IFH is a 128bit-value
+ * bit 127: bypass the analyzer processing
+ * bit 56-67: destination mask
+ * bit 28-29: pop_cnt: 3 disables all rewriting of the frame
+ * bit 20-27: cpu extraction queue mask
+ * bit 16: tag type 0: C-tag, 1: S-tag
+ * bit 0-11: VID
+ */
+/* @ifh: output buffer of four 32-bit words (filled MSW first)
+ * @info: injection parameters (rew_op, destination port mask, tag, vid)
+ */
+static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
+{
+       ifh[0] = IFH_INJ_BYPASS | ((0x1ff & info->rew_op) << 21);
+       ifh[1] = (0xf00 & info->port) >> 8;
+       ifh[2] = (0xff & info->port) << 24;
+       ifh[3] = (info->tag_type << 16) | info->vid;
+
+       return 0;
+}
+
+/* .ndo_start_xmit: inject a frame through the QS register interface.
+ * Writes the 16-byte IFH, then the payload word by word, pads short
+ * frames to one buffer cell, and signals EOF with the count of valid
+ * bytes in the last word. Returns NETDEV_TX_BUSY if the injection FIFO
+ * for group 0 is not ready.
+ */
+static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct skb_shared_info *shinfo = skb_shinfo(skb);
+       struct ocelot_port *ocelot_port = &priv->port;
+       struct ocelot *ocelot = ocelot_port->ocelot;
+       u32 val, ifh[OCELOT_TAG_LEN / 4];
+       struct frame_info info = {};
+       u8 grp = 0; /* Send everything on CPU group 0 */
+       unsigned int i, count, last;
+       int port = priv->chip_port;
+
+       val = ocelot_read(ocelot, QS_INJ_STATUS);
+       if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))) ||
+           (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp))))
+               return NETDEV_TX_BUSY;
+
+       ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
+                        QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
+
+       info.port = BIT(port);
+       info.tag_type = IFH_TAG_TYPE_C;
+       info.vid = skb_vlan_tag_get(skb);
+
+       /* Check if timestamping is needed */
+       if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) {
+               info.rew_op = ocelot_port->ptp_cmd;
+               if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
+                       info.rew_op |= (ocelot_port->ts_id  % 4) << 3;
+       }
+
+       ocelot_gen_ifh(ifh, &info);
+
+       for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
+               ocelot_write_rix(ocelot, (__force u32)cpu_to_be32(ifh[i]),
+                                QS_INJ_WR, grp);
+
+       count = (skb->len + 3) / 4;
+       last = skb->len % 4;
+       for (i = 0; i < count; i++)
+               ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp);
+
+       /* Add padding */
+       while (i < (OCELOT_BUFFER_CELL_SZ / 4)) {
+               ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
+               i++;
+       }
+
+       /* Indicate EOF and valid bytes in last word */
+       ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
+                        QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) |
+                        QS_INJ_CTRL_EOF,
+                        QS_INJ_CTRL, grp);
+
+       /* Add dummy CRC */
+       ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
+       skb_tx_timestamp(skb);
+
+       dev->stats.tx_packets++;
+       dev->stats.tx_bytes += skb->len;
+
+       /* skb ownership passes to the timestamp queue on success; only
+        * free it here when no TX timestamp was requested/queued.
+        */
+       if (!ocelot_port_add_txtstamp_skb(ocelot_port, skb)) {
+               ocelot_port->ts_id++;
+               return NETDEV_TX_OK;
+       }
+
+       dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
+}
+
+/* __dev_mc_sync unsync callback: forget a multicast address on the PVID */
+static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot_port *ocelot_port = &priv->port;
+       struct ocelot *ocelot = ocelot_port->ocelot;
+
+       return ocelot_mact_forget(ocelot, addr, ocelot_port->pvid);
+}
+
+/* __dev_mc_sync sync callback: learn a multicast address towards the CPU */
+static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot_port *ocelot_port = &priv->port;
+       struct ocelot *ocelot = ocelot_port->ocelot;
+
+       return ocelot_mact_learn(ocelot, PGID_CPU, addr, ocelot_port->pvid,
+                                ENTRYTYPE_LOCKED);
+}
+
+/* .ndo_set_rx_mode: flood non-reserved multicast PGIDs to all physical
+ * ports and sync the device multicast list into the MAC table.
+ */
+static void ocelot_set_rx_mode(struct net_device *dev)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot *ocelot = priv->port.ocelot;
+       u32 val;
+       int i;
+
+       /* This doesn't handle promiscuous mode because the bridge core is
+        * setting IFF_PROMISC on all slave interfaces and all frames would be
+        * forwarded to the CPU port.
+        */
+       val = GENMASK(ocelot->num_phys_ports - 1, 0);
+       for_each_nonreserved_multicast_dest_pgid(ocelot, i)
+               ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
+
+       __dev_mc_sync(dev, ocelot_mc_sync, ocelot_mc_unsync);
+}
+
+/* .ndo_get_phys_port_name: report the port name as "p<chip_port>" */
+static int ocelot_port_get_phys_port_name(struct net_device *dev,
+                                         char *buf, size_t len)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       int port = priv->chip_port;
+       int ret;
+
+       ret = snprintf(buf, len, "p%d", port);
+       if (ret >= len)
+               return -EINVAL;
+
+       return 0;
+}
+
+/* .ndo_set_mac_address: swap the MAC table entry (learn new address
+ * before forgetting the old one to avoid a window with neither present)
+ * and update dev->dev_addr.
+ */
+static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot_port *ocelot_port = &priv->port;
+       struct ocelot *ocelot = ocelot_port->ocelot;
+       const struct sockaddr *addr = p;
+
+       /* Learn the new net device MAC address in the mac table. */
+       ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data, ocelot_port->pvid,
+                         ENTRYTYPE_LOCKED);
+       /* Then forget the previous one. */
+       ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid);
+
+       ether_addr_copy(dev->dev_addr, addr->sa_data);
+       return 0;
+}
+
+/* .ndo_get_stats64: read HW counters via the SYS_STAT_CFG view register.
+ * NOTE(review): selecting the stat view then reading counters is not
+ * serialized here against concurrent callers — confirm locking upstream.
+ */
+static void ocelot_get_stats64(struct net_device *dev,
+                              struct rtnl_link_stats64 *stats)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot *ocelot = priv->port.ocelot;
+       int port = priv->chip_port;
+
+       /* Configure the port to read the stats from */
+       ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port),
+                    SYS_STAT_CFG);
+
+       /* Get Rx stats */
+       stats->rx_bytes = ocelot_read(ocelot, SYS_COUNT_RX_OCTETS);
+       stats->rx_packets = ocelot_read(ocelot, SYS_COUNT_RX_SHORTS) +
+                           ocelot_read(ocelot, SYS_COUNT_RX_FRAGMENTS) +
+                           ocelot_read(ocelot, SYS_COUNT_RX_JABBERS) +
+                           ocelot_read(ocelot, SYS_COUNT_RX_LONGS) +
+                           ocelot_read(ocelot, SYS_COUNT_RX_64) +
+                           ocelot_read(ocelot, SYS_COUNT_RX_65_127) +
+                           ocelot_read(ocelot, SYS_COUNT_RX_128_255) +
+                           ocelot_read(ocelot, SYS_COUNT_RX_256_1023) +
+                           ocelot_read(ocelot, SYS_COUNT_RX_1024_1526) +
+                           ocelot_read(ocelot, SYS_COUNT_RX_1527_MAX);
+       stats->multicast = ocelot_read(ocelot, SYS_COUNT_RX_MULTICAST);
+       stats->rx_dropped = dev->stats.rx_dropped;
+
+       /* Get Tx stats */
+       stats->tx_bytes = ocelot_read(ocelot, SYS_COUNT_TX_OCTETS);
+       stats->tx_packets = ocelot_read(ocelot, SYS_COUNT_TX_64) +
+                           ocelot_read(ocelot, SYS_COUNT_TX_65_127) +
+                           ocelot_read(ocelot, SYS_COUNT_TX_128_511) +
+                           ocelot_read(ocelot, SYS_COUNT_TX_512_1023) +
+                           ocelot_read(ocelot, SYS_COUNT_TX_1024_1526) +
+                           ocelot_read(ocelot, SYS_COUNT_TX_1527_MAX) +
+                           ocelot_read(ocelot, SYS_COUNT_TX_AGING);
+       stats->collisions = ocelot_read(ocelot, SYS_COUNT_TX_COLLISION);
+}
+
+/* .ndo_fdb_add: delegate static FDB entry creation to the common layer */
+static int ocelot_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+                              struct net_device *dev,
+                              const unsigned char *addr,
+                              u16 vid, u16 flags,
+                              struct netlink_ext_ack *extack)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot *ocelot = priv->port.ocelot;
+       int port = priv->chip_port;
+
+       return ocelot_fdb_add(ocelot, port, addr, vid);
+}
+
+/* .ndo_fdb_del: delegate FDB entry removal to the common layer */
+static int ocelot_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+                              struct net_device *dev,
+                              const unsigned char *addr, u16 vid)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot *ocelot = priv->port.ocelot;
+       int port = priv->chip_port;
+
+       return ocelot_fdb_del(ocelot, port, addr, vid);
+}
+
+/* .ndo_fdb_dump: walk the FDB via the common iterator, threading the
+ * netlink dump state through an ocelot_dump_ctx.
+ */
+static int ocelot_port_fdb_dump(struct sk_buff *skb,
+                               struct netlink_callback *cb,
+                               struct net_device *dev,
+                               struct net_device *filter_dev, int *idx)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot *ocelot = priv->port.ocelot;
+       struct ocelot_dump_ctx dump = {
+               .dev = dev,
+               .skb = skb,
+               .cb = cb,
+               .idx = *idx,
+       };
+       int port = priv->chip_port;
+       int ret;
+
+       ret = ocelot_fdb_dump(ocelot, port, ocelot_port_fdb_do_dump, &dump);
+
+       /* Propagate dump progress back to the caller */
+       *idx = dump.idx;
+
+       return ret;
+}
+
+/* .ndo_vlan_rx_add_vid: add VID as non-pvid, tagged */
+static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
+                                 u16 vid)
+{
+       return ocelot_vlan_vid_add(dev, vid, false, false);
+}
+
+/* .ndo_vlan_rx_kill_vid: remove VID from the port */
+static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
+                                  u16 vid)
+{
+       return ocelot_vlan_vid_del(dev, vid);
+}
+
+/* Enable/disable VLAN filtering for @port in ANA_VLANMASK based on the
+ * NETIF_F_HW_VLAN_CTAG_FILTER feature bit.
+ */
+static void ocelot_vlan_mode(struct ocelot *ocelot, int port,
+                            netdev_features_t features)
+{
+       u32 val;
+
+       /* Filtering */
+       val = ocelot_read(ocelot, ANA_VLANMASK);
+       if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+               val |= BIT(port);
+       else
+               val &= ~BIT(port);
+       ocelot_write(ocelot, val, ANA_VLANMASK);
+}
+
+/* .ndo_set_features: refuse to drop NETIF_F_HW_TC while tc offloads are
+ * active; apply VLAN filtering changes to the hardware.
+ */
+static int ocelot_set_features(struct net_device *dev,
+                              netdev_features_t features)
+{
+       netdev_features_t changed = dev->features ^ features;
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot *ocelot = priv->port.ocelot;
+       int port = priv->chip_port;
+
+       if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) &&
+           priv->tc.offload_cnt) {
+               netdev_err(dev,
+                          "Cannot disable HW TC offload while offloads active\n");
+               return -EBUSY;
+       }
+
+       if (changed & NETIF_F_HW_VLAN_CTAG_FILTER)
+               ocelot_vlan_mode(ocelot, port, features);
+
+       return 0;
+}
+
+/* .ndo_get_port_parent_id: identify the switch by its base MAC address */
+static int ocelot_get_port_parent_id(struct net_device *dev,
+                                    struct netdev_phys_item_id *ppid)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot *ocelot = priv->port.ocelot;
+
+       ppid->id_len = sizeof(ocelot->base_mac);
+       memcpy(&ppid->id, &ocelot->base_mac, ppid->id_len);
+
+       return 0;
+}
+
+/* .ndo_do_ioctl: handle HW timestamping ioctls in the switch when the
+ * PHY cannot; everything else goes to the PHY MII ioctl handler.
+ */
+static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot *ocelot = priv->port.ocelot;
+       int port = priv->chip_port;
+
+       /* If the attached PHY device isn't capable of timestamping operations,
+        * use our own (when possible).
+        */
+       if (!phy_has_hwtstamp(dev->phydev) && ocelot->ptp) {
+               switch (cmd) {
+               case SIOCSHWTSTAMP:
+                       return ocelot_hwstamp_set(ocelot, port, ifr);
+               case SIOCGHWTSTAMP:
+                       return ocelot_hwstamp_get(ocelot, port, ifr);
+               }
+       }
+
+       return phy_mii_ioctl(dev->phydev, ifr, cmd);
+}
+
+/* net_device operations for ocelot switch ports */
+static const struct net_device_ops ocelot_port_netdev_ops = {
+       .ndo_open                       = ocelot_port_open,
+       .ndo_stop                       = ocelot_port_stop,
+       .ndo_start_xmit                 = ocelot_port_xmit,
+       .ndo_set_rx_mode                = ocelot_set_rx_mode,
+       .ndo_get_phys_port_name         = ocelot_port_get_phys_port_name,
+       .ndo_set_mac_address            = ocelot_port_set_mac_address,
+       .ndo_get_stats64                = ocelot_get_stats64,
+       .ndo_fdb_add                    = ocelot_port_fdb_add,
+       .ndo_fdb_del                    = ocelot_port_fdb_del,
+       .ndo_fdb_dump                   = ocelot_port_fdb_dump,
+       .ndo_vlan_rx_add_vid            = ocelot_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid           = ocelot_vlan_rx_kill_vid,
+       .ndo_set_features               = ocelot_set_features,
+       .ndo_get_port_parent_id         = ocelot_get_port_parent_id,
+       .ndo_setup_tc                   = ocelot_setup_tc,
+       .ndo_do_ioctl                   = ocelot_ioctl,
+};
+
+/* ethtool .get_strings: delegate to the common layer */
+static void ocelot_port_get_strings(struct net_device *netdev, u32 sset,
+                                   u8 *data)
+{
+       struct ocelot_port_private *priv = netdev_priv(netdev);
+       struct ocelot *ocelot = priv->port.ocelot;
+       int port = priv->chip_port;
+
+       ocelot_get_strings(ocelot, port, sset, data);
+}
+
+/* ethtool .get_ethtool_stats: delegate to the common layer */
+static void ocelot_port_get_ethtool_stats(struct net_device *dev,
+                                         struct ethtool_stats *stats,
+                                         u64 *data)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot *ocelot = priv->port.ocelot;
+       int port = priv->chip_port;
+
+       ocelot_get_ethtool_stats(ocelot, port, data);
+}
+
+/* ethtool .get_sset_count: delegate to the common layer */
+static int ocelot_port_get_sset_count(struct net_device *dev, int sset)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot *ocelot = priv->port.ocelot;
+       int port = priv->chip_port;
+
+       return ocelot_get_sset_count(ocelot, port, sset);
+}
+
+/* ethtool .get_ts_info: report PTP capabilities, falling back to the
+ * generic implementation when the switch has no PTP clock.
+ */
+static int ocelot_port_get_ts_info(struct net_device *dev,
+                                  struct ethtool_ts_info *info)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot *ocelot = priv->port.ocelot;
+       int port = priv->chip_port;
+
+       if (!ocelot->ptp)
+               return ethtool_op_get_ts_info(dev, info);
+
+       return ocelot_get_ts_info(ocelot, port, info);
+}
+
+/* ethtool operations for ocelot switch ports */
+static const struct ethtool_ops ocelot_ethtool_ops = {
+       .get_strings            = ocelot_port_get_strings,
+       .get_ethtool_stats      = ocelot_port_get_ethtool_stats,
+       .get_sset_count         = ocelot_port_get_sset_count,
+       .get_link_ksettings     = phy_ethtool_get_link_ksettings,
+       .set_link_ksettings     = phy_ethtool_set_link_ksettings,
+       .get_ts_info            = ocelot_port_get_ts_info,
+};
+
+/* Apply an STP state change in the switchdev commit phase only */
+static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
+                                          struct switchdev_trans *trans,
+                                          u8 state)
+{
+       if (switchdev_trans_ph_prepare(trans))
+               return;
+
+       ocelot_bridge_stp_state_set(ocelot, port, state);
+}
+
+/* Convert the bridge ageing time (clock_t) to milliseconds and program it */
+static void ocelot_port_attr_ageing_set(struct ocelot *ocelot, int port,
+                                       unsigned long ageing_clock_t)
+{
+       unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+       u32 ageing_time = jiffies_to_msecs(ageing_jiffies);
+
+       ocelot_set_ageing_time(ocelot, ageing_time);
+}
+
+/* Enable/disable CPU redirection of IGMP/MLD and IPMC copy for @port,
+ * driven by the bridge multicast-snooping attribute.
+ */
+static void ocelot_port_attr_mc_set(struct ocelot *ocelot, int port, bool mc)
+{
+       u32 cpu_fwd_mcast = ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA |
+                           ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA |
+                           ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA;
+       u32 val = 0;
+
+       if (mc)
+               val = cpu_fwd_mcast;
+
+       ocelot_rmw_gix(ocelot, val, cpu_fwd_mcast,
+                      ANA_PORT_CPU_FWD_CFG, port);
+}
+
+/* switchdev port-attribute handler: dispatch bridge attribute changes
+ * (STP state, ageing time, VLAN filtering, multicast snooping) to the
+ * matching hardware helper. Unknown attributes return -EOPNOTSUPP.
+ */
+static int ocelot_port_attr_set(struct net_device *dev,
+                               const struct switchdev_attr *attr,
+                               struct switchdev_trans *trans)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot *ocelot = priv->port.ocelot;
+       int port = priv->chip_port;
+       int err = 0;
+
+       switch (attr->id) {
+       case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+               ocelot_port_attr_stp_state_set(ocelot, port, trans,
+                                              attr->u.stp_state);
+               break;
+       case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+               ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time);
+               break;
+       case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+               ocelot_port_vlan_filtering(ocelot, port,
+                                          attr->u.vlan_filtering);
+               break;
+       case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
+               /* Attribute carries "disabled"; helper wants "enabled". */
+               ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       return err;
+}
+
+/* Add the [vid_begin, vid_end] range of VLANs to a port, propagating
+ * the PVID and untagged flags per VID. Stops at the first failure.
+ * NOTE(review): @trans is accepted but not consulted here — presumably
+ * ocelot_vlan_vid_add is safe to call in both transaction phases;
+ * confirm against its definition.
+ */
+static int ocelot_port_obj_add_vlan(struct net_device *dev,
+                                   const struct switchdev_obj_port_vlan *vlan,
+                                   struct switchdev_trans *trans)
+{
+       int ret;
+       u16 vid;
+
+       for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+               ret = ocelot_vlan_vid_add(dev, vid,
+                                         vlan->flags & BRIDGE_VLAN_INFO_PVID,
+                                         vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* Remove the [vid_begin, vid_end] range of VLANs from a port,
+ * stopping at the first failure.
+ */
+static int ocelot_port_vlan_del_vlan(struct net_device *dev,
+                                    const struct switchdev_obj_port_vlan *vlan)
+{
+       int ret;
+       u16 vid;
+
+       for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+               ret = ocelot_vlan_vid_del(dev, vid);
+
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* Install a multicast database entry on a port. Only the commit phase
+ * of the switchdev transaction touches hardware; prepare is a no-op.
+ */
+static int ocelot_port_obj_add_mdb(struct net_device *dev,
+                                  const struct switchdev_obj_port_mdb *mdb,
+                                  struct switchdev_trans *trans)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot_port *ocelot_port = &priv->port;
+       struct ocelot *ocelot = ocelot_port->ocelot;
+       int port = priv->chip_port;
+
+       if (switchdev_trans_ph_prepare(trans))
+               return 0;
+
+       return ocelot_port_mdb_add(ocelot, port, mdb);
+}
+
+/* Remove a multicast database entry from a port. */
+static int ocelot_port_obj_del_mdb(struct net_device *dev,
+                                  const struct switchdev_obj_port_mdb *mdb)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot_port *ocelot_port = &priv->port;
+       struct ocelot *ocelot = ocelot_port->ocelot;
+       int port = priv->chip_port;
+
+       return ocelot_port_mdb_del(ocelot, port, mdb);
+}
+
+/* switchdev object-add handler: route VLAN and MDB additions to the
+ * corresponding helper; any other object type is unsupported.
+ */
+static int ocelot_port_obj_add(struct net_device *dev,
+                              const struct switchdev_obj *obj,
+                              struct switchdev_trans *trans,
+                              struct netlink_ext_ack *extack)
+{
+       int ret = 0;
+
+       switch (obj->id) {
+       case SWITCHDEV_OBJ_ID_PORT_VLAN:
+               ret = ocelot_port_obj_add_vlan(dev,
+                                              SWITCHDEV_OBJ_PORT_VLAN(obj),
+                                              trans);
+               break;
+       case SWITCHDEV_OBJ_ID_PORT_MDB:
+               ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
+                                             trans);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return ret;
+}
+
+/* switchdev object-delete handler: route VLAN and MDB removals to the
+ * corresponding helper; any other object type is unsupported.
+ */
+static int ocelot_port_obj_del(struct net_device *dev,
+                              const struct switchdev_obj *obj)
+{
+       int ret = 0;
+
+       switch (obj->id) {
+       case SWITCHDEV_OBJ_ID_PORT_VLAN:
+               ret = ocelot_port_vlan_del_vlan(dev,
+                                               SWITCHDEV_OBJ_PORT_VLAN(obj));
+               break;
+       case SWITCHDEV_OBJ_ID_PORT_MDB:
+               ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       return ret;
+}
+
+/* Returns true if the given net_device was created by this driver,
+ * identified by it using our netdev_ops table.
+ */
+static bool ocelot_netdevice_dev_check(const struct net_device *dev)
+{
+       return dev->netdev_ops == &ocelot_port_netdev_ops;
+}
+
+/* Per-port half of the netdev notifier: handle a port being linked to
+ * or unlinked from a bridge or LAG upper device.
+ * NOTE(review): ocelot_port_lag_leave's return value is discarded here
+ * (unlike the join path) — confirm leave cannot meaningfully fail.
+ */
+static int ocelot_netdevice_port_event(struct net_device *dev,
+                                      unsigned long event,
+                                      struct netdev_notifier_changeupper_info *info)
+{
+       struct ocelot_port_private *priv = netdev_priv(dev);
+       struct ocelot_port *ocelot_port = &priv->port;
+       struct ocelot *ocelot = ocelot_port->ocelot;
+       int port = priv->chip_port;
+       int err = 0;
+
+       switch (event) {
+       case NETDEV_CHANGEUPPER:
+               if (netif_is_bridge_master(info->upper_dev)) {
+                       if (info->linking) {
+                               err = ocelot_port_bridge_join(ocelot, port,
+                                                             info->upper_dev);
+                       } else {
+                               err = ocelot_port_bridge_leave(ocelot, port,
+                                                              info->upper_dev);
+                       }
+               }
+               if (netif_is_lag_master(info->upper_dev)) {
+                       if (info->linking)
+                               err = ocelot_port_lag_join(ocelot, port,
+                                                          info->upper_dev);
+                       else
+                               ocelot_port_lag_leave(ocelot, port,
+                                                     info->upper_dev);
+               }
+               break;
+       default:
+               break;
+       }
+
+       return err;
+}
+
+/* Top-level netdev notifier callback. Rejects LAG uppers whose Tx
+ * policy is not hash-based at PRECHANGEUPPER time, then fans the event
+ * out: for a LAG master, to every lower port; otherwise to the single
+ * port itself.
+ */
+static int ocelot_netdevice_event(struct notifier_block *unused,
+                                 unsigned long event, void *ptr)
+{
+       struct netdev_notifier_changeupper_info *info = ptr;
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       int ret = 0;
+
+       /* Ignore devices that do not belong to this driver. */
+       if (!ocelot_netdevice_dev_check(dev))
+               return 0;
+
+       if (event == NETDEV_PRECHANGEUPPER &&
+           netif_is_lag_master(info->upper_dev)) {
+               struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
+               struct netlink_ext_ack *extack;
+
+               /* Only hash-based Tx distribution is supported for LAGs. */
+               if (lag_upper_info &&
+                   lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+                       extack = netdev_notifier_info_to_extack(&info->info);
+                       NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
+
+                       ret = -EINVAL;
+                       goto notify;
+               }
+       }
+
+       if (netif_is_lag_master(dev)) {
+               struct net_device *slave;
+               struct list_head *iter;
+
+               /* Propagate the event to every port enslaved to the LAG. */
+               netdev_for_each_lower_dev(dev, slave, iter) {
+                       ret = ocelot_netdevice_port_event(slave, event, info);
+                       if (ret)
+                               goto notify;
+               }
+       } else {
+               ret = ocelot_netdevice_port_event(dev, event, info);
+       }
+
+notify:
+       return notifier_from_errno(ret);
+}
+
+/* Notifier block registered for netdev events (bridge/LAG topology). */
+struct notifier_block ocelot_netdevice_nb __read_mostly = {
+       .notifier_call = ocelot_netdevice_event,
+};
+
+/* Atomic (non-blocking) switchdev notifier: handles only port
+ * attribute changes; everything else is passed on untouched.
+ */
+static int ocelot_switchdev_event(struct notifier_block *unused,
+                                 unsigned long event, void *ptr)
+{
+       struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+       int err;
+
+       switch (event) {
+       case SWITCHDEV_PORT_ATTR_SET:
+               err = switchdev_handle_port_attr_set(dev, ptr,
+                                                    ocelot_netdevice_dev_check,
+                                                    ocelot_port_attr_set);
+               return notifier_from_errno(err);
+       }
+
+       return NOTIFY_DONE;
+}
+
+/* Notifier block for atomic switchdev events. */
+struct notifier_block ocelot_switchdev_nb __read_mostly = {
+       .notifier_call = ocelot_switchdev_event,
+};
+
+/* Blocking switchdev notifier: handles object add/delete (VLAN, MDB)
+ * and attribute set, dispatching through the generic switchdev helpers
+ * which filter on ocelot_netdevice_dev_check.
+ */
+static int ocelot_switchdev_blocking_event(struct notifier_block *unused,
+                                          unsigned long event, void *ptr)
+{
+       struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+       int err;
+
+       switch (event) {
+               /* Blocking events. */
+       case SWITCHDEV_PORT_OBJ_ADD:
+               err = switchdev_handle_port_obj_add(dev, ptr,
+                                                   ocelot_netdevice_dev_check,
+                                                   ocelot_port_obj_add);
+               return notifier_from_errno(err);
+       case SWITCHDEV_PORT_OBJ_DEL:
+               err = switchdev_handle_port_obj_del(dev, ptr,
+                                                   ocelot_netdevice_dev_check,
+                                                   ocelot_port_obj_del);
+               return notifier_from_errno(err);
+       case SWITCHDEV_PORT_ATTR_SET:
+               err = switchdev_handle_port_attr_set(dev, ptr,
+                                                    ocelot_netdevice_dev_check,
+                                                    ocelot_port_attr_set);
+               return notifier_from_errno(err);
+       }
+
+       return NOTIFY_DONE;
+}
+
+/* Notifier block for blocking switchdev events. */
+struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = {
+       .notifier_call = ocelot_switchdev_blocking_event,
+};
+
+/* Create and register the net_device for one switch port.
+ *
+ * Allocates the netdev with its ocelot_port_private area, wires it into
+ * ocelot->ports[], derives a per-port MAC address from the switch base
+ * MAC, installs it as a locked CPU-facing MAC table entry, initialises
+ * the port hardware and finally registers the netdev.
+ *
+ * Returns 0 on success or a negative errno; on register_netdev failure
+ * the netdev is freed before returning.
+ */
+int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
+                     struct phy_device *phy)
+{
+       struct ocelot_port_private *priv;
+       struct ocelot_port *ocelot_port;
+       struct net_device *dev;
+       int err;
+
+       dev = alloc_etherdev(sizeof(struct ocelot_port_private));
+       if (!dev)
+               return -ENOMEM;
+       SET_NETDEV_DEV(dev, ocelot->dev);
+       priv = netdev_priv(dev);
+       priv->dev = dev;
+       priv->phy = phy;
+       priv->chip_port = port;
+       ocelot_port = &priv->port;
+       ocelot_port->ocelot = ocelot;
+       ocelot_port->target = target;
+       ocelot->ports[port] = ocelot_port;
+
+       dev->netdev_ops = &ocelot_port_netdev_ops;
+       dev->ethtool_ops = &ocelot_ethtool_ops;
+
+       dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXFCS |
+               NETIF_F_HW_TC;
+       dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
+
+       /* Per-port MAC: base MAC with the port index added to the last
+        * octet. NOTE(review): presumably base_mac's last octet leaves
+        * room for all ports without overflowing — confirm at the caller.
+        */
+       memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
+       dev->dev_addr[ETH_ALEN - 1] += port;
+       /* Locked entry so frames to the port MAC are steered to the CPU. */
+       ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr, ocelot_port->pvid,
+                         ENTRYTYPE_LOCKED);
+
+       ocelot_init_port(ocelot, port);
+
+       err = register_netdev(dev);
+       if (err) {
+               dev_err(ocelot->dev, "register_netdev failed\n");
+               free_netdev(dev);
+       }
+
+       return err;
+}
index 2e1d8e1..6f5068c 100644 (file)
@@ -7,16 +7,6 @@
 #include <soc/mscc/ocelot.h>
 #include "ocelot_police.h"
 
-enum mscc_qos_rate_mode {
-       MSCC_QOS_RATE_MODE_DISABLED, /* Policer/shaper disabled */
-       MSCC_QOS_RATE_MODE_LINE, /* Measure line rate in kbps incl. IPG */
-       MSCC_QOS_RATE_MODE_DATA, /* Measures data rate in kbps excl. IPG */
-       MSCC_QOS_RATE_MODE_FRAME, /* Measures frame rate in fps */
-       __MSCC_QOS_RATE_MODE_END,
-       NUM_MSCC_QOS_RATE_MODE = __MSCC_QOS_RATE_MODE_END,
-       MSCC_QOS_RATE_MODE_MAX = __MSCC_QOS_RATE_MODE_END - 1,
-};
-
 /* Types for ANA:POL[0-192]:POL_MODE_CFG.FRM_MODE */
 #define POL_MODE_LINERATE   0 /* Incl IPG. Unit: 33 1/3 kbps, 4096 bytes */
 #define POL_MODE_DATARATE   1 /* Excl IPG. Unit: 33 1/3 kbps, 4096 bytes  */
@@ -30,19 +20,8 @@ enum mscc_qos_rate_mode {
 /* Default policer order */
 #define POL_ORDER 0x1d3 /* Ocelot policer order: Serial (QoS -> Port -> VCAP) */
 
-struct qos_policer_conf {
-       enum mscc_qos_rate_mode mode;
-       bool dlb; /* Enable DLB (dual leaky bucket mode */
-       bool cf;  /* Coupling flag (ignored in SLB mode) */
-       u32  cir; /* CIR in kbps/fps (ignored in SLB mode) */
-       u32  cbs; /* CBS in bytes/frames (ignored in SLB mode) */
-       u32  pir; /* PIR in kbps/fps */
-       u32  pbs; /* PBS in bytes/frames */
-       u8   ipg; /* Size of IPG when MSCC_QOS_RATE_MODE_LINE is chosen */
-};
-
-static int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix,
-                               struct qos_policer_conf *conf)
+int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix,
+                        struct qos_policer_conf *conf)
 {
        u32 cf = 0, cir_ena = 0, frm_mode = POL_MODE_LINERATE;
        u32 cir = 0, cbs = 0, pir = 0, pbs = 0;
@@ -228,27 +207,3 @@ int ocelot_port_policer_del(struct ocelot *ocelot, int port)
        return 0;
 }
 EXPORT_SYMBOL(ocelot_port_policer_del);
-
-int ocelot_ace_policer_add(struct ocelot *ocelot, u32 pol_ix,
-                          struct ocelot_policer *pol)
-{
-       struct qos_policer_conf pp = { 0 };
-
-       if (!pol)
-               return -EINVAL;
-
-       pp.mode = MSCC_QOS_RATE_MODE_DATA;
-       pp.pir = pol->rate;
-       pp.pbs = pol->burst;
-
-       return qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
-}
-
-int ocelot_ace_policer_del(struct ocelot *ocelot, u32 pol_ix)
-{
-       struct qos_policer_conf pp = { 0 };
-
-       pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
-
-       return qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
-}
index 792abd2..7adb05f 100644 (file)
@@ -9,9 +9,28 @@
 
 #include "ocelot.h"
 
-int ocelot_ace_policer_add(struct ocelot *ocelot, u32 pol_ix,
-                          struct ocelot_policer *pol);
+enum mscc_qos_rate_mode {
+       MSCC_QOS_RATE_MODE_DISABLED, /* Policer/shaper disabled */
+       MSCC_QOS_RATE_MODE_LINE, /* Measure line rate in kbps incl. IPG */
+       MSCC_QOS_RATE_MODE_DATA, /* Measures data rate in kbps excl. IPG */
+       MSCC_QOS_RATE_MODE_FRAME, /* Measures frame rate in fps */
+       __MSCC_QOS_RATE_MODE_END,
+       NUM_MSCC_QOS_RATE_MODE = __MSCC_QOS_RATE_MODE_END,
+       MSCC_QOS_RATE_MODE_MAX = __MSCC_QOS_RATE_MODE_END - 1,
+};
 
-int ocelot_ace_policer_del(struct ocelot *ocelot, u32 pol_ix);
+struct qos_policer_conf {
+       enum mscc_qos_rate_mode mode;
+       bool dlb; /* Enable DLB (dual leaky bucket mode */
+       bool cf;  /* Coupling flag (ignored in SLB mode) */
+       u32  cir; /* CIR in kbps/fps (ignored in SLB mode) */
+       u32  cbs; /* CBS in bytes/frames (ignored in SLB mode) */
+       u32  pir; /* PIR in kbps/fps */
+       u32  pbs; /* PBS in bytes/frames */
+       u8   ipg; /* Size of IPG when MSCC_QOS_RATE_MODE_LINE is chosen */
+};
+
+int qos_policer_conf_set(struct ocelot *ocelot, int port, u32 pol_ix,
+                        struct qos_policer_conf *conf);
 
 #endif /* _MSCC_OCELOT_POLICE_H_ */
index a3088a1..1e08fe4 100644 (file)
@@ -184,18 +184,20 @@ int ocelot_ptp_enable(struct ptp_clock_info *ptp,
                      struct ptp_clock_request *rq, int on)
 {
        struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info);
-       struct timespec64 ts_start, ts_period;
+       struct timespec64 ts_phase, ts_period;
        enum ocelot_ptp_pins ptp_pin;
        unsigned long flags;
        bool pps = false;
        int pin = -1;
+       s64 wf_high;
+       s64 wf_low;
        u32 val;
-       s64 ns;
 
        switch (rq->type) {
        case PTP_CLK_REQ_PEROUT:
                /* Reject requests with unsupported flags */
-               if (rq->perout.flags)
+               if (rq->perout.flags & ~(PTP_PEROUT_DUTY_CYCLE |
+                                        PTP_PEROUT_PHASE))
                        return -EOPNOTSUPP;
 
                pin = ptp_find_pin(ocelot->ptp_clock, PTP_PF_PEROUT,
@@ -211,22 +213,12 @@ int ocelot_ptp_enable(struct ptp_clock_info *ptp,
                else
                        return -EBUSY;
 
-               ts_start.tv_sec = rq->perout.start.sec;
-               ts_start.tv_nsec = rq->perout.start.nsec;
                ts_period.tv_sec = rq->perout.period.sec;
                ts_period.tv_nsec = rq->perout.period.nsec;
 
                if (ts_period.tv_sec == 1 && ts_period.tv_nsec == 0)
                        pps = true;
 
-               if (ts_start.tv_sec || (ts_start.tv_nsec && !pps)) {
-                       dev_warn(ocelot->dev,
-                                "Absolute start time not supported!\n");
-                       dev_warn(ocelot->dev,
-                                "Accept nsec for PPS phase adjustment, otherwise start time should be 0 0.\n");
-                       return -EINVAL;
-               }
-
                /* Handle turning off */
                if (!on) {
                        spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
@@ -236,16 +228,48 @@ int ocelot_ptp_enable(struct ptp_clock_info *ptp,
                        break;
                }
 
+               if (rq->perout.flags & PTP_PEROUT_PHASE) {
+                       ts_phase.tv_sec = rq->perout.phase.sec;
+                       ts_phase.tv_nsec = rq->perout.phase.nsec;
+               } else {
+                       /* Compatibility */
+                       ts_phase.tv_sec = rq->perout.start.sec;
+                       ts_phase.tv_nsec = rq->perout.start.nsec;
+               }
+               if (ts_phase.tv_sec || (ts_phase.tv_nsec && !pps)) {
+                       dev_warn(ocelot->dev,
+                                "Absolute start time not supported!\n");
+                       dev_warn(ocelot->dev,
+                                "Accept nsec for PPS phase adjustment, otherwise start time should be 0 0.\n");
+                       return -EINVAL;
+               }
+
+               /* Calculate waveform high and low times */
+               if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) {
+                       struct timespec64 ts_on;
+
+                       ts_on.tv_sec = rq->perout.on.sec;
+                       ts_on.tv_nsec = rq->perout.on.nsec;
+
+                       wf_high = timespec64_to_ns(&ts_on);
+               } else {
+                       if (pps) {
+                               wf_high = 1000;
+                       } else {
+                               wf_high = timespec64_to_ns(&ts_period);
+                               wf_high = div_s64(wf_high, 2);
+                       }
+               }
+
+               wf_low = timespec64_to_ns(&ts_period);
+               wf_low -= wf_high;
+
                /* Handle PPS request */
                if (pps) {
                        spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
-                       /* Pulse generated perout.start.nsec after TOD has
-                        * increased seconds.
-                        * Pulse width is set to 1us.
-                        */
-                       ocelot_write_rix(ocelot, ts_start.tv_nsec,
+                       ocelot_write_rix(ocelot, ts_phase.tv_nsec,
                                         PTP_PIN_WF_LOW_PERIOD, ptp_pin);
-                       ocelot_write_rix(ocelot, 1000,
+                       ocelot_write_rix(ocelot, wf_high,
                                         PTP_PIN_WF_HIGH_PERIOD, ptp_pin);
                        val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_CLOCK);
                        val |= PTP_PIN_CFG_SYNC;
@@ -255,14 +279,16 @@ int ocelot_ptp_enable(struct ptp_clock_info *ptp,
                }
 
                /* Handle periodic clock */
-               ns = timespec64_to_ns(&ts_period);
-               ns = ns >> 1;
-               if (ns > 0x3fffffff || ns <= 0x6)
+               if (wf_high > 0x3fffffff || wf_high <= 0x6)
+                       return -EINVAL;
+               if (wf_low > 0x3fffffff || wf_low <= 0x6)
                        return -EINVAL;
 
                spin_lock_irqsave(&ocelot->ptp_clock_lock, flags);
-               ocelot_write_rix(ocelot, ns, PTP_PIN_WF_LOW_PERIOD, ptp_pin);
-               ocelot_write_rix(ocelot, ns, PTP_PIN_WF_HIGH_PERIOD, ptp_pin);
+               ocelot_write_rix(ocelot, wf_low, PTP_PIN_WF_LOW_PERIOD,
+                                ptp_pin);
+               ocelot_write_rix(ocelot, wf_high, PTP_PIN_WF_HIGH_PERIOD,
+                                ptp_pin);
                val = PTP_PIN_CFG_ACTION(PTP_PIN_ACTION_CLOCK);
                ocelot_write_rix(ocelot, val, PTP_PIN_CFG, ptp_pin);
                spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags);
diff --git a/drivers/net/ethernet/mscc/ocelot_regs.c b/drivers/net/ethernet/mscc/ocelot_regs.c
deleted file mode 100644 (file)
index 81d81ff..0000000
+++ /dev/null
@@ -1,450 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
-/*
- * Microsemi Ocelot Switch driver
- *
- * Copyright (c) 2017 Microsemi Corporation
- */
-#include "ocelot.h"
-#include <soc/mscc/ocelot_hsio.h>
-
-static const u32 ocelot_ana_regmap[] = {
-       REG(ANA_ADVLEARN,                  0x009000),
-       REG(ANA_VLANMASK,                  0x009004),
-       REG(ANA_PORT_B_DOMAIN,             0x009008),
-       REG(ANA_ANAGEFIL,                  0x00900c),
-       REG(ANA_ANEVENTS,                  0x009010),
-       REG(ANA_STORMLIMIT_BURST,          0x009014),
-       REG(ANA_STORMLIMIT_CFG,            0x009018),
-       REG(ANA_ISOLATED_PORTS,            0x009028),
-       REG(ANA_COMMUNITY_PORTS,           0x00902c),
-       REG(ANA_AUTOAGE,                   0x009030),
-       REG(ANA_MACTOPTIONS,               0x009034),
-       REG(ANA_LEARNDISC,                 0x009038),
-       REG(ANA_AGENCTRL,                  0x00903c),
-       REG(ANA_MIRRORPORTS,               0x009040),
-       REG(ANA_EMIRRORPORTS,              0x009044),
-       REG(ANA_FLOODING,                  0x009048),
-       REG(ANA_FLOODING_IPMC,             0x00904c),
-       REG(ANA_SFLOW_CFG,                 0x009050),
-       REG(ANA_PORT_MODE,                 0x009080),
-       REG(ANA_PGID_PGID,                 0x008c00),
-       REG(ANA_TABLES_ANMOVED,            0x008b30),
-       REG(ANA_TABLES_MACHDATA,           0x008b34),
-       REG(ANA_TABLES_MACLDATA,           0x008b38),
-       REG(ANA_TABLES_MACACCESS,          0x008b3c),
-       REG(ANA_TABLES_MACTINDX,           0x008b40),
-       REG(ANA_TABLES_VLANACCESS,         0x008b44),
-       REG(ANA_TABLES_VLANTIDX,           0x008b48),
-       REG(ANA_TABLES_ISDXACCESS,         0x008b4c),
-       REG(ANA_TABLES_ISDXTIDX,           0x008b50),
-       REG(ANA_TABLES_ENTRYLIM,           0x008b00),
-       REG(ANA_TABLES_PTP_ID_HIGH,        0x008b54),
-       REG(ANA_TABLES_PTP_ID_LOW,         0x008b58),
-       REG(ANA_MSTI_STATE,                0x008e00),
-       REG(ANA_PORT_VLAN_CFG,             0x007000),
-       REG(ANA_PORT_DROP_CFG,             0x007004),
-       REG(ANA_PORT_QOS_CFG,              0x007008),
-       REG(ANA_PORT_VCAP_CFG,             0x00700c),
-       REG(ANA_PORT_VCAP_S1_KEY_CFG,      0x007010),
-       REG(ANA_PORT_VCAP_S2_CFG,          0x00701c),
-       REG(ANA_PORT_PCP_DEI_MAP,          0x007020),
-       REG(ANA_PORT_CPU_FWD_CFG,          0x007060),
-       REG(ANA_PORT_CPU_FWD_BPDU_CFG,     0x007064),
-       REG(ANA_PORT_CPU_FWD_GARP_CFG,     0x007068),
-       REG(ANA_PORT_CPU_FWD_CCM_CFG,      0x00706c),
-       REG(ANA_PORT_PORT_CFG,             0x007070),
-       REG(ANA_PORT_POL_CFG,              0x007074),
-       REG(ANA_PORT_PTP_CFG,              0x007078),
-       REG(ANA_PORT_PTP_DLY1_CFG,         0x00707c),
-       REG(ANA_OAM_UPM_LM_CNT,            0x007c00),
-       REG(ANA_PORT_PTP_DLY2_CFG,         0x007080),
-       REG(ANA_PFC_PFC_CFG,               0x008800),
-       REG(ANA_PFC_PFC_TIMER,             0x008804),
-       REG(ANA_IPT_OAM_MEP_CFG,           0x008000),
-       REG(ANA_IPT_IPT,                   0x008004),
-       REG(ANA_PPT_PPT,                   0x008ac0),
-       REG(ANA_FID_MAP_FID_MAP,           0x000000),
-       REG(ANA_AGGR_CFG,                  0x0090b4),
-       REG(ANA_CPUQ_CFG,                  0x0090b8),
-       REG(ANA_CPUQ_CFG2,                 0x0090bc),
-       REG(ANA_CPUQ_8021_CFG,             0x0090c0),
-       REG(ANA_DSCP_CFG,                  0x009100),
-       REG(ANA_DSCP_REWR_CFG,             0x009200),
-       REG(ANA_VCAP_RNG_TYPE_CFG,         0x009240),
-       REG(ANA_VCAP_RNG_VAL_CFG,          0x009260),
-       REG(ANA_VRAP_CFG,                  0x009280),
-       REG(ANA_VRAP_HDR_DATA,             0x009284),
-       REG(ANA_VRAP_HDR_MASK,             0x009288),
-       REG(ANA_DISCARD_CFG,               0x00928c),
-       REG(ANA_FID_CFG,                   0x009290),
-       REG(ANA_POL_PIR_CFG,               0x004000),
-       REG(ANA_POL_CIR_CFG,               0x004004),
-       REG(ANA_POL_MODE_CFG,              0x004008),
-       REG(ANA_POL_PIR_STATE,             0x00400c),
-       REG(ANA_POL_CIR_STATE,             0x004010),
-       REG(ANA_POL_STATE,                 0x004014),
-       REG(ANA_POL_FLOWC,                 0x008b80),
-       REG(ANA_POL_HYST,                  0x008bec),
-       REG(ANA_POL_MISC_CFG,              0x008bf0),
-};
-
-static const u32 ocelot_qs_regmap[] = {
-       REG(QS_XTR_GRP_CFG,                0x000000),
-       REG(QS_XTR_RD,                     0x000008),
-       REG(QS_XTR_FRM_PRUNING,            0x000010),
-       REG(QS_XTR_FLUSH,                  0x000018),
-       REG(QS_XTR_DATA_PRESENT,           0x00001c),
-       REG(QS_XTR_CFG,                    0x000020),
-       REG(QS_INJ_GRP_CFG,                0x000024),
-       REG(QS_INJ_WR,                     0x00002c),
-       REG(QS_INJ_CTRL,                   0x000034),
-       REG(QS_INJ_STATUS,                 0x00003c),
-       REG(QS_INJ_ERR,                    0x000040),
-       REG(QS_INH_DBG,                    0x000048),
-};
-
-static const u32 ocelot_qsys_regmap[] = {
-       REG(QSYS_PORT_MODE,                0x011200),
-       REG(QSYS_SWITCH_PORT_MODE,         0x011234),
-       REG(QSYS_STAT_CNT_CFG,             0x011264),
-       REG(QSYS_EEE_CFG,                  0x011268),
-       REG(QSYS_EEE_THRES,                0x011294),
-       REG(QSYS_IGR_NO_SHARING,           0x011298),
-       REG(QSYS_EGR_NO_SHARING,           0x01129c),
-       REG(QSYS_SW_STATUS,                0x0112a0),
-       REG(QSYS_EXT_CPU_CFG,              0x0112d0),
-       REG(QSYS_PAD_CFG,                  0x0112d4),
-       REG(QSYS_CPU_GROUP_MAP,            0x0112d8),
-       REG(QSYS_QMAP,                     0x0112dc),
-       REG(QSYS_ISDX_SGRP,                0x011400),
-       REG(QSYS_TIMED_FRAME_ENTRY,        0x014000),
-       REG(QSYS_TFRM_MISC,                0x011310),
-       REG(QSYS_TFRM_PORT_DLY,            0x011314),
-       REG(QSYS_TFRM_TIMER_CFG_1,         0x011318),
-       REG(QSYS_TFRM_TIMER_CFG_2,         0x01131c),
-       REG(QSYS_TFRM_TIMER_CFG_3,         0x011320),
-       REG(QSYS_TFRM_TIMER_CFG_4,         0x011324),
-       REG(QSYS_TFRM_TIMER_CFG_5,         0x011328),
-       REG(QSYS_TFRM_TIMER_CFG_6,         0x01132c),
-       REG(QSYS_TFRM_TIMER_CFG_7,         0x011330),
-       REG(QSYS_TFRM_TIMER_CFG_8,         0x011334),
-       REG(QSYS_RED_PROFILE,              0x011338),
-       REG(QSYS_RES_QOS_MODE,             0x011378),
-       REG(QSYS_RES_CFG,                  0x012000),
-       REG(QSYS_RES_STAT,                 0x012004),
-       REG(QSYS_EGR_DROP_MODE,            0x01137c),
-       REG(QSYS_EQ_CTRL,                  0x011380),
-       REG(QSYS_EVENTS_CORE,              0x011384),
-       REG(QSYS_CIR_CFG,                  0x000000),
-       REG(QSYS_EIR_CFG,                  0x000004),
-       REG(QSYS_SE_CFG,                   0x000008),
-       REG(QSYS_SE_DWRR_CFG,              0x00000c),
-       REG(QSYS_SE_CONNECT,               0x00003c),
-       REG(QSYS_SE_DLB_SENSE,             0x000040),
-       REG(QSYS_CIR_STATE,                0x000044),
-       REG(QSYS_EIR_STATE,                0x000048),
-       REG(QSYS_SE_STATE,                 0x00004c),
-       REG(QSYS_HSCH_MISC_CFG,            0x011388),
-};
-
-static const u32 ocelot_rew_regmap[] = {
-       REG(REW_PORT_VLAN_CFG,             0x000000),
-       REG(REW_TAG_CFG,                   0x000004),
-       REG(REW_PORT_CFG,                  0x000008),
-       REG(REW_DSCP_CFG,                  0x00000c),
-       REG(REW_PCP_DEI_QOS_MAP_CFG,       0x000010),
-       REG(REW_PTP_CFG,                   0x000050),
-       REG(REW_PTP_DLY1_CFG,              0x000054),
-       REG(REW_DSCP_REMAP_DP1_CFG,        0x000690),
-       REG(REW_DSCP_REMAP_CFG,            0x000790),
-       REG(REW_STAT_CFG,                  0x000890),
-       REG(REW_PPT,                       0x000680),
-};
-
-static const u32 ocelot_sys_regmap[] = {
-       REG(SYS_COUNT_RX_OCTETS,           0x000000),
-       REG(SYS_COUNT_RX_UNICAST,          0x000004),
-       REG(SYS_COUNT_RX_MULTICAST,        0x000008),
-       REG(SYS_COUNT_RX_BROADCAST,        0x00000c),
-       REG(SYS_COUNT_RX_SHORTS,           0x000010),
-       REG(SYS_COUNT_RX_FRAGMENTS,        0x000014),
-       REG(SYS_COUNT_RX_JABBERS,          0x000018),
-       REG(SYS_COUNT_RX_CRC_ALIGN_ERRS,   0x00001c),
-       REG(SYS_COUNT_RX_SYM_ERRS,         0x000020),
-       REG(SYS_COUNT_RX_64,               0x000024),
-       REG(SYS_COUNT_RX_65_127,           0x000028),
-       REG(SYS_COUNT_RX_128_255,          0x00002c),
-       REG(SYS_COUNT_RX_256_1023,         0x000030),
-       REG(SYS_COUNT_RX_1024_1526,        0x000034),
-       REG(SYS_COUNT_RX_1527_MAX,         0x000038),
-       REG(SYS_COUNT_RX_PAUSE,            0x00003c),
-       REG(SYS_COUNT_RX_CONTROL,          0x000040),
-       REG(SYS_COUNT_RX_LONGS,            0x000044),
-       REG(SYS_COUNT_RX_CLASSIFIED_DROPS, 0x000048),
-       REG(SYS_COUNT_TX_OCTETS,           0x000100),
-       REG(SYS_COUNT_TX_UNICAST,          0x000104),
-       REG(SYS_COUNT_TX_MULTICAST,        0x000108),
-       REG(SYS_COUNT_TX_BROADCAST,        0x00010c),
-       REG(SYS_COUNT_TX_COLLISION,        0x000110),
-       REG(SYS_COUNT_TX_DROPS,            0x000114),
-       REG(SYS_COUNT_TX_PAUSE,            0x000118),
-       REG(SYS_COUNT_TX_64,               0x00011c),
-       REG(SYS_COUNT_TX_65_127,           0x000120),
-       REG(SYS_COUNT_TX_128_511,          0x000124),
-       REG(SYS_COUNT_TX_512_1023,         0x000128),
-       REG(SYS_COUNT_TX_1024_1526,        0x00012c),
-       REG(SYS_COUNT_TX_1527_MAX,         0x000130),
-       REG(SYS_COUNT_TX_AGING,            0x000170),
-       REG(SYS_RESET_CFG,                 0x000508),
-       REG(SYS_CMID,                      0x00050c),
-       REG(SYS_VLAN_ETYPE_CFG,            0x000510),
-       REG(SYS_PORT_MODE,                 0x000514),
-       REG(SYS_FRONT_PORT_MODE,           0x000548),
-       REG(SYS_FRM_AGING,                 0x000574),
-       REG(SYS_STAT_CFG,                  0x000578),
-       REG(SYS_SW_STATUS,                 0x00057c),
-       REG(SYS_MISC_CFG,                  0x0005ac),
-       REG(SYS_REW_MAC_HIGH_CFG,          0x0005b0),
-       REG(SYS_REW_MAC_LOW_CFG,           0x0005dc),
-       REG(SYS_CM_ADDR,                   0x000500),
-       REG(SYS_CM_DATA,                   0x000504),
-       REG(SYS_PAUSE_CFG,                 0x000608),
-       REG(SYS_PAUSE_TOT_CFG,             0x000638),
-       REG(SYS_ATOP,                      0x00063c),
-       REG(SYS_ATOP_TOT_CFG,              0x00066c),
-       REG(SYS_MAC_FC_CFG,                0x000670),
-       REG(SYS_MMGT,                      0x00069c),
-       REG(SYS_MMGT_FAST,                 0x0006a0),
-       REG(SYS_EVENTS_DIF,                0x0006a4),
-       REG(SYS_EVENTS_CORE,               0x0006b4),
-       REG(SYS_CNT,                       0x000000),
-       REG(SYS_PTP_STATUS,                0x0006b8),
-       REG(SYS_PTP_TXSTAMP,               0x0006bc),
-       REG(SYS_PTP_NXT,                   0x0006c0),
-       REG(SYS_PTP_CFG,                   0x0006c4),
-};
-
-static const u32 ocelot_s2_regmap[] = {
-       REG(S2_CORE_UPDATE_CTRL,           0x000000),
-       REG(S2_CORE_MV_CFG,                0x000004),
-       REG(S2_CACHE_ENTRY_DAT,            0x000008),
-       REG(S2_CACHE_MASK_DAT,             0x000108),
-       REG(S2_CACHE_ACTION_DAT,           0x000208),
-       REG(S2_CACHE_CNT_DAT,              0x000308),
-       REG(S2_CACHE_TG_DAT,               0x000388),
-};
-
-static const u32 ocelot_ptp_regmap[] = {
-       REG(PTP_PIN_CFG,                   0x000000),
-       REG(PTP_PIN_TOD_SEC_MSB,           0x000004),
-       REG(PTP_PIN_TOD_SEC_LSB,           0x000008),
-       REG(PTP_PIN_TOD_NSEC,              0x00000c),
-       REG(PTP_PIN_WF_HIGH_PERIOD,        0x000014),
-       REG(PTP_PIN_WF_LOW_PERIOD,         0x000018),
-       REG(PTP_CFG_MISC,                  0x0000a0),
-       REG(PTP_CLK_CFG_ADJ_CFG,           0x0000a4),
-       REG(PTP_CLK_CFG_ADJ_FREQ,          0x0000a8),
-};
-
-static const u32 *ocelot_regmap[] = {
-       [ANA] = ocelot_ana_regmap,
-       [QS] = ocelot_qs_regmap,
-       [QSYS] = ocelot_qsys_regmap,
-       [REW] = ocelot_rew_regmap,
-       [SYS] = ocelot_sys_regmap,
-       [S2] = ocelot_s2_regmap,
-       [PTP] = ocelot_ptp_regmap,
-};
-
-static const struct reg_field ocelot_regfields[] = {
-       [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 11, 11),
-       [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 10),
-       [ANA_ANEVENTS_MSTI_DROP] = REG_FIELD(ANA_ANEVENTS, 27, 27),
-       [ANA_ANEVENTS_ACLKILL] = REG_FIELD(ANA_ANEVENTS, 26, 26),
-       [ANA_ANEVENTS_ACLUSED] = REG_FIELD(ANA_ANEVENTS, 25, 25),
-       [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24),
-       [ANA_ANEVENTS_VS2TTL1] = REG_FIELD(ANA_ANEVENTS, 23, 23),
-       [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22),
-       [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21),
-       [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20),
-       [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19),
-       [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18),
-       [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17),
-       [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16),
-       [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15),
-       [ANA_ANEVENTS_DROPPED] = REG_FIELD(ANA_ANEVENTS, 14, 14),
-       [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13),
-       [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12),
-       [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11),
-       [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10),
-       [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9),
-       [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8),
-       [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7),
-       [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6),
-       [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5),
-       [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4),
-       [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3),
-       [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2),
-       [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1),
-       [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0),
-       [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 18, 18),
-       [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 10, 11),
-       [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 9),
-       [QSYS_TIMED_FRAME_ENTRY_TFRM_VLD] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 20, 20),
-       [QSYS_TIMED_FRAME_ENTRY_TFRM_FP] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 8, 19),
-       [QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 4, 7),
-       [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 1, 3),
-       [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 0, 0),
-       [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 2, 2),
-       [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 1, 1),
-       [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 0, 0),
-};
-
-static const struct ocelot_stat_layout ocelot_stats_layout[] = {
-       { .name = "rx_octets", .offset = 0x00, },
-       { .name = "rx_unicast", .offset = 0x01, },
-       { .name = "rx_multicast", .offset = 0x02, },
-       { .name = "rx_broadcast", .offset = 0x03, },
-       { .name = "rx_shorts", .offset = 0x04, },
-       { .name = "rx_fragments", .offset = 0x05, },
-       { .name = "rx_jabbers", .offset = 0x06, },
-       { .name = "rx_crc_align_errs", .offset = 0x07, },
-       { .name = "rx_sym_errs", .offset = 0x08, },
-       { .name = "rx_frames_below_65_octets", .offset = 0x09, },
-       { .name = "rx_frames_65_to_127_octets", .offset = 0x0A, },
-       { .name = "rx_frames_128_to_255_octets", .offset = 0x0B, },
-       { .name = "rx_frames_256_to_511_octets", .offset = 0x0C, },
-       { .name = "rx_frames_512_to_1023_octets", .offset = 0x0D, },
-       { .name = "rx_frames_1024_to_1526_octets", .offset = 0x0E, },
-       { .name = "rx_frames_over_1526_octets", .offset = 0x0F, },
-       { .name = "rx_pause", .offset = 0x10, },
-       { .name = "rx_control", .offset = 0x11, },
-       { .name = "rx_longs", .offset = 0x12, },
-       { .name = "rx_classified_drops", .offset = 0x13, },
-       { .name = "rx_red_prio_0", .offset = 0x14, },
-       { .name = "rx_red_prio_1", .offset = 0x15, },
-       { .name = "rx_red_prio_2", .offset = 0x16, },
-       { .name = "rx_red_prio_3", .offset = 0x17, },
-       { .name = "rx_red_prio_4", .offset = 0x18, },
-       { .name = "rx_red_prio_5", .offset = 0x19, },
-       { .name = "rx_red_prio_6", .offset = 0x1A, },
-       { .name = "rx_red_prio_7", .offset = 0x1B, },
-       { .name = "rx_yellow_prio_0", .offset = 0x1C, },
-       { .name = "rx_yellow_prio_1", .offset = 0x1D, },
-       { .name = "rx_yellow_prio_2", .offset = 0x1E, },
-       { .name = "rx_yellow_prio_3", .offset = 0x1F, },
-       { .name = "rx_yellow_prio_4", .offset = 0x20, },
-       { .name = "rx_yellow_prio_5", .offset = 0x21, },
-       { .name = "rx_yellow_prio_6", .offset = 0x22, },
-       { .name = "rx_yellow_prio_7", .offset = 0x23, },
-       { .name = "rx_green_prio_0", .offset = 0x24, },
-       { .name = "rx_green_prio_1", .offset = 0x25, },
-       { .name = "rx_green_prio_2", .offset = 0x26, },
-       { .name = "rx_green_prio_3", .offset = 0x27, },
-       { .name = "rx_green_prio_4", .offset = 0x28, },
-       { .name = "rx_green_prio_5", .offset = 0x29, },
-       { .name = "rx_green_prio_6", .offset = 0x2A, },
-       { .name = "rx_green_prio_7", .offset = 0x2B, },
-       { .name = "tx_octets", .offset = 0x40, },
-       { .name = "tx_unicast", .offset = 0x41, },
-       { .name = "tx_multicast", .offset = 0x42, },
-       { .name = "tx_broadcast", .offset = 0x43, },
-       { .name = "tx_collision", .offset = 0x44, },
-       { .name = "tx_drops", .offset = 0x45, },
-       { .name = "tx_pause", .offset = 0x46, },
-       { .name = "tx_frames_below_65_octets", .offset = 0x47, },
-       { .name = "tx_frames_65_to_127_octets", .offset = 0x48, },
-       { .name = "tx_frames_128_255_octets", .offset = 0x49, },
-       { .name = "tx_frames_256_511_octets", .offset = 0x4A, },
-       { .name = "tx_frames_512_1023_octets", .offset = 0x4B, },
-       { .name = "tx_frames_1024_1526_octets", .offset = 0x4C, },
-       { .name = "tx_frames_over_1526_octets", .offset = 0x4D, },
-       { .name = "tx_yellow_prio_0", .offset = 0x4E, },
-       { .name = "tx_yellow_prio_1", .offset = 0x4F, },
-       { .name = "tx_yellow_prio_2", .offset = 0x50, },
-       { .name = "tx_yellow_prio_3", .offset = 0x51, },
-       { .name = "tx_yellow_prio_4", .offset = 0x52, },
-       { .name = "tx_yellow_prio_5", .offset = 0x53, },
-       { .name = "tx_yellow_prio_6", .offset = 0x54, },
-       { .name = "tx_yellow_prio_7", .offset = 0x55, },
-       { .name = "tx_green_prio_0", .offset = 0x56, },
-       { .name = "tx_green_prio_1", .offset = 0x57, },
-       { .name = "tx_green_prio_2", .offset = 0x58, },
-       { .name = "tx_green_prio_3", .offset = 0x59, },
-       { .name = "tx_green_prio_4", .offset = 0x5A, },
-       { .name = "tx_green_prio_5", .offset = 0x5B, },
-       { .name = "tx_green_prio_6", .offset = 0x5C, },
-       { .name = "tx_green_prio_7", .offset = 0x5D, },
-       { .name = "tx_aged", .offset = 0x5E, },
-       { .name = "drop_local", .offset = 0x80, },
-       { .name = "drop_tail", .offset = 0x81, },
-       { .name = "drop_yellow_prio_0", .offset = 0x82, },
-       { .name = "drop_yellow_prio_1", .offset = 0x83, },
-       { .name = "drop_yellow_prio_2", .offset = 0x84, },
-       { .name = "drop_yellow_prio_3", .offset = 0x85, },
-       { .name = "drop_yellow_prio_4", .offset = 0x86, },
-       { .name = "drop_yellow_prio_5", .offset = 0x87, },
-       { .name = "drop_yellow_prio_6", .offset = 0x88, },
-       { .name = "drop_yellow_prio_7", .offset = 0x89, },
-       { .name = "drop_green_prio_0", .offset = 0x8A, },
-       { .name = "drop_green_prio_1", .offset = 0x8B, },
-       { .name = "drop_green_prio_2", .offset = 0x8C, },
-       { .name = "drop_green_prio_3", .offset = 0x8D, },
-       { .name = "drop_green_prio_4", .offset = 0x8E, },
-       { .name = "drop_green_prio_5", .offset = 0x8F, },
-       { .name = "drop_green_prio_6", .offset = 0x90, },
-       { .name = "drop_green_prio_7", .offset = 0x91, },
-};
-
-static void ocelot_pll5_init(struct ocelot *ocelot)
-{
-       /* Configure PLL5. This will need a proper CCF driver
-        * The values are coming from the VTSS API for Ocelot
-        */
-       regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG4,
-                    HSIO_PLL5G_CFG4_IB_CTRL(0x7600) |
-                    HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8));
-       regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG0,
-                    HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) |
-                    HSIO_PLL5G_CFG0_CPU_CLK_DIV(2) |
-                    HSIO_PLL5G_CFG0_ENA_BIAS |
-                    HSIO_PLL5G_CFG0_ENA_VCO_BUF |
-                    HSIO_PLL5G_CFG0_ENA_CP1 |
-                    HSIO_PLL5G_CFG0_SELCPI(2) |
-                    HSIO_PLL5G_CFG0_LOOP_BW_RES(0xe) |
-                    HSIO_PLL5G_CFG0_SELBGV820(4) |
-                    HSIO_PLL5G_CFG0_DIV4 |
-                    HSIO_PLL5G_CFG0_ENA_CLKTREE |
-                    HSIO_PLL5G_CFG0_ENA_LANE);
-       regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG2,
-                    HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET |
-                    HSIO_PLL5G_CFG2_EN_RESET_OVERRUN |
-                    HSIO_PLL5G_CFG2_GAIN_TEST(0x8) |
-                    HSIO_PLL5G_CFG2_ENA_AMPCTRL |
-                    HSIO_PLL5G_CFG2_PWD_AMPCTRL_N |
-                    HSIO_PLL5G_CFG2_AMPC_SEL(0x10));
-}
-
-int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
-{
-       int ret;
-
-       ocelot->map = ocelot_regmap;
-       ocelot->stats_layout = ocelot_stats_layout;
-       ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
-       ocelot->shared_queue_sz = 224 * 1024;
-       ocelot->num_mact_rows = 1024;
-       ocelot->ops = ops;
-
-       ret = ocelot_regfields_init(ocelot, ocelot_regfields);
-       if (ret)
-               return ret;
-
-       ocelot_pll5_init(ocelot);
-
-       eth_random_addr(ocelot->base_mac);
-       ocelot->base_mac[5] &= 0xf0;
-
-       return 0;
-}
-EXPORT_SYMBOL(ocelot_chip_init);
diff --git a/drivers/net/ethernet/mscc/ocelot_tc.c b/drivers/net/ethernet/mscc/ocelot_tc.c
deleted file mode 100644 (file)
index b7baf76..0000000
+++ /dev/null
@@ -1,179 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR MIT)
-/* Microsemi Ocelot Switch TC driver
- *
- * Copyright (c) 2019 Microsemi Corporation
- */
-
-#include <soc/mscc/ocelot.h>
-#include "ocelot_tc.h"
-#include "ocelot_ace.h"
-#include <net/pkt_cls.h>
-
-static int ocelot_setup_tc_cls_matchall(struct ocelot_port_private *priv,
-                                       struct tc_cls_matchall_offload *f,
-                                       bool ingress)
-{
-       struct netlink_ext_ack *extack = f->common.extack;
-       struct ocelot *ocelot = priv->port.ocelot;
-       struct ocelot_policer pol = { 0 };
-       struct flow_action_entry *action;
-       int port = priv->chip_port;
-       int err;
-
-       if (!ingress) {
-               NL_SET_ERR_MSG_MOD(extack, "Only ingress is supported");
-               return -EOPNOTSUPP;
-       }
-
-       switch (f->command) {
-       case TC_CLSMATCHALL_REPLACE:
-               if (!flow_offload_has_one_action(&f->rule->action)) {
-                       NL_SET_ERR_MSG_MOD(extack,
-                                          "Only one action is supported");
-                       return -EOPNOTSUPP;
-               }
-
-               if (priv->tc.block_shared) {
-                       NL_SET_ERR_MSG_MOD(extack,
-                                          "Rate limit is not supported on shared blocks");
-                       return -EOPNOTSUPP;
-               }
-
-               action = &f->rule->action.entries[0];
-
-               if (action->id != FLOW_ACTION_POLICE) {
-                       NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
-                       return -EOPNOTSUPP;
-               }
-
-               if (priv->tc.police_id && priv->tc.police_id != f->cookie) {
-                       NL_SET_ERR_MSG_MOD(extack,
-                                          "Only one policer per port is supported");
-                       return -EEXIST;
-               }
-
-               pol.rate = (u32)div_u64(action->police.rate_bytes_ps, 1000) * 8;
-               pol.burst = (u32)div_u64(action->police.rate_bytes_ps *
-                                        PSCHED_NS2TICKS(action->police.burst),
-                                        PSCHED_TICKS_PER_SEC);
-
-               err = ocelot_port_policer_add(ocelot, port, &pol);
-               if (err) {
-                       NL_SET_ERR_MSG_MOD(extack, "Could not add policer");
-                       return err;
-               }
-
-               priv->tc.police_id = f->cookie;
-               priv->tc.offload_cnt++;
-               return 0;
-       case TC_CLSMATCHALL_DESTROY:
-               if (priv->tc.police_id != f->cookie)
-                       return -ENOENT;
-
-               err = ocelot_port_policer_del(ocelot, port);
-               if (err) {
-                       NL_SET_ERR_MSG_MOD(extack,
-                                          "Could not delete policer");
-                       return err;
-               }
-               priv->tc.police_id = 0;
-               priv->tc.offload_cnt--;
-               return 0;
-       case TC_CLSMATCHALL_STATS: /* fall through */
-       default:
-               return -EOPNOTSUPP;
-       }
-}
-
-static int ocelot_setup_tc_block_cb(enum tc_setup_type type,
-                                   void *type_data,
-                                   void *cb_priv, bool ingress)
-{
-       struct ocelot_port_private *priv = cb_priv;
-
-       if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
-               return -EOPNOTSUPP;
-
-       switch (type) {
-       case TC_SETUP_CLSMATCHALL:
-               return ocelot_setup_tc_cls_matchall(priv, type_data, ingress);
-       case TC_SETUP_CLSFLOWER:
-               return ocelot_setup_tc_cls_flower(priv, type_data, ingress);
-       default:
-               return -EOPNOTSUPP;
-       }
-}
-
-static int ocelot_setup_tc_block_cb_ig(enum tc_setup_type type,
-                                      void *type_data,
-                                      void *cb_priv)
-{
-       return ocelot_setup_tc_block_cb(type, type_data,
-                                       cb_priv, true);
-}
-
-static int ocelot_setup_tc_block_cb_eg(enum tc_setup_type type,
-                                      void *type_data,
-                                      void *cb_priv)
-{
-       return ocelot_setup_tc_block_cb(type, type_data,
-                                       cb_priv, false);
-}
-
-static LIST_HEAD(ocelot_block_cb_list);
-
-static int ocelot_setup_tc_block(struct ocelot_port_private *priv,
-                                struct flow_block_offload *f)
-{
-       struct flow_block_cb *block_cb;
-       flow_setup_cb_t *cb;
-
-       if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
-               cb = ocelot_setup_tc_block_cb_ig;
-               priv->tc.block_shared = f->block_shared;
-       } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
-               cb = ocelot_setup_tc_block_cb_eg;
-       } else {
-               return -EOPNOTSUPP;
-       }
-
-       f->driver_block_list = &ocelot_block_cb_list;
-
-       switch (f->command) {
-       case FLOW_BLOCK_BIND:
-               if (flow_block_cb_is_busy(cb, priv, &ocelot_block_cb_list))
-                       return -EBUSY;
-
-               block_cb = flow_block_cb_alloc(cb, priv, priv, NULL);
-               if (IS_ERR(block_cb))
-                       return PTR_ERR(block_cb);
-
-               flow_block_cb_add(block_cb, f);
-               list_add_tail(&block_cb->driver_list, f->driver_block_list);
-               return 0;
-       case FLOW_BLOCK_UNBIND:
-               block_cb = flow_block_cb_lookup(f->block, cb, priv);
-               if (!block_cb)
-                       return -ENOENT;
-
-               flow_block_cb_remove(block_cb, f);
-               list_del(&block_cb->driver_list);
-               return 0;
-       default:
-               return -EOPNOTSUPP;
-       }
-}
-
-int ocelot_setup_tc(struct net_device *dev, enum tc_setup_type type,
-                   void *type_data)
-{
-       struct ocelot_port_private *priv = netdev_priv(dev);
-
-       switch (type) {
-       case TC_SETUP_BLOCK:
-               return ocelot_setup_tc_block(priv, type_data);
-       default:
-               return -EOPNOTSUPP;
-       }
-       return 0;
-}
diff --git a/drivers/net/ethernet/mscc/ocelot_tc.h b/drivers/net/ethernet/mscc/ocelot_tc.h
deleted file mode 100644 (file)
index 61757c2..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/* Microsemi Ocelot Switch driver
- *
- * Copyright (c) 2019 Microsemi Corporation
- */
-
-#ifndef _MSCC_OCELOT_TC_H_
-#define _MSCC_OCELOT_TC_H_
-
-#include <linux/netdevice.h>
-
-struct ocelot_port_tc {
-       bool block_shared;
-       unsigned long offload_cnt;
-
-       unsigned long police_id;
-};
-
-int ocelot_setup_tc(struct net_device *dev, enum tc_setup_type type,
-                   void *type_data);
-
-#endif /* _MSCC_OCELOT_TC_H_ */
similarity index 77%
rename from drivers/net/ethernet/mscc/ocelot_ace.c
rename to drivers/net/ethernet/mscc/ocelot_vcap.c
index dfd82a3..3ef620f 100644 (file)
@@ -8,7 +8,7 @@
 
 #include <soc/mscc/ocelot_vcap.h>
 #include "ocelot_police.h"
-#include "ocelot_ace.h"
+#include "ocelot_vcap.h"
 #include "ocelot_s2.h"
 
 #define OCELOT_POLICER_DISCARD 0x17f
@@ -119,7 +119,8 @@ static void vcap_cache2entry(struct ocelot *ocelot, struct vcap_data *data)
 static void vcap_action2cache(struct ocelot *ocelot, struct vcap_data *data)
 {
        const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
-       u32 action_words, i, width, mask;
+       u32 action_words, mask;
+       int i, width;
 
        /* Encode action type */
        width = vcap_is2->action_type_width;
@@ -141,7 +142,8 @@ static void vcap_action2cache(struct ocelot *ocelot, struct vcap_data *data)
 static void vcap_cache2action(struct ocelot *ocelot, struct vcap_data *data)
 {
        const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
-       u32 action_words, i, width;
+       u32 action_words;
+       int i, width;
 
        action_words = DIV_ROUND_UP(vcap_is2->action_width, ENTRY_WIDTH);
 
@@ -161,8 +163,8 @@ static void vcap_cache2action(struct ocelot *ocelot, struct vcap_data *data)
 static void is2_data_get(struct ocelot *ocelot, struct vcap_data *data, int ix)
 {
        const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
-       u32 i, col, offset, count, cnt, base;
-       u32 width = vcap_is2->tg_width;
+       int i, col, offset, count, cnt, base;
+       int width = vcap_is2->tg_width;
 
        count = (data->tg_sw == VCAP_TG_HALF ? 2 : 4);
        col = (ix % 2);
@@ -300,10 +302,10 @@ static void vcap_action_set(struct ocelot *ocelot, struct vcap_data *data,
 }
 
 static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data,
-                          struct ocelot_ace_rule *ace)
+                          struct ocelot_vcap_filter *filter)
 {
-       switch (ace->action) {
-       case OCELOT_ACL_ACTION_DROP:
+       switch (filter->action) {
+       case OCELOT_VCAP_ACTION_DROP:
                vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0);
                vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 1);
                vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 1);
@@ -312,7 +314,7 @@ static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data,
                vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0);
                vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 0);
                break;
-       case OCELOT_ACL_ACTION_TRAP:
+       case OCELOT_VCAP_ACTION_TRAP:
                vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0);
                vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 1);
                vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 0);
@@ -320,12 +322,12 @@ static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data,
                vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0);
                vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 1);
                break;
-       case OCELOT_ACL_ACTION_POLICE:
+       case OCELOT_VCAP_ACTION_POLICE:
                vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0);
                vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 0);
                vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 1);
                vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_IDX,
-                               ace->pol_ix);
+                               filter->pol_ix);
                vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0);
                vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 0);
                break;
@@ -333,11 +335,11 @@ static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data,
 }
 
 static void is2_entry_set(struct ocelot *ocelot, int ix,
-                         struct ocelot_ace_rule *ace)
+                         struct ocelot_vcap_filter *filter)
 {
        const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+       struct ocelot_vcap_key_vlan *tag = &filter->vlan;
        u32 val, msk, type, type_mask = 0xf, i, count;
-       struct ocelot_ace_vlan *tag = &ace->vlan;
        struct ocelot_vcap_u64 payload;
        struct vcap_data data;
        int row = (ix / 2);
@@ -353,19 +355,19 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
        data.tg_sw = VCAP_TG_HALF;
        is2_data_get(ocelot, &data, ix);
        data.tg = (data.tg & ~data.tg_mask);
-       if (ace->prio != 0)
+       if (filter->prio != 0)
                data.tg |= data.tg_value;
 
        data.type = IS2_ACTION_TYPE_NORMAL;
 
        vcap_key_set(ocelot, &data, VCAP_IS2_HK_PAG, 0, 0);
        vcap_key_set(ocelot, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0,
-                    ~ace->ingress_port_mask);
+                    ~filter->ingress_port_mask);
        vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_FIRST, OCELOT_VCAP_BIT_1);
        vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_HOST_MATCH,
                         OCELOT_VCAP_BIT_ANY);
-       vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_MC, ace->dmac_mc);
-       vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_BC, ace->dmac_bc);
+       vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc);
+       vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_BC, filter->dmac_bc);
        vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_VLAN_TAGGED, tag->tagged);
        vcap_key_set(ocelot, &data, VCAP_IS2_HK_VID,
                     tag->vid.value, tag->vid.mask);
@@ -373,9 +375,9 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
                     tag->pcp.value[0], tag->pcp.mask[0]);
        vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_DEI, tag->dei);
 
-       switch (ace->type) {
-       case OCELOT_ACE_TYPE_ETYPE: {
-               struct ocelot_ace_frame_etype *etype = &ace->frame.etype;
+       switch (filter->key_type) {
+       case OCELOT_VCAP_KEY_ETYPE: {
+               struct ocelot_vcap_key_etype *etype = &filter->key.etype;
 
                type = IS2_TYPE_ETYPE;
                vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC,
@@ -396,8 +398,8 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
                                   etype->data.value, etype->data.mask);
                break;
        }
-       case OCELOT_ACE_TYPE_LLC: {
-               struct ocelot_ace_frame_llc *llc = &ace->frame.llc;
+       case OCELOT_VCAP_KEY_LLC: {
+               struct ocelot_vcap_key_llc *llc = &filter->key.llc;
 
                type = IS2_TYPE_LLC;
                vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC,
@@ -412,8 +414,8 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
                                   payload.value, payload.mask);
                break;
        }
-       case OCELOT_ACE_TYPE_SNAP: {
-               struct ocelot_ace_frame_snap *snap = &ace->frame.snap;
+       case OCELOT_VCAP_KEY_SNAP: {
+               struct ocelot_vcap_key_snap *snap = &filter->key.snap;
 
                type = IS2_TYPE_SNAP;
                vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC,
@@ -421,12 +423,12 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
                vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_SMAC,
                                   snap->smac.value, snap->smac.mask);
                vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_SNAP_L2_SNAP,
-                                  ace->frame.snap.snap.value,
-                                  ace->frame.snap.snap.mask);
+                                  filter->key.snap.snap.value,
+                                  filter->key.snap.snap.mask);
                break;
        }
-       case OCELOT_ACE_TYPE_ARP: {
-               struct ocelot_ace_frame_arp *arp = &ace->frame.arp;
+       case OCELOT_VCAP_KEY_ARP: {
+               struct ocelot_vcap_key_arp *arp = &filter->key.arp;
 
                type = IS2_TYPE_ARP;
                vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_ARP_SMAC,
@@ -467,20 +469,20 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
                             0, 0);
                break;
        }
-       case OCELOT_ACE_TYPE_IPV4:
-       case OCELOT_ACE_TYPE_IPV6: {
+       case OCELOT_VCAP_KEY_IPV4:
+       case OCELOT_VCAP_KEY_IPV6: {
                enum ocelot_vcap_bit sip_eq_dip, sport_eq_dport, seq_zero, tcp;
                enum ocelot_vcap_bit ttl, fragment, options, tcp_ack, tcp_urg;
                enum ocelot_vcap_bit tcp_fin, tcp_syn, tcp_rst, tcp_psh;
-               struct ocelot_ace_frame_ipv4 *ipv4 = NULL;
-               struct ocelot_ace_frame_ipv6 *ipv6 = NULL;
+               struct ocelot_vcap_key_ipv4 *ipv4 = NULL;
+               struct ocelot_vcap_key_ipv6 *ipv6 = NULL;
                struct ocelot_vcap_udp_tcp *sport, *dport;
                struct ocelot_vcap_ipv4 sip, dip;
                struct ocelot_vcap_u8 proto, ds;
                struct ocelot_vcap_u48 *ip_data;
 
-               if (ace->type == OCELOT_ACE_TYPE_IPV4) {
-                       ipv4 = &ace->frame.ipv4;
+               if (filter->key_type == OCELOT_VCAP_KEY_IPV4) {
+                       ipv4 = &filter->key.ipv4;
                        ttl = ipv4->ttl;
                        fragment = ipv4->fragment;
                        options = ipv4->options;
@@ -501,7 +503,7 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
                        sport_eq_dport = ipv4->sport_eq_dport;
                        seq_zero = ipv4->seq_zero;
                } else {
-                       ipv6 = &ace->frame.ipv6;
+                       ipv6 = &filter->key.ipv6;
                        ttl = ipv6->ttl;
                        fragment = OCELOT_VCAP_BIT_ANY;
                        options = OCELOT_VCAP_BIT_ANY;
@@ -605,7 +607,7 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
                }
                break;
        }
-       case OCELOT_ACE_TYPE_ANY:
+       case OCELOT_VCAP_KEY_ANY:
        default:
                type = 0;
                type_mask = 0;
@@ -621,9 +623,9 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
        }
 
        vcap_key_set(ocelot, &data, VCAP_IS2_TYPE, type, type_mask);
-       is2_action_set(ocelot, &data, ace);
+       is2_action_set(ocelot, &data, filter);
        vcap_data_set(data.counter, data.counter_offset,
-                     vcap_is2->counter_width, ace->stats.pkts);
+                     vcap_is2->counter_width, filter->stats.pkts);
 
        /* Write row */
        vcap_entry2cache(ocelot, &data);
@@ -631,7 +633,7 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
        vcap_row_cmd(ocelot, row, VCAP_CMD_WRITE, VCAP_SEL_ALL);
 }
 
-static void is2_entry_get(struct ocelot *ocelot, struct ocelot_ace_rule *rule,
+static void is2_entry_get(struct ocelot *ocelot, struct ocelot_vcap_filter *filter,
                          int ix)
 {
        const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
@@ -646,55 +648,99 @@ static void is2_entry_get(struct ocelot *ocelot, struct ocelot_ace_rule *rule,
        cnt = vcap_data_get(data.counter, data.counter_offset,
                            vcap_is2->counter_width);
 
-       rule->stats.pkts = cnt;
+       filter->stats.pkts = cnt;
 }
 
-static void ocelot_ace_rule_add(struct ocelot *ocelot,
-                               struct ocelot_acl_block *block,
-                               struct ocelot_ace_rule *rule)
+static int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
+                                  struct ocelot_policer *pol)
 {
-       struct ocelot_ace_rule *tmp;
+       struct qos_policer_conf pp = { 0 };
+
+       if (!pol)
+               return -EINVAL;
+
+       pp.mode = MSCC_QOS_RATE_MODE_DATA;
+       pp.pir = pol->rate;
+       pp.pbs = pol->burst;
+
+       return qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+}
+
+static void ocelot_vcap_policer_del(struct ocelot *ocelot,
+                                   struct ocelot_vcap_block *block,
+                                   u32 pol_ix)
+{
+       struct ocelot_vcap_filter *filter;
+       struct qos_policer_conf pp = {0};
+       int index = -1;
+
+       if (pol_ix < block->pol_lpr)
+               return;
+
+       list_for_each_entry(filter, &block->rules, list) {
+               index++;
+               if (filter->action == OCELOT_VCAP_ACTION_POLICE &&
+                   filter->pol_ix < pol_ix) {
+                       filter->pol_ix += 1;
+                       ocelot_vcap_policer_add(ocelot, filter->pol_ix,
+                                               &filter->pol);
+                       is2_entry_set(ocelot, index, filter);
+               }
+       }
+
+       pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
+       qos_policer_conf_set(ocelot, 0, pol_ix, &pp);
+
+       block->pol_lpr++;
+}
+
+static void ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
+                                           struct ocelot_vcap_block *block,
+                                           struct ocelot_vcap_filter *filter)
+{
+       struct ocelot_vcap_filter *tmp;
        struct list_head *pos, *n;
 
-       if (rule->action == OCELOT_ACL_ACTION_POLICE) {
+       if (filter->action == OCELOT_VCAP_ACTION_POLICE) {
                block->pol_lpr--;
-               rule->pol_ix = block->pol_lpr;
-               ocelot_ace_policer_add(ocelot, rule->pol_ix, &rule->pol);
+               filter->pol_ix = block->pol_lpr;
+               ocelot_vcap_policer_add(ocelot, filter->pol_ix, &filter->pol);
        }
 
        block->count++;
 
        if (list_empty(&block->rules)) {
-               list_add(&rule->list, &block->rules);
+               list_add(&filter->list, &block->rules);
                return;
        }
 
        list_for_each_safe(pos, n, &block->rules) {
-               tmp = list_entry(pos, struct ocelot_ace_rule, list);
-               if (rule->prio < tmp->prio)
+               tmp = list_entry(pos, struct ocelot_vcap_filter, list);
+               if (filter->prio < tmp->prio)
                        break;
        }
-       list_add(&rule->list, pos->prev);
+       list_add(&filter->list, pos->prev);
 }
 
-static int ocelot_ace_rule_get_index_id(struct ocelot_acl_block *block,
-                                       struct ocelot_ace_rule *rule)
+static int ocelot_vcap_block_get_filter_index(struct ocelot_vcap_block *block,
+                                             struct ocelot_vcap_filter *filter)
 {
-       struct ocelot_ace_rule *tmp;
+       struct ocelot_vcap_filter *tmp;
        int index = -1;
 
        list_for_each_entry(tmp, &block->rules, list) {
                ++index;
-               if (rule->id == tmp->id)
+               if (filter->id == tmp->id)
                        break;
        }
        return index;
 }
 
-static struct ocelot_ace_rule*
-ocelot_ace_rule_get_rule_index(struct ocelot_acl_block *block, int index)
+static struct ocelot_vcap_filter*
+ocelot_vcap_block_find_filter(struct ocelot_vcap_block *block,
+                             int index)
 {
-       struct ocelot_ace_rule *tmp;
+       struct ocelot_vcap_filter *tmp;
        int i = 0;
 
        list_for_each_entry(tmp, &block->rules, list) {
@@ -737,15 +783,16 @@ static void ocelot_match_all_as_mac_etype(struct ocelot *ocelot, int port,
                       ANA_PORT_VCAP_S2_CFG, port);
 }
 
-static bool ocelot_ace_is_problematic_mac_etype(struct ocelot_ace_rule *ace)
+static bool
+ocelot_vcap_is_problematic_mac_etype(struct ocelot_vcap_filter *filter)
 {
        u16 proto, mask;
 
-       if (ace->type != OCELOT_ACE_TYPE_ETYPE)
+       if (filter->key_type != OCELOT_VCAP_KEY_ETYPE)
                return false;
 
-       proto = ntohs(*(u16 *)ace->frame.etype.etype.value);
-       mask = ntohs(*(u16 *)ace->frame.etype.etype.mask);
+       proto = ntohs(*(__be16 *)filter->key.etype.etype.value);
+       mask = ntohs(*(__be16 *)filter->key.etype.etype.mask);
 
        /* ETH_P_ALL match, so all protocols below are included */
        if (mask == 0)
@@ -760,49 +807,51 @@ static bool ocelot_ace_is_problematic_mac_etype(struct ocelot_ace_rule *ace)
        return false;
 }
 
-static bool ocelot_ace_is_problematic_non_mac_etype(struct ocelot_ace_rule *ace)
+static bool
+ocelot_vcap_is_problematic_non_mac_etype(struct ocelot_vcap_filter *filter)
 {
-       if (ace->type == OCELOT_ACE_TYPE_SNAP)
+       if (filter->key_type == OCELOT_VCAP_KEY_SNAP)
                return true;
-       if (ace->type == OCELOT_ACE_TYPE_ARP)
+       if (filter->key_type == OCELOT_VCAP_KEY_ARP)
                return true;
-       if (ace->type == OCELOT_ACE_TYPE_IPV4)
+       if (filter->key_type == OCELOT_VCAP_KEY_IPV4)
                return true;
-       if (ace->type == OCELOT_ACE_TYPE_IPV6)
+       if (filter->key_type == OCELOT_VCAP_KEY_IPV6)
                return true;
        return false;
 }
 
-static bool ocelot_exclusive_mac_etype_ace_rules(struct ocelot *ocelot,
-                                                struct ocelot_ace_rule *ace)
+static bool
+ocelot_exclusive_mac_etype_filter_rules(struct ocelot *ocelot,
+                                       struct ocelot_vcap_filter *filter)
 {
-       struct ocelot_acl_block *block = &ocelot->acl_block;
-       struct ocelot_ace_rule *tmp;
+       struct ocelot_vcap_block *block = &ocelot->block;
+       struct ocelot_vcap_filter *tmp;
        unsigned long port;
        int i;
 
-       if (ocelot_ace_is_problematic_mac_etype(ace)) {
+       if (ocelot_vcap_is_problematic_mac_etype(filter)) {
                /* Search for any non-MAC_ETYPE rules on the port */
                for (i = 0; i < block->count; i++) {
-                       tmp = ocelot_ace_rule_get_rule_index(block, i);
-                       if (tmp->ingress_port_mask & ace->ingress_port_mask &&
-                           ocelot_ace_is_problematic_non_mac_etype(tmp))
+                       tmp = ocelot_vcap_block_find_filter(block, i);
+                       if (tmp->ingress_port_mask & filter->ingress_port_mask &&
+                           ocelot_vcap_is_problematic_non_mac_etype(tmp))
                                return false;
                }
 
-               for_each_set_bit(port, &ace->ingress_port_mask,
+               for_each_set_bit(port, &filter->ingress_port_mask,
                                 ocelot->num_phys_ports)
                        ocelot_match_all_as_mac_etype(ocelot, port, true);
-       } else if (ocelot_ace_is_problematic_non_mac_etype(ace)) {
+       } else if (ocelot_vcap_is_problematic_non_mac_etype(filter)) {
                /* Search for any MAC_ETYPE rules on the port */
                for (i = 0; i < block->count; i++) {
-                       tmp = ocelot_ace_rule_get_rule_index(block, i);
-                       if (tmp->ingress_port_mask & ace->ingress_port_mask &&
-                           ocelot_ace_is_problematic_mac_etype(tmp))
+                       tmp = ocelot_vcap_block_find_filter(block, i);
+                       if (tmp->ingress_port_mask & filter->ingress_port_mask &&
+                           ocelot_vcap_is_problematic_mac_etype(tmp))
                                return false;
                }
 
-               for_each_set_bit(port, &ace->ingress_port_mask,
+               for_each_set_bit(port, &filter->ingress_port_mask,
                                 ocelot->num_phys_ports)
                        ocelot_match_all_as_mac_etype(ocelot, port, false);
        }
@@ -810,75 +859,51 @@ static bool ocelot_exclusive_mac_etype_ace_rules(struct ocelot *ocelot,
        return true;
 }
 
-int ocelot_ace_rule_offload_add(struct ocelot *ocelot,
-                               struct ocelot_ace_rule *rule,
-                               struct netlink_ext_ack *extack)
+int ocelot_vcap_filter_add(struct ocelot *ocelot,
+                          struct ocelot_vcap_filter *filter,
+                          struct netlink_ext_ack *extack)
 {
-       struct ocelot_acl_block *block = &ocelot->acl_block;
-       struct ocelot_ace_rule *ace;
+       struct ocelot_vcap_block *block = &ocelot->block;
        int i, index;
 
-       if (!ocelot_exclusive_mac_etype_ace_rules(ocelot, rule)) {
+       if (!ocelot_exclusive_mac_etype_filter_rules(ocelot, filter)) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Cannot mix MAC_ETYPE with non-MAC_ETYPE rules");
                return -EBUSY;
        }
 
-       /* Add rule to the linked list */
-       ocelot_ace_rule_add(ocelot, block, rule);
+       /* Add filter to the linked list */
+       ocelot_vcap_filter_add_to_block(ocelot, block, filter);
 
-       /* Get the index of the inserted rule */
-       index = ocelot_ace_rule_get_index_id(block, rule);
+       /* Get the index of the inserted filter */
+       index = ocelot_vcap_block_get_filter_index(block, filter);
 
-       /* Move down the rules to make place for the new rule */
+       /* Move down the rules to make place for the new filter */
        for (i = block->count - 1; i > index; i--) {
-               ace = ocelot_ace_rule_get_rule_index(block, i);
-               is2_entry_set(ocelot, i, ace);
-       }
-
-       /* Now insert the new rule */
-       is2_entry_set(ocelot, index, rule);
-       return 0;
-}
-
-static void ocelot_ace_police_del(struct ocelot *ocelot,
-                                 struct ocelot_acl_block *block,
-                                 u32 ix)
-{
-       struct ocelot_ace_rule *ace;
-       int index = -1;
-
-       if (ix < block->pol_lpr)
-               return;
+               struct ocelot_vcap_filter *tmp;
 
-       list_for_each_entry(ace, &block->rules, list) {
-               index++;
-               if (ace->action == OCELOT_ACL_ACTION_POLICE &&
-                   ace->pol_ix < ix) {
-                       ace->pol_ix += 1;
-                       ocelot_ace_policer_add(ocelot, ace->pol_ix,
-                                              &ace->pol);
-                       is2_entry_set(ocelot, index, ace);
-               }
+               tmp = ocelot_vcap_block_find_filter(block, i);
+               is2_entry_set(ocelot, i, tmp);
        }
 
-       ocelot_ace_policer_del(ocelot, block->pol_lpr);
-       block->pol_lpr++;
+       /* Now insert the new filter */
+       is2_entry_set(ocelot, index, filter);
+       return 0;
 }
 
-static void ocelot_ace_rule_del(struct ocelot *ocelot,
-                               struct ocelot_acl_block *block,
-                               struct ocelot_ace_rule *rule)
+static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot,
+                                           struct ocelot_vcap_block *block,
+                                           struct ocelot_vcap_filter *filter)
 {
-       struct ocelot_ace_rule *tmp;
+       struct ocelot_vcap_filter *tmp;
        struct list_head *pos, *q;
 
        list_for_each_safe(pos, q, &block->rules) {
-               tmp = list_entry(pos, struct ocelot_ace_rule, list);
-               if (tmp->id == rule->id) {
-                       if (tmp->action == OCELOT_ACL_ACTION_POLICE)
-                               ocelot_ace_police_del(ocelot, block,
-                                                     tmp->pol_ix);
+               tmp = list_entry(pos, struct ocelot_vcap_filter, list);
+               if (tmp->id == filter->id) {
+                       if (tmp->action == OCELOT_VCAP_ACTION_POLICE)
+                               ocelot_vcap_policer_del(ocelot, block,
+                                                       tmp->pol_ix);
 
                        list_del(pos);
                        kfree(tmp);
@@ -888,56 +913,57 @@ static void ocelot_ace_rule_del(struct ocelot *ocelot,
        block->count--;
 }
 
-int ocelot_ace_rule_offload_del(struct ocelot *ocelot,
-                               struct ocelot_ace_rule *rule)
+int ocelot_vcap_filter_del(struct ocelot *ocelot,
+                          struct ocelot_vcap_filter *filter)
 {
-       struct ocelot_acl_block *block = &ocelot->acl_block;
-       struct ocelot_ace_rule del_ace;
-       struct ocelot_ace_rule *ace;
+       struct ocelot_vcap_block *block = &ocelot->block;
+       struct ocelot_vcap_filter del_filter;
        int i, index;
 
-       memset(&del_ace, 0, sizeof(del_ace));
+       memset(&del_filter, 0, sizeof(del_filter));
 
-       /* Gets index of the rule */
-       index = ocelot_ace_rule_get_index_id(block, rule);
+       /* Gets index of the filter */
+       index = ocelot_vcap_block_get_filter_index(block, filter);
 
-       /* Delete rule */
-       ocelot_ace_rule_del(ocelot, block, rule);
+       /* Delete filter */
+       ocelot_vcap_block_remove_filter(ocelot, block, filter);
 
-       /* Move up all the blocks over the deleted rule */
+       /* Move up all the blocks over the deleted filter */
        for (i = index; i < block->count; i++) {
-               ace = ocelot_ace_rule_get_rule_index(block, i);
-               is2_entry_set(ocelot, i, ace);
+               struct ocelot_vcap_filter *tmp;
+
+               tmp = ocelot_vcap_block_find_filter(block, i);
+               is2_entry_set(ocelot, i, tmp);
        }
 
-       /* Now delete the last rule, because it is duplicated */
-       is2_entry_set(ocelot, block->count, &del_ace);
+       /* Now delete the last filter, because it is duplicated */
+       is2_entry_set(ocelot, block->count, &del_filter);
 
        return 0;
 }
 
-int ocelot_ace_rule_stats_update(struct ocelot *ocelot,
-                                struct ocelot_ace_rule *rule)
+int ocelot_vcap_filter_stats_update(struct ocelot *ocelot,
+                                   struct ocelot_vcap_filter *filter)
 {
-       struct ocelot_acl_block *block = &ocelot->acl_block;
-       struct ocelot_ace_rule *tmp;
+       struct ocelot_vcap_block *block = &ocelot->block;
+       struct ocelot_vcap_filter *tmp;
        int index;
 
-       index = ocelot_ace_rule_get_index_id(block, rule);
-       is2_entry_get(ocelot, rule, index);
+       index = ocelot_vcap_block_get_filter_index(block, filter);
+       is2_entry_get(ocelot, filter, index);
 
        /* After we get the result we need to clear the counters */
-       tmp = ocelot_ace_rule_get_rule_index(block, index);
+       tmp = ocelot_vcap_block_find_filter(block, index);
        tmp->stats.pkts = 0;
        is2_entry_set(ocelot, index, tmp);
 
        return 0;
 }
 
-int ocelot_ace_init(struct ocelot *ocelot)
+int ocelot_vcap_init(struct ocelot *ocelot)
 {
        const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
-       struct ocelot_acl_block *block = &ocelot->acl_block;
+       struct ocelot_vcap_block *block = &ocelot->block;
        struct vcap_data data;
 
        memset(&data, 0, sizeof(data));
@@ -968,7 +994,7 @@ int ocelot_ace_init(struct ocelot *ocelot)
 
        block->pol_lpr = OCELOT_POLICER_DISCARD - 1;
 
-       INIT_LIST_HEAD(&ocelot->acl_block.rules);
+       INIT_LIST_HEAD(&ocelot->block.rules);
 
        return 0;
 }
similarity index 75%
rename from drivers/net/ethernet/mscc/ocelot_ace.h
rename to drivers/net/ethernet/mscc/ocelot_vcap.h
index 099e177..0dfbfc0 100644 (file)
@@ -3,8 +3,8 @@
  * Copyright (c) 2019 Microsemi Corporation
  */
 
-#ifndef _MSCC_OCELOT_ACE_H_
-#define _MSCC_OCELOT_ACE_H_
+#ifndef _MSCC_OCELOT_VCAP_H_
+#define _MSCC_OCELOT_VCAP_H_
 
 #include "ocelot.h"
 #include "ocelot_police.h"
@@ -76,31 +76,31 @@ struct ocelot_vcap_udp_tcp {
        u16 mask;
 };
 
-enum ocelot_ace_type {
-       OCELOT_ACE_TYPE_ANY,
-       OCELOT_ACE_TYPE_ETYPE,
-       OCELOT_ACE_TYPE_LLC,
-       OCELOT_ACE_TYPE_SNAP,
-       OCELOT_ACE_TYPE_ARP,
-       OCELOT_ACE_TYPE_IPV4,
-       OCELOT_ACE_TYPE_IPV6
+enum ocelot_vcap_key_type {
+       OCELOT_VCAP_KEY_ANY,
+       OCELOT_VCAP_KEY_ETYPE,
+       OCELOT_VCAP_KEY_LLC,
+       OCELOT_VCAP_KEY_SNAP,
+       OCELOT_VCAP_KEY_ARP,
+       OCELOT_VCAP_KEY_IPV4,
+       OCELOT_VCAP_KEY_IPV6
 };
 
-struct ocelot_ace_vlan {
+struct ocelot_vcap_key_vlan {
        struct ocelot_vcap_vid vid;    /* VLAN ID (12 bit) */
        struct ocelot_vcap_u8  pcp;    /* PCP (3 bit) */
        enum ocelot_vcap_bit dei;    /* DEI */
        enum ocelot_vcap_bit tagged; /* Tagged/untagged frame */
 };
 
-struct ocelot_ace_frame_etype {
+struct ocelot_vcap_key_etype {
        struct ocelot_vcap_u48 dmac;
        struct ocelot_vcap_u48 smac;
        struct ocelot_vcap_u16 etype;
        struct ocelot_vcap_u16 data; /* MAC data */
 };
 
-struct ocelot_ace_frame_llc {
+struct ocelot_vcap_key_llc {
        struct ocelot_vcap_u48 dmac;
        struct ocelot_vcap_u48 smac;
 
@@ -108,7 +108,7 @@ struct ocelot_ace_frame_llc {
        struct ocelot_vcap_u32 llc;
 };
 
-struct ocelot_ace_frame_snap {
+struct ocelot_vcap_key_snap {
        struct ocelot_vcap_u48 dmac;
        struct ocelot_vcap_u48 smac;
 
@@ -116,7 +116,7 @@ struct ocelot_ace_frame_snap {
        struct ocelot_vcap_u40 snap;
 };
 
-struct ocelot_ace_frame_arp {
+struct ocelot_vcap_key_arp {
        struct ocelot_vcap_u48 smac;
        enum ocelot_vcap_bit arp;       /* Opcode ARP/RARP */
        enum ocelot_vcap_bit req;       /* Opcode request/reply */
@@ -133,7 +133,7 @@ struct ocelot_ace_frame_arp {
        struct ocelot_vcap_ipv4 dip;     /* Target IP address */
 };
 
-struct ocelot_ace_frame_ipv4 {
+struct ocelot_vcap_key_ipv4 {
        enum ocelot_vcap_bit ttl;      /* TTL zero */
        enum ocelot_vcap_bit fragment; /* Fragment */
        enum ocelot_vcap_bit options;  /* Header options */
@@ -155,7 +155,7 @@ struct ocelot_ace_frame_ipv4 {
        enum ocelot_vcap_bit seq_zero;       /* TCP sequence number is zero */
 };
 
-struct ocelot_ace_frame_ipv6 {
+struct ocelot_vcap_key_ipv6 {
        struct ocelot_vcap_u8 proto; /* IPv6 protocol */
        struct ocelot_vcap_u128 sip; /* IPv6 source (byte 0-7 ignored) */
        enum ocelot_vcap_bit ttl;  /* TTL zero */
@@ -174,58 +174,58 @@ struct ocelot_ace_frame_ipv6 {
        enum ocelot_vcap_bit seq_zero;       /* TCP sequence number is zero */
 };
 
-enum ocelot_ace_action {
-       OCELOT_ACL_ACTION_DROP,
-       OCELOT_ACL_ACTION_TRAP,
-       OCELOT_ACL_ACTION_POLICE,
+enum ocelot_vcap_action {
+       OCELOT_VCAP_ACTION_DROP,
+       OCELOT_VCAP_ACTION_TRAP,
+       OCELOT_VCAP_ACTION_POLICE,
 };
 
-struct ocelot_ace_stats {
+struct ocelot_vcap_stats {
        u64 bytes;
        u64 pkts;
        u64 used;
 };
 
-struct ocelot_ace_rule {
+struct ocelot_vcap_filter {
        struct list_head list;
 
        u16 prio;
        u32 id;
 
-       enum ocelot_ace_action action;
-       struct ocelot_ace_stats stats;
+       enum ocelot_vcap_action action;
+       struct ocelot_vcap_stats stats;
        unsigned long ingress_port_mask;
 
        enum ocelot_vcap_bit dmac_mc;
        enum ocelot_vcap_bit dmac_bc;
-       struct ocelot_ace_vlan vlan;
+       struct ocelot_vcap_key_vlan vlan;
 
-       enum ocelot_ace_type type;
+       enum ocelot_vcap_key_type key_type;
        union {
-               /* ocelot_ACE_TYPE_ANY: No specific fields */
-               struct ocelot_ace_frame_etype etype;
-               struct ocelot_ace_frame_llc llc;
-               struct ocelot_ace_frame_snap snap;
-               struct ocelot_ace_frame_arp arp;
-               struct ocelot_ace_frame_ipv4 ipv4;
-               struct ocelot_ace_frame_ipv6 ipv6;
-       } frame;
+               /* OCELOT_VCAP_KEY_ANY: No specific fields */
+               struct ocelot_vcap_key_etype etype;
+               struct ocelot_vcap_key_llc llc;
+               struct ocelot_vcap_key_snap snap;
+               struct ocelot_vcap_key_arp arp;
+               struct ocelot_vcap_key_ipv4 ipv4;
+               struct ocelot_vcap_key_ipv6 ipv6;
+       } key;
        struct ocelot_policer pol;
        u32 pol_ix;
 };
 
-int ocelot_ace_rule_offload_add(struct ocelot *ocelot,
-                               struct ocelot_ace_rule *rule,
-                               struct netlink_ext_ack *extack);
-int ocelot_ace_rule_offload_del(struct ocelot *ocelot,
-                               struct ocelot_ace_rule *rule);
-int ocelot_ace_rule_stats_update(struct ocelot *ocelot,
-                                struct ocelot_ace_rule *rule);
+int ocelot_vcap_filter_add(struct ocelot *ocelot,
+                          struct ocelot_vcap_filter *rule,
+                          struct netlink_ext_ack *extack);
+int ocelot_vcap_filter_del(struct ocelot *ocelot,
+                          struct ocelot_vcap_filter *rule);
+int ocelot_vcap_filter_stats_update(struct ocelot *ocelot,
+                                   struct ocelot_vcap_filter *rule);
 
-int ocelot_ace_init(struct ocelot *ocelot);
+int ocelot_vcap_init(struct ocelot *ocelot);
 
 int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
                               struct flow_cls_offload *f,
                               bool ingress);
 
-#endif /* _MSCC_OCELOT_ACE_H_ */
+#endif /* _MSCC_OCELOT_VCAP_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
new file mode 100644 (file)
index 0000000..65408bc
--- /dev/null
@@ -0,0 +1,1138 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/netdevice.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/mfd/syscon.h>
+#include <linux/skbuff.h>
+#include <net/switchdev.h>
+
+#include <soc/mscc/ocelot_vcap.h>
+#include <soc/mscc/ocelot_hsio.h>
+#include "ocelot.h"
+
+#define IFH_EXTRACT_BITFIELD64(x, o, w) (((x) >> (o)) & GENMASK_ULL((w) - 1, 0))
+#define VSC7514_VCAP_IS2_CNT 64
+#define VSC7514_VCAP_IS2_ENTRY_WIDTH 376
+#define VSC7514_VCAP_IS2_ACTION_WIDTH 99
+#define VSC7514_VCAP_PORT_CNT 11
+
+static const u32 ocelot_ana_regmap[] = {
+       REG(ANA_ADVLEARN,                               0x009000),
+       REG(ANA_VLANMASK,                               0x009004),
+       REG(ANA_PORT_B_DOMAIN,                          0x009008),
+       REG(ANA_ANAGEFIL,                               0x00900c),
+       REG(ANA_ANEVENTS,                               0x009010),
+       REG(ANA_STORMLIMIT_BURST,                       0x009014),
+       REG(ANA_STORMLIMIT_CFG,                         0x009018),
+       REG(ANA_ISOLATED_PORTS,                         0x009028),
+       REG(ANA_COMMUNITY_PORTS,                        0x00902c),
+       REG(ANA_AUTOAGE,                                0x009030),
+       REG(ANA_MACTOPTIONS,                            0x009034),
+       REG(ANA_LEARNDISC,                              0x009038),
+       REG(ANA_AGENCTRL,                               0x00903c),
+       REG(ANA_MIRRORPORTS,                            0x009040),
+       REG(ANA_EMIRRORPORTS,                           0x009044),
+       REG(ANA_FLOODING,                               0x009048),
+       REG(ANA_FLOODING_IPMC,                          0x00904c),
+       REG(ANA_SFLOW_CFG,                              0x009050),
+       REG(ANA_PORT_MODE,                              0x009080),
+       REG(ANA_PGID_PGID,                              0x008c00),
+       REG(ANA_TABLES_ANMOVED,                         0x008b30),
+       REG(ANA_TABLES_MACHDATA,                        0x008b34),
+       REG(ANA_TABLES_MACLDATA,                        0x008b38),
+       REG(ANA_TABLES_MACACCESS,                       0x008b3c),
+       REG(ANA_TABLES_MACTINDX,                        0x008b40),
+       REG(ANA_TABLES_VLANACCESS,                      0x008b44),
+       REG(ANA_TABLES_VLANTIDX,                        0x008b48),
+       REG(ANA_TABLES_ISDXACCESS,                      0x008b4c),
+       REG(ANA_TABLES_ISDXTIDX,                        0x008b50),
+       REG(ANA_TABLES_ENTRYLIM,                        0x008b00),
+       REG(ANA_TABLES_PTP_ID_HIGH,                     0x008b54),
+       REG(ANA_TABLES_PTP_ID_LOW,                      0x008b58),
+       REG(ANA_MSTI_STATE,                             0x008e00),
+       REG(ANA_PORT_VLAN_CFG,                          0x007000),
+       REG(ANA_PORT_DROP_CFG,                          0x007004),
+       REG(ANA_PORT_QOS_CFG,                           0x007008),
+       REG(ANA_PORT_VCAP_CFG,                          0x00700c),
+       REG(ANA_PORT_VCAP_S1_KEY_CFG,                   0x007010),
+       REG(ANA_PORT_VCAP_S2_CFG,                       0x00701c),
+       REG(ANA_PORT_PCP_DEI_MAP,                       0x007020),
+       REG(ANA_PORT_CPU_FWD_CFG,                       0x007060),
+       REG(ANA_PORT_CPU_FWD_BPDU_CFG,                  0x007064),
+       REG(ANA_PORT_CPU_FWD_GARP_CFG,                  0x007068),
+       REG(ANA_PORT_CPU_FWD_CCM_CFG,                   0x00706c),
+       REG(ANA_PORT_PORT_CFG,                          0x007070),
+       REG(ANA_PORT_POL_CFG,                           0x007074),
+       REG(ANA_PORT_PTP_CFG,                           0x007078),
+       REG(ANA_PORT_PTP_DLY1_CFG,                      0x00707c),
+       REG(ANA_OAM_UPM_LM_CNT,                         0x007c00),
+       REG(ANA_PORT_PTP_DLY2_CFG,                      0x007080),
+       REG(ANA_PFC_PFC_CFG,                            0x008800),
+       REG(ANA_PFC_PFC_TIMER,                          0x008804),
+       REG(ANA_IPT_OAM_MEP_CFG,                        0x008000),
+       REG(ANA_IPT_IPT,                                0x008004),
+       REG(ANA_PPT_PPT,                                0x008ac0),
+       REG(ANA_FID_MAP_FID_MAP,                        0x000000),
+       REG(ANA_AGGR_CFG,                               0x0090b4),
+       REG(ANA_CPUQ_CFG,                               0x0090b8),
+       REG(ANA_CPUQ_CFG2,                              0x0090bc),
+       REG(ANA_CPUQ_8021_CFG,                          0x0090c0),
+       REG(ANA_DSCP_CFG,                               0x009100),
+       REG(ANA_DSCP_REWR_CFG,                          0x009200),
+       REG(ANA_VCAP_RNG_TYPE_CFG,                      0x009240),
+       REG(ANA_VCAP_RNG_VAL_CFG,                       0x009260),
+       REG(ANA_VRAP_CFG,                               0x009280),
+       REG(ANA_VRAP_HDR_DATA,                          0x009284),
+       REG(ANA_VRAP_HDR_MASK,                          0x009288),
+       REG(ANA_DISCARD_CFG,                            0x00928c),
+       REG(ANA_FID_CFG,                                0x009290),
+       REG(ANA_POL_PIR_CFG,                            0x004000),
+       REG(ANA_POL_CIR_CFG,                            0x004004),
+       REG(ANA_POL_MODE_CFG,                           0x004008),
+       REG(ANA_POL_PIR_STATE,                          0x00400c),
+       REG(ANA_POL_CIR_STATE,                          0x004010),
+       REG(ANA_POL_STATE,                              0x004014),
+       REG(ANA_POL_FLOWC,                              0x008b80),
+       REG(ANA_POL_HYST,                               0x008bec),
+       REG(ANA_POL_MISC_CFG,                           0x008bf0),
+};
+
+static const u32 ocelot_qs_regmap[] = {
+       REG(QS_XTR_GRP_CFG,                             0x000000),
+       REG(QS_XTR_RD,                                  0x000008),
+       REG(QS_XTR_FRM_PRUNING,                         0x000010),
+       REG(QS_XTR_FLUSH,                               0x000018),
+       REG(QS_XTR_DATA_PRESENT,                        0x00001c),
+       REG(QS_XTR_CFG,                                 0x000020),
+       REG(QS_INJ_GRP_CFG,                             0x000024),
+       REG(QS_INJ_WR,                                  0x00002c),
+       REG(QS_INJ_CTRL,                                0x000034),
+       REG(QS_INJ_STATUS,                              0x00003c),
+       REG(QS_INJ_ERR,                                 0x000040),
+       REG(QS_INH_DBG,                                 0x000048),
+};
+
+static const u32 ocelot_qsys_regmap[] = {
+       REG(QSYS_PORT_MODE,                             0x011200),
+       REG(QSYS_SWITCH_PORT_MODE,                      0x011234),
+       REG(QSYS_STAT_CNT_CFG,                          0x011264),
+       REG(QSYS_EEE_CFG,                               0x011268),
+       REG(QSYS_EEE_THRES,                             0x011294),
+       REG(QSYS_IGR_NO_SHARING,                        0x011298),
+       REG(QSYS_EGR_NO_SHARING,                        0x01129c),
+       REG(QSYS_SW_STATUS,                             0x0112a0),
+       REG(QSYS_EXT_CPU_CFG,                           0x0112d0),
+       REG(QSYS_PAD_CFG,                               0x0112d4),
+       REG(QSYS_CPU_GROUP_MAP,                         0x0112d8),
+       REG(QSYS_QMAP,                                  0x0112dc),
+       REG(QSYS_ISDX_SGRP,                             0x011400),
+       REG(QSYS_TIMED_FRAME_ENTRY,                     0x014000),
+       REG(QSYS_TFRM_MISC,                             0x011310),
+       REG(QSYS_TFRM_PORT_DLY,                         0x011314),
+       REG(QSYS_TFRM_TIMER_CFG_1,                      0x011318),
+       REG(QSYS_TFRM_TIMER_CFG_2,                      0x01131c),
+       REG(QSYS_TFRM_TIMER_CFG_3,                      0x011320),
+       REG(QSYS_TFRM_TIMER_CFG_4,                      0x011324),
+       REG(QSYS_TFRM_TIMER_CFG_5,                      0x011328),
+       REG(QSYS_TFRM_TIMER_CFG_6,                      0x01132c),
+       REG(QSYS_TFRM_TIMER_CFG_7,                      0x011330),
+       REG(QSYS_TFRM_TIMER_CFG_8,                      0x011334),
+       REG(QSYS_RED_PROFILE,                           0x011338),
+       REG(QSYS_RES_QOS_MODE,                          0x011378),
+       REG(QSYS_RES_CFG,                               0x012000),
+       REG(QSYS_RES_STAT,                              0x012004),
+       REG(QSYS_EGR_DROP_MODE,                         0x01137c),
+       REG(QSYS_EQ_CTRL,                               0x011380),
+       REG(QSYS_EVENTS_CORE,                           0x011384),
+       REG(QSYS_CIR_CFG,                               0x000000),
+       REG(QSYS_EIR_CFG,                               0x000004),
+       REG(QSYS_SE_CFG,                                0x000008),
+       REG(QSYS_SE_DWRR_CFG,                           0x00000c),
+       REG(QSYS_SE_CONNECT,                            0x00003c),
+       REG(QSYS_SE_DLB_SENSE,                          0x000040),
+       REG(QSYS_CIR_STATE,                             0x000044),
+       REG(QSYS_EIR_STATE,                             0x000048),
+       REG(QSYS_SE_STATE,                              0x00004c),
+       REG(QSYS_HSCH_MISC_CFG,                         0x011388),
+};
+
+/* Rewriter (REW) target: byte offsets of each register relative to the
+ * REW block base -- presumably taken from the VSC7514 register map.
+ * Indexed by the shared REW_* register enum; do not reorder.
+ */
+static const u32 ocelot_rew_regmap[] = {
+       REG(REW_PORT_VLAN_CFG,                          0x000000),
+       REG(REW_TAG_CFG,                                0x000004),
+       REG(REW_PORT_CFG,                               0x000008),
+       REG(REW_DSCP_CFG,                               0x00000c),
+       REG(REW_PCP_DEI_QOS_MAP_CFG,                    0x000010),
+       REG(REW_PTP_CFG,                                0x000050),
+       REG(REW_PTP_DLY1_CFG,                           0x000054),
+       REG(REW_DSCP_REMAP_DP1_CFG,                     0x000690),
+       REG(REW_DSCP_REMAP_CFG,                         0x000790),
+       REG(REW_STAT_CFG,                               0x000890),
+       REG(REW_PPT,                                    0x000680),
+};
+
+/* System (SYS) target: byte offsets of each register relative to the
+ * SYS block base. The SYS_COUNT_* entries map the statistics counter
+ * region; note that SYS_CNT (below) shares offset 0x000000 with
+ * SYS_COUNT_RX_OCTETS -- it is an alias for the start of that region.
+ */
+static const u32 ocelot_sys_regmap[] = {
+       REG(SYS_COUNT_RX_OCTETS,                        0x000000),
+       REG(SYS_COUNT_RX_UNICAST,                       0x000004),
+       REG(SYS_COUNT_RX_MULTICAST,                     0x000008),
+       REG(SYS_COUNT_RX_BROADCAST,                     0x00000c),
+       REG(SYS_COUNT_RX_SHORTS,                        0x000010),
+       REG(SYS_COUNT_RX_FRAGMENTS,                     0x000014),
+       REG(SYS_COUNT_RX_JABBERS,                       0x000018),
+       REG(SYS_COUNT_RX_CRC_ALIGN_ERRS,                0x00001c),
+       REG(SYS_COUNT_RX_SYM_ERRS,                      0x000020),
+       REG(SYS_COUNT_RX_64,                            0x000024),
+       REG(SYS_COUNT_RX_65_127,                        0x000028),
+       REG(SYS_COUNT_RX_128_255,                       0x00002c),
+       REG(SYS_COUNT_RX_256_1023,                      0x000030),
+       REG(SYS_COUNT_RX_1024_1526,                     0x000034),
+       REG(SYS_COUNT_RX_1527_MAX,                      0x000038),
+       REG(SYS_COUNT_RX_PAUSE,                         0x00003c),
+       REG(SYS_COUNT_RX_CONTROL,                       0x000040),
+       REG(SYS_COUNT_RX_LONGS,                         0x000044),
+       REG(SYS_COUNT_RX_CLASSIFIED_DROPS,              0x000048),
+       REG(SYS_COUNT_TX_OCTETS,                        0x000100),
+       REG(SYS_COUNT_TX_UNICAST,                       0x000104),
+       REG(SYS_COUNT_TX_MULTICAST,                     0x000108),
+       REG(SYS_COUNT_TX_BROADCAST,                     0x00010c),
+       REG(SYS_COUNT_TX_COLLISION,                     0x000110),
+       REG(SYS_COUNT_TX_DROPS,                         0x000114),
+       REG(SYS_COUNT_TX_PAUSE,                         0x000118),
+       REG(SYS_COUNT_TX_64,                            0x00011c),
+       REG(SYS_COUNT_TX_65_127,                        0x000120),
+       REG(SYS_COUNT_TX_128_511,                       0x000124),
+       REG(SYS_COUNT_TX_512_1023,                      0x000128),
+       REG(SYS_COUNT_TX_1024_1526,                     0x00012c),
+       REG(SYS_COUNT_TX_1527_MAX,                      0x000130),
+       REG(SYS_COUNT_TX_AGING,                         0x000170),
+       REG(SYS_RESET_CFG,                              0x000508),
+       REG(SYS_CMID,                                   0x00050c),
+       REG(SYS_VLAN_ETYPE_CFG,                         0x000510),
+       REG(SYS_PORT_MODE,                              0x000514),
+       REG(SYS_FRONT_PORT_MODE,                        0x000548),
+       REG(SYS_FRM_AGING,                              0x000574),
+       REG(SYS_STAT_CFG,                               0x000578),
+       REG(SYS_SW_STATUS,                              0x00057c),
+       REG(SYS_MISC_CFG,                               0x0005ac),
+       REG(SYS_REW_MAC_HIGH_CFG,                       0x0005b0),
+       REG(SYS_REW_MAC_LOW_CFG,                        0x0005dc),
+       REG(SYS_CM_ADDR,                                0x000500),
+       REG(SYS_CM_DATA,                                0x000504),
+       REG(SYS_PAUSE_CFG,                              0x000608),
+       REG(SYS_PAUSE_TOT_CFG,                          0x000638),
+       REG(SYS_ATOP,                                   0x00063c),
+       REG(SYS_ATOP_TOT_CFG,                           0x00066c),
+       REG(SYS_MAC_FC_CFG,                             0x000670),
+       REG(SYS_MMGT,                                   0x00069c),
+       REG(SYS_MMGT_FAST,                              0x0006a0),
+       REG(SYS_EVENTS_DIF,                             0x0006a4),
+       REG(SYS_EVENTS_CORE,                            0x0006b4),
+       /* Alias for the base of the counter region (same offset as
+        * SYS_COUNT_RX_OCTETS above)
+        */
+       REG(SYS_CNT,                                    0x000000),
+       REG(SYS_PTP_STATUS,                             0x0006b8),
+       REG(SYS_PTP_TXSTAMP,                            0x0006bc),
+       REG(SYS_PTP_NXT,                                0x0006c0),
+       REG(SYS_PTP_CFG,                                0x0006c4),
+};
+
+/* S2 target (presumably the VCAP IS2 TCAM access interface): byte
+ * offsets of the core-update and cache-row registers relative to the
+ * S2 block base.
+ */
+static const u32 ocelot_s2_regmap[] = {
+       REG(S2_CORE_UPDATE_CTRL,                        0x000000),
+       REG(S2_CORE_MV_CFG,                             0x000004),
+       REG(S2_CACHE_ENTRY_DAT,                         0x000008),
+       REG(S2_CACHE_MASK_DAT,                          0x000108),
+       REG(S2_CACHE_ACTION_DAT,                        0x000208),
+       REG(S2_CACHE_CNT_DAT,                           0x000308),
+       REG(S2_CACHE_TG_DAT,                            0x000388),
+};
+
+/* PTP target: byte offsets of the time-of-day pin registers and clock
+ * adjustment registers relative to the PTP block base.
+ */
+static const u32 ocelot_ptp_regmap[] = {
+       REG(PTP_PIN_CFG,                                0x000000),
+       REG(PTP_PIN_TOD_SEC_MSB,                        0x000004),
+       REG(PTP_PIN_TOD_SEC_LSB,                        0x000008),
+       REG(PTP_PIN_TOD_NSEC,                           0x00000c),
+       REG(PTP_PIN_WF_HIGH_PERIOD,                     0x000014),
+       REG(PTP_PIN_WF_LOW_PERIOD,                      0x000018),
+       REG(PTP_CFG_MISC,                               0x0000a0),
+       REG(PTP_CLK_CFG_ADJ_CFG,                        0x0000a4),
+       REG(PTP_CLK_CFG_ADJ_FREQ,                       0x0000a8),
+};
+
+/* Per-port device (DEV_GMII) target: byte offsets of the MAC and
+ * PCS1G registers relative to each port's device block base.
+ */
+static const u32 ocelot_dev_gmii_regmap[] = {
+       REG(DEV_CLOCK_CFG,                              0x0),
+       REG(DEV_PORT_MISC,                              0x4),
+       REG(DEV_EVENTS,                                 0x8),
+       REG(DEV_EEE_CFG,                                0xc),
+       REG(DEV_RX_PATH_DELAY,                          0x10),
+       REG(DEV_TX_PATH_DELAY,                          0x14),
+       REG(DEV_PTP_PREDICT_CFG,                        0x18),
+       REG(DEV_MAC_ENA_CFG,                            0x1c),
+       REG(DEV_MAC_MODE_CFG,                           0x20),
+       REG(DEV_MAC_MAXLEN_CFG,                         0x24),
+       REG(DEV_MAC_TAGS_CFG,                           0x28),
+       REG(DEV_MAC_ADV_CHK_CFG,                        0x2c),
+       REG(DEV_MAC_IFG_CFG,                            0x30),
+       REG(DEV_MAC_HDX_CFG,                            0x34),
+       REG(DEV_MAC_DBG_CFG,                            0x38),
+       REG(DEV_MAC_FC_MAC_LOW_CFG,                     0x3c),
+       REG(DEV_MAC_FC_MAC_HIGH_CFG,                    0x40),
+       REG(DEV_MAC_STICKY,                             0x44),
+       REG(PCS1G_CFG,                                  0x48),
+       REG(PCS1G_MODE_CFG,                             0x4c),
+       REG(PCS1G_SD_CFG,                               0x50),
+       REG(PCS1G_ANEG_CFG,                             0x54),
+       REG(PCS1G_ANEG_NP_CFG,                          0x58),
+       REG(PCS1G_LB_CFG,                               0x5c),
+       REG(PCS1G_DBG_CFG,                              0x60),
+       REG(PCS1G_CDET_CFG,                             0x64),
+       REG(PCS1G_ANEG_STATUS,                          0x68),
+       REG(PCS1G_ANEG_NP_STATUS,                       0x6c),
+       REG(PCS1G_LINK_STATUS,                          0x70),
+       REG(PCS1G_LINK_DOWN_CNT,                        0x74),
+       REG(PCS1G_STICKY,                               0x78),
+       REG(PCS1G_DEBUG_STATUS,                         0x7c),
+       REG(PCS1G_LPI_CFG,                              0x80),
+       REG(PCS1G_LPI_WAKE_ERROR_CNT,                   0x84),
+       REG(PCS1G_LPI_STATUS,                           0x88),
+       REG(PCS1G_TSTPAT_MODE_CFG,                      0x8c),
+       REG(PCS1G_TSTPAT_STATUS,                        0x90),
+       REG(DEV_PCS_FX100_CFG,                          0x94),
+       REG(DEV_PCS_FX100_STATUS,                       0x98),
+};
+
+/* Master register map: one offset table per hardware target, indexed
+ * by the target enum (ANA, QS, ...). Installed into ocelot->map by
+ * ocelot_chip_init() so the common layer can translate register enums
+ * into per-target byte offsets.
+ */
+static const u32 *ocelot_regmap[TARGET_MAX] = {
+       [ANA] = ocelot_ana_regmap,
+       [QS] = ocelot_qs_regmap,
+       [QSYS] = ocelot_qsys_regmap,
+       [REW] = ocelot_rew_regmap,
+       [SYS] = ocelot_sys_regmap,
+       [S2] = ocelot_s2_regmap,
+       [PTP] = ocelot_ptp_regmap,
+       [DEV_GMII] = ocelot_dev_gmii_regmap,
+};
+
+/* Bit-field descriptors (register, msb, lsb) for fields the common
+ * layer accesses through the regmap_field API; registered via
+ * ocelot_regfields_init(). The REG_FIELD_ID entries additionally
+ * describe per-port replication (count, stride in bytes).
+ */
+static const struct reg_field ocelot_regfields[REGFIELD_MAX] = {
+       [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 11, 11),
+       [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 10),
+       [ANA_ANEVENTS_MSTI_DROP] = REG_FIELD(ANA_ANEVENTS, 27, 27),
+       [ANA_ANEVENTS_ACLKILL] = REG_FIELD(ANA_ANEVENTS, 26, 26),
+       [ANA_ANEVENTS_ACLUSED] = REG_FIELD(ANA_ANEVENTS, 25, 25),
+       [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24),
+       [ANA_ANEVENTS_VS2TTL1] = REG_FIELD(ANA_ANEVENTS, 23, 23),
+       [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22),
+       [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21),
+       [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20),
+       [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19),
+       [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18),
+       [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17),
+       [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16),
+       [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15),
+       [ANA_ANEVENTS_DROPPED] = REG_FIELD(ANA_ANEVENTS, 14, 14),
+       [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13),
+       [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12),
+       [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11),
+       [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10),
+       [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9),
+       [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8),
+       [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7),
+       [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6),
+       [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5),
+       [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4),
+       [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3),
+       [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2),
+       [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1),
+       [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0),
+       [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 18, 18),
+       [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 10, 11),
+       [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 9),
+       [QSYS_TIMED_FRAME_ENTRY_TFRM_VLD] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 20, 20),
+       [QSYS_TIMED_FRAME_ENTRY_TFRM_FP] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 8, 19),
+       [QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 4, 7),
+       [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 1, 3),
+       [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 0, 0),
+       [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 2, 2),
+       [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 1, 1),
+       [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 0, 0),
+       /* Replicated per number of ports (12), register size 4 per port */
+       [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 12, 4),
+       [QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 11, 13, 12, 4),
+       [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 12, 4),
+       [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 12, 4),
+       [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 12, 4),
+       [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 12, 4),
+       [SYS_PORT_MODE_DATA_WO_TS] = REG_FIELD_ID(SYS_PORT_MODE, 5, 6, 12, 4),
+       [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 12, 4),
+       [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 12, 4),
+       [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 12, 4),
+       [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 10, 18, 12, 4),
+       [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 9, 12, 4),
+       /* NOTE(review): PAUSE_ENA spans bits 0-1 and therefore overlaps
+        * PAUSE_STOP's bit 1 -- confirm against the VSC7514 datasheet.
+        */
+       [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 12, 4),
+};
+
+/* ethtool statistics layout: counter name plus its index in the SYS
+ * counter region (presumably a 32-bit-counter word index read via
+ * SYS_CNT / SYS_STAT_CFG -- confirm against the common stats code).
+ * RX counters occupy 0x00-0x2B, TX 0x40-0x5E, drop counters 0x80-0x91.
+ */
+static const struct ocelot_stat_layout ocelot_stats_layout[] = {
+       { .name = "rx_octets", .offset = 0x00, },
+       { .name = "rx_unicast", .offset = 0x01, },
+       { .name = "rx_multicast", .offset = 0x02, },
+       { .name = "rx_broadcast", .offset = 0x03, },
+       { .name = "rx_shorts", .offset = 0x04, },
+       { .name = "rx_fragments", .offset = 0x05, },
+       { .name = "rx_jabbers", .offset = 0x06, },
+       { .name = "rx_crc_align_errs", .offset = 0x07, },
+       { .name = "rx_sym_errs", .offset = 0x08, },
+       { .name = "rx_frames_below_65_octets", .offset = 0x09, },
+       { .name = "rx_frames_65_to_127_octets", .offset = 0x0A, },
+       { .name = "rx_frames_128_to_255_octets", .offset = 0x0B, },
+       { .name = "rx_frames_256_to_511_octets", .offset = 0x0C, },
+       { .name = "rx_frames_512_to_1023_octets", .offset = 0x0D, },
+       { .name = "rx_frames_1024_to_1526_octets", .offset = 0x0E, },
+       { .name = "rx_frames_over_1526_octets", .offset = 0x0F, },
+       { .name = "rx_pause", .offset = 0x10, },
+       { .name = "rx_control", .offset = 0x11, },
+       { .name = "rx_longs", .offset = 0x12, },
+       { .name = "rx_classified_drops", .offset = 0x13, },
+       { .name = "rx_red_prio_0", .offset = 0x14, },
+       { .name = "rx_red_prio_1", .offset = 0x15, },
+       { .name = "rx_red_prio_2", .offset = 0x16, },
+       { .name = "rx_red_prio_3", .offset = 0x17, },
+       { .name = "rx_red_prio_4", .offset = 0x18, },
+       { .name = "rx_red_prio_5", .offset = 0x19, },
+       { .name = "rx_red_prio_6", .offset = 0x1A, },
+       { .name = "rx_red_prio_7", .offset = 0x1B, },
+       { .name = "rx_yellow_prio_0", .offset = 0x1C, },
+       { .name = "rx_yellow_prio_1", .offset = 0x1D, },
+       { .name = "rx_yellow_prio_2", .offset = 0x1E, },
+       { .name = "rx_yellow_prio_3", .offset = 0x1F, },
+       { .name = "rx_yellow_prio_4", .offset = 0x20, },
+       { .name = "rx_yellow_prio_5", .offset = 0x21, },
+       { .name = "rx_yellow_prio_6", .offset = 0x22, },
+       { .name = "rx_yellow_prio_7", .offset = 0x23, },
+       { .name = "rx_green_prio_0", .offset = 0x24, },
+       { .name = "rx_green_prio_1", .offset = 0x25, },
+       { .name = "rx_green_prio_2", .offset = 0x26, },
+       { .name = "rx_green_prio_3", .offset = 0x27, },
+       { .name = "rx_green_prio_4", .offset = 0x28, },
+       { .name = "rx_green_prio_5", .offset = 0x29, },
+       { .name = "rx_green_prio_6", .offset = 0x2A, },
+       { .name = "rx_green_prio_7", .offset = 0x2B, },
+       { .name = "tx_octets", .offset = 0x40, },
+       { .name = "tx_unicast", .offset = 0x41, },
+       { .name = "tx_multicast", .offset = 0x42, },
+       { .name = "tx_broadcast", .offset = 0x43, },
+       { .name = "tx_collision", .offset = 0x44, },
+       { .name = "tx_drops", .offset = 0x45, },
+       { .name = "tx_pause", .offset = 0x46, },
+       { .name = "tx_frames_below_65_octets", .offset = 0x47, },
+       { .name = "tx_frames_65_to_127_octets", .offset = 0x48, },
+       { .name = "tx_frames_128_255_octets", .offset = 0x49, },
+       { .name = "tx_frames_256_511_octets", .offset = 0x4A, },
+       { .name = "tx_frames_512_1023_octets", .offset = 0x4B, },
+       { .name = "tx_frames_1024_1526_octets", .offset = 0x4C, },
+       { .name = "tx_frames_over_1526_octets", .offset = 0x4D, },
+       { .name = "tx_yellow_prio_0", .offset = 0x4E, },
+       { .name = "tx_yellow_prio_1", .offset = 0x4F, },
+       { .name = "tx_yellow_prio_2", .offset = 0x50, },
+       { .name = "tx_yellow_prio_3", .offset = 0x51, },
+       { .name = "tx_yellow_prio_4", .offset = 0x52, },
+       { .name = "tx_yellow_prio_5", .offset = 0x53, },
+       { .name = "tx_yellow_prio_6", .offset = 0x54, },
+       { .name = "tx_yellow_prio_7", .offset = 0x55, },
+       { .name = "tx_green_prio_0", .offset = 0x56, },
+       { .name = "tx_green_prio_1", .offset = 0x57, },
+       { .name = "tx_green_prio_2", .offset = 0x58, },
+       { .name = "tx_green_prio_3", .offset = 0x59, },
+       { .name = "tx_green_prio_4", .offset = 0x5A, },
+       { .name = "tx_green_prio_5", .offset = 0x5B, },
+       { .name = "tx_green_prio_6", .offset = 0x5C, },
+       { .name = "tx_green_prio_7", .offset = 0x5D, },
+       { .name = "tx_aged", .offset = 0x5E, },
+       { .name = "drop_local", .offset = 0x80, },
+       { .name = "drop_tail", .offset = 0x81, },
+       { .name = "drop_yellow_prio_0", .offset = 0x82, },
+       { .name = "drop_yellow_prio_1", .offset = 0x83, },
+       { .name = "drop_yellow_prio_2", .offset = 0x84, },
+       { .name = "drop_yellow_prio_3", .offset = 0x85, },
+       { .name = "drop_yellow_prio_4", .offset = 0x86, },
+       { .name = "drop_yellow_prio_5", .offset = 0x87, },
+       { .name = "drop_yellow_prio_6", .offset = 0x88, },
+       { .name = "drop_yellow_prio_7", .offset = 0x89, },
+       { .name = "drop_green_prio_0", .offset = 0x8A, },
+       { .name = "drop_green_prio_1", .offset = 0x8B, },
+       { .name = "drop_green_prio_2", .offset = 0x8C, },
+       { .name = "drop_green_prio_3", .offset = 0x8D, },
+       { .name = "drop_green_prio_4", .offset = 0x8E, },
+       { .name = "drop_green_prio_5", .offset = 0x8F, },
+       { .name = "drop_green_prio_6", .offset = 0x90, },
+       { .name = "drop_green_prio_7", .offset = 0x91, },
+};
+
+/* One-shot configuration of the HSIO PLL5G block (CFG4, CFG0, CFG2,
+ * in that order). The magic field values are opaque here; per the
+ * original comment they originate from the Microsemi VTSS API for
+ * Ocelot, pending a proper common-clock-framework driver.
+ */
+static void ocelot_pll5_init(struct ocelot *ocelot)
+{
+       /* Configure PLL5. This will need a proper CCF driver
+        * The values are coming from the VTSS API for Ocelot
+        */
+       regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG4,
+                    HSIO_PLL5G_CFG4_IB_CTRL(0x7600) |
+                    HSIO_PLL5G_CFG4_IB_BIAS_CTRL(0x8));
+       /* Core/CPU clock dividers plus bias, charge pump and clock tree
+        * enables -- do not reorder relative to CFG4/CFG2 without
+        * checking the datasheet.
+        */
+       regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG0,
+                    HSIO_PLL5G_CFG0_CORE_CLK_DIV(0x11) |
+                    HSIO_PLL5G_CFG0_CPU_CLK_DIV(2) |
+                    HSIO_PLL5G_CFG0_ENA_BIAS |
+                    HSIO_PLL5G_CFG0_ENA_VCO_BUF |
+                    HSIO_PLL5G_CFG0_ENA_CP1 |
+                    HSIO_PLL5G_CFG0_SELCPI(2) |
+                    HSIO_PLL5G_CFG0_LOOP_BW_RES(0xe) |
+                    HSIO_PLL5G_CFG0_SELBGV820(4) |
+                    HSIO_PLL5G_CFG0_DIV4 |
+                    HSIO_PLL5G_CFG0_ENA_CLKTREE |
+                    HSIO_PLL5G_CFG0_ENA_LANE);
+       regmap_write(ocelot->targets[HSIO], HSIO_PLL5G_CFG2,
+                    HSIO_PLL5G_CFG2_EN_RESET_FRQ_DET |
+                    HSIO_PLL5G_CFG2_EN_RESET_OVERRUN |
+                    HSIO_PLL5G_CFG2_GAIN_TEST(0x8) |
+                    HSIO_PLL5G_CFG2_ENA_AMPCTRL |
+                    HSIO_PLL5G_CFG2_PWD_AMPCTRL_N |
+                    HSIO_PLL5G_CFG2_AMPC_SEL(0x10));
+}
+
+/* Install the VSC7514-specific tables, parameters and callbacks into
+ * the common ocelot structure, register the regmap fields, bring up
+ * PLL5 and pick a random base MAC address.
+ *
+ * Returns 0 on success or the error from ocelot_regfields_init().
+ */
+static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
+{
+       int err;
+
+       ocelot->ops = ops;
+       ocelot->map = ocelot_regmap;
+       ocelot->stats_layout = ocelot_stats_layout;
+       ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
+       ocelot->shared_queue_sz = 224 * 1024;
+       ocelot->num_mact_rows = 1024;
+
+       err = ocelot_regfields_init(ocelot, ocelot_regfields);
+       if (err)
+               return err;
+
+       ocelot_pll5_init(ocelot);
+
+       /* Random base MAC; clear the low nibble of the last byte so
+        * per-port addresses can be derived from it.
+        */
+       eth_random_addr(ocelot->base_mac);
+       ocelot->base_mac[5] &= 0xf0;
+
+       return 0;
+}
+
+/* Decode the extraction frame header (IFH) into @info.
+ *
+ * The IFH is two big-endian 64-bit words; length, timestamp, source
+ * port, tag type and VID are pulled out of fixed bit positions.
+ * Always returns 0.
+ */
+static int ocelot_parse_ifh(u32 *_ifh, struct frame_info *info)
+{
+       u64 word0, word1;
+       u8 wlen, llen;
+
+       word0 = be64_to_cpu(((__force __be64 *)_ifh)[0]);
+       word1 = be64_to_cpu(((__force __be64 *)_ifh)[1]);
+
+       /* Frame length = whole buffer cells + last-cell bytes - 80 */
+       wlen = IFH_EXTRACT_BITFIELD64(word0, 7, 8);
+       llen = IFH_EXTRACT_BITFIELD64(word0, 15, 6);
+       info->len = OCELOT_BUFFER_CELL_SZ * wlen + llen - 80;
+
+       /* Low 32 bits of the RX timestamp */
+       info->timestamp = IFH_EXTRACT_BITFIELD64(word0, 21, 32);
+
+       info->port = IFH_EXTRACT_BITFIELD64(word1, 43, 4);
+       info->tag_type = IFH_EXTRACT_BITFIELD64(word1, 16, 1);
+       info->vid = IFH_EXTRACT_BITFIELD64(word1, 0, 12);
+
+       return 0;
+}
+
+/* Read one 32-bit word of a frame from extraction group @grp.
+ *
+ * @ifh: true while reading the frame header, in which case data must
+ *       already be available; for payload words we busy-wait instead.
+ *
+ * Returns the number of valid bytes stored in *@rval (4 for a full
+ * word, fewer at end-of-frame), or -EIO on abort / missing IFH data.
+ */
+static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh,
+                               u32 *rval)
+{
+       u32 bytes_valid;
+       u32 val;
+
+       val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+       if (val == XTR_NOT_READY) {
+               if (ifh)
+                       return -EIO;
+
+               /* Payload: spin until the next word arrives */
+               do {
+                       val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+               } while (val == XTR_NOT_READY);
+       }
+
+       switch (val) {
+       case XTR_ABORT:
+               return -EIO;
+       case XTR_EOF_0:
+       case XTR_EOF_1:
+       case XTR_EOF_2:
+       case XTR_EOF_3:
+       case XTR_PRUNED:
+               /* End-of-frame marker: the following word is the last
+                * data word; it may itself be escaped.
+                */
+               bytes_valid = XTR_VALID_BYTES(val);
+               val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+               *rval = (val == XTR_ESCAPE) ?
+                       ocelot_read_rix(ocelot, QS_XTR_RD, grp) : val;
+
+               return bytes_valid;
+       case XTR_ESCAPE:
+               /* Literal data that collides with a control code */
+               *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+
+               return 4;
+       default:
+               *rval = val;
+
+               return 4;
+       }
+}
+
+/* Frame extraction interrupt: drain every frame pending on CPU
+ * extraction group 0, rebuild an sk_buff for each and hand it to the
+ * network stack. On error the remaining queued words are flushed so
+ * the group does not stay wedged.
+ */
+static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
+{
+       struct ocelot *ocelot = arg;
+       int i = 0, grp = 0;
+       int err = 0;
+
+       /* No data pending on this group: not our interrupt */
+       if (!(ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)))
+               return IRQ_NONE;
+
+       do {
+               struct skb_shared_hwtstamps *shhwtstamps;
+               struct ocelot_port_private *priv;
+               struct ocelot_port *ocelot_port;
+               u64 tod_in_ns, full_ts_in_ns;
+               struct frame_info info = {};
+               struct net_device *dev;
+               u32 ifh[4], val, *buf;
+               struct timespec64 ts;
+               int sz, len, buf_len;
+               struct sk_buff *skb;
+
+               /* Read the 16-byte extraction frame header; each word
+                * must be immediately available (ifh=true).
+                */
+               for (i = 0; i < OCELOT_TAG_LEN / 4; i++) {
+                       err = ocelot_rx_frame_word(ocelot, grp, true, &ifh[i]);
+                       if (err != 4)
+                               break;
+               }
+
+               if (err != 4)
+                       break;
+
+               /* At this point the IFH was read correctly, so it is safe to
+                * presume that there is no error. The err needs to be reset
+                * otherwise a frame could come in CPU queue between the while
+                * condition and the check for error later on. And in that case
+                * the new frame is just removed and not processed.
+                */
+               err = 0;
+
+               ocelot_parse_ifh(ifh, &info);
+
+               /* Resolve the net_device from the source port in the IFH */
+               ocelot_port = ocelot->ports[info.port];
+               priv = container_of(ocelot_port, struct ocelot_port_private,
+                                   port);
+               dev = priv->dev;
+
+               skb = netdev_alloc_skb(dev, info.len);
+
+               if (unlikely(!skb)) {
+                       netdev_err(dev, "Unable to allocate sk_buff\n");
+                       err = -ENOMEM;
+                       break;
+               }
+               /* Payload excludes the trailing FCS, which is read
+                * separately below.
+                */
+               buf_len = info.len - ETH_FCS_LEN;
+               buf = (u32 *)skb_put(skb, buf_len);
+
+               len = 0;
+               do {
+                       sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
+                       *buf++ = val;
+                       len += sz;
+               } while (len < buf_len);
+
+               /* Read the FCS */
+               sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
+               /* Update the statistics if part of the FCS was read before */
+               len -= ETH_FCS_LEN - sz;
+
+               if (unlikely(dev->features & NETIF_F_RXFCS)) {
+                       buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
+                       *buf = val;
+               }
+
+               if (sz < 0) {
+                       err = sz;
+                       break;
+               }
+
+               if (ocelot->ptp) {
+                       /* The IFH carries only the low 32 bits of the RX
+                        * timestamp; splice them with the current TOD,
+                        * compensating for a 32-bit wraparound between
+                        * stamping and now.
+                        */
+                       ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
+
+                       tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
+                       if ((tod_in_ns & 0xffffffff) < info.timestamp)
+                               full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) |
+                                               info.timestamp;
+                       else
+                               full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) |
+                                               info.timestamp;
+
+                       shhwtstamps = skb_hwtstamps(skb);
+                       memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+                       shhwtstamps->hwtstamp = full_ts_in_ns;
+               }
+
+               /* Everything we see on an interface that is in the HW bridge
+                * has already been forwarded.
+                */
+               if (ocelot->bridge_mask & BIT(info.port))
+                       skb->offload_fwd_mark = 1;
+
+               skb->protocol = eth_type_trans(skb, dev);
+               if (!skb_defer_rx_timestamp(skb))
+                       netif_rx(skb);
+               dev->stats.rx_bytes += len;
+               dev->stats.rx_packets++;
+       } while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp));
+
+       /* On error, discard whatever is still queued for this group */
+       if (err)
+               while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))
+                       ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+
+       return IRQ_HANDLED;
+}
+
+/* PTP-ready interrupt: delegate to the common layer to collect the
+ * pending TX timestamps (ocelot_get_txtstamp()).
+ */
+static irqreturn_t ocelot_ptp_rdy_irq_handler(int irq, void *arg)
+{
+       struct ocelot *ocelot = arg;
+
+       ocelot_get_txtstamp(ocelot);
+
+       return IRQ_HANDLED;
+}
+
+/* Device-tree match table: binds this driver to VSC7514 switch nodes */
+static const struct of_device_id mscc_ocelot_match[] = {
+       { .compatible = "mscc,vsc7514-switch" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
+
+/* ocelot_ops->reset: initialize the switch memories and enable the
+ * core. Returns 0 on success, -ETIMEDOUT if the hardware does not
+ * clear MEM_INIT within ~100 ms.
+ */
+static int ocelot_reset(struct ocelot *ocelot)
+{
+       int retries = 100;
+       u32 val;
+
+       /* Kick off memory initialization with memories enabled */
+       regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
+       regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+
+       /* Poll until the hardware clears MEM_INIT (1 ms x 100 tries) */
+       do {
+               msleep(1);
+               regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
+                                 &val);
+       } while (val && --retries);
+
+       if (!retries)
+               return -ETIMEDOUT;
+
+       /* Keep the memories enabled and bring the switch core up */
+       regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+       regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
+
+       return 0;
+}
+
+/* Watermark encode
+ * Bit 8:   Unit; 0:1, 1:16
+ * Bit 7-0: Value to be multiplied with unit
+ *
+ * Values of 16 * 256 or more are not representable in this format:
+ * value / 16 would overflow the 8-bit mantissa and corrupt the unit
+ * bit. Saturate to the largest encodable watermark instead of
+ * silently producing a wrong (smaller) threshold.
+ */
+static u16 ocelot_wm_enc(u16 value)
+{
+       if (value >= BIT(8)) {
+               /* Clamp to max mantissa (0xff) in units of 16 */
+               if (value >= 16 * BIT(8))
+                       return BIT(8) | 0xff;
+
+               return BIT(8) | (value / 16);
+       }
+
+       return value;
+}
+
+/* Chip-specific callbacks handed to the common ocelot library via
+ * ocelot_chip_init()
+ */
+static const struct ocelot_ops ocelot_ops = {
+       .reset                  = ocelot_reset,
+       .wm_enc                 = ocelot_wm_enc,
+};
+
+/* VCAP IS2 key field layout. Each entry is { bit offset, width } of a
+ * field within the TCAM key. Type-specific fields reuse the bit space
+ * after the common header, selected by VCAP_IS2_TYPE.
+ */
+static const struct vcap_field vsc7514_vcap_is2_keys[] = {
+       /* Common: 46 bits */
+       [VCAP_IS2_TYPE]                         = {  0,   4},
+       [VCAP_IS2_HK_FIRST]                     = {  4,   1},
+       [VCAP_IS2_HK_PAG]                       = {  5,   8},
+       [VCAP_IS2_HK_IGR_PORT_MASK]             = { 13,  12},
+       [VCAP_IS2_HK_RSV2]                      = { 25,   1},
+       [VCAP_IS2_HK_HOST_MATCH]                = { 26,   1},
+       [VCAP_IS2_HK_L2_MC]                     = { 27,   1},
+       [VCAP_IS2_HK_L2_BC]                     = { 28,   1},
+       [VCAP_IS2_HK_VLAN_TAGGED]               = { 29,   1},
+       [VCAP_IS2_HK_VID]                       = { 30,  12},
+       [VCAP_IS2_HK_DEI]                       = { 42,   1},
+       [VCAP_IS2_HK_PCP]                       = { 43,   3},
+       /* MAC_ETYPE / MAC_LLC / MAC_SNAP / OAM common */
+       [VCAP_IS2_HK_L2_DMAC]                   = { 46,  48},
+       [VCAP_IS2_HK_L2_SMAC]                   = { 94,  48},
+       /* MAC_ETYPE (TYPE=000) */
+       [VCAP_IS2_HK_MAC_ETYPE_ETYPE]           = {142,  16},
+       [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0]     = {158,  16},
+       [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1]     = {174,   8},
+       [VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2]     = {182,   3},
+       /* MAC_LLC (TYPE=001) */
+       [VCAP_IS2_HK_MAC_LLC_L2_LLC]            = {142,  40},
+       /* MAC_SNAP (TYPE=010) */
+       [VCAP_IS2_HK_MAC_SNAP_L2_SNAP]          = {142,  40},
+       /* MAC_ARP (TYPE=011) */
+       [VCAP_IS2_HK_MAC_ARP_SMAC]              = { 46,  48},
+       [VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK]     = { 94,   1},
+       [VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK]    = { 95,   1},
+       [VCAP_IS2_HK_MAC_ARP_LEN_OK]            = { 96,   1},
+       [VCAP_IS2_HK_MAC_ARP_TARGET_MATCH]      = { 97,   1},
+       [VCAP_IS2_HK_MAC_ARP_SENDER_MATCH]      = { 98,   1},
+       [VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN]    = { 99,   1},
+       [VCAP_IS2_HK_MAC_ARP_OPCODE]            = {100,   2},
+       [VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP]        = {102,  32},
+       [VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP]        = {134,  32},
+       [VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP]        = {166,   1},
+       /* IP4_TCP_UDP / IP4_OTHER common */
+       [VCAP_IS2_HK_IP4]                       = { 46,   1},
+       [VCAP_IS2_HK_L3_FRAGMENT]               = { 47,   1},
+       [VCAP_IS2_HK_L3_FRAG_OFS_GT0]           = { 48,   1},
+       [VCAP_IS2_HK_L3_OPTIONS]                = { 49,   1},
+       [VCAP_IS2_HK_IP4_L3_TTL_GT0]            = { 50,   1},
+       [VCAP_IS2_HK_L3_TOS]                    = { 51,   8},
+       [VCAP_IS2_HK_L3_IP4_DIP]                = { 59,  32},
+       [VCAP_IS2_HK_L3_IP4_SIP]                = { 91,  32},
+       [VCAP_IS2_HK_DIP_EQ_SIP]                = {123,   1},
+       /* IP4_TCP_UDP (TYPE=100) */
+       [VCAP_IS2_HK_TCP]                       = {124,   1},
+       [VCAP_IS2_HK_L4_SPORT]                  = {125,  16},
+       [VCAP_IS2_HK_L4_DPORT]                  = {141,  16},
+       [VCAP_IS2_HK_L4_RNG]                    = {157,   8},
+       [VCAP_IS2_HK_L4_SPORT_EQ_DPORT]         = {165,   1},
+       [VCAP_IS2_HK_L4_SEQUENCE_EQ0]           = {166,   1},
+       [VCAP_IS2_HK_L4_URG]                    = {167,   1},
+       [VCAP_IS2_HK_L4_ACK]                    = {168,   1},
+       [VCAP_IS2_HK_L4_PSH]                    = {169,   1},
+       [VCAP_IS2_HK_L4_RST]                    = {170,   1},
+       [VCAP_IS2_HK_L4_SYN]                    = {171,   1},
+       [VCAP_IS2_HK_L4_FIN]                    = {172,   1},
+       [VCAP_IS2_HK_L4_1588_DOM]               = {173,   8},
+       [VCAP_IS2_HK_L4_1588_VER]               = {181,   4},
+       /* IP4_OTHER (TYPE=101) */
+       [VCAP_IS2_HK_IP4_L3_PROTO]              = {124,   8},
+       [VCAP_IS2_HK_L3_PAYLOAD]                = {132,  56},
+       /* IP6_STD (TYPE=110) */
+       [VCAP_IS2_HK_IP6_L3_TTL_GT0]            = { 46,   1},
+       [VCAP_IS2_HK_L3_IP6_SIP]                = { 47, 128},
+       [VCAP_IS2_HK_IP6_L3_PROTO]              = {175,   8},
+       /* OAM (TYPE=111) */
+       [VCAP_IS2_HK_OAM_MEL_FLAGS]             = {142,   7},
+       [VCAP_IS2_HK_OAM_VER]                   = {149,   5},
+       [VCAP_IS2_HK_OAM_OPCODE]                = {154,   8},
+       [VCAP_IS2_HK_OAM_FLAGS]                 = {162,   8},
+       [VCAP_IS2_HK_OAM_MEPID]                 = {170,  16},
+       [VCAP_IS2_HK_OAM_CCM_CNTS_EQ0]          = {186,   1},
+       [VCAP_IS2_HK_OAM_IS_Y1731]              = {187,   1},
+};
+
+static const struct vcap_field vsc7514_vcap_is2_actions[] = {
+       [VCAP_IS2_ACT_HIT_ME_ONCE]              = {  0,  1},
+       [VCAP_IS2_ACT_CPU_COPY_ENA]             = {  1,  1},
+       [VCAP_IS2_ACT_CPU_QU_NUM]               = {  2,  3},
+       [VCAP_IS2_ACT_MASK_MODE]                = {  5,  2},
+       [VCAP_IS2_ACT_MIRROR_ENA]               = {  7,  1},
+       [VCAP_IS2_ACT_LRN_DIS]                  = {  8,  1},
+       [VCAP_IS2_ACT_POLICE_ENA]               = {  9,  1},
+       [VCAP_IS2_ACT_POLICE_IDX]               = { 10,  9},
+       [VCAP_IS2_ACT_POLICE_VCAP_ONLY]         = { 19,  1},
+       [VCAP_IS2_ACT_PORT_MASK]                = { 20, 11},
+       [VCAP_IS2_ACT_REW_OP]                   = { 31,  9},
+       [VCAP_IS2_ACT_SMAC_REPLACE_ENA]         = { 40,  1},
+       [VCAP_IS2_ACT_RSV]                      = { 41,  2},
+       [VCAP_IS2_ACT_ACL_ID]                   = { 43,  6},
+       [VCAP_IS2_ACT_HIT_CNT]                  = { 49, 32},
+};
+
+static const struct vcap_props vsc7514_vcap_props[] = {
+       [VCAP_IS2] = {
+               .tg_width = 2,
+               .sw_count = 4,
+               .entry_count = VSC7514_VCAP_IS2_CNT,
+               .entry_width = VSC7514_VCAP_IS2_ENTRY_WIDTH,
+               .action_count = VSC7514_VCAP_IS2_CNT +
+                               VSC7514_VCAP_PORT_CNT + 2,
+               .action_width = 99,
+               .action_type_width = 1,
+               .action_table = {
+                       [IS2_ACTION_TYPE_NORMAL] = {
+                               .width = 49,
+                               .count = 2
+                       },
+                       [IS2_ACTION_TYPE_SMAC_SIP] = {
+                               .width = 6,
+                               .count = 4
+                       },
+               },
+               .counter_words = 4,
+               .counter_width = 32,
+       },
+};
+
+static struct ptp_clock_info ocelot_ptp_clock_info = {
+       .owner          = THIS_MODULE,
+       .name           = "ocelot ptp",
+       .max_adj        = 0x7fffffff,
+       .n_alarm        = 0,
+       .n_ext_ts       = 0,
+       .n_per_out      = OCELOT_PTP_PINS_NUM,
+       .n_pins         = OCELOT_PTP_PINS_NUM,
+       .pps            = 0,
+       .gettime64      = ocelot_ptp_gettime64,
+       .settime64      = ocelot_ptp_settime64,
+       .adjtime        = ocelot_ptp_adjtime,
+       .adjfine        = ocelot_ptp_adjfine,
+       .verify         = ocelot_ptp_verify,
+       .enable         = ocelot_ptp_enable,
+};
+
+static int mscc_ocelot_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct device_node *ports, *portnp;
+       int err, irq_xtr, irq_ptp_rdy;
+       struct ocelot *ocelot;
+       struct regmap *hsio;
+       unsigned int i;
+
+       struct {
+               enum ocelot_target id;
+               char *name;
+               u8 optional:1;
+       } io_target[] = {
+               { SYS, "sys" },
+               { REW, "rew" },
+               { QSYS, "qsys" },
+               { ANA, "ana" },
+               { QS, "qs" },
+               { S2, "s2" },
+               { PTP, "ptp", 1 },
+       };
+
+       if (!np && !pdev->dev.platform_data)
+               return -ENODEV;
+
+       ocelot = devm_kzalloc(&pdev->dev, sizeof(*ocelot), GFP_KERNEL);
+       if (!ocelot)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, ocelot);
+       ocelot->dev = &pdev->dev;
+
+       for (i = 0; i < ARRAY_SIZE(io_target); i++) {
+               struct regmap *target;
+               struct resource *res;
+
+               res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+                                                  io_target[i].name);
+
+               target = ocelot_regmap_init(ocelot, res);
+               if (IS_ERR(target)) {
+                       if (io_target[i].optional) {
+                               ocelot->targets[io_target[i].id] = NULL;
+                               continue;
+                       }
+                       return PTR_ERR(target);
+               }
+
+               ocelot->targets[io_target[i].id] = target;
+       }
+
+       hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
+       if (IS_ERR(hsio)) {
+               dev_err(&pdev->dev, "missing hsio syscon\n");
+               return PTR_ERR(hsio);
+       }
+
+       ocelot->targets[HSIO] = hsio;
+
+       err = ocelot_chip_init(ocelot, &ocelot_ops);
+       if (err)
+               return err;
+
+       irq_xtr = platform_get_irq_byname(pdev, "xtr");
+       if (irq_xtr < 0)
+               return -ENODEV;
+
+       err = devm_request_threaded_irq(&pdev->dev, irq_xtr, NULL,
+                                       ocelot_xtr_irq_handler, IRQF_ONESHOT,
+                                       "frame extraction", ocelot);
+       if (err)
+               return err;
+
+       irq_ptp_rdy = platform_get_irq_byname(pdev, "ptp_rdy");
+       if (irq_ptp_rdy > 0 && ocelot->targets[PTP]) {
+               err = devm_request_threaded_irq(&pdev->dev, irq_ptp_rdy, NULL,
+                                               ocelot_ptp_rdy_irq_handler,
+                                               IRQF_ONESHOT, "ptp ready",
+                                               ocelot);
+               if (err)
+                       return err;
+
+               /* Both the PTP interrupt and the PTP bank are available */
+               ocelot->ptp = 1;
+       }
+
+       ports = of_get_child_by_name(np, "ethernet-ports");
+       if (!ports) {
+               dev_err(&pdev->dev, "no ethernet-ports child node found\n");
+               return -ENODEV;
+       }
+
+       ocelot->num_phys_ports = of_get_child_count(ports);
+
+       ocelot->ports = devm_kcalloc(&pdev->dev, ocelot->num_phys_ports,
+                                    sizeof(struct ocelot_port *), GFP_KERNEL);
+
+       ocelot->vcap_is2_keys = vsc7514_vcap_is2_keys;
+       ocelot->vcap_is2_actions = vsc7514_vcap_is2_actions;
+       ocelot->vcap = vsc7514_vcap_props;
+
+       ocelot_init(ocelot);
+       if (ocelot->ptp) {
+               err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
+               if (err) {
+                       dev_err(ocelot->dev,
+                               "Timestamp initialization failed\n");
+                       ocelot->ptp = 0;
+               }
+       }
+
+       /* No NPI port */
+       ocelot_configure_cpu(ocelot, -1, OCELOT_TAG_PREFIX_NONE,
+                            OCELOT_TAG_PREFIX_NONE);
+
+       for_each_available_child_of_node(ports, portnp) {
+               struct ocelot_port_private *priv;
+               struct ocelot_port *ocelot_port;
+               struct device_node *phy_node;
+               phy_interface_t phy_mode;
+               struct phy_device *phy;
+               struct regmap *target;
+               struct resource *res;
+               struct phy *serdes;
+               char res_name[8];
+               u32 port;
+
+               if (of_property_read_u32(portnp, "reg", &port))
+                       continue;
+
+               snprintf(res_name, sizeof(res_name), "port%d", port);
+
+               res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+                                                  res_name);
+               target = ocelot_regmap_init(ocelot, res);
+               if (IS_ERR(target))
+                       continue;
+
+               phy_node = of_parse_phandle(portnp, "phy-handle", 0);
+               if (!phy_node)
+                       continue;
+
+               phy = of_phy_find_device(phy_node);
+               of_node_put(phy_node);
+               if (!phy)
+                       continue;
+
+               err = ocelot_probe_port(ocelot, port, target, phy);
+               if (err) {
+                       of_node_put(portnp);
+                       goto out_put_ports;
+               }
+
+               ocelot_port = ocelot->ports[port];
+               priv = container_of(ocelot_port, struct ocelot_port_private,
+                                   port);
+
+               of_get_phy_mode(portnp, &phy_mode);
+
+               ocelot_port->phy_mode = phy_mode;
+
+               switch (ocelot_port->phy_mode) {
+               case PHY_INTERFACE_MODE_NA:
+                       continue;
+               case PHY_INTERFACE_MODE_SGMII:
+                       break;
+               case PHY_INTERFACE_MODE_QSGMII:
+                       /* Ensure clock signals and speed is set on all
+                        * QSGMII links
+                        */
+                       ocelot_port_writel(ocelot_port,
+                                          DEV_CLOCK_CFG_LINK_SPEED
+                                          (OCELOT_SPEED_1000),
+                                          DEV_CLOCK_CFG);
+                       break;
+               default:
+                       dev_err(ocelot->dev,
+                               "invalid phy mode for port%d, (Q)SGMII only\n",
+                               port);
+                       of_node_put(portnp);
+                       err = -EINVAL;
+                       goto out_put_ports;
+               }
+
+               serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
+               if (IS_ERR(serdes)) {
+                       err = PTR_ERR(serdes);
+                       if (err == -EPROBE_DEFER)
+                               dev_dbg(ocelot->dev, "deferring probe\n");
+                       else
+                               dev_err(ocelot->dev,
+                                       "missing SerDes phys for port%d\n",
+                                       port);
+
+                       of_node_put(portnp);
+                       goto out_put_ports;
+               }
+
+               priv->serdes = serdes;
+       }
+
+       register_netdevice_notifier(&ocelot_netdevice_nb);
+       register_switchdev_notifier(&ocelot_switchdev_nb);
+       register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
+
+       dev_info(&pdev->dev, "Ocelot switch probed\n");
+
+out_put_ports:
+       of_node_put(ports);
+       return err;
+}
+
+static int mscc_ocelot_remove(struct platform_device *pdev)
+{
+       struct ocelot *ocelot = platform_get_drvdata(pdev);
+
+       ocelot_deinit_timestamp(ocelot);
+       ocelot_deinit(ocelot);
+       unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
+       unregister_switchdev_notifier(&ocelot_switchdev_nb);
+       unregister_netdevice_notifier(&ocelot_netdevice_nb);
+
+       return 0;
+}
+
+static struct platform_driver mscc_ocelot_driver = {
+       .probe = mscc_ocelot_probe,
+       .remove = mscc_ocelot_remove,
+       .driver = {
+               .name = "ocelot-switch",
+               .of_match_table = mscc_ocelot_match,
+       },
+};
+
+module_platform_driver(mscc_ocelot_driver);
+
+MODULE_DESCRIPTION("Microsemi Ocelot switch driver");
+MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@bootlin.com>");
+MODULE_LICENSE("Dual MIT/GPL");
index e1e1f4e..4a5beaf 100644 (file)
@@ -3257,13 +3257,12 @@ static void myri10ge_mask_surprise_down(struct pci_dev *pdev)
        }
 }
 
-#ifdef CONFIG_PM
-static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused myri10ge_suspend(struct device *dev)
 {
        struct myri10ge_priv *mgp;
        struct net_device *netdev;
 
-       mgp = pci_get_drvdata(pdev);
+       mgp = dev_get_drvdata(dev);
        if (mgp == NULL)
                return -EINVAL;
        netdev = mgp->dev;
@@ -3276,14 +3275,13 @@ static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
                rtnl_unlock();
        }
        myri10ge_dummy_rdma(mgp, 0);
-       pci_save_state(pdev);
-       pci_disable_device(pdev);
 
-       return pci_set_power_state(pdev, pci_choose_state(pdev, state));
+       return 0;
 }
 
-static int myri10ge_resume(struct pci_dev *pdev)
+static int __maybe_unused myri10ge_resume(struct device *dev)
 {
+       struct pci_dev *pdev = to_pci_dev(dev);
        struct myri10ge_priv *mgp;
        struct net_device *netdev;
        int status;
@@ -3293,7 +3291,6 @@ static int myri10ge_resume(struct pci_dev *pdev)
        if (mgp == NULL)
                return -EINVAL;
        netdev = mgp->dev;
-       pci_set_power_state(pdev, PCI_D0);      /* zeros conf space as a side effect */
        msleep(5);              /* give card time to respond */
        pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
        if (vendor == 0xffff) {
@@ -3301,23 +3298,9 @@ static int myri10ge_resume(struct pci_dev *pdev)
                return -EIO;
        }
 
-       pci_restore_state(pdev);
-
-       status = pci_enable_device(pdev);
-       if (status) {
-               dev_err(&pdev->dev, "failed to enable device\n");
-               return status;
-       }
-
-       pci_set_master(pdev);
-
        myri10ge_reset(mgp);
        myri10ge_dummy_rdma(mgp, 1);
 
-       /* Save configuration space to be restored if the
-        * nic resets due to a parity error */
-       pci_save_state(pdev);
-
        if (netif_running(netdev)) {
                rtnl_lock();
                status = myri10ge_open(netdev);
@@ -3331,11 +3314,8 @@ static int myri10ge_resume(struct pci_dev *pdev)
        return 0;
 
 abort_with_enabled:
-       pci_disable_device(pdev);
        return -EIO;
-
 }
-#endif                         /* CONFIG_PM */
 
 static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
 {
@@ -4017,15 +3997,14 @@ static const struct pci_device_id myri10ge_pci_tbl[] = {
 
 MODULE_DEVICE_TABLE(pci, myri10ge_pci_tbl);
 
+static SIMPLE_DEV_PM_OPS(myri10ge_pm_ops, myri10ge_suspend, myri10ge_resume);
+
 static struct pci_driver myri10ge_driver = {
        .name = "myri10ge",
        .probe = myri10ge_probe,
        .remove = myri10ge_remove,
        .id_table = myri10ge_pci_tbl,
-#ifdef CONFIG_PM
-       .suspend = myri10ge_suspend,
-       .resume = myri10ge_resume,
-#endif
+       .driver.pm = &myri10ge_pm_ops,
 };
 
 #ifdef CONFIG_MYRI10GE_DCA
index d21d706..c2867fe 100644 (file)
@@ -3247,8 +3247,6 @@ static void natsemi_remove1(struct pci_dev *pdev)
        free_netdev (dev);
 }
 
-#ifdef CONFIG_PM
-
 /*
  * The ns83815 chip doesn't have explicit RxStop bits.
  * Kicking the Rx or Tx process for a new packet reenables the Rx process
@@ -3275,9 +3273,9 @@ static void natsemi_remove1(struct pci_dev *pdev)
  * Interrupts must be disabled, otherwise hands_off can cause irq storms.
  */
 
-static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused natsemi_suspend(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata (pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct netdev_private *np = netdev_priv(dev);
        void __iomem * ioaddr = ns_ioaddr(dev);
 
@@ -3326,11 +3324,10 @@ static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
 }
 
 
-static int natsemi_resume (struct pci_dev *pdev)
+static int __maybe_unused natsemi_resume(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata (pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct netdev_private *np = netdev_priv(dev);
-       int ret = 0;
 
        rtnl_lock();
        if (netif_device_present(dev))
@@ -3339,12 +3336,6 @@ static int natsemi_resume (struct pci_dev *pdev)
                const int irq = np->pci_dev->irq;
 
                BUG_ON(!np->hands_off);
-               ret = pci_enable_device(pdev);
-               if (ret < 0) {
-                       dev_err(&pdev->dev,
-                               "pci_enable_device() failed: %d\n", ret);
-                       goto out;
-               }
        /*      pci_power_on(pdev); */
 
                napi_enable(&np->napi);
@@ -3364,20 +3355,17 @@ static int natsemi_resume (struct pci_dev *pdev)
        netif_device_attach(dev);
 out:
        rtnl_unlock();
-       return ret;
+       return 0;
 }
 
-#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(natsemi_pm_ops, natsemi_suspend, natsemi_resume);
 
 static struct pci_driver natsemi_driver = {
        .name           = DRV_NAME,
        .id_table       = natsemi_pci_tbl,
        .probe          = natsemi_probe1,
        .remove         = natsemi_remove1,
-#ifdef CONFIG_PM
-       .suspend        = natsemi_suspend,
-       .resume         = natsemi_resume,
-#endif
+       .driver.pm      = &natsemi_pm_ops,
 };
 
 static int __init natsemi_init_mod (void)
index 5484f18..0c0d127 100644 (file)
@@ -27,7 +27,7 @@ config S2IO
          on its age.
 
          More specific information on configuring the driver is in
-         <file:Documentation/networking/device_drivers/neterion/s2io.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/neterion/s2io.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called s2io.
@@ -42,7 +42,7 @@ config VXGE
          labeled as either one, depending on its age.
 
          More specific information on configuring the driver is in
-         <file:Documentation/networking/device_drivers/neterion/vxge.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/neterion/vxge.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called vxge.
index 67e6260..a0980f8 100644 (file)
@@ -640,11 +640,11 @@ static int init_shared_mem(struct s2io_nic *nic)
                        int k = 0;
                        dma_addr_t tmp_p;
                        void *tmp_v;
-                       tmp_v = pci_alloc_consistent(nic->pdev,
-                                                    PAGE_SIZE, &tmp_p);
+                       tmp_v = dma_alloc_coherent(&nic->pdev->dev, PAGE_SIZE,
+                                                  &tmp_p, GFP_KERNEL);
                        if (!tmp_v) {
                                DBG_PRINT(INFO_DBG,
-                                         "pci_alloc_consistent failed for TxDL\n");
+                                         "dma_alloc_coherent failed for TxDL\n");
                                return -ENOMEM;
                        }
                        /* If we got a zero DMA address(can happen on
@@ -658,11 +658,12 @@ static int init_shared_mem(struct s2io_nic *nic)
                                          "%s: Zero DMA address for TxDL. "
                                          "Virtual address %p\n",
                                          dev->name, tmp_v);
-                               tmp_v = pci_alloc_consistent(nic->pdev,
-                                                            PAGE_SIZE, &tmp_p);
+                               tmp_v = dma_alloc_coherent(&nic->pdev->dev,
+                                                          PAGE_SIZE, &tmp_p,
+                                                          GFP_KERNEL);
                                if (!tmp_v) {
                                        DBG_PRINT(INFO_DBG,
-                                                 "pci_alloc_consistent failed for TxDL\n");
+                                                 "dma_alloc_coherent failed for TxDL\n");
                                        return -ENOMEM;
                                }
                                mem_allocated += PAGE_SIZE;
@@ -734,8 +735,8 @@ static int init_shared_mem(struct s2io_nic *nic)
 
                        rx_blocks = &ring->rx_blocks[j];
                        size = SIZE_OF_BLOCK;   /* size is always page size */
-                       tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
-                                                         &tmp_p_addr);
+                       tmp_v_addr = dma_alloc_coherent(&nic->pdev->dev, size,
+                                                       &tmp_p_addr, GFP_KERNEL);
                        if (tmp_v_addr == NULL) {
                                /*
                                 * In case of failure, free_shared_mem()
@@ -835,8 +836,8 @@ static int init_shared_mem(struct s2io_nic *nic)
        /* Allocation and initialization of Statistics block */
        size = sizeof(struct stat_block);
        mac_control->stats_mem =
-               pci_alloc_consistent(nic->pdev, size,
-                                    &mac_control->stats_mem_phy);
+               dma_alloc_coherent(&nic->pdev->dev, size,
+                                  &mac_control->stats_mem_phy, GFP_KERNEL);
 
        if (!mac_control->stats_mem) {
                /*
@@ -906,18 +907,18 @@ static void free_shared_mem(struct s2io_nic *nic)
                        fli = &fifo->list_info[mem_blks];
                        if (!fli->list_virt_addr)
                                break;
-                       pci_free_consistent(nic->pdev, PAGE_SIZE,
-                                           fli->list_virt_addr,
-                                           fli->list_phy_addr);
+                       dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
+                                         fli->list_virt_addr,
+                                         fli->list_phy_addr);
                        swstats->mem_freed += PAGE_SIZE;
                }
                /* If we got a zero DMA address during allocation,
                 * free the page now
                 */
                if (mac_control->zerodma_virt_addr) {
-                       pci_free_consistent(nic->pdev, PAGE_SIZE,
-                                           mac_control->zerodma_virt_addr,
-                                           (dma_addr_t)0);
+                       dma_free_coherent(&nic->pdev->dev, PAGE_SIZE,
+                                         mac_control->zerodma_virt_addr,
+                                         (dma_addr_t)0);
                        DBG_PRINT(INIT_DBG,
                                  "%s: Freeing TxDL with zero DMA address. "
                                  "Virtual address %p\n",
@@ -939,8 +940,8 @@ static void free_shared_mem(struct s2io_nic *nic)
                        tmp_p_addr = ring->rx_blocks[j].block_dma_addr;
                        if (tmp_v_addr == NULL)
                                break;
-                       pci_free_consistent(nic->pdev, size,
-                                           tmp_v_addr, tmp_p_addr);
+                       dma_free_coherent(&nic->pdev->dev, size, tmp_v_addr,
+                                         tmp_p_addr);
                        swstats->mem_freed += size;
                        kfree(ring->rx_blocks[j].rxds);
                        swstats->mem_freed += sizeof(struct rxd_info) *
@@ -993,10 +994,9 @@ static void free_shared_mem(struct s2io_nic *nic)
 
        if (mac_control->stats_mem) {
                swstats->mem_freed += mac_control->stats_mem_sz;
-               pci_free_consistent(nic->pdev,
-                                   mac_control->stats_mem_sz,
-                                   mac_control->stats_mem,
-                                   mac_control->stats_mem_phy);
+               dma_free_coherent(&nic->pdev->dev, mac_control->stats_mem_sz,
+                                 mac_control->stats_mem,
+                                 mac_control->stats_mem_phy);
        }
 }
 
@@ -2316,8 +2316,9 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
 
        txds = txdlp;
        if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
-               pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
-                                sizeof(u64), PCI_DMA_TODEVICE);
+               dma_unmap_single(&nic->pdev->dev,
+                                (dma_addr_t)txds->Buffer_Pointer,
+                                sizeof(u64), DMA_TO_DEVICE);
                txds++;
        }
 
@@ -2326,8 +2327,8 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
                memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
                return NULL;
        }
-       pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
-                        skb_headlen(skb), PCI_DMA_TODEVICE);
+       dma_unmap_single(&nic->pdev->dev, (dma_addr_t)txds->Buffer_Pointer,
+                        skb_headlen(skb), DMA_TO_DEVICE);
        frg_cnt = skb_shinfo(skb)->nr_frags;
        if (frg_cnt) {
                txds++;
@@ -2335,9 +2336,9 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
                        const skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
                        if (!txds->Buffer_Pointer)
                                break;
-                       pci_unmap_page(nic->pdev,
+                       dma_unmap_page(&nic->pdev->dev,
                                       (dma_addr_t)txds->Buffer_Pointer,
-                                      skb_frag_size(frag), PCI_DMA_TODEVICE);
+                                      skb_frag_size(frag), DMA_TO_DEVICE);
                }
        }
        memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
@@ -2521,11 +2522,10 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
                        memset(rxdp, 0, sizeof(struct RxD1));
                        skb_reserve(skb, NET_IP_ALIGN);
                        rxdp1->Buffer0_ptr =
-                               pci_map_single(ring->pdev, skb->data,
+                               dma_map_single(&ring->pdev->dev, skb->data,
                                               size - NET_IP_ALIGN,
-                                              PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(nic->pdev,
-                                                 rxdp1->Buffer0_ptr))
+                                              DMA_FROM_DEVICE);
+                       if (dma_mapping_error(&nic->pdev->dev, rxdp1->Buffer0_ptr))
                                goto pci_map_failed;
 
                        rxdp->Control_2 =
@@ -2557,17 +2557,16 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
 
                        if (from_card_up) {
                                rxdp3->Buffer0_ptr =
-                                       pci_map_single(ring->pdev, ba->ba_0,
-                                                      BUF0_LEN,
-                                                      PCI_DMA_FROMDEVICE);
-                               if (pci_dma_mapping_error(nic->pdev,
-                                                         rxdp3->Buffer0_ptr))
+                                       dma_map_single(&ring->pdev->dev,
+                                                      ba->ba_0, BUF0_LEN,
+                                                      DMA_FROM_DEVICE);
+                               if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer0_ptr))
                                        goto pci_map_failed;
                        } else
-                               pci_dma_sync_single_for_device(ring->pdev,
-                                                              (dma_addr_t)rxdp3->Buffer0_ptr,
-                                                              BUF0_LEN,
-                                                              PCI_DMA_FROMDEVICE);
+                               dma_sync_single_for_device(&ring->pdev->dev,
+                                                          (dma_addr_t)rxdp3->Buffer0_ptr,
+                                                          BUF0_LEN,
+                                                          DMA_FROM_DEVICE);
 
                        rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
                        if (ring->rxd_mode == RXD_MODE_3B) {
@@ -2577,29 +2576,28 @@ static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
                                 * Buffer2 will have L3/L4 header plus
                                 * L4 payload
                                 */
-                               rxdp3->Buffer2_ptr = pci_map_single(ring->pdev,
+                               rxdp3->Buffer2_ptr = dma_map_single(&ring->pdev->dev,
                                                                    skb->data,
                                                                    ring->mtu + 4,
-                                                                   PCI_DMA_FROMDEVICE);
+                                                                   DMA_FROM_DEVICE);
 
-                               if (pci_dma_mapping_error(nic->pdev,
-                                                         rxdp3->Buffer2_ptr))
+                               if (dma_mapping_error(&nic->pdev->dev, rxdp3->Buffer2_ptr))
                                        goto pci_map_failed;
 
                                if (from_card_up) {
                                        rxdp3->Buffer1_ptr =
-                                               pci_map_single(ring->pdev,
+                                               dma_map_single(&ring->pdev->dev,
                                                               ba->ba_1,
                                                               BUF1_LEN,
-                                                              PCI_DMA_FROMDEVICE);
+                                                              DMA_FROM_DEVICE);
 
-                                       if (pci_dma_mapping_error(nic->pdev,
-                                                                 rxdp3->Buffer1_ptr)) {
-                                               pci_unmap_single(ring->pdev,
+                                       if (dma_mapping_error(&nic->pdev->dev,
+                                                             rxdp3->Buffer1_ptr)) {
+                                               dma_unmap_single(&ring->pdev->dev,
                                                                 (dma_addr_t)(unsigned long)
                                                                 skb->data,
                                                                 ring->mtu + 4,
-                                                                PCI_DMA_FROMDEVICE);
+                                                                DMA_FROM_DEVICE);
                                                goto pci_map_failed;
                                        }
                                }
@@ -2668,27 +2666,24 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
                        continue;
                if (sp->rxd_mode == RXD_MODE_1) {
                        rxdp1 = (struct RxD1 *)rxdp;
-                       pci_unmap_single(sp->pdev,
+                       dma_unmap_single(&sp->pdev->dev,
                                         (dma_addr_t)rxdp1->Buffer0_ptr,
                                         dev->mtu +
                                         HEADER_ETHERNET_II_802_3_SIZE +
                                         HEADER_802_2_SIZE + HEADER_SNAP_SIZE,
-                                        PCI_DMA_FROMDEVICE);
+                                        DMA_FROM_DEVICE);
                        memset(rxdp, 0, sizeof(struct RxD1));
                } else if (sp->rxd_mode == RXD_MODE_3B) {
                        rxdp3 = (struct RxD3 *)rxdp;
-                       pci_unmap_single(sp->pdev,
+                       dma_unmap_single(&sp->pdev->dev,
                                         (dma_addr_t)rxdp3->Buffer0_ptr,
-                                        BUF0_LEN,
-                                        PCI_DMA_FROMDEVICE);
-                       pci_unmap_single(sp->pdev,
+                                        BUF0_LEN, DMA_FROM_DEVICE);
+                       dma_unmap_single(&sp->pdev->dev,
                                         (dma_addr_t)rxdp3->Buffer1_ptr,
-                                        BUF1_LEN,
-                                        PCI_DMA_FROMDEVICE);
-                       pci_unmap_single(sp->pdev,
+                                        BUF1_LEN, DMA_FROM_DEVICE);
+                       dma_unmap_single(&sp->pdev->dev,
                                         (dma_addr_t)rxdp3->Buffer2_ptr,
-                                        dev->mtu + 4,
-                                        PCI_DMA_FROMDEVICE);
+                                        dev->mtu + 4, DMA_FROM_DEVICE);
                        memset(rxdp, 0, sizeof(struct RxD3));
                }
                swstats->mem_freed += skb->truesize;
@@ -2919,23 +2914,21 @@ static int rx_intr_handler(struct ring_info *ring_data, int budget)
                }
                if (ring_data->rxd_mode == RXD_MODE_1) {
                        rxdp1 = (struct RxD1 *)rxdp;
-                       pci_unmap_single(ring_data->pdev, (dma_addr_t)
-                                        rxdp1->Buffer0_ptr,
+                       dma_unmap_single(&ring_data->pdev->dev,
+                                        (dma_addr_t)rxdp1->Buffer0_ptr,
                                         ring_data->mtu +
                                         HEADER_ETHERNET_II_802_3_SIZE +
                                         HEADER_802_2_SIZE +
                                         HEADER_SNAP_SIZE,
-                                        PCI_DMA_FROMDEVICE);
+                                        DMA_FROM_DEVICE);
                } else if (ring_data->rxd_mode == RXD_MODE_3B) {
                        rxdp3 = (struct RxD3 *)rxdp;
-                       pci_dma_sync_single_for_cpu(ring_data->pdev,
-                                                   (dma_addr_t)rxdp3->Buffer0_ptr,
-                                                   BUF0_LEN,
-                                                   PCI_DMA_FROMDEVICE);
-                       pci_unmap_single(ring_data->pdev,
+                       dma_sync_single_for_cpu(&ring_data->pdev->dev,
+                                               (dma_addr_t)rxdp3->Buffer0_ptr,
+                                               BUF0_LEN, DMA_FROM_DEVICE);
+                       dma_unmap_single(&ring_data->pdev->dev,
                                         (dma_addr_t)rxdp3->Buffer2_ptr,
-                                        ring_data->mtu + 4,
-                                        PCI_DMA_FROMDEVICE);
+                                        ring_data->mtu + 4, DMA_FROM_DEVICE);
                }
                prefetch(skb->data);
                rx_osm_handler(ring_data, rxdp);
@@ -4117,9 +4110,9 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        frg_len = skb_headlen(skb);
-       txdp->Buffer_Pointer = pci_map_single(sp->pdev, skb->data,
-                                             frg_len, PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
+       txdp->Buffer_Pointer = dma_map_single(&sp->pdev->dev, skb->data,
+                                             frg_len, DMA_TO_DEVICE);
+       if (dma_mapping_error(&sp->pdev->dev, txdp->Buffer_Pointer))
                goto pci_map_failed;
 
        txdp->Host_Control = (unsigned long)skb;
@@ -6772,10 +6765,10 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
                         * Host Control is NULL
                         */
                        rxdp1->Buffer0_ptr = *temp0 =
-                               pci_map_single(sp->pdev, (*skb)->data,
+                               dma_map_single(&sp->pdev->dev, (*skb)->data,
                                               size - NET_IP_ALIGN,
-                                              PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
+                                              DMA_FROM_DEVICE);
+                       if (dma_mapping_error(&sp->pdev->dev, rxdp1->Buffer0_ptr))
                                goto memalloc_failed;
                        rxdp->Host_Control = (unsigned long) (*skb);
                }
@@ -6798,37 +6791,34 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
                        }
                        stats->mem_allocated += (*skb)->truesize;
                        rxdp3->Buffer2_ptr = *temp2 =
-                               pci_map_single(sp->pdev, (*skb)->data,
-                                              dev->mtu + 4,
-                                              PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
+                               dma_map_single(&sp->pdev->dev, (*skb)->data,
+                                              dev->mtu + 4, DMA_FROM_DEVICE);
+                       if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer2_ptr))
                                goto memalloc_failed;
                        rxdp3->Buffer0_ptr = *temp0 =
-                               pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
-                                              PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(sp->pdev,
-                                                 rxdp3->Buffer0_ptr)) {
-                               pci_unmap_single(sp->pdev,
+                               dma_map_single(&sp->pdev->dev, ba->ba_0,
+                                              BUF0_LEN, DMA_FROM_DEVICE);
+                       if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer0_ptr)) {
+                               dma_unmap_single(&sp->pdev->dev,
                                                 (dma_addr_t)rxdp3->Buffer2_ptr,
                                                 dev->mtu + 4,
-                                                PCI_DMA_FROMDEVICE);
+                                                DMA_FROM_DEVICE);
                                goto memalloc_failed;
                        }
                        rxdp->Host_Control = (unsigned long) (*skb);
 
                        /* Buffer-1 will be dummy buffer not used */
                        rxdp3->Buffer1_ptr = *temp1 =
-                               pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
-                                              PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(sp->pdev,
-                                                 rxdp3->Buffer1_ptr)) {
-                               pci_unmap_single(sp->pdev,
+                               dma_map_single(&sp->pdev->dev, ba->ba_1,
+                                              BUF1_LEN, DMA_FROM_DEVICE);
+                       if (dma_mapping_error(&sp->pdev->dev, rxdp3->Buffer1_ptr)) {
+                               dma_unmap_single(&sp->pdev->dev,
                                                 (dma_addr_t)rxdp3->Buffer0_ptr,
-                                                BUF0_LEN, PCI_DMA_FROMDEVICE);
-                               pci_unmap_single(sp->pdev,
+                                                BUF0_LEN, DMA_FROM_DEVICE);
+                               dma_unmap_single(&sp->pdev->dev,
                                                 (dma_addr_t)rxdp3->Buffer2_ptr,
                                                 dev->mtu + 4,
-                                                PCI_DMA_FROMDEVICE);
+                                                DMA_FROM_DEVICE);
                                goto memalloc_failed;
                        }
                }
@@ -7675,17 +7665,16 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
                return ret;
        }
 
-       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                DBG_PRINT(INIT_DBG, "%s: Using 64bit DMA\n", __func__);
                dma_flag = true;
-               if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+               if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                        DBG_PRINT(ERR_DBG,
-                                 "Unable to obtain 64bit DMA "
-                                 "for consistent allocations\n");
+                                 "Unable to obtain 64bit DMA for coherent allocations\n");
                        pci_disable_device(pdev);
                        return -ENOMEM;
                }
-       } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+       } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
                DBG_PRINT(INIT_DBG, "%s: Using 32bit DMA\n", __func__);
        } else {
                pci_disable_device(pdev);
index 51cd57a..4f1f90f 100644 (file)
@@ -1102,10 +1102,10 @@ static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
        hldev = blockpool->hldev;
 
        list_for_each_safe(p, n, &blockpool->free_block_list) {
-               pci_unmap_single(hldev->pdev,
-                       ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
-                       ((struct __vxge_hw_blockpool_entry *)p)->length,
-                       PCI_DMA_BIDIRECTIONAL);
+               dma_unmap_single(&hldev->pdev->dev,
+                                ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
+                                ((struct __vxge_hw_blockpool_entry *)p)->length,
+                                DMA_BIDIRECTIONAL);
 
                vxge_os_dma_free(hldev->pdev,
                        ((struct __vxge_hw_blockpool_entry *)p)->memblock,
@@ -1178,10 +1178,10 @@ __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
                        goto blockpool_create_exit;
                }
 
-               dma_addr = pci_map_single(hldev->pdev, memblock,
-                               VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
-               if (unlikely(pci_dma_mapping_error(hldev->pdev,
-                               dma_addr))) {
+               dma_addr = dma_map_single(&hldev->pdev->dev, memblock,
+                                         VXGE_HW_BLOCK_SIZE,
+                                         DMA_BIDIRECTIONAL);
+               if (unlikely(dma_mapping_error(&hldev->pdev->dev, dma_addr))) {
                        vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
                        __vxge_hw_blockpool_destroy(blockpool);
                        status = VXGE_HW_ERR_OUT_OF_MEMORY;
@@ -2264,10 +2264,10 @@ static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
                goto exit;
        }
 
-       dma_addr = pci_map_single(devh->pdev, block_addr, length,
-                               PCI_DMA_BIDIRECTIONAL);
+       dma_addr = dma_map_single(&devh->pdev->dev, block_addr, length,
+                                 DMA_BIDIRECTIONAL);
 
-       if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
+       if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_addr))) {
                vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
                blockpool->req_out--;
                goto exit;
@@ -2359,11 +2359,10 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
                if (!memblock)
                        goto exit;
 
-               dma_object->addr = pci_map_single(devh->pdev, memblock, size,
-                                       PCI_DMA_BIDIRECTIONAL);
+               dma_object->addr = dma_map_single(&devh->pdev->dev, memblock,
+                                                 size, DMA_BIDIRECTIONAL);
 
-               if (unlikely(pci_dma_mapping_error(devh->pdev,
-                               dma_object->addr))) {
+               if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_object->addr))) {
                        vxge_os_dma_free(devh->pdev, memblock,
                                &dma_object->acc_handle);
                        memblock = NULL;
@@ -2410,11 +2409,10 @@ __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
                if (blockpool->pool_size < blockpool->pool_max)
                        break;
 
-               pci_unmap_single(
-                       (blockpool->hldev)->pdev,
-                       ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
-                       ((struct __vxge_hw_blockpool_entry *)p)->length,
-                       PCI_DMA_BIDIRECTIONAL);
+               dma_unmap_single(&(blockpool->hldev)->pdev->dev,
+                                ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
+                                ((struct __vxge_hw_blockpool_entry *)p)->length,
+                                DMA_BIDIRECTIONAL);
 
                vxge_os_dma_free(
                        (blockpool->hldev)->pdev,
@@ -2445,8 +2443,8 @@ static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
        blockpool = &devh->block_pool;
 
        if (size != blockpool->block_size) {
-               pci_unmap_single(devh->pdev, dma_object->addr, size,
-                       PCI_DMA_BIDIRECTIONAL);
+               dma_unmap_single(&devh->pdev->dev, dma_object->addr, size,
+                                DMA_BIDIRECTIONAL);
                vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
        } else {
 
index 628fa9b..3731651 100644 (file)
@@ -297,7 +297,7 @@ struct vxge_hw_fifo_config {
  * @greedy_return: If Set it forces the device to return absolutely all RxD
  *             that are consumed and still on board when a timer interrupt
  *             triggers. If Clear, then if the device has already returned
- *             RxD before current timer interrupt trigerred and after the
+ *             RxD before current timer interrupt triggered and after the
  *             previous timer interrupt triggered, then the device is not
  *             forced to returned the rest of the consumed RxD that it has
  *             on board which account for a byte count less than the one
index 9b63574..fc4ed0a 100644 (file)
@@ -241,10 +241,10 @@ static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
        rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
 
        rx_priv->skb_data = rx_priv->skb->data;
-       dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
-                               rx_priv->data_size, PCI_DMA_FROMDEVICE);
+       dma_addr = dma_map_single(&ring->pdev->dev, rx_priv->skb_data,
+                                 rx_priv->data_size, DMA_FROM_DEVICE);
 
-       if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
+       if (unlikely(dma_mapping_error(&ring->pdev->dev, dma_addr))) {
                ring->stats.pci_map_fail++;
                return -EIO;
        }
@@ -323,8 +323,8 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
 static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
                                    struct vxge_rx_priv *rx_priv)
 {
-       pci_dma_sync_single_for_device(ring->pdev,
-               rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);
+       dma_sync_single_for_device(&ring->pdev->dev, rx_priv->data_dma,
+                                  rx_priv->data_size, DMA_FROM_DEVICE);
 
        vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
        vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
@@ -425,8 +425,9 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
                                if (!vxge_rx_map(dtr, ring)) {
                                        skb_put(skb, pkt_length);
 
-                                       pci_unmap_single(ring->pdev, data_dma,
-                                               data_size, PCI_DMA_FROMDEVICE);
+                                       dma_unmap_single(&ring->pdev->dev,
+                                                        data_dma, data_size,
+                                                        DMA_FROM_DEVICE);
 
                                        vxge_hw_ring_rxd_pre_post(ringh, dtr);
                                        vxge_post(&dtr_cnt, &first_dtr, dtr,
@@ -458,9 +459,9 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
                                skb_reserve(skb_up,
                                    VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
 
-                               pci_dma_sync_single_for_cpu(ring->pdev,
-                                       data_dma, data_size,
-                                       PCI_DMA_FROMDEVICE);
+                               dma_sync_single_for_cpu(&ring->pdev->dev,
+                                                       data_dma, data_size,
+                                                       DMA_FROM_DEVICE);
 
                                vxge_debug_mem(VXGE_TRACE,
                                        "%s: %s:%d  skb_up = %p",
@@ -585,13 +586,13 @@ vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
                }
 
                /*  for unfragmented skb */
-               pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
-                               skb_headlen(skb), PCI_DMA_TODEVICE);
+               dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
+                                skb_headlen(skb), DMA_TO_DEVICE);
 
                for (j = 0; j < frg_cnt; j++) {
-                       pci_unmap_page(fifo->pdev,
-                                       txd_priv->dma_buffers[i++],
-                                       skb_frag_size(frag), PCI_DMA_TODEVICE);
+                       dma_unmap_page(&fifo->pdev->dev,
+                                      txd_priv->dma_buffers[i++],
+                                      skb_frag_size(frag), DMA_TO_DEVICE);
                        frag += 1;
                }
 
@@ -897,10 +898,10 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
 
        first_frg_len = skb_headlen(skb);
 
-       dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
-                               PCI_DMA_TODEVICE);
+       dma_pointer = dma_map_single(&fifo->pdev->dev, skb->data,
+                                    first_frg_len, DMA_TO_DEVICE);
 
-       if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
+       if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer))) {
                vxge_hw_fifo_txdl_free(fifo_hw, dtr);
                fifo->stats.pci_map_fail++;
                goto _exit0;
@@ -977,12 +978,12 @@ _exit1:
        j = 0;
        frag = &skb_shinfo(skb)->frags[0];
 
-       pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
-                       skb_headlen(skb), PCI_DMA_TODEVICE);
+       dma_unmap_single(&fifo->pdev->dev, txdl_priv->dma_buffers[j++],
+                        skb_headlen(skb), DMA_TO_DEVICE);
 
        for (; j < i; j++) {
-               pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
-                       skb_frag_size(frag), PCI_DMA_TODEVICE);
+               dma_unmap_page(&fifo->pdev->dev, txdl_priv->dma_buffers[j],
+                              skb_frag_size(frag), DMA_TO_DEVICE);
                frag += 1;
        }
 
@@ -1012,8 +1013,8 @@ vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
        if (state != VXGE_HW_RXD_STATE_POSTED)
                return;
 
-       pci_unmap_single(ring->pdev, rx_priv->data_dma,
-               rx_priv->data_size, PCI_DMA_FROMDEVICE);
+       dma_unmap_single(&ring->pdev->dev, rx_priv->data_dma,
+                        rx_priv->data_size, DMA_FROM_DEVICE);
 
        dev_kfree_skb(rx_priv->skb);
        rx_priv->skb_data = NULL;
@@ -1048,12 +1049,12 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
        frag = &skb_shinfo(skb)->frags[0];
 
        /*  for unfragmented skb */
-       pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
-               skb_headlen(skb), PCI_DMA_TODEVICE);
+       dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
+                        skb_headlen(skb), DMA_TO_DEVICE);
 
        for (j = 0; j < frg_cnt; j++) {
-               pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
-                              skb_frag_size(frag), PCI_DMA_TODEVICE);
+               dma_unmap_page(&fifo->pdev->dev, txd_priv->dma_buffers[i++],
+                              skb_frag_size(frag), DMA_TO_DEVICE);
                frag += 1;
        }
 
@@ -1075,7 +1076,7 @@ static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
        list_for_each_safe(entry, next, &vpath->mac_addr_list) {
                if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
                        list_del(entry);
-                       kfree((struct vxge_mac_addrs *)entry);
+                       kfree(entry);
                        vpath->mac_addr_cnt--;
 
                        if (is_multicast_ether_addr(mac->macaddr))
@@ -2912,7 +2913,7 @@ static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
 
        list_for_each_safe(entry, next, &vpath->mac_addr_list) {
                list_del(entry);
-               kfree((struct vxge_mac_addrs *)entry);
+               kfree(entry);
        }
 }
 
@@ -3999,12 +4000,11 @@ static void vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
        }
 }
 
-#ifdef CONFIG_PM
 /**
  * vxge_pm_suspend - vxge power management suspend entry point
  *
  */
-static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused vxge_pm_suspend(struct device *dev_d)
 {
        return -ENOSYS;
 }
@@ -4012,13 +4012,11 @@ static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
  * vxge_pm_resume - vxge power management resume entry point
  *
  */
-static int vxge_pm_resume(struct pci_dev *pdev)
+static int __maybe_unused vxge_pm_resume(struct device *dev_d)
 {
        return -ENOSYS;
 }
 
-#endif
-
 /**
  * vxge_io_error_detected - called when PCI error is detected
  * @pdev: Pointer to PCI device
@@ -4390,21 +4388,20 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                goto _exit0;
        }
 
-       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                vxge_debug_ll_config(VXGE_TRACE,
                        "%s : using 64bit DMA", __func__);
 
                high_dma = 1;
 
-               if (pci_set_consistent_dma_mask(pdev,
-                                               DMA_BIT_MASK(64))) {
+               if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                        vxge_debug_init(VXGE_ERR,
                                "%s : unable to obtain 64bit DMA for "
                                "consistent allocations", __func__);
                        ret = -ENOMEM;
                        goto _exit1;
                }
-       } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+       } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
                vxge_debug_ll_config(VXGE_TRACE,
                        "%s : using 32bit DMA", __func__);
        } else {
@@ -4796,15 +4793,14 @@ static const struct pci_error_handlers vxge_err_handler = {
        .resume = vxge_io_resume,
 };
 
+static SIMPLE_DEV_PM_OPS(vxge_pm_ops, vxge_pm_suspend, vxge_pm_resume);
+
 static struct pci_driver vxge_driver = {
        .name = VXGE_DRIVER_NAME,
        .id_table = vxge_id_table,
        .probe = vxge_probe,
        .remove = vxge_remove,
-#ifdef CONFIG_PM
-       .suspend = vxge_pm_suspend,
-       .resume = vxge_pm_resume,
-#endif
+       .driver.pm = &vxge_pm_ops,
        .err_handler = &vxge_err_handler,
 };
 
index c393276..bb448c8 100644 (file)
@@ -861,7 +861,7 @@ static void nfp_flower_clean(struct nfp_app *app)
        flush_work(&app_priv->cmsg_work);
 
        flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
-                                nfp_flower_setup_indr_block_cb);
+                                nfp_flower_setup_indr_tc_release);
 
        if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
                nfp_flower_qos_cleanup(app);
index 6c3dc3b..3bf9c1a 100644 (file)
@@ -458,10 +458,11 @@ void nfp_flower_qos_cleanup(struct nfp_app *app);
 int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
                                 struct tc_cls_matchall_offload *flow);
 void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb);
-int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
-                               enum tc_setup_type type, void *type_data);
-int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, void *type_data,
-                                  void *cb_priv);
+int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
+                               enum tc_setup_type type, void *type_data,
+                               void *data,
+                               void (*cleanup)(struct flow_block_cb *block_cb));
+void nfp_flower_setup_indr_tc_release(void *cb_priv);
 
 void
 __nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv);
index 695d24b..4651fe4 100644 (file)
@@ -1491,7 +1491,7 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
                nfp_flower_update_merge_stats(app, nfp_flow);
 
        flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
-                         priv->stats[ctx_id].pkts, priv->stats[ctx_id].used,
+                         priv->stats[ctx_id].pkts, 0, priv->stats[ctx_id].used,
                          FLOW_ACTION_HW_STATS_DELAYED);
 
        priv->stats[ctx_id].pkts = 0;
@@ -1619,8 +1619,8 @@ nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
        return NULL;
 }
 
-int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
-                                  void *type_data, void *cb_priv)
+static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
+                                         void *type_data, void *cb_priv)
 {
        struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
        struct flow_cls_offload *flower = type_data;
@@ -1637,7 +1637,7 @@ int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
        }
 }
 
-static void nfp_flower_setup_indr_tc_release(void *cb_priv)
+void nfp_flower_setup_indr_tc_release(void *cb_priv)
 {
        struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
 
@@ -1646,8 +1646,9 @@ static void nfp_flower_setup_indr_tc_release(void *cb_priv)
 }
 
 static int
-nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
-                              struct flow_block_offload *f)
+nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, struct nfp_app *app,
+                              struct flow_block_offload *f, void *data,
+                              void (*cleanup)(struct flow_block_cb *block_cb))
 {
        struct nfp_flower_indr_block_cb_priv *cb_priv;
        struct nfp_flower_priv *priv = app->priv;
@@ -1676,9 +1677,10 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
                cb_priv->app = app;
                list_add(&cb_priv->list, &priv->indr_block_cb_priv);
 
-               block_cb = flow_block_cb_alloc(nfp_flower_setup_indr_block_cb,
-                                              cb_priv, cb_priv,
-                                              nfp_flower_setup_indr_tc_release);
+               block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb,
+                                                   cb_priv, cb_priv,
+                                                   nfp_flower_setup_indr_tc_release,
+                                                   f, netdev, sch, data, app, cleanup);
                if (IS_ERR(block_cb)) {
                        list_del(&cb_priv->list);
                        kfree(cb_priv);
@@ -1699,7 +1701,7 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
                if (!block_cb)
                        return -ENOENT;
 
-               flow_block_cb_remove(block_cb, f);
+               flow_indr_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
                return 0;
        default:
@@ -1709,16 +1711,18 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
 }
 
 int
-nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
-                           enum tc_setup_type type, void *type_data)
+nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
+                           enum tc_setup_type type, void *type_data,
+                           void *data,
+                           void (*cleanup)(struct flow_block_cb *block_cb))
 {
        if (!nfp_fl_is_netdev_to_offload(netdev))
                return -EOPNOTSUPP;
 
        switch (type) {
        case TC_SETUP_BLOCK:
-               return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
-                                                     type_data);
+               return nfp_flower_setup_indr_tc_block(netdev, sch, cb_priv,
+                                                     type_data, data, cleanup);
        default:
                return -EOPNOTSUPP;
        }
index d18a830..d4ce8f9 100644 (file)
@@ -69,7 +69,8 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
        struct nfp_repr *repr;
        struct sk_buff *skb;
        u32 netdev_port_id;
-       u64 burst, rate;
+       u32 burst;
+       u64 rate;
 
        if (!nfp_netdev_is_nfp_repr(netdev)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
@@ -104,8 +105,7 @@ nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
        }
 
        rate = action->police.rate_bytes_ps;
-       burst = div_u64(rate * PSCHED_NS2TICKS(action->police.burst),
-                       PSCHED_TICKS_PER_SEC);
+       burst = action->police.burst;
        netdev_port_id = nfp_repr_get_port_id(netdev);
 
        skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
@@ -319,7 +319,7 @@ nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev,
        prev_stats->bytes = curr_stats->bytes;
        spin_unlock_bh(&fl_priv->qos_stats_lock);
 
-       flow_stats_update(&flow->stats, diff_bytes, diff_pkts,
+       flow_stats_update(&flow->stats, diff_bytes, diff_pkts, 0,
                          repr_priv->qos_table.last_update,
                          FLOW_ACTION_HW_STATS_DELAYED);
        return 0;
index 07dbf4d..be52510 100644 (file)
@@ -70,9 +70,6 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
        unsigned int lanes;
        int ret;
 
-       if (count < 2)
-               return -EINVAL;
-
        mutex_lock(&pf->lock);
 
        rtnl_lock();
@@ -81,7 +78,7 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
        if (ret)
                goto out;
 
-       if (eth_port.is_split || eth_port.port_lanes % count) {
+       if (eth_port.port_lanes % count) {
                ret = -EINVAL;
                goto out;
        }
@@ -353,6 +350,7 @@ const struct devlink_ops nfp_devlink_ops = {
 
 int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
 {
+       struct devlink_port_attrs attrs = {};
        struct nfp_eth_table_port eth_port;
        struct devlink *devlink;
        const u8 *serial;
@@ -365,10 +363,15 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
        if (ret)
                return ret;
 
+       attrs.split = eth_port.is_split;
+       attrs.splittable = !attrs.split;
+       attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+       attrs.phys.port_number = eth_port.label_port;
+       attrs.phys.split_subport_number = eth_port.label_subport;
        serial_len = nfp_cpp_serial(port->app->cpp, &serial);
-       devlink_port_attrs_set(&port->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
-                              eth_port.label_port, eth_port.is_split,
-                              eth_port.label_subport, serial, serial_len);
+       memcpy(attrs.switch_id.id, serial, serial_len);
+       attrs.switch_id.id_len = serial_len;
+       devlink_port_attrs_set(&port->dl_port, &attrs);
 
        devlink = priv_to_devlink(app->pf);
 
index ff44384..df5b748 100644 (file)
@@ -575,8 +575,6 @@ struct nfp_net_dp {
  * @rx_coalesce_max_frames: RX interrupt moderation frame count parameter
  * @tx_coalesce_usecs:      TX interrupt moderation usecs delay parameter
  * @tx_coalesce_max_frames: TX interrupt moderation frame count parameter
- * @vxlan_ports:       VXLAN ports for RX inner csum offload communicated to HW
- * @vxlan_usecnt:      IPv4/IPv6 VXLAN port use counts
  * @qcp_cfg:            Pointer to QCP queue used for configuration notification
  * @tx_bar:             Pointer to mapped TX queues
  * @rx_bar:             Pointer to mapped FL/RX queues
@@ -661,9 +659,6 @@ struct nfp_net {
        u32 tx_coalesce_usecs;
        u32 tx_coalesce_max_frames;
 
-       __be16 vxlan_ports[NFP_NET_N_VXLAN_PORTS];
-       u8 vxlan_usecnt[NFP_NET_N_VXLAN_PORTS];
-
        u8 __iomem *qcp_cfg;
 
        u8 __iomem *tx_bar;
index 0e0cc3d..4460887 100644 (file)
@@ -974,7 +974,7 @@ static int nfp_net_prep_tx_meta(struct sk_buff *skb, u64 tls_handle)
  *
  * Return: NETDEV_TX_OK on success.
  */
-static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 {
        struct nfp_net *nn = netdev_priv(netdev);
        const skb_frag_t *frag;
@@ -2867,15 +2867,6 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn)
        for (r = 0; r < nn->dp.num_rx_rings; r++)
                nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);
 
-       /* Since reconfiguration requests while NFP is down are ignored we
-        * have to wipe the entire VXLAN configuration and reinitialize it.
-        */
-       if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) {
-               memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
-               memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
-               udp_tunnel_get_rx_info(nn->dp.netdev);
-       }
-
        return 0;
 }
 
@@ -3566,87 +3557,6 @@ nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
        return 0;
 }
 
-/**
- * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
- * @nn:   NFP Net device to reconfigure
- * @idx:  Index into the port table where new port should be written
- * @port: UDP port to configure (pass zero to remove VXLAN port)
- */
-static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
-{
-       int i;
-
-       nn->vxlan_ports[idx] = port;
-
-       if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN))
-               return;
-
-       BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
-       for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2)
-               nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
-                         be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
-                         be16_to_cpu(nn->vxlan_ports[i]));
-
-       nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
-}
-
-/**
- * nfp_net_find_vxlan_idx() - find table entry of the port or a free one
- * @nn:   NFP Network structure
- * @port: UDP port to look for
- *
- * Return: if the port is already in the table -- it's position;
- *        if the port is not in the table -- free position to use;
- *        if the table is full -- -ENOSPC.
- */
-static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port)
-{
-       int i, free_idx = -ENOSPC;
-
-       for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
-               if (nn->vxlan_ports[i] == port)
-                       return i;
-               if (!nn->vxlan_usecnt[i])
-                       free_idx = i;
-       }
-
-       return free_idx;
-}
-
-static void nfp_net_add_vxlan_port(struct net_device *netdev,
-                                  struct udp_tunnel_info *ti)
-{
-       struct nfp_net *nn = netdev_priv(netdev);
-       int idx;
-
-       if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-               return;
-
-       idx = nfp_net_find_vxlan_idx(nn, ti->port);
-       if (idx == -ENOSPC)
-               return;
-
-       if (!nn->vxlan_usecnt[idx]++)
-               nfp_net_set_vxlan_port(nn, idx, ti->port);
-}
-
-static void nfp_net_del_vxlan_port(struct net_device *netdev,
-                                  struct udp_tunnel_info *ti)
-{
-       struct nfp_net *nn = netdev_priv(netdev);
-       int idx;
-
-       if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-               return;
-
-       idx = nfp_net_find_vxlan_idx(nn, ti->port);
-       if (idx == -ENOSPC || !nn->vxlan_usecnt[idx])
-               return;
-
-       if (!--nn->vxlan_usecnt[idx])
-               nfp_net_set_vxlan_port(nn, idx, 0);
-}
-
 static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
 {
        struct bpf_prog *prog = bpf->prog;
@@ -3757,12 +3667,43 @@ const struct net_device_ops nfp_net_netdev_ops = {
        .ndo_set_features       = nfp_net_set_features,
        .ndo_features_check     = nfp_net_features_check,
        .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
-       .ndo_udp_tunnel_add     = nfp_net_add_vxlan_port,
-       .ndo_udp_tunnel_del     = nfp_net_del_vxlan_port,
+       .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+       .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
        .ndo_bpf                = nfp_net_xdp,
        .ndo_get_devlink_port   = nfp_devlink_get_devlink_port,
 };
 
+static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
+{
+       struct nfp_net *nn = netdev_priv(netdev);
+       int i;
+
+       BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
+       for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) {
+               struct udp_tunnel_info ti0, ti1;
+
+               udp_tunnel_nic_get_port(netdev, table, i, &ti0);
+               udp_tunnel_nic_get_port(netdev, table, i + 1, &ti1);
+
+               nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(ti0.port),
+                         be16_to_cpu(ti1.port) << 16 | be16_to_cpu(ti0.port));
+       }
+
+       return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN);
+}
+
+static const struct udp_tunnel_nic_info nfp_udp_tunnels = {
+       .sync_table     = nfp_udp_tunnel_sync,
+       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+                         UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
+       .tables         = {
+               {
+                       .n_entries      = NFP_NET_N_VXLAN_PORTS,
+                       .tunnel_types   = UDP_TUNNEL_TYPE_VXLAN,
+               },
+       },
+};
+
 /**
  * nfp_net_info() - Print general info about the NIC
  * @nn:      NFP Net device to reconfigure
@@ -4010,6 +3951,7 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
        if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) {
                if (nn->cap & NFP_NET_CFG_CTRL_LSO)
                        netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+               netdev->udp_tunnel_nic_info = &nfp_udp_tunnels;
                nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN;
        }
        if (nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
index 32b9d77..55cef5b 100644 (file)
@@ -147,7 +147,7 @@ struct pch_gbe_regs {
 #define PCH_GBE_RH_ALM_FULL_8   0x00001000      /* 8 words */
 #define PCH_GBE_RH_ALM_FULL_16  0x00002000      /* 16 words */
 #define PCH_GBE_RH_ALM_FULL_32  0x00003000      /* 32 words */
-/* RX FIFO Read Triger Threshold */
+/* RX FIFO Read Trigger Threshold */
 #define PCH_GBE_RH_RD_TRG_4     0x00000000      /* 4 words */
 #define PCH_GBE_RH_RD_TRG_8     0x00000200      /* 8 words */
 #define PCH_GBE_RH_RD_TRG_16    0x00000400      /* 16 words */
index 73ec195..23f7c76 100644 (file)
@@ -2064,7 +2064,7 @@ static int pch_gbe_stop(struct net_device *netdev)
  *     - NETDEV_TX_OK:   Normal end
  *     - NETDEV_TX_BUSY: Error end
  */
-static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
        struct pch_gbe_adapter *adapter = netdev_priv(netdev);
        struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
index 70816d2..d058a63 100644 (file)
@@ -644,13 +644,15 @@ static int hamachi_init_one(struct pci_dev *pdev,
        hmp->mii_if.phy_id_mask = 0x1f;
        hmp->mii_if.reg_num_mask = 0x1f;
 
-       ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+       ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
+                                       GFP_KERNEL);
        if (!ring_space)
                goto err_out_cleardev;
        hmp->tx_ring = ring_space;
        hmp->tx_ring_dma = ring_dma;
 
-       ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+       ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
+                                       GFP_KERNEL);
        if (!ring_space)
                goto err_out_unmap_tx;
        hmp->rx_ring = ring_space;
@@ -773,11 +775,11 @@ static int hamachi_init_one(struct pci_dev *pdev,
        return 0;
 
 err_out_unmap_rx:
-       pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring,
-               hmp->rx_ring_dma);
+       dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, hmp->rx_ring,
+                         hmp->rx_ring_dma);
 err_out_unmap_tx:
-       pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring,
-               hmp->tx_ring_dma);
+       dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, hmp->tx_ring,
+                         hmp->tx_ring_dma);
 err_out_cleardev:
        free_netdev (dev);
 err_out_iounmap:
@@ -1001,9 +1003,9 @@ static inline int hamachi_tx(struct net_device *dev)
                /* Free the original skb. */
                skb = hmp->tx_skbuff[entry];
                if (skb) {
-                       pci_unmap_single(hmp->pci_dev,
-                               leXX_to_cpu(hmp->tx_ring[entry].addr),
-                               skb->len, PCI_DMA_TODEVICE);
+                       dma_unmap_single(&hmp->pci_dev->dev,
+                                        leXX_to_cpu(hmp->tx_ring[entry].addr),
+                                        skb->len, DMA_TO_DEVICE);
                        dev_kfree_skb(skb);
                        hmp->tx_skbuff[entry] = NULL;
                }
@@ -1093,8 +1095,9 @@ static void hamachi_tx_timeout(struct net_device *dev, unsigned int txqueue)
                        hmp->tx_ring[i].status_n_length &= cpu_to_le32(0x0000ffff);
                skb = hmp->tx_skbuff[i];
                if (skb){
-                       pci_unmap_single(hmp->pci_dev, leXX_to_cpu(hmp->tx_ring[i].addr),
-                               skb->len, PCI_DMA_TODEVICE);
+                       dma_unmap_single(&hmp->pci_dev->dev,
+                                        leXX_to_cpu(hmp->tx_ring[i].addr),
+                                        skb->len, DMA_TO_DEVICE);
                        dev_kfree_skb(skb);
                        hmp->tx_skbuff[i] = NULL;
                }
@@ -1115,9 +1118,9 @@ static void hamachi_tx_timeout(struct net_device *dev, unsigned int txqueue)
                struct sk_buff *skb = hmp->rx_skbuff[i];
 
                if (skb){
-                       pci_unmap_single(hmp->pci_dev,
-                               leXX_to_cpu(hmp->rx_ring[i].addr),
-                               hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+                       dma_unmap_single(&hmp->pci_dev->dev,
+                                        leXX_to_cpu(hmp->rx_ring[i].addr),
+                                        hmp->rx_buf_sz, DMA_FROM_DEVICE);
                        dev_kfree_skb(skb);
                        hmp->rx_skbuff[i] = NULL;
                }
@@ -1131,8 +1134,10 @@ static void hamachi_tx_timeout(struct net_device *dev, unsigned int txqueue)
                if (skb == NULL)
                        break;
 
-                hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
-                       skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+               hmp->rx_ring[i].addr = cpu_to_leXX(dma_map_single(&hmp->pci_dev->dev,
+                                                                 skb->data,
+                                                                 hmp->rx_buf_sz,
+                                                                 DMA_FROM_DEVICE));
                hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
                        DescEndPacket | DescIntr | (hmp->rx_buf_sz - 2));
        }
@@ -1183,8 +1188,10 @@ static void hamachi_init_ring(struct net_device *dev)
                if (skb == NULL)
                        break;
                skb_reserve(skb, 2); /* 16 byte align the IP header. */
-                hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
-                       skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+               hmp->rx_ring[i].addr = cpu_to_leXX(dma_map_single(&hmp->pci_dev->dev,
+                                                                 skb->data,
+                                                                 hmp->rx_buf_sz,
+                                                                 DMA_FROM_DEVICE));
                /* -2 because it doesn't REALLY have that first 2 bytes -KDU */
                hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
                        DescEndPacket | DescIntr | (hmp->rx_buf_sz -2));
@@ -1233,8 +1240,10 @@ static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb,
 
        hmp->tx_skbuff[entry] = skb;
 
-        hmp->tx_ring[entry].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
-               skb->data, skb->len, PCI_DMA_TODEVICE));
+       hmp->tx_ring[entry].addr = cpu_to_leXX(dma_map_single(&hmp->pci_dev->dev,
+                                                             skb->data,
+                                                             skb->len,
+                                                             DMA_TO_DEVICE));
 
        /* Hmmmm, could probably put a DescIntr on these, but the way
                the driver is currently coded makes Tx interrupts unnecessary
@@ -1333,10 +1342,10 @@ static irqreturn_t hamachi_interrupt(int irq, void *dev_instance)
                                        skb = hmp->tx_skbuff[entry];
                                        /* Free the original skb. */
                                        if (skb){
-                                               pci_unmap_single(hmp->pci_dev,
-                                                       leXX_to_cpu(hmp->tx_ring[entry].addr),
-                                                       skb->len,
-                                                       PCI_DMA_TODEVICE);
+                                               dma_unmap_single(&hmp->pci_dev->dev,
+                                                                leXX_to_cpu(hmp->tx_ring[entry].addr),
+                                                                skb->len,
+                                                                DMA_TO_DEVICE);
                                                dev_consume_skb_irq(skb);
                                                hmp->tx_skbuff[entry] = NULL;
                                        }
@@ -1413,10 +1422,9 @@ static int hamachi_rx(struct net_device *dev)
 
                if (desc_status & DescOwn)
                        break;
-               pci_dma_sync_single_for_cpu(hmp->pci_dev,
-                                           leXX_to_cpu(desc->addr),
-                                           hmp->rx_buf_sz,
-                                           PCI_DMA_FROMDEVICE);
+               dma_sync_single_for_cpu(&hmp->pci_dev->dev,
+                                       leXX_to_cpu(desc->addr),
+                                       hmp->rx_buf_sz, DMA_FROM_DEVICE);
                buf_addr = (u8 *) hmp->rx_skbuff[entry]->data;
                frame_status = get_unaligned_le32(&(buf_addr[data_size - 12]));
                if (hamachi_debug > 4)
@@ -1483,10 +1491,10 @@ static int hamachi_rx(struct net_device *dev)
                                  "not good with RX_CHECKSUM\n", dev->name);
 #endif
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
-                               pci_dma_sync_single_for_cpu(hmp->pci_dev,
-                                                           leXX_to_cpu(hmp->rx_ring[entry].addr),
-                                                           hmp->rx_buf_sz,
-                                                           PCI_DMA_FROMDEVICE);
+                               dma_sync_single_for_cpu(&hmp->pci_dev->dev,
+                                                       leXX_to_cpu(hmp->rx_ring[entry].addr),
+                                                       hmp->rx_buf_sz,
+                                                       DMA_FROM_DEVICE);
                                /* Call copy + cksum if available. */
 #if 1 || USE_IP_COPYSUM
                                skb_copy_to_linear_data(skb,
@@ -1496,14 +1504,15 @@ static int hamachi_rx(struct net_device *dev)
                                skb_put_data(skb, hmp->rx_ring_dma
                                             + entry*sizeof(*desc), pkt_len);
 #endif
-                               pci_dma_sync_single_for_device(hmp->pci_dev,
-                                                              leXX_to_cpu(hmp->rx_ring[entry].addr),
-                                                              hmp->rx_buf_sz,
-                                                              PCI_DMA_FROMDEVICE);
+                               dma_sync_single_for_device(&hmp->pci_dev->dev,
+                                                          leXX_to_cpu(hmp->rx_ring[entry].addr),
+                                                          hmp->rx_buf_sz,
+                                                          DMA_FROM_DEVICE);
                        } else {
-                               pci_unmap_single(hmp->pci_dev,
+                               dma_unmap_single(&hmp->pci_dev->dev,
                                                 leXX_to_cpu(hmp->rx_ring[entry].addr),
-                                                hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+                                                hmp->rx_buf_sz,
+                                                DMA_FROM_DEVICE);
                                skb_put(skb = hmp->rx_skbuff[entry], pkt_len);
                                hmp->rx_skbuff[entry] = NULL;
                        }
@@ -1586,8 +1595,10 @@ static int hamachi_rx(struct net_device *dev)
                        if (skb == NULL)
                                break;          /* Better luck next round. */
                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
-                       desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
-                               skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+                       desc->addr = cpu_to_leXX(dma_map_single(&hmp->pci_dev->dev,
+                                                               skb->data,
+                                                               hmp->rx_buf_sz,
+                                                               DMA_FROM_DEVICE));
                }
                desc->status_n_length = cpu_to_le32(hmp->rx_buf_sz);
                if (entry >= RX_RING_SIZE-1)
@@ -1704,9 +1715,9 @@ static int hamachi_close(struct net_device *dev)
                skb = hmp->rx_skbuff[i];
                hmp->rx_ring[i].status_n_length = 0;
                if (skb) {
-                       pci_unmap_single(hmp->pci_dev,
-                               leXX_to_cpu(hmp->rx_ring[i].addr),
-                               hmp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+                       dma_unmap_single(&hmp->pci_dev->dev,
+                                        leXX_to_cpu(hmp->rx_ring[i].addr),
+                                        hmp->rx_buf_sz, DMA_FROM_DEVICE);
                        dev_kfree_skb(skb);
                        hmp->rx_skbuff[i] = NULL;
                }
@@ -1715,9 +1726,9 @@ static int hamachi_close(struct net_device *dev)
        for (i = 0; i < TX_RING_SIZE; i++) {
                skb = hmp->tx_skbuff[i];
                if (skb) {
-                       pci_unmap_single(hmp->pci_dev,
-                               leXX_to_cpu(hmp->tx_ring[i].addr),
-                               skb->len, PCI_DMA_TODEVICE);
+                       dma_unmap_single(&hmp->pci_dev->dev,
+                                        leXX_to_cpu(hmp->tx_ring[i].addr),
+                                        skb->len, DMA_TO_DEVICE);
                        dev_kfree_skb(skb);
                        hmp->tx_skbuff[i] = NULL;
                }
@@ -1899,10 +1910,10 @@ static void hamachi_remove_one(struct pci_dev *pdev)
        if (dev) {
                struct hamachi_private *hmp = netdev_priv(dev);
 
-               pci_free_consistent(pdev, RX_TOTAL_SIZE, hmp->rx_ring,
-                       hmp->rx_ring_dma);
-               pci_free_consistent(pdev, TX_TOTAL_SIZE, hmp->tx_ring,
-                       hmp->tx_ring_dma);
+               dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, hmp->rx_ring,
+                                 hmp->rx_ring_dma);
+               dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, hmp->tx_ring,
+                                 hmp->tx_ring_dma);
                unregister_netdev(dev);
                iounmap(hmp->base);
                free_netdev(dev);
index 520779f..647a143 100644 (file)
@@ -434,19 +434,22 @@ static int yellowfin_init_one(struct pci_dev *pdev,
        np->drv_flags = drv_flags;
        np->base = ioaddr;
 
-       ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+       ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
+                                       GFP_KERNEL);
        if (!ring_space)
                goto err_out_cleardev;
        np->tx_ring = ring_space;
        np->tx_ring_dma = ring_dma;
 
-       ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+       ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
+                                       GFP_KERNEL);
        if (!ring_space)
                goto err_out_unmap_tx;
        np->rx_ring = ring_space;
        np->rx_ring_dma = ring_dma;
 
-       ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma);
+       ring_space = dma_alloc_coherent(&pdev->dev, STATUS_TOTAL_SIZE,
+                                       &ring_dma, GFP_KERNEL);
        if (!ring_space)
                goto err_out_unmap_rx;
        np->tx_status = ring_space;
@@ -505,12 +508,14 @@ static int yellowfin_init_one(struct pci_dev *pdev,
        return 0;
 
 err_out_unmap_status:
-        pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
-               np->tx_status_dma);
+       dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
+                         np->tx_status_dma);
 err_out_unmap_rx:
-        pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+       dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+                         np->rx_ring_dma);
 err_out_unmap_tx:
-        pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+       dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+                         np->tx_ring_dma);
 err_out_cleardev:
        pci_iounmap(pdev, ioaddr);
 err_out_free_res:
@@ -740,8 +745,10 @@ static int yellowfin_init_ring(struct net_device *dev)
                if (skb == NULL)
                        break;
                skb_reserve(skb, 2);    /* 16 byte align the IP header. */
-               yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-                       skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+               yp->rx_ring[i].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
+                                                                skb->data,
+                                                                yp->rx_buf_sz,
+                                                                DMA_FROM_DEVICE));
        }
        if (i != RX_RING_SIZE) {
                for (j = 0; j < i; j++)
@@ -831,8 +838,9 @@ static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
        yp->tx_skbuff[entry] = skb;
 
 #ifdef NO_TXSTATS
-       yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-               skb->data, len, PCI_DMA_TODEVICE));
+       yp->tx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
+                                                            skb->data,
+                                                            len, DMA_TO_DEVICE));
        yp->tx_ring[entry].result_status = 0;
        if (entry >= TX_RING_SIZE-1) {
                /* New stop command. */
@@ -847,8 +855,9 @@ static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
        yp->cur_tx++;
 #else
        yp->tx_ring[entry<<1].request_cnt = len;
-       yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-               skb->data, len, PCI_DMA_TODEVICE));
+       yp->tx_ring[entry<<1].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
+                                                               skb->data,
+                                                               len, DMA_TO_DEVICE));
        /* The input_last (status-write) command is constant, but we must
           rewrite the subsequent 'stop' command. */
 
@@ -923,8 +932,9 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
                        dev->stats.tx_packets++;
                        dev->stats.tx_bytes += skb->len;
                        /* Free the original skb. */
-                       pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr),
-                               skb->len, PCI_DMA_TODEVICE);
+                       dma_unmap_single(&yp->pci_dev->dev,
+                                        le32_to_cpu(yp->tx_ring[entry].addr),
+                                        skb->len, DMA_TO_DEVICE);
                        dev_consume_skb_irq(skb);
                        yp->tx_skbuff[entry] = NULL;
                }
@@ -980,9 +990,9 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
                                        dev->stats.tx_packets++;
                                }
                                /* Free the original skb. */
-                               pci_unmap_single(yp->pci_dev,
-                                       yp->tx_ring[entry<<1].addr, skb->len,
-                                       PCI_DMA_TODEVICE);
+                               dma_unmap_single(&yp->pci_dev->dev,
+                                                yp->tx_ring[entry << 1].addr,
+                                                skb->len, DMA_TO_DEVICE);
                                dev_consume_skb_irq(skb);
                                yp->tx_skbuff[entry] = 0;
                                /* Mark status as empty. */
@@ -1055,8 +1065,9 @@ static int yellowfin_rx(struct net_device *dev)
 
                if(!desc->result_status)
                        break;
-               pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr),
-                       yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+               dma_sync_single_for_cpu(&yp->pci_dev->dev,
+                                       le32_to_cpu(desc->addr),
+                                       yp->rx_buf_sz, DMA_FROM_DEVICE);
                desc_status = le32_to_cpu(desc->result_status) >> 16;
                buf_addr = rx_skb->data;
                data_size = (le32_to_cpu(desc->dbdma_cmd) -
@@ -1121,10 +1132,10 @@ static int yellowfin_rx(struct net_device *dev)
                           without copying to a properly sized skbuff. */
                        if (pkt_len > rx_copybreak) {
                                skb_put(skb = rx_skb, pkt_len);
-                               pci_unmap_single(yp->pci_dev,
-                                       le32_to_cpu(yp->rx_ring[entry].addr),
-                                       yp->rx_buf_sz,
-                                       PCI_DMA_FROMDEVICE);
+                               dma_unmap_single(&yp->pci_dev->dev,
+                                                le32_to_cpu(yp->rx_ring[entry].addr),
+                                                yp->rx_buf_sz,
+                                                DMA_FROM_DEVICE);
                                yp->rx_skbuff[entry] = NULL;
                        } else {
                                skb = netdev_alloc_skb(dev, pkt_len + 2);
@@ -1133,10 +1144,10 @@ static int yellowfin_rx(struct net_device *dev)
                                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                skb_copy_to_linear_data(skb, rx_skb->data, pkt_len);
                                skb_put(skb, pkt_len);
-                               pci_dma_sync_single_for_device(yp->pci_dev,
-                                                               le32_to_cpu(desc->addr),
-                                                               yp->rx_buf_sz,
-                                                               PCI_DMA_FROMDEVICE);
+                               dma_sync_single_for_device(&yp->pci_dev->dev,
+                                                          le32_to_cpu(desc->addr),
+                                                          yp->rx_buf_sz,
+                                                          DMA_FROM_DEVICE);
                        }
                        skb->protocol = eth_type_trans(skb, dev);
                        netif_rx(skb);
@@ -1155,8 +1166,10 @@ static int yellowfin_rx(struct net_device *dev)
                                break;                          /* Better luck next round. */
                        yp->rx_skbuff[entry] = skb;
                        skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
-                       yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
-                               skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+                       yp->rx_ring[entry].addr = cpu_to_le32(dma_map_single(&yp->pci_dev->dev,
+                                                                            skb->data,
+                                                                            yp->rx_buf_sz,
+                                                                            DMA_FROM_DEVICE));
                }
                yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
                yp->rx_ring[entry].result_status = 0;   /* Clear complete bit. */
@@ -1379,10 +1392,12 @@ static void yellowfin_remove_one(struct pci_dev *pdev)
        BUG_ON(!dev);
        np = netdev_priv(dev);
 
-        pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status,
-               np->tx_status_dma);
-       pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
-       pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+       dma_free_coherent(&pdev->dev, STATUS_TOTAL_SIZE, np->tx_status,
+                         np->tx_status_dma);
+       dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+                         np->rx_ring_dma);
+       dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+                         np->tx_ring_dma);
        unregister_netdev (dev);
 
        pci_iounmap(pdev, np->base);
index d25b88f..76f8cc5 100644 (file)
@@ -25,7 +25,7 @@ config IONIC
          This enables the support for the Pensando family of Ethernet
          adapters.  More specific information on this driver can be
          found in
-         <file:Documentation/networking/device_drivers/pensando/ionic.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/pensando/ionic.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called ionic.
index 2924cde..85c686c 100644 (file)
@@ -247,12 +247,11 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out_pci_disable_device;
        }
 
-       pci_set_master(pdev);
        pcie_print_link_status(pdev);
 
        err = ionic_map_bars(ionic);
        if (err)
-               goto err_out_pci_clear_master;
+               goto err_out_pci_disable_device;
 
        /* Configure the device */
        err = ionic_setup(ionic);
@@ -260,6 +259,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                dev_err(dev, "Cannot setup device: %d, aborting\n", err);
                goto err_out_unmap_bars;
        }
+       pci_set_master(pdev);
 
        err = ionic_identify(ionic);
        if (err) {
@@ -350,6 +350,7 @@ err_out_reset:
        ionic_reset(ionic);
 err_out_teardown:
        ionic_dev_teardown(ionic);
+       pci_clear_master(pdev);
        /* Don't fail the probe for these errors, keep
         * the hw interface around for inspection
         */
@@ -358,8 +359,6 @@ err_out_teardown:
 err_out_unmap_bars:
        ionic_unmap_bars(ionic);
        pci_release_regions(pdev);
-err_out_pci_clear_master:
-       pci_clear_master(pdev);
 err_out_pci_disable_device:
        pci_disable_device(pdev);
 err_out_debugfs_del_dev:
@@ -389,9 +388,9 @@ static void ionic_remove(struct pci_dev *pdev)
        ionic_port_reset(ionic);
        ionic_reset(ionic);
        ionic_dev_teardown(ionic);
+       pci_clear_master(pdev);
        ionic_unmap_bars(ionic);
        pci_release_regions(pdev);
-       pci_clear_master(pdev);
        pci_disable_device(pdev);
        ionic_debugfs_del_dev(ionic);
        mutex_destroy(&ionic->dev_cmd_lock);
index 525434f..d5cba50 100644 (file)
@@ -10,8 +10,6 @@
 #include "ionic_if.h"
 #include "ionic_regs.h"
 
-#define IONIC_MIN_MTU                  ETH_MIN_MTU
-#define IONIC_MAX_MTU                  9194
 #define IONIC_MAX_TX_DESC              8192
 #define IONIC_MAX_RX_DESC              16384
 #define IONIC_MIN_TXRX_DESC            16
index 2d590e5..c4f4fd4 100644 (file)
@@ -69,6 +69,7 @@ void ionic_devlink_free(struct ionic *ionic)
 int ionic_devlink_register(struct ionic *ionic)
 {
        struct devlink *dl = priv_to_devlink(ionic);
+       struct devlink_port_attrs attrs = {};
        int err;
 
        err = devlink_register(dl, ionic->dev);
@@ -77,8 +78,8 @@ int ionic_devlink_register(struct ionic *ionic)
                return err;
        }
 
-       devlink_port_attrs_set(&ionic->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
-                              0, false, 0, NULL, 0);
+       attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+       devlink_port_attrs_set(&ionic->dl_port, &attrs);
        err = devlink_port_register(dl, &ionic->dl_port, 0);
        if (err)
                dev_err(ionic->dev, "devlink_port_register failed: %d\n", err);
index f7e3ce3..e03ea9b 100644 (file)
@@ -468,12 +468,18 @@ static void ionic_get_ringparam(struct net_device *netdev,
        ring->rx_pending = lif->nrxq_descs;
 }
 
+static void ionic_set_ringsize(struct ionic_lif *lif, void *arg)
+{
+       struct ethtool_ringparam *ring = arg;
+
+       lif->ntxq_descs = ring->tx_pending;
+       lif->nrxq_descs = ring->rx_pending;
+}
+
 static int ionic_set_ringparam(struct net_device *netdev,
                               struct ethtool_ringparam *ring)
 {
        struct ionic_lif *lif = netdev_priv(netdev);
-       bool running;
-       int err;
 
        if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
                netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n");
@@ -491,22 +497,7 @@ static int ionic_set_ringparam(struct net_device *netdev,
            ring->rx_pending == lif->nrxq_descs)
                return 0;
 
-       err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
-       if (err)
-               return err;
-
-       running = test_bit(IONIC_LIF_F_UP, lif->state);
-       if (running)
-               ionic_stop(netdev);
-
-       lif->ntxq_descs = ring->tx_pending;
-       lif->nrxq_descs = ring->rx_pending;
-
-       if (running)
-               ionic_open(netdev);
-       clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
-
-       return 0;
+       return ionic_reset_queues(lif, ionic_set_ringsize, ring);
 }
 
 static void ionic_get_channels(struct net_device *netdev,
@@ -521,12 +512,17 @@ static void ionic_get_channels(struct net_device *netdev,
        ch->combined_count = lif->nxqs;
 }
 
+static void ionic_set_queuecount(struct ionic_lif *lif, void *arg)
+{
+       struct ethtool_channels *ch = arg;
+
+       lif->nxqs = ch->combined_count;
+}
+
 static int ionic_set_channels(struct net_device *netdev,
                              struct ethtool_channels *ch)
 {
        struct ionic_lif *lif = netdev_priv(netdev);
-       bool running;
-       int err;
 
        if (!ch->combined_count || ch->other_count ||
            ch->rx_count || ch->tx_count)
@@ -535,21 +531,7 @@ static int ionic_set_channels(struct net_device *netdev,
        if (ch->combined_count == lif->nxqs)
                return 0;
 
-       err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
-       if (err)
-               return err;
-
-       running = test_bit(IONIC_LIF_F_UP, lif->state);
-       if (running)
-               ionic_stop(netdev);
-
-       lif->nxqs = ch->combined_count;
-
-       if (running)
-               ionic_open(netdev);
-       clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
-
-       return 0;
+       return ionic_reset_queues(lif, ionic_set_queuecount, ch);
 }
 
 static u32 ionic_get_priv_flags(struct net_device *netdev)
index 7e22ba4..acc94b2 100644 (file)
@@ -59,6 +59,8 @@ enum ionic_cmd_opcode {
        IONIC_CMD_QOS_CLASS_INIT                = 241,
        IONIC_CMD_QOS_CLASS_RESET               = 242,
        IONIC_CMD_QOS_CLASS_UPDATE              = 243,
+       IONIC_CMD_QOS_CLEAR_STATS               = 244,
+       IONIC_CMD_QOS_RESET                     = 245,
 
        /* Firmware commands */
        IONIC_CMD_FW_DOWNLOAD                   = 254,
@@ -90,8 +92,8 @@ enum ionic_status_code {
        IONIC_RC_DEV_CMD        = 18,   /* Device cmd attempted on AdminQ */
        IONIC_RC_ENOSUPP        = 19,   /* Operation not supported */
        IONIC_RC_ERROR          = 29,   /* Generic error */
-
        IONIC_RC_ERDMA          = 30,   /* Generic RDMA error */
+       IONIC_RC_EVFID          = 31,   /* VF ID does not exist */
 };
 
 enum ionic_notifyq_opcode {
@@ -103,7 +105,7 @@ enum ionic_notifyq_opcode {
 };
 
 /**
- * struct cmd - General admin command format
+ * struct ionic_admin_cmd - General admin command format
  * @opcode:     Opcode for the command
  * @lif_index:  LIF index
  * @cmd_data:   Opcode-specific command bytes
@@ -167,7 +169,7 @@ struct ionic_dev_init_cmd {
 };
 
 /**
- * struct init_comp - Device init command completion
+ * struct ionic_dev_init_comp - Device init command completion
  * @status: Status of the command (enum ionic_status_code)
  */
 struct ionic_dev_init_comp {
@@ -185,7 +187,7 @@ struct ionic_dev_reset_cmd {
 };
 
 /**
- * struct reset_comp - Reset command completion
+ * struct ionic_dev_reset_comp - Reset command completion
  * @status: Status of the command (enum ionic_status_code)
  */
 struct ionic_dev_reset_comp {
@@ -357,12 +359,12 @@ struct ionic_lif_logical_qtype {
  * enum ionic_lif_state - LIF state
  * @IONIC_LIF_DISABLE:     LIF disabled
  * @IONIC_LIF_ENABLE:      LIF enabled
- * @IONIC_LIF_HANG_RESET:  LIF hung, being reset
+ * @IONIC_LIF_QUIESCE:     LIF Quiesced
  */
 enum ionic_lif_state {
-       IONIC_LIF_DISABLE       = 0,
+       IONIC_LIF_QUIESCE       = 0,
        IONIC_LIF_ENABLE        = 1,
-       IONIC_LIF_HANG_RESET    = 2,
+       IONIC_LIF_DISABLE       = 2,
 };
 
 /**
@@ -371,6 +373,7 @@ enum ionic_lif_state {
  * @name:           LIF name
  * @mtu:            MTU
  * @mac:            Station MAC address
+ * @vlan:           Default Vlan ID
  * @features:       Features (enum ionic_eth_hw_features)
  * @queue_count:    Queue counts per queue-type
  */
@@ -381,7 +384,7 @@ union ionic_lif_config {
                char   name[IONIC_IFNAMSIZ];
                __le32 mtu;
                u8     mac[6];
-               u8     rsvd2[2];
+               __le16 vlan;
                __le64 features;
                __le32 queue_count[IONIC_QTYPE_MAX];
        } __packed;
@@ -489,13 +492,13 @@ struct ionic_lif_init_comp {
        u8 rsvd2[12];
 };
 
- /**
 * struct ionic_q_identify_cmd - queue identify command
 * @opcode:     opcode
 * @lif_type:   LIF type (enum ionic_lif_type)
 * @type:       Logical queue type (enum ionic_logical_qtype)
 * @ver:        Highest queue type version that the driver supports
 */
+/**
+ * struct ionic_q_identify_cmd - queue identify command
+ * @opcode:     opcode
+ * @lif_type:   LIF type (enum ionic_lif_type)
+ * @type:       Logical queue type (enum ionic_logical_qtype)
+ * @ver:        Highest queue type version that the driver supports
+ */
 struct ionic_q_identify_cmd {
        u8     opcode;
        u8     rsvd;
@@ -983,6 +986,14 @@ enum ionic_pkt_type {
        IONIC_PKT_TYPE_IPV6       = 0x008,
        IONIC_PKT_TYPE_IPV6_TCP   = 0x018,
        IONIC_PKT_TYPE_IPV6_UDP   = 0x028,
+       /* below types are only used if encap offloads are enabled on lif */
+       IONIC_PKT_TYPE_ENCAP_NON_IP     = 0x40,
+       IONIC_PKT_TYPE_ENCAP_IPV4       = 0x41,
+       IONIC_PKT_TYPE_ENCAP_IPV4_TCP   = 0x43,
+       IONIC_PKT_TYPE_ENCAP_IPV4_UDP   = 0x45,
+       IONIC_PKT_TYPE_ENCAP_IPV6       = 0x48,
+       IONIC_PKT_TYPE_ENCAP_IPV6_TCP   = 0x58,
+       IONIC_PKT_TYPE_ENCAP_IPV6_UDP   = 0x68,
 };
 
 enum ionic_eth_hw_features {
@@ -1003,6 +1014,9 @@ enum ionic_eth_hw_features {
        IONIC_ETH_HW_TSO_IPXIP6         = BIT(14),
        IONIC_ETH_HW_TSO_UDP            = BIT(15),
        IONIC_ETH_HW_TSO_UDP_CSUM       = BIT(16),
+       IONIC_ETH_HW_RX_CSUM_GENEVE     = BIT(17),
+       IONIC_ETH_HW_TX_CSUM_GENEVE     = BIT(18),
+       IONIC_ETH_HW_TSO_GENEVE         = BIT(19)
 };
 
 /**
@@ -1011,7 +1025,7 @@ enum ionic_eth_hw_features {
  * @type:       Queue type
  * @lif_index:  LIF index
  * @index:      Queue index
- * @oper:       Operation (enum q_control_oper)
+ * @oper:       Operation (enum ionic_q_control_oper)
  */
 struct ionic_q_control_cmd {
        u8     opcode;
@@ -1172,7 +1186,7 @@ enum ionic_port_loopback_mode {
  * struct ionic_xcvr_status - Transceiver Status information
  * @state:    Transceiver status (enum ionic_xcvr_state)
  * @phy:      Physical connection type (enum ionic_phy_type)
- * @pid:      Transceiver link mode (enum pid)
+ * @pid:      Transceiver link mode (enum ionic_xcvr_pid)
  * @sprom:    Transceiver sprom contents
  */
 struct ionic_xcvr_status {
@@ -1186,7 +1200,7 @@ struct ionic_xcvr_status {
  * union ionic_port_config - Port configuration
  * @speed:              port speed (in Mbps)
  * @mtu:                mtu
- * @state:              port admin state (enum port_admin_state)
+ * @state:              port admin state (enum ionic_port_admin_state)
  * @an_enable:          autoneg enable
  * @fec_type:           fec type (enum ionic_port_fec_type)
  * @pause_type:         pause type (enum ionic_port_pause_type)
@@ -1874,12 +1888,14 @@ struct ionic_qos_identify_comp {
 };
 
 #define IONIC_QOS_TC_MAX               8
+#define IONIC_QOS_ALL_TC               0xFF
 /* Capri max supported, should be renamed. */
 #define IONIC_QOS_CLASS_MAX            7
 #define IONIC_QOS_PCP_MAX              8
 #define IONIC_QOS_CLASS_NAME_SZ        32
 #define IONIC_QOS_DSCP_MAX             64
 #define IONIC_QOS_ALL_PCP              0xFF
+#define IONIC_DSCP_BLOCK_SIZE          8
 
 /**
  * enum ionic_qos_class
@@ -1923,6 +1939,7 @@ enum ionic_qos_sched_type {
  *     IONIC_QOS_CONFIG_F_NO_DROP              drop/nodrop
  *     IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP         enable dot1q pcp rewrite
  *     IONIC_QOS_CONFIG_F_RW_IP_DSCP           enable ip dscp rewrite
+ *     IONIC_QOS_CONFIG_F_NON_DISRUPTIVE       Non-disruptive TC update
  * @sched_type:                QoS class scheduling type (enum ionic_qos_sched_type)
  * @class_type:                QoS class type (enum ionic_qos_class_type)
  * @pause_type:                QoS pause type (enum ionic_qos_pause_type)
@@ -1944,6 +1961,8 @@ union ionic_qos_config {
 /* Used to rewrite PCP or DSCP value. */
 #define IONIC_QOS_CONFIG_F_RW_DOT1Q_PCP                BIT(2)
 #define IONIC_QOS_CONFIG_F_RW_IP_DSCP          BIT(3)
+/* Non-disruptive TC update */
+#define IONIC_QOS_CONFIG_F_NON_DISRUPTIVE      BIT(4)
                u8      flags;
                u8      sched_type;
                u8      class_type;
@@ -2019,6 +2038,16 @@ struct ionic_qos_reset_cmd {
        u8    rsvd[62];
 };
 
+/**
+ * struct ionic_qos_clear_port_stats_cmd - Qos config reset command
+ * @opcode:    Opcode
+ */
+struct ionic_qos_clear_stats_cmd {
+       u8    opcode;
+       u8    group_bitmap;
+       u8    rsvd[62];
+};
+
 typedef struct ionic_admin_comp ionic_qos_reset_comp;
 
 /**
@@ -2164,7 +2193,7 @@ struct ionic_notifyq_event {
  * struct ionic_link_change_event - Link change event notification
  * @eid:               event number
  * @ecode:             event code = IONIC_EVENT_LINK_CHANGE
- * @link_status:       link up or down, with error bits (enum port_status)
+ * @link_status:       link up/down, with error bits (enum ionic_port_status)
  * @link_speed:                speed of the network link
  *
  * Sent when the network link state changes between UP and DOWN
@@ -2377,6 +2406,16 @@ enum ionic_pb_buffer_drop_stats {
        IONIC_BUFFER_DROP_MAX,
 };
 
+enum ionic_oflow_drop_stats {
+       IONIC_OFLOW_OCCUPANCY_DROP,
+       IONIC_OFLOW_EMERGENCY_STOP_DROP,
+       IONIC_OFLOW_WRITE_BUFFER_ACK_FILL_UP_DROP,
+       IONIC_OFLOW_WRITE_BUFFER_ACK_FULL_DROP,
+       IONIC_OFLOW_WRITE_BUFFER_FULL_DROP,
+       IONIC_OFLOW_CONTROL_FIFO_FULL_DROP,
+       IONIC_OFLOW_DROP_MAX,
+};
+
 /**
  * struct port_pb_stats - packet buffers system stats
  * uses ionic_pb_buffer_drop_stats for drop_counts[]
@@ -2390,12 +2429,20 @@ struct ionic_port_pb_stats {
        __le64 input_queue_buffer_occupancy[IONIC_QOS_TC_MAX];
        __le64 input_queue_port_monitor[IONIC_QOS_TC_MAX];
        __le64 output_queue_port_monitor[IONIC_QOS_TC_MAX];
+       __le64 oflow_drop_counts[IONIC_OFLOW_DROP_MAX];
+       __le64 input_queue_good_pkts_in[IONIC_QOS_TC_MAX];
+       __le64 input_queue_good_pkts_out[IONIC_QOS_TC_MAX];
+       __le64 input_queue_err_pkts_in[IONIC_QOS_TC_MAX];
+       __le64 input_queue_fifo_depth[IONIC_QOS_TC_MAX];
+       __le64 input_queue_max_fifo_depth[IONIC_QOS_TC_MAX];
+       __le64 input_queue_peak_occupancy[IONIC_QOS_TC_MAX];
+       __le64 output_queue_buffer_occupancy[IONIC_QOS_TC_MAX];
 };
 
 /**
  * struct ionic_port_identity - port identity structure
  * @version:        identity structure version
- * @type:           type of port (enum port_type)
+ * @type:           type of port (enum ionic_port_type)
  * @num_lanes:      number of lanes for the port
  * @autoneg:        autoneg supported
  * @min_frame_size: minimum frame size supported
@@ -2637,6 +2684,7 @@ union ionic_dev_cmd {
        struct ionic_qos_identify_cmd qos_identify;
        struct ionic_qos_init_cmd qos_init;
        struct ionic_qos_reset_cmd qos_reset;
+       struct ionic_qos_clear_stats_cmd qos_clear_stats;
 
        struct ionic_q_identify_cmd q_identify;
        struct ionic_q_init_cmd q_init;
index 9d8c969..3aa6403 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/dynamic_debug.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
 #include <linux/rtnetlink.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
@@ -96,7 +97,8 @@ static void ionic_link_status_check(struct ionic_lif *lif)
        u16 link_status;
        bool link_up;
 
-       if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
+       if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state) ||
+           test_bit(IONIC_LIF_F_QUEUE_RESET, lif->state))
                return;
 
        link_status = le16_to_cpu(lif->info->status.link_status);
@@ -717,7 +719,7 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
        eid = le64_to_cpu(comp->event.eid);
 
        /* Have we run out of new completions to process? */
-       if (eid <= lif->last_eid)
+       if ((s64)(eid - lif->last_eid) <= 0)
                return false;
 
        lif->last_eid = eid;
@@ -1245,6 +1247,7 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
 
        netdev->hw_features |= netdev->hw_enc_features;
        netdev->features |= netdev->hw_features;
+       netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;
 
        netdev->priv_flags |= IFF_UNICAST_FLT |
                              IFF_LIVE_ADDR_CHANGE;
@@ -1311,7 +1314,7 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
                return err;
 
        netdev->mtu = new_mtu;
-       err = ionic_reset_queues(lif);
+       err = ionic_reset_queues(lif, NULL, NULL);
 
        return err;
 }
@@ -1323,7 +1326,7 @@ static void ionic_tx_timeout_work(struct work_struct *ws)
        netdev_info(lif->netdev, "Tx Timeout recovery\n");
 
        rtnl_lock();
-       ionic_reset_queues(lif);
+       ionic_reset_queues(lif, NULL, NULL);
        rtnl_unlock();
 }
 
@@ -1671,6 +1674,14 @@ int ionic_open(struct net_device *netdev)
        if (err)
                goto err_out;
 
+       err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
+       if (err)
+               goto err_txrx_deinit;
+
+       err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
+       if (err)
+               goto err_txrx_deinit;
+
        /* don't start the queues until we have link */
        if (netif_carrier_ok(netdev)) {
                err = ionic_start_queues(lif);
@@ -1692,15 +1703,15 @@ static void ionic_stop_queues(struct ionic_lif *lif)
        if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
                return;
 
-       ionic_txrx_disable(lif);
        netif_tx_disable(lif->netdev);
+       ionic_txrx_disable(lif);
 }
 
 int ionic_stop(struct net_device *netdev)
 {
        struct ionic_lif *lif = netdev_priv(netdev);
 
-       if (!netif_device_present(netdev))
+       if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
                return 0;
 
        ionic_stop_queues(lif);
@@ -1978,24 +1989,32 @@ static const struct net_device_ops ionic_netdev_ops = {
        .ndo_get_vf_stats       = ionic_get_vf_stats,
 };
 
-int ionic_reset_queues(struct ionic_lif *lif)
+int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
 {
        bool running;
        int err = 0;
 
-       /* Put off the next watchdog timeout */
-       netif_trans_update(lif->netdev);
-
        err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
        if (err)
                return err;
 
        running = netif_running(lif->netdev);
-       if (running)
+       if (running) {
+               netif_device_detach(lif->netdev);
                err = ionic_stop(lif->netdev);
-       if (!err && running)
-               ionic_open(lif->netdev);
+               if (err)
+                       goto reset_out;
+       }
 
+       if (cb)
+               cb(lif, arg);
+
+       if (running) {
+               err = ionic_open(lif->netdev);
+               netif_device_attach(lif->netdev);
+       }
+
+reset_out:
        clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
 
        return err;
@@ -2004,16 +2023,22 @@ int ionic_reset_queues(struct ionic_lif *lif)
 static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
 {
        struct device *dev = ionic->dev;
+       union ionic_lif_identity *lid;
        struct net_device *netdev;
        struct ionic_lif *lif;
        int tbl_sz;
        int err;
 
+       lid = kzalloc(sizeof(*lid), GFP_KERNEL);
+       if (!lid)
+               return ERR_PTR(-ENOMEM);
+
        netdev = alloc_etherdev_mqs(sizeof(*lif),
                                    ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
        if (!netdev) {
                dev_err(dev, "Cannot allocate netdev, aborting\n");
-               return ERR_PTR(-ENOMEM);
+               err = -ENOMEM;
+               goto err_out_free_lid;
        }
 
        SET_NETDEV_DEV(netdev, dev);
@@ -2027,8 +2052,12 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
        netdev->watchdog_timeo = 2 * HZ;
        netif_carrier_off(netdev);
 
-       netdev->min_mtu = IONIC_MIN_MTU;
-       netdev->max_mtu = IONIC_MAX_MTU;
+       lif->identity = lid;
+       lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
+       ionic_lif_identify(ionic, lif->lif_type, lif->identity);
+       lif->netdev->min_mtu = le32_to_cpu(lif->identity->eth.min_frame_size);
+       lif->netdev->max_mtu =
+               le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;
 
        lif->neqs = ionic->neqs_per_lif;
        lif->nxqs = ionic->ntxqs_per_lif;
@@ -2095,6 +2124,8 @@ err_out_free_lif_info:
 err_out_free_netdev:
        free_netdev(lif->netdev);
        lif = NULL;
+err_out_free_lid:
+       kfree(lid);
 
        return ERR_PTR(err);
 }
@@ -2114,7 +2145,6 @@ int ionic_lifs_alloc(struct ionic *ionic)
                return -ENOMEM;
        }
 
-       lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
        ionic_lif_queue_identify(lif);
 
        return 0;
@@ -2225,6 +2255,7 @@ static void ionic_lif_free(struct ionic_lif *lif)
                ionic_lif_reset(lif);
 
        /* free lif info */
+       kfree(lif->identity);
        dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
        lif->info = NULL;
        lif->info_pa = 0;
@@ -2602,6 +2633,7 @@ int ionic_lifs_register(struct ionic *ionic)
                return err;
        }
        ionic->master_lif->registered = true;
+       ionic_lif_set_netdev_info(ionic->master_lif);
 
        return 0;
 }
index c342803..f1e7d3e 100644 (file)
@@ -184,6 +184,7 @@ struct ionic_lif {
        u16 lif_type;
        unsigned int nucast;
 
+       union ionic_lif_identity *identity;
        struct ionic_lif_info *info;
        dma_addr_t info_pa;
        u32 info_sz;
@@ -235,18 +236,7 @@ static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
        return (usecs * mult) / div;
 }
 
-static inline u32 ionic_coal_hw_to_usec(struct ionic *ionic, u32 units)
-{
-       u32 mult = le32_to_cpu(ionic->ident.dev.intr_coal_mult);
-       u32 div = le32_to_cpu(ionic->ident.dev.intr_coal_div);
-
-       /* Div-by-zero should never be an issue, but check anyway */
-       if (!div || !mult)
-               return 0;
-
-       /* Convert from device units to usec */
-       return (units * div) / mult;
-}
+typedef void (*ionic_reset_cb)(struct ionic_lif *lif, void *arg);
 
 void ionic_link_status_check_request(struct ionic_lif *lif);
 void ionic_get_stats64(struct net_device *netdev,
@@ -267,7 +257,7 @@ int ionic_lif_rss_config(struct ionic_lif *lif, u16 types,
 
 int ionic_open(struct net_device *netdev);
 int ionic_stop(struct net_device *netdev);
-int ionic_reset_queues(struct ionic_lif *lif);
+int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg);
 
 static inline void debug_stats_txq_post(struct ionic_qcq *qcq,
                                        struct ionic_txq_desc *desc, bool dbell)
index 8067ea0..f218477 100644 (file)
@@ -1695,19 +1695,13 @@ static void netxen_nic_detach_func(struct netxen_adapter *adapter)
        clear_bit(__NX_RESETTING, &adapter->state);
 }
 
-static int netxen_nic_attach_func(struct pci_dev *pdev)
+static int netxen_nic_attach_late_func(struct pci_dev *pdev)
 {
        struct netxen_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
        int err;
 
-       err = pci_enable_device(pdev);
-       if (err)
-               return err;
-
-       pci_set_power_state(pdev, PCI_D0);
        pci_set_master(pdev);
-       pci_restore_state(pdev);
 
        adapter->ahw.crb_win = -1;
        adapter->ahw.ocm_win = -1;
@@ -1741,6 +1735,20 @@ err_out:
        return err;
 }
 
+static int netxen_nic_attach_func(struct pci_dev *pdev)
+{
+       int err;
+
+       err = pci_enable_device(pdev);
+       if (err)
+               return err;
+
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+
+       return netxen_nic_attach_late_func(pdev);
+}
+
 static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
 {
@@ -1785,36 +1793,24 @@ static void netxen_nic_shutdown(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
-#ifdef CONFIG_PM
-static int
-netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused
+netxen_nic_suspend(struct device *dev_d)
 {
-       struct netxen_adapter *adapter = pci_get_drvdata(pdev);
-       int retval;
+       struct netxen_adapter *adapter = dev_get_drvdata(dev_d);
 
        netxen_nic_detach_func(adapter);
 
-       retval = pci_save_state(pdev);
-       if (retval)
-               return retval;
-
-       if (netxen_nic_wol_supported(adapter)) {
-               pci_enable_wake(pdev, PCI_D3cold, 1);
-               pci_enable_wake(pdev, PCI_D3hot, 1);
-       }
-
-       pci_disable_device(pdev);
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
+       if (netxen_nic_wol_supported(adapter))
+               device_wakeup_enable(dev_d);
 
        return 0;
 }
 
-static int
-netxen_nic_resume(struct pci_dev *pdev)
+static int __maybe_unused
+netxen_nic_resume(struct device *dev_d)
 {
-       return netxen_nic_attach_func(pdev);
+       return netxen_nic_attach_late_func(to_pci_dev(dev_d));
 }
-#endif
 
 static int netxen_nic_open(struct net_device *netdev)
 {
@@ -3448,15 +3444,16 @@ static const struct pci_error_handlers netxen_err_handler = {
        .slot_reset = netxen_io_slot_reset,
 };
 
+static SIMPLE_DEV_PM_OPS(netxen_nic_pm_ops,
+                        netxen_nic_suspend,
+                        netxen_nic_resume);
+
 static struct pci_driver netxen_driver = {
        .name = netxen_nic_driver_name,
        .id_table = netxen_pci_tbl,
        .probe = netxen_nic_probe,
        .remove = netxen_nic_remove,
-#ifdef CONFIG_PM
-       .suspend = netxen_nic_suspend,
-       .resume = netxen_nic_resume,
-#endif
+       .driver.pm = &netxen_nic_pm_ops,
        .shutdown = netxen_nic_shutdown,
        .err_handler = &netxen_err_handler
 };
index a0acb94..f947b10 100644 (file)
@@ -1,12 +1,37 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+# Copyright (c) 2019-2020 Marvell International Ltd.
+
 obj-$(CONFIG_QED) := qed.o
 
-qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
-        qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
-        qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o qed_mng_tlv.o
-qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
-qed-$(CONFIG_QED_LL2) += qed_ll2.o
-qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o qed_iwarp.o
-qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o
+qed-y :=                       \
+       qed_chain.o             \
+       qed_cxt.o               \
+       qed_dcbx.o              \
+       qed_debug.o             \
+       qed_dev.o               \
+       qed_hw.o                \
+       qed_init_fw_funcs.o     \
+       qed_init_ops.o          \
+       qed_int.o               \
+       qed_l2.o                \
+       qed_main.o              \
+       qed_mcp.o               \
+       qed_mng_tlv.o           \
+       qed_ptp.o               \
+       qed_selftest.o          \
+       qed_sp_commands.o       \
+       qed_spq.o
+
 qed-$(CONFIG_QED_FCOE) += qed_fcoe.o
+qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o
+qed-$(CONFIG_QED_LL2) += qed_ll2.o
 qed-$(CONFIG_QED_OOO) += qed_ooo.o
+
+qed-$(CONFIG_QED_RDMA) +=      \
+       qed_iwarp.o             \
+       qed_rdma.o              \
+       qed_roce.o
+
+qed-$(CONFIG_QED_SRIOV) +=     \
+       qed_sriov.o             \
+       qed_vf.o
index a49743d..b2a7b53 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_H
@@ -271,20 +245,6 @@ enum QED_FEATURE {
        QED_MAX_FEATURES,
 };
 
-enum QED_PORT_MODE {
-       QED_PORT_MODE_DE_2X40G,
-       QED_PORT_MODE_DE_2X50G,
-       QED_PORT_MODE_DE_1X100G,
-       QED_PORT_MODE_DE_4X10G_F,
-       QED_PORT_MODE_DE_4X10G_E,
-       QED_PORT_MODE_DE_4X20G,
-       QED_PORT_MODE_DE_1X40G,
-       QED_PORT_MODE_DE_2X25G,
-       QED_PORT_MODE_DE_1X25G,
-       QED_PORT_MODE_DE_4X25G,
-       QED_PORT_MODE_DE_2X10G,
-};
-
 enum qed_dev_cap {
        QED_DEV_CAP_ETH,
        QED_DEV_CAP_FCOE,
@@ -306,48 +266,49 @@ enum qed_db_rec_exec {
 
 struct qed_hw_info {
        /* PCI personality */
-       enum qed_pci_personality personality;
-#define QED_IS_RDMA_PERSONALITY(dev)                       \
-       ((dev)->hw_info.personality == QED_PCI_ETH_ROCE ||  \
-        (dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
+       enum qed_pci_personality        personality;
+#define QED_IS_RDMA_PERSONALITY(dev)                                   \
+       ((dev)->hw_info.personality == QED_PCI_ETH_ROCE ||              \
+        (dev)->hw_info.personality == QED_PCI_ETH_IWARP ||             \
         (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
-#define QED_IS_ROCE_PERSONALITY(dev)                      \
-       ((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
+#define QED_IS_ROCE_PERSONALITY(dev)                                   \
+       ((dev)->hw_info.personality == QED_PCI_ETH_ROCE ||              \
         (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
-#define QED_IS_IWARP_PERSONALITY(dev)                      \
-       ((dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
+#define QED_IS_IWARP_PERSONALITY(dev)                                  \
+       ((dev)->hw_info.personality == QED_PCI_ETH_IWARP ||             \
         (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
-#define QED_IS_L2_PERSONALITY(dev)                   \
-       ((dev)->hw_info.personality == QED_PCI_ETH || \
+#define QED_IS_L2_PERSONALITY(dev)                                     \
+       ((dev)->hw_info.personality == QED_PCI_ETH ||                   \
         QED_IS_RDMA_PERSONALITY(dev))
-#define QED_IS_FCOE_PERSONALITY(dev) \
+#define QED_IS_FCOE_PERSONALITY(dev)                                   \
        ((dev)->hw_info.personality == QED_PCI_FCOE)
-#define QED_IS_ISCSI_PERSONALITY(dev) \
+#define QED_IS_ISCSI_PERSONALITY(dev)                                  \
        ((dev)->hw_info.personality == QED_PCI_ISCSI)
 
        /* Resource Allocation scheme results */
        u32                             resc_start[QED_MAX_RESC];
        u32                             resc_num[QED_MAX_RESC];
-       u32                             feat_num[QED_MAX_FEATURES];
+#define RESC_START(_p_hwfn, resc)      ((_p_hwfn)->hw_info.resc_start[resc])
+#define RESC_NUM(_p_hwfn, resc)                ((_p_hwfn)->hw_info.resc_num[resc])
+#define RESC_END(_p_hwfn, resc)                (RESC_START(_p_hwfn, resc) +    \
+                                        RESC_NUM(_p_hwfn, resc))
 
-#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
-#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
-#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
-                                RESC_NUM(_p_hwfn, resc))
-#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
+       u32                             feat_num[QED_MAX_FEATURES];
+#define FEAT_NUM(_p_hwfn, resc)                ((_p_hwfn)->hw_info.feat_num[resc])
 
        /* Amount of traffic classes HW supports */
-       u8 num_hw_tc;
+       u8                              num_hw_tc;
 
        /* Amount of TCs which should be active according to DCBx or upper
         * layer driver configuration.
         */
-       u8 num_active_tc;
+       u8                              num_active_tc;
+
        u8                              offload_tc;
        bool                            offload_tc_set;
 
        bool                            multi_tc_roce_en;
-#define IS_QED_MULTI_TC_ROCE(p_hwfn) (((p_hwfn)->hw_info.multi_tc_roce_en))
+#define IS_QED_MULTI_TC_ROCE(p_hwfn)   ((p_hwfn)->hw_info.multi_tc_roce_en)
 
        u32                             concrete_fid;
        u16                             opaque_fid;
@@ -362,12 +323,11 @@ struct qed_hw_info {
 
        struct qed_igu_info             *p_igu_info;
 
-       u32                             port_mode;
        u32                             hw_mode;
-       unsigned long           device_capabilities;
+       unsigned long                   device_capabilities;
        u16                             mtu;
 
-       enum qed_wol_support b_wol_support;
+       enum qed_wol_support            b_wol_support;
 };
 
 /* maximun size of read/write commands (HW limit) */
@@ -741,41 +701,42 @@ struct qed_dbg_feature {
 };
 
 struct qed_dev {
-       u32     dp_module;
-       u8      dp_level;
-       char    name[NAME_SIZE];
-
-       enum    qed_dev_type type;
-/* Translate type/revision combo into the proper conditions */
-#define QED_IS_BB(dev)  ((dev)->type == QED_DEV_TYPE_BB)
-#define QED_IS_BB_B0(dev)       (QED_IS_BB(dev) && \
-                                CHIP_REV_IS_B0(dev))
-#define QED_IS_AH(dev)  ((dev)->type == QED_DEV_TYPE_AH)
-#define QED_IS_K2(dev)  QED_IS_AH(dev)
-
-       u16     vendor_id;
-       u16     device_id;
-#define QED_DEV_ID_MASK                0xff00
-#define QED_DEV_ID_MASK_BB     0x1600
-#define QED_DEV_ID_MASK_AH     0x8000
-#define QED_IS_E4(dev)  (QED_IS_BB(dev) || QED_IS_AH(dev))
-
-       u16     chip_num;
-#define CHIP_NUM_MASK                   0xffff
-#define CHIP_NUM_SHIFT                  16
-
-       u16     chip_rev;
-#define CHIP_REV_MASK                   0xf
-#define CHIP_REV_SHIFT                  12
-#define CHIP_REV_IS_B0(_cdev)   ((_cdev)->chip_rev == 1)
+       u32                             dp_module;
+       u8                              dp_level;
+       char                            name[NAME_SIZE];
+
+       enum qed_dev_type               type;
+       /* Translate type/revision combo into the proper conditions */
+#define QED_IS_BB(dev)                 ((dev)->type == QED_DEV_TYPE_BB)
+#define QED_IS_BB_B0(dev)              (QED_IS_BB(dev) && CHIP_REV_IS_B0(dev))
+#define QED_IS_AH(dev)                 ((dev)->type == QED_DEV_TYPE_AH)
+#define QED_IS_K2(dev)                 QED_IS_AH(dev)
+#define QED_IS_E4(dev)                 (QED_IS_BB(dev) || QED_IS_AH(dev))
+#define QED_IS_E5(dev)                 ((dev)->type == QED_DEV_TYPE_E5)
+
+       u16                             vendor_id;
+
+       u16                             device_id;
+#define QED_DEV_ID_MASK                        0xff00
+#define QED_DEV_ID_MASK_BB             0x1600
+#define QED_DEV_ID_MASK_AH             0x8000
+
+       u16                             chip_num;
+#define CHIP_NUM_MASK                  0xffff
+#define CHIP_NUM_SHIFT                 16
+
+       u16                             chip_rev;
+#define CHIP_REV_MASK                  0xf
+#define CHIP_REV_SHIFT                 12
+#define CHIP_REV_IS_B0(_cdev)          ((_cdev)->chip_rev == 1)
 
        u16                             chip_metal;
-#define CHIP_METAL_MASK                 0xff
-#define CHIP_METAL_SHIFT                4
+#define CHIP_METAL_MASK                        0xff
+#define CHIP_METAL_SHIFT               4
 
        u16                             chip_bond_id;
-#define CHIP_BOND_ID_MASK               0xf
-#define CHIP_BOND_ID_SHIFT              0
+#define CHIP_BOND_ID_MASK              0xf
+#define CHIP_BOND_ID_SHIFT             0
 
        u8                              num_engines;
        u8                              num_ports;
@@ -876,6 +837,8 @@ struct qed_dev {
        struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
        u8 engine_for_debug;
        bool disable_ilt_dump;
+       bool                            dbg_bin_dump;
+
        DECLARE_HASHTABLE(connections, 10);
        const struct firmware           *firmware;
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c
new file mode 100644 (file)
index 0000000..f8efd36
--- /dev/null
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright (c) 2020 Marvell International Ltd. */
+
+#include <linux/dma-mapping.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/vmalloc.h>
+
+#include "qed_dev_api.h"
+
+static void qed_chain_init(struct qed_chain *chain,
+                          const struct qed_chain_init_params *params,
+                          u32 page_cnt)
+{
+       memset(chain, 0, sizeof(*chain));
+
+       chain->elem_size = params->elem_size;
+       chain->intended_use = params->intended_use;
+       chain->mode = params->mode;
+       chain->cnt_type = params->cnt_type;
+
+       chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size,
+                                             params->page_size);
+       chain->usable_per_page = USABLE_ELEMS_PER_PAGE(params->elem_size,
+                                                      params->page_size,
+                                                      params->mode);
+       chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(params->elem_size,
+                                                      params->mode);
+
+       chain->elem_per_page_mask = chain->elem_per_page - 1;
+       chain->next_page_mask = chain->usable_per_page &
+                               chain->elem_per_page_mask;
+
+       chain->page_size = params->page_size;
+       chain->page_cnt = page_cnt;
+       chain->capacity = chain->usable_per_page * page_cnt;
+       chain->size = chain->elem_per_page * page_cnt;
+
+       if (params->ext_pbl_virt) {
+               chain->pbl_sp.table_virt = params->ext_pbl_virt;
+               chain->pbl_sp.table_phys = params->ext_pbl_phys;
+
+               chain->b_external_pbl = true;
+       }
+}
+
+static void qed_chain_init_next_ptr_elem(const struct qed_chain *chain,
+                                        void *virt_curr, void *virt_next,
+                                        dma_addr_t phys_next)
+{
+       struct qed_chain_next *next;
+       u32 size;
+
+       size = chain->elem_size * chain->usable_per_page;
+       next = virt_curr + size;
+
+       DMA_REGPAIR_LE(next->next_phys, phys_next);
+       next->next_virt = virt_next;
+}
+
+static void qed_chain_init_mem(struct qed_chain *chain, void *virt_addr,
+                              dma_addr_t phys_addr)
+{
+       chain->p_virt_addr = virt_addr;
+       chain->p_phys_addr = phys_addr;
+}
+
+static void qed_chain_free_next_ptr(struct qed_dev *cdev,
+                                   struct qed_chain *chain)
+{
+       struct device *dev = &cdev->pdev->dev;
+       struct qed_chain_next *next;
+       dma_addr_t phys, phys_next;
+       void *virt, *virt_next;
+       u32 size, i;
+
+       size = chain->elem_size * chain->usable_per_page;
+       virt = chain->p_virt_addr;
+       phys = chain->p_phys_addr;
+
+       for (i = 0; i < chain->page_cnt; i++) {
+               if (!virt)
+                       break;
+
+               next = virt + size;
+               virt_next = next->next_virt;
+               phys_next = HILO_DMA_REGPAIR(next->next_phys);
+
+               dma_free_coherent(dev, chain->page_size, virt, phys);
+
+               virt = virt_next;
+               phys = phys_next;
+       }
+}
+
+static void qed_chain_free_single(struct qed_dev *cdev,
+                                 struct qed_chain *chain)
+{
+       if (!chain->p_virt_addr)
+               return;
+
+       dma_free_coherent(&cdev->pdev->dev, chain->page_size,
+                         chain->p_virt_addr, chain->p_phys_addr);
+}
+
+static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain)
+{
+       struct device *dev = &cdev->pdev->dev;
+       struct addr_tbl_entry *entry;
+       u32 i;
+
+       if (!chain->pbl.pp_addr_tbl)
+               return;
+
+       for (i = 0; i < chain->page_cnt; i++) {
+               entry = chain->pbl.pp_addr_tbl + i;
+               if (!entry->virt_addr)
+                       break;
+
+               dma_free_coherent(dev, chain->page_size, entry->virt_addr,
+                                 entry->dma_map);
+       }
+
+       if (!chain->b_external_pbl)
+               dma_free_coherent(dev, chain->pbl_sp.table_size,
+                                 chain->pbl_sp.table_virt,
+                                 chain->pbl_sp.table_phys);
+
+       vfree(chain->pbl.pp_addr_tbl);
+       chain->pbl.pp_addr_tbl = NULL;
+}
+
+/**
+ * qed_chain_free() - Free chain DMA memory.
+ *
+ * @cdev: Main device structure.
+ * @chain: Chain to free.
+ */
+void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain)
+{
+       switch (chain->mode) {
+       case QED_CHAIN_MODE_NEXT_PTR:
+               qed_chain_free_next_ptr(cdev, chain);
+               break;
+       case QED_CHAIN_MODE_SINGLE:
+               qed_chain_free_single(cdev, chain);
+               break;
+       case QED_CHAIN_MODE_PBL:
+               qed_chain_free_pbl(cdev, chain);
+               break;
+       default:
+               return;
+       }
+
+       qed_chain_init_mem(chain, NULL, 0);
+}
+
+static int
+qed_chain_alloc_sanity_check(struct qed_dev *cdev,
+                            const struct qed_chain_init_params *params,
+                            u32 page_cnt)
+{
+       u64 chain_size;
+
+       chain_size = ELEMS_PER_PAGE(params->elem_size, params->page_size);
+       chain_size *= page_cnt;
+
+       if (!chain_size)
+               return -EINVAL;
+
+       /* The actual chain size can be larger than the maximal possible value
+        * after rounding up the requested elements number to pages, and after
+        * taking into account the unusable elements (next-ptr elements).
+        * The size of a "u16" chain can be (U16_MAX + 1) since the chain
+        * size/capacity fields are of u32 type.
+        */
+       switch (params->cnt_type) {
+       case QED_CHAIN_CNT_TYPE_U16:
+               if (chain_size > U16_MAX + 1)
+                       break;
+
+               return 0;
+       case QED_CHAIN_CNT_TYPE_U32:
+               if (chain_size > U32_MAX)
+                       break;
+
+               return 0;
+       default:
+               return -EINVAL;
+       }
+
+       DP_NOTICE(cdev,
+                 "The actual chain size (0x%llx) is larger than the maximal possible value\n",
+                 chain_size);
+
+       return -EINVAL;
+}
+
+static int qed_chain_alloc_next_ptr(struct qed_dev *cdev,
+                                   struct qed_chain *chain)
+{
+       struct device *dev = &cdev->pdev->dev;
+       void *virt, *virt_prev = NULL;
+       dma_addr_t phys;
+       u32 i;
+
+       for (i = 0; i < chain->page_cnt; i++) {
+               virt = dma_alloc_coherent(dev, chain->page_size, &phys,
+                                         GFP_KERNEL);
+               if (!virt)
+                       return -ENOMEM;
+
+               if (i == 0) {
+                       qed_chain_init_mem(chain, virt, phys);
+                       qed_chain_reset(chain);
+               } else {
+                       qed_chain_init_next_ptr_elem(chain, virt_prev, virt,
+                                                    phys);
+               }
+
+               virt_prev = virt;
+       }
+
+       /* Last page's next element should point to the beginning of the
+        * chain.
+        */
+       qed_chain_init_next_ptr_elem(chain, virt_prev, chain->p_virt_addr,
+                                    chain->p_phys_addr);
+
+       return 0;
+}
+
+static int qed_chain_alloc_single(struct qed_dev *cdev,
+                                 struct qed_chain *chain)
+{
+       dma_addr_t phys;
+       void *virt;
+
+       virt = dma_alloc_coherent(&cdev->pdev->dev, chain->page_size,
+                                 &phys, GFP_KERNEL);
+       if (!virt)
+               return -ENOMEM;
+
+       qed_chain_init_mem(chain, virt, phys);
+       qed_chain_reset(chain);
+
+       return 0;
+}
+
+static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain)
+{
+       struct device *dev = &cdev->pdev->dev;
+       struct addr_tbl_entry *addr_tbl;
+       dma_addr_t phys, pbl_phys;
+       __le64 *pbl_virt;
+       u32 page_cnt, i;
+       size_t size;
+       void *virt;
+
+       page_cnt = chain->page_cnt;
+
+       size = array_size(page_cnt, sizeof(*addr_tbl));
+       if (unlikely(size == SIZE_MAX))
+               return -EOVERFLOW;
+
+       addr_tbl = vzalloc(size);
+       if (!addr_tbl)
+               return -ENOMEM;
+
+       chain->pbl.pp_addr_tbl = addr_tbl;
+
+       if (chain->b_external_pbl)
+               goto alloc_pages;
+
+       size = array_size(page_cnt, sizeof(*pbl_virt));
+       if (unlikely(size == SIZE_MAX))
+               return -EOVERFLOW;
+
+       pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys, GFP_KERNEL);
+       if (!pbl_virt)
+               return -ENOMEM;
+
+       chain->pbl_sp.table_virt = pbl_virt;
+       chain->pbl_sp.table_phys = pbl_phys;
+       chain->pbl_sp.table_size = size;
+
+alloc_pages:
+       for (i = 0; i < page_cnt; i++) {
+               virt = dma_alloc_coherent(dev, chain->page_size, &phys,
+                                         GFP_KERNEL);
+               if (!virt)
+                       return -ENOMEM;
+
+               if (i == 0) {
+                       qed_chain_init_mem(chain, virt, phys);
+                       qed_chain_reset(chain);
+               }
+
+               /* Fill the PBL table with the physical address of the page */
+               pbl_virt[i] = cpu_to_le64(phys);
+
+               /* Keep the virtual address of the page */
+               addr_tbl[i].virt_addr = virt;
+               addr_tbl[i].dma_map = phys;
+       }
+
+       return 0;
+}
+
+/**
+ * qed_chain_alloc() - Allocate and initialize a chain.
+ *
+ * @cdev: Main device structure.
+ * @chain: Chain to be processed.
+ * @params: Chain initialization parameters.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
+                   struct qed_chain_init_params *params)
+{
+       u32 page_cnt;
+       int rc;
+
+       if (!params->page_size)
+               params->page_size = QED_CHAIN_PAGE_SIZE;
+
+       if (params->mode == QED_CHAIN_MODE_SINGLE)
+               page_cnt = 1;
+       else
+               page_cnt = QED_CHAIN_PAGE_CNT(params->num_elems,
+                                             params->elem_size,
+                                             params->page_size,
+                                             params->mode);
+
+       rc = qed_chain_alloc_sanity_check(cdev, params, page_cnt);
+       if (rc) {
+               DP_NOTICE(cdev,
+                         "Cannot allocate a chain with the given arguments:\n");
+               DP_NOTICE(cdev,
+                         "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu, page_size %u]\n",
+                         params->intended_use, params->mode, params->cnt_type,
+                         params->num_elems, params->elem_size,
+                         params->page_size);
+               return rc;
+       }
+
+       qed_chain_init(chain, params, page_cnt);
+
+       switch (params->mode) {
+       case QED_CHAIN_MODE_NEXT_PTR:
+               rc = qed_chain_alloc_next_ptr(cdev, chain);
+               break;
+       case QED_CHAIN_MODE_SINGLE:
+               rc = qed_chain_alloc_single(cdev, chain);
+               break;
+       case QED_CHAIN_MODE_PBL:
+               rc = qed_chain_alloc_pbl(cdev, chain);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (!rc)
+               return 0;
+
+       qed_chain_free(cdev, chain);
+
+       return rc;
+}
index 7b76667..6c221e9 100644 (file)
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/types.h>
@@ -99,8 +73,8 @@ union type1_task_context {
 };
 
 struct src_ent {
-       u8 opaque[56];
-       u64 next;
+       __u8                            opaque[56];
+       __be64                          next;
 };
 
 #define CDUT_SEG_ALIGNMET              3 /* in 4k chunks */
@@ -271,7 +245,7 @@ static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
                vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
        }
 
-       iids->vf_cids += vf_cids * p_mngr->vf_count;
+       iids->vf_cids = vf_cids;
        iids->tids += vf_tids * p_mngr->vf_count;
 
        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
@@ -465,6 +439,20 @@ static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
        return p_blk;
 }
 
+static void qed_cxt_ilt_blk_reset(struct qed_hwfn *p_hwfn)
+{
+       struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+       u32 cli_idx, blk_idx;
+
+       for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) {
+               for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++)
+                       clients[cli_idx].pf_blks[blk_idx].total_size = 0;
+
+               for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++)
+                       clients[cli_idx].vf_blks[blk_idx].total_size = 0;
+       }
+}
+
 int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
 {
        struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
@@ -484,6 +472,11 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
 
        p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
 
+       /* Reset all ILT blocks at the beginning of ILT computing in order
+        * to prevent memory allocation for irrelevant blocks afterwards.
+        */
+       qed_cxt_ilt_blk_reset(p_hwfn);
+
        DP_VERBOSE(p_hwfn, QED_MSG_ILT,
                   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
                   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
@@ -2177,12 +2170,14 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
                          enum qed_cxt_elem_type elem_type, u32 iid)
 {
        u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
+       struct tdif_task_context *tdif_context;
        struct qed_ilt_client_cfg *p_cli;
        struct qed_ilt_cli_blk *p_blk;
        struct qed_ptt *p_ptt;
        dma_addr_t p_phys;
        u64 ilt_hw_entry;
        void *p_virt;
+       u32 flags1;
        int rc = 0;
 
        switch (elem_type) {
@@ -2259,8 +2254,12 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 
                for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
                        elem = (union type1_task_context *)elem_start;
-                       SET_FIELD(elem->roce_ctx.tdif_context.flags1,
-                                 TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
+                       tdif_context = &elem->roce_ctx.tdif_context;
+
+                       flags1 = le32_to_cpu(tdif_context->flags1);
+                       SET_FIELD(flags1, TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
+                       tdif_context->flags1 = cpu_to_le32(flags1);
+
                        elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
                }
        }
@@ -2336,6 +2335,11 @@ qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
                elem_size = SRQ_CXT_SIZE;
                p_blk = &p_cli->pf_blks[SRQ_BLK];
                break;
+       case QED_ELEM_XRC_SRQ:
+               p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+               elem_size = XRC_SRQ_CXT_SIZE;
+               p_blk = &p_cli->pf_blks[SRQ_BLK];
+               break;
        case QED_ELEM_TASK:
                p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
                elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
index ce08ae8..8b64495 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_CXT_H
index 5c6a276..17d5b64 100644 (file)
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/types.h>
@@ -573,7 +547,8 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
                      struct dcbx_ets_feature *p_ets,
                      struct qed_dcbx_params *p_params)
 {
-       u32 bw_map[2], tsa_map[2], pri_map;
+       __be32 bw_map[2], tsa_map[2];
+       u32 pri_map;
        int i;
 
        p_params->ets_willing = QED_MFW_GET_FIELD(p_ets->flags,
@@ -599,11 +574,10 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
        /* 8 bit tsa and bw data corresponding to each of the 8 TC's are
         * encoded in a type u32 array of size 2.
         */
-       bw_map[0] = be32_to_cpu(p_ets->tc_bw_tbl[0]);
-       bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]);
-       tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]);
-       tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]);
+       cpu_to_be32_array(bw_map, p_ets->tc_bw_tbl, 2);
+       cpu_to_be32_array(tsa_map, p_ets->tc_tsa_tbl, 2);
        pri_map = p_ets->pri_tc_tbl[0];
+
        for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
                p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
                p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
@@ -1080,7 +1054,7 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
                      struct dcbx_ets_feature *p_ets,
                      struct qed_dcbx_params *p_params)
 {
-       u8 *bw_map, *tsa_map;
+       __be32 bw_map[2], tsa_map[2];
        u32 val;
        int i;
 
@@ -1102,22 +1076,21 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
        p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK;
        p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_SHIFT;
 
-       bw_map = (u8 *)&p_ets->tc_bw_tbl[0];
-       tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0];
        p_ets->pri_tc_tbl[0] = 0;
+
        for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
-               bw_map[i] = p_params->ets_tc_bw_tbl[i];
-               tsa_map[i] = p_params->ets_tc_tsa_tbl[i];
+               ((u8 *)bw_map)[i] = p_params->ets_tc_bw_tbl[i];
+               ((u8 *)tsa_map)[i] = p_params->ets_tc_tsa_tbl[i];
+
                /* Copy the priority value to the corresponding 4 bits in the
                 * traffic class table.
                 */
                val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
                p_ets->pri_tc_tbl[0] |= val;
        }
-       for (i = 0; i < 2; i++) {
-               p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]);
-               p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]);
-       }
+
+       be32_to_cpu_array(p_ets->tc_bw_tbl, bw_map, 2);
+       be32_to_cpu_array(p_ets->tc_tsa_tbl, tsa_map, 2);
 }
 
 static void
index 01f253e..e179892 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_DCBX_H
@@ -107,6 +81,8 @@ struct qed_dcbx_mib_meta_data {
        u32 addr;
 };
 
+extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
+
 #ifdef CONFIG_DCB
 int qed_dcbx_get_config_params(struct qed_hwfn *, struct qed_dcbx_set *);
 
index 57a0dab..6ab3e60 100644 (file)
@@ -1,6 +1,7 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/module.h>
@@ -971,7 +972,7 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
 {
        struct storm_defs *storm = &s_storm_defs[storm_id];
        struct fw_info_location fw_info_location;
-       u32 addr, i, *dest;
+       u32 addr, i, size, *dest;
 
        memset(&fw_info_location, 0, sizeof(fw_info_location));
        memset(fw_info, 0, sizeof(*fw_info));
@@ -984,20 +985,29 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
            sizeof(fw_info_location);
 
        dest = (u32 *)&fw_info_location;
+       size = BYTES_TO_DWORDS(sizeof(fw_info_location));
 
-       for (i = 0; i < BYTES_TO_DWORDS(sizeof(fw_info_location));
-            i++, addr += BYTES_IN_DWORD)
+       for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
                dest[i] = qed_rd(p_hwfn, p_ptt, addr);
 
+       /* qed_rq() fetches data in CPU byteorder. Swap it back to
+        * the device's to get right structure layout.
+        */
+       cpu_to_le32_array(dest, size);
+
        /* Read FW version info from Storm RAM */
-       if (fw_info_location.size > 0 && fw_info_location.size <=
-           sizeof(*fw_info)) {
-               addr = fw_info_location.grc_addr;
-               dest = (u32 *)fw_info;
-               for (i = 0; i < BYTES_TO_DWORDS(fw_info_location.size);
-                    i++, addr += BYTES_IN_DWORD)
-                       dest[i] = qed_rd(p_hwfn, p_ptt, addr);
-       }
+       size = le32_to_cpu(fw_info_location.size);
+       if (!size || size > sizeof(*fw_info))
+               return;
+
+       addr = le32_to_cpu(fw_info_location.grc_addr);
+       dest = (u32 *)fw_info;
+       size = BYTES_TO_DWORDS(size);
+
+       for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
+               dest[i] = qed_rd(p_hwfn, p_ptt, addr);
+
+       cpu_to_le32_array(dest, size);
 }
 
 /* Dumps the specified string to the specified buffer.
@@ -1121,9 +1131,8 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
                                     dump, "fw-version", fw_ver_str);
        offset += qed_dump_str_param(dump_buf + offset,
                                     dump, "fw-image", fw_img_str);
-       offset += qed_dump_num_param(dump_buf + offset,
-                                    dump,
-                                    "fw-timestamp", fw_info.ver.timestamp);
+       offset += qed_dump_num_param(dump_buf + offset, dump, "fw-timestamp",
+                                    le32_to_cpu(fw_info.ver.timestamp));
 
        return offset;
 }
@@ -4440,9 +4449,11 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
                        continue;
                }
 
+               addr = le16_to_cpu(asserts->section_ram_line_offset);
                fw_asserts_section_addr = storm->sem_fast_mem_addr +
-                       SEM_FAST_REG_INT_RAM +
-                       RAM_LINES_TO_BYTES(asserts->section_ram_line_offset);
+                                         SEM_FAST_REG_INT_RAM +
+                                         RAM_LINES_TO_BYTES(addr);
+
                next_list_idx_addr = fw_asserts_section_addr +
                        DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
                next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
@@ -5568,7 +5579,8 @@ static const char * const s_status_str[] = {
 
        /* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
        "The filter/trigger constraint dword offsets are not enabled for recording",
-
+       /* DBG_STATUS_NO_MATCHING_FRAMING_MODE */
+       "No matching framing mode",
 
        /* DBG_STATUS_VFC_READ_ERROR */
        "Error reading from VFC",
@@ -7505,6 +7517,12 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
        if (p_hwfn->cdev->print_dbg_data)
                qed_dbg_print_feature(text_buf, text_size_bytes);
 
+       /* Just return the original binary buffer if requested */
+       if (p_hwfn->cdev->dbg_bin_dump) {
+               vfree(text_buf);
+               return DBG_STATUS_OK;
+       }
+
        /* Free the old dump_buf and point the dump_buf to the newly allocagted
         * and formatted text buffer.
         */
@@ -7649,8 +7667,7 @@ static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
 {
        struct qed_hwfn *p_hwfn =
                &cdev->hwfns[cdev->engine_for_debug];
-       u32 len_rounded, i;
-       __be32 val;
+       u32 len_rounded;
        int rc;
 
        *num_dumped_bytes = 0;
@@ -7669,10 +7686,9 @@ static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
 
        /* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
        if (image_id != QED_NVM_IMAGE_NVM_META)
-               for (i = 0; i < len_rounded; i += 4) {
-                       val = cpu_to_be32(*(u32 *)(buffer + i));
-                       *(u32 *)(buffer + i) = val;
-               }
+               cpu_to_be32_array((__force __be32 *)buffer,
+                                 (const u32 *)buffer,
+                                 len_rounded / sizeof(u32));
 
        *num_dumped_bytes = len_rounded;
 
@@ -7732,7 +7748,9 @@ int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
 #define REGDUMP_HEADER_SIZE_SHIFT              0
 #define REGDUMP_HEADER_SIZE_MASK               0xffffff
 #define REGDUMP_HEADER_FEATURE_SHIFT           24
-#define REGDUMP_HEADER_FEATURE_MASK            0x3f
+#define REGDUMP_HEADER_FEATURE_MASK            0x1f
+#define REGDUMP_HEADER_BIN_DUMP_SHIFT          29
+#define REGDUMP_HEADER_BIN_DUMP_MASK           0x1
 #define REGDUMP_HEADER_OMIT_ENGINE_SHIFT       30
 #define REGDUMP_HEADER_OMIT_ENGINE_MASK                0x1
 #define REGDUMP_HEADER_ENGINE_SHIFT            31
@@ -7770,6 +7788,7 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev,
                          feature, feature_size);
 
        SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
+       SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, 1);
        SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
        SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
 
@@ -7793,6 +7812,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
                omit_engine = 1;
 
        mutex_lock(&qed_dbg_lock);
+       cdev->dbg_bin_dump = true;
 
        org_engine = qed_get_debug_engine(cdev);
        for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
@@ -7930,6 +7950,10 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
                DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
        }
 
+       /* Re-populate nvm attribute info */
+       qed_mcp_nvm_info_free(p_hwfn);
+       qed_mcp_nvm_info_populate(p_hwfn);
+
        /* nvm cfg1 */
        rc = qed_dbg_nvm_image(cdev,
                               (u8 *)buffer + offset +
@@ -7992,6 +8016,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
                       QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
        }
 
+       cdev->dbg_bin_dump = false;
        mutex_unlock(&qed_dbg_lock);
 
        return 0;
index edf99d2..e71af82 100644 (file)
@@ -1,6 +1,7 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_DEBUGFS_H
index 1eebf30..d9c7a1a 100644 (file)
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/types.h>
@@ -980,7 +954,7 @@ int qed_llh_add_mac_filter(struct qed_dev *cdev,
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
        union qed_llh_filter filter = {};
-       u8 filter_idx, abs_ppfid;
+       u8 filter_idx, abs_ppfid = 0;
        u32 high, low, ref_cnt;
        int rc = 0;
 
@@ -1368,6 +1342,8 @@ static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn)
 
 void qed_resc_free(struct qed_dev *cdev)
 {
+       struct qed_rdma_info *rdma_info;
+       struct qed_hwfn *p_hwfn;
        int i;
 
        if (IS_VF(cdev)) {
@@ -1385,7 +1361,8 @@ void qed_resc_free(struct qed_dev *cdev)
        qed_llh_free(cdev);
 
        for_each_hwfn(cdev, i) {
-               struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+               p_hwfn = cdev->hwfns + i;
+               rdma_info = p_hwfn->p_rdma_info;
 
                qed_cxt_mngr_free(p_hwfn);
                qed_qm_info_free(p_hwfn);
@@ -1404,8 +1381,10 @@ void qed_resc_free(struct qed_dev *cdev)
                        qed_ooo_free(p_hwfn);
                }
 
-               if (QED_IS_RDMA_PERSONALITY(p_hwfn))
+               if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) {
+                       qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto);
                        qed_rdma_info_free(p_hwfn);
+               }
 
                qed_iov_free(p_hwfn);
                qed_l2_free(p_hwfn);
@@ -3989,8 +3968,9 @@ unlock_and_exit:
 
 static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-       u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
+       u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities, fld;
        u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
+       struct qed_mcp_link_speed_params *ext_speed;
        struct qed_mcp_link_capabilities *p_caps;
        struct qed_mcp_link_params *link;
 
@@ -4015,37 +3995,21 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
                NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
-               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
-               break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
-               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
-               break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
-               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
-               break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
-               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
-               break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
-               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
-               break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
-               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
-               break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
-               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
-               break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
-               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
-               break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
-               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G;
-               break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
-               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
-               break;
        case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
-               p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G;
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1:
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1:
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2:
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2:
+       case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4:
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
@@ -4063,8 +4027,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
        link->speed.advertised_speeds = link_temp;
 
-       link_temp = link->speed.advertised_speeds;
-       p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
+       p_caps->speed_capabilities = link->speed.advertised_speeds;
 
        link_temp = qed_rd(p_hwfn, p_ptt,
                           port_cfg_addr +
@@ -4099,19 +4062,40 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
                DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
        }
 
-       p_hwfn->mcp_info->link_capabilities.default_speed_autoneg =
-               link->speed.autoneg;
+       p_caps->default_speed_autoneg = link->speed.autoneg;
 
-       link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
-       link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
-       link->pause.autoneg = !!(link_temp &
-                                NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
-       link->pause.forced_rx = !!(link_temp &
-                                  NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
-       link->pause.forced_tx = !!(link_temp &
-                                  NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
+       fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_DRV_FLOW_CONTROL);
+       link->pause.autoneg = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
+       link->pause.forced_rx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
+       link->pause.forced_tx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
        link->loopback_mode = 0;
 
+       if (p_hwfn->mcp_info->capabilities &
+           FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
+               switch (GET_MFW_FIELD(link_temp,
+                                     NVM_CFG1_PORT_FEC_FORCE_MODE)) {
+               case NVM_CFG1_PORT_FEC_FORCE_MODE_NONE:
+                       p_caps->fec_default |= QED_FEC_MODE_NONE;
+                       break;
+               case NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE:
+                       p_caps->fec_default |= QED_FEC_MODE_FIRECODE;
+                       break;
+               case NVM_CFG1_PORT_FEC_FORCE_MODE_RS:
+                       p_caps->fec_default |= QED_FEC_MODE_RS;
+                       break;
+               case NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO:
+                       p_caps->fec_default |= QED_FEC_MODE_AUTO;
+                       break;
+               default:
+                       DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                                  "unknown FEC mode in 0x%08x\n", link_temp);
+               }
+       } else {
+               p_caps->fec_default = QED_FEC_MODE_UNSUPPORTED;
+       }
+
+       link->fec = p_caps->fec_default;
+
        if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
                link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr +
                                   offsetof(struct nvm_cfg1_port, ext_phy));
@@ -4143,14 +4127,97 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
                p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED;
        }
 
-       DP_VERBOSE(p_hwfn,
-                  NETIF_MSG_LINK,
-                  "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n",
-                  link->speed.forced_speed,
-                  link->speed.advertised_speeds,
-                  link->speed.autoneg,
-                  link->pause.autoneg,
-                  p_caps->default_eee, p_caps->eee_lpi_timer);
+       if (p_hwfn->mcp_info->capabilities &
+           FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
+               ext_speed = &link->ext_speed;
+
+               link_temp = qed_rd(p_hwfn, p_ptt,
+                                  port_cfg_addr +
+                                  offsetof(struct nvm_cfg1_port,
+                                           extended_speed));
+
+               fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_EXTENDED_SPEED);
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN)
+                       ext_speed->autoneg = true;
+
+               ext_speed->forced_speed = 0;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G)
+                       ext_speed->forced_speed |= QED_EXT_SPEED_1G;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G)
+                       ext_speed->forced_speed |= QED_EXT_SPEED_10G;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G)
+                       ext_speed->forced_speed |= QED_EXT_SPEED_20G;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G)
+                       ext_speed->forced_speed |= QED_EXT_SPEED_25G;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G)
+                       ext_speed->forced_speed |= QED_EXT_SPEED_40G;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R)
+                       ext_speed->forced_speed |= QED_EXT_SPEED_50G_R;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2)
+                       ext_speed->forced_speed |= QED_EXT_SPEED_50G_R2;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2)
+                       ext_speed->forced_speed |= QED_EXT_SPEED_100G_R2;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4)
+                       ext_speed->forced_speed |= QED_EXT_SPEED_100G_R4;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4)
+                       ext_speed->forced_speed |= QED_EXT_SPEED_100G_P4;
+
+               fld = GET_MFW_FIELD(link_temp,
+                                   NVM_CFG1_PORT_EXTENDED_SPEED_CAP);
+
+               ext_speed->advertised_speeds = 0;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED)
+                       ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_RES;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G)
+                       ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_1G;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G)
+                       ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_10G;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G)
+                       ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_20G;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G)
+                       ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_25G;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G)
+                       ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_40G;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R)
+                       ext_speed->advertised_speeds |=
+                               QED_EXT_SPEED_MASK_50G_R;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2)
+                       ext_speed->advertised_speeds |=
+                               QED_EXT_SPEED_MASK_50G_R2;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2)
+                       ext_speed->advertised_speeds |=
+                               QED_EXT_SPEED_MASK_100G_R2;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4)
+                       ext_speed->advertised_speeds |=
+                               QED_EXT_SPEED_MASK_100G_R4;
+               if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4)
+                       ext_speed->advertised_speeds |=
+                               QED_EXT_SPEED_MASK_100G_P4;
+
+               link_temp = qed_rd(p_hwfn, p_ptt,
+                                  port_cfg_addr +
+                                  offsetof(struct nvm_cfg1_port,
+                                           extended_fec_mode));
+               link->ext_fec_mode = link_temp;
+
+               p_caps->default_ext_speed_caps = ext_speed->advertised_speeds;
+               p_caps->default_ext_speed = ext_speed->forced_speed;
+               p_caps->default_ext_autoneg = ext_speed->autoneg;
+               p_caps->default_ext_fec = link->ext_fec_mode;
+
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                          "Read default extended link config: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, FEC: 0x%02x\n",
+                          ext_speed->forced_speed,
+                          ext_speed->advertised_speeds, ext_speed->autoneg,
+                          p_caps->default_ext_fec);
+       }
+
+       DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+                  "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x, EEE: 0x%02x [0x%08x usec], FEC: 0x%02x\n",
+                  link->speed.forced_speed, link->speed.advertised_speeds,
+                  link->speed.autoneg, link->pause.autoneg,
+                  p_caps->default_eee, p_caps->eee_lpi_timer,
+                  p_caps->fec_default);
 
        if (IS_LEAD_HWFN(p_hwfn)) {
                struct qed_dev *cdev = p_hwfn->cdev;
@@ -4467,12 +4534,6 @@ static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        return 0;
 }
 
-static void qed_nvm_info_free(struct qed_hwfn *p_hwfn)
-{
-       kfree(p_hwfn->nvm_info.image_att);
-       p_hwfn->nvm_info.image_att = NULL;
-}
-
 static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
                                 void __iomem *p_regview,
                                 void __iomem *p_doorbells,
@@ -4557,7 +4618,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
        return rc;
 err3:
        if (IS_LEAD_HWFN(p_hwfn))
-               qed_nvm_info_free(p_hwfn);
+               qed_mcp_nvm_info_free(p_hwfn);
 err2:
        if (IS_LEAD_HWFN(p_hwfn))
                qed_iov_free_hw_info(p_hwfn->cdev);
@@ -4618,7 +4679,7 @@ int qed_hw_prepare(struct qed_dev *cdev,
                if (rc) {
                        if (IS_PF(cdev)) {
                                qed_init_free(p_hwfn);
-                               qed_nvm_info_free(p_hwfn);
+                               qed_mcp_nvm_info_free(p_hwfn);
                                qed_mcp_free(p_hwfn);
                                qed_hw_hwfn_free(p_hwfn);
                        }
@@ -4652,280 +4713,7 @@ void qed_hw_remove(struct qed_dev *cdev)
 
        qed_iov_free_hw_info(cdev);
 
-       qed_nvm_info_free(p_hwfn);
-}
-
-static void qed_chain_free_next_ptr(struct qed_dev *cdev,
-                                   struct qed_chain *p_chain)
-{
-       void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL;
-       dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
-       struct qed_chain_next *p_next;
-       u32 size, i;
-
-       if (!p_virt)
-               return;
-
-       size = p_chain->elem_size * p_chain->usable_per_page;
-
-       for (i = 0; i < p_chain->page_cnt; i++) {
-               if (!p_virt)
-                       break;
-
-               p_next = (struct qed_chain_next *)((u8 *)p_virt + size);
-               p_virt_next = p_next->next_virt;
-               p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
-
-               dma_free_coherent(&cdev->pdev->dev,
-                                 QED_CHAIN_PAGE_SIZE, p_virt, p_phys);
-
-               p_virt = p_virt_next;
-               p_phys = p_phys_next;
-       }
-}
-
-static void qed_chain_free_single(struct qed_dev *cdev,
-                                 struct qed_chain *p_chain)
-{
-       if (!p_chain->p_virt_addr)
-               return;
-
-       dma_free_coherent(&cdev->pdev->dev,
-                         QED_CHAIN_PAGE_SIZE,
-                         p_chain->p_virt_addr, p_chain->p_phys_addr);
-}
-
-static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
-{
-       struct addr_tbl_entry *pp_addr_tbl = p_chain->pbl.pp_addr_tbl;
-       u32 page_cnt = p_chain->page_cnt, i, pbl_size;
-
-       if (!pp_addr_tbl)
-               return;
-
-       for (i = 0; i < page_cnt; i++) {
-               if (!pp_addr_tbl[i].virt_addr || !pp_addr_tbl[i].dma_map)
-                       break;
-
-               dma_free_coherent(&cdev->pdev->dev,
-                                 QED_CHAIN_PAGE_SIZE,
-                                 pp_addr_tbl[i].virt_addr,
-                                 pp_addr_tbl[i].dma_map);
-       }
-
-       pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
-
-       if (!p_chain->b_external_pbl)
-               dma_free_coherent(&cdev->pdev->dev,
-                                 pbl_size,
-                                 p_chain->pbl_sp.p_virt_table,
-                                 p_chain->pbl_sp.p_phys_table);
-
-       vfree(p_chain->pbl.pp_addr_tbl);
-       p_chain->pbl.pp_addr_tbl = NULL;
-}
-
-void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
-{
-       switch (p_chain->mode) {
-       case QED_CHAIN_MODE_NEXT_PTR:
-               qed_chain_free_next_ptr(cdev, p_chain);
-               break;
-       case QED_CHAIN_MODE_SINGLE:
-               qed_chain_free_single(cdev, p_chain);
-               break;
-       case QED_CHAIN_MODE_PBL:
-               qed_chain_free_pbl(cdev, p_chain);
-               break;
-       }
-}
-
-static int
-qed_chain_alloc_sanity_check(struct qed_dev *cdev,
-                            enum qed_chain_cnt_type cnt_type,
-                            size_t elem_size, u32 page_cnt)
-{
-       u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
-
-       /* The actual chain size can be larger than the maximal possible value
-        * after rounding up the requested elements number to pages, and after
-        * taking into acount the unusuable elements (next-ptr elements).
-        * The size of a "u16" chain can be (U16_MAX + 1) since the chain
-        * size/capacity fields are of a u32 type.
-        */
-       if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
-            chain_size > ((u32)U16_MAX + 1)) ||
-           (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) {
-               DP_NOTICE(cdev,
-                         "The actual chain size (0x%llx) is larger than the maximal possible value\n",
-                         chain_size);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int
-qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
-{
-       void *p_virt = NULL, *p_virt_prev = NULL;
-       dma_addr_t p_phys = 0;
-       u32 i;
-
-       for (i = 0; i < p_chain->page_cnt; i++) {
-               p_virt = dma_alloc_coherent(&cdev->pdev->dev,
-                                           QED_CHAIN_PAGE_SIZE,
-                                           &p_phys, GFP_KERNEL);
-               if (!p_virt)
-                       return -ENOMEM;
-
-               if (i == 0) {
-                       qed_chain_init_mem(p_chain, p_virt, p_phys);
-                       qed_chain_reset(p_chain);
-               } else {
-                       qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
-                                                    p_virt, p_phys);
-               }
-
-               p_virt_prev = p_virt;
-       }
-       /* Last page's next element should point to the beginning of the
-        * chain.
-        */
-       qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
-                                    p_chain->p_virt_addr,
-                                    p_chain->p_phys_addr);
-
-       return 0;
-}
-
-static int
-qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
-{
-       dma_addr_t p_phys = 0;
-       void *p_virt = NULL;
-
-       p_virt = dma_alloc_coherent(&cdev->pdev->dev,
-                                   QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
-       if (!p_virt)
-               return -ENOMEM;
-
-       qed_chain_init_mem(p_chain, p_virt, p_phys);
-       qed_chain_reset(p_chain);
-
-       return 0;
-}
-
-static int
-qed_chain_alloc_pbl(struct qed_dev *cdev,
-                   struct qed_chain *p_chain,
-                   struct qed_chain_ext_pbl *ext_pbl)
-{
-       u32 page_cnt = p_chain->page_cnt, size, i;
-       dma_addr_t p_phys = 0, p_pbl_phys = 0;
-       struct addr_tbl_entry *pp_addr_tbl;
-       u8 *p_pbl_virt = NULL;
-       void *p_virt = NULL;
-
-       size = page_cnt * sizeof(*pp_addr_tbl);
-       pp_addr_tbl =  vzalloc(size);
-       if (!pp_addr_tbl)
-               return -ENOMEM;
-
-       /* The allocation of the PBL table is done with its full size, since it
-        * is expected to be successive.
-        * qed_chain_init_pbl_mem() is called even in a case of an allocation
-        * failure, since tbl was previously allocated, and it
-        * should be saved to allow its freeing during the error flow.
-        */
-       size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
-
-       if (!ext_pbl) {
-               p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
-                                               size, &p_pbl_phys, GFP_KERNEL);
-       } else {
-               p_pbl_virt = ext_pbl->p_pbl_virt;
-               p_pbl_phys = ext_pbl->p_pbl_phys;
-               p_chain->b_external_pbl = true;
-       }
-
-       qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_addr_tbl);
-       if (!p_pbl_virt)
-               return -ENOMEM;
-
-       for (i = 0; i < page_cnt; i++) {
-               p_virt = dma_alloc_coherent(&cdev->pdev->dev,
-                                           QED_CHAIN_PAGE_SIZE,
-                                           &p_phys, GFP_KERNEL);
-               if (!p_virt)
-                       return -ENOMEM;
-
-               if (i == 0) {
-                       qed_chain_init_mem(p_chain, p_virt, p_phys);
-                       qed_chain_reset(p_chain);
-               }
-
-               /* Fill the PBL table with the physical address of the page */
-               *(dma_addr_t *)p_pbl_virt = p_phys;
-               /* Keep the virtual address of the page */
-               p_chain->pbl.pp_addr_tbl[i].virt_addr = p_virt;
-               p_chain->pbl.pp_addr_tbl[i].dma_map = p_phys;
-
-               p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
-       }
-
-       return 0;
-}
-
-int qed_chain_alloc(struct qed_dev *cdev,
-                   enum qed_chain_use_mode intended_use,
-                   enum qed_chain_mode mode,
-                   enum qed_chain_cnt_type cnt_type,
-                   u32 num_elems,
-                   size_t elem_size,
-                   struct qed_chain *p_chain,
-                   struct qed_chain_ext_pbl *ext_pbl)
-{
-       u32 page_cnt;
-       int rc = 0;
-
-       if (mode == QED_CHAIN_MODE_SINGLE)
-               page_cnt = 1;
-       else
-               page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
-
-       rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
-       if (rc) {
-               DP_NOTICE(cdev,
-                         "Cannot allocate a chain with the given arguments:\n");
-               DP_NOTICE(cdev,
-                         "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
-                         intended_use, mode, cnt_type, num_elems, elem_size);
-               return rc;
-       }
-
-       qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use,
-                             mode, cnt_type);
-
-       switch (mode) {
-       case QED_CHAIN_MODE_NEXT_PTR:
-               rc = qed_chain_alloc_next_ptr(cdev, p_chain);
-               break;
-       case QED_CHAIN_MODE_SINGLE:
-               rc = qed_chain_alloc_single(cdev, p_chain);
-               break;
-       case QED_CHAIN_MODE_PBL:
-               rc = qed_chain_alloc_pbl(cdev, p_chain, ext_pbl);
-               break;
-       }
-       if (rc)
-               goto nomem;
-
-       return 0;
-
-nomem:
-       qed_chain_free(cdev, p_chain);
-       return rc;
+       qed_mcp_nvm_info_free(p_hwfn);
 }
 
 int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
index eb4808b..d3c1f38 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_DEV_API_H
@@ -280,35 +254,9 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
                       dma_addr_t dest_addr,
                       u32 size_in_dwords, struct qed_dmae_params *p_params);
 
-/**
- * @brief qed_chain_alloc - Allocate and initialize a chain
- *
- * @param p_hwfn
- * @param intended_use
- * @param mode
- * @param num_elems
- * @param elem_size
- * @param p_chain
- * @param ext_pbl - a possible external PBL
- *
- * @return int
- */
-int
-qed_chain_alloc(struct qed_dev *cdev,
-               enum qed_chain_use_mode intended_use,
-               enum qed_chain_mode mode,
-               enum qed_chain_cnt_type cnt_type,
-               u32 num_elems,
-               size_t elem_size,
-               struct qed_chain *p_chain, struct qed_chain_ext_pbl *ext_pbl);
-
-/**
- * @brief qed_chain_free - Free chain DMA memory
- *
- * @param p_hwfn
- * @param p_chain
- */
-void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain);
+int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
+                   struct qed_chain_init_params *params);
+void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain);
 
 /**
  * @@brief qed_fw_l2_queue - Get absolute L2 queue ID
index 4c7fa39..b768f06 100644 (file)
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/types.h>
@@ -121,7 +95,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
        struct qed_cxt_info cxt_info;
        u32 dummy_cid;
        int rc = 0;
-       u16 tmp;
+       __le16 tmp;
        u8 i;
 
        /* Get SPQ entry */
@@ -188,17 +162,13 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
        tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries);
        p_data->q_params.cmdq_num_entries = tmp;
 
-       tmp = fcoe_pf_params->num_cqs;
-       p_data->q_params.num_queues = (u8)tmp;
+       p_data->q_params.num_queues = fcoe_pf_params->num_cqs;
 
-       tmp = (u16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
-       p_data->q_params.queue_relative_offset = (u8)tmp;
+       tmp = (__force __le16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
+       p_data->q_params.queue_relative_offset = (__force u8)tmp;
 
        for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
-               u16 igu_sb_id;
-
-               igu_sb_id = qed_get_igu_sb_id(p_hwfn, i);
-               tmp = cpu_to_le16(igu_sb_id);
+               tmp = cpu_to_le16(qed_get_igu_sb_id(p_hwfn, i));
                p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
        }
 
@@ -211,21 +181,21 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
                       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
        p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
            fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ];
-       tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ];
-       p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
-       tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ];
-       p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(tmp);
+       tmp = cpu_to_le16(fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ]);
+       p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = tmp;
+       tmp = cpu_to_le16(fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ]);
+       p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = tmp;
 
        DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA],
                       fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
        p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
            fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
-       tmp = fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA];
-       p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
-       tmp = fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA];
-       p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(tmp);
-       tmp = fcoe_pf_params->rq_buffer_size;
-       p_data->q_params.rq_buffer_size = cpu_to_le16(tmp);
+       tmp = cpu_to_le16(fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA]);
+       p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = tmp;
+       tmp = cpu_to_le16(fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA]);
+       p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = tmp;
+       tmp = cpu_to_le16(fcoe_pf_params->rq_buffer_size);
+       p_data->q_params.rq_buffer_size = tmp;
 
        if (fcoe_pf_params->is_target) {
                SET_FIELD(p_data->q_params.q_validity,
@@ -259,7 +229,8 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
        struct fcoe_conn_offload_ramrod_data *p_data;
        struct qed_spq_entry *p_ent = NULL;
        struct qed_sp_init_data init_data;
-       u16 physical_q0, tmp;
+       u16 physical_q0;
+       __le16 tmp;
        int rc;
 
        /* Get SPQ entry */
@@ -280,7 +251,7 @@ qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
 
        /* Transmission PQ is the first of the PF */
        physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
-       p_conn->physical_q0 = cpu_to_le16(physical_q0);
+       p_conn->physical_q0 = physical_q0;
        p_data->physical_q0 = cpu_to_le16(physical_q0);
 
        p_data->conn_id = cpu_to_le16(p_conn->conn_id);
@@ -579,8 +550,8 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
 void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
 {
        struct e4_fcoe_task_context *p_task_ctx = NULL;
+       u32 i, lc;
        int rc;
-       u32 i;
 
        spin_lock_init(&p_hwfn->p_fcoe_info->lock);
        for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
@@ -591,10 +562,15 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
                        continue;
 
                memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
-               SET_FIELD(p_task_ctx->timer_context.logical_client_0,
-                         TIMERS_CONTEXT_VALIDLC0, 1);
-               SET_FIELD(p_task_ctx->timer_context.logical_client_1,
-                         TIMERS_CONTEXT_VALIDLC1, 1);
+
+               lc = 0;
+               SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC0, 1);
+               p_task_ctx->timer_context.logical_client_0 = cpu_to_le32(lc);
+
+               lc = 0;
+               SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC1, 1);
+               p_task_ctx->timer_context.logical_client_1 = cpu_to_le32(lc);
+
                SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
                          E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
        }
index 027a76a..19c85ad 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_FCOE_H
@@ -71,9 +45,4 @@ static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
 }
 #endif /* CONFIG_QED_FCOE */
 
-#ifdef CONFIG_QED_LL2
-extern const struct qed_common_ops qed_common_ops_pass;
-extern const struct qed_ll2_ops qed_ll2_ops_pass;
-#endif
-
 #endif /* _QED_FCOE_H */
index f00460d..559df9f 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_HSI_H
@@ -387,7 +361,7 @@ struct core_tx_update_ramrod_data {
        u8 update_qm_pq_id_flg;
        u8 reserved0;
        __le16 qm_pq_id;
-       __le32 reserved1[1];
+       __le32 reserved1;
 };
 
 /* Enum flag for what type of dcb data to update */
@@ -2819,34 +2793,34 @@ struct fw_overlay_buf_hdr {
 
 /* init array header: raw */
 struct init_array_raw_hdr {
-       u32 data;
-#define INIT_ARRAY_RAW_HDR_TYPE_MASK   0xF
-#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT  0
-#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF
-#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT        4
+       __le32                                          data;
+#define INIT_ARRAY_RAW_HDR_TYPE_MASK                   0xF
+#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT                  0
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK                 0xFFFFFFF
+#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT                        4
 };
 
 /* init array header: standard */
 struct init_array_standard_hdr {
-       u32 data;
-#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK      0xF
-#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT     0
-#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK      0xFFFFFFF
-#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT     4
+       __le32                                          data;
+#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK              0xF
+#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT             0
+#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK              0xFFFFFFF
+#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT             4
 };
 
 /* init array header: zipped */
 struct init_array_zipped_hdr {
-       u32 data;
-#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK                0xF
-#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT       0
-#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
-#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT        4
+       __le32                                          data;
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK                        0xF
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT               0
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK         0xFFFFFFF
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT                4
 };
 
 /* init array header: pattern */
 struct init_array_pattern_hdr {
-       u32 data;
+       __le32                                          data;
 #define INIT_ARRAY_PATTERN_HDR_TYPE_MASK               0xF
 #define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT              0
 #define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK       0xF
@@ -2857,10 +2831,10 @@ struct init_array_pattern_hdr {
 
 /* init array header union */
 union init_array_hdr {
-       struct init_array_raw_hdr raw;
-       struct init_array_standard_hdr standard;
-       struct init_array_zipped_hdr zipped;
-       struct init_array_pattern_hdr pattern;
+       struct init_array_raw_hdr                       raw;
+       struct init_array_standard_hdr                  standard;
+       struct init_array_zipped_hdr                    zipped;
+       struct init_array_pattern_hdr                   pattern;
 };
 
 /* init array types */
@@ -2873,54 +2847,54 @@ enum init_array_types {
 
 /* init operation: callback */
 struct init_callback_op {
-       u32 op_data;
-#define INIT_CALLBACK_OP_OP_MASK       0xF
-#define INIT_CALLBACK_OP_OP_SHIFT      0
-#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
-#define INIT_CALLBACK_OP_RESERVED_SHIFT        4
-       u16 callback_id;
-       u16 block_id;
+       __le32                                          op_data;
+#define INIT_CALLBACK_OP_OP_MASK                       0xF
+#define INIT_CALLBACK_OP_OP_SHIFT                      0
+#define INIT_CALLBACK_OP_RESERVED_MASK                 0xFFFFFFF
+#define INIT_CALLBACK_OP_RESERVED_SHIFT                        4
+       __le16                                          callback_id;
+       __le16                                          block_id;
 };
 
 /* init operation: delay */
 struct init_delay_op {
-       u32 op_data;
-#define INIT_DELAY_OP_OP_MASK          0xF
-#define INIT_DELAY_OP_OP_SHIFT         0
-#define INIT_DELAY_OP_RESERVED_MASK    0xFFFFFFF
-#define INIT_DELAY_OP_RESERVED_SHIFT   4
-       u32 delay;
+       __le32                                          op_data;
+#define INIT_DELAY_OP_OP_MASK                          0xF
+#define INIT_DELAY_OP_OP_SHIFT                         0
+#define INIT_DELAY_OP_RESERVED_MASK                    0xFFFFFFF
+#define INIT_DELAY_OP_RESERVED_SHIFT                   4
+       __le32                                          delay;
 };
 
 /* init operation: if_mode */
 struct init_if_mode_op {
-       u32 op_data;
-#define INIT_IF_MODE_OP_OP_MASK                        0xF
-#define INIT_IF_MODE_OP_OP_SHIFT               0
-#define INIT_IF_MODE_OP_RESERVED1_MASK         0xFFF
-#define INIT_IF_MODE_OP_RESERVED1_SHIFT                4
-#define INIT_IF_MODE_OP_CMD_OFFSET_MASK                0xFFFF
-#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT       16
-       u16 reserved2;
-       u16 modes_buf_offset;
+       __le32                                          op_data;
+#define INIT_IF_MODE_OP_OP_MASK                                0xF
+#define INIT_IF_MODE_OP_OP_SHIFT                       0
+#define INIT_IF_MODE_OP_RESERVED1_MASK                 0xFFF
+#define INIT_IF_MODE_OP_RESERVED1_SHIFT                        4
+#define INIT_IF_MODE_OP_CMD_OFFSET_MASK                        0xFFFF
+#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT               16
+       __le16                                          reserved2;
+       __le16                                          modes_buf_offset;
 };
 
 /* init operation: if_phase */
 struct init_if_phase_op {
-       u32 op_data;
-#define INIT_IF_PHASE_OP_OP_MASK               0xF
-#define INIT_IF_PHASE_OP_OP_SHIFT              0
-#define INIT_IF_PHASE_OP_RESERVED1_MASK                0xFFF
-#define INIT_IF_PHASE_OP_RESERVED1_SHIFT       4
-#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK       0xFFFF
-#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT      16
-       u32 phase_data;
-#define INIT_IF_PHASE_OP_PHASE_MASK            0xFF
-#define INIT_IF_PHASE_OP_PHASE_SHIFT           0
-#define INIT_IF_PHASE_OP_RESERVED2_MASK                0xFF
-#define INIT_IF_PHASE_OP_RESERVED2_SHIFT       8
-#define INIT_IF_PHASE_OP_PHASE_ID_MASK         0xFFFF
-#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT                16
+       __le32                                          op_data;
+#define INIT_IF_PHASE_OP_OP_MASK                       0xF
+#define INIT_IF_PHASE_OP_OP_SHIFT                      0
+#define INIT_IF_PHASE_OP_RESERVED1_MASK                        0xFFF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT               4
+#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK               0xFFFF
+#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT              16
+       __le32                                          phase_data;
+#define INIT_IF_PHASE_OP_PHASE_MASK                    0xFF
+#define INIT_IF_PHASE_OP_PHASE_SHIFT                   0
+#define INIT_IF_PHASE_OP_RESERVED2_MASK                        0xFF
+#define INIT_IF_PHASE_OP_RESERVED2_SHIFT               8
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK                 0xFFFF
+#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT                        16
 };
 
 /* init mode operators */
@@ -2933,67 +2907,67 @@ enum init_mode_ops {
 
 /* init operation: raw */
 struct init_raw_op {
-       u32 op_data;
-#define INIT_RAW_OP_OP_MASK            0xF
-#define INIT_RAW_OP_OP_SHIFT           0
-#define INIT_RAW_OP_PARAM1_MASK                0xFFFFFFF
-#define INIT_RAW_OP_PARAM1_SHIFT       4
-       u32 param2;
+       __le32                                          op_data;
+#define INIT_RAW_OP_OP_MASK                            0xF
+#define INIT_RAW_OP_OP_SHIFT                           0
+#define INIT_RAW_OP_PARAM1_MASK                                0xFFFFFFF
+#define INIT_RAW_OP_PARAM1_SHIFT                       4
+       __le32                                          param2;
 };
 
 /* init array params */
 struct init_op_array_params {
-       u16 size;
-       u16 offset;
+       __le16                                          size;
+       __le16                                          offset;
 };
 
 /* Write init operation arguments */
 union init_write_args {
-       u32 inline_val;
-       u32 zeros_count;
-       u32 array_offset;
-       struct init_op_array_params runtime;
+       __le32                                          inline_val;
+       __le32                                          zeros_count;
+       __le32                                          array_offset;
+       struct init_op_array_params                     runtime;
 };
 
 /* init operation: write */
 struct init_write_op {
-       u32 data;
-#define INIT_WRITE_OP_OP_MASK          0xF
-#define INIT_WRITE_OP_OP_SHIFT         0
-#define INIT_WRITE_OP_SOURCE_MASK      0x7
-#define INIT_WRITE_OP_SOURCE_SHIFT     4
-#define INIT_WRITE_OP_RESERVED_MASK    0x1
-#define INIT_WRITE_OP_RESERVED_SHIFT   7
-#define INIT_WRITE_OP_WIDE_BUS_MASK    0x1
-#define INIT_WRITE_OP_WIDE_BUS_SHIFT   8
-#define INIT_WRITE_OP_ADDRESS_MASK     0x7FFFFF
-#define INIT_WRITE_OP_ADDRESS_SHIFT    9
-       union init_write_args args;
+       __le32                                          data;
+#define INIT_WRITE_OP_OP_MASK                          0xF
+#define INIT_WRITE_OP_OP_SHIFT                         0
+#define INIT_WRITE_OP_SOURCE_MASK                      0x7
+#define INIT_WRITE_OP_SOURCE_SHIFT                     4
+#define INIT_WRITE_OP_RESERVED_MASK                    0x1
+#define INIT_WRITE_OP_RESERVED_SHIFT                   7
+#define INIT_WRITE_OP_WIDE_BUS_MASK                    0x1
+#define INIT_WRITE_OP_WIDE_BUS_SHIFT                   8
+#define INIT_WRITE_OP_ADDRESS_MASK                     0x7FFFFF
+#define INIT_WRITE_OP_ADDRESS_SHIFT                    9
+       union init_write_args                           args;
 };
 
 /* init operation: read */
 struct init_read_op {
-       u32 op_data;
-#define INIT_READ_OP_OP_MASK           0xF
-#define INIT_READ_OP_OP_SHIFT          0
-#define INIT_READ_OP_POLL_TYPE_MASK    0xF
-#define INIT_READ_OP_POLL_TYPE_SHIFT   4
-#define INIT_READ_OP_RESERVED_MASK     0x1
-#define INIT_READ_OP_RESERVED_SHIFT    8
-#define INIT_READ_OP_ADDRESS_MASK      0x7FFFFF
-#define INIT_READ_OP_ADDRESS_SHIFT     9
-       u32 expected_val;
+       __le32                                          op_data;
+#define INIT_READ_OP_OP_MASK                           0xF
+#define INIT_READ_OP_OP_SHIFT                          0
+#define INIT_READ_OP_POLL_TYPE_MASK                    0xF
+#define INIT_READ_OP_POLL_TYPE_SHIFT                   4
+#define INIT_READ_OP_RESERVED_MASK                     0x1
+#define INIT_READ_OP_RESERVED_SHIFT                    8
+#define INIT_READ_OP_ADDRESS_MASK                      0x7FFFFF
+#define INIT_READ_OP_ADDRESS_SHIFT                     9
+       __le32                                          expected_val;
 };
 
 /* Init operations union */
 union init_op {
-       struct init_raw_op raw;
-       struct init_write_op write;
-       struct init_read_op read;
-       struct init_if_mode_op if_mode;
-       struct init_if_phase_op if_phase;
-       struct init_callback_op callback;
-       struct init_delay_op delay;
+       struct init_raw_op                              raw;
+       struct init_write_op                            write;
+       struct init_read_op                             read;
+       struct init_if_mode_op                          if_mode;
+       struct init_if_phase_op                         if_phase;
+       struct init_callback_op                         callback;
+       struct init_delay_op                            delay;
 };
 
 /* Init command operation types */
@@ -4417,79 +4391,6 @@ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
        (IRO[66].base + ((roce_pf_id) * IRO[66].m1))
 #define USTORM_ROCE_CQE_STATS_SIZE                     (IRO[66].size)
 
-/* IRO Array */
-static const u32 iro_arr[] = {
-       0x00000000, 0x00000000, 0x00080000,
-       0x00003288, 0x00000088, 0x00880000,
-       0x000058e8, 0x00000020, 0x00200000,
-       0x00000b00, 0x00000008, 0x00040000,
-       0x00000a80, 0x00000008, 0x00040000,
-       0x00000000, 0x00000008, 0x00020000,
-       0x00000080, 0x00000008, 0x00040000,
-       0x00000084, 0x00000008, 0x00020000,
-       0x00005718, 0x00000004, 0x00040000,
-       0x00004dd0, 0x00000000, 0x00780000,
-       0x00003e40, 0x00000000, 0x00780000,
-       0x00004480, 0x00000000, 0x00780000,
-       0x00003210, 0x00000000, 0x00780000,
-       0x00003b50, 0x00000000, 0x00780000,
-       0x00007f58, 0x00000000, 0x00780000,
-       0x00005f58, 0x00000000, 0x00080000,
-       0x00007100, 0x00000000, 0x00080000,
-       0x0000aea0, 0x00000000, 0x00080000,
-       0x00004398, 0x00000000, 0x00080000,
-       0x0000a5a0, 0x00000000, 0x00080000,
-       0x0000bde8, 0x00000000, 0x00080000,
-       0x00000020, 0x00000004, 0x00040000,
-       0x000056c8, 0x00000010, 0x00100000,
-       0x0000c210, 0x00000030, 0x00300000,
-       0x0000b088, 0x00000038, 0x00380000,
-       0x00003d20, 0x00000080, 0x00400000,
-       0x0000bf60, 0x00000000, 0x00040000,
-       0x00004560, 0x00040080, 0x00040000,
-       0x000001f8, 0x00000004, 0x00040000,
-       0x00003d60, 0x00000080, 0x00200000,
-       0x00008960, 0x00000040, 0x00300000,
-       0x0000e840, 0x00000060, 0x00600000,
-       0x00004618, 0x00000080, 0x00380000,
-       0x00010738, 0x000000c0, 0x00c00000,
-       0x000001f8, 0x00000002, 0x00020000,
-       0x0000a2a0, 0x00000000, 0x01080000,
-       0x0000a3a8, 0x00000008, 0x00080000,
-       0x000001c0, 0x00000008, 0x00080000,
-       0x000001f8, 0x00000008, 0x00080000,
-       0x00000ac0, 0x00000008, 0x00080000,
-       0x00002578, 0x00000008, 0x00080000,
-       0x000024f8, 0x00000008, 0x00080000,
-       0x00000280, 0x00000008, 0x00080000,
-       0x00000680, 0x00080018, 0x00080000,
-       0x00000b78, 0x00080018, 0x00020000,
-       0x0000c640, 0x00000050, 0x003c0000,
-       0x00012038, 0x00000018, 0x00100000,
-       0x00011b00, 0x00000040, 0x00180000,
-       0x000095d0, 0x00000050, 0x00200000,
-       0x00008b10, 0x00000040, 0x00280000,
-       0x00011640, 0x00000018, 0x00100000,
-       0x0000c828, 0x00000048, 0x00380000,
-       0x00011710, 0x00000020, 0x00200000,
-       0x00004650, 0x00000080, 0x00100000,
-       0x00003618, 0x00000010, 0x00100000,
-       0x0000a968, 0x00000008, 0x00010000,
-       0x000097a0, 0x00000008, 0x00010000,
-       0x00011990, 0x00000008, 0x00010000,
-       0x0000f018, 0x00000008, 0x00010000,
-       0x00012628, 0x00000008, 0x00010000,
-       0x00011da8, 0x00000008, 0x00010000,
-       0x0000aa78, 0x00000030, 0x00100000,
-       0x0000d768, 0x00000028, 0x00280000,
-       0x00009a58, 0x00000018, 0x00180000,
-       0x00009bd8, 0x00000008, 0x00080000,
-       0x00013a18, 0x00000008, 0x00080000,
-       0x000126e8, 0x00000018, 0x00180000,
-       0x0000e608, 0x00500288, 0x00100000,
-       0x00012970, 0x00000138, 0x00280000,
-};
-
 /* Runtime array offsets */
 #define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET                               0
 #define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET                               1
@@ -11635,37 +11536,98 @@ typedef u32 offsize_t;                /* In DWORDS !!! */
 
 /* PHY configuration */
 struct eth_phy_cfg {
-       u32 speed;
-#define ETH_SPEED_AUTONEG      0
-#define ETH_SPEED_SMARTLINQ    0x8
-
-       u32 pause;
-#define ETH_PAUSE_NONE         0x0
-#define ETH_PAUSE_AUTONEG      0x1
-#define ETH_PAUSE_RX           0x2
-#define ETH_PAUSE_TX           0x4
-
-       u32 adv_speed;
-       u32 loopback_mode;
-#define ETH_LOOPBACK_NONE              (0)
-#define ETH_LOOPBACK_INT_PHY           (1)
-#define ETH_LOOPBACK_EXT_PHY           (2)
-#define ETH_LOOPBACK_EXT               (3)
-#define ETH_LOOPBACK_MAC               (4)
-
-       u32 eee_cfg;
+       u32                                     speed;
+#define ETH_SPEED_AUTONEG                      0x0
+#define ETH_SPEED_SMARTLINQ                    0x8
+
+       u32                                     pause;
+#define ETH_PAUSE_NONE                         0x0
+#define ETH_PAUSE_AUTONEG                      0x1
+#define ETH_PAUSE_RX                           0x2
+#define ETH_PAUSE_TX                           0x4
+
+       u32                                     adv_speed;
+
+       u32                                     loopback_mode;
+#define ETH_LOOPBACK_NONE                      0x0
+#define ETH_LOOPBACK_INT_PHY                   0x1
+#define ETH_LOOPBACK_EXT_PHY                   0x2
+#define ETH_LOOPBACK_EXT                       0x3
+#define ETH_LOOPBACK_MAC                       0x4
+#define ETH_LOOPBACK_CNIG_AH_ONLY_0123         0x5
+#define ETH_LOOPBACK_CNIG_AH_ONLY_2301         0x6
+#define ETH_LOOPBACK_PCS_AH_ONLY               0x7
+#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY       0x8
+#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY       0x9
+
+       u32                                     eee_cfg;
 #define EEE_CFG_EEE_ENABLED                    BIT(0)
 #define EEE_CFG_TX_LPI                         BIT(1)
 #define EEE_CFG_ADV_SPEED_1G                   BIT(2)
 #define EEE_CFG_ADV_SPEED_10G                  BIT(3)
-#define EEE_TX_TIMER_USEC_MASK                 (0xfffffff0)
+#define EEE_TX_TIMER_USEC_MASK                 0xfffffff0
 #define EEE_TX_TIMER_USEC_OFFSET               4
-#define EEE_TX_TIMER_USEC_BALANCED_TIME                (0xa00)
-#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME      (0x100)
-#define EEE_TX_TIMER_USEC_LATENCY_TIME         (0x6000)
-
-       u32 feature_config_flags;
-#define ETH_EEE_MODE_ADV_LPI           (1 << 0)
+#define EEE_TX_TIMER_USEC_BALANCED_TIME                0xa00
+#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME      0x100
+#define EEE_TX_TIMER_USEC_LATENCY_TIME         0x6000
+
+       u32                                     deprecated;
+
+       u32                                     fec_mode;
+#define FEC_FORCE_MODE_MASK                    0x000000ff
+#define FEC_FORCE_MODE_OFFSET                  0
+#define FEC_FORCE_MODE_NONE                    0x00
+#define FEC_FORCE_MODE_FIRECODE                        0x01
+#define FEC_FORCE_MODE_RS                      0x02
+#define FEC_FORCE_MODE_AUTO                    0x07
+#define FEC_EXTENDED_MODE_MASK                 0xffffff00
+#define FEC_EXTENDED_MODE_OFFSET               8
+#define ETH_EXT_FEC_NONE                       0x00000100
+#define ETH_EXT_FEC_10G_NONE                   0x00000200
+#define ETH_EXT_FEC_10G_BASE_R                 0x00000400
+#define ETH_EXT_FEC_20G_NONE                   0x00000800
+#define ETH_EXT_FEC_20G_BASE_R                 0x00001000
+#define ETH_EXT_FEC_25G_NONE                   0x00002000
+#define ETH_EXT_FEC_25G_BASE_R                 0x00004000
+#define ETH_EXT_FEC_25G_RS528                  0x00008000
+#define ETH_EXT_FEC_40G_NONE                   0x00010000
+#define ETH_EXT_FEC_40G_BASE_R                 0x00020000
+#define ETH_EXT_FEC_50G_NONE                   0x00040000
+#define ETH_EXT_FEC_50G_BASE_R                 0x00080000
+#define ETH_EXT_FEC_50G_RS528                  0x00100000
+#define ETH_EXT_FEC_50G_RS544                  0x00200000
+#define ETH_EXT_FEC_100G_NONE                  0x00400000
+#define ETH_EXT_FEC_100G_BASE_R                        0x00800000
+#define ETH_EXT_FEC_100G_RS528                 0x01000000
+#define ETH_EXT_FEC_100G_RS544                 0x02000000
+
+       u32                                     extended_speed;
+#define ETH_EXT_SPEED_MASK                     0x0000ffff
+#define ETH_EXT_SPEED_OFFSET                   0
+#define ETH_EXT_SPEED_AN                       0x00000001
+#define ETH_EXT_SPEED_1G                       0x00000002
+#define ETH_EXT_SPEED_10G                      0x00000004
+#define ETH_EXT_SPEED_20G                      0x00000008
+#define ETH_EXT_SPEED_25G                      0x00000010
+#define ETH_EXT_SPEED_40G                      0x00000020
+#define ETH_EXT_SPEED_50G_BASE_R               0x00000040
+#define ETH_EXT_SPEED_50G_BASE_R2              0x00000080
+#define ETH_EXT_SPEED_100G_BASE_R2             0x00000100
+#define ETH_EXT_SPEED_100G_BASE_R4             0x00000200
+#define ETH_EXT_SPEED_100G_BASE_P4             0x00000400
+#define ETH_EXT_ADV_SPEED_MASK                 0xffff0000
+#define ETH_EXT_ADV_SPEED_OFFSET               16
+#define ETH_EXT_ADV_SPEED_RESERVED             0x00010000
+#define ETH_EXT_ADV_SPEED_1G                   0x00020000
+#define ETH_EXT_ADV_SPEED_10G                  0x00040000
+#define ETH_EXT_ADV_SPEED_20G                  0x00080000
+#define ETH_EXT_ADV_SPEED_25G                  0x00100000
+#define ETH_EXT_ADV_SPEED_40G                  0x00200000
+#define ETH_EXT_ADV_SPEED_50G_BASE_R           0x00400000
+#define ETH_EXT_ADV_SPEED_50G_BASE_R2          0x00800000
+#define ETH_EXT_ADV_SPEED_100G_BASE_R2         0x01000000
+#define ETH_EXT_ADV_SPEED_100G_BASE_R4         0x02000000
+#define ETH_EXT_ADV_SPEED_100G_BASE_P4         0x04000000
 };
 
 struct port_mf_cfg {
@@ -11994,41 +11956,36 @@ struct public_path {
 };
 
 struct public_port {
-       u32 validity_map;
-
-       u32 link_status;
-#define LINK_STATUS_LINK_UP                    0x00000001
-#define LINK_STATUS_SPEED_AND_DUPLEX_MASK      0x0000001e
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD   (1 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD   (2 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10G       (3 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_20G       (4 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_40G       (5 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_50G       (6 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_100G      (7 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_25G       (8 << 1)
-
-#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED     0x00000020
-
-#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE    0x00000040
-#define LINK_STATUS_PARALLEL_DETECTION_USED    0x00000080
-
+       u32                                             validity_map;
+
+       u32                                             link_status;
+#define LINK_STATUS_LINK_UP                            0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK              0x0000001e
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD           (1 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD           (2 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G               (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G               (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G               (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G               (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G              (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G               (8 << 1)
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED             0x00000020
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE            0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED            0x00000080
 #define LINK_STATUS_PFC_ENABLED                                0x00000100
-#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
-#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE       0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE       0x00000400
 #define LINK_STATUS_LINK_PARTNER_10G_CAPABLE           0x00000800
 #define LINK_STATUS_LINK_PARTNER_20G_CAPABLE           0x00001000
 #define LINK_STATUS_LINK_PARTNER_40G_CAPABLE           0x00002000
 #define LINK_STATUS_LINK_PARTNER_50G_CAPABLE           0x00004000
 #define LINK_STATUS_LINK_PARTNER_100G_CAPABLE          0x00008000
 #define LINK_STATUS_LINK_PARTNER_25G_CAPABLE           0x00010000
-
-#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK     0x000C0000
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK     0x000c0000
 #define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE     (0 << 18)
 #define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE       (1 << 18)
 #define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE      (2 << 18)
 #define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE            (3 << 18)
-
 #define LINK_STATUS_SFP_TX_FAULT                       0x00100000
 #define LINK_STATUS_TX_FLOW_CONTROL_ENABLED            0x00200000
 #define LINK_STATUS_RX_FLOW_CONTROL_ENABLED            0x00400000
@@ -12037,6 +11994,11 @@ struct public_port {
 #define LINK_STATUS_MAC_REMOTE_FAULT                   0x02000000
 #define LINK_STATUS_UNSUPPORTED_SPD_REQ                        0x04000000
 
+#define LINK_STATUS_FEC_MODE_MASK                      0x38000000
+#define LINK_STATUS_FEC_MODE_NONE                      (0 << 27)
+#define LINK_STATUS_FEC_MODE_FIRECODE_CL74             (1 << 27)
+#define LINK_STATUS_FEC_MODE_RS_CL91                   (2 << 27)
+
        u32 link_status1;
        u32 ext_phy_fw_version;
        u32 drv_phy_cfg_addr;
@@ -12072,59 +12034,65 @@ struct public_port {
        struct dcbx_mib operational_dcbx_mib;
 
        u32 reserved[2];
-       u32 transceiver_data;
-#define ETH_TRANSCEIVER_STATE_MASK     0x000000FF
-#define ETH_TRANSCEIVER_STATE_SHIFT    0x00000000
-#define ETH_TRANSCEIVER_STATE_OFFSET   0x00000000
-#define ETH_TRANSCEIVER_STATE_UNPLUGGED        0x00000000
-#define ETH_TRANSCEIVER_STATE_PRESENT  0x00000001
-#define ETH_TRANSCEIVER_STATE_VALID    0x00000003
-#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
-#define ETH_TRANSCEIVER_TYPE_MASK       0x0000FF00
-#define ETH_TRANSCEIVER_TYPE_OFFSET     0x8
-#define ETH_TRANSCEIVER_TYPE_NONE                       0x00
-#define ETH_TRANSCEIVER_TYPE_UNKNOWN                    0xFF
-#define ETH_TRANSCEIVER_TYPE_1G_PCC                     0x01
-#define ETH_TRANSCEIVER_TYPE_1G_ACC                     0x02
-#define ETH_TRANSCEIVER_TYPE_1G_LX                      0x03
-#define ETH_TRANSCEIVER_TYPE_1G_SX                      0x04
-#define ETH_TRANSCEIVER_TYPE_10G_SR                     0x05
-#define ETH_TRANSCEIVER_TYPE_10G_LR                     0x06
-#define ETH_TRANSCEIVER_TYPE_10G_LRM                    0x07
-#define ETH_TRANSCEIVER_TYPE_10G_ER                     0x08
-#define ETH_TRANSCEIVER_TYPE_10G_PCC                    0x09
-#define ETH_TRANSCEIVER_TYPE_10G_ACC                    0x0a
-#define ETH_TRANSCEIVER_TYPE_XLPPI                      0x0b
-#define ETH_TRANSCEIVER_TYPE_40G_LR4                    0x0c
-#define ETH_TRANSCEIVER_TYPE_40G_SR4                    0x0d
-#define ETH_TRANSCEIVER_TYPE_40G_CR4                    0x0e
-#define ETH_TRANSCEIVER_TYPE_100G_AOC                   0x0f
-#define ETH_TRANSCEIVER_TYPE_100G_SR4                   0x10
-#define ETH_TRANSCEIVER_TYPE_100G_LR4                   0x11
-#define ETH_TRANSCEIVER_TYPE_100G_ER4                   0x12
-#define ETH_TRANSCEIVER_TYPE_100G_ACC                   0x13
-#define ETH_TRANSCEIVER_TYPE_100G_CR4                   0x14
-#define ETH_TRANSCEIVER_TYPE_4x10G_SR                   0x15
-#define ETH_TRANSCEIVER_TYPE_25G_CA_N                   0x16
-#define ETH_TRANSCEIVER_TYPE_25G_ACC_S                  0x17
-#define ETH_TRANSCEIVER_TYPE_25G_CA_S                   0x18
-#define ETH_TRANSCEIVER_TYPE_25G_ACC_M                  0x19
-#define ETH_TRANSCEIVER_TYPE_25G_CA_L                   0x1a
-#define ETH_TRANSCEIVER_TYPE_25G_ACC_L                  0x1b
-#define ETH_TRANSCEIVER_TYPE_25G_SR                     0x1c
-#define ETH_TRANSCEIVER_TYPE_25G_LR                     0x1d
-#define ETH_TRANSCEIVER_TYPE_25G_AOC                    0x1e
-#define ETH_TRANSCEIVER_TYPE_4x10G                      0x1f
-#define ETH_TRANSCEIVER_TYPE_4x25G_CR                   0x20
-#define ETH_TRANSCEIVER_TYPE_1000BASET                  0x21
-#define ETH_TRANSCEIVER_TYPE_10G_BASET                  0x22
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR      0x30
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR      0x31
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR      0x32
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR     0x33
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR     0x34
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR     0x35
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC    0x36
+
+       u32                                             transceiver_data;
+#define ETH_TRANSCEIVER_STATE_MASK                     0x000000ff
+#define ETH_TRANSCEIVER_STATE_SHIFT                    0x00000000
+#define ETH_TRANSCEIVER_STATE_OFFSET                   0x00000000
+#define ETH_TRANSCEIVER_STATE_UNPLUGGED                        0x00000000
+#define ETH_TRANSCEIVER_STATE_PRESENT                  0x00000001
+#define ETH_TRANSCEIVER_STATE_VALID                    0x00000003
+#define ETH_TRANSCEIVER_STATE_UPDATING                 0x00000008
+#define ETH_TRANSCEIVER_TYPE_MASK                      0x0000ff00
+#define ETH_TRANSCEIVER_TYPE_OFFSET                    0x8
+#define ETH_TRANSCEIVER_TYPE_NONE                      0x00
+#define ETH_TRANSCEIVER_TYPE_UNKNOWN                   0xff
+#define ETH_TRANSCEIVER_TYPE_1G_PCC                    0x01
+#define ETH_TRANSCEIVER_TYPE_1G_ACC                    0x02
+#define ETH_TRANSCEIVER_TYPE_1G_LX                     0x03
+#define ETH_TRANSCEIVER_TYPE_1G_SX                     0x04
+#define ETH_TRANSCEIVER_TYPE_10G_SR                    0x05
+#define ETH_TRANSCEIVER_TYPE_10G_LR                    0x06
+#define ETH_TRANSCEIVER_TYPE_10G_LRM                   0x07
+#define ETH_TRANSCEIVER_TYPE_10G_ER                    0x08
+#define ETH_TRANSCEIVER_TYPE_10G_PCC                   0x09
+#define ETH_TRANSCEIVER_TYPE_10G_ACC                   0x0a
+#define ETH_TRANSCEIVER_TYPE_XLPPI                     0x0b
+#define ETH_TRANSCEIVER_TYPE_40G_LR4                   0x0c
+#define ETH_TRANSCEIVER_TYPE_40G_SR4                   0x0d
+#define ETH_TRANSCEIVER_TYPE_40G_CR4                   0x0e
+#define ETH_TRANSCEIVER_TYPE_100G_AOC                  0x0f
+#define ETH_TRANSCEIVER_TYPE_100G_SR4                  0x10
+#define ETH_TRANSCEIVER_TYPE_100G_LR4                  0x11
+#define ETH_TRANSCEIVER_TYPE_100G_ER4                  0x12
+#define ETH_TRANSCEIVER_TYPE_100G_ACC                  0x13
+#define ETH_TRANSCEIVER_TYPE_100G_CR4                  0x14
+#define ETH_TRANSCEIVER_TYPE_4x10G_SR                  0x15
+#define ETH_TRANSCEIVER_TYPE_25G_CA_N                  0x16
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_S                 0x17
+#define ETH_TRANSCEIVER_TYPE_25G_CA_S                  0x18
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_M                 0x19
+#define ETH_TRANSCEIVER_TYPE_25G_CA_L                  0x1a
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_L                 0x1b
+#define ETH_TRANSCEIVER_TYPE_25G_SR                    0x1c
+#define ETH_TRANSCEIVER_TYPE_25G_LR                    0x1d
+#define ETH_TRANSCEIVER_TYPE_25G_AOC                   0x1e
+#define ETH_TRANSCEIVER_TYPE_4x10G                     0x1f
+#define ETH_TRANSCEIVER_TYPE_4x25G_CR                  0x20
+#define ETH_TRANSCEIVER_TYPE_1000BASET                 0x21
+#define ETH_TRANSCEIVER_TYPE_10G_BASET                 0x22
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR     0x30
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR     0x31
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR     0x32
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR    0x33
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR    0x34
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR    0x35
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC   0x36
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR     0x37
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR     0x38
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR      0x39
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR      0x3a
+
        u32 wol_info;
        u32 wol_pkt_len;
        u32 wol_pkt_details;
@@ -12617,66 +12585,68 @@ struct public_drv_mb {
 #define DRV_MB_PARAM_SET_LED_MODE_ON           0x1
 #define DRV_MB_PARAM_SET_LED_MODE_OFF          0x2
 
-#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET           0
-#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK             0x00000003
-#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET           2
-#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK             0x000000FC
-#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET    8
-#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK      0x0000FF00
-#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET         16
-#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK           0xFFFF0000
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET                   0
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK                     0x00000003
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET                   2
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK                     0x000000fc
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET            8
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK              0x0000ff00
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET                 16
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK                   0xffff0000
 
        /* Resource Allocation params - Driver version support */
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT        16
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT        0
-
-#define DRV_MB_PARAM_BIST_REGISTER_TEST                1
-#define DRV_MB_PARAM_BIST_CLOCK_TEST           2
-#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES  3
-#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX      4
-
-#define DRV_MB_PARAM_BIST_RC_UNKNOWN           0
-#define DRV_MB_PARAM_BIST_RC_PASSED            1
-#define DRV_MB_PARAM_BIST_RC_FAILED            2
-#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
-
-#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT     0
-#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK      0x000000FF
-#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT       8
-#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK                0x0000FF00
-
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK         0x0000FFFF
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET       0
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE          0x00000002
-#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK                0x00010000
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK         0xffff0000
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT                16
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK         0x0000ffff
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT                0
+
+#define DRV_MB_PARAM_BIST_REGISTER_TEST                                1
+#define DRV_MB_PARAM_BIST_CLOCK_TEST                           2
+#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES                  3
+#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX              4
+
+#define DRV_MB_PARAM_BIST_RC_UNKNOWN                           0
+#define DRV_MB_PARAM_BIST_RC_PASSED                            1
+#define DRV_MB_PARAM_BIST_RC_FAILED                            2
+#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER                 3
+
+#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT                     0
+#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK                      0x000000ff
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT               8
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK                        0x0000ff00
+
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK                 0x0000ffff
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET               0
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE                  0x00000002
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL          0x00000004
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL        0x00000008
+#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK                        0x00010000
 
 /* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */
-#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET       0
-#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK         0xFF
+#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET               0
+#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK                 0xff
 
 /* Driver attributes params */
-#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET              0
-#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK                        0x00FFFFFF
-#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET              24
-#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK                        0xFF000000
-
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET          0
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT           0
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK            0x0000FFFF
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT          16
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK           0x00010000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT         17
-#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK          0x00020000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT       18
-#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK                0x00040000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT         19
-#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK          0x00080000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT   20
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK    0x00100000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT    24
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK     0x0f000000
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET                      0
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK                                0x00ffffff
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET                      24
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK                                0xff000000
+
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET                  0
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT                   0
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK                    0x0000ffff
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT                  16
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK                   0x00010000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT                 17
+#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK                  0x00020000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT               18
+#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK                        0x00040000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT                 19
+#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK                  0x00080000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT           20
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK            0x00100000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT            24
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK             0x0f000000
 
        u32 fw_mb_header;
 #define FW_MSG_CODE_MASK                       0xffff0000
@@ -12723,55 +12693,57 @@ struct public_drv_mb {
 
 #define FW_MSG_CODE_MDUMP_INVALID_CMD          0x00030000
 
-       u32 fw_mb_param;
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK  0xFFFF0000
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK  0x0000FFFF
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
-
-       /* get pf rdma protocol command responce */
-#define FW_MB_PARAM_GET_PF_RDMA_NONE           0x0
-#define FW_MB_PARAM_GET_PF_RDMA_ROCE           0x1
-#define FW_MB_PARAM_GET_PF_RDMA_IWARP          0x2
-#define FW_MB_PARAM_GET_PF_RDMA_BOTH           0x3
-
-/* get MFW feature support response */
-#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ  0x00000001
-#define FW_MB_PARAM_FEATURE_SUPPORT_EEE                0x00000002
-#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK      0x00010000
-
-#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR  (1 << 0)
-
-#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK   0x00000001
-#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0
-#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK   0x00000002
-#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1
-#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK    0x00000004
-#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT  2
-#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK    0x00000008
-#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT  3
-
-#define FW_MB_PARAM_PPFID_BITMAP_MASK  0xFF
-#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0
-
-       u32 drv_pulse_mb;
-#define DRV_PULSE_SEQ_MASK                     0x00007fff
-#define DRV_PULSE_SYSTEM_TIME_MASK             0xffff0000
-#define DRV_PULSE_ALWAYS_ALIVE                 0x00008000
-
-       u32 mcp_pulse_mb;
-#define MCP_PULSE_SEQ_MASK                     0x00007fff
-#define MCP_PULSE_ALWAYS_ALIVE                 0x00008000
-#define MCP_EVENT_MASK                         0xffff0000
-#define MCP_EVENT_OTHER_DRIVER_RESET_REQ       0x00010000
-
-       union drv_union_data union_data;
-};
-
-#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK       0x00ffffff
-#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT      0
-#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK         0xff000000
-#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT                24
+       u32                                                     fw_mb_param;
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK          0xffff0000
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT         16
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK          0x0000ffff
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT         0
+
+       /* Get PF RDMA protocol command response */
+#define FW_MB_PARAM_GET_PF_RDMA_NONE                           0x0
+#define FW_MB_PARAM_GET_PF_RDMA_ROCE                           0x1
+#define FW_MB_PARAM_GET_PF_RDMA_IWARP                          0x2
+#define FW_MB_PARAM_GET_PF_RDMA_BOTH                           0x3
+
+       /* Get MFW feature support response */
+#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ                  BIT(0)
+#define FW_MB_PARAM_FEATURE_SUPPORT_EEE                                BIT(1)
+#define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL                        BIT(5)
+#define FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL      BIT(6)
+#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK                      BIT(16)
+
+#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR                  BIT(0)
+
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK               0x00000001
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT              0
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK               0x00000002
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT              1
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK                        0x00000004
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT               2
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK                        0x00000008
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT               3
+
+#define FW_MB_PARAM_PPFID_BITMAP_MASK                          0xff
+#define FW_MB_PARAM_PPFID_BITMAP_SHIFT                         0
+
+       u32                                                     drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK                                     0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK                             0xffff0000
+#define DRV_PULSE_ALWAYS_ALIVE                                 0x00008000
+
+       u32                                                     mcp_pulse_mb;
+#define MCP_PULSE_SEQ_MASK                                     0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE                                 0x00008000
+#define MCP_EVENT_MASK                                         0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ                       0x00010000
+
+       union drv_union_data                                    union_data;
+};
+
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK               0x00ffffff
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT              0
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK                 0xff000000
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT                        24
 
 enum MFW_DRV_MSG_TYPE {
        MFW_DRV_MSG_LINK_CHANGE,
@@ -13058,122 +13030,138 @@ enum tlvs {
 };
 
 struct nvm_cfg_mac_address {
-       u32 mac_addr_hi;
-#define NVM_CFG_MAC_ADDRESS_HI_MASK    0x0000FFFF
-#define NVM_CFG_MAC_ADDRESS_HI_OFFSET  0
-       u32 mac_addr_lo;
+       u32                                                     mac_addr_hi;
+#define NVM_CFG_MAC_ADDRESS_HI_MASK                            0x0000ffff
+#define NVM_CFG_MAC_ADDRESS_HI_OFFSET                          0
+
+       u32                                                     mac_addr_lo;
 };
 
 struct nvm_cfg1_glob {
-       u32 generic_cont0;
-#define NVM_CFG1_GLOB_MF_MODE_MASK             0x00000FF0
-#define NVM_CFG1_GLOB_MF_MODE_OFFSET           4
-#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED       0x0
-#define NVM_CFG1_GLOB_MF_MODE_DEFAULT          0x1
-#define NVM_CFG1_GLOB_MF_MODE_SPIO4            0x2
-#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0          0x3
-#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5          0x4
-#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0          0x5
-#define NVM_CFG1_GLOB_MF_MODE_BD               0x6
-#define NVM_CFG1_GLOB_MF_MODE_UFP              0x7
-       u32 engineering_change[3];
-       u32 manufacturing_id;
-       u32 serial_number[4];
-       u32 pcie_cfg;
-       u32 mgmt_traffic;
-       u32 core_cfg;
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK           0x000000FF
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET         0
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G       0x0
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G          0x1
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G      0x2
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F                0x3
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E     0x4
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G       0x5
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G          0xB
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G          0xC
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G          0xD
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G          0xE
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G          0xF
-
-       u32 e_lane_cfg1;
-       u32 e_lane_cfg2;
-       u32 f_lane_cfg1;
-       u32 f_lane_cfg2;
-       u32 mps10_preemphasis;
-       u32 mps10_driver_current;
-       u32 mps25_preemphasis;
-       u32 mps25_driver_current;
-       u32 pci_id;
-       u32 pci_subsys_id;
-       u32 bar;
-       u32 mps10_txfir_main;
-       u32 mps10_txfir_post;
-       u32 mps25_txfir_main;
-       u32 mps25_txfir_post;
-       u32 manufacture_ver;
-       u32 manufacture_time;
-       u32 led_global_settings;
-       u32 generic_cont1;
-       u32 mbi_version;
-#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK               0x000000FF
-#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET             0
-#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK               0x0000FF00
-#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET             8
-#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK               0x00FF0000
-#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET             16
-       u32 mbi_date;
-       u32 misc_sig;
-       u32 device_capabilities;
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET     0x1
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE         0x2
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI                0x4
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE         0x8
-       u32 power_dissipated;
-       u32 power_consumed;
-       u32 efi_version;
-       u32 multi_network_modes_capability;
-       u32 reserved[41];
+       u32                                                     generic_cont0;
+#define NVM_CFG1_GLOB_MF_MODE_MASK                             0x00000ff0
+#define NVM_CFG1_GLOB_MF_MODE_OFFSET                           4
+#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED                       0x0
+#define NVM_CFG1_GLOB_MF_MODE_DEFAULT                          0x1
+#define NVM_CFG1_GLOB_MF_MODE_SPIO4                            0x2
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0                          0x3
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5                          0x4
+#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0                          0x5
+#define NVM_CFG1_GLOB_MF_MODE_BD                               0x6
+#define NVM_CFG1_GLOB_MF_MODE_UFP                              0x7
+
+       u32                                                     engineering_change[3];
+       u32                                                     manufacturing_id;
+       u32                                                     serial_number[4];
+       u32                                                     pcie_cfg;
+       u32                                                     mgmt_traffic;
+
+       u32                                                     core_cfg;
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK                   0x000000ff
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET                 0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G               0x0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G                  0x1
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G              0x2
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F                        0x3
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E             0x4
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G               0x5
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G                  0xb
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G                  0xc
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G                  0xd
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G                  0xe
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G                  0xf
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1           0x11
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1           0x12
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2          0x13
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2          0x14
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4          0x15
+
+       u32                                                     e_lane_cfg1;
+       u32                                                     e_lane_cfg2;
+       u32                                                     f_lane_cfg1;
+       u32                                                     f_lane_cfg2;
+       u32                                                     mps10_preemphasis;
+       u32                                                     mps10_driver_current;
+       u32                                                     mps25_preemphasis;
+       u32                                                     mps25_driver_current;
+       u32                                                     pci_id;
+       u32                                                     pci_subsys_id;
+       u32                                                     bar;
+       u32                                                     mps10_txfir_main;
+       u32                                                     mps10_txfir_post;
+       u32                                                     mps25_txfir_main;
+       u32                                                     mps25_txfir_post;
+       u32                                                     manufacture_ver;
+       u32                                                     manufacture_time;
+       u32                                                     led_global_settings;
+       u32                                                     generic_cont1;
+
+       u32                                                     mbi_version;
+#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK                       0x000000ff
+#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET                     0
+#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK                       0x0000ff00
+#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET                     8
+#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK                       0x00ff0000
+#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET                     16
+
+       u32                                                     mbi_date;
+       u32                                                     misc_sig;
+
+       u32                                                     device_capabilities;
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET             0x1
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE                 0x2
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI                        0x4
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE                 0x8
+
+       u32                                                     power_dissipated;
+       u32                                                     power_consumed;
+       u32                                                     efi_version;
+       u32                                                     multi_net_modes_cap;
+       u32                                                     reserved[41];
 };
 
 struct nvm_cfg1_path {
-       u32 reserved[30];
+       u32                                                     reserved[30];
 };
 
 struct nvm_cfg1_port {
-       u32 reserved__m_relocated_to_option_123;
-       u32 reserved__m_relocated_to_option_124;
-       u32 generic_cont0;
-#define NVM_CFG1_PORT_DCBX_MODE_MASK                           0x000F0000
+       u32                                                     rel_to_opt123;
+       u32                                                     rel_to_opt124;
+
+       u32                                                     generic_cont0;
+#define NVM_CFG1_PORT_DCBX_MODE_MASK                           0x000f0000
 #define NVM_CFG1_PORT_DCBX_MODE_OFFSET                         16
 #define NVM_CFG1_PORT_DCBX_MODE_DISABLED                       0x0
 #define NVM_CFG1_PORT_DCBX_MODE_IEEE                           0x1
 #define NVM_CFG1_PORT_DCBX_MODE_CEE                            0x2
 #define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC                                0x3
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK           0x00F00000
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK           0x00f00000
 #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET         20
 #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET       0x1
 #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE           0x2
 #define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI          0x4
-       u32 pcie_cfg;
-       u32 features;
-       u32 speed_cap_mask;
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK           0x0000FFFF
+
+       u32                                                     pcie_cfg;
+       u32                                                     features;
+
+       u32                                                     speed_cap_mask;
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK           0x0000ffff
 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET         0
 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G             0x1
 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G            0x2
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G             0x4
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G            0x4
 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G            0x8
 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G            0x10
 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G            0x20
 #define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G                0x40
-       u32 link_settings;
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK                      0x0000000F
+
+       u32                                                     link_settings;
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK                      0x0000000f
 #define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET                    0
 #define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG                   0x0
 #define NVM_CFG1_PORT_DRV_LINK_SPEED_1G                                0x1
 #define NVM_CFG1_PORT_DRV_LINK_SPEED_10G                       0x2
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G                        0x3
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G                       0x3
 #define NVM_CFG1_PORT_DRV_LINK_SPEED_25G                       0x4
 #define NVM_CFG1_PORT_DRV_LINK_SPEED_40G                       0x5
 #define NVM_CFG1_PORT_DRV_LINK_SPEED_50G                       0x6
@@ -13184,49 +13172,92 @@ struct nvm_cfg1_port {
 #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG                 0x1
 #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX                      0x2
 #define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX                      0x4
-       u32 phy_cfg;
-       u32 mgmt_traffic;
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK                      0x000e0000
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET                    17
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE                      0x0
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE                  0x1
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS                                0x2
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO                      0x7
+
+       u32                                                     phy_cfg;
+       u32                                                     mgmt_traffic;
 
-       u32 ext_phy;
+       u32                                                     ext_phy;
        /* EEE power saving mode */
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK               0x00FF0000
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK               0x00ff0000
 #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET             16
 #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED           0x0
 #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED           0x1
 #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE         0x2
 #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY                0x3
 
-       u32 mba_cfg1;
-       u32 mba_cfg2;
-       u32 vf_cfg;
-       struct nvm_cfg_mac_address lldp_mac_address;
-       u32 led_port_settings;
-       u32 transceiver_00;
-       u32 device_ids;
-       u32 board_cfg;
-#define NVM_CFG1_PORT_PORT_TYPE_MASK                            0x000000FF
-#define NVM_CFG1_PORT_PORT_TYPE_OFFSET                          0
-#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED                       0x0
-#define NVM_CFG1_PORT_PORT_TYPE_MODULE                          0x1
-#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE                       0x2
-#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY                         0x3
-#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE                    0x4
-       u32 mnm_10g_cap;
-       u32 mnm_10g_ctrl;
-       u32 mnm_10g_misc;
-       u32 mnm_25g_cap;
-       u32 mnm_25g_ctrl;
-       u32 mnm_25g_misc;
-       u32 mnm_40g_cap;
-       u32 mnm_40g_ctrl;
-       u32 mnm_40g_misc;
-       u32 mnm_50g_cap;
-       u32 mnm_50g_ctrl;
-       u32 mnm_50g_misc;
-       u32 mnm_100g_cap;
-       u32 mnm_100g_ctrl;
-       u32 mnm_100g_misc;
-       u32 reserved[116];
+       u32                                                     mba_cfg1;
+       u32                                                     mba_cfg2;
+       u32                                                     vf_cfg;
+       struct nvm_cfg_mac_address                              lldp_mac_address;
+       u32                                                     led_port_settings;
+       u32                                                     transceiver_00;
+       u32                                                     device_ids;
+
+       u32                                                     board_cfg;
+#define NVM_CFG1_PORT_PORT_TYPE_MASK                           0x000000ff
+#define NVM_CFG1_PORT_PORT_TYPE_OFFSET                         0
+#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED                      0x0
+#define NVM_CFG1_PORT_PORT_TYPE_MODULE                         0x1
+#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE                      0x2
+#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY                                0x3
+#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE                   0x4
+
+       u32                                                     mnm_10g_cap;
+       u32                                                     mnm_10g_ctrl;
+       u32                                                     mnm_10g_misc;
+       u32                                                     mnm_25g_cap;
+       u32                                                     mnm_25g_ctrl;
+       u32                                                     mnm_25g_misc;
+       u32                                                     mnm_40g_cap;
+       u32                                                     mnm_40g_ctrl;
+       u32                                                     mnm_40g_misc;
+       u32                                                     mnm_50g_cap;
+       u32                                                     mnm_50g_ctrl;
+       u32                                                     mnm_50g_misc;
+       u32                                                     mnm_100g_cap;
+       u32                                                     mnm_100g_ctrl;
+       u32                                                     mnm_100g_misc;
+
+       u32                                                     temperature;
+       u32                                                     ext_phy_cfg1;
+
+       u32                                                     extended_speed;
+#define NVM_CFG1_PORT_EXTENDED_SPEED_MASK                      0x0000ffff
+#define NVM_CFG1_PORT_EXTENDED_SPEED_OFFSET                    0
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN              0x1
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G              0x2
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G             0x4
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G             0x8
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G             0x10
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G             0x20
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R           0x40
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2          0x80
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2         0x100
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4         0x200
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4         0x400
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_MASK                  0xffff0000
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_OFFSET                        16
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED    0x1
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G          0x2
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G         0x4
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G         0x8
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G         0x10
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G         0x20
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R       0x40
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2      0x80
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2     0x100
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4     0x200
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4     0x400
+
+       u32                                                     extended_fec_mode;
+
+       u32                                                     reserved[112];
 };
 
 struct nvm_cfg1_func {
index 5fa2514..554f30b 100644 (file)
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/types.h>
@@ -838,9 +812,8 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
        return rc;
 }
 
-void qed_hw_err_notify(struct qed_hwfn *p_hwfn,
-                      struct qed_ptt *p_ptt,
-                      enum qed_hw_err_type err_type, char *fmt, ...)
+void qed_hw_err_notify(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                      enum qed_hw_err_type err_type, const char *fmt, ...)
 {
        char buf[QED_HW_ERR_MAX_STR_SIZE];
        va_list vl;
index f5b109b..2734f49 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_HW_H
@@ -327,7 +301,8 @@ int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
  * @param fmt - debug data buffer to send to the MFW
  * @param ... - buffer format args
  */
-void qed_hw_err_notify(struct qed_hwfn *p_hwfn,
-                      struct qed_ptt *p_ptt,
-                      enum qed_hw_err_type err_type, char *fmt, ...);
+void __printf(4, 5) __cold qed_hw_err_notify(struct qed_hwfn *p_hwfn,
+                                            struct qed_ptt *p_ptt,
+                                            enum qed_hw_err_type err_type,
+                                            const char *fmt, ...);
 #endif
index 2f1049b..ea888a2 100644 (file)
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/types.h>
@@ -182,23 +156,25 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
                  cmd ## _ ## field, \
                  value)
 
-#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, rl_id, \
-                         ext_voq, wrr) \
-       do { \
-               typeof(map) __map; \
-               memset(&__map, 0, sizeof(__map)); \
-               SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _PQ_VALID, 1); \
-               SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_VALID, \
-                         rl_valid ? 1 : 0);\
-               SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VP_PQ_ID, \
-                         vp_pq_id); \
-               SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _RL_ID, rl_id); \
-               SET_FIELD(__map.reg, QM_RF_PQ_MAP_ ## chip ## _VOQ, ext_voq); \
-               SET_FIELD(__map.reg, \
-                         QM_RF_PQ_MAP_ ## chip ## _WRR_WEIGHT_GROUP, wrr); \
-               STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
-                            *((u32 *)&__map)); \
-               (map) = __map; \
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid,              \
+                         rl_id, ext_voq, wrr)                                \
+       do {                                                                  \
+               u32 __reg = 0;                                                \
+                                                                             \
+               BUILD_BUG_ON(sizeof((map).reg) != sizeof(__reg));             \
+                                                                             \
+               SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1);          \
+               SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_RL_VALID,              \
+                         !!(rl_valid));                                      \
+               SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, (vp_pq_id)); \
+               SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_RL_ID, (rl_id));       \
+               SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_VOQ, (ext_voq));       \
+               SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP,      \
+                         (wrr));                                             \
+                                                                             \
+               STORE_RT_REG((p_hwfn), QM_REG_TXPQMAP_RT_OFFSET + (pq_id),    \
+                            __reg);                                          \
+               (map).reg = cpu_to_le32(__reg);                               \
        } while (0)
 
 #define WRITE_PQ_INFO_TO_RAM   1
@@ -1022,20 +998,23 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
        } while (0)
 
 /**
- * @brief qed_dmae_to_grc - is an internal function - writes from host to
- * wide-bus registers (split registers are not supported yet)
+ * qed_dmae_to_grc() - Internal function for writing from host to
+ * wide-bus registers (split registers are not supported yet).
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window used for writing the registers.
+ * @p_data: Pointer to source data.
+ * @addr: Destination register address.
+ * @len_in_dwords: Data length in dwords (u32).
  *
- * @param p_hwfn - HW device data
- * @param p_ptt - ptt window used for writing the registers.
- * @param p_data - pointer to source data.
- * @param addr - Destination register address.
- * @param len_in_dwords - data length in DWARDS (u32)
+ * Return: Length of the written data in dwords (u32) or -1 on invalid
+ *         input.
  */
-static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn,
-                          struct qed_ptt *p_ptt,
-                          u32 *p_data, u32 addr, u32 len_in_dwords)
+static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+                          __le32 *p_data, u32 addr, u32 len_in_dwords)
 {
        struct qed_dmae_params params = {};
+       u32 *data_cpu;
        int rc;
 
        if (!p_data)
@@ -1054,8 +1033,13 @@ static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn,
                DP_VERBOSE(p_hwfn,
                           QED_MSG_DEBUG,
                           "Failed writing to chip using DMAE, using GRC instead\n");
-               /* write to registers using GRC */
-               ARR_REG_WR(p_hwfn, p_ptt, addr, p_data, len_in_dwords);
+
+               /* Swap to CPU byteorder and write to registers using GRC */
+               data_cpu = (__force u32 *)p_data;
+               le32_to_cpu_array(data_cpu, len_in_dwords);
+
+               ARR_REG_WR(p_hwfn, p_ptt, addr, data_cpu, len_in_dwords);
+               cpu_to_le32_array(data_cpu, len_in_dwords);
        }
 
        return len_in_dwords;
@@ -1256,7 +1240,7 @@ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
        qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
 
        /* Zero ramline */
-       qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
+       qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo,
                        PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
                        sizeof(ram_line) / REG_SIZE);
 }
@@ -1268,8 +1252,10 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
                    bool udp,
                    bool ipv4, bool ipv6, enum gft_profile_type profile_type)
 {
-       u32 reg_val, cam_line, search_non_ip_as_gft;
-       struct regpair ram_line = { };
+       struct regpair ram_line;
+       u32 search_non_ip_as_gft;
+       u32 reg_val, cam_line;
+       u32 lo = 0, hi = 0;
 
        if (!ipv6 && !ipv4)
                DP_NOTICE(p_hwfn,
@@ -1340,43 +1326,46 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
        search_non_ip_as_gft = 0;
 
        /* Tunnel type */
-       SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
-       SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
+       SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
+       SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
 
        if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
-               SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
-               SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
-               SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
-               SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
-               SET_FIELD(ram_line.lo, GFT_RAM_LINE_SRC_PORT, 1);
-               SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
+               SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1);
+               SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1);
+               SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+               SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
+               SET_FIELD(lo, GFT_RAM_LINE_SRC_PORT, 1);
+               SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1);
        } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
-               SET_FIELD(ram_line.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
-               SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
-               SET_FIELD(ram_line.lo, GFT_RAM_LINE_DST_PORT, 1);
+               SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+               SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
+               SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1);
        } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
-               SET_FIELD(ram_line.hi, GFT_RAM_LINE_DST_IP, 1);
-               SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
+               SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1);
+               SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
        } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
-               SET_FIELD(ram_line.hi, GFT_RAM_LINE_SRC_IP, 1);
-               SET_FIELD(ram_line.lo, GFT_RAM_LINE_ETHERTYPE, 1);
+               SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1);
+               SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
        } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
-               SET_FIELD(ram_line.lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
+               SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
 
                /* Allow tunneled traffic without inner IP */
                search_non_ip_as_gft = 1;
        }
 
+       ram_line.lo = cpu_to_le32(lo);
+       ram_line.hi = cpu_to_le32(hi);
+
        qed_wr(p_hwfn,
               p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft);
-       qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
+       qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo,
                        PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
                        sizeof(ram_line) / REG_SIZE);
 
        /* Set default profile so that no filter match will happen */
-       ram_line.lo = 0xffffffff;
-       ram_line.hi = 0x3ff;
-       qed_dmae_to_grc(p_hwfn, p_ptt, (u32 *)&ram_line,
+       ram_line.lo = cpu_to_le32(0xffffffff);
+       ram_line.hi = cpu_to_le32(0x3ff);
+       qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo,
                        PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
                        PRS_GFT_CAM_LINES_NO_MATCH,
                        sizeof(ram_line) / REG_SIZE);
@@ -1394,7 +1383,7 @@ static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
        u8 crc, validation_byte = 0;
        static u8 crc8_table_valid; /* automatically initialized to 0 */
        u32 validation_string = 0;
-       u32 data_to_crc;
+       __be32 data_to_crc;
 
        if (!crc8_table_valid) {
                crc8_populate_msb(cdu_crc8_table, 0x07);
@@ -1416,10 +1405,9 @@ static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
                validation_string |= (conn_type & 0xF);
 
        /* Convert to big-endian and calculate CRC8 */
-       data_to_crc = be32_to_cpu(validation_string);
-
-       crc = crc8(cdu_crc8_table,
-                  (u8 *)&data_to_crc, sizeof(data_to_crc), CRC8_INIT_VALUE);
+       data_to_crc = cpu_to_be32(validation_string);
+       crc = crc8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
+                  CRC8_INIT_VALUE);
 
        /* The validation byte [7:0] is composed:
         * for type A validation
index 5a6e4ac..7e6c638 100644 (file)
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/types.h>
@@ -69,6 +43,79 @@ static u32 pxp_global_win[] = {
        0,
 };
 
+/* IRO Array */
+static const u32 iro_arr[] = {
+       0x00000000, 0x00000000, 0x00080000,
+       0x00003288, 0x00000088, 0x00880000,
+       0x000058e8, 0x00000020, 0x00200000,
+       0x00000b00, 0x00000008, 0x00040000,
+       0x00000a80, 0x00000008, 0x00040000,
+       0x00000000, 0x00000008, 0x00020000,
+       0x00000080, 0x00000008, 0x00040000,
+       0x00000084, 0x00000008, 0x00020000,
+       0x00005718, 0x00000004, 0x00040000,
+       0x00004dd0, 0x00000000, 0x00780000,
+       0x00003e40, 0x00000000, 0x00780000,
+       0x00004480, 0x00000000, 0x00780000,
+       0x00003210, 0x00000000, 0x00780000,
+       0x00003b50, 0x00000000, 0x00780000,
+       0x00007f58, 0x00000000, 0x00780000,
+       0x00005f58, 0x00000000, 0x00080000,
+       0x00007100, 0x00000000, 0x00080000,
+       0x0000aea0, 0x00000000, 0x00080000,
+       0x00004398, 0x00000000, 0x00080000,
+       0x0000a5a0, 0x00000000, 0x00080000,
+       0x0000bde8, 0x00000000, 0x00080000,
+       0x00000020, 0x00000004, 0x00040000,
+       0x000056c8, 0x00000010, 0x00100000,
+       0x0000c210, 0x00000030, 0x00300000,
+       0x0000b088, 0x00000038, 0x00380000,
+       0x00003d20, 0x00000080, 0x00400000,
+       0x0000bf60, 0x00000000, 0x00040000,
+       0x00004560, 0x00040080, 0x00040000,
+       0x000001f8, 0x00000004, 0x00040000,
+       0x00003d60, 0x00000080, 0x00200000,
+       0x00008960, 0x00000040, 0x00300000,
+       0x0000e840, 0x00000060, 0x00600000,
+       0x00004618, 0x00000080, 0x00380000,
+       0x00010738, 0x000000c0, 0x00c00000,
+       0x000001f8, 0x00000002, 0x00020000,
+       0x0000a2a0, 0x00000000, 0x01080000,
+       0x0000a3a8, 0x00000008, 0x00080000,
+       0x000001c0, 0x00000008, 0x00080000,
+       0x000001f8, 0x00000008, 0x00080000,
+       0x00000ac0, 0x00000008, 0x00080000,
+       0x00002578, 0x00000008, 0x00080000,
+       0x000024f8, 0x00000008, 0x00080000,
+       0x00000280, 0x00000008, 0x00080000,
+       0x00000680, 0x00080018, 0x00080000,
+       0x00000b78, 0x00080018, 0x00020000,
+       0x0000c640, 0x00000050, 0x003c0000,
+       0x00012038, 0x00000018, 0x00100000,
+       0x00011b00, 0x00000040, 0x00180000,
+       0x000095d0, 0x00000050, 0x00200000,
+       0x00008b10, 0x00000040, 0x00280000,
+       0x00011640, 0x00000018, 0x00100000,
+       0x0000c828, 0x00000048, 0x00380000,
+       0x00011710, 0x00000020, 0x00200000,
+       0x00004650, 0x00000080, 0x00100000,
+       0x00003618, 0x00000010, 0x00100000,
+       0x0000a968, 0x00000008, 0x00010000,
+       0x000097a0, 0x00000008, 0x00010000,
+       0x00011990, 0x00000008, 0x00010000,
+       0x0000f018, 0x00000008, 0x00010000,
+       0x00012628, 0x00000008, 0x00010000,
+       0x00011da8, 0x00000008, 0x00010000,
+       0x0000aa78, 0x00000030, 0x00100000,
+       0x0000d768, 0x00000028, 0x00280000,
+       0x00009a58, 0x00000018, 0x00180000,
+       0x00009bd8, 0x00000008, 0x00080000,
+       0x00013a18, 0x00000008, 0x00080000,
+       0x000126e8, 0x00000018, 0x00180000,
+       0x0000e608, 0x00500288, 0x00100000,
+       0x00012970, 0x00000138, 0x00280000,
+};
+
 void qed_init_iro_array(struct qed_dev *cdev)
 {
        cdev->iro_arr = iro_arr;
index e9e8ade..a573c89 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_INIT_OPS_H
index b7b974f..9be4028 100644 (file)
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/types.h>
@@ -842,11 +816,12 @@ static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
 }
 
 /**
- *  @brief qed_int_assertion - handles asserted attention bits
+ * qed_int_assertion() - Handle asserted attention bits.
  *
- *  @param p_hwfn
- *  @param asserted_bits newly asserted bits
- *  @return int
+ * @p_hwfn: HW device data.
+ * @asserted_bits: Newly asserted bits.
+ *
+ * Return: Zero value.
  */
 static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
 {
@@ -906,16 +881,17 @@ static void qed_int_attn_print(struct qed_hwfn *p_hwfn,
 }
 
 /**
- * @brief qed_int_deassertion_aeu_bit - handles the effects of a single
- * cause of the attention
+ * qed_int_deassertion_aeu_bit() - Handle the effects of a single
+ * cause of the attention.
  *
- * @param p_hwfn
- * @param p_aeu - descriptor of an AEU bit which caused the attention
- * @param aeu_en_reg - register offset of the AEU enable reg. which configured
- *  this bit to this group.
- * @param bit_index - index of this bit in the aeu_en_reg
+ * @p_hwfn: HW device data.
+ * @p_aeu: Descriptor of an AEU bit which caused the attention.
+ * @aeu_en_reg: Register offset of the AEU enable reg. which configured
+ *              this bit to this group.
+ * @p_bit_name: AEU bit description for logging purposes.
+ * @bitmask: Bitmask of this AEU bit within the aeu_en_reg.
  *
- * @return int
+ * Return: Zero on success, negative errno otherwise.
  */
 static int
 qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
@@ -964,12 +940,12 @@ out:
 }
 
 /**
- * @brief qed_int_deassertion_parity - handle a single parity AEU source
+ * qed_int_deassertion_parity() - Handle a single parity AEU source.
  *
- * @param p_hwfn
- * @param p_aeu - descriptor of an AEU bit which caused the parity
- * @param aeu_en_reg - address of the AEU enable register
- * @param bit_index
+ * @p_hwfn: HW device data.
+ * @p_aeu: Descriptor of an AEU bit which caused the parity.
+ * @aeu_en_reg: Address of the AEU enable register.
+ * @bit_index: Index (0-31) of an AEU bit.
  */
 static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
                                       struct aeu_invert_reg_bit *p_aeu,
@@ -1002,12 +978,13 @@ static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
 }
 
 /**
- * @brief - handles deassertion of previously asserted attentions.
+ * qed_int_deassertion() - Handle deassertion of previously asserted
+ * attentions.
  *
- * @param p_hwfn
- * @param deasserted_bits - newly deasserted bits
- * @return int
+ * @p_hwfn: HW device data.
+ * @deasserted_bits: Newly deasserted bits.
  *
+ * Return: Zero value.
  */
 static int qed_int_deassertion(struct qed_hwfn  *p_hwfn,
                               u16 deasserted_bits)
@@ -1214,16 +1191,15 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
 static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
                            void __iomem *igu_addr, u32 ack_cons)
 {
-       struct igu_prod_cons_update igu_ack = { 0 };
+       u32 igu_ack;
 
-       igu_ack.sb_id_and_flags =
-               ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
-                (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
-                (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
-                (IGU_SEG_ACCESS_ATTN <<
-                 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+       igu_ack = ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+                  (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+                  (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+                  (IGU_SEG_ACCESS_ATTN <<
+                   IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
 
-       DIRECT_REG_WR(igu_addr, igu_ack.sb_id_and_flags);
+       DIRECT_REG_WR(igu_addr, igu_ack);
 
        /* Both segments (interrupts & acks) are written to same place address;
         * Need to guarantee all commands will be received (in-order) by HW.
@@ -1437,16 +1413,16 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
                           u8 pf_id, u16 vf_number, u8 vf_valid)
 {
        struct qed_dev *cdev = p_hwfn->cdev;
-       u32 cau_state;
+       u32 cau_state, params = 0, data = 0;
        u8 timer_res;
 
        memset(p_sb_entry, 0, sizeof(*p_sb_entry));
 
-       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
-       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
-       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
-       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
-       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
+       SET_FIELD(params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
+       SET_FIELD(params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
+       SET_FIELD(params, CAU_SB_ENTRY_VF_VALID, vf_valid);
+       SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
+       SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
 
        cau_state = CAU_HC_DISABLE_STATE;
 
@@ -1465,7 +1441,8 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
                timer_res = 1;
        else
                timer_res = 2;
-       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
+
+       SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
 
        if (cdev->tx_coalesce_usecs <= 0x7F)
                timer_res = 0;
@@ -1473,10 +1450,13 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
                timer_res = 1;
        else
                timer_res = 2;
-       SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
 
-       SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
-       SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
+       SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
+       p_sb_entry->params = cpu_to_le32(params);
+
+       SET_FIELD(data, CAU_SB_ENTRY_STATE0, cau_state);
+       SET_FIELD(data, CAU_SB_ENTRY_STATE1, cau_state);
+       p_sb_entry->data = cpu_to_le32(data);
 }
 
 static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
@@ -1486,31 +1466,27 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
                                enum qed_coalescing_fsm coalescing_fsm,
                                u8 timeset)
 {
-       struct cau_pi_entry pi_entry;
        u32 sb_offset, pi_offset;
+       u32 prod = 0;
 
        if (IS_VF(p_hwfn->cdev))
                return;
 
-       sb_offset = igu_sb_id * PIS_PER_SB_E4;
-       memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
-
-       SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
+       SET_FIELD(prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
        if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
-               SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
+               SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 0);
        else
-               SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
+               SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 1);
 
+       sb_offset = igu_sb_id * PIS_PER_SB_E4;
        pi_offset = sb_offset + pi_index;
-       if (p_hwfn->hw_init_done) {
+
+       if (p_hwfn->hw_init_done)
                qed_wr(p_hwfn, p_ptt,
-                      CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
-                      *((u32 *)&(pi_entry)));
-       } else {
-               STORE_RT_REG(p_hwfn,
-                            CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
-                            *((u32 *)&(pi_entry)));
-       }
+                      CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), prod);
+       else
+               STORE_RT_REG(p_hwfn, CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
+                            prod);
 }
 
 void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
@@ -2267,9 +2243,9 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 }
 
 /**
- * @brief Initialize igu runtime registers
+ * qed_int_igu_init_rt() - Initialize IGU runtime registers.
  *
- * @param p_hwfn
+ * @p_hwfn: HW device data.
  */
 void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
 {
@@ -2379,6 +2355,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                          u8 timer_res, u16 sb_id, bool tx)
 {
        struct cau_sb_entry sb_entry;
+       u32 params;
        int rc;
 
        if (!p_hwfn->hw_init_done) {
@@ -2394,10 +2371,14 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                return rc;
        }
 
+       params = le32_to_cpu(sb_entry.params);
+
        if (tx)
-               SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
+               SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
        else
-               SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
+               SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
+
+       sb_entry.params = cpu_to_le32(params);
 
        rc = qed_dmae_host2grc(p_hwfn, p_ptt,
                               (u64)(uintptr_t)&sb_entry,
index e09db33..aea04b1 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_INT_H
index 7245a61..4eae4ee 100644 (file)
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/types.h>
@@ -143,10 +117,9 @@ struct qed_iscsi_conn {
        u8 abortive_dsconnect;
 };
 
-static int
-qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
-                     u8 fw_event_code,
-                     u16 echo, union event_ring_data *data, u8 fw_return_code)
+static int qed_iscsi_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
+                                __le16 echo, union event_ring_data *data,
+                                u8 fw_return_code)
 {
        if (p_hwfn->p_iscsi_info->event_cb) {
                struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
@@ -297,6 +270,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
        dma_addr_t xhq_pbl_addr;
        dma_addr_t uhq_pbl_addr;
        u16 physical_q;
+       __le16 tmp;
        int rc = 0;
        u32 dval;
        u16 wval;
@@ -320,12 +294,12 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
 
        /* Transmission PQ is the first of the PF */
        physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
-       p_conn->physical_q0 = cpu_to_le16(physical_q);
+       p_conn->physical_q0 = physical_q;
        p_ramrod->iscsi.physical_q0 = cpu_to_le16(physical_q);
 
        /* iSCSI Pure-ACK PQ */
        physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
-       p_conn->physical_q1 = cpu_to_le16(physical_q);
+       p_conn->physical_q1 = physical_q;
        p_ramrod->iscsi.physical_q1 = cpu_to_le16(physical_q);
 
        p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
@@ -351,14 +325,20 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
                p_tcp = &p_ramrod->tcp;
 
                p = (u16 *)p_conn->local_mac;
-               p_tcp->local_mac_addr_hi = swab16(get_unaligned(p));
-               p_tcp->local_mac_addr_mid = swab16(get_unaligned(p + 1));
-               p_tcp->local_mac_addr_lo = swab16(get_unaligned(p + 2));
+               tmp = cpu_to_le16(get_unaligned_be16(p));
+               p_tcp->local_mac_addr_hi = tmp;
+               tmp = cpu_to_le16(get_unaligned_be16(p + 1));
+               p_tcp->local_mac_addr_mid = tmp;
+               tmp = cpu_to_le16(get_unaligned_be16(p + 2));
+               p_tcp->local_mac_addr_lo = tmp;
 
                p = (u16 *)p_conn->remote_mac;
-               p_tcp->remote_mac_addr_hi = swab16(get_unaligned(p));
-               p_tcp->remote_mac_addr_mid = swab16(get_unaligned(p + 1));
-               p_tcp->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
+               tmp = cpu_to_le16(get_unaligned_be16(p));
+               p_tcp->remote_mac_addr_hi = tmp;
+               tmp = cpu_to_le16(get_unaligned_be16(p + 1));
+               p_tcp->remote_mac_addr_mid = tmp;
+               tmp = cpu_to_le16(get_unaligned_be16(p + 2));
+               p_tcp->remote_mac_addr_lo = tmp;
 
                p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
 
@@ -417,14 +397,20 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
                    &((struct iscsi_spe_conn_offload_option2 *)p_ramrod)->tcp;
 
                p = (u16 *)p_conn->local_mac;
-               p_tcp2->local_mac_addr_hi = swab16(get_unaligned(p));
-               p_tcp2->local_mac_addr_mid = swab16(get_unaligned(p + 1));
-               p_tcp2->local_mac_addr_lo = swab16(get_unaligned(p + 2));
+               tmp = cpu_to_le16(get_unaligned_be16(p));
+               p_tcp2->local_mac_addr_hi = tmp;
+               tmp = cpu_to_le16(get_unaligned_be16(p + 1));
+               p_tcp2->local_mac_addr_mid = tmp;
+               tmp = cpu_to_le16(get_unaligned_be16(p + 2));
+               p_tcp2->local_mac_addr_lo = tmp;
 
                p = (u16 *)p_conn->remote_mac;
-               p_tcp2->remote_mac_addr_hi = swab16(get_unaligned(p));
-               p_tcp2->remote_mac_addr_mid = swab16(get_unaligned(p + 1));
-               p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
+               tmp = cpu_to_le16(get_unaligned_be16(p));
+               p_tcp2->remote_mac_addr_hi = tmp;
+               tmp = cpu_to_le16(get_unaligned_be16(p + 1));
+               p_tcp2->remote_mac_addr_mid = tmp;
+               tmp = cpu_to_le16(get_unaligned_be16(p + 2));
+               p_tcp2->remote_mac_addr_lo = tmp;
 
                p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id);
                p_tcp2->flags = cpu_to_le16(p_conn->tcp_flags);
@@ -698,9 +684,13 @@ nomem:
 static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
                                         struct qed_iscsi_conn **p_out_conn)
 {
-       u16 uhq_num_elements = 0, xhq_num_elements = 0, r2tq_num_elements = 0;
        struct scsi_terminate_extra_params *p_q_cnts = NULL;
        struct qed_iscsi_pf_params *p_params = NULL;
+       struct qed_chain_init_params params = {
+               .mode           = QED_CHAIN_MODE_PBL,
+               .intended_use   = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+               .cnt_type       = QED_CHAIN_CNT_TYPE_U16,
+       };
        struct tcp_upload_params *p_tcp = NULL;
        struct qed_iscsi_conn *p_conn = NULL;
        int rc = 0;
@@ -741,34 +731,25 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
                goto nomem_upload_param;
        p_conn->tcp_upload_params_virt_addr = p_tcp;
 
-       r2tq_num_elements = p_params->num_r2tq_pages_in_ring *
-                           QED_CHAIN_PAGE_SIZE / 0x80;
-       rc = qed_chain_alloc(p_hwfn->cdev,
-                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-                            QED_CHAIN_MODE_PBL,
-                            QED_CHAIN_CNT_TYPE_U16,
-                            r2tq_num_elements, 0x80, &p_conn->r2tq, NULL);
+       params.num_elems = p_params->num_r2tq_pages_in_ring *
+                          QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_wqe);
+       params.elem_size = sizeof(struct iscsi_wqe);
+
+       rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->r2tq, &params);
        if (rc)
                goto nomem_r2tq;
 
-       uhq_num_elements = p_params->num_uhq_pages_in_ring *
+       params.num_elems = p_params->num_uhq_pages_in_ring *
                           QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe);
-       rc = qed_chain_alloc(p_hwfn->cdev,
-                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-                            QED_CHAIN_MODE_PBL,
-                            QED_CHAIN_CNT_TYPE_U16,
-                            uhq_num_elements,
-                            sizeof(struct iscsi_uhqe), &p_conn->uhq, NULL);
+       params.elem_size = sizeof(struct iscsi_uhqe);
+
+       rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->uhq, &params);
        if (rc)
                goto nomem_uhq;
 
-       xhq_num_elements = uhq_num_elements;
-       rc = qed_chain_alloc(p_hwfn->cdev,
-                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-                            QED_CHAIN_MODE_PBL,
-                            QED_CHAIN_CNT_TYPE_U16,
-                            xhq_num_elements,
-                            sizeof(struct iscsi_xhqe), &p_conn->xhq, NULL);
+       params.elem_size = sizeof(struct iscsi_xhqe);
+
+       rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->xhq, &params);
        if (rc)
                goto nomem;
 
index 225c75b..dab7a5d 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_ISCSI_H
@@ -52,10 +26,6 @@ struct qed_iscsi_info {
        iscsi_event_cb_t event_cb;
 };
 
-#ifdef CONFIG_QED_LL2
-extern const struct qed_ll2_ops qed_ll2_ops_pass;
-#endif
-
 #if IS_ENABLED(CONFIG_QED_ISCSI)
 int qed_iscsi_alloc(struct qed_hwfn *p_hwfn);
 
index d2fe61a..512cbef 100644 (file)
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
+
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/ip.h>
@@ -84,9 +59,8 @@ struct mpa_v2_hdr {
 #define QED_IWARP_DEF_KA_TIMEOUT       (1200000)       /* 20 min */
 #define QED_IWARP_DEF_KA_INTERVAL      (1000)          /* 1 sec */
 
-static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
-                                u8 fw_event_code, u16 echo,
-                                union event_ring_data *data,
+static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
+                                __le16 echo, union event_ring_data *data,
                                 u8 fw_return_code);
 
 /* Override devinfo with iWARP specific values */
@@ -272,14 +246,14 @@ int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
        SET_FIELD(p_ramrod->flags,
                  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
 
-       p_ramrod->pd = qp->pd;
-       p_ramrod->sq_num_pages = qp->sq_num_pages;
-       p_ramrod->rq_num_pages = qp->rq_num_pages;
+       p_ramrod->pd = cpu_to_le16(qp->pd);
+       p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
+       p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
 
        p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
        p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
-       p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
-       p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
+       p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
+       p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
 
        p_ramrod->cq_cid_for_sq =
            cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
@@ -314,6 +288,7 @@ static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
        struct iwarp_modify_qp_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
+       u16 flags, trans_to_state;
        int rc;
 
        /* Get SPQ entry */
@@ -329,12 +304,17 @@ static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
                return rc;
 
        p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
-       SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN,
-                 0x1);
+
+       flags = le16_to_cpu(p_ramrod->flags);
+       SET_FIELD(flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, 0x1);
+       p_ramrod->flags = cpu_to_le16(flags);
+
        if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
-               p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
+               trans_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
        else
-               p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR;
+               trans_to_state = IWARP_MODIFY_QP_STATE_ERROR;
+
+       p_ramrod->transition_to_state = cpu_to_le16(trans_to_state);
 
        rc = qed_spq_post(p_hwfn, p_ent, NULL);
 
@@ -647,6 +627,7 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
        dma_addr_t async_output_phys;
        dma_addr_t in_pdata_phys;
        u16 physical_q;
+       u16 flags = 0;
        u8 tcp_flags;
        int rc;
        int i;
@@ -699,13 +680,14 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
        tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);
 
        tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;
-       tcp->flags = 0;
-       SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
+
+       SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
                  !!(tcp_flags & QED_IWARP_TS_EN));
 
-       SET_FIELD(tcp->flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
+       SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
                  !!(tcp_flags & QED_IWARP_DA_EN));
 
+       tcp->flags = cpu_to_le16(flags);
        tcp->ip_version = ep->cm_info.ip_version;
 
        for (i = 0; i < 4; i++) {
@@ -721,10 +703,10 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
        tcp->tos_or_tc = 0;
 
        tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
-       tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR *  tcp->mss;
+       tcp->cwnd = cpu_to_le32(QED_IWARP_DEF_CWND_FACTOR * ep->mss);
        tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
-       tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT;
-       tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL;
+       tcp->ka_timeout = cpu_to_le32(QED_IWARP_DEF_KA_TIMEOUT);
+       tcp->ka_interval = cpu_to_le32(QED_IWARP_DEF_KA_INTERVAL);
 
        tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
        tcp->connect_mode = ep->connect_mode;
@@ -755,6 +737,7 @@ qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
        union async_output *async_data;
        u16 mpa_ord, mpa_ird;
        u8 mpa_hdr_size = 0;
+       u16 ulp_data_len;
        u8 mpa_rev;
 
        async_data = &ep->ep_buffer_virt->async_output;
@@ -818,8 +801,8 @@ qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
        /* Strip mpa v2 hdr from private data before sending to upper layer */
        ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;
 
-       ep->cm_info.private_data_len = async_data->mpa_request.ulp_data_len -
-                                      mpa_hdr_size;
+       ulp_data_len = le16_to_cpu(async_data->mpa_request.ulp_data_len);
+       ep->cm_info.private_data_len = ulp_data_len - mpa_hdr_size;
 
        params.event = QED_IWARP_EVENT_MPA_REQUEST;
        params.cm_info = &ep->cm_info;
@@ -834,6 +817,7 @@ static int
 qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 {
        struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
+       struct mpa_outgoing_params *common;
        struct qed_iwarp_info *iwarp_info;
        struct qed_sp_init_data init_data;
        dma_addr_t async_output_phys;
@@ -842,6 +826,7 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
        dma_addr_t in_pdata_phys;
        struct qed_rdma_qp *qp;
        bool reject;
+       u32 val;
        int rc;
 
        if (!ep)
@@ -866,18 +851,21 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
                return rc;
 
        p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
+       common = &p_mpa_ramrod->common;
+
        out_pdata_phys = ep->ep_buffer_phys +
                         offsetof(struct qed_iwarp_ep_memory, out_pdata);
-       DMA_REGPAIR_LE(p_mpa_ramrod->common.outgoing_ulp_buffer.addr,
-                      out_pdata_phys);
-       p_mpa_ramrod->common.outgoing_ulp_buffer.len =
-           ep->cm_info.private_data_len;
-       p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;
+       DMA_REGPAIR_LE(common->outgoing_ulp_buffer.addr, out_pdata_phys);
+
+       val = ep->cm_info.private_data_len;
+       common->outgoing_ulp_buffer.len = cpu_to_le16(val);
+       common->crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;
 
-       p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord;
-       p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird;
+       common->out_rq.ord = cpu_to_le32(ep->cm_info.ord);
+       common->out_rq.ird = cpu_to_le32(ep->cm_info.ird);
 
-       p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;
+       val = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;
+       p_mpa_ramrod->tcp_cid = cpu_to_le32(val);
 
        in_pdata_phys = ep->ep_buffer_phys +
                        offsetof(struct qed_iwarp_ep_memory, in_pdata);
@@ -899,11 +887,11 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
                p_mpa_ramrod->stats_counter_id =
                    RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
        } else {
-               p_mpa_ramrod->common.reject = 1;
+               common->reject = 1;
        }
 
        iwarp_info = &p_hwfn->p_rdma_info->iwarp;
-       p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
+       p_mpa_ramrod->rcv_wnd = cpu_to_le16(iwarp_info->rcv_wnd_size);
        p_mpa_ramrod->mode = ep->mpa_rev;
        SET_FIELD(p_mpa_ramrod->rtr_pref,
                  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
@@ -954,6 +942,7 @@ qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
        union async_output *async_data;
        u16 mpa_ird, mpa_ord;
        u8 mpa_data_size = 0;
+       u16 ulp_data_len;
 
        if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
                mpa_v2_params =
@@ -965,11 +954,12 @@ qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
                ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
                ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
        }
-       async_data = &ep->ep_buffer_virt->async_output;
 
+       async_data = &ep->ep_buffer_virt->async_output;
        ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;
-       ep->cm_info.private_data_len = async_data->mpa_response.ulp_data_len -
-                                      mpa_data_size;
+
+       ulp_data_len = le16_to_cpu(async_data->mpa_response.ulp_data_len);
+       ep->cm_info.private_data_len = ulp_data_len - mpa_data_size;
 }
 
 static void
@@ -1846,7 +1836,7 @@ qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn,
                goto out;
        }
 
-       mpa_len = ntohs(*((u16 *)(mpa_data)));
+       mpa_len = ntohs(*(__force __be16 *)mpa_data);
        fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
 
        if (fpdu->fpdu_length <= tcp_payload_len)
@@ -1868,11 +1858,13 @@ qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf,
                    struct unaligned_opaque_data *pkt_data,
                    u16 tcp_payload_size, u8 placement_offset)
 {
+       u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset);
+
        fpdu->mpa_buf = buf;
        fpdu->pkt_hdr = buf->data_phys_addr + placement_offset;
        fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset;
-       fpdu->mpa_frag = buf->data_phys_addr + pkt_data->first_mpa_offset;
-       fpdu->mpa_frag_virt = (u8 *)(buf->data) + pkt_data->first_mpa_offset;
+       fpdu->mpa_frag = buf->data_phys_addr + first_mpa_offset;
+       fpdu->mpa_frag_virt = (u8 *)(buf->data) + first_mpa_offset;
 
        if (tcp_payload_size == 1)
                fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH;
@@ -1890,6 +1882,7 @@ qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn,
                 struct unaligned_opaque_data *pkt_data,
                 struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size)
 {
+       u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset);
        u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf;
        int rc;
 
@@ -1910,13 +1903,11 @@ qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn,
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
                   "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n",
                   fpdu->mpa_frag_virt, fpdu->mpa_frag_len,
-                  (u8 *)(buf->data) + pkt_data->first_mpa_offset,
-                  tcp_payload_size);
+                  (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size);
 
        memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len);
        memcpy(tmp_buf + fpdu->mpa_frag_len,
-              (u8 *)(buf->data) + pkt_data->first_mpa_offset,
-              tcp_payload_size);
+              (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size);
 
        rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf);
        if (rc)
@@ -2059,6 +2050,7 @@ qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn,
                    u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type)
 {
        struct qed_ll2_tx_pkt_info tx_pkt;
+       u16 first_mpa_offset;
        u8 ll2_handle;
        int rc;
 
@@ -2110,11 +2102,13 @@ qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn,
        if (!fpdu->incomplete_bytes)
                goto out;
 
+       first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset);
+
        /* Set third fragment to second part of the packet */
        rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
                                               ll2_handle,
                                               buf->data_phys_addr +
-                                              curr_pkt->first_mpa_offset,
+                                              first_mpa_offset,
                                               fpdu->incomplete_bytes);
 out:
        DP_VERBOSE(p_hwfn,
@@ -2135,12 +2129,12 @@ qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn,
 {
        u64 opaque_data;
 
-       opaque_data = HILO_64(opaque_data1, opaque_data0);
+       opaque_data = HILO_64(cpu_to_le32(opaque_data1),
+                             cpu_to_le32(opaque_data0));
        *curr_pkt = *((struct unaligned_opaque_data *)&opaque_data);
 
-       curr_pkt->first_mpa_offset = curr_pkt->tcp_payload_offset +
-                                    le16_to_cpu(curr_pkt->first_mpa_offset);
-       curr_pkt->cid = le32_to_cpu(curr_pkt->cid);
+       le16_add_cpu(&curr_pkt->first_mpa_offset,
+                    curr_pkt->tcp_payload_offset);
 }
 
 /* This function is called when an unaligned or incomplete MPA packet arrives
@@ -2155,18 +2149,22 @@ qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn,
        struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf;
        enum qed_iwarp_mpa_pkt_type pkt_type;
        struct qed_iwarp_fpdu *fpdu;
+       u16 cid, first_mpa_offset;
        int rc = -EINVAL;
        u8 *mpa_data;
 
-       fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, curr_pkt->cid & 0xffff);
+       cid = le32_to_cpu(curr_pkt->cid);
+
+       fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid);
        if (!fpdu) { /* something corrupt with cid, post rx back */
                DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n",
-                      curr_pkt->cid);
+                      cid);
                goto err;
        }
 
        do {
-               mpa_data = ((u8 *)(buf->data) + curr_pkt->first_mpa_offset);
+               first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset);
+               mpa_data = ((u8 *)(buf->data) + first_mpa_offset);
 
                pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu,
                                                  mpa_buf->tcp_payload_len,
@@ -2212,7 +2210,8 @@ qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn,
                        }
 
                        mpa_buf->tcp_payload_len -= fpdu->fpdu_length;
-                       curr_pkt->first_mpa_offset += fpdu->fpdu_length;
+                       le16_add_cpu(&curr_pkt->first_mpa_offset,
+                                    fpdu->fpdu_length);
                        break;
                case QED_IWARP_MPA_PKT_UNALIGNED:
                        qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data);
@@ -2251,7 +2250,9 @@ qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn,
                        }
 
                        mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes;
-                       curr_pkt->first_mpa_offset += fpdu->incomplete_bytes;
+                       le16_add_cpu(&curr_pkt->first_mpa_offset,
+                                    fpdu->incomplete_bytes);
+
                        /* The framed PDU was sent - no more incomplete bytes */
                        fpdu->incomplete_bytes = 0;
                        break;
@@ -2302,6 +2303,7 @@ qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
        struct qed_iwarp_ll2_mpa_buf *mpa_buf;
        struct qed_iwarp_info *iwarp_info;
        struct qed_hwfn *p_hwfn = cxt;
+       u16 first_mpa_offset;
 
        iwarp_info = &p_hwfn->p_rdma_info->iwarp;
        mpa_buf = list_first_entry(&iwarp_info->mpa_buf_list,
@@ -2315,17 +2317,21 @@ qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
        qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data,
                               data->opaque_data_0, data->opaque_data_1);
 
+       first_mpa_offset = le16_to_cpu(mpa_buf->data.first_mpa_offset);
+
        DP_VERBOSE(p_hwfn,
                   QED_MSG_RDMA,
                   "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n",
-                  data->length.packet_length, mpa_buf->data.first_mpa_offset,
+                  data->length.packet_length, first_mpa_offset,
                   mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags,
                   mpa_buf->data.cid);
 
        mpa_buf->ll2_buf = data->cookie;
        mpa_buf->tcp_payload_len = data->length.packet_length -
-                                  mpa_buf->data.first_mpa_offset;
-       mpa_buf->data.first_mpa_offset += data->u.placement_offset;
+                                  first_mpa_offset;
+
+       first_mpa_offset += data->u.placement_offset;
+       mpa_buf->data.first_mpa_offset = cpu_to_le16(first_mpa_offset);
        mpa_buf->placement_offset = data->u.placement_offset;
 
        list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list);
@@ -2524,14 +2530,16 @@ qed_iwarp_ll2_slowpath(void *cxt,
        struct unaligned_opaque_data unalign_data;
        struct qed_hwfn *p_hwfn = cxt;
        struct qed_iwarp_fpdu *fpdu;
+       u32 cid;
 
        qed_iwarp_mpa_get_data(p_hwfn, &unalign_data,
                               opaque_data_0, opaque_data_1);
 
-       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n",
-                  unalign_data.cid);
+       cid = le32_to_cpu(unalign_data.cid);
 
-       fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)unalign_data.cid);
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n", cid);
+
+       fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid);
        if (fpdu)
                memset(fpdu, 0, sizeof(*fpdu));
 }
@@ -2836,8 +2844,6 @@ int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
        if (rc)
                return rc;
 
-       qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);
-
        return qed_iwarp_ll2_stop(p_hwfn);
 }
 
@@ -3035,9 +3041,8 @@ qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
        return true;
 }
 
-static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
-                                u8 fw_event_code, u16 echo,
-                                union event_ring_data *data,
+static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
+                                __le16 echo, union event_ring_data *data,
                                 u8 fw_return_code)
 {
        struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
index c1b2057..c3872cd 100644 (file)
@@ -1,34 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
+
 #ifndef _QED_IWARP_H
 #define _QED_IWARP_H
 
index 29810a1..4c6ac88 100644 (file)
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/types.h>
@@ -50,6 +24,7 @@
 #include "qed.h"
 #include <linux/qed/qed_chain.h>
 #include "qed_cxt.h"
+#include "qed_dcbx.h"
 #include "qed_dev_api.h"
 #include <linux/qed/qed_eth_if.h>
 #include "qed_hsi.h"
@@ -57,6 +32,7 @@
 #include "qed_int.h"
 #include "qed_l2.h"
 #include "qed_mcp.h"
+#include "qed_ptp.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
 #include "qed_sriov.h"
@@ -366,10 +342,11 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
                           struct qed_sp_vport_start_params *p_params)
 {
        struct vport_start_ramrod_data *p_ramrod = NULL;
+       struct eth_vport_tpa_param *tpa_param;
        struct qed_spq_entry *p_ent =  NULL;
        struct qed_sp_init_data init_data;
+       u16 min_size, rx_mode = 0;
        u8 abs_vport_id = 0;
-       u16 rx_mode = 0;
        int rc;
 
        rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
@@ -402,21 +379,23 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
        p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
 
        /* TPA related fields */
-       memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));
+       tpa_param = &p_ramrod->tpa_param;
+       memset(tpa_param, 0, sizeof(*tpa_param));
 
-       p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
+       tpa_param->max_buff_num = p_params->max_buffers_per_cqe;
 
        switch (p_params->tpa_mode) {
        case QED_TPA_MODE_GRO:
-               p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
-               p_ramrod->tpa_param.tpa_max_size = (u16)-1;
-               p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
-               p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
-               p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
-               p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
-               p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
-               p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
-               break;
+               min_size = p_params->mtu / 2;
+
+               tpa_param->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+               tpa_param->tpa_max_size = cpu_to_le16(U16_MAX);
+               tpa_param->tpa_min_size_to_cont = cpu_to_le16(min_size);
+               tpa_param->tpa_min_size_to_start = cpu_to_le16(min_size);
+               tpa_param->tpa_ipv4_en_flg = 1;
+               tpa_param->tpa_ipv6_en_flg = 1;
+               tpa_param->tpa_pkt_split_flg = 1;
+               tpa_param->tpa_gro_consistent_flg = 1;
        default:
                break;
        }
@@ -625,33 +604,33 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
 static void
 qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
                            struct vport_update_ramrod_data *p_ramrod,
-                           struct qed_sge_tpa_params *p_params)
+                           const struct qed_sge_tpa_params *param)
 {
-       struct eth_vport_tpa_param *p_tpa;
+       struct eth_vport_tpa_param *tpa;
 
-       if (!p_params) {
+       if (!param) {
                p_ramrod->common.update_tpa_param_flg = 0;
                p_ramrod->common.update_tpa_en_flg = 0;
                p_ramrod->common.update_tpa_param_flg = 0;
                return;
        }
 
-       p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
-       p_tpa = &p_ramrod->tpa_param;
-       p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
-       p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
-       p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
-       p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
+       p_ramrod->common.update_tpa_en_flg = param->update_tpa_en_flg;
+       tpa = &p_ramrod->tpa_param;
+       tpa->tpa_ipv4_en_flg = param->tpa_ipv4_en_flg;
+       tpa->tpa_ipv6_en_flg = param->tpa_ipv6_en_flg;
+       tpa->tpa_ipv4_tunn_en_flg = param->tpa_ipv4_tunn_en_flg;
+       tpa->tpa_ipv6_tunn_en_flg = param->tpa_ipv6_tunn_en_flg;
 
-       p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
-       p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
-       p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
-       p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
-       p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
-       p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
-       p_tpa->tpa_max_size = p_params->tpa_max_size;
-       p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
-       p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
+       p_ramrod->common.update_tpa_param_flg = param->update_tpa_param_flg;
+       tpa->max_buff_num = param->max_buffers_per_cqe;
+       tpa->tpa_pkt_split_flg = param->tpa_pkt_split_flg;
+       tpa->tpa_hdr_data_split_flg = param->tpa_hdr_data_split_flg;
+       tpa->tpa_gro_consistent_flg = param->tpa_gro_consistent_flg;
+       tpa->tpa_max_aggs_num = param->tpa_max_aggs_num;
+       tpa->tpa_max_size = cpu_to_le16(param->tpa_max_size);
+       tpa->tpa_min_size_to_start = cpu_to_le16(param->tpa_min_size_to_start);
+       tpa->tpa_min_size_to_cont = cpu_to_le16(param->tpa_min_size_to_cont);
 }
 
 static void
@@ -2113,7 +2092,8 @@ int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
                return rc;
        }
 
-       timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);
+       timer_res = GET_FIELD(le32_to_cpu(sb_entry.params),
+                             CAU_SB_ENTRY_TIMER_RES0);
 
        address = BAR0_MAP_REG_USDM_RAM +
                  USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
@@ -2146,7 +2126,8 @@ int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
                return rc;
        }
 
-       timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);
+       timer_res = GET_FIELD(le32_to_cpu(sb_entry.params),
+                             CAU_SB_ENTRY_TIMER_RES1);
 
        address = BAR0_MAP_REG_XSDM_RAM +
                  XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
@@ -2900,16 +2881,6 @@ static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac)
        return 0;
 }
 
-#ifdef CONFIG_QED_SRIOV
-extern const struct qed_iov_hv_ops qed_iov_ops_pass;
-#endif
-
-#ifdef CONFIG_DCB
-extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
-#endif
-
-extern const struct qed_eth_ptp_ops qed_ptp_ops_pass;
-
 static const struct qed_eth_ops qed_eth_ops_pass = {
        .common = &qed_common_ops_pass,
 #ifdef CONFIG_QED_SRIOV
index 7127d5a..8eceeeb 100644 (file)
@@ -1,34 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
+
 #ifndef _QED_L2_H
 #define _QED_L2_H
 #include <linux/types.h>
index 4afd857..0452b72 100644 (file)
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/types.h>
@@ -1151,6 +1125,12 @@ static int
 qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
                              struct qed_ll2_info *p_ll2_info)
 {
+       struct qed_chain_init_params params = {
+               .intended_use   = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+               .cnt_type       = QED_CHAIN_CNT_TYPE_U16,
+               .num_elems      = p_ll2_info->input.rx_num_desc,
+       };
+       struct qed_dev *cdev = p_hwfn->cdev;
        struct qed_ll2_rx_packet *p_descq;
        u32 capacity;
        int rc = 0;
@@ -1158,13 +1138,10 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
        if (!p_ll2_info->input.rx_num_desc)
                goto out;
 
-       rc = qed_chain_alloc(p_hwfn->cdev,
-                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-                            QED_CHAIN_MODE_NEXT_PTR,
-                            QED_CHAIN_CNT_TYPE_U16,
-                            p_ll2_info->input.rx_num_desc,
-                            sizeof(struct core_rx_bd),
-                            &p_ll2_info->rx_queue.rxq_chain, NULL);
+       params.mode = QED_CHAIN_MODE_NEXT_PTR;
+       params.elem_size = sizeof(struct core_rx_bd);
+
+       rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rxq_chain, &params);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
                goto out;
@@ -1180,13 +1157,10 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
        }
        p_ll2_info->rx_queue.descq_array = p_descq;
 
-       rc = qed_chain_alloc(p_hwfn->cdev,
-                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-                            QED_CHAIN_MODE_PBL,
-                            QED_CHAIN_CNT_TYPE_U16,
-                            p_ll2_info->input.rx_num_desc,
-                            sizeof(struct core_rx_fast_path_cqe),
-                            &p_ll2_info->rx_queue.rcq_chain, NULL);
+       params.mode = QED_CHAIN_MODE_PBL;
+       params.elem_size = sizeof(struct core_rx_fast_path_cqe);
+
+       rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rcq_chain, &params);
        if (rc) {
                DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
                goto out;
@@ -1203,6 +1177,13 @@ out:
 static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
                                         struct qed_ll2_info *p_ll2_info)
 {
+       struct qed_chain_init_params params = {
+               .mode           = QED_CHAIN_MODE_PBL,
+               .intended_use   = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+               .cnt_type       = QED_CHAIN_CNT_TYPE_U16,
+               .num_elems      = p_ll2_info->input.tx_num_desc,
+               .elem_size      = sizeof(struct core_tx_bd),
+       };
        struct qed_ll2_tx_packet *p_descq;
        u32 desc_size;
        u32 capacity;
@@ -1211,13 +1192,8 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
        if (!p_ll2_info->input.tx_num_desc)
                goto out;
 
-       rc = qed_chain_alloc(p_hwfn->cdev,
-                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-                            QED_CHAIN_MODE_PBL,
-                            QED_CHAIN_CNT_TYPE_U16,
-                            p_ll2_info->input.tx_num_desc,
-                            sizeof(struct core_tx_bd),
-                            &p_ll2_info->tx_queue.txq_chain, NULL);
+       rc = qed_chain_alloc(p_hwfn->cdev, &p_ll2_info->tx_queue.txq_chain,
+                            &params);
        if (rc)
                goto out;
 
@@ -1824,6 +1800,7 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
        enum core_roce_flavor_type roce_flavor;
        enum core_tx_dest tx_dest;
        u16 bd_data = 0, frag_idx;
+       u16 bitfield1;
 
        roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
                                                             : CORE_RROCE;
@@ -1855,9 +1832,11 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
                        pkt->remove_stag = true;
        }
 
-       SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
-                 cpu_to_le16(pkt->l4_hdr_offset_w));
-       SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
+       bitfield1 = le16_to_cpu(start_bd->bitfield1);
+       SET_FIELD(bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, pkt->l4_hdr_offset_w);
+       SET_FIELD(bitfield1, CORE_TX_BD_TX_DST, tx_dest);
+       start_bd->bitfield1 = cpu_to_le16(bitfield1);
+
        bd_data |= pkt->bd_flags;
        SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
        SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
index 288642d..500d0c4 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_LL2_H
@@ -142,6 +116,8 @@ struct qed_ll2_info {
        struct qed_ll2_cbs cbs;
 };
 
+extern const struct qed_ll2_ops qed_ll2_ops_pass;
+
 /**
  * @brief qed_ll2_acquire_connection - allocate resources,
  *        starts rx & tx (if relevant) queues pair. Provides
index 11367a2..2558cb6 100644 (file)
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/stddef.h>
@@ -50,6 +24,7 @@
 #include <linux/qed/qed_ll2_if.h>
 #include <net/devlink.h>
 #include <linux/aer.h>
+#include <linux/phylink.h>
 
 #include "qed.h"
 #include "qed_sriov.h"
@@ -90,20 +65,200 @@ MODULE_VERSION(DRV_MODULE_VERSION);
 
 MODULE_FIRMWARE(QED_FW_FILE_NAME);
 
+/* MFW speed capabilities maps */
+
+struct qed_mfw_speed_map {
+       u32             mfw_val;
+       __ETHTOOL_DECLARE_LINK_MODE_MASK(caps);
+
+       const u32       *cap_arr;
+       u32             arr_size;
+};
+
+#define QED_MFW_SPEED_MAP(type, arr)           \
+{                                              \
+       .mfw_val        = (type),               \
+       .cap_arr        = (arr),                \
+       .arr_size       = ARRAY_SIZE(arr),      \
+}
+
+static const u32 qed_mfw_ext_1g[] __initconst = {
+       ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+       ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+       ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+};
+
+static const u32 qed_mfw_ext_10g[] __initconst = {
+       ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+       ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+};
+
+static const u32 qed_mfw_ext_20g[] __initconst = {
+       ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+};
+
+static const u32 qed_mfw_ext_25g[] __initconst = {
+       ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+       ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+       ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+};
+
+static const u32 qed_mfw_ext_40g[] __initconst = {
+       ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+       ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+       ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+       ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+};
+
+static const u32 qed_mfw_ext_50g_base_r[] __initconst = {
+       ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+       ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+       ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+       ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+       ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
+};
+
+static const u32 qed_mfw_ext_50g_base_r2[] __initconst = {
+       ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+       ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+       ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+};
+
+static const u32 qed_mfw_ext_100g_base_r2[] __initconst = {
+       ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
+       ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
+       ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
+       ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
+       ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
+};
+
+static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
+       ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+       ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+       ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+       ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+};
+
+static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
+       QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
+       QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
+       QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_20G, qed_mfw_ext_20g),
+       QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
+       QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
+       QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
+                         qed_mfw_ext_50g_base_r),
+       QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2,
+                         qed_mfw_ext_50g_base_r2),
+       QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2,
+                         qed_mfw_ext_100g_base_r2),
+       QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4,
+                         qed_mfw_ext_100g_base_r4),
+};
+
+static const u32 qed_mfw_legacy_1g[] __initconst = {
+       ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+       ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+       ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+};
+
+static const u32 qed_mfw_legacy_10g[] __initconst = {
+       ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+       ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+};
+
+static const u32 qed_mfw_legacy_20g[] __initconst = {
+       ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+};
+
+static const u32 qed_mfw_legacy_25g[] __initconst = {
+       ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+       ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+       ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+};
+
+static const u32 qed_mfw_legacy_40g[] __initconst = {
+       ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+       ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+       ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+       ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+};
+
+static const u32 qed_mfw_legacy_50g[] __initconst = {
+       ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+       ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+       ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+};
+
+static const u32 qed_mfw_legacy_bb_100g[] __initconst = {
+       ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+       ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+       ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+       ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+};
+
+static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = {
+       QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G,
+                         qed_mfw_legacy_1g),
+       QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G,
+                         qed_mfw_legacy_10g),
+       QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G,
+                         qed_mfw_legacy_20g),
+       QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G,
+                         qed_mfw_legacy_25g),
+       QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G,
+                         qed_mfw_legacy_40g),
+       QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G,
+                         qed_mfw_legacy_50g),
+       QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G,
+                         qed_mfw_legacy_bb_100g),
+};
+
+static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map)
+{
+       linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);
+
+       map->cap_arr = NULL;
+       map->arr_size = 0;
+}
+
+static void __init qed_mfw_speed_maps_init(void)
+{
+       u32 i;
+
+       for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++)
+               qed_mfw_speed_map_populate(qed_mfw_ext_maps + i);
+
+       for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++)
+               qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i);
+}
+
 static int __init qed_init(void)
 {
        pr_info("%s", version);
 
+       qed_mfw_speed_maps_init();
+
        return 0;
 }
+module_init(qed_init);
 
-static void __exit qed_cleanup(void)
+static void __exit qed_exit(void)
 {
-       pr_notice("qed_cleanup called\n");
+       /* To prevent marking this module as "permanent" */
 }
-
-module_init(qed_init);
-module_exit(qed_cleanup);
+module_exit(qed_exit);
 
 /* Check if the DMA controller on the machine can properly handle the DMA
  * addressing required by the device.
@@ -1480,13 +1635,156 @@ static bool qed_can_link_change(struct qed_dev *cdev)
        return true;
 }
 
+static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params,
+                                    const struct qed_link_params *params)
+{
+       struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed;
+       const struct qed_mfw_speed_map *map;
+       u32 i;
+
+       if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
+               ext_speed->autoneg = !!params->autoneg;
+
+       if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
+               ext_speed->advertised_speeds = 0;
+
+               for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) {
+                       map = qed_mfw_ext_maps + i;
+
+                       if (linkmode_intersects(params->adv_speeds, map->caps))
+                               ext_speed->advertised_speeds |= map->mfw_val;
+               }
+       }
+
+       if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) {
+               switch (params->forced_speed) {
+               case SPEED_1000:
+                       ext_speed->forced_speed = QED_EXT_SPEED_1G;
+                       break;
+               case SPEED_10000:
+                       ext_speed->forced_speed = QED_EXT_SPEED_10G;
+                       break;
+               case SPEED_20000:
+                       ext_speed->forced_speed = QED_EXT_SPEED_20G;
+                       break;
+               case SPEED_25000:
+                       ext_speed->forced_speed = QED_EXT_SPEED_25G;
+                       break;
+               case SPEED_40000:
+                       ext_speed->forced_speed = QED_EXT_SPEED_40G;
+                       break;
+               case SPEED_50000:
+                       ext_speed->forced_speed = QED_EXT_SPEED_50G_R |
+                                                 QED_EXT_SPEED_50G_R2;
+                       break;
+               case SPEED_100000:
+                       ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 |
+                                                 QED_EXT_SPEED_100G_R4 |
+                                                 QED_EXT_SPEED_100G_P4;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG))
+               return;
+
+       switch (params->forced_speed) {
+       case SPEED_25000:
+               switch (params->fec) {
+               case FEC_FORCE_MODE_NONE:
+                       link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE;
+                       break;
+               case FEC_FORCE_MODE_FIRECODE:
+                       link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R;
+                       break;
+               case FEC_FORCE_MODE_RS:
+                       link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528;
+                       break;
+               case FEC_FORCE_MODE_AUTO:
+                       link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 |
+                                                   ETH_EXT_FEC_25G_BASE_R |
+                                                   ETH_EXT_FEC_25G_NONE;
+                       break;
+               default:
+                       break;
+               }
+
+               break;
+       case SPEED_40000:
+               switch (params->fec) {
+               case FEC_FORCE_MODE_NONE:
+                       link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE;
+                       break;
+               case FEC_FORCE_MODE_FIRECODE:
+                       link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R;
+                       break;
+               case FEC_FORCE_MODE_AUTO:
+                       link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R |
+                                                   ETH_EXT_FEC_40G_NONE;
+                       break;
+               default:
+                       break;
+               }
+
+               break;
+       case SPEED_50000:
+               switch (params->fec) {
+               case FEC_FORCE_MODE_NONE:
+                       link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE;
+                       break;
+               case FEC_FORCE_MODE_FIRECODE:
+                       link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R;
+                       break;
+               case FEC_FORCE_MODE_RS:
+                       link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528;
+                       break;
+               case FEC_FORCE_MODE_AUTO:
+                       link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 |
+                                                   ETH_EXT_FEC_50G_BASE_R |
+                                                   ETH_EXT_FEC_50G_NONE;
+                       break;
+               default:
+                       break;
+               }
+
+               break;
+       case SPEED_100000:
+               switch (params->fec) {
+               case FEC_FORCE_MODE_NONE:
+                       link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE;
+                       break;
+               case FEC_FORCE_MODE_FIRECODE:
+                       link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R;
+                       break;
+               case FEC_FORCE_MODE_RS:
+                       link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528;
+                       break;
+               case FEC_FORCE_MODE_AUTO:
+                       link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 |
+                                                   ETH_EXT_FEC_100G_BASE_R |
+                                                   ETH_EXT_FEC_100G_NONE;
+                       break;
+               default:
+                       break;
+               }
+
+               break;
+       default:
+               break;
+       }
+}
+
 static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
 {
-       struct qed_hwfn *hwfn;
        struct qed_mcp_link_params *link_params;
+       struct qed_mcp_link_speed_params *speed;
+       const struct qed_mfw_speed_map *map;
+       struct qed_hwfn *hwfn;
        struct qed_ptt *ptt;
-       u32 sup_caps;
        int rc;
+       u32 i;
 
        if (!cdev)
                return -ENODEV;
@@ -1508,59 +1806,31 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
                return -EBUSY;
 
        link_params = qed_mcp_get_link_params(hwfn);
+       if (!link_params)
+               return -ENODATA;
+
+       speed = &link_params->speed;
+
        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
-               link_params->speed.autoneg = params->autoneg;
+               speed->autoneg = !!params->autoneg;
+
        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
-               link_params->speed.advertised_speeds = 0;
-               sup_caps = QED_LM_1000baseT_Full_BIT |
-                          QED_LM_1000baseKX_Full_BIT |
-                          QED_LM_1000baseX_Full_BIT;
-               if (params->adv_speeds & sup_caps)
-                       link_params->speed.advertised_speeds |=
-                           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
-               sup_caps = QED_LM_10000baseT_Full_BIT |
-                          QED_LM_10000baseKR_Full_BIT |
-                          QED_LM_10000baseKX4_Full_BIT |
-                          QED_LM_10000baseR_FEC_BIT |
-                          QED_LM_10000baseCR_Full_BIT |
-                          QED_LM_10000baseSR_Full_BIT |
-                          QED_LM_10000baseLR_Full_BIT |
-                          QED_LM_10000baseLRM_Full_BIT;
-               if (params->adv_speeds & sup_caps)
-                       link_params->speed.advertised_speeds |=
-                           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
-               if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT)
-                       link_params->speed.advertised_speeds |=
-                               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
-               sup_caps = QED_LM_25000baseKR_Full_BIT |
-                          QED_LM_25000baseCR_Full_BIT |
-                          QED_LM_25000baseSR_Full_BIT;
-               if (params->adv_speeds & sup_caps)
-                       link_params->speed.advertised_speeds |=
-                           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
-               sup_caps = QED_LM_40000baseLR4_Full_BIT |
-                          QED_LM_40000baseKR4_Full_BIT |
-                          QED_LM_40000baseCR4_Full_BIT |
-                          QED_LM_40000baseSR4_Full_BIT;
-               if (params->adv_speeds & sup_caps)
-                       link_params->speed.advertised_speeds |=
-                               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
-               sup_caps = QED_LM_50000baseKR2_Full_BIT |
-                          QED_LM_50000baseCR2_Full_BIT |
-                          QED_LM_50000baseSR2_Full_BIT;
-               if (params->adv_speeds & sup_caps)
-                       link_params->speed.advertised_speeds |=
-                           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
-               sup_caps = QED_LM_100000baseKR4_Full_BIT |
-                          QED_LM_100000baseSR4_Full_BIT |
-                          QED_LM_100000baseCR4_Full_BIT |
-                          QED_LM_100000baseLR4_ER4_Full_BIT;
-               if (params->adv_speeds & sup_caps)
-                       link_params->speed.advertised_speeds |=
-                           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
+               speed->advertised_speeds = 0;
+
+               for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) {
+                       map = qed_mfw_legacy_maps + i;
+
+                       if (linkmode_intersects(params->adv_speeds, map->caps))
+                               speed->advertised_speeds |= map->mfw_val;
+               }
        }
+
        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
-               link_params->speed.forced_speed = params->forced_speed;
+               speed->forced_speed = params->forced_speed;
+
+       if (qed_mcp_is_ext_speed_supported(hwfn))
+               qed_set_ext_speed_params(link_params, params);
+
        if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
                if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
                        link_params->pause.autoneg = true;
@@ -1575,6 +1845,7 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
                else
                        link_params->pause.forced_tx = false;
        }
+
        if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
                switch (params->loopback_mode) {
                case QED_LINK_LOOPBACK_INT_PHY:
@@ -1589,6 +1860,25 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
                case QED_LINK_LOOPBACK_MAC:
                        link_params->loopback_mode = ETH_LOOPBACK_MAC;
                        break;
+               case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123:
+                       link_params->loopback_mode =
+                               ETH_LOOPBACK_CNIG_AH_ONLY_0123;
+                       break;
+               case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301:
+                       link_params->loopback_mode =
+                               ETH_LOOPBACK_CNIG_AH_ONLY_2301;
+                       break;
+               case QED_LINK_LOOPBACK_PCS_AH_ONLY:
+                       link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY;
+                       break;
+               case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY:
+                       link_params->loopback_mode =
+                               ETH_LOOPBACK_REVERSE_MAC_AH_ONLY;
+                       break;
+               case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY:
+                       link_params->loopback_mode =
+                               ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY;
+                       break;
                default:
                        link_params->loopback_mode = ETH_LOOPBACK_NONE;
                        break;
@@ -1599,6 +1889,9 @@ static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
                memcpy(&link_params->eee, &params->eee,
                       sizeof(link_params->eee));
 
+       if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)
+               link_params->fec = params->fec;
+
        rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
 
        qed_ptt_release(hwfn, ptt);
@@ -1615,7 +1908,6 @@ static int qed_get_port_type(u32 media_type)
        case MEDIA_SFP_1G_FIBER:
        case MEDIA_XFP_FIBER:
        case MEDIA_MODULE_FIBER:
-       case MEDIA_KR:
                port_type = PORT_FIBRE;
                break;
        case MEDIA_DA_TWINAX:
@@ -1624,6 +1916,7 @@ static int qed_get_port_type(u32 media_type)
        case MEDIA_BASE_T:
                port_type = PORT_TP;
                break;
+       case MEDIA_KR:
        case MEDIA_NOT_PRESENT:
                port_type = PORT_NONE;
                break;
@@ -1670,7 +1963,7 @@ static int qed_get_link_data(struct qed_hwfn *hwfn,
 
 static void qed_fill_link_capability(struct qed_hwfn *hwfn,
                                     struct qed_ptt *ptt, u32 capability,
-                                    u32 *if_capability)
+                                    unsigned long *if_caps)
 {
        u32 media_type, tcvr_state, tcvr_type;
        u32 speed_mask, board_cfg;
@@ -1693,122 +1986,215 @@ static void qed_fill_link_capability(struct qed_hwfn *hwfn,
 
        switch (media_type) {
        case MEDIA_DA_TWINAX:
-               *if_capability |= QED_LM_FIBRE_BIT;
+               phylink_set(if_caps, FIBRE);
+
                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
-                       *if_capability |= QED_LM_20000baseKR2_Full_BIT;
-               /* For DAC media multiple speed capabilities are supported*/
-               capability = capability & speed_mask;
+                       phylink_set(if_caps, 20000baseKR2_Full);
+
+               /* For DAC media multiple speed capabilities are supported */
+               capability |= speed_mask;
+
                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
-                       *if_capability |= QED_LM_1000baseKX_Full_BIT;
+                       phylink_set(if_caps, 1000baseKX_Full);
                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
-                       *if_capability |= QED_LM_10000baseCR_Full_BIT;
+                       phylink_set(if_caps, 10000baseCR_Full);
+
                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
-                       *if_capability |= QED_LM_40000baseCR4_Full_BIT;
+                       switch (tcvr_type) {
+                       case ETH_TRANSCEIVER_TYPE_40G_CR4:
+                       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
+                       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
+                               phylink_set(if_caps, 40000baseCR4_Full);
+                               break;
+                       default:
+                               break;
+                       }
+
                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
-                       *if_capability |= QED_LM_25000baseCR_Full_BIT;
+                       phylink_set(if_caps, 25000baseCR_Full);
                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
-                       *if_capability |= QED_LM_50000baseCR2_Full_BIT;
+                       phylink_set(if_caps, 50000baseCR2_Full);
+
                if (capability &
-                       NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
-                       *if_capability |= QED_LM_100000baseCR4_Full_BIT;
+                   NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
+                       switch (tcvr_type) {
+                       case ETH_TRANSCEIVER_TYPE_100G_CR4:
+                       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
+                               phylink_set(if_caps, 100000baseCR4_Full);
+                               break;
+                       default:
+                               break;
+                       }
+
                break;
        case MEDIA_BASE_T:
-               *if_capability |= QED_LM_TP_BIT;
+               phylink_set(if_caps, TP);
+
                if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
                        if (capability &
-                           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
-                               *if_capability |= QED_LM_1000baseT_Full_BIT;
-                       }
+                           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+                               phylink_set(if_caps, 1000baseT_Full);
                        if (capability &
-                           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
-                               *if_capability |= QED_LM_10000baseT_Full_BIT;
-                       }
+                           NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+                               phylink_set(if_caps, 10000baseT_Full);
                }
+
                if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
-                       *if_capability |= QED_LM_FIBRE_BIT;
-                       if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET)
-                               *if_capability |= QED_LM_1000baseT_Full_BIT;
-                       if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET)
-                               *if_capability |= QED_LM_10000baseT_Full_BIT;
+                       phylink_set(if_caps, FIBRE);
+
+                       switch (tcvr_type) {
+                       case ETH_TRANSCEIVER_TYPE_1000BASET:
+                               phylink_set(if_caps, 1000baseT_Full);
+                               break;
+                       case ETH_TRANSCEIVER_TYPE_10G_BASET:
+                               phylink_set(if_caps, 10000baseT_Full);
+                               break;
+                       default:
+                               break;
+                       }
                }
+
                break;
        case MEDIA_SFP_1G_FIBER:
        case MEDIA_SFPP_10G_FIBER:
        case MEDIA_XFP_FIBER:
        case MEDIA_MODULE_FIBER:
-               *if_capability |= QED_LM_FIBRE_BIT;
-               if (capability &
-                   NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
-                       if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) ||
-                           (tcvr_type == ETH_TRANSCEIVER_TYPE_1G_SX))
-                               *if_capability |= QED_LM_1000baseKX_Full_BIT;
-               }
-               if (capability &
-                   NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
-                       if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_SR)
-                               *if_capability |= QED_LM_10000baseSR_Full_BIT;
-                       if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LR)
-                               *if_capability |= QED_LM_10000baseLR_Full_BIT;
-                       if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LRM)
-                               *if_capability |= QED_LM_10000baseLRM_Full_BIT;
-                       if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_ER)
-                               *if_capability |= QED_LM_10000baseR_FEC_BIT;
-               }
+               phylink_set(if_caps, FIBRE);
+               capability |= speed_mask;
+
+               if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+                       switch (tcvr_type) {
+                       case ETH_TRANSCEIVER_TYPE_1G_LX:
+                       case ETH_TRANSCEIVER_TYPE_1G_SX:
+                       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
+                       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
+                               phylink_set(if_caps, 1000baseKX_Full);
+                               break;
+                       default:
+                               break;
+                       }
+
+               if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+                       switch (tcvr_type) {
+                       case ETH_TRANSCEIVER_TYPE_10G_SR:
+                       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
+                       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
+                       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
+                               phylink_set(if_caps, 10000baseSR_Full);
+                               break;
+                       case ETH_TRANSCEIVER_TYPE_10G_LR:
+                       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
+                       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
+                       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
+                               phylink_set(if_caps, 10000baseLR_Full);
+                               break;
+                       case ETH_TRANSCEIVER_TYPE_10G_LRM:
+                               phylink_set(if_caps, 10000baseLRM_Full);
+                               break;
+                       case ETH_TRANSCEIVER_TYPE_10G_ER:
+                               phylink_set(if_caps, 10000baseR_FEC);
+                               break;
+                       default:
+                               break;
+                       }
+
                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
-                       *if_capability |= QED_LM_20000baseKR2_Full_BIT;
-               if (capability &
-                   NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) {
-                       if (tcvr_type == ETH_TRANSCEIVER_TYPE_25G_SR)
-                               *if_capability |= QED_LM_25000baseSR_Full_BIT;
-               }
-               if (capability &
-                   NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) {
-                       if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_LR4)
-                               *if_capability |= QED_LM_40000baseLR4_Full_BIT;
-                       if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_SR4)
-                               *if_capability |= QED_LM_40000baseSR4_Full_BIT;
-               }
-               if (capability &
-                   NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
-                       *if_capability |= QED_LM_50000baseKR2_Full_BIT;
+                       phylink_set(if_caps, 20000baseKR2_Full);
+
+               if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+                       switch (tcvr_type) {
+                       case ETH_TRANSCEIVER_TYPE_25G_SR:
+                       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
+                               phylink_set(if_caps, 25000baseSR_Full);
+                               break;
+                       default:
+                               break;
+                       }
+
+               if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+                       switch (tcvr_type) {
+                       case ETH_TRANSCEIVER_TYPE_40G_LR4:
+                       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
+                       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
+                               phylink_set(if_caps, 40000baseLR4_Full);
+                               break;
+                       case ETH_TRANSCEIVER_TYPE_40G_SR4:
+                       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
+                       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
+                               phylink_set(if_caps, 40000baseSR4_Full);
+                               break;
+                       default:
+                               break;
+                       }
+
+               if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+                       phylink_set(if_caps, 50000baseKR2_Full);
+
                if (capability &
-                   NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) {
-                       if (tcvr_type == ETH_TRANSCEIVER_TYPE_100G_SR4)
-                               *if_capability |= QED_LM_100000baseSR4_Full_BIT;
-               }
+                   NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
+                       switch (tcvr_type) {
+                       case ETH_TRANSCEIVER_TYPE_100G_SR4:
+                       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
+                               phylink_set(if_caps, 100000baseSR4_Full);
+                               break;
+                       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
+                               phylink_set(if_caps, 100000baseLR4_ER4_Full);
+                               break;
+                       default:
+                               break;
+                       }
 
                break;
        case MEDIA_KR:
-               *if_capability |= QED_LM_Backplane_BIT;
+               phylink_set(if_caps, Backplane);
+
                if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
-                       *if_capability |= QED_LM_20000baseKR2_Full_BIT;
-               if (capability &
-                   NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
-                       *if_capability |= QED_LM_1000baseKX_Full_BIT;
-               if (capability &
-                   NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
-                       *if_capability |= QED_LM_10000baseKR_Full_BIT;
-               if (capability &
-                   NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
-                       *if_capability |= QED_LM_25000baseKR_Full_BIT;
-               if (capability &
-                   NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
-                       *if_capability |= QED_LM_40000baseKR4_Full_BIT;
-               if (capability &
-                   NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
-                       *if_capability |= QED_LM_50000baseKR2_Full_BIT;
+                       phylink_set(if_caps, 20000baseKR2_Full);
+               if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+                       phylink_set(if_caps, 1000baseKX_Full);
+               if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+                       phylink_set(if_caps, 10000baseKR_Full);
+               if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+                       phylink_set(if_caps, 25000baseKR_Full);
+               if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+                       phylink_set(if_caps, 40000baseKR4_Full);
+               if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+                       phylink_set(if_caps, 50000baseKR2_Full);
                if (capability &
                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
-                       *if_capability |= QED_LM_100000baseKR4_Full_BIT;
+                       phylink_set(if_caps, 100000baseKR4_Full);
+
                break;
        case MEDIA_UNSPECIFIED:
        case MEDIA_NOT_PRESENT:
+       default:
                DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
                           "Unknown media and transceiver type;\n");
                break;
        }
 }
 
+static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask)
+{
+       *speed_mask = 0;
+
+       if (caps &
+           (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD))
+               *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+       if (caps & QED_LINK_PARTNER_SPEED_10G)
+               *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+       if (caps & QED_LINK_PARTNER_SPEED_20G)
+               *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
+       if (caps & QED_LINK_PARTNER_SPEED_25G)
+               *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+       if (caps & QED_LINK_PARTNER_SPEED_40G)
+               *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+       if (caps & QED_LINK_PARTNER_SPEED_50G)
+               *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
+       if (caps & QED_LINK_PARTNER_SPEED_100G)
+               *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
+}
+
 static void qed_fill_link(struct qed_hwfn *hwfn,
                          struct qed_ptt *ptt,
                          struct qed_link_output *if_link)
@@ -1816,7 +2202,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
        struct qed_mcp_link_capabilities link_caps;
        struct qed_mcp_link_params params;
        struct qed_mcp_link_state link;
-       u32 media_type;
+       u32 media_type, speed_mask;
 
        memset(if_link, 0, sizeof(*if_link));
 
@@ -1830,28 +2216,53 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
        if (link.link_up)
                if_link->link_up = true;
 
-       /* TODO - at the moment assume supported and advertised speed equal */
-       if (link_caps.default_speed_autoneg)
-               if_link->supported_caps |= QED_LM_Autoneg_BIT;
+       if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) {
+               if (link_caps.default_ext_autoneg)
+                       phylink_set(if_link->supported_caps, Autoneg);
+
+               linkmode_copy(if_link->advertised_caps, if_link->supported_caps);
+
+               if (params.ext_speed.autoneg)
+                       phylink_set(if_link->advertised_caps, Autoneg);
+               else
+                       phylink_clear(if_link->advertised_caps, Autoneg);
+
+               qed_fill_link_capability(hwfn, ptt,
+                                        params.ext_speed.advertised_speeds,
+                                        if_link->advertised_caps);
+       } else {
+               if (link_caps.default_speed_autoneg)
+                       phylink_set(if_link->supported_caps, Autoneg);
+
+               linkmode_copy(if_link->advertised_caps, if_link->supported_caps);
+
+               if (params.speed.autoneg)
+                       phylink_set(if_link->advertised_caps, Autoneg);
+               else
+                       phylink_clear(if_link->advertised_caps, Autoneg);
+       }
+
        if (params.pause.autoneg ||
            (params.pause.forced_rx && params.pause.forced_tx))
-               if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
+               phylink_set(if_link->supported_caps, Asym_Pause);
        if (params.pause.autoneg || params.pause.forced_rx ||
            params.pause.forced_tx)
-               if_link->supported_caps |= QED_LM_Pause_BIT;
+               phylink_set(if_link->supported_caps, Pause);
 
-       if_link->advertised_caps = if_link->supported_caps;
-       if (params.speed.autoneg)
-               if_link->advertised_caps |= QED_LM_Autoneg_BIT;
-       else
-               if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;
+       if_link->sup_fec = link_caps.fec_default;
+       if_link->active_fec = params.fec;
 
-       /* Fill link advertised capability*/
+       /* Fill link advertised capability */
        qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
-                                &if_link->advertised_caps);
-       /* Fill link supported capability*/
+                                if_link->advertised_caps);
+
+       /* Fill link supported capability */
        qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
-                                &if_link->supported_caps);
+                                if_link->supported_caps);
+
+       /* Fill partner advertised capability */
+       qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask);
+       qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps);
 
        if (link.link_up)
                if_link->speed = link.speed;
@@ -1870,31 +2281,13 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
        if (params.pause.forced_tx)
                if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
 
-       /* Link partner capabilities */
-       if (link.partner_adv_speed &
-           QED_LINK_PARTNER_SPEED_1G_FD)
-               if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
-       if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
-               if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
-       if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G)
-               if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT;
-       if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
-               if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
-       if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
-               if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
-       if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
-               if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
-       if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
-               if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;
-
        if (link.an_complete)
-               if_link->lp_caps |= QED_LM_Autoneg_BIT;
-
+               phylink_set(if_link->lp_caps, Autoneg);
        if (link.partner_adv_pause)
-               if_link->lp_caps |= QED_LM_Pause_BIT;
+               phylink_set(if_link->lp_caps, Pause);
        if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
            link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
-               if_link->lp_caps |= QED_LM_Asym_Pause_BIT;
+               phylink_set(if_link->lp_caps, Asym_Pause);
 
        if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
                if_link->eee_supported = false;
@@ -1988,8 +2381,7 @@ static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
                                          u32 *crc)
 {
        u8 *buf = NULL;
-       int rc, j;
-       u32 val;
+       int rc;
 
        /* Allocate a buffer for holding the nvram image */
        buf = kzalloc(nvm_image->length, GFP_KERNEL);
@@ -2007,15 +2399,14 @@ static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
        /* Convert the buffer into big-endian format (excluding the
         * closing 4 bytes of CRC).
         */
-       for (j = 0; j < nvm_image->length - 4; j += 4) {
-               val = cpu_to_be32(*(u32 *)&buf[j]);
-               *(u32 *)&buf[j] = val;
-       }
+       cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf,
+                         DIV_ROUND_UP(nvm_image->length - 4, 4));
 
        /* Calc CRC for the "actual" image buffer, i.e. not including
         * the last 4 CRC bytes.
         */
-       *crc = (~cpu_to_be32(crc32(0xffffffff, buf, nvm_image->length - 4)));
+       *crc = ~crc32(~0U, buf, nvm_image->length - 4);
+       *crc = (__force u32)cpu_to_be32p(crc);
 
 out:
        kfree(buf);
@@ -2477,7 +2868,7 @@ void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
                ops->schedule_recovery_handler(cookie);
 }
 
-char *qed_hw_err_type_descr[] = {
+static const char * const qed_hw_err_type_descr[] = {
        [QED_HW_ERR_FAN_FAIL]           = "Fan Failure",
        [QED_HW_ERR_MFW_RESP_FAIL]      = "MFW Response Failure",
        [QED_HW_ERR_HW_ATTN]            = "HW Attention",
@@ -2492,7 +2883,7 @@ void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
 {
        struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
        void *cookie = p_hwfn->cdev->ops_cookie;
-       char *err_str;
+       const char *err_str;
 
        if (err_type > QED_HW_ERR_LAST)
                err_type = QED_HW_ERR_LAST;
index 9624616..988d845 100644 (file)
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/types.h>
@@ -1472,6 +1446,25 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
        if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
                qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
 
+       if (p_hwfn->mcp_info->capabilities &
+           FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
+               switch (status & LINK_STATUS_FEC_MODE_MASK) {
+               case LINK_STATUS_FEC_MODE_NONE:
+                       p_link->fec_active = QED_FEC_MODE_NONE;
+                       break;
+               case LINK_STATUS_FEC_MODE_FIRECODE_CL74:
+                       p_link->fec_active = QED_FEC_MODE_FIRECODE;
+                       break;
+               case LINK_STATUS_FEC_MODE_RS_CL91:
+                       p_link->fec_active = QED_FEC_MODE_RS;
+                       break;
+               default:
+                       p_link->fec_active = QED_FEC_MODE_AUTO;
+               }
+       } else {
+               p_link->fec_active = QED_FEC_MODE_UNSUPPORTED;
+       }
+
        qed_link_update(p_hwfn, p_ptt);
 out:
        spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
@@ -1482,8 +1475,9 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
        struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
        struct qed_mcp_mb_params mb_params;
        struct eth_phy_cfg phy_cfg;
+       u32 cmd, fec_bit = 0;
+       u32 val, ext_speed;
        int rc = 0;
-       u32 cmd;
 
        /* Set the shmem configuration according to params */
        memset(&phy_cfg, 0, sizeof(phy_cfg));
@@ -1515,19 +1509,91 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
                                   EEE_TX_TIMER_USEC_MASK;
        }
 
+       if (p_hwfn->mcp_info->capabilities &
+           FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
+               if (params->fec & QED_FEC_MODE_NONE)
+                       fec_bit |= FEC_FORCE_MODE_NONE;
+               else if (params->fec & QED_FEC_MODE_FIRECODE)
+                       fec_bit |= FEC_FORCE_MODE_FIRECODE;
+               else if (params->fec & QED_FEC_MODE_RS)
+                       fec_bit |= FEC_FORCE_MODE_RS;
+               else if (params->fec & QED_FEC_MODE_AUTO)
+                       fec_bit |= FEC_FORCE_MODE_AUTO;
+
+               SET_MFW_FIELD(phy_cfg.fec_mode, FEC_FORCE_MODE, fec_bit);
+       }
+
+       if (p_hwfn->mcp_info->capabilities &
+           FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
+               ext_speed = 0;
+               if (params->ext_speed.autoneg)
+                       ext_speed |= ETH_EXT_SPEED_AN;
+
+               val = params->ext_speed.forced_speed;
+               if (val & QED_EXT_SPEED_1G)
+                       ext_speed |= ETH_EXT_SPEED_1G;
+               if (val & QED_EXT_SPEED_10G)
+                       ext_speed |= ETH_EXT_SPEED_10G;
+               if (val & QED_EXT_SPEED_20G)
+                       ext_speed |= ETH_EXT_SPEED_20G;
+               if (val & QED_EXT_SPEED_25G)
+                       ext_speed |= ETH_EXT_SPEED_25G;
+               if (val & QED_EXT_SPEED_40G)
+                       ext_speed |= ETH_EXT_SPEED_40G;
+               if (val & QED_EXT_SPEED_50G_R)
+                       ext_speed |= ETH_EXT_SPEED_50G_BASE_R;
+               if (val & QED_EXT_SPEED_50G_R2)
+                       ext_speed |= ETH_EXT_SPEED_50G_BASE_R2;
+               if (val & QED_EXT_SPEED_100G_R2)
+                       ext_speed |= ETH_EXT_SPEED_100G_BASE_R2;
+               if (val & QED_EXT_SPEED_100G_R4)
+                       ext_speed |= ETH_EXT_SPEED_100G_BASE_R4;
+               if (val & QED_EXT_SPEED_100G_P4)
+                       ext_speed |= ETH_EXT_SPEED_100G_BASE_P4;
+
+               SET_MFW_FIELD(phy_cfg.extended_speed, ETH_EXT_SPEED,
+                             ext_speed);
+
+               ext_speed = 0;
+
+               val = params->ext_speed.advertised_speeds;
+               if (val & QED_EXT_SPEED_MASK_1G)
+                       ext_speed |= ETH_EXT_ADV_SPEED_1G;
+               if (val & QED_EXT_SPEED_MASK_10G)
+                       ext_speed |= ETH_EXT_ADV_SPEED_10G;
+               if (val & QED_EXT_SPEED_MASK_20G)
+                       ext_speed |= ETH_EXT_ADV_SPEED_20G;
+               if (val & QED_EXT_SPEED_MASK_25G)
+                       ext_speed |= ETH_EXT_ADV_SPEED_25G;
+               if (val & QED_EXT_SPEED_MASK_40G)
+                       ext_speed |= ETH_EXT_ADV_SPEED_40G;
+               if (val & QED_EXT_SPEED_MASK_50G_R)
+                       ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R;
+               if (val & QED_EXT_SPEED_MASK_50G_R2)
+                       ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R2;
+               if (val & QED_EXT_SPEED_MASK_100G_R2)
+                       ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R2;
+               if (val & QED_EXT_SPEED_MASK_100G_R4)
+                       ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R4;
+               if (val & QED_EXT_SPEED_MASK_100G_P4)
+                       ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_P4;
+
+               phy_cfg.extended_speed |= ext_speed;
+
+               SET_MFW_FIELD(phy_cfg.fec_mode, FEC_EXTENDED_MODE,
+                             params->ext_fec_mode);
+       }
+
        p_hwfn->b_drv_link_init = b_up;
 
        if (b_up) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
-                          "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
-                          phy_cfg.speed,
-                          phy_cfg.pause,
-                          phy_cfg.adv_speed,
-                          phy_cfg.loopback_mode,
-                          phy_cfg.feature_config_flags);
+                          "Configuring Link: Speed 0x%08x, Pause 0x%08x, Adv. Speed 0x%08x, Loopback 0x%08x, FEC 0x%08x, Ext. Speed 0x%08x\n",
+                          phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
+                          phy_cfg.loopback_mode, phy_cfg.fec_mode,
+                          phy_cfg.extended_speed);
        } else {
-               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
-                          "Resetting link\n");
+               DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n");
        }
 
        memset(&mb_params, 0, sizeof(mb_params));
@@ -2219,6 +2285,11 @@ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
                    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
                break;
+       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
+       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
+               *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+                               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+               break;
        case ETH_TRANSCEIVER_TYPE_40G_CR4:
        case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
                *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
@@ -2249,8 +2320,10 @@ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
                *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
                break;
        case ETH_TRANSCEIVER_TYPE_10G_BASET:
+       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
+       case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
                *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
-                   NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+                               NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
                break;
        default:
                DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
@@ -3280,6 +3353,13 @@ err0:
        return rc;
 }
 
+void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn)
+{
+       kfree(p_hwfn->nvm_info.image_att);
+       p_hwfn->nvm_info.image_att = NULL;
+       p_hwfn->nvm_info.valid = false;
+}
+
 int
 qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
                          enum qed_nvm_images image_id,
@@ -3817,7 +3897,12 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
        u32 mcp_resp, mcp_param, features;
 
        features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
-                  DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK;
+                  DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK |
+                  DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL;
+
+       if (QED_IS_E5(p_hwfn->cdev))
+               features |=
+                   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL;
 
        return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
                           features, &mcp_resp, &mcp_param);
index 5750b4c..8edb450 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_MCP_H
 #include "qed_dev_api.h"
 
 struct qed_mcp_link_speed_params {
-       bool    autoneg;
-       u32     advertised_speeds;      /* bitmask of DRV_SPEED_CAPABILITY */
-       u32     forced_speed;      /* In Mb/s */
+       bool                                    autoneg;
+
+       u32                                     advertised_speeds;
+#define QED_EXT_SPEED_MASK_RES                 0x1
+#define QED_EXT_SPEED_MASK_1G                  0x2
+#define QED_EXT_SPEED_MASK_10G                 0x4
+#define QED_EXT_SPEED_MASK_20G                 0x8
+#define QED_EXT_SPEED_MASK_25G                 0x10
+#define QED_EXT_SPEED_MASK_40G                 0x20
+#define QED_EXT_SPEED_MASK_50G_R               0x40
+#define QED_EXT_SPEED_MASK_50G_R2              0x80
+#define QED_EXT_SPEED_MASK_100G_R2             0x100
+#define QED_EXT_SPEED_MASK_100G_R4             0x200
+#define QED_EXT_SPEED_MASK_100G_P4             0x400
+
+       u32                                     forced_speed;      /* In Mb/s */
+#define QED_EXT_SPEED_1G                       0x1
+#define QED_EXT_SPEED_10G                      0x2
+#define QED_EXT_SPEED_20G                      0x4
+#define QED_EXT_SPEED_25G                      0x8
+#define QED_EXT_SPEED_40G                      0x10
+#define QED_EXT_SPEED_50G_R                    0x20
+#define QED_EXT_SPEED_50G_R2                   0x40
+#define QED_EXT_SPEED_100G_R2                  0x80
+#define QED_EXT_SPEED_100G_R4                  0x100
+#define QED_EXT_SPEED_100G_P4                  0x200
 };
 
 struct qed_mcp_link_pause_params {
-       bool    autoneg;
-       bool    forced_rx;
-       bool    forced_tx;
+       bool                                    autoneg;
+       bool                                    forced_rx;
+       bool                                    forced_tx;
 };
 
 enum qed_mcp_eee_mode {
@@ -60,61 +57,72 @@ enum qed_mcp_eee_mode {
 };
 
 struct qed_mcp_link_params {
-       struct qed_mcp_link_speed_params speed;
-       struct qed_mcp_link_pause_params pause;
-       u32 loopback_mode;
-       struct qed_link_eee_params eee;
+       struct qed_mcp_link_speed_params        speed;
+       struct qed_mcp_link_pause_params        pause;
+       u32                                     loopback_mode;
+       struct qed_link_eee_params              eee;
+       u32                                     fec;
+
+       struct qed_mcp_link_speed_params        ext_speed;
+       u32                                     ext_fec_mode;
 };
 
 struct qed_mcp_link_capabilities {
-       u32 speed_capabilities;
-       bool default_speed_autoneg;
-       enum qed_mcp_eee_mode default_eee;
-       u32 eee_lpi_timer;
-       u8 eee_speed_caps;
+       u32                                     speed_capabilities;
+       bool                                    default_speed_autoneg;
+       u32                                     fec_default;
+       enum qed_mcp_eee_mode                   default_eee;
+       u32                                     eee_lpi_timer;
+       u8                                      eee_speed_caps;
+
+       u32                                     default_ext_speed_caps;
+       u32                                     default_ext_autoneg;
+       u32                                     default_ext_speed;
+       u32                                     default_ext_fec;
 };
 
 struct qed_mcp_link_state {
-       bool    link_up;
-
-       u32     min_pf_rate;
+       bool                                    link_up;
+       u32                                     min_pf_rate;
 
        /* Actual link speed in Mb/s */
-       u32     line_speed;
+       u32                                     line_speed;
 
        /* PF max speed in Mb/s, deduced from line_speed
         * according to PF max bandwidth configuration.
         */
-       u32     speed;
-       bool    full_duplex;
-
-       bool    an;
-       bool    an_complete;
-       bool    parallel_detection;
-       bool    pfc_enabled;
-
-#define QED_LINK_PARTNER_SPEED_1G_HD    BIT(0)
-#define QED_LINK_PARTNER_SPEED_1G_FD    BIT(1)
-#define QED_LINK_PARTNER_SPEED_10G      BIT(2)
-#define QED_LINK_PARTNER_SPEED_20G      BIT(3)
-#define QED_LINK_PARTNER_SPEED_25G      BIT(4)
-#define QED_LINK_PARTNER_SPEED_40G      BIT(5)
-#define QED_LINK_PARTNER_SPEED_50G      BIT(6)
-#define QED_LINK_PARTNER_SPEED_100G     BIT(7)
-       u32     partner_adv_speed;
-
-       bool    partner_tx_flow_ctrl_en;
-       bool    partner_rx_flow_ctrl_en;
-
-#define QED_LINK_PARTNER_SYMMETRIC_PAUSE (1)
-#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE (2)
-#define QED_LINK_PARTNER_BOTH_PAUSE (3)
-       u8      partner_adv_pause;
-
-       bool    sfp_tx_fault;
-       bool    eee_active;
-       u8      eee_adv_caps;
-       u8      eee_lp_adv_caps;
+       u32                                     speed;
+
+       bool                                    full_duplex;
+       bool                                    an;
+       bool                                    an_complete;
+       bool                                    parallel_detection;
+       bool                                    pfc_enabled;
+
+       u32                                     partner_adv_speed;
+#define QED_LINK_PARTNER_SPEED_1G_HD           BIT(0)
+#define QED_LINK_PARTNER_SPEED_1G_FD           BIT(1)
+#define QED_LINK_PARTNER_SPEED_10G             BIT(2)
+#define QED_LINK_PARTNER_SPEED_20G             BIT(3)
+#define QED_LINK_PARTNER_SPEED_25G             BIT(4)
+#define QED_LINK_PARTNER_SPEED_40G             BIT(5)
+#define QED_LINK_PARTNER_SPEED_50G             BIT(6)
+#define QED_LINK_PARTNER_SPEED_100G            BIT(7)
+
+       bool                                    partner_tx_flow_ctrl_en;
+       bool                                    partner_rx_flow_ctrl_en;
+
+       u8                                      partner_adv_pause;
+#define QED_LINK_PARTNER_SYMMETRIC_PAUSE       0x1
+#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE      0x2
+#define QED_LINK_PARTNER_BOTH_PAUSE            0x3
+
+       bool                                    sfp_tx_fault;
+       bool                                    eee_active;
+       u8                                      eee_adv_caps;
+       u8                                      eee_lp_adv_caps;
+
+       u32                                     fec_active;
 };
 
 struct qed_mcp_function_info {
@@ -774,6 +782,20 @@ struct qed_drv_tlv_hdr {
 };
 
 /**
+ * qed_mcp_is_ext_speed_supported() - Check if management firmware supports
+ *                                    extended speeds.
+ * @p_hwfn: HW device data.
+ *
+ * Return: true if supported, false otherwise.
+ */
+static inline bool
+qed_mcp_is_ext_speed_supported(const struct qed_hwfn *p_hwfn)
+{
+       return !!(p_hwfn->mcp_info->capabilities &
+                 FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL);
+}
+
+/**
  * @brief Initialize the interface with the MCP
  *
  * @param p_hwfn - HW func
@@ -1221,6 +1243,13 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn);
 
 /**
+ * @brief Delete nvm info shadow in the given hardware function
+ *
+ * @param p_hwfn
+ */
+void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn);
+
+/**
  * @brief Get the engine affinity configuration.
  *
  * @param p_hwfn
index 6c16158..3e3192a 100644 (file)
@@ -1,4 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright (c) 2019-2020 Marvell International Ltd. */
+
 #include <linux/types.h>
 #include <asm/byteorder.h>
 #include <linux/bug.h>
@@ -1274,7 +1276,7 @@ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
         */
        for (offset = 0; offset < size; offset += sizeof(u32)) {
                val = qed_rd(p_hwfn, p_ptt, addr + offset);
-               val = be32_to_cpu(val);
+               val = be32_to_cpu((__force __be32)val);
                memcpy(&p_mfw_buf[offset], &val, sizeof(u32));
        }
 
@@ -1323,7 +1325,7 @@ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
         */
        for (offset = 0; offset < size; offset += sizeof(u32)) {
                memcpy(&val, &p_mfw_buf[offset], sizeof(u32));
-               val = cpu_to_be32(val);
+               val = (__force u32)cpu_to_be32(val);
                qed_wr(p_hwfn, p_ptt, addr + offset, val);
        }
 
index ffac4ac..88353aa 100644 (file)
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/types.h>
index 49c4e75..3a7e1b5 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_OOO_H
index 3e61305..2c62d73 100644 (file)
@@ -1,40 +1,16 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
+
 #include <linux/types.h>
 #include "qed.h"
 #include "qed_dev_api.h"
 #include "qed_hw.h"
 #include "qed_l2.h"
 #include "qed_mcp.h"
+#include "qed_ptp.h"
 #include "qed_reg_addr.h"
 
 /* 16 nano second time quantas to wait before making a Drift adjustment */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.h b/drivers/net/ethernet/qlogic/qed/qed_ptp.h
new file mode 100644 (file)
index 0000000..40a11c0
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright (c) 2020 Marvell International Ltd. */
+
+#ifndef __QED_PTP_H
+#define __QED_PTP_H
+
+extern const struct qed_eth_ptp_ops qed_ptp_ops_pass;
+
+#endif /* __QED_PTP_H */
index 19c0c88..a4bcde5 100644 (file)
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
+
 #include <linux/types.h>
 #include <asm/byteorder.h>
 #include <linux/bitops.h>
@@ -404,6 +379,7 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
        qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
        qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1);
        qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1);
+       qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, 1);
 
        kfree(p_rdma_info->port);
        kfree(p_rdma_info->dev);
@@ -1131,7 +1107,7 @@ static int qed_rdma_create_cq(void *rdma_cxt,
        p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
        p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
                           params->cnq_id;
-       p_ramrod->int_timeout = params->int_timeout;
+       p_ramrod->int_timeout = cpu_to_le16(params->int_timeout);
 
        /* toggle the bit for every resize or create cq for a given icid */
        toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
@@ -1231,7 +1207,7 @@ err:      dma_free_coherent(&p_hwfn->cdev->pdev->dev,
        return rc;
 }
 
-void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac)
+void qed_rdma_set_fw_mac(__le16 *p_fw_mac, const u8 *p_qed_mac)
 {
        p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
        p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
@@ -1520,6 +1496,7 @@ qed_rdma_register_tid(void *rdma_cxt,
        struct qed_spq_entry *p_ent;
        enum rdma_tid_type tid_type;
        u8 fw_return_code;
+       u16 flags = 0;
        int rc;
 
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
@@ -1539,54 +1516,46 @@ qed_rdma_register_tid(void *rdma_cxt,
        if (p_hwfn->p_rdma_info->last_tid < params->itid)
                p_hwfn->p_rdma_info->last_tid = params->itid;
 
-       p_ramrod = &p_ent->ramrod.rdma_register_tid;
-
-       p_ramrod->flags = 0;
-       SET_FIELD(p_ramrod->flags,
-                 RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
+       SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
                  params->pbl_two_level);
 
-       SET_FIELD(p_ramrod->flags,
-                 RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, params->zbva);
+       SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED,
+                 params->zbva);
 
-       SET_FIELD(p_ramrod->flags,
-                 RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
+       SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
 
        /* Don't initialize D/C field, as it may override other bits. */
        if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
-               SET_FIELD(p_ramrod->flags,
-                         RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
+               SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
                          params->page_size_log - 12);
 
-       SET_FIELD(p_ramrod->flags,
-                 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
+       SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
                  params->remote_read);
 
-       SET_FIELD(p_ramrod->flags,
-                 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
+       SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
                  params->remote_write);
 
-       SET_FIELD(p_ramrod->flags,
-                 RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
+       SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
                  params->remote_atomic);
 
-       SET_FIELD(p_ramrod->flags,
-                 RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
+       SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
                  params->local_write);
 
-       SET_FIELD(p_ramrod->flags,
-                 RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, params->local_read);
+       SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ,
+                 params->local_read);
 
-       SET_FIELD(p_ramrod->flags,
-                 RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
+       SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
                  params->mw_bind);
 
+       p_ramrod = &p_ent->ramrod.rdma_register_tid;
+       p_ramrod->flags = cpu_to_le16(flags);
+
        SET_FIELD(p_ramrod->flags1,
                  RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
                  params->pbl_page_size_log - 12);
 
-       SET_FIELD(p_ramrod->flags2,
-                 RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, params->dma_mr);
+       SET_FIELD(p_ramrod->flags2, RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR,
+                 params->dma_mr);
 
        switch (params->tid_type) {
        case QED_RDMA_TID_REGISTERED_MR:
@@ -1604,8 +1573,9 @@ qed_rdma_register_tid(void *rdma_cxt,
                qed_sp_destroy_request(p_hwfn, p_ent);
                return rc;
        }
-       SET_FIELD(p_ramrod->flags1,
-                 RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, tid_type);
+
+       SET_FIELD(p_ramrod->flags1, RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE,
+                 tid_type);
 
        p_ramrod->itid = cpu_to_le32(params->itid);
        p_ramrod->key = params->key;
index 1e69d5b..6a1de3a 100644 (file)
@@ -1,34 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
+
 #ifndef _QED_RDMA_H
 #define _QED_RDMA_H
 #include <linux/types.h>
@@ -226,7 +201,7 @@ qed_bmap_release_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num);
 int
 qed_bmap_test_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num);
 
-void qed_rdma_set_fw_mac(u16 *p_fw_mac, u8 *p_qed_mac);
+void qed_rdma_set_fw_mac(__le16 *p_fw_mac, const u8 *p_qed_mac);
 
 bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn);
 #endif
index 3dcb6ff..9db22be 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef REG_ADDR_H
index 4566815..f16a157 100644 (file)
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
+
 #include <linux/types.h>
 #include <asm/byteorder.h>
 #include <linux/bitops.h>
 
 static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
 
-static int
-qed_roce_async_event(struct qed_hwfn *p_hwfn,
-                    u8 fw_event_code,
-                    u16 echo, union event_ring_data *data, u8 fw_return_code)
+static int qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
+                               __le16 echo, union event_ring_data *data,
+                               u8 fw_return_code)
 {
        struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
+       union rdma_eqe_data *rdata = &data->rdma_data;
 
        if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
-               u16 icid =
-                   (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid);
+               u16 icid = (u16)le32_to_cpu(rdata->rdma_destroy_qp_data.cid);
 
                /* icid release in this async event can occur only if the icid
                 * was offloaded to the FW. In case it wasn't offloaded this is
                 * handled in qed_roce_sp_destroy_qp.
                 */
                qed_roce_free_real_icid(p_hwfn, icid);
-       } else {
-               if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY ||
-                   fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) {
-                       u16 srq_id = (u16)data->rdma_data.async_handle.lo;
+       } else if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY ||
+                  fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) {
+               u16 srq_id = (u16)le32_to_cpu(rdata->async_handle.lo);
 
-                       events.affiliated_event(events.context, fw_event_code,
-                                               &srq_id);
-               } else {
-                       union rdma_eqe_data rdata = data->rdma_data;
-
-                       events.affiliated_event(events.context, fw_event_code,
-                                               (void *)&rdata.async_handle);
-               }
+               events.affiliated_event(events.context, fw_event_code,
+                                       &srq_id);
+       } else {
+               events.affiliated_event(events.context, fw_event_code,
+                                       (void *)&rdata->async_handle);
        }
 
        return 0;
@@ -113,7 +83,6 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
                        break;
                }
        }
-       qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
 }
 
 static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
@@ -248,9 +217,9 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
        struct roce_create_qp_resp_ramrod_data *p_ramrod;
        u16 regular_latency_queue, low_latency_queue;
        struct qed_sp_init_data init_data;
-       enum roce_flavor roce_flavor;
        struct qed_spq_entry *p_ent;
        enum protocol_type proto;
+       u32 flags = 0;
        int rc;
        u8 tc;
 
@@ -283,45 +252,34 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
        if (rc)
                goto err;
 
-       p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
-
-       p_ramrod->flags = 0;
+       SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR,
+                 qed_roce_mode_to_flavor(qp->roce_mode));
 
-       roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
-
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
+       SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
                  qp->incoming_rdma_read_en);
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
+       SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
                  qp->incoming_rdma_write_en);
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
+       SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
                  qp->incoming_atomic_en);
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
+       SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
                  qp->e2e_flow_control_en);
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
+       SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
+       SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
                  qp->fmr_and_reserved_lkey);
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
+       SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
                  qp->min_rnr_nak_timer);
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG,
+       SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG,
                  qed_rdma_is_xrc_qp(qp));
 
+       p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
+       p_ramrod->flags = cpu_to_le32(flags);
        p_ramrod->max_ird = qp->max_rd_atomic_resp;
        p_ramrod->traffic_class = qp->traffic_class_tos;
        p_ramrod->hop_limit = qp->hop_limit_ttl;
@@ -336,10 +294,10 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
        DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
        DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
        qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
-       p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
-       p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
-       p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
-       p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
+       p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi;
+       p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo;
+       p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
+       p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
        p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
                                       qp->rq_cq_id);
        p_ramrod->xrc_domain = cpu_to_le16(qp->xrcd_id);
@@ -361,7 +319,7 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
        qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
        qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
 
-       p_ramrod->udp_src_port = qp->udp_src_port;
+       p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port);
        p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
        p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
        p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
@@ -397,9 +355,9 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
        struct roce_create_qp_req_ramrod_data *p_ramrod;
        u16 regular_latency_queue, low_latency_queue;
        struct qed_sp_init_data init_data;
-       enum roce_flavor roce_flavor;
        struct qed_spq_entry *p_ent;
        enum protocol_type proto;
+       u16 flags = 0;
        int rc;
        u8 tc;
 
@@ -433,34 +391,29 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
        if (rc)
                goto err;
 
-       p_ramrod = &p_ent->ramrod.roce_create_qp_req;
-
-       p_ramrod->flags = 0;
+       SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR,
+                 qed_roce_mode_to_flavor(qp->roce_mode));
 
-       roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
-
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
+       SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
                  qp->fmr_and_reserved_lkey);
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
+       SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP,
+                 qp->signal_all);
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
+       SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT,
+                 qp->retry_cnt);
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
+       SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
                  qp->rnr_retry_cnt);
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG,
+       SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG,
                  qed_rdma_is_xrc_qp(qp));
 
-       SET_FIELD(p_ramrod->flags2,
-                 ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE, qp->edpm_mode);
+       p_ramrod = &p_ent->ramrod.roce_create_qp_req;
+       p_ramrod->flags = cpu_to_le16(flags);
+
+       SET_FIELD(p_ramrod->flags2, ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE,
+                 qp->edpm_mode);
 
        p_ramrod->max_ord = qp->max_rd_atomic_req;
        p_ramrod->traffic_class = qp->traffic_class_tos;
@@ -477,10 +430,10 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
        DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
        DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
        qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
-       p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
-       p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
-       p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
-       p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
+       p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi;
+       p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo;
+       p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
+       p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
        p_ramrod->cq_cid =
            cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
 
@@ -501,7 +454,7 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
        qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
        qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
 
-       p_ramrod->udp_src_port = qp->udp_src_port;
+       p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port);
        p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
        p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
                                     qp->stats_queue;
@@ -533,6 +486,7 @@ static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
        struct roce_modify_qp_resp_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
+       u16 flags = 0;
        int rc;
 
        if (!qp->has_resp)
@@ -557,53 +511,43 @@ static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
                return rc;
        }
 
-       p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
-
-       p_ramrod->flags = 0;
+       SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG,
+                 !!move_to_err);
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
-
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
+       SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
                  qp->incoming_rdma_read_en);
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
+       SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
                  qp->incoming_rdma_write_en);
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
+       SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
                  qp->incoming_atomic_en);
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
+       SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
                  qp->e2e_flow_control_en);
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
+       SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
                  GET_FIELD(modify_flags,
                            QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
+       SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
                  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
+       SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
                  GET_FIELD(modify_flags,
                            QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
+       SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
                  GET_FIELD(modify_flags,
                            QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
+       SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
                  GET_FIELD(modify_flags,
                            QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));
 
+       p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
+       p_ramrod->flags = cpu_to_le16(flags);
+
        p_ramrod->fields = 0;
        SET_FIELD(p_ramrod->fields,
                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
@@ -630,6 +574,7 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
        struct roce_modify_qp_req_ramrod_data *p_ramrod;
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
+       u16 flags = 0;
        int rc;
 
        if (!qp->has_req)
@@ -654,54 +599,44 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
                return rc;
        }
 
-       p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
-
-       p_ramrod->flags = 0;
+       SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG,
+                 !!move_to_err);
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
+       SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG,
+                 !!move_to_sqd);
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);
-
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
+       SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
                  qp->sqd_async);
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
+       SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
                  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
+       SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
                  GET_FIELD(modify_flags,
                            QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
+       SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
                  GET_FIELD(modify_flags,
                            QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
+       SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
                  GET_FIELD(modify_flags,
                            QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
+       SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
                  GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));
 
-       SET_FIELD(p_ramrod->flags,
-                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
+       SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
                  GET_FIELD(modify_flags,
                            QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));
 
+       p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
+       p_ramrod->flags = cpu_to_le16(flags);
+
        p_ramrod->fields = 0;
        SET_FIELD(p_ramrod->fields,
                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
-
-       SET_FIELD(p_ramrod->fields,
-                 ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
+       SET_FIELD(p_ramrod->fields, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
                  qp->rnr_retry_cnt);
 
        p_ramrod->max_ord = qp->max_rd_atomic_req;
@@ -822,8 +757,7 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
        if (!qp->req_offloaded)
                return 0;
 
-       p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
-                      dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+       p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                          sizeof(*p_ramrod_res),
                                          &ramrod_res_phys, GFP_KERNEL);
        if (!p_ramrod_res) {
index f801f39..3a4a2d7 100644 (file)
@@ -1,34 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
+
 #ifndef _QED_ROCE_H
 #define _QED_ROCE_H
 #include <linux/types.h>
index cf1d447..6e70781 100644 (file)
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2016  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/crc32.h>
index ad00d08..e27dd9a 100644 (file)
@@ -1,4 +1,6 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright (c) 2019-2020 Marvell International Ltd. */
+
 #ifndef _QED_SELFTEST_API_H
 #define _QED_SELFTEST_API_H
 #include <linux/types.h>
index b7b4fbb..993f135 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_SP_H
@@ -180,12 +154,9 @@ struct qed_consq {
        struct qed_chain chain;
 };
 
-typedef int
-(*qed_spq_async_comp_cb)(struct qed_hwfn *p_hwfn,
-                        u8 opcode,
-                        u16 echo,
-                        union event_ring_data *data,
-                        u8 fw_return_code);
+typedef int (*qed_spq_async_comp_cb)(struct qed_hwfn *p_hwfn, u8 opcode,
+                                    __le16 echo, union event_ring_data *data,
+                                    u8 fw_return_code);
 
 int
 qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
index 900bc60..aa71adc 100644 (file)
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/types.h>
@@ -326,6 +300,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
                    struct qed_tunnel_info *p_tunn,
                    bool allow_npar_tx_switch)
 {
+       struct outer_tag_config_struct *outer_tag_config;
        struct pf_start_ramrod_data *p_ramrod = NULL;
        u16 sb = qed_int_get_sp_sb_id(p_hwfn);
        u8 sb_index = p_hwfn->p_eq->eq_sb_index;
@@ -362,39 +337,40 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
        else
                p_ramrod->mf_mode = MF_NPAR;
 
-       p_ramrod->outer_tag_config.outer_tag.tci =
-                               cpu_to_le16(p_hwfn->hw_info.ovlan);
+       outer_tag_config = &p_ramrod->outer_tag_config;
+       outer_tag_config->outer_tag.tci = cpu_to_le16(p_hwfn->hw_info.ovlan);
+
        if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) {
-               p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q;
+               outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021Q);
        } else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) {
-               p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD;
-               p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
+               outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021AD);
+               outer_tag_config->enable_stag_pri_change = 1;
        }
 
-       p_ramrod->outer_tag_config.pri_map_valid = 1;
+       outer_tag_config->pri_map_valid = 1;
        for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
-               p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;
+               outer_tag_config->inner_to_outer_pri_map[i] = i;
 
        /* enable_stag_pri_change should be set if port is in BD mode or,
         * UFP with Host Control mode.
         */
        if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) {
                if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
-                       p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
+                       outer_tag_config->enable_stag_pri_change = 1;
                else
-                       p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
+                       outer_tag_config->enable_stag_pri_change = 0;
 
-               p_ramrod->outer_tag_config.outer_tag.tci |=
+               outer_tag_config->outer_tag.tci |=
                    cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
        }
 
        /* Place EQ address in RAMROD */
        DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
-                      p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
+                      qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain));
        page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
        p_ramrod->event_ring_num_pages = page_cnt;
        DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
-                      p_hwfn->p_consq->chain.pbl_sp.p_phys_table);
+                      qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain));
 
        qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
 
@@ -432,7 +408,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 
        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
-                  sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tci);
+                  sb, sb_index, outer_tag_config->outer_tag.tci);
 
        rc = qed_spq_post(p_hwfn, p_ent, NULL);
 
index 790c28d..0bc1a0a 100644 (file)
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/types.h>
@@ -408,22 +382,26 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 
 int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
 {
+       struct qed_chain_init_params params = {
+               .mode           = QED_CHAIN_MODE_PBL,
+               .intended_use   = QED_CHAIN_USE_TO_PRODUCE,
+               .cnt_type       = QED_CHAIN_CNT_TYPE_U16,
+               .num_elems      = num_elem,
+               .elem_size      = sizeof(union event_ring_element),
+       };
        struct qed_eq *p_eq;
+       int ret;
 
        /* Allocate EQ struct */
        p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
        if (!p_eq)
                return -ENOMEM;
 
-       /* Allocate and initialize EQ chain*/
-       if (qed_chain_alloc(p_hwfn->cdev,
-                           QED_CHAIN_USE_TO_PRODUCE,
-                           QED_CHAIN_MODE_PBL,
-                           QED_CHAIN_CNT_TYPE_U16,
-                           num_elem,
-                           sizeof(union event_ring_element),
-                           &p_eq->chain, NULL))
+       ret = qed_chain_alloc(p_hwfn->cdev, &p_eq->chain, &params);
+       if (ret) {
+               DP_NOTICE(p_hwfn, "Failed to allocate EQ chain\n");
                goto eq_allocate_fail;
+       }
 
        /* register EQ completion on the SP SB */
        qed_int_register_cb(p_hwfn, qed_eq_completion,
@@ -434,7 +412,8 @@ int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
 
 eq_allocate_fail:
        kfree(p_eq);
-       return -ENOMEM;
+
+       return ret;
 }
 
 void qed_eq_setup(struct qed_hwfn *p_hwfn)
@@ -555,33 +534,40 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
 
 int qed_spq_alloc(struct qed_hwfn *p_hwfn)
 {
+       struct qed_chain_init_params params = {
+               .mode           = QED_CHAIN_MODE_SINGLE,
+               .intended_use   = QED_CHAIN_USE_TO_PRODUCE,
+               .cnt_type       = QED_CHAIN_CNT_TYPE_U16,
+               .elem_size      = sizeof(struct slow_path_element),
+       };
+       struct qed_dev *cdev = p_hwfn->cdev;
        struct qed_spq_entry *p_virt = NULL;
        struct qed_spq *p_spq = NULL;
        dma_addr_t p_phys = 0;
        u32 capacity;
+       int ret;
 
        /* SPQ struct */
        p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
        if (!p_spq)
                return -ENOMEM;
 
-       /* SPQ ring  */
-       if (qed_chain_alloc(p_hwfn->cdev,
-                           QED_CHAIN_USE_TO_PRODUCE,
-                           QED_CHAIN_MODE_SINGLE,
-                           QED_CHAIN_CNT_TYPE_U16,
-                           0,   /* N/A when the mode is SINGLE */
-                           sizeof(struct slow_path_element),
-                           &p_spq->chain, NULL))
-               goto spq_allocate_fail;
+       /* SPQ ring */
+       ret = qed_chain_alloc(cdev, &p_spq->chain, &params);
+       if (ret) {
+               DP_NOTICE(p_hwfn, "Failed to allocate SPQ chain\n");
+               goto spq_chain_alloc_fail;
+       }
 
        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        capacity = qed_chain_get_capacity(&p_spq->chain);
-       p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+       ret = -ENOMEM;
+
+       p_virt = dma_alloc_coherent(&cdev->pdev->dev,
                                    capacity * sizeof(struct qed_spq_entry),
                                    &p_phys, GFP_KERNEL);
        if (!p_virt)
-               goto spq_allocate_fail;
+               goto spq_alloc_fail;
 
        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;
@@ -589,10 +575,12 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
 
        return 0;
 
-spq_allocate_fail:
-       qed_chain_free(p_hwfn->cdev, &p_spq->chain);
+spq_alloc_fail:
+       qed_chain_free(cdev, &p_spq->chain);
+spq_chain_alloc_fail:
        kfree(p_spq);
-       return -ENOMEM;
+
+       return ret;
 }
 
 void qed_spq_free(struct qed_hwfn *p_hwfn)
@@ -668,18 +656,18 @@ void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
 }
 
 /**
- * @brief qed_spq_add_entry - adds a new entry to the pending
- *        list. Should be used while lock is being held.
+ * qed_spq_add_entry() - Add a new entry to the pending list.
+ *                       Should be used while lock is being held.
  *
- * Addes an entry to the pending list is there is room (en empty
+ * @p_hwfn: HW device data.
+ * @p_ent: An entry to add.
+ * @priority: Desired priority.
+ *
+ * Adds an entry to the pending list if there is room (an empty
  * element is available in the free_pool), or else places the
  * entry in the unlimited_pending pool.
  *
- * @param p_hwfn
- * @param p_ent
- * @param priority
- *
- * @return int
+ * Return: zero on success, -EINVAL on invalid @priority.
  */
 static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
                             struct qed_spq_entry *p_ent,
@@ -993,30 +981,40 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
        return 0;
 }
 
+#define QED_SPQ_CONSQ_ELEM_SIZE                0x80
+
 int qed_consq_alloc(struct qed_hwfn *p_hwfn)
 {
+       struct qed_chain_init_params params = {
+               .mode           = QED_CHAIN_MODE_PBL,
+               .intended_use   = QED_CHAIN_USE_TO_PRODUCE,
+               .cnt_type       = QED_CHAIN_CNT_TYPE_U16,
+               .num_elems      = QED_CHAIN_PAGE_SIZE / QED_SPQ_CONSQ_ELEM_SIZE,
+               .elem_size      = QED_SPQ_CONSQ_ELEM_SIZE,
+       };
        struct qed_consq *p_consq;
+       int ret;
 
        /* Allocate ConsQ struct */
        p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
        if (!p_consq)
                return -ENOMEM;
 
-       /* Allocate and initialize EQ chain*/
-       if (qed_chain_alloc(p_hwfn->cdev,
-                           QED_CHAIN_USE_TO_PRODUCE,
-                           QED_CHAIN_MODE_PBL,
-                           QED_CHAIN_CNT_TYPE_U16,
-                           QED_CHAIN_PAGE_SIZE / 0x80,
-                           0x80, &p_consq->chain, NULL))
-               goto consq_allocate_fail;
+       /* Allocate and initialize ConsQ chain */
+       ret = qed_chain_alloc(p_hwfn->cdev, &p_consq->chain, &params);
+       if (ret) {
+               DP_NOTICE(p_hwfn, "Failed to allocate ConsQ chain");
+               goto consq_alloc_fail;
+       }
 
        p_hwfn->p_consq = p_consq;
+
        return 0;
 
-consq_allocate_fail:
+consq_alloc_fail:
        kfree(p_consq);
-       return -ENOMEM;
+
+       return ret;
 }
 
 void qed_consq_setup(struct qed_hwfn *p_hwfn)
index 20679fd..aa215ee 100644 (file)
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/etherdevice.h>
@@ -849,16 +823,17 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
 }
 
 /**
- * @brief qed_iov_config_perm_table - configure the permission
- *      zone table.
- *      In E4, queue zone permission table size is 320x9. There
- *      are 320 VF queues for single engine device (256 for dual
- *      engine device), and each entry has the following format:
- *      {Valid, VF[7:0]}
- * @param p_hwfn
- * @param p_ptt
- * @param vf
- * @param enable
+ * qed_iov_config_perm_table() - Configure the permission zone table.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window for writing the registers.
+ * @vf: VF info data.
+ * @enable: The actual permission for this VF.
+ *
+ * In E4, queue zone permission table size is 320x9. There
+ * are 320 VF queues for single engine device (256 for dual
+ * engine device), and each entry has the following format:
+ * {Valid, VF[7:0]}
  */
 static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
@@ -4030,7 +4005,7 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
        /* List the physical address of the request so that handler
         * could later on copy the message from it.
         */
-       p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
+       p_vf->vf_mbx.pending_req = HILO_64(vf_msg->hi, vf_msg->lo);
 
        /* Mark the event and schedule the workqueue */
        p_vf->vf_mbx.b_pending_msg = true;
@@ -4062,9 +4037,7 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
        }
 }
 
-static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
-                              u8 opcode,
-                              __le16 echo,
+static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
                               union event_ring_data *data, u8 fw_return_code)
 {
        switch (opcode) {
index 368e885..eacd645 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_SRIOV_H
@@ -272,6 +246,8 @@ enum qed_iov_wq_flag {
        QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
 };
 
+extern const struct qed_iov_hv_ops qed_iov_ops_pass;
+
 #ifdef CONFIG_QED_SRIOV
 /**
  * @brief Check if given VF ID @vfid is valid
index 856051f..72a38d5 100644 (file)
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/crc32.h>
@@ -81,12 +55,17 @@ static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
        mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
 }
 
+#define QED_VF_CHANNEL_USLEEP_ITERATIONS       90
+#define QED_VF_CHANNEL_USLEEP_DELAY            100
+#define QED_VF_CHANNEL_MSLEEP_ITERATIONS       10
+#define QED_VF_CHANNEL_MSLEEP_DELAY            25
+
 static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
 {
        union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
        struct ustorm_trigger_vf_zone trigger;
        struct ustorm_vf_zone *zone_data;
-       int rc = 0, time = 100;
+       int iter, rc = 0;
 
        zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
 
@@ -126,11 +105,19 @@ static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
        REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
 
        /* When PF would be done with the response, it would write back to the
-        * `done' address. Poll until then.
+        * `done' address from a coherent DMA zone. Poll until then.
         */
-       while ((!*done) && time) {
-               msleep(25);
-               time--;
+
+       iter = QED_VF_CHANNEL_USLEEP_ITERATIONS;
+       while (!*done && iter--) {
+               udelay(QED_VF_CHANNEL_USLEEP_DELAY);
+               dma_rmb();
+       }
+
+       iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS;
+       while (!*done && iter--) {
+               msleep(QED_VF_CHANNEL_MSLEEP_DELAY);
+               dma_rmb();
        }
 
        if (!*done) {
index 033409d..60d2bb6 100644 (file)
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
  */
 
 #ifndef _QED_VF_H
index 3fc91d1..a6e8d9f 100644 (file)
@@ -1,4 +1,6 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+# Copyright (c) 2019-2020 Marvell International Ltd.
+
 obj-$(CONFIG_QEDE) := qede.o
 
 qede-y := qede_main.o qede_fp.o qede_filter.o qede_ethtool.o qede_ptp.o
index 8857da1..803c1fc 100644 (file)
@@ -1,34 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qede NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
+
 #ifndef _QEDE_H_
 #define _QEDE_H_
 #include <linux/compiler.h>
@@ -201,16 +176,17 @@ struct qede_dev {
        u32                             dp_module;
        u8                              dp_level;
 
-       unsigned long flags;
-#define IS_VF(edev)    (test_bit(QEDE_FLAGS_IS_VF, &(edev)->flags))
+       unsigned long                   flags;
+#define IS_VF(edev)                    test_bit(QEDE_FLAGS_IS_VF, \
+                                                &(edev)->flags)
 
        const struct qed_eth_ops        *ops;
        struct qede_ptp                 *ptp;
        u64                             ptp_skip_txts;
 
-       struct qed_dev_eth_info dev_info;
-#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
-#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues)
+       struct qed_dev_eth_info         dev_info;
+#define QEDE_MAX_RSS_CNT(edev)         ((edev)->dev_info.num_queues)
+#define QEDE_MAX_TSS_CNT(edev)         ((edev)->dev_info.num_queues)
 #define QEDE_IS_BB(edev) \
        ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB)
 #define QEDE_IS_AH(edev) \
@@ -223,14 +199,16 @@ struct qede_dev {
        u8                              fp_num_rx;
        u16                             req_queues;
        u16                             num_queues;
-#define QEDE_QUEUE_CNT(edev)   ((edev)->num_queues)
-#define QEDE_RSS_COUNT(edev)   ((edev)->num_queues - (edev)->fp_num_tx)
+       u16                             total_xdp_queues;
+
+#define QEDE_QUEUE_CNT(edev)           ((edev)->num_queues)
+#define QEDE_RSS_COUNT(edev)           ((edev)->num_queues - (edev)->fp_num_tx)
 #define QEDE_RX_QUEUE_IDX(edev, i)     (i)
-#define QEDE_TSS_COUNT(edev)   ((edev)->num_queues - (edev)->fp_num_rx)
+#define QEDE_TSS_COUNT(edev)           ((edev)->num_queues - (edev)->fp_num_rx)
 
        struct qed_int_info             int_info;
 
-       /* Smaller private varaiant of the RTNL lock */
+       /* Smaller private variant of the RTNL lock */
        struct mutex                    qede_lock;
        u32                             state; /* Protected by qede_lock */
        u16                             rx_buf_size;
@@ -251,22 +229,28 @@ struct qede_dev {
              SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
        struct qede_stats               stats;
-#define QEDE_RSS_INDIR_INITED  BIT(0)
-#define QEDE_RSS_KEY_INITED    BIT(1)
-#define QEDE_RSS_CAPS_INITED   BIT(2)
-       u32 rss_params_inited; /* bit-field to track initialized rss params */
-       u16 rss_ind_table[128];
-       u32 rss_key[10];
-       u8 rss_caps;
-
-       u16                     q_num_rx_buffers; /* Must be a power of two */
-       u16                     q_num_tx_buffers; /* Must be a power of two */
-
-       bool gro_disable;
-       struct list_head vlan_list;
-       u16 configured_vlans;
-       u16 non_configured_vlans;
-       bool accept_any_vlan;
+
+       /* Bitfield to track initialized RSS params */
+       u32                             rss_params_inited;
+#define QEDE_RSS_INDIR_INITED          BIT(0)
+#define QEDE_RSS_KEY_INITED            BIT(1)
+#define QEDE_RSS_CAPS_INITED           BIT(2)
+
+       u16                             rss_ind_table[128];
+       u32                             rss_key[10];
+       u8                              rss_caps;
+
+       /* Both must be a power of two */
+       u16                             q_num_rx_buffers;
+       u16                             q_num_tx_buffers;
+
+       bool                            gro_disable;
+
+       struct list_head                vlan_list;
+       u16                             configured_vlans;
+       u16                             non_configured_vlans;
+       bool                            accept_any_vlan;
+
        struct delayed_work             sp_task;
        unsigned long                   sp_flags;
        u16                             vxlan_dst_port;
@@ -277,14 +261,14 @@ struct qede_dev {
 
        struct qede_rdma_dev            rdma_info;
 
-       struct bpf_prog *xdp_prog;
+       struct bpf_prog                 *xdp_prog;
 
-       unsigned long err_flags;
-#define QEDE_ERR_IS_HANDLED    31
-#define QEDE_ERR_ATTN_CLR_EN   0
-#define QEDE_ERR_GET_DBG_INFO  1
-#define QEDE_ERR_IS_RECOVERABLE        2
-#define QEDE_ERR_WARN          3
+       unsigned long                   err_flags;
+#define QEDE_ERR_IS_HANDLED            31
+#define QEDE_ERR_ATTN_CLR_EN           0
+#define QEDE_ERR_GET_DBG_INFO          1
+#define QEDE_ERR_IS_RECOVERABLE                2
+#define QEDE_ERR_WARN                  3
 
        struct qede_dump_info           dump_info;
 };
@@ -397,29 +381,34 @@ struct sw_tx_bd {
 };
 
 struct sw_tx_xdp {
-       struct page *page;
-       dma_addr_t mapping;
+       struct page                     *page;
+       struct xdp_frame                *xdpf;
+       dma_addr_t                      mapping;
 };
 
 struct qede_tx_queue {
-       u8 is_xdp;
-       bool is_legacy;
-       u16 sw_tx_cons;
-       u16 sw_tx_prod;
-       u16 num_tx_buffers; /* Slowpath only */
+       u8                              is_xdp;
+       bool                            is_legacy;
+       u16                             sw_tx_cons;
+       u16                             sw_tx_prod;
+       u16                             num_tx_buffers; /* Slowpath only */
 
-       u64 xmit_pkts;
-       u64 stopped_cnt;
-       u64 tx_mem_alloc_err;
+       u64                             xmit_pkts;
+       u64                             stopped_cnt;
+       u64                             tx_mem_alloc_err;
 
-       __le16 *hw_cons_ptr;
+       __le16                          *hw_cons_ptr;
 
        /* Needed for the mapping of packets */
-       struct device *dev;
+       struct device                   *dev;
+
+       void __iomem                    *doorbell_addr;
+       union db_prod                   tx_db;
+
+       /* Spinlock for XDP queues in case of XDP_REDIRECT */
+       spinlock_t                      xdp_tx_lock;
 
-       void __iomem *doorbell_addr;
-       union db_prod tx_db;
-       int index; /* Slowpath only */
+       int                             index; /* Slowpath only */
 #define QEDE_TXQ_XDP_TO_IDX(edev, txq) ((txq)->index - \
                                         QEDE_MAX_TSS_CNT(edev))
 #define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev))
@@ -431,22 +420,22 @@ struct qede_tx_queue {
 #define QEDE_NDEV_TXQ_ID_TO_TXQ(edev, idx)     \
        (&((edev)->fp_array[QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)].txq \
        [QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)]))
-#define QEDE_FP_TC0_TXQ(fp)    (&((fp)->txq[0]))
+#define QEDE_FP_TC0_TXQ(fp)            (&((fp)->txq[0]))
 
        /* Regular Tx requires skb + metadata for release purpose,
         * while XDP requires the pages and the mapped address.
         */
        union {
-               struct sw_tx_bd *skbs;
-               struct sw_tx_xdp *xdp;
-       } sw_tx_ring;
+               struct sw_tx_bd         *skbs;
+               struct sw_tx_xdp        *xdp;
+       }                               sw_tx_ring;
 
-       struct qed_chain tx_pbl;
+       struct qed_chain                tx_pbl;
 
        /* Slowpath; Should be kept in end [unless missing padding] */
-       void *handle;
-       u16 cos;
-       u16 ndev_txq_id;
+       void                            *handle;
+       u16                             cos;
+       u16                             ndev_txq_id;
 };
 
 #define BD_UNMAP_ADDR(bd)              HILO_U64(le32_to_cpu((bd)->addr.hi), \
@@ -460,32 +449,37 @@ struct qede_tx_queue {
 #define BD_UNMAP_LEN(bd)               (le16_to_cpu((bd)->nbytes))
 
 struct qede_fastpath {
-       struct qede_dev *edev;
-#define QEDE_FASTPATH_TX       BIT(0)
-#define QEDE_FASTPATH_RX       BIT(1)
-#define QEDE_FASTPATH_XDP      BIT(2)
-#define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
-       u8                      type;
-       u8                      id;
-       u8                      xdp_xmit;
-       struct napi_struct      napi;
-       struct qed_sb_info      *sb_info;
-       struct qede_rx_queue    *rxq;
-       struct qede_tx_queue    *txq;
-       struct qede_tx_queue    *xdp_tx;
-
-#define VEC_NAME_SIZE  (sizeof_field(struct net_device, name) + 8)
-       char    name[VEC_NAME_SIZE];
+       struct qede_dev                 *edev;
+
+       u8                              type;
+#define QEDE_FASTPATH_TX               BIT(0)
+#define QEDE_FASTPATH_RX               BIT(1)
+#define QEDE_FASTPATH_XDP              BIT(2)
+#define QEDE_FASTPATH_COMBINED         (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
+
+       u8                              id;
+
+       u8                              xdp_xmit;
+#define QEDE_XDP_TX                    BIT(0)
+#define QEDE_XDP_REDIRECT              BIT(1)
+
+       struct napi_struct              napi;
+       struct qed_sb_info              *sb_info;
+       struct qede_rx_queue            *rxq;
+       struct qede_tx_queue            *txq;
+       struct qede_tx_queue            *xdp_tx;
+
+       char                            name[IFNAMSIZ + 8];
 };
 
 /* Debug print definitions */
-#define DP_NAME(edev) ((edev)->ndev->name)
+#define DP_NAME(edev)                  netdev_name((edev)->ndev)
 
-#define XMIT_PLAIN             0
-#define XMIT_L4_CSUM           BIT(0)
-#define XMIT_LSO               BIT(1)
-#define XMIT_ENC               BIT(2)
-#define XMIT_ENC_GSO_L4_CSUM   BIT(3)
+#define XMIT_PLAIN                     0
+#define XMIT_L4_CSUM                   BIT(0)
+#define XMIT_LSO                       BIT(1)
+#define XMIT_ENC                       BIT(2)
+#define XMIT_ENC_GSO_L4_CSUM           BIT(3)
 
 #define QEDE_CSUM_ERROR                        BIT(0)
 #define QEDE_CSUM_UNNECESSARY          BIT(1)
@@ -528,6 +522,8 @@ struct qede_reload_args {
 
 /* Datapath functions definition */
 netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+int qede_xdp_transmit(struct net_device *dev, int n_frames,
+                     struct xdp_frame **frames, u32 flags);
 u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
                      struct net_device *sb_dev);
 netdev_features_t qede_features_check(struct sk_buff *skb,
@@ -568,6 +564,7 @@ void qede_set_dcbnl_ops(struct net_device *ndev);
 
 void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
 void qede_set_ethtool_ops(struct net_device *netdev);
+void qede_set_udp_tunnels(struct qede_dev *edev);
 void qede_reload(struct qede_dev *edev,
                 struct qede_reload_args *args, bool is_locked);
 int qede_change_mtu(struct net_device *dev, int new_mtu);
@@ -581,6 +578,8 @@ void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
 int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
                            struct flow_cls_offload *f);
 
+void qede_forced_speed_maps_init(void);
+
 #define RX_RING_SIZE_POW       13
 #define RX_RING_SIZE           ((u16)BIT(RX_RING_SIZE_POW))
 #define NUM_RX_BDS_MAX         (RX_RING_SIZE - 1)
index e6e844a..2763369 100644 (file)
@@ -1,7 +1,8 @@
-// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qede NIC Driver
-* Copyright (c) 2015 QLogic Corporation
-*/
+ * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
 
 #include <linux/types.h>
 #include <linux/netdevice.h>
index 24cc683..b9aa638 100644 (file)
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qede NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
+
 #include <linux/version.h>
 #include <linux/types.h>
 #include <linux/netdevice.h>
@@ -38,6 +13,8 @@
 #include <linux/pci.h>
 #include <linux/capability.h>
 #include <linux/vmalloc.h>
+#include <linux/phylink.h>
+
 #include "qede.h"
 #include "qede_ptp.h"
 
@@ -219,6 +196,96 @@ static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
        "Nvram (online)\t\t",
 };
 
+/* Forced speed capabilities maps */
+
+struct qede_forced_speed_map {
+       u32             speed;
+       __ETHTOOL_DECLARE_LINK_MODE_MASK(caps);
+
+       const u32       *cap_arr;
+       u32             arr_size;
+};
+
+#define QEDE_FORCED_SPEED_MAP(value)                                   \
+{                                                                      \
+       .speed          = SPEED_##value,                                \
+       .cap_arr        = qede_forced_speed_##value,                    \
+       .arr_size       = ARRAY_SIZE(qede_forced_speed_##value),        \
+}
+
+static const u32 qede_forced_speed_1000[] __initconst = {
+       ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+       ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+       ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+};
+
+static const u32 qede_forced_speed_10000[] __initconst = {
+       ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+       ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+       ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+};
+
+static const u32 qede_forced_speed_20000[] __initconst = {
+       ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+};
+
+static const u32 qede_forced_speed_25000[] __initconst = {
+       ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+       ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+       ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+};
+
+static const u32 qede_forced_speed_40000[] __initconst = {
+       ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+       ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+       ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+       ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+};
+
+static const u32 qede_forced_speed_50000[] __initconst = {
+       ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+       ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+       ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+};
+
+static const u32 qede_forced_speed_100000[] __initconst = {
+       ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+       ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+       ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+       ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+};
+
+static struct qede_forced_speed_map qede_forced_speed_maps[] __ro_after_init = {
+       QEDE_FORCED_SPEED_MAP(1000),
+       QEDE_FORCED_SPEED_MAP(10000),
+       QEDE_FORCED_SPEED_MAP(20000),
+       QEDE_FORCED_SPEED_MAP(25000),
+       QEDE_FORCED_SPEED_MAP(40000),
+       QEDE_FORCED_SPEED_MAP(50000),
+       QEDE_FORCED_SPEED_MAP(100000),
+};
+
+void __init qede_forced_speed_maps_init(void)
+{
+       struct qede_forced_speed_map *map;
+       u32 i;
+
+       for (i = 0; i < ARRAY_SIZE(qede_forced_speed_maps); i++) {
+               map = qede_forced_speed_maps + i;
+
+               linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);
+               map->cap_arr = NULL;
+               map->arr_size = 0;
+       }
+}
+
+/* Ethtool callbacks */
+
 static void qede_get_strings_stats_txq(struct qede_dev *edev,
                                       struct qede_tx_queue *txq, u8 **buf)
 {
@@ -443,76 +510,10 @@ static int qede_set_priv_flags(struct net_device *dev, u32 flags)
        return 0;
 }
 
-struct qede_link_mode_mapping {
-       u32 qed_link_mode;
-       u32 ethtool_link_mode;
-};
-
-static const struct qede_link_mode_mapping qed_lm_map[] = {
-       {QED_LM_FIBRE_BIT, ETHTOOL_LINK_MODE_FIBRE_BIT},
-       {QED_LM_Autoneg_BIT, ETHTOOL_LINK_MODE_Autoneg_BIT},
-       {QED_LM_Asym_Pause_BIT, ETHTOOL_LINK_MODE_Asym_Pause_BIT},
-       {QED_LM_Pause_BIT, ETHTOOL_LINK_MODE_Pause_BIT},
-       {QED_LM_1000baseT_Full_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
-       {QED_LM_10000baseT_Full_BIT, ETHTOOL_LINK_MODE_10000baseT_Full_BIT},
-       {QED_LM_TP_BIT, ETHTOOL_LINK_MODE_TP_BIT},
-       {QED_LM_Backplane_BIT, ETHTOOL_LINK_MODE_Backplane_BIT},
-       {QED_LM_1000baseKX_Full_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT},
-       {QED_LM_10000baseKX4_Full_BIT, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT},
-       {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
-       {QED_LM_10000baseKR_Full_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
-       {QED_LM_10000baseR_FEC_BIT, ETHTOOL_LINK_MODE_10000baseR_FEC_BIT},
-       {QED_LM_20000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT},
-       {QED_LM_40000baseKR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT},
-       {QED_LM_40000baseCR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT},
-       {QED_LM_40000baseSR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT},
-       {QED_LM_40000baseLR4_Full_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
-       {QED_LM_25000baseCR_Full_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT},
-       {QED_LM_25000baseKR_Full_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
-       {QED_LM_25000baseSR_Full_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT},
-       {QED_LM_50000baseCR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT},
-       {QED_LM_50000baseKR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
-       {QED_LM_100000baseKR4_Full_BIT,
-               ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
-       {QED_LM_100000baseSR4_Full_BIT,
-               ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT},
-       {QED_LM_100000baseCR4_Full_BIT,
-               ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT},
-       {QED_LM_100000baseLR4_ER4_Full_BIT,
-               ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT},
-       {QED_LM_50000baseSR2_Full_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT},
-       {QED_LM_1000baseX_Full_BIT, ETHTOOL_LINK_MODE_1000baseX_Full_BIT},
-       {QED_LM_10000baseCR_Full_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT},
-       {QED_LM_10000baseSR_Full_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT},
-       {QED_LM_10000baseLR_Full_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT},
-       {QED_LM_10000baseLRM_Full_BIT, ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT},
-};
-
-#define QEDE_DRV_TO_ETHTOOL_CAPS(caps, lk_ksettings, name)     \
-{                                                              \
-       int i;                                                  \
-                                                               \
-       for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) {          \
-               if ((caps) & (qed_lm_map[i].qed_link_mode))     \
-                       __set_bit(qed_lm_map[i].ethtool_link_mode,\
-                                 lk_ksettings->link_modes.name); \
-       }                                                       \
-}
-
-#define QEDE_ETHTOOL_TO_DRV_CAPS(caps, lk_ksettings, name)     \
-{                                                              \
-       int i;                                                  \
-                                                               \
-       for (i = 0; i < ARRAY_SIZE(qed_lm_map); i++) {          \
-               if (test_bit(qed_lm_map[i].ethtool_link_mode,   \
-                            lk_ksettings->link_modes.name))    \
-                       caps |= qed_lm_map[i].qed_link_mode;    \
-       }                                                       \
-}
-
 static int qede_get_link_ksettings(struct net_device *dev,
                                   struct ethtool_link_ksettings *cmd)
 {
+       typeof(cmd->link_modes) *link_modes = &cmd->link_modes;
        struct ethtool_link_settings *base = &cmd->base;
        struct qede_dev *edev = netdev_priv(dev);
        struct qed_link_output current_link;
@@ -522,14 +523,9 @@ static int qede_get_link_ksettings(struct net_device *dev,
        memset(&current_link, 0, sizeof(current_link));
        edev->ops->common->get_link(edev->cdev, &current_link);
 
-       ethtool_link_ksettings_zero_link_mode(cmd, supported);
-       QEDE_DRV_TO_ETHTOOL_CAPS(current_link.supported_caps, cmd, supported)
-
-       ethtool_link_ksettings_zero_link_mode(cmd, advertising);
-       QEDE_DRV_TO_ETHTOOL_CAPS(current_link.advertised_caps, cmd, advertising)
-
-       ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising);
-       QEDE_DRV_TO_ETHTOOL_CAPS(current_link.lp_caps, cmd, lp_advertising)
+       linkmode_copy(link_modes->supported, current_link.supported_caps);
+       linkmode_copy(link_modes->advertising, current_link.advertised_caps);
+       linkmode_copy(link_modes->lp_advertising, current_link.lp_caps);
 
        if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
                base->speed = current_link.speed;
@@ -553,9 +549,10 @@ static int qede_set_link_ksettings(struct net_device *dev,
 {
        const struct ethtool_link_settings *base = &cmd->base;
        struct qede_dev *edev = netdev_priv(dev);
+       const struct qede_forced_speed_map *map;
        struct qed_link_output current_link;
        struct qed_link_params params;
-       u32 sup_caps;
+       u32 i;
 
        if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
                DP_INFO(edev, "Link settings are not allowed to be changed\n");
@@ -567,107 +564,40 @@ static int qede_set_link_ksettings(struct net_device *dev,
 
        params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
        params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
+
        if (base->autoneg == AUTONEG_ENABLE) {
-               if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) {
+               if (!phylink_test(current_link.supported_caps, Autoneg)) {
                        DP_INFO(edev, "Auto negotiation is not supported\n");
                        return -EOPNOTSUPP;
                }
 
                params.autoneg = true;
                params.forced_speed = 0;
-               QEDE_ETHTOOL_TO_DRV_CAPS(params.adv_speeds, cmd, advertising)
+
+               linkmode_copy(params.adv_speeds, cmd->link_modes.advertising);
        } else {                /* forced speed */
                params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
                params.autoneg = false;
                params.forced_speed = base->speed;
-               switch (base->speed) {
-               case SPEED_1000:
-                       sup_caps = QED_LM_1000baseT_Full_BIT |
-                                       QED_LM_1000baseKX_Full_BIT |
-                                       QED_LM_1000baseX_Full_BIT;
-                       if (!(current_link.supported_caps & sup_caps)) {
-                               DP_INFO(edev, "1G speed not supported\n");
-                               return -EINVAL;
-                       }
-                       params.adv_speeds = current_link.supported_caps &
-                                               sup_caps;
-                       break;
-               case SPEED_10000:
-                       sup_caps = QED_LM_10000baseT_Full_BIT |
-                                       QED_LM_10000baseKR_Full_BIT |
-                                       QED_LM_10000baseKX4_Full_BIT |
-                                       QED_LM_10000baseR_FEC_BIT |
-                                       QED_LM_10000baseCR_Full_BIT |
-                                       QED_LM_10000baseSR_Full_BIT |
-                                       QED_LM_10000baseLR_Full_BIT |
-                                       QED_LM_10000baseLRM_Full_BIT;
-                       if (!(current_link.supported_caps & sup_caps)) {
-                               DP_INFO(edev, "10G speed not supported\n");
-                               return -EINVAL;
-                       }
-                       params.adv_speeds = current_link.supported_caps &
-                                               sup_caps;
-                       break;
-               case SPEED_20000:
-                       if (!(current_link.supported_caps &
-                           QED_LM_20000baseKR2_Full_BIT)) {
-                               DP_INFO(edev, "20G speed not supported\n");
-                               return -EINVAL;
-                       }
-                       params.adv_speeds = QED_LM_20000baseKR2_Full_BIT;
-                       break;
-               case SPEED_25000:
-                       sup_caps = QED_LM_25000baseKR_Full_BIT |
-                                       QED_LM_25000baseCR_Full_BIT |
-                                       QED_LM_25000baseSR_Full_BIT;
-                       if (!(current_link.supported_caps & sup_caps)) {
-                               DP_INFO(edev, "25G speed not supported\n");
-                               return -EINVAL;
-                       }
-                       params.adv_speeds = current_link.supported_caps &
-                                               sup_caps;
-                       break;
-               case SPEED_40000:
-                       sup_caps = QED_LM_40000baseLR4_Full_BIT |
-                                       QED_LM_40000baseKR4_Full_BIT |
-                                       QED_LM_40000baseCR4_Full_BIT |
-                                       QED_LM_40000baseSR4_Full_BIT;
-                       if (!(current_link.supported_caps & sup_caps)) {
-                               DP_INFO(edev, "40G speed not supported\n");
-                               return -EINVAL;
-                       }
-                       params.adv_speeds = current_link.supported_caps &
-                                               sup_caps;
-                       break;
-               case SPEED_50000:
-                       sup_caps = QED_LM_50000baseKR2_Full_BIT |
-                                       QED_LM_50000baseCR2_Full_BIT |
-                                       QED_LM_50000baseSR2_Full_BIT;
-                       if (!(current_link.supported_caps & sup_caps)) {
-                               DP_INFO(edev, "50G speed not supported\n");
-                               return -EINVAL;
-                       }
-                       params.adv_speeds = current_link.supported_caps &
-                                               sup_caps;
-                       break;
-               case SPEED_100000:
-                       sup_caps = QED_LM_100000baseKR4_Full_BIT |
-                                       QED_LM_100000baseSR4_Full_BIT |
-                                       QED_LM_100000baseCR4_Full_BIT |
-                                       QED_LM_100000baseLR4_ER4_Full_BIT;
-                       if (!(current_link.supported_caps & sup_caps)) {
-                               DP_INFO(edev, "100G speed not supported\n");
-                               return -EINVAL;
-                       }
-                       params.adv_speeds = current_link.supported_caps &
-                                               sup_caps;
-                       break;
-               default:
-                       DP_INFO(edev, "Unsupported speed %u\n", base->speed);
-                       return -EINVAL;
+
+               for (i = 0; i < ARRAY_SIZE(qede_forced_speed_maps); i++) {
+                       map = qede_forced_speed_maps + i;
+
+                       if (base->speed != map->speed ||
+                           !linkmode_intersects(current_link.supported_caps,
+                                                map->caps))
+                               continue;
+
+                       linkmode_and(params.adv_speeds,
+                                    current_link.supported_caps, map->caps);
+                       goto set_link;
                }
+
+               DP_INFO(edev, "Unsupported speed %u\n", base->speed);
+               return -EINVAL;
        }
 
+set_link:
        params.link_up = true;
        edev->ops->common->set_link(edev->cdev, &params);
 
@@ -1031,13 +961,16 @@ static int qede_set_pauseparam(struct net_device *dev,
 
        memset(&params, 0, sizeof(params));
        params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
+
        if (epause->autoneg) {
-               if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) {
+               if (!phylink_test(current_link.supported_caps, Autoneg)) {
                        DP_INFO(edev, "autoneg not supported\n");
                        return -EINVAL;
                }
+
                params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
        }
+
        if (epause->rx_pause)
                params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
        if (epause->tx_pause)
@@ -1901,6 +1834,78 @@ static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata)
        return 0;
 }
 
+static u32 qede_link_to_ethtool_fec(u32 link_fec)
+{
+       u32 eth_fec = 0;
+
+       if (link_fec & QED_FEC_MODE_NONE)
+               eth_fec |= ETHTOOL_FEC_OFF;
+       if (link_fec & QED_FEC_MODE_FIRECODE)
+               eth_fec |= ETHTOOL_FEC_BASER;
+       if (link_fec & QED_FEC_MODE_RS)
+               eth_fec |= ETHTOOL_FEC_RS;
+       if (link_fec & QED_FEC_MODE_AUTO)
+               eth_fec |= ETHTOOL_FEC_AUTO;
+       if (link_fec & QED_FEC_MODE_UNSUPPORTED)
+               eth_fec |= ETHTOOL_FEC_NONE;
+
+       return eth_fec;
+}
+
+static u32 qede_ethtool_to_link_fec(u32 eth_fec)
+{
+       u32 link_fec = 0;
+
+       if (eth_fec & ETHTOOL_FEC_OFF)
+               link_fec |= QED_FEC_MODE_NONE;
+       if (eth_fec & ETHTOOL_FEC_BASER)
+               link_fec |= QED_FEC_MODE_FIRECODE;
+       if (eth_fec & ETHTOOL_FEC_RS)
+               link_fec |= QED_FEC_MODE_RS;
+       if (eth_fec & ETHTOOL_FEC_AUTO)
+               link_fec |= QED_FEC_MODE_AUTO;
+       if (eth_fec & ETHTOOL_FEC_NONE)
+               link_fec |= QED_FEC_MODE_UNSUPPORTED;
+
+       return link_fec;
+}
+
+static int qede_get_fecparam(struct net_device *dev,
+                            struct ethtool_fecparam *fecparam)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       struct qed_link_output curr_link;
+
+       memset(&curr_link, 0, sizeof(curr_link));
+       edev->ops->common->get_link(edev->cdev, &curr_link);
+
+       fecparam->active_fec = qede_link_to_ethtool_fec(curr_link.active_fec);
+       fecparam->fec = qede_link_to_ethtool_fec(curr_link.sup_fec);
+
+       return 0;
+}
+
+static int qede_set_fecparam(struct net_device *dev,
+                            struct ethtool_fecparam *fecparam)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       struct qed_link_params params;
+
+       if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
+               DP_INFO(edev, "Link settings are not allowed to be changed\n");
+               return -EOPNOTSUPP;
+       }
+
+       memset(&params, 0, sizeof(params));
+       params.override_flags |= QED_LINK_OVERRIDE_FEC_CONFIG;
+       params.fec = qede_ethtool_to_link_fec(fecparam->fec);
+       params.link_up = true;
+
+       edev->ops->common->set_link(edev->cdev, &params);
+
+       return 0;
+}
+
 static int qede_get_module_info(struct net_device *dev,
                                struct ethtool_modinfo *modinfo)
 {
@@ -2099,78 +2104,79 @@ err:
 }
 
 static const struct ethtool_ops qede_ethtool_ops = {
-       .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
-       .get_link_ksettings = qede_get_link_ksettings,
-       .set_link_ksettings = qede_set_link_ksettings,
-       .get_drvinfo = qede_get_drvinfo,
-       .get_regs_len = qede_get_regs_len,
-       .get_regs = qede_get_regs,
-       .get_wol = qede_get_wol,
-       .set_wol = qede_set_wol,
-       .get_msglevel = qede_get_msglevel,
-       .set_msglevel = qede_set_msglevel,
-       .nway_reset = qede_nway_reset,
-       .get_link = qede_get_link,
-       .get_coalesce = qede_get_coalesce,
-       .set_coalesce = qede_set_coalesce,
-       .get_ringparam = qede_get_ringparam,
-       .set_ringparam = qede_set_ringparam,
-       .get_pauseparam = qede_get_pauseparam,
-       .set_pauseparam = qede_set_pauseparam,
-       .get_strings = qede_get_strings,
-       .set_phys_id = qede_set_phys_id,
-       .get_ethtool_stats = qede_get_ethtool_stats,
-       .get_priv_flags = qede_get_priv_flags,
-       .set_priv_flags = qede_set_priv_flags,
-       .get_sset_count = qede_get_sset_count,
-       .get_rxnfc = qede_get_rxnfc,
-       .set_rxnfc = qede_set_rxnfc,
-       .get_rxfh_indir_size = qede_get_rxfh_indir_size,
-       .get_rxfh_key_size = qede_get_rxfh_key_size,
-       .get_rxfh = qede_get_rxfh,
-       .set_rxfh = qede_set_rxfh,
-       .get_ts_info = qede_get_ts_info,
-       .get_channels = qede_get_channels,
-       .set_channels = qede_set_channels,
-       .self_test = qede_self_test,
-       .get_module_info = qede_get_module_info,
-       .get_module_eeprom = qede_get_module_eeprom,
-       .get_eee = qede_get_eee,
-       .set_eee = qede_set_eee,
-
-       .get_tunable = qede_get_tunable,
-       .set_tunable = qede_set_tunable,
-       .flash_device = qede_flash_device,
-       .get_dump_flag = qede_get_dump_flag,
-       .get_dump_data = qede_get_dump_data,
-       .set_dump = qede_set_dump,
+       .supported_coalesce_params      = ETHTOOL_COALESCE_USECS,
+       .get_link_ksettings             = qede_get_link_ksettings,
+       .set_link_ksettings             = qede_set_link_ksettings,
+       .get_drvinfo                    = qede_get_drvinfo,
+       .get_regs_len                   = qede_get_regs_len,
+       .get_regs                       = qede_get_regs,
+       .get_wol                        = qede_get_wol,
+       .set_wol                        = qede_set_wol,
+       .get_msglevel                   = qede_get_msglevel,
+       .set_msglevel                   = qede_set_msglevel,
+       .nway_reset                     = qede_nway_reset,
+       .get_link                       = qede_get_link,
+       .get_coalesce                   = qede_get_coalesce,
+       .set_coalesce                   = qede_set_coalesce,
+       .get_ringparam                  = qede_get_ringparam,
+       .set_ringparam                  = qede_set_ringparam,
+       .get_pauseparam                 = qede_get_pauseparam,
+       .set_pauseparam                 = qede_set_pauseparam,
+       .get_strings                    = qede_get_strings,
+       .set_phys_id                    = qede_set_phys_id,
+       .get_ethtool_stats              = qede_get_ethtool_stats,
+       .get_priv_flags                 = qede_get_priv_flags,
+       .set_priv_flags                 = qede_set_priv_flags,
+       .get_sset_count                 = qede_get_sset_count,
+       .get_rxnfc                      = qede_get_rxnfc,
+       .set_rxnfc                      = qede_set_rxnfc,
+       .get_rxfh_indir_size            = qede_get_rxfh_indir_size,
+       .get_rxfh_key_size              = qede_get_rxfh_key_size,
+       .get_rxfh                       = qede_get_rxfh,
+       .set_rxfh                       = qede_set_rxfh,
+       .get_ts_info                    = qede_get_ts_info,
+       .get_channels                   = qede_get_channels,
+       .set_channels                   = qede_set_channels,
+       .self_test                      = qede_self_test,
+       .get_module_info                = qede_get_module_info,
+       .get_module_eeprom              = qede_get_module_eeprom,
+       .get_eee                        = qede_get_eee,
+       .set_eee                        = qede_set_eee,
+       .get_fecparam                   = qede_get_fecparam,
+       .set_fecparam                   = qede_set_fecparam,
+       .get_tunable                    = qede_get_tunable,
+       .set_tunable                    = qede_set_tunable,
+       .flash_device                   = qede_flash_device,
+       .get_dump_flag                  = qede_get_dump_flag,
+       .get_dump_data                  = qede_get_dump_data,
+       .set_dump                       = qede_set_dump,
 };
 
 static const struct ethtool_ops qede_vf_ethtool_ops = {
-       .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
-       .get_link_ksettings = qede_get_link_ksettings,
-       .get_drvinfo = qede_get_drvinfo,
-       .get_msglevel = qede_get_msglevel,
-       .set_msglevel = qede_set_msglevel,
-       .get_link = qede_get_link,
-       .get_coalesce = qede_get_coalesce,
-       .set_coalesce = qede_set_coalesce,
-       .get_ringparam = qede_get_ringparam,
-       .set_ringparam = qede_set_ringparam,
-       .get_strings = qede_get_strings,
-       .get_ethtool_stats = qede_get_ethtool_stats,
-       .get_priv_flags = qede_get_priv_flags,
-       .get_sset_count = qede_get_sset_count,
-       .get_rxnfc = qede_get_rxnfc,
-       .set_rxnfc = qede_set_rxnfc,
-       .get_rxfh_indir_size = qede_get_rxfh_indir_size,
-       .get_rxfh_key_size = qede_get_rxfh_key_size,
-       .get_rxfh = qede_get_rxfh,
-       .set_rxfh = qede_set_rxfh,
-       .get_channels = qede_get_channels,
-       .set_channels = qede_set_channels,
-       .get_tunable = qede_get_tunable,
-       .set_tunable = qede_set_tunable,
+       .supported_coalesce_params      = ETHTOOL_COALESCE_USECS,
+       .get_link_ksettings             = qede_get_link_ksettings,
+       .get_drvinfo                    = qede_get_drvinfo,
+       .get_msglevel                   = qede_get_msglevel,
+       .set_msglevel                   = qede_set_msglevel,
+       .get_link                       = qede_get_link,
+       .get_coalesce                   = qede_get_coalesce,
+       .set_coalesce                   = qede_set_coalesce,
+       .get_ringparam                  = qede_get_ringparam,
+       .set_ringparam                  = qede_set_ringparam,
+       .get_strings                    = qede_get_strings,
+       .get_ethtool_stats              = qede_get_ethtool_stats,
+       .get_priv_flags                 = qede_get_priv_flags,
+       .get_sset_count                 = qede_get_sset_count,
+       .get_rxnfc                      = qede_get_rxnfc,
+       .set_rxnfc                      = qede_set_rxnfc,
+       .get_rxfh_indir_size            = qede_get_rxfh_indir_size,
+       .get_rxfh_key_size              = qede_get_rxfh_key_size,
+       .get_rxfh                       = qede_get_rxfh,
+       .set_rxfh                       = qede_set_rxfh,
+       .get_channels                   = qede_get_channels,
+       .set_channels                   = qede_set_channels,
+       .get_tunable                    = qede_get_tunable,
+       .set_tunable                    = qede_set_tunable,
 };
 
 void qede_set_ethtool_ops(struct net_device *dev)
index fe72bb6..b7d0b6c 100644 (file)
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qede NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
+
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <net/udp_tunnel.h>
@@ -978,115 +953,67 @@ int qede_set_features(struct net_device *dev, netdev_features_t features)
        return 0;
 }
 
-void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
+static int qede_udp_tunnel_sync(struct net_device *dev, unsigned int table)
 {
        struct qede_dev *edev = netdev_priv(dev);
        struct qed_tunn_params tunn_params;
-       u16 t_port = ntohs(ti->port);
+       struct udp_tunnel_info ti;
+       u16 *save_port;
        int rc;
 
        memset(&tunn_params, 0, sizeof(tunn_params));
 
-       switch (ti->type) {
-       case UDP_TUNNEL_TYPE_VXLAN:
-               if (!edev->dev_info.common.vxlan_enable)
-                       return;
-
-               if (edev->vxlan_dst_port)
-                       return;
-
+       udp_tunnel_nic_get_port(dev, table, 0, &ti);
+       if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
                tunn_params.update_vxlan_port = 1;
-               tunn_params.vxlan_port = t_port;
-
-               __qede_lock(edev);
-               rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
-               __qede_unlock(edev);
-
-               if (!rc) {
-                       edev->vxlan_dst_port = t_port;
-                       DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
-                                  t_port);
-               } else {
-                       DP_NOTICE(edev, "Failed to add vxlan UDP port=%d\n",
-                                 t_port);
-               }
-
-               break;
-       case UDP_TUNNEL_TYPE_GENEVE:
-               if (!edev->dev_info.common.geneve_enable)
-                       return;
-
-               if (edev->geneve_dst_port)
-                       return;
-
+               tunn_params.vxlan_port = ntohs(ti.port);
+               save_port = &edev->vxlan_dst_port;
+       } else {
                tunn_params.update_geneve_port = 1;
-               tunn_params.geneve_port = t_port;
-
-               __qede_lock(edev);
-               rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
-               __qede_unlock(edev);
-
-               if (!rc) {
-                       edev->geneve_dst_port = t_port;
-                       DP_VERBOSE(edev, QED_MSG_DEBUG,
-                                  "Added geneve port=%d\n", t_port);
-               } else {
-                       DP_NOTICE(edev, "Failed to add geneve UDP port=%d\n",
-                                 t_port);
-               }
-
-               break;
-       default:
-               return;
+               tunn_params.geneve_port = ntohs(ti.port);
+               save_port = &edev->geneve_dst_port;
        }
-}
-
-void qede_udp_tunnel_del(struct net_device *dev,
-                        struct udp_tunnel_info *ti)
-{
-       struct qede_dev *edev = netdev_priv(dev);
-       struct qed_tunn_params tunn_params;
-       u16 t_port = ntohs(ti->port);
 
-       memset(&tunn_params, 0, sizeof(tunn_params));
-
-       switch (ti->type) {
-       case UDP_TUNNEL_TYPE_VXLAN:
-               if (t_port != edev->vxlan_dst_port)
-                       return;
-
-               tunn_params.update_vxlan_port = 1;
-               tunn_params.vxlan_port = 0;
-
-               __qede_lock(edev);
-               edev->ops->tunn_config(edev->cdev, &tunn_params);
-               __qede_unlock(edev);
-
-               edev->vxlan_dst_port = 0;
-
-               DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
-                          t_port);
-
-               break;
-       case UDP_TUNNEL_TYPE_GENEVE:
-               if (t_port != edev->geneve_dst_port)
-                       return;
-
-               tunn_params.update_geneve_port = 1;
-               tunn_params.geneve_port = 0;
+       __qede_lock(edev);
+       rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
+       __qede_unlock(edev);
+       if (rc)
+               return rc;
 
-               __qede_lock(edev);
-               edev->ops->tunn_config(edev->cdev, &tunn_params);
-               __qede_unlock(edev);
+       *save_port = ntohs(ti.port);
+       return 0;
+}
 
-               edev->geneve_dst_port = 0;
+static const struct udp_tunnel_nic_info qede_udp_tunnels_both = {
+       .sync_table     = qede_udp_tunnel_sync,
+       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
+       .tables         = {
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+       },
+}, qede_udp_tunnels_vxlan = {
+       .sync_table     = qede_udp_tunnel_sync,
+       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
+       .tables         = {
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
+       },
+}, qede_udp_tunnels_geneve = {
+       .sync_table     = qede_udp_tunnel_sync,
+       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
+       .tables         = {
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+       },
+};
 
-               DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
-                          t_port);
-               break;
-       default:
-               return;
-       }
+void qede_set_udp_tunnels(struct qede_dev *edev)
+{
+       if (edev->dev_info.common.vxlan_enable &&
+           edev->dev_info.common.geneve_enable)
+               edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_both;
+       else if (edev->dev_info.common.vxlan_enable)
+               edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_vxlan;
+       else if (edev->dev_info.common.geneve_enable)
+               edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_geneve;
 }
 
 static void qede_xdp_reload_func(struct qede_dev *edev,
@@ -1789,8 +1716,8 @@ qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule,
                struct flow_match_ports match;
 
                flow_rule_match_ports(rule, &match);
-               if ((match.key->src && match.mask->src != U16_MAX) ||
-                   (match.key->dst && match.mask->dst != U16_MAX)) {
+               if ((match.key->src && match.mask->src != htons(U16_MAX)) ||
+                   (match.key->dst && match.mask->dst != htons(U16_MAX))) {
                        DP_NOTICE(edev, "Do not support ports masks\n");
                        return -EINVAL;
                }
@@ -1842,8 +1769,8 @@ qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule,
                struct flow_match_ipv4_addrs match;
 
                flow_rule_match_ipv4_addrs(rule, &match);
-               if ((match.key->src && match.mask->src != U32_MAX) ||
-                   (match.key->dst && match.mask->dst != U32_MAX)) {
+               if ((match.key->src && match.mask->src != htonl(U32_MAX)) ||
+                   (match.key->dst && match.mask->dst != htonl(U32_MAX))) {
                        DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n");
                        return -EINVAL;
                }
index 7598ebe..a2494bf 100644 (file)
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qede NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
+
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
@@ -327,51 +302,94 @@ static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
        wmb();
 }
 
-static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
-                        struct sw_rx_data *metadata, u16 padding, u16 length)
+static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad,
+                        u16 len, struct page *page, struct xdp_frame *xdpf)
 {
-       struct qede_tx_queue *txq = fp->xdp_tx;
-       struct eth_tx_1st_bd *first_bd;
-       u16 idx = txq->sw_tx_prod;
+       struct eth_tx_1st_bd *bd;
+       struct sw_tx_xdp *xdp;
        u16 val;
 
-       if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
+       if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >=
+                    txq->num_tx_buffers)) {
                txq->stopped_cnt++;
                return -ENOMEM;
        }
 
-       first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
-
-       memset(first_bd, 0, sizeof(*first_bd));
-       first_bd->data.bd_flags.bitfields =
-           BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
+       bd = qed_chain_produce(&txq->tx_pbl);
+       bd->data.nbds = 1;
+       bd->data.bd_flags.bitfields = BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
 
-       val = (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+       val = (len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
               ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
 
-       first_bd->data.bitfields |= cpu_to_le16(val);
-       first_bd->data.nbds = 1;
+       bd->data.bitfields = cpu_to_le16(val);
 
        /* We can safely ignore the offset, as it's 0 for XDP */
-       BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length);
+       BD_SET_UNMAP_ADDR_LEN(bd, dma + pad, len);
 
-       /* Synchronize the buffer back to device, as program [probably]
-        * has changed it.
-        */
-       dma_sync_single_for_device(&edev->pdev->dev,
-                                  metadata->mapping + padding,
-                                  length, PCI_DMA_TODEVICE);
+       xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod;
+       xdp->mapping = dma;
+       xdp->page = page;
+       xdp->xdpf = xdpf;
 
-       txq->sw_tx_ring.xdp[idx].page = metadata->data;
-       txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping;
        txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
 
-       /* Mark the fastpath for future XDP doorbell */
-       fp->xdp_xmit = 1;
-
        return 0;
 }
 
+int qede_xdp_transmit(struct net_device *dev, int n_frames,
+                     struct xdp_frame **frames, u32 flags)
+{
+       struct qede_dev *edev = netdev_priv(dev);
+       struct device *dmadev = &edev->pdev->dev;
+       struct qede_tx_queue *xdp_tx;
+       struct xdp_frame *xdpf;
+       dma_addr_t mapping;
+       int i, drops = 0;
+       u16 xdp_prod;
+
+       if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+               return -EINVAL;
+
+       if (unlikely(!netif_running(dev)))
+               return -ENETDOWN;
+
+       i = smp_processor_id() % edev->total_xdp_queues;
+       xdp_tx = edev->fp_array[i].xdp_tx;
+
+       spin_lock(&xdp_tx->xdp_tx_lock);
+
+       for (i = 0; i < n_frames; i++) {
+               xdpf = frames[i];
+
+               mapping = dma_map_single(dmadev, xdpf->data, xdpf->len,
+                                        DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(dmadev, mapping))) {
+                       xdp_return_frame_rx_napi(xdpf);
+                       drops++;
+
+                       continue;
+               }
+
+               if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len,
+                                          NULL, xdpf))) {
+                       xdp_return_frame_rx_napi(xdpf);
+                       drops++;
+               }
+       }
+
+       if (flags & XDP_XMIT_FLUSH) {
+               xdp_prod = qed_chain_get_prod_idx(&xdp_tx->tx_pbl);
+
+               xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
+               qede_update_tx_producer(xdp_tx);
+       }
+
+       spin_unlock(&xdp_tx->xdp_tx_lock);
+
+       return n_frames - drops;
+}
+
 int qede_txq_has_work(struct qede_tx_queue *txq)
 {
        u16 hw_bd_cons;
@@ -387,20 +405,31 @@ int qede_txq_has_work(struct qede_tx_queue *txq)
 
 static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
-       u16 hw_bd_cons, idx;
+       struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp;
+       struct device *dev = &edev->pdev->dev;
+       struct xdp_frame *xdpf;
+       u16 hw_bd_cons;
 
        hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
        barrier();
 
        while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
-               qed_chain_consume(&txq->tx_pbl);
-               idx = txq->sw_tx_cons;
+               xdp_info = xdp_arr + txq->sw_tx_cons;
+               xdpf = xdp_info->xdpf;
+
+               if (xdpf) {
+                       dma_unmap_single(dev, xdp_info->mapping, xdpf->len,
+                                        DMA_TO_DEVICE);
+                       xdp_return_frame(xdpf);
 
-               dma_unmap_page(&edev->pdev->dev,
-                              txq->sw_tx_ring.xdp[idx].mapping,
-                              PAGE_SIZE, DMA_BIDIRECTIONAL);
-               __free_page(txq->sw_tx_ring.xdp[idx].page);
+                       xdp_info->xdpf = NULL;
+               } else {
+                       dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE,
+                                      DMA_BIDIRECTIONAL);
+                       __free_page(xdp_info->page);
+               }
 
+               qed_chain_consume(&txq->tx_pbl);
                txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
                txq->xmit_pkts++;
        }
@@ -1089,32 +1118,59 @@ static bool qede_rx_xdp(struct qede_dev *edev,
        switch (act) {
        case XDP_TX:
                /* We need the replacement buffer before transmit. */
-               if (qede_alloc_rx_buffer(rxq, true)) {
+               if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
                        qede_recycle_rx_bd_ring(rxq, 1);
+
                        trace_xdp_exception(edev->ndev, prog, act);
-                       return false;
+                       break;
                }
 
                /* Now if there's a transmission problem, we'd still have to
                 * throw current buffer, as replacement was already allocated.
                 */
-               if (qede_xdp_xmit(edev, fp, bd, *data_offset, *len)) {
-                       dma_unmap_page(rxq->dev, bd->mapping,
-                                      PAGE_SIZE, DMA_BIDIRECTIONAL);
+               if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping,
+                                          *data_offset, *len, bd->data,
+                                          NULL))) {
+                       dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
+                                      rxq->data_direction);
                        __free_page(bd->data);
+
                        trace_xdp_exception(edev->ndev, prog, act);
+               } else {
+                       dma_sync_single_for_device(rxq->dev,
+                                                  bd->mapping + *data_offset,
+                                                  *len, rxq->data_direction);
+                       fp->xdp_xmit |= QEDE_XDP_TX;
                }
 
                /* Regardless, we've consumed an Rx BD */
                qede_rx_bd_ring_consume(rxq);
-               return false;
+               break;
+       case XDP_REDIRECT:
+               /* We need the replacement buffer before transmit. */
+               if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
+                       qede_recycle_rx_bd_ring(rxq, 1);
+
+                       trace_xdp_exception(edev->ndev, prog, act);
+                       break;
+               }
 
+               dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
+                              rxq->data_direction);
+
+               if (unlikely(xdp_do_redirect(edev->ndev, &xdp, prog)))
+                       DP_NOTICE(edev, "Failed to redirect the packet\n");
+               else
+                       fp->xdp_xmit |= QEDE_XDP_REDIRECT;
+
+               qede_rx_bd_ring_consume(rxq);
+               break;
        default:
                bpf_warn_invalid_xdp_action(act);
-               /* Fall through */
+               fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(edev->ndev, prog, act);
-               /* Fall through */
+               fallthrough;
        case XDP_DROP:
                qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
        }
@@ -1378,6 +1434,9 @@ int qede_poll(struct napi_struct *napi, int budget)
                                                napi);
        struct qede_dev *edev = fp->edev;
        int rx_work_done = 0;
+       u16 xdp_prod;
+
+       fp->xdp_xmit = 0;
 
        if (likely(fp->type & QEDE_FASTPATH_TX)) {
                int cos;
@@ -1405,14 +1464,16 @@ int qede_poll(struct napi_struct *napi, int budget)
                }
        }
 
-       if (fp->xdp_xmit) {
-               u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
+       if (fp->xdp_xmit & QEDE_XDP_TX) {
+               xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
 
-               fp->xdp_xmit = 0;
                fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
                qede_update_tx_producer(fp->xdp_tx);
        }
 
+       if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
+               xdp_do_flush_map();
+
        return rx_work_done;
 }
 
index 756c05e..1aaae32 100644 (file)
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qede NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
+
 #include <linux/crash_dump.h>
 #include <linux/module.h>
 #include <linux/pci.h>
@@ -288,6 +263,8 @@ int __init qede_init(void)
 
        pr_info("qede_init: %s\n", version);
 
+       qede_forced_speed_maps_init();
+
        qed_ops = qed_get_eth_ops();
        if (!qed_ops) {
                pr_notice("Failed to get qed ethtool operations\n");
@@ -662,79 +639,81 @@ qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
 }
 
 static const struct net_device_ops qede_netdev_ops = {
-       .ndo_open = qede_open,
-       .ndo_stop = qede_close,
-       .ndo_start_xmit = qede_start_xmit,
-       .ndo_select_queue = qede_select_queue,
-       .ndo_set_rx_mode = qede_set_rx_mode,
-       .ndo_set_mac_address = qede_set_mac_addr,
-       .ndo_validate_addr = eth_validate_addr,
-       .ndo_change_mtu = qede_change_mtu,
-       .ndo_do_ioctl = qede_ioctl,
-       .ndo_tx_timeout = qede_tx_timeout,
+       .ndo_open               = qede_open,
+       .ndo_stop               = qede_close,
+       .ndo_start_xmit         = qede_start_xmit,
+       .ndo_select_queue       = qede_select_queue,
+       .ndo_set_rx_mode        = qede_set_rx_mode,
+       .ndo_set_mac_address    = qede_set_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_change_mtu         = qede_change_mtu,
+       .ndo_do_ioctl           = qede_ioctl,
+       .ndo_tx_timeout         = qede_tx_timeout,
 #ifdef CONFIG_QED_SRIOV
-       .ndo_set_vf_mac = qede_set_vf_mac,
-       .ndo_set_vf_vlan = qede_set_vf_vlan,
-       .ndo_set_vf_trust = qede_set_vf_trust,
+       .ndo_set_vf_mac         = qede_set_vf_mac,
+       .ndo_set_vf_vlan        = qede_set_vf_vlan,
+       .ndo_set_vf_trust       = qede_set_vf_trust,
 #endif
-       .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
-       .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
-       .ndo_fix_features = qede_fix_features,
-       .ndo_set_features = qede_set_features,
-       .ndo_get_stats64 = qede_get_stats64,
+       .ndo_vlan_rx_add_vid    = qede_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = qede_vlan_rx_kill_vid,
+       .ndo_fix_features       = qede_fix_features,
+       .ndo_set_features       = qede_set_features,
+       .ndo_get_stats64        = qede_get_stats64,
 #ifdef CONFIG_QED_SRIOV
-       .ndo_set_vf_link_state = qede_set_vf_link_state,
-       .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
-       .ndo_get_vf_config = qede_get_vf_config,
-       .ndo_set_vf_rate = qede_set_vf_rate,
+       .ndo_set_vf_link_state  = qede_set_vf_link_state,
+       .ndo_set_vf_spoofchk    = qede_set_vf_spoofchk,
+       .ndo_get_vf_config      = qede_get_vf_config,
+       .ndo_set_vf_rate        = qede_set_vf_rate,
 #endif
-       .ndo_udp_tunnel_add = qede_udp_tunnel_add,
-       .ndo_udp_tunnel_del = qede_udp_tunnel_del,
-       .ndo_features_check = qede_features_check,
-       .ndo_bpf = qede_xdp,
+       .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+       .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
+       .ndo_features_check     = qede_features_check,
+       .ndo_bpf                = qede_xdp,
 #ifdef CONFIG_RFS_ACCEL
-       .ndo_rx_flow_steer = qede_rx_flow_steer,
+       .ndo_rx_flow_steer      = qede_rx_flow_steer,
 #endif
-       .ndo_setup_tc = qede_setup_tc_offload,
+       .ndo_xdp_xmit           = qede_xdp_transmit,
+       .ndo_setup_tc           = qede_setup_tc_offload,
 };
 
 static const struct net_device_ops qede_netdev_vf_ops = {
-       .ndo_open = qede_open,
-       .ndo_stop = qede_close,
-       .ndo_start_xmit = qede_start_xmit,
-       .ndo_select_queue = qede_select_queue,
-       .ndo_set_rx_mode = qede_set_rx_mode,
-       .ndo_set_mac_address = qede_set_mac_addr,
-       .ndo_validate_addr = eth_validate_addr,
-       .ndo_change_mtu = qede_change_mtu,
-       .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
-       .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
-       .ndo_fix_features = qede_fix_features,
-       .ndo_set_features = qede_set_features,
-       .ndo_get_stats64 = qede_get_stats64,
-       .ndo_udp_tunnel_add = qede_udp_tunnel_add,
-       .ndo_udp_tunnel_del = qede_udp_tunnel_del,
-       .ndo_features_check = qede_features_check,
+       .ndo_open               = qede_open,
+       .ndo_stop               = qede_close,
+       .ndo_start_xmit         = qede_start_xmit,
+       .ndo_select_queue       = qede_select_queue,
+       .ndo_set_rx_mode        = qede_set_rx_mode,
+       .ndo_set_mac_address    = qede_set_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_change_mtu         = qede_change_mtu,
+       .ndo_vlan_rx_add_vid    = qede_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = qede_vlan_rx_kill_vid,
+       .ndo_fix_features       = qede_fix_features,
+       .ndo_set_features       = qede_set_features,
+       .ndo_get_stats64        = qede_get_stats64,
+       .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+       .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
+       .ndo_features_check     = qede_features_check,
 };
 
 static const struct net_device_ops qede_netdev_vf_xdp_ops = {
-       .ndo_open = qede_open,
-       .ndo_stop = qede_close,
-       .ndo_start_xmit = qede_start_xmit,
-       .ndo_select_queue = qede_select_queue,
-       .ndo_set_rx_mode = qede_set_rx_mode,
-       .ndo_set_mac_address = qede_set_mac_addr,
-       .ndo_validate_addr = eth_validate_addr,
-       .ndo_change_mtu = qede_change_mtu,
-       .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
-       .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
-       .ndo_fix_features = qede_fix_features,
-       .ndo_set_features = qede_set_features,
-       .ndo_get_stats64 = qede_get_stats64,
-       .ndo_udp_tunnel_add = qede_udp_tunnel_add,
-       .ndo_udp_tunnel_del = qede_udp_tunnel_del,
-       .ndo_features_check = qede_features_check,
-       .ndo_bpf = qede_xdp,
+       .ndo_open               = qede_open,
+       .ndo_stop               = qede_close,
+       .ndo_start_xmit         = qede_start_xmit,
+       .ndo_select_queue       = qede_select_queue,
+       .ndo_set_rx_mode        = qede_set_rx_mode,
+       .ndo_set_mac_address    = qede_set_mac_addr,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_change_mtu         = qede_change_mtu,
+       .ndo_vlan_rx_add_vid    = qede_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = qede_vlan_rx_kill_vid,
+       .ndo_fix_features       = qede_fix_features,
+       .ndo_set_features       = qede_set_features,
+       .ndo_get_stats64        = qede_get_stats64,
+       .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+       .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
+       .ndo_features_check     = qede_features_check,
+       .ndo_bpf                = qede_xdp,
+       .ndo_xdp_xmit           = qede_xdp_transmit,
 };
 
 /* -------------------------------------------------------------------------
@@ -847,6 +826,8 @@ static void qede_init_ndev(struct qede_dev *edev)
                                NETIF_F_GSO_UDP_TUNNEL_CSUM);
                ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
                                          NETIF_F_GSO_UDP_TUNNEL_CSUM);
+
+               qede_set_udp_tunnels(edev);
        }
 
        if (edev->dev_info.common.gre_enable) {
@@ -1229,7 +1210,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
 
        /* PTP not supported on VFs */
        if (!is_vf)
-               qede_ptp_enable(edev, (mode == QEDE_PROBE_NORMAL));
+               qede_ptp_enable(edev);
 
        edev->ops->register_ops(cdev, &qede_ll_ops, edev);
 
@@ -1318,6 +1299,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
        if (system_state == SYSTEM_POWER_OFF)
                return;
        qed_ops->common->remove(cdev);
+       edev->cdev = NULL;
 
        /* Since this can happen out-of-sync with other flows,
         * don't release the netdevice until after slowpath stop
@@ -1462,6 +1444,11 @@ static void qede_set_tpa_param(struct qede_rx_queue *rxq)
 /* This function allocates all memory needed per Rx queue */
 static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
 {
+       struct qed_chain_init_params params = {
+               .cnt_type       = QED_CHAIN_CNT_TYPE_U16,
+               .num_elems      = RX_RING_SIZE,
+       };
+       struct qed_dev *cdev = edev->cdev;
        int i, rc, size;
 
        rxq->num_rx_buffers = edev->q_num_rx_buffers;
@@ -1497,24 +1484,20 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
        }
 
        /* Allocate FW Rx ring  */
-       rc = edev->ops->common->chain_alloc(edev->cdev,
-                                           QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-                                           QED_CHAIN_MODE_NEXT_PTR,
-                                           QED_CHAIN_CNT_TYPE_U16,
-                                           RX_RING_SIZE,
-                                           sizeof(struct eth_rx_bd),
-                                           &rxq->rx_bd_ring, NULL);
+       params.mode = QED_CHAIN_MODE_NEXT_PTR;
+       params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
+       params.elem_size = sizeof(struct eth_rx_bd);
+
+       rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, &params);
        if (rc)
                goto err;
 
        /* Allocate FW completion ring */
-       rc = edev->ops->common->chain_alloc(edev->cdev,
-                                           QED_CHAIN_USE_TO_CONSUME,
-                                           QED_CHAIN_MODE_PBL,
-                                           QED_CHAIN_CNT_TYPE_U16,
-                                           RX_RING_SIZE,
-                                           sizeof(union eth_rx_cqe),
-                                           &rxq->rx_comp_ring, NULL);
+       params.mode = QED_CHAIN_MODE_PBL;
+       params.intended_use = QED_CHAIN_USE_TO_CONSUME;
+       params.elem_size = sizeof(union eth_rx_cqe);
+
+       rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, &params);
        if (rc)
                goto err;
 
@@ -1551,7 +1534,13 @@ static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 /* This function allocates all memory needed per Tx queue */
 static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
 {
-       union eth_tx_bd_types *p_virt;
+       struct qed_chain_init_params params = {
+               .mode           = QED_CHAIN_MODE_PBL,
+               .intended_use   = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+               .cnt_type       = QED_CHAIN_CNT_TYPE_U16,
+               .num_elems      = edev->q_num_tx_buffers,
+               .elem_size      = sizeof(union eth_tx_bd_types),
+       };
        int size, rc;
 
        txq->num_tx_buffers = edev->q_num_tx_buffers;
@@ -1569,13 +1558,7 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
                        goto err;
        }
 
-       rc = edev->ops->common->chain_alloc(edev->cdev,
-                                           QED_CHAIN_USE_TO_CONSUME_PRODUCE,
-                                           QED_CHAIN_MODE_PBL,
-                                           QED_CHAIN_CNT_TYPE_U16,
-                                           txq->num_tx_buffers,
-                                           sizeof(*p_virt),
-                                           &txq->tx_pbl, NULL);
+       rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, &params);
        if (rc)
                goto err;
 
@@ -1731,6 +1714,7 @@ static void qede_init_fp(struct qede_dev *edev)
 {
        int queue_id, rxq_index = 0, txq_index = 0;
        struct qede_fastpath *fp;
+       bool init_xdp = false;
 
        for_each_queue(queue_id) {
                fp = &edev->fp_array[queue_id];
@@ -1742,6 +1726,9 @@ static void qede_init_fp(struct qede_dev *edev)
                        fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
                                                                rxq_index);
                        fp->xdp_tx->is_xdp = 1;
+
+                       spin_lock_init(&fp->xdp_tx->xdp_tx_lock);
+                       init_xdp = true;
                }
 
                if (fp->type & QEDE_FASTPATH_RX) {
@@ -1757,6 +1744,13 @@ static void qede_init_fp(struct qede_dev *edev)
                        /* Driver have no error path from here */
                        WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
                                                 fp->rxq->rxq_id) < 0);
+
+                       if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq,
+                                                      MEM_TYPE_PAGE_ORDER0,
+                                                      NULL)) {
+                               DP_NOTICE(edev,
+                                         "Failed to register XDP memory model\n");
+                       }
                }
 
                if (fp->type & QEDE_FASTPATH_TX) {
@@ -1782,6 +1776,11 @@ static void qede_init_fp(struct qede_dev *edev)
                snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
                         edev->ndev->name, queue_id);
        }
+
+       if (init_xdp) {
+               edev->total_xdp_queues = QEDE_RSS_COUNT(edev);
+               DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues);
+       }
 }
 
 static int qede_set_real_num_queues(struct qede_dev *edev)
@@ -2445,7 +2444,7 @@ static int qede_open(struct net_device *ndev)
        if (rc)
                return rc;
 
-       udp_tunnel_get_rx_info(ndev);
+       udp_tunnel_nic_reset_ntf(ndev);
 
        edev->ops->common->update_drv_state(edev->cdev, true);
 
@@ -2547,7 +2546,7 @@ static void qede_recovery_handler(struct qede_dev *edev)
                        goto err;
 
                qede_config_rx_mode(edev->ndev);
-               udp_tunnel_get_rx_info(edev->ndev);
+               udp_tunnel_nic_reset_ntf(edev->ndev);
        }
 
        edev->state = curr_state;
index 4c7f7a7..8c28fab 100644 (file)
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qede NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
+
 #include "qede_ptp.h"
 #define QEDE_PTP_TX_TIMEOUT (2 * HZ)
 
@@ -53,12 +28,12 @@ struct qede_ptp {
 };
 
 /**
- * qede_ptp_adjfreq
- * @ptp: the ptp clock structure
- * @ppb: parts per billion adjustment from base
+ * qede_ptp_adjfreq() - Adjust the frequency of the PTP cycle counter.
+ *
+ * @info: The PTP clock info structure.
+ * @ppb: Parts per billion adjustment from base.
  *
- * Adjust the frequency of the ptp cycle counter by the
- * indicated ppb from the base frequency.
+ * Return: Zero on success, negative errno otherwise.
  */
 static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb)
 {
@@ -412,6 +387,7 @@ void qede_ptp_disable(struct qede_dev *edev)
        if (ptp->tx_skb) {
                dev_kfree_skb_any(ptp->tx_skb);
                ptp->tx_skb = NULL;
+               clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
        }
 
        /* Disable PTP in HW */
@@ -423,7 +399,7 @@ void qede_ptp_disable(struct qede_dev *edev)
        edev->ptp = NULL;
 }
 
-static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
+static int qede_ptp_init(struct qede_dev *edev)
 {
        struct qede_ptp *ptp;
        int rc;
@@ -444,25 +420,19 @@ static int qede_ptp_init(struct qede_dev *edev, bool init_tc)
        /* Init work queue for Tx timestamping */
        INIT_WORK(&ptp->work, qede_ptp_task);
 
-       /* Init cyclecounter and timecounter. This is done only in the first
-        * load. If done in every load, PTP application will fail when doing
-        * unload / load (e.g. MTU change) while it is running.
-        */
-       if (init_tc) {
-               memset(&ptp->cc, 0, sizeof(ptp->cc));
-               ptp->cc.read = qede_ptp_read_cc;
-               ptp->cc.mask = CYCLECOUNTER_MASK(64);
-               ptp->cc.shift = 0;
-               ptp->cc.mult = 1;
-
-               timecounter_init(&ptp->tc, &ptp->cc,
-                                ktime_to_ns(ktime_get_real()));
-       }
+       /* Init cyclecounter and timecounter */
+       memset(&ptp->cc, 0, sizeof(ptp->cc));
+       ptp->cc.read = qede_ptp_read_cc;
+       ptp->cc.mask = CYCLECOUNTER_MASK(64);
+       ptp->cc.shift = 0;
+       ptp->cc.mult = 1;
 
-       return rc;
+       timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+
+       return 0;
 }
 
-int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
+int qede_ptp_enable(struct qede_dev *edev)
 {
        struct qede_ptp *ptp;
        int rc;
@@ -483,7 +453,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
 
        edev->ptp = ptp;
 
-       rc = qede_ptp_init(edev, init_tc);
+       rc = qede_ptp_init(edev);
        if (rc)
                goto err1;
 
index 691a14c..1db0f02 100644 (file)
@@ -1,34 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qede NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
+
 #ifndef _QEDE_PTP_H_
 #define _QEDE_PTP_H_
 
@@ -41,7 +16,7 @@ void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
 void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
 int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
 void qede_ptp_disable(struct qede_dev *edev);
-int qede_ptp_enable(struct qede_dev *edev, bool init_tc);
+int qede_ptp_enable(struct qede_dev *edev);
 int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
 
 static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
index 2d873ae..769ec2f 100644 (file)
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qedr NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
+
 #include <linux/pci.h>
 #include <linux/netdevice.h>
 #include <linux/list.h>
@@ -105,6 +80,7 @@ static void qede_rdma_destroy_wq(struct qede_dev *edev)
 
        qede_rdma_cleanup_event(edev);
        destroy_workqueue(edev->rdma_info.rdma_wq);
+       edev->rdma_info.rdma_wq = NULL;
 }
 
 int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
@@ -325,7 +301,7 @@ static void qede_rdma_add_event(struct qede_dev *edev,
        if (edev->rdma_info.exp_recovery)
                return;
 
-       if (!edev->rdma_info.qedr_dev)
+       if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq)
                return;
 
        /* We don't want the cleanup flow to start while we're allocating and
index d838774..d67f826 100644 (file)
@@ -536,8 +536,6 @@ struct qlcnic_hardware_context {
        u8 extend_lb_time;
        u8 phys_port_id[ETH_ALEN];
        u8 lb_mode;
-       u8 vxlan_port_count;
-       u16 vxlan_port;
        struct device *hwmon_dev;
        u32 post_mode;
        bool run_post;
@@ -1026,9 +1024,6 @@ struct qlcnic_ipaddr {
 #define QLCNIC_HAS_PHYS_PORT_ID                0x40000
 #define QLCNIC_TSS_RSS                 0x80000
 
-#define QLCNIC_ADD_VXLAN_PORT          0x100000
-#define QLCNIC_DEL_VXLAN_PORT          0x200000
-
 #define QLCNIC_VLAN_FILTERING          0x800000
 
 #define QLCNIC_IS_MSI_FAMILY(adapter) \
@@ -1700,6 +1695,8 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *);
 int qlcnic_set_default_offload_settings(struct qlcnic_adapter *);
 int qlcnic_reset_npar_config(struct qlcnic_adapter *);
 int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *);
+int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter, u16 port);
+int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter, u16 port);
 int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
 int qlcnic_read_mac_addr(struct qlcnic_adapter *);
 int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
index cda5b0a..0e2f2fb 100644 (file)
@@ -1028,9 +1028,8 @@ static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter,
 #define QLCNIC_ENABLE_INGRESS_ENCAP_PARSING 1
 #define QLCNIC_DISABLE_INGRESS_ENCAP_PARSING 0
 
-static int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter)
+int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter, u16 port)
 {
-       u16 port = adapter->ahw->vxlan_port;
        struct qlcnic_cmd_args cmd;
        int ret = 0;
 
@@ -1057,10 +1056,8 @@ static int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter)
        return ret;
 }
 
-static int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter,
-                                   bool state)
+int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter, u16 port)
 {
-       u16 vxlan_port = adapter->ahw->vxlan_port;
        struct qlcnic_cmd_args cmd;
        int ret = 0;
 
@@ -1071,18 +1068,18 @@ static int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter,
        if (ret)
                return ret;
 
-       cmd.req.arg[1] = state ? QLCNIC_ENABLE_INGRESS_ENCAP_PARSING :
-                                QLCNIC_DISABLE_INGRESS_ENCAP_PARSING;
+       cmd.req.arg[1] = port ? QLCNIC_ENABLE_INGRESS_ENCAP_PARSING :
+                               QLCNIC_DISABLE_INGRESS_ENCAP_PARSING;
 
        ret = qlcnic_issue_cmd(adapter, &cmd);
        if (ret)
                netdev_err(adapter->netdev,
                           "Failed to %s VXLAN parsing for port %d\n",
-                          state ? "enable" : "disable", vxlan_port);
+                          port ? "enable" : "disable", port);
        else
                netdev_info(adapter->netdev,
                            "%s VXLAN parsing for port %d\n",
-                           state ? "Enabled" : "Disabled", vxlan_port);
+                           port ? "Enabled" : "Disabled", port);
 
        qlcnic_free_mbx_args(&cmd);
 
@@ -1093,22 +1090,6 @@ static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
 {
        if (adapter->fhash.fnum)
                qlcnic_prune_lb_filters(adapter);
-
-       if (adapter->flags & QLCNIC_ADD_VXLAN_PORT) {
-               if (qlcnic_set_vxlan_port(adapter))
-                       return;
-
-               if (qlcnic_set_vxlan_parsing(adapter, true))
-                       return;
-
-               adapter->flags &= ~QLCNIC_ADD_VXLAN_PORT;
-       } else if (adapter->flags & QLCNIC_DEL_VXLAN_PORT) {
-               if (qlcnic_set_vxlan_parsing(adapter, false))
-                       return;
-
-               adapter->ahw->vxlan_port = 0;
-               adapter->flags &= ~QLCNIC_DEL_VXLAN_PORT;
-       }
 }
 
 /**
index 822aa39..35d891f 100644 (file)
@@ -1649,7 +1649,6 @@ int qlcnic_82xx_shutdown(struct pci_dev *pdev)
 {
        struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
-       int retval;
 
        netif_device_detach(netdev);
 
@@ -1662,14 +1661,8 @@ int qlcnic_82xx_shutdown(struct pci_dev *pdev)
 
        clear_bit(__QLCNIC_RESETTING, &adapter->state);
 
-       retval = pci_save_state(pdev);
-       if (retval)
-               return retval;
-
-       if (qlcnic_wol_supported(adapter)) {
-               pci_enable_wake(pdev, PCI_D3cold, 1);
-               pci_enable_wake(pdev, PCI_D3hot, 1);
-       }
+       if (qlcnic_wol_supported(adapter))
+               device_wakeup_enable(&pdev->dev);
 
        return 0;
 }
index 9dd6cb3..173c730 100644 (file)
@@ -471,48 +471,29 @@ static int qlcnic_get_phys_port_id(struct net_device *netdev,
        return 0;
 }
 
-static void qlcnic_add_vxlan_port(struct net_device *netdev,
-                                 struct udp_tunnel_info *ti)
+static int qlcnic_udp_tunnel_sync(struct net_device *dev, unsigned int table)
 {
-       struct qlcnic_adapter *adapter = netdev_priv(netdev);
-       struct qlcnic_hardware_context *ahw = adapter->ahw;
-
-       if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-               return;
+       struct qlcnic_adapter *adapter = netdev_priv(dev);
+       struct udp_tunnel_info ti;
+       int err;
 
-       /* Adapter supports only one VXLAN port. Use very first port
-        * for enabling offload
-        */
-       if (!qlcnic_encap_rx_offload(adapter))
-               return;
-       if (!ahw->vxlan_port_count) {
-               ahw->vxlan_port_count = 1;
-               ahw->vxlan_port = ntohs(ti->port);
-               adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
-               return;
+       udp_tunnel_nic_get_port(dev, table, 0, &ti);
+       if (ti.port) {
+               err = qlcnic_set_vxlan_port(adapter, ntohs(ti.port));
+               if (err)
+                       return err;
        }
-       if (ahw->vxlan_port == ntohs(ti->port))
-               ahw->vxlan_port_count++;
 
+       return qlcnic_set_vxlan_parsing(adapter, ntohs(ti.port));
 }
 
-static void qlcnic_del_vxlan_port(struct net_device *netdev,
-                                 struct udp_tunnel_info *ti)
-{
-       struct qlcnic_adapter *adapter = netdev_priv(netdev);
-       struct qlcnic_hardware_context *ahw = adapter->ahw;
-
-       if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-               return;
-
-       if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port_count ||
-           (ahw->vxlan_port != ntohs(ti->port)))
-               return;
-
-       ahw->vxlan_port_count--;
-       if (!ahw->vxlan_port_count)
-               adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
-}
+static const struct udp_tunnel_nic_info qlcnic_udp_tunnels = {
+       .sync_table     = qlcnic_udp_tunnel_sync,
+       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
+       .tables         = {
+               { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+       },
+};
 
 static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
                                               struct net_device *dev,
@@ -540,8 +521,8 @@ static const struct net_device_ops qlcnic_netdev_ops = {
        .ndo_fdb_del            = qlcnic_fdb_del,
        .ndo_fdb_dump           = qlcnic_fdb_dump,
        .ndo_get_phys_port_id   = qlcnic_get_phys_port_id,
-       .ndo_udp_tunnel_add     = qlcnic_add_vxlan_port,
-       .ndo_udp_tunnel_del     = qlcnic_del_vxlan_port,
+       .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+       .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
        .ndo_features_check     = qlcnic_features_check,
 #ifdef CONFIG_QLCNIC_SRIOV
        .ndo_set_vf_mac         = qlcnic_sriov_set_vf_mac,
@@ -2017,7 +1998,7 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
        qlcnic_create_sysfs_entries(adapter);
 
        if (qlcnic_encap_rx_offload(adapter))
-               udp_tunnel_get_rx_info(netdev);
+               udp_tunnel_nic_reset_ntf(netdev);
 
        adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
        return 0;
@@ -2335,9 +2316,12 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
                                          NETIF_F_TSO6;
        }
 
-       if (qlcnic_encap_rx_offload(adapter))
+       if (qlcnic_encap_rx_offload(adapter)) {
                netdev->hw_enc_features |= NETIF_F_RXCSUM;
 
+               netdev->udp_tunnel_nic_info = &qlcnic_udp_tunnels;
+       }
+
        netdev->hw_features = netdev->features;
        netdev->priv_flags |= IFF_UNICAST_FLT;
        netdev->irq = adapter->msix_entries[0].vector;
@@ -2811,35 +2795,17 @@ static void qlcnic_shutdown(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
-#ifdef CONFIG_PM
-static int qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused qlcnic_suspend(struct device *dev_d)
 {
-       int retval;
-
-       retval = __qlcnic_shutdown(pdev);
-       if (retval)
-               return retval;
-
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
-       return 0;
+       return __qlcnic_shutdown(to_pci_dev(dev_d));
 }
 
-static int qlcnic_resume(struct pci_dev *pdev)
+static int __maybe_unused qlcnic_resume(struct device *dev_d)
 {
-       struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
-       int err;
-
-       err = pci_enable_device(pdev);
-       if (err)
-               return err;
-
-       pci_set_power_state(pdev, PCI_D0);
-       pci_set_master(pdev);
-       pci_restore_state(pdev);
+       struct qlcnic_adapter *adapter = dev_get_drvdata(dev_d);
 
        return  __qlcnic_resume(adapter);
 }
-#endif
 
 static int qlcnic_open(struct net_device *netdev)
 {
@@ -4258,15 +4224,14 @@ static const struct pci_error_handlers qlcnic_err_handler = {
        .resume = qlcnic_io_resume,
 };
 
+static SIMPLE_DEV_PM_OPS(qlcnic_pm_ops, qlcnic_suspend, qlcnic_resume);
+
 static struct pci_driver qlcnic_driver = {
        .name = qlcnic_driver_name,
        .id_table = qlcnic_pci_tbl,
        .probe = qlcnic_probe,
        .remove = qlcnic_remove,
-#ifdef CONFIG_PM
-       .suspend = qlcnic_suspend,
-       .resume = qlcnic_resume,
-#endif
+       .driver.pm = &qlcnic_pm_ops,
        .shutdown = qlcnic_shutdown,
        .err_handler = &qlcnic_err_handler,
 #ifdef CONFIG_QLCNIC_SRIOV
index 8d7b9bb..1003763 100644 (file)
@@ -269,7 +269,7 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
                                     struct bin_attribute *attr, char *buf,
                                     loff_t offset, size_t size)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
        int ret;
 
@@ -286,7 +286,7 @@ static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
                                      struct bin_attribute *attr, char *buf,
                                      loff_t offset, size_t size)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
        int ret;
 
@@ -315,7 +315,7 @@ static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
                                     struct bin_attribute *attr, char *buf,
                                     loff_t offset, size_t size)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
        u64 data;
        int ret;
@@ -337,7 +337,7 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
                                      struct bin_attribute *attr, char *buf,
                                      loff_t offset, size_t size)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
        u64 data;
        int ret;
@@ -402,7 +402,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
                                            char *buf, loff_t offset,
                                            size_t size)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
        struct qlcnic_pm_func_cfg *pm_cfg;
        u32 id, action, pci_func;
@@ -452,7 +452,7 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
                                           char *buf, loff_t offset,
                                           size_t size)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
        struct qlcnic_pm_func_cfg *pm_cfg;
        u8 pci_func;
@@ -545,7 +545,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
                                             char *buf, loff_t offset,
                                             size_t size)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
        struct qlcnic_esw_func_cfg *esw_cfg;
        struct qlcnic_npar_info *npar;
@@ -629,7 +629,7 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
                                            char *buf, loff_t offset,
                                            size_t size)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
        struct qlcnic_esw_func_cfg *esw_cfg;
        u8 pci_func;
@@ -681,7 +681,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
                                              char *buf, loff_t offset,
                                              size_t size)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
        struct qlcnic_info nic_info;
        struct qlcnic_npar_func_cfg *np_cfg;
@@ -728,7 +728,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
                                             char *buf, loff_t offset,
                                             size_t size)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
        struct qlcnic_npar_func_cfg *np_cfg;
        struct qlcnic_info nic_info;
@@ -775,7 +775,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
                                           char *buf, loff_t offset,
                                           size_t size)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
        struct qlcnic_esw_statistics port_stats;
        int ret;
@@ -810,7 +810,7 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
                                          char *buf, loff_t offset,
                                          size_t size)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
        struct qlcnic_esw_statistics esw_stats;
        int ret;
@@ -845,7 +845,7 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
                                            char *buf, loff_t offset,
                                            size_t size)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
        int ret;
 
@@ -875,7 +875,7 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
                                             size_t size)
 {
 
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
        int ret;
 
@@ -904,7 +904,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
                                            char *buf, loff_t offset,
                                            size_t size)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
        struct qlcnic_pci_func_cfg *pci_cfg;
        struct qlcnic_pci_info *pci_info;
@@ -946,7 +946,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
 {
        unsigned char *p_read_buf;
        int  ret, count;
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
 
        if (!size)
@@ -1124,7 +1124,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
        int  ret;
        static int flash_mode;
        unsigned long data;
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
 
        ret = kstrtoul(buf, 16, &data);
index 40efe60..fcdecdd 100644 (file)
@@ -47,15 +47,23 @@ static int rmnet_unregister_real_device(struct net_device *real_dev)
        return 0;
 }
 
-static int rmnet_register_real_device(struct net_device *real_dev)
+static int rmnet_register_real_device(struct net_device *real_dev,
+                                     struct netlink_ext_ack *extack)
 {
        struct rmnet_port *port;
        int rc, entry;
 
        ASSERT_RTNL();
 
-       if (rmnet_is_real_dev_registered(real_dev))
+       if (rmnet_is_real_dev_registered(real_dev)) {
+               port = rmnet_get_port_rtnl(real_dev);
+               if (port->rmnet_mode != RMNET_EPMODE_VND) {
+                       NL_SET_ERR_MSG_MOD(extack, "bridge device already exists");
+                       return -EINVAL;
+               }
+
                return 0;
+       }
 
        port = kzalloc(sizeof(*port), GFP_KERNEL);
        if (!port)
@@ -133,7 +141,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
 
        mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
 
-       err = rmnet_register_real_device(real_dev);
+       err = rmnet_register_real_device(real_dev, extack);
        if (err)
                goto err0;
 
@@ -422,7 +430,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
        }
 
        if (port->rmnet_mode != RMNET_EPMODE_VND) {
-               NL_SET_ERR_MSG_MOD(extack, "bridge device already exists");
+               NL_SET_ERR_MSG_MOD(extack, "more than one bridge dev attached");
                return -EINVAL;
        }
 
@@ -433,7 +441,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
                return -EBUSY;
        }
 
-       err = rmnet_register_real_device(slave_dev);
+       err = rmnet_register_real_device(slave_dev, extack);
        if (err)
                return -EBUSY;
 
index f5ecc41..7c74318 100644 (file)
@@ -262,9 +262,9 @@ static void r6040_free_txbufs(struct net_device *dev)
 
        for (i = 0; i < TX_DCNT; i++) {
                if (lp->tx_insert_ptr->skb_ptr) {
-                       pci_unmap_single(lp->pdev,
-                               le32_to_cpu(lp->tx_insert_ptr->buf),
-                               MAX_BUF_SIZE, PCI_DMA_TODEVICE);
+                       dma_unmap_single(&lp->pdev->dev,
+                                        le32_to_cpu(lp->tx_insert_ptr->buf),
+                                        MAX_BUF_SIZE, DMA_TO_DEVICE);
                        dev_kfree_skb(lp->tx_insert_ptr->skb_ptr);
                        lp->tx_insert_ptr->skb_ptr = NULL;
                }
@@ -279,9 +279,9 @@ static void r6040_free_rxbufs(struct net_device *dev)
 
        for (i = 0; i < RX_DCNT; i++) {
                if (lp->rx_insert_ptr->skb_ptr) {
-                       pci_unmap_single(lp->pdev,
-                               le32_to_cpu(lp->rx_insert_ptr->buf),
-                               MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+                       dma_unmap_single(&lp->pdev->dev,
+                                        le32_to_cpu(lp->rx_insert_ptr->buf),
+                                        MAX_BUF_SIZE, DMA_FROM_DEVICE);
                        dev_kfree_skb(lp->rx_insert_ptr->skb_ptr);
                        lp->rx_insert_ptr->skb_ptr = NULL;
                }
@@ -335,9 +335,10 @@ static int r6040_alloc_rxbufs(struct net_device *dev)
                        goto err_exit;
                }
                desc->skb_ptr = skb;
-               desc->buf = cpu_to_le32(pci_map_single(lp->pdev,
-                                       desc->skb_ptr->data,
-                                       MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
+               desc->buf = cpu_to_le32(dma_map_single(&lp->pdev->dev,
+                                                      desc->skb_ptr->data,
+                                                      MAX_BUF_SIZE,
+                                                      DMA_FROM_DEVICE));
                desc->status = DSC_OWNER_MAC;
                desc = desc->vndescp;
        } while (desc != lp->rx_ring);
@@ -484,14 +485,14 @@ static int r6040_close(struct net_device *dev)
 
        /* Free Descriptor memory */
        if (lp->rx_ring) {
-               pci_free_consistent(pdev,
-                               RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma);
+               dma_free_coherent(&pdev->dev, RX_DESC_SIZE, lp->rx_ring,
+                                 lp->rx_ring_dma);
                lp->rx_ring = NULL;
        }
 
        if (lp->tx_ring) {
-               pci_free_consistent(pdev,
-                               TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma);
+               dma_free_coherent(&pdev->dev, TX_DESC_SIZE, lp->tx_ring,
+                                 lp->tx_ring_dma);
                lp->tx_ring = NULL;
        }
 
@@ -544,8 +545,8 @@ static int r6040_rx(struct net_device *dev, int limit)
 
                /* Do not count the CRC */
                skb_put(skb_ptr, descptr->len - 4);
-               pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
-                                       MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+               dma_unmap_single(&priv->pdev->dev, le32_to_cpu(descptr->buf),
+                                MAX_BUF_SIZE, DMA_FROM_DEVICE);
                skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);
 
                /* Send to upper layer */
@@ -555,9 +556,10 @@ static int r6040_rx(struct net_device *dev, int limit)
 
                /* put new skb into descriptor */
                descptr->skb_ptr = new_skb;
-               descptr->buf = cpu_to_le32(pci_map_single(priv->pdev,
-                                               descptr->skb_ptr->data,
-                                       MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));
+               descptr->buf = cpu_to_le32(dma_map_single(&priv->pdev->dev,
+                                                         descptr->skb_ptr->data,
+                                                         MAX_BUF_SIZE,
+                                                         DMA_FROM_DEVICE));
 
 next_descr:
                /* put the descriptor back to the MAC */
@@ -597,8 +599,8 @@ static void r6040_tx(struct net_device *dev)
                dev->stats.tx_packets++;
                dev->stats.tx_bytes += skb_ptr->len;
 
-               pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
-                       skb_ptr->len, PCI_DMA_TODEVICE);
+               dma_unmap_single(&priv->pdev->dev, le32_to_cpu(descptr->buf),
+                                skb_ptr->len, DMA_TO_DEVICE);
                /* Free buffer */
                dev_kfree_skb(skb_ptr);
                descptr->skb_ptr = NULL;
@@ -750,14 +752,16 @@ static int r6040_open(struct net_device *dev)
 
        /* Allocate Descriptor memory */
        lp->rx_ring =
-               pci_alloc_consistent(lp->pdev, RX_DESC_SIZE, &lp->rx_ring_dma);
+               dma_alloc_coherent(&lp->pdev->dev, RX_DESC_SIZE,
+                                  &lp->rx_ring_dma, GFP_KERNEL);
        if (!lp->rx_ring) {
                ret = -ENOMEM;
                goto err_free_irq;
        }
 
        lp->tx_ring =
-               pci_alloc_consistent(lp->pdev, TX_DESC_SIZE, &lp->tx_ring_dma);
+               dma_alloc_coherent(&lp->pdev->dev, TX_DESC_SIZE,
+                                  &lp->tx_ring_dma, GFP_KERNEL);
        if (!lp->tx_ring) {
                ret = -ENOMEM;
                goto err_free_rx_ring;
@@ -773,11 +777,11 @@ static int r6040_open(struct net_device *dev)
        return 0;
 
 err_free_tx_ring:
-       pci_free_consistent(lp->pdev, TX_DESC_SIZE, lp->tx_ring,
-                       lp->tx_ring_dma);
+       dma_free_coherent(&lp->pdev->dev, TX_DESC_SIZE, lp->tx_ring,
+                         lp->tx_ring_dma);
 err_free_rx_ring:
-       pci_free_consistent(lp->pdev, RX_DESC_SIZE, lp->rx_ring,
-                       lp->rx_ring_dma);
+       dma_free_coherent(&lp->pdev->dev, RX_DESC_SIZE, lp->rx_ring,
+                         lp->rx_ring_dma);
 err_free_irq:
        free_irq(dev->irq, dev);
 out:
@@ -811,8 +815,8 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
        descptr = lp->tx_insert_ptr;
        descptr->len = skb->len;
        descptr->skb_ptr = skb;
-       descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
-               skb->data, skb->len, PCI_DMA_TODEVICE));
+       descptr->buf = cpu_to_le32(dma_map_single(&lp->pdev->dev, skb->data,
+                                                 skb->len, DMA_TO_DEVICE));
        descptr->status = DSC_OWNER_MAC;
 
        skb_tx_timestamp(skb);
@@ -1029,12 +1033,12 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_out;
 
        /* this should always be supported */
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (err) {
                dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
                goto err_out_disable_dev;
        }
-       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+       err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (err) {
                dev_err(&pdev->dev, "32-bit PCI DMA addresses not supported by the card\n");
                goto err_out_disable_dev;
index 22a6a05..7be86ef 100644 (file)
@@ -26,7 +26,6 @@ enum mac_version {
        RTL_GIGA_MAC_VER_12,
        RTL_GIGA_MAC_VER_13,
        RTL_GIGA_MAC_VER_14,
-       RTL_GIGA_MAC_VER_15,
        RTL_GIGA_MAC_VER_16,
        RTL_GIGA_MAC_VER_17,
        RTL_GIGA_MAC_VER_18,
@@ -66,6 +65,7 @@ enum mac_version {
        RTL_GIGA_MAC_VER_52,
        RTL_GIGA_MAC_VER_60,
        RTL_GIGA_MAC_VER_61,
+       RTL_GIGA_MAC_VER_63,
        RTL_GIGA_MAC_NONE
 };
 
index dad84ec..d1da92a 100644 (file)
@@ -56,6 +56,7 @@
 #define FIRMWARE_8107E_1       "rtl_nic/rtl8107e-1.fw"
 #define FIRMWARE_8107E_2       "rtl_nic/rtl8107e-2.fw"
 #define FIRMWARE_8125A_3       "rtl_nic/rtl8125a-3.fw"
+#define FIRMWARE_8125B_2       "rtl_nic/rtl8125b-2.fw"
 
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
    The RTL chips use a 64 element hash table based on the Ethernet CRC. */
@@ -105,9 +106,8 @@ static const struct {
        [RTL_GIGA_MAC_VER_10] = {"RTL8101e"                             },
        [RTL_GIGA_MAC_VER_11] = {"RTL8168b/8111b"                       },
        [RTL_GIGA_MAC_VER_12] = {"RTL8168b/8111b"                       },
-       [RTL_GIGA_MAC_VER_13] = {"RTL8101e"                             },
-       [RTL_GIGA_MAC_VER_14] = {"RTL8100e"                             },
-       [RTL_GIGA_MAC_VER_15] = {"RTL8100e"                             },
+       [RTL_GIGA_MAC_VER_13] = {"RTL8101e/RTL8100e"                    },
+       [RTL_GIGA_MAC_VER_14] = {"RTL8401"                              },
        [RTL_GIGA_MAC_VER_16] = {"RTL8101e"                             },
        [RTL_GIGA_MAC_VER_17] = {"RTL8168b/8111b"                       },
        [RTL_GIGA_MAC_VER_18] = {"RTL8168cp/8111cp"                     },
@@ -145,8 +145,10 @@ static const struct {
        [RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep"                     },
        [RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep"                     },
        [RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117",  FIRMWARE_8168FP_3},
-       [RTL_GIGA_MAC_VER_60] = {"RTL8125"                              },
-       [RTL_GIGA_MAC_VER_61] = {"RTL8125",             FIRMWARE_8125A_3},
+       [RTL_GIGA_MAC_VER_60] = {"RTL8125A"                             },
+       [RTL_GIGA_MAC_VER_61] = {"RTL8125A",            FIRMWARE_8125A_3},
+       /* reserve 62 for CFG_METHOD_4 in the vendor driver */
+       [RTL_GIGA_MAC_VER_63] = {"RTL8125B",            FIRMWARE_8125B_2},
 };
 
 static const struct pci_device_id rtl8169_pci_tbl[] = {
@@ -336,6 +338,7 @@ enum rtl8125_registers {
        IntrStatus_8125         = 0x3c,
        TxPoll_8125             = 0x90,
        MAC0_BKP                = 0x19e0,
+       EEE_TXIDLE_TIMER_8125   = 0x6048,
 };
 
 #define RX_VLAN_INNER_8125     BIT(22)
@@ -529,8 +532,6 @@ enum rtl_rx_desc_bit {
        RxVlanTag       = (1 << 16), /* VLAN tag available */
 };
 
-#define RsvdMask       0x3fffc000
-
 #define RTL_GSO_MAX_SIZE_V1    32000
 #define RTL_GSO_MAX_SEGS_V1    24
 #define RTL_GSO_MAX_SIZE_V2    64000
@@ -613,7 +614,6 @@ struct rtl8169_private {
 
        struct {
                DECLARE_BITMAP(flags, RTL_FLAG_MAX);
-               struct mutex mutex;
                struct work_struct work;
        } wk;
 
@@ -659,22 +659,13 @@ MODULE_FIRMWARE(FIRMWARE_8168FP_3);
 MODULE_FIRMWARE(FIRMWARE_8107E_1);
 MODULE_FIRMWARE(FIRMWARE_8107E_2);
 MODULE_FIRMWARE(FIRMWARE_8125A_3);
+MODULE_FIRMWARE(FIRMWARE_8125B_2);
 
 static inline struct device *tp_to_dev(struct rtl8169_private *tp)
 {
        return &tp->pci_dev->dev;
 }
 
-static void rtl_lock_work(struct rtl8169_private *tp)
-{
-       mutex_lock(&tp->wk.mutex);
-}
-
-static void rtl_unlock_work(struct rtl8169_private *tp)
-{
-       mutex_unlock(&tp->wk.mutex);
-}
-
 static void rtl_lock_config_regs(struct rtl8169_private *tp)
 {
        RTL_W8(tp, Cfg9346, Cfg9346_Lock);
@@ -980,7 +971,7 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
        case RTL_GIGA_MAC_VER_31:
                r8168dp_2_mdio_write(tp, location, val);
                break;
-       case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61:
+       case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
                r8168g_mdio_write(tp, location, val);
                break;
        default:
@@ -997,7 +988,7 @@ static int rtl_readphy(struct rtl8169_private *tp, int location)
        case RTL_GIGA_MAC_VER_28:
        case RTL_GIGA_MAC_VER_31:
                return r8168dp_2_mdio_read(tp, location);
-       case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61:
+       case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
                return r8168g_mdio_read(tp, location);
        default:
                return r8169_mdio_read(tp, location);
@@ -1350,10 +1341,8 @@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
 
-       rtl_lock_work(tp);
        wol->supported = WAKE_ANY;
        wol->wolopts = tp->saved_wolopts;
-       rtl_unlock_work(tp);
 }
 
 static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
@@ -1405,7 +1394,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
                break;
        case RTL_GIGA_MAC_VER_34:
        case RTL_GIGA_MAC_VER_37:
-       case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_61:
+       case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63:
                options = RTL_R8(tp, Config2) & ~PME_SIGNAL;
                if (wolopts)
                        options |= PME_SIGNAL;
@@ -1424,23 +1413,12 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
-       struct device *d = tp_to_dev(tp);
 
        if (wol->wolopts & ~WAKE_ANY)
                return -EINVAL;
 
-       pm_runtime_get_noresume(d);
-
-       rtl_lock_work(tp);
-
        tp->saved_wolopts = wol->wolopts;
-
-       if (pm_runtime_active(d))
-               __rtl8169_set_wol(tp, tp->saved_wolopts);
-
-       rtl_unlock_work(tp);
-
-       pm_runtime_put_noidle(d);
+       __rtl8169_set_wol(tp, tp->saved_wolopts);
 
        return 0;
 }
@@ -1504,8 +1482,6 @@ static int rtl8169_set_features(struct net_device *dev,
 {
        struct rtl8169_private *tp = netdev_priv(dev);
 
-       rtl_lock_work(tp);
-
        rtl_set_rx_config_features(tp, features);
 
        if (features & NETIF_F_RXCSUM)
@@ -1523,8 +1499,6 @@ static int rtl8169_set_features(struct net_device *dev,
        RTL_W16(tp, CPlusCmd, tp->cp_cmd);
        rtl_pci_commit(tp);
 
-       rtl_unlock_work(tp);
-
        return 0;
 }
 
@@ -1550,10 +1524,8 @@ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
        u32 *dw = p;
        int i;
 
-       rtl_lock_work(tp);
        for (i = 0; i < R8169_REGS_SIZE; i += 4)
                memcpy_fromio(dw++, data++, 4);
-       rtl_unlock_work(tp);
 }
 
 static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
@@ -1659,17 +1631,10 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev,
                                      struct ethtool_stats *stats, u64 *data)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
-       struct device *d = tp_to_dev(tp);
-       struct rtl8169_counters *counters = tp->counters;
-
-       ASSERT_RTNL();
-
-       pm_runtime_get_noresume(d);
-
-       if (pm_runtime_active(d))
-               rtl8169_update_counters(tp);
+       struct rtl8169_counters *counters;
 
-       pm_runtime_put_noidle(d);
+       counters = tp->counters;
+       rtl8169_update_counters(tp);
 
        data[0] = le64_to_cpu(counters->tx_packets);
        data[1] = le64_to_cpu(counters->rx_packets);
@@ -1733,16 +1698,16 @@ struct rtl_coalesce_info {
 #define COALESCE_DELAY(d) { (d), 8 * (d), 16 * (d), 32 * (d) }
 
 static const struct rtl_coalesce_info rtl_coalesce_info_8169[] = {
-       { SPEED_10,     COALESCE_DELAY(40960) },
-       { SPEED_100,    COALESCE_DELAY(2560) },
        { SPEED_1000,   COALESCE_DELAY(320) },
+       { SPEED_100,    COALESCE_DELAY(2560) },
+       { SPEED_10,     COALESCE_DELAY(40960) },
        { 0 },
 };
 
 static const struct rtl_coalesce_info rtl_coalesce_info_8168_8136[] = {
-       { SPEED_10,     COALESCE_DELAY(40960) },
-       { SPEED_100,    COALESCE_DELAY(2560) },
        { SPEED_1000,   COALESCE_DELAY(5000) },
+       { SPEED_100,    COALESCE_DELAY(2560) },
+       { SPEED_10,     COALESCE_DELAY(40960) },
        { 0 },
 };
 #undef COALESCE_DELAY
@@ -1758,6 +1723,10 @@ rtl_coalesce_info(struct rtl8169_private *tp)
        else
                ci = rtl_coalesce_info_8168_8136;
 
+       /* if speed is unknown assume highest one */
+       if (tp->phydev->speed == SPEED_UNKNOWN)
+               return ci;
+
        for (; ci->speed; ci++) {
                if (tp->phydev->speed == ci->speed)
                        return ci;
@@ -1872,8 +1841,6 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
        units = DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000U, scale);
        w |= FIELD_PREP(RTL_COALESCE_RX_USECS, units);
 
-       rtl_lock_work(tp);
-
        RTL_W16(tp, IntrMitigate, w);
 
        /* Meaning of PktCntrDisable bit changed from RTL8168e-vl */
@@ -1889,56 +1856,32 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
        RTL_W16(tp, CPlusCmd, tp->cp_cmd);
        rtl_pci_commit(tp);
 
-       rtl_unlock_work(tp);
-
        return 0;
 }
 
 static int rtl8169_get_eee(struct net_device *dev, struct ethtool_eee *data)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
-       struct device *d = tp_to_dev(tp);
-       int ret;
 
        if (!rtl_supports_eee(tp))
                return -EOPNOTSUPP;
 
-       pm_runtime_get_noresume(d);
-
-       if (!pm_runtime_active(d)) {
-               ret = -EOPNOTSUPP;
-       } else {
-               ret = phy_ethtool_get_eee(tp->phydev, data);
-       }
-
-       pm_runtime_put_noidle(d);
-
-       return ret;
+       return phy_ethtool_get_eee(tp->phydev, data);
 }
 
 static int rtl8169_set_eee(struct net_device *dev, struct ethtool_eee *data)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
-       struct device *d = tp_to_dev(tp);
        int ret;
 
        if (!rtl_supports_eee(tp))
                return -EOPNOTSUPP;
 
-       pm_runtime_get_noresume(d);
-
-       if (!pm_runtime_active(d)) {
-               ret = -EOPNOTSUPP;
-               goto out;
-       }
-
        ret = phy_ethtool_set_eee(tp->phydev, data);
 
        if (!ret)
                tp->eee_adv = phy_read_mmd(dev->phydev, MDIO_MMD_AN,
                                           MDIO_AN_EEE_ADV);
-out:
-       pm_runtime_put_noidle(d);
        return ret;
 }
 
@@ -1997,7 +1940,10 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
                u16 val;
                enum mac_version ver;
        } mac_info[] = {
-               /* 8125 family. */
+               /* 8125B family. */
+               { 0x7cf, 0x641, RTL_GIGA_MAC_VER_63 },
+
+               /* 8125A family. */
                { 0x7cf, 0x608, RTL_GIGA_MAC_VER_60 },
                { 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 },
 
@@ -2062,16 +2008,15 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
                { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 },
                { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 },
                { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 },
-               /* RTL8401, reportedly works if treated as RTL8101e */
-               { 0x7cf, 0x240, RTL_GIGA_MAC_VER_13 },
+               { 0x7cf, 0x240, RTL_GIGA_MAC_VER_14 },
                { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 },
                { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 },
                { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },
                { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 },
                { 0x7c8, 0x340, RTL_GIGA_MAC_VER_16 },
                /* FIXME: where did these entries come from ? -- FR */
-               { 0xfc8, 0x388, RTL_GIGA_MAC_VER_15 },
-               { 0xfc8, 0x308, RTL_GIGA_MAC_VER_14 },
+               { 0xfc8, 0x388, RTL_GIGA_MAC_VER_13 },
+               { 0xfc8, 0x308, RTL_GIGA_MAC_VER_13 },
 
                /* 8110 family. */
                { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 },
@@ -2114,8 +2059,11 @@ static void rtl_release_firmware(struct rtl8169_private *tp)
 void r8169_apply_firmware(struct rtl8169_private *tp)
 {
        /* TODO: release firmware if rtl_fw_write_firmware signals failure. */
-       if (tp->rtl_fw)
+       if (tp->rtl_fw) {
                rtl_fw_write_firmware(tp, tp->rtl_fw);
+               /* At least one firmware doesn't reset tp->ocp_base. */
+               tp->ocp_base = OCP_STD_PHY_BASE;
+       }
 }
 
 static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
@@ -2127,12 +2075,23 @@ static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
        rtl_eri_set_bits(tp, 0x1b0, 0x0003);
 }
 
-static void rtl8125_config_eee_mac(struct rtl8169_private *tp)
+static void rtl8125a_config_eee_mac(struct rtl8169_private *tp)
 {
        r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
        r8168_mac_ocp_modify(tp, 0xeb62, 0, BIT(2) | BIT(1));
 }
 
+static void rtl8125_set_eee_txidle_timer(struct rtl8169_private *tp)
+{
+       RTL_W16(tp, EEE_TXIDLE_TIMER_8125, tp->dev->mtu + ETH_HLEN + 0x20);
+}
+
+static void rtl8125b_config_eee_mac(struct rtl8169_private *tp)
+{
+       rtl8125_set_eee_txidle_timer(tp);
+       r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
+}
+
 static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
 {
        const u16 w[] = {
@@ -2196,8 +2155,6 @@ static void rtl8169_init_phy(struct rtl8169_private *tp)
 
 static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
 {
-       rtl_lock_work(tp);
-
        rtl_unlock_config_regs(tp);
 
        RTL_W32(tp, MAC4, addr[4] | addr[5] << 8);
@@ -2210,26 +2167,18 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
                rtl_rar_exgmac_set(tp, addr);
 
        rtl_lock_config_regs(tp);
-
-       rtl_unlock_work(tp);
 }
 
 static int rtl_set_mac_address(struct net_device *dev, void *p)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
-       struct device *d = tp_to_dev(tp);
        int ret;
 
        ret = eth_mac_addr(dev, p);
        if (ret)
                return ret;
 
-       pm_runtime_get_noresume(d);
-
-       if (pm_runtime_active(d))
-               rtl_rar_set(tp, dev->dev_addr);
-
-       pm_runtime_put_noidle(d);
+       rtl_rar_set(tp, dev->dev_addr);
 
        return 0;
 }
@@ -2244,7 +2193,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
        case RTL_GIGA_MAC_VER_32:
        case RTL_GIGA_MAC_VER_33:
        case RTL_GIGA_MAC_VER_34:
-       case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_61:
+       case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_63:
                RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
                        AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
                break;
@@ -2278,11 +2227,7 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
        case RTL_GIGA_MAC_VER_46:
        case RTL_GIGA_MAC_VER_47:
        case RTL_GIGA_MAC_VER_48:
-       case RTL_GIGA_MAC_VER_50:
-       case RTL_GIGA_MAC_VER_51:
-       case RTL_GIGA_MAC_VER_52:
-       case RTL_GIGA_MAC_VER_60:
-       case RTL_GIGA_MAC_VER_61:
+       case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63:
                RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
                break;
        case RTL_GIGA_MAC_VER_40:
@@ -2294,10 +2239,14 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
        default:
                break;
        }
+
+       clk_disable_unprepare(tp->clk);
 }
 
 static void rtl_pll_power_up(struct rtl8169_private *tp)
 {
+       clk_prepare_enable(tp->clk);
+
        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
        case RTL_GIGA_MAC_VER_37:
@@ -2310,11 +2259,7 @@ static void rtl_pll_power_up(struct rtl8169_private *tp)
        case RTL_GIGA_MAC_VER_46:
        case RTL_GIGA_MAC_VER_47:
        case RTL_GIGA_MAC_VER_48:
-       case RTL_GIGA_MAC_VER_50:
-       case RTL_GIGA_MAC_VER_51:
-       case RTL_GIGA_MAC_VER_52:
-       case RTL_GIGA_MAC_VER_60:
-       case RTL_GIGA_MAC_VER_61:
+       case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63:
                RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
                break;
        case RTL_GIGA_MAC_VER_40:
@@ -2345,7 +2290,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
        case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
                RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
                break;
-       case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
+       case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
                RTL_W32(tp, RxConfig, RX_FETCH_DFLT_8125 | RX_DMA_BURST);
                break;
        default:
@@ -2508,6 +2453,12 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond)
        return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
 }
 
+DECLARE_RTL_COND(rtl_rxtx_empty_cond_2)
+{
+       /* IntrMitigate has new functionality on RTL8125 */
+       return (RTL_R16(tp, IntrMitigate) & 0x0103) == 0x0103;
+}
+
 static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
 {
        switch (tp->mac_version) {
@@ -2518,6 +2469,11 @@ static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
        case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
                rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
                break;
+       case RTL_GIGA_MAC_VER_63:
+               RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
+               rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
+               rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond_2, 100, 42);
+               break;
        default:
                break;
        }
@@ -3467,6 +3423,19 @@ static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
        rtl_ephy_write(tp, 0x03, 0xc2f9);
 }
 
+static void rtl_hw_start_8401(struct rtl8169_private *tp)
+{
+       static const struct ephy_info e_info_8401[] = {
+               { 0x01, 0xffff, 0x6fe5 },
+               { 0x03, 0xffff, 0x0599 },
+               { 0x06, 0xffff, 0xaf25 },
+               { 0x07, 0xffff, 0x8e68 },
+       };
+
+       rtl_ephy_init(tp, e_info_8401);
+       RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
+}
+
 static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
 {
        static const struct ephy_info e_info_8105e_1[] = {
@@ -3576,18 +3545,27 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
        /* disable new tx descriptor format */
        r8168_mac_ocp_modify(tp, 0xeb58, 0x0001, 0x0000);
 
-       r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
-       r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020);
+       if (tp->mac_version == RTL_GIGA_MAC_VER_63)
+               r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0200);
+       else
+               r8168_mac_ocp_modify(tp, 0xe614, 0x0700, 0x0400);
+
+       if (tp->mac_version == RTL_GIGA_MAC_VER_63)
+               r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0000);
+       else
+               r8168_mac_ocp_modify(tp, 0xe63e, 0x0c30, 0x0020);
+
        r8168_mac_ocp_modify(tp, 0xc0b4, 0x0000, 0x000c);
        r8168_mac_ocp_modify(tp, 0xeb6a, 0x00ff, 0x0033);
        r8168_mac_ocp_modify(tp, 0xeb50, 0x03e0, 0x0040);
        r8168_mac_ocp_modify(tp, 0xe056, 0x00f0, 0x0030);
        r8168_mac_ocp_modify(tp, 0xe040, 0x1000, 0x0000);
+       r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001);
        r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403);
-       r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0067);
+       r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068);
        r8168_mac_ocp_modify(tp, 0xc0ac, 0x0080, 0x1f00);
        r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f);
-       r8168_mac_ocp_modify(tp, 0xe84c, 0x0000, 0x00c0);
+
        r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000);
        r8168_mac_ocp_modify(tp, 0xeb54, 0x0000, 0x0001);
        udelay(1);
@@ -3598,15 +3576,18 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp)
 
        rtl_loop_wait_low(tp, &rtl_mac_ocp_e00e_cond, 1000, 10);
 
-       rtl8125_config_eee_mac(tp);
+       if (tp->mac_version == RTL_GIGA_MAC_VER_63)
+               rtl8125b_config_eee_mac(tp);
+       else
+               rtl8125a_config_eee_mac(tp);
 
        RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
        udelay(10);
 }
 
-static void rtl_hw_start_8125_1(struct rtl8169_private *tp)
+static void rtl_hw_start_8125a_1(struct rtl8169_private *tp)
 {
-       static const struct ephy_info e_info_8125_1[] = {
+       static const struct ephy_info e_info_8125a_1[] = {
                { 0x01, 0xffff, 0xa812 },
                { 0x09, 0xffff, 0x520c },
                { 0x04, 0xffff, 0xd000 },
@@ -3638,14 +3619,15 @@ static void rtl_hw_start_8125_1(struct rtl8169_private *tp)
 
        /* disable aspm and clock request before access ephy */
        rtl_hw_aspm_clkreq_enable(tp, false);
-       rtl_ephy_init(tp, e_info_8125_1);
+       rtl_ephy_init(tp, e_info_8125a_1);
 
        rtl_hw_start_8125_common(tp);
+       rtl_hw_aspm_clkreq_enable(tp, true);
 }
 
-static void rtl_hw_start_8125_2(struct rtl8169_private *tp)
+static void rtl_hw_start_8125a_2(struct rtl8169_private *tp)
 {
-       static const struct ephy_info e_info_8125_2[] = {
+       static const struct ephy_info e_info_8125a_2[] = {
                { 0x04, 0xffff, 0xd000 },
                { 0x0a, 0xffff, 0x8653 },
                { 0x23, 0xffff, 0xab66 },
@@ -3665,9 +3647,30 @@ static void rtl_hw_start_8125_2(struct rtl8169_private *tp)
 
        /* disable aspm and clock request before access ephy */
        rtl_hw_aspm_clkreq_enable(tp, false);
-       rtl_ephy_init(tp, e_info_8125_2);
+       rtl_ephy_init(tp, e_info_8125a_2);
 
        rtl_hw_start_8125_common(tp);
+       rtl_hw_aspm_clkreq_enable(tp, true);
+}
+
+static void rtl_hw_start_8125b(struct rtl8169_private *tp)
+{
+       static const struct ephy_info e_info_8125b[] = {
+               { 0x0b, 0xffff, 0xa908 },
+               { 0x1e, 0xffff, 0x20eb },
+               { 0x4b, 0xffff, 0xa908 },
+               { 0x5e, 0xffff, 0x20eb },
+               { 0x22, 0x0030, 0x0020 },
+               { 0x62, 0x0030, 0x0020 },
+       };
+
+       rtl_set_def_aspm_entry_latency(tp);
+       rtl_hw_aspm_clkreq_enable(tp, false);
+
+       rtl_ephy_init(tp, e_info_8125b);
+       rtl_hw_start_8125_common(tp);
+
+       rtl_hw_aspm_clkreq_enable(tp, true);
 }
 
 static void rtl_hw_config(struct rtl8169_private *tp)
@@ -3680,8 +3683,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
                [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168b,
                [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168b,
                [RTL_GIGA_MAC_VER_13] = NULL,
-               [RTL_GIGA_MAC_VER_14] = NULL,
-               [RTL_GIGA_MAC_VER_15] = NULL,
+               [RTL_GIGA_MAC_VER_14] = rtl_hw_start_8401,
                [RTL_GIGA_MAC_VER_16] = NULL,
                [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168b,
                [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
@@ -3719,8 +3721,9 @@ static void rtl_hw_config(struct rtl8169_private *tp)
                [RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2,
                [RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
                [RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117,
-               [RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125_1,
-               [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125_2,
+               [RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125a_1,
+               [RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
+               [RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
        };
 
        if (hw_configs[tp->mac_version])
@@ -3807,6 +3810,15 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
        netdev_update_features(dev);
        rtl_jumbo_config(tp);
 
+       switch (tp->mac_version) {
+       case RTL_GIGA_MAC_VER_61:
+       case RTL_GIGA_MAC_VER_63:
+               rtl8125_set_eee_txidle_timer(tp);
+               break;
+       default:
+               break;
+       }
+
        return 0;
 }
 
@@ -3928,10 +3940,12 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
        netdev_reset_queue(tp->dev);
 }
 
-static void rtl8169_hw_reset(struct rtl8169_private *tp, bool going_down)
+static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down)
 {
+       napi_disable(&tp->napi);
+
        /* Give a racing hard_start_xmit a few cycles to complete. */
-       synchronize_rcu();
+       synchronize_net();
 
        /* Disable interrupts */
        rtl8169_irq_mask_and_ack(tp);
@@ -3951,7 +3965,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp, bool going_down)
                RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
                rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
                break;
-       case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_61:
+       case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_63:
                rtl_enable_rxdvgate(tp);
                fsleep(2000);
                break;
@@ -3969,20 +3983,17 @@ no_reset:
 
 static void rtl_reset_work(struct rtl8169_private *tp)
 {
-       struct net_device *dev = tp->dev;
        int i;
 
-       napi_disable(&tp->napi);
-       netif_stop_queue(dev);
+       netif_stop_queue(tp->dev);
 
-       rtl8169_hw_reset(tp, false);
+       rtl8169_cleanup(tp, false);
 
        for (i = 0; i < NUM_RX_DESC; i++)
                rtl8169_mark_to_asic(tp->RxDescArray + i);
 
        napi_enable(&tp->napi);
        rtl_hw_start(tp);
-       netif_wake_queue(dev);
 }
 
 static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue)
@@ -4561,16 +4572,18 @@ static void rtl_task(struct work_struct *work)
        struct rtl8169_private *tp =
                container_of(work, struct rtl8169_private, wk.work);
 
-       rtl_lock_work(tp);
+       rtnl_lock();
 
        if (!netif_running(tp->dev) ||
            !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
                goto out_unlock;
 
-       if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags))
+       if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) {
                rtl_reset_work(tp);
+               netif_wake_queue(tp->dev);
+       }
 out_unlock:
-       rtl_unlock_work(tp);
+       rtnl_unlock();
 }
 
 static int rtl8169_poll(struct napi_struct *napi, int budget)
@@ -4632,19 +4645,27 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
 
 static void rtl8169_down(struct rtl8169_private *tp)
 {
-       rtl_lock_work(tp);
-
        /* Clear all task flags */
        bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
 
        phy_stop(tp->phydev);
-       napi_disable(&tp->napi);
 
-       rtl8169_hw_reset(tp, true);
+       rtl8169_update_counters(tp);
+
+       rtl8169_cleanup(tp, true);
 
        rtl_pll_power_down(tp);
+}
+
+static void rtl8169_up(struct rtl8169_private *tp)
+{
+       rtl_pll_power_up(tp);
+       rtl8169_init_phy(tp);
+       napi_enable(&tp->napi);
+       set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+       rtl_reset_work(tp);
 
-       rtl_unlock_work(tp);
+       phy_start(tp->phydev);
 }
 
 static int rtl8169_close(struct net_device *dev)
@@ -4654,9 +4675,6 @@ static int rtl8169_close(struct net_device *dev)
 
        pm_runtime_get_sync(&pdev->dev);
 
-       /* Update counters before going down */
-       rtl8169_update_counters(tp);
-
        netif_stop_queue(dev);
        rtl8169_down(tp);
        rtl8169_rx_clear(tp);
@@ -4725,25 +4743,10 @@ static int rtl_open(struct net_device *dev)
        if (retval)
                goto err_free_irq;
 
-       rtl_lock_work(tp);
-
-       set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
-
-       napi_enable(&tp->napi);
-
-       rtl8169_init_phy(tp);
-
-       rtl_pll_power_up(tp);
-
-       rtl_hw_start(tp);
-
+       rtl8169_up(tp);
        rtl8169_init_counter_offsets(tp);
-
-       phy_start(tp->phydev);
        netif_start_queue(dev);
 
-       rtl_unlock_work(tp);
-
        pm_runtime_put_sync(&pdev->dev);
 out:
        return retval;
@@ -4815,11 +4818,10 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 
 static void rtl8169_net_suspend(struct rtl8169_private *tp)
 {
-       if (!netif_running(tp->dev))
-               return;
-
        netif_device_detach(tp->dev);
-       rtl8169_down(tp);
+
+       if (netif_running(tp->dev))
+               rtl8169_down(tp);
 }
 
 #ifdef CONFIG_PM
@@ -4828,38 +4830,23 @@ static int __maybe_unused rtl8169_suspend(struct device *device)
 {
        struct rtl8169_private *tp = dev_get_drvdata(device);
 
+       rtnl_lock();
        rtl8169_net_suspend(tp);
-       clk_disable_unprepare(tp->clk);
+       rtnl_unlock();
 
        return 0;
 }
 
-static void __rtl8169_resume(struct rtl8169_private *tp)
-{
-       netif_device_attach(tp->dev);
-
-       rtl_pll_power_up(tp);
-       rtl8169_init_phy(tp);
-
-       phy_start(tp->phydev);
-
-       rtl_lock_work(tp);
-       napi_enable(&tp->napi);
-       set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
-       rtl_reset_work(tp);
-       rtl_unlock_work(tp);
-}
-
-static int __maybe_unused rtl8169_resume(struct device *device)
+static int rtl8169_resume(struct device *device)
 {
        struct rtl8169_private *tp = dev_get_drvdata(device);
 
        rtl_rar_set(tp, tp->dev->dev_addr);
 
-       clk_prepare_enable(tp->clk);
+       if (tp->TxDescArray)
+               rtl8169_up(tp);
 
-       if (netif_running(tp->dev))
-               __rtl8169_resume(tp);
+       netif_device_attach(tp->dev);
 
        return 0;
 }
@@ -4868,17 +4855,15 @@ static int rtl8169_runtime_suspend(struct device *device)
 {
        struct rtl8169_private *tp = dev_get_drvdata(device);
 
-       if (!tp->TxDescArray)
+       if (!tp->TxDescArray) {
+               netif_device_detach(tp->dev);
                return 0;
+       }
 
-       rtl_lock_work(tp);
+       rtnl_lock();
        __rtl8169_set_wol(tp, WAKE_PHY);
-       rtl_unlock_work(tp);
-
        rtl8169_net_suspend(tp);
-
-       /* Update counters before going runtime suspend */
-       rtl8169_update_counters(tp);
+       rtnl_unlock();
 
        return 0;
 }
@@ -4887,18 +4872,9 @@ static int rtl8169_runtime_resume(struct device *device)
 {
        struct rtl8169_private *tp = dev_get_drvdata(device);
 
-       rtl_rar_set(tp, tp->dev->dev_addr);
-
-       if (!tp->TxDescArray)
-               return 0;
-
-       rtl_lock_work(tp);
        __rtl8169_set_wol(tp, tp->saved_wolopts);
-       rtl_unlock_work(tp);
 
-       __rtl8169_resume(tp);
-
-       return 0;
+       return rtl8169_resume(device);
 }
 
 static int rtl8169_runtime_idle(struct device *device)
@@ -4940,7 +4916,9 @@ static void rtl_shutdown(struct pci_dev *pdev)
 {
        struct rtl8169_private *tp = pci_get_drvdata(pdev);
 
+       rtnl_lock();
        rtl8169_net_suspend(tp);
+       rtnl_unlock();
 
        /* Restore original MAC address */
        rtl_rar_set(tp, tp->dev->perm_addr);
@@ -5100,7 +5078,7 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
        new_bus->read = r8169_mdio_read_reg;
        new_bus->write = r8169_mdio_write_reg;
 
-       ret = devm_mdiobus_register(new_bus);
+       ret = devm_mdiobus_register(&pdev->dev, new_bus);
        if (ret)
                return ret;
 
@@ -5163,7 +5141,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
        case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
                rtl_hw_init_8168g(tp);
                break;
-       case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_61:
+       case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
                rtl_hw_init_8125(tp);
                break;
        default:
@@ -5346,7 +5324,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                return rc;
        }
 
-       mutex_init(&tp->wk.mutex);
        INIT_WORK(&tp->wk.work, rtl_task);
        u64_stats_init(&tp->rx_stats.syncp);
        u64_stats_init(&tp->tx_stats.syncp);
@@ -5432,8 +5409,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                            jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
                            "ok" : "ko");
 
-       if (r8168_check_dash(tp))
+       if (r8168_check_dash(tp)) {
+               netdev_info(dev, "DASH enabled\n");
                rtl8168_driver_start(tp);
+       }
 
        if (pci_dev_run_wake(pdev))
                pm_runtime_put_sync(&pdev->dev);
index b73f7d0..913d030 100644 (file)
@@ -89,7 +89,7 @@ static void rtl8168h_config_eee_phy(struct phy_device *phydev)
        phy_modify_paged(phydev, 0xa42, 0x14, 0x0000, 0x0080);
 }
 
-static void rtl8125_config_eee_phy(struct phy_device *phydev)
+static void rtl8125a_config_eee_phy(struct phy_device *phydev)
 {
        rtl8168h_config_eee_phy(phydev);
 
@@ -97,6 +97,14 @@ static void rtl8125_config_eee_phy(struct phy_device *phydev)
        phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000);
 }
 
+static void rtl8125b_config_eee_phy(struct phy_device *phydev)
+{
+       phy_modify_paged(phydev, 0xa6d, 0x12, 0x0001, 0x0000);
+       phy_modify_paged(phydev, 0xa6d, 0x14, 0x0010, 0x0000);
+       phy_modify_paged(phydev, 0xa42, 0x14, 0x0080, 0x0000);
+       phy_modify_paged(phydev, 0xa4a, 0x11, 0x0200, 0x0000);
+}
+
 static void rtl8169s_hw_phy_config(struct rtl8169_private *tp,
                                   struct phy_device *phydev)
 {
@@ -1091,6 +1099,13 @@ static void rtl8102e_hw_phy_config(struct rtl8169_private *tp,
        rtl_writephy_batch(phydev, phy_reg_init);
 }
 
+static void rtl8401_hw_phy_config(struct rtl8169_private *tp,
+                                 struct phy_device *phydev)
+{
+       phy_set_bits(phydev, 0x11, BIT(12));
+       phy_modify_paged(phydev, 0x0002, 0x0f, 0x0000, 0x0003);
+}
+
 static void rtl8105e_hw_phy_config(struct rtl8169_private *tp,
                                   struct phy_device *phydev)
 {
@@ -1140,8 +1155,13 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp,
        rtl_writephy_batch(phydev, phy_reg_init);
 }
 
-static void rtl8125_1_hw_phy_config(struct rtl8169_private *tp,
-                                   struct phy_device *phydev)
+static void rtl8125_legacy_force_mode(struct phy_device *phydev)
+{
+       phy_modify_paged(phydev, 0xa5b, 0x12, BIT(15), 0);
+}
+
+static void rtl8125a_1_hw_phy_config(struct rtl8169_private *tp,
+                                    struct phy_device *phydev)
 {
        phy_modify_paged(phydev, 0xad4, 0x10, 0x03ff, 0x0084);
        phy_modify_paged(phydev, 0xad4, 0x17, 0x0000, 0x0010);
@@ -1175,11 +1195,11 @@ static void rtl8125_1_hw_phy_config(struct rtl8169_private *tp,
        phy_modify_paged(phydev, 0xa5c, 0x10, 0x0400, 0x0000);
        rtl8168g_enable_gphy_10m(phydev);
 
-       rtl8125_config_eee_phy(phydev);
+       rtl8125a_config_eee_phy(phydev);
 }
 
-static void rtl8125_2_hw_phy_config(struct rtl8169_private *tp,
-                                   struct phy_device *phydev)
+static void rtl8125a_2_hw_phy_config(struct rtl8169_private *tp,
+                                    struct phy_device *phydev)
 {
        int i;
 
@@ -1240,7 +1260,46 @@ static void rtl8125_2_hw_phy_config(struct rtl8169_private *tp,
        phy_modify_paged(phydev, 0xa86, 0x15, 0x0001, 0x0000);
        rtl8168g_enable_gphy_10m(phydev);
 
-       rtl8125_config_eee_phy(phydev);
+       rtl8125a_config_eee_phy(phydev);
+}
+
+static void rtl8125b_hw_phy_config(struct rtl8169_private *tp,
+                                  struct phy_device *phydev)
+{
+       r8169_apply_firmware(tp);
+
+       phy_modify_paged(phydev, 0xa44, 0x11, 0x0000, 0x0800);
+       phy_modify_paged(phydev, 0xac4, 0x13, 0x00f0, 0x0090);
+       phy_modify_paged(phydev, 0xad3, 0x10, 0x0003, 0x0001);
+
+       phy_write(phydev, 0x1f, 0x0b87);
+       phy_write(phydev, 0x16, 0x80f5);
+       phy_write(phydev, 0x17, 0x760e);
+       phy_write(phydev, 0x16, 0x8107);
+       phy_write(phydev, 0x17, 0x360e);
+       phy_write(phydev, 0x16, 0x8551);
+       phy_modify(phydev, 0x17, 0xff00, 0x0800);
+       phy_write(phydev, 0x1f, 0x0000);
+
+       phy_modify_paged(phydev, 0xbf0, 0x10, 0xe000, 0xa000);
+       phy_modify_paged(phydev, 0xbf4, 0x13, 0x0f00, 0x0300);
+
+       r8168g_phy_param(phydev, 0x8044, 0xffff, 0x2417);
+       r8168g_phy_param(phydev, 0x804a, 0xffff, 0x2417);
+       r8168g_phy_param(phydev, 0x8050, 0xffff, 0x2417);
+       r8168g_phy_param(phydev, 0x8056, 0xffff, 0x2417);
+       r8168g_phy_param(phydev, 0x805c, 0xffff, 0x2417);
+       r8168g_phy_param(phydev, 0x8062, 0xffff, 0x2417);
+       r8168g_phy_param(phydev, 0x8068, 0xffff, 0x2417);
+       r8168g_phy_param(phydev, 0x806e, 0xffff, 0x2417);
+       r8168g_phy_param(phydev, 0x8074, 0xffff, 0x2417);
+       r8168g_phy_param(phydev, 0x807a, 0xffff, 0x2417);
+
+       phy_modify_paged(phydev, 0xa4c, 0x15, 0x0000, 0x0040);
+       phy_modify_paged(phydev, 0xbf8, 0x12, 0xe000, 0xa000);
+
+       rtl8125_legacy_force_mode(phydev);
+       rtl8125b_config_eee_phy(phydev);
 }
 
 void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
@@ -1261,8 +1320,7 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
                [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
                [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config,
                [RTL_GIGA_MAC_VER_13] = NULL,
-               [RTL_GIGA_MAC_VER_14] = NULL,
-               [RTL_GIGA_MAC_VER_15] = NULL,
+               [RTL_GIGA_MAC_VER_14] = rtl8401_hw_phy_config,
                [RTL_GIGA_MAC_VER_16] = NULL,
                [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
                [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
@@ -1300,8 +1358,9 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
                [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
                [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
                [RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config,
-               [RTL_GIGA_MAC_VER_60] = rtl8125_1_hw_phy_config,
-               [RTL_GIGA_MAC_VER_61] = rtl8125_2_hw_phy_config,
+               [RTL_GIGA_MAC_VER_60] = rtl8125a_1_hw_phy_config,
+               [RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
+               [RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
        };
 
        if (phy_configs[ver])
index 7585cd2..fc99e71 100644 (file)
@@ -647,10 +647,10 @@ static int rocker_dma_rings_init(struct rocker *rocker)
 err_dma_event_ring_bufs_alloc:
        rocker_dma_ring_destroy(rocker, &rocker->event_ring);
 err_dma_event_ring_create:
+       rocker_dma_cmd_ring_waits_free(rocker);
+err_dma_cmd_ring_waits_alloc:
        rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
                                  PCI_DMA_BIDIRECTIONAL);
-err_dma_cmd_ring_waits_alloc:
-       rocker_dma_cmd_ring_waits_free(rocker);
 err_dma_cmd_ring_bufs_alloc:
        rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
        return err;
index 1b59e9f..2590cab 100644 (file)
@@ -282,7 +282,10 @@ typedef union efx_oword {
                                 field7, value7,                        \
                                 field8, value8,                        \
                                 field9, value9,                        \
-                                field10, value10)                      \
+                                field10, value10,                      \
+                                field11, value11,                      \
+                                field12, value12,                      \
+                                field13, value13)                      \
        (EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) |      \
         EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) |      \
         EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) |      \
@@ -292,7 +295,10 @@ typedef union efx_oword {
         EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) |      \
         EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) |      \
         EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) |      \
-        EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)))
+        EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)) |    \
+        EFX_INSERT_FIELD_NATIVE((min), (max), field11, (value11)) |    \
+        EFX_INSERT_FIELD_NATIVE((min), (max), field12, (value12)) |    \
+        EFX_INSERT_FIELD_NATIVE((min), (max), field13, (value13)))
 
 #define EFX_INSERT_FIELDS64(...)                               \
        cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
@@ -334,7 +340,13 @@ typedef union efx_oword {
 #endif
 
 /* Populate an octword field with various numbers of arguments */
-#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD
+#define EFX_POPULATE_OWORD_13 EFX_POPULATE_OWORD
+#define EFX_POPULATE_OWORD_12(oword, ...) \
+       EFX_POPULATE_OWORD_13(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_11(oword, ...) \
+       EFX_POPULATE_OWORD_12(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_10(oword, ...) \
+       EFX_POPULATE_OWORD_11(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
 #define EFX_POPULATE_OWORD_9(oword, ...) \
        EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
 #define EFX_POPULATE_OWORD_8(oword, ...) \
@@ -363,7 +375,13 @@ typedef union efx_oword {
                             EFX_DWORD_3, 0xffffffff)
 
 /* Populate a quadword field with various numbers of arguments */
-#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD
+#define EFX_POPULATE_QWORD_13 EFX_POPULATE_QWORD
+#define EFX_POPULATE_QWORD_12(qword, ...) \
+       EFX_POPULATE_QWORD_13(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_11(qword, ...) \
+       EFX_POPULATE_QWORD_12(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_10(qword, ...) \
+       EFX_POPULATE_QWORD_11(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
 #define EFX_POPULATE_QWORD_9(qword, ...) \
        EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
 #define EFX_POPULATE_QWORD_8(qword, ...) \
@@ -390,7 +408,13 @@ typedef union efx_oword {
                             EFX_DWORD_1, 0xffffffff)
 
 /* Populate a dword field with various numbers of arguments */
-#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD
+#define EFX_POPULATE_DWORD_13 EFX_POPULATE_DWORD
+#define EFX_POPULATE_DWORD_12(dword, ...) \
+       EFX_POPULATE_DWORD_13(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_11(dword, ...) \
+       EFX_POPULATE_DWORD_12(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_10(dword, ...) \
+       EFX_POPULATE_DWORD_11(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
 #define EFX_POPULATE_DWORD_9(dword, ...) \
        EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
 #define EFX_POPULATE_DWORD_8(dword, ...) \
index 4b0e369..fa7229f 100644 (file)
@@ -10,6 +10,7 @@
 #include "io.h"
 #include "mcdi.h"
 #include "mcdi_pcol.h"
+#include "mcdi_port.h"
 #include "mcdi_port_common.h"
 #include "mcdi_functions.h"
 #include "nic.h"
@@ -21,6 +22,7 @@
 #include <linux/jhash.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
+#include <net/udp_tunnel.h>
 
 /* Hardware control for EF10 architecture including 'Huntington'. */
 
@@ -37,6 +39,7 @@ struct efx_ef10_vlan {
 };
 
 static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading);
+static const struct udp_tunnel_nic_info efx_ef10_udp_tunnels;
 
 static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
 {
@@ -551,10 +554,6 @@ static int efx_ef10_probe(struct efx_nic *efx)
        }
        nic_data->warm_boot_count = rc;
 
-       efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
-
-       efx->vport_id = EVB_PORT_ID_ASSIGNED;
-
        /* In case we're recovering from a crash (kexec), we want to
         * cancel any outstanding request by the previous user of this
         * function.  We send a special message using the least
@@ -567,6 +566,9 @@ static int efx_ef10_probe(struct efx_nic *efx)
                goto fail2;
 
        mutex_init(&nic_data->udp_tunnels_lock);
+       for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i)
+               nic_data->udp_tunnels[i].type =
+                       TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID;
 
        /* Reset (most) configuration for this function */
        rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
@@ -601,10 +603,15 @@ static int efx_ef10_probe(struct efx_nic *efx)
         * However, until we use TX option descriptors we need two TX queues
         * per channel.
         */
-       efx->max_channels = min_t(unsigned int,
-                                 EFX_MAX_CHANNELS,
-                                 efx_ef10_mem_map_size(efx) /
-                                 (efx->vi_stride * EFX_TXQ_TYPES));
+       efx->tx_queues_per_channel = 2;
+       efx->max_vis = efx_ef10_mem_map_size(efx) / efx->vi_stride;
+       if (!efx->max_vis) {
+               netif_err(efx, drv, efx->net_dev, "error determining max VIs\n");
+               rc = -EIO;
+               goto fail5;
+       }
+       efx->max_channels = min_t(unsigned int, EFX_MAX_CHANNELS,
+                                 efx->max_vis / efx->tx_queues_per_channel);
        efx->max_tx_channels = efx->max_channels;
        if (WARN_ON(efx->max_channels == 0)) {
                rc = -EIO;
@@ -663,6 +670,12 @@ static int efx_ef10_probe(struct efx_nic *efx)
        if (rc)
                goto fail_add_vid_0;
 
+       if (nic_data->datapath_caps &
+           (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN) &&
+           efx->mcdi->fn_flags &
+           (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED))
+               efx->net_dev->udp_tunnel_nic_info = &efx_ef10_udp_tunnels;
+
        return 0;
 
 fail_add_vid_0:
@@ -1117,18 +1130,24 @@ static int efx_ef10_alloc_vis(struct efx_nic *efx,
  */
 static int efx_ef10_dimension_resources(struct efx_nic *efx)
 {
+       unsigned int min_vis = max_t(unsigned int, efx->tx_queues_per_channel,
+                                    efx_separate_tx_channels ? 2 : 1);
+       unsigned int channel_vis, pio_write_vi_base, max_vis;
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        unsigned int uc_mem_map_size, wc_mem_map_size;
-       unsigned int min_vis = max(EFX_TXQ_TYPES,
-                                  efx_separate_tx_channels ? 2 : 1);
-       unsigned int channel_vis, pio_write_vi_base, max_vis;
        void __iomem *membase;
        int rc;
 
        channel_vis = max(efx->n_channels,
                          ((efx->n_tx_channels + efx->n_extra_tx_channels) *
-                          EFX_TXQ_TYPES) +
+                          efx->tx_queues_per_channel) +
                           efx->n_xdp_channels * efx->xdp_tx_per_channel);
+       if (efx->max_vis && efx->max_vis < channel_vis) {
+               netif_dbg(efx, drv, efx->net_dev,
+                         "Reducing channel VIs from %u to %u\n",
+                         channel_vis, efx->max_vis);
+               channel_vis = efx->max_vis;
+       }
 
 #ifdef EFX_USE_PIO
        /* Try to allocate PIO buffers if wanted and if the full
@@ -1210,7 +1229,7 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
                 */
                efx->max_channels = nic_data->n_allocated_vis;
                efx->max_tx_channels =
-                       nic_data->n_allocated_vis / EFX_TXQ_TYPES;
+                       nic_data->n_allocated_vis / efx->tx_queues_per_channel;
 
                efx_mcdi_free_vis(efx);
                return -EAGAIN;
@@ -1269,6 +1288,14 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
        return 0;
 }
 
+static void efx_ef10_fini_nic(struct efx_nic *efx)
+{
+       struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+       kfree(nic_data->mc_stats);
+       nic_data->mc_stats = NULL;
+}
+
 static int efx_ef10_init_nic(struct efx_nic *efx)
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -1290,6 +1317,11 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
                efx->must_realloc_vis = false;
        }
 
+       nic_data->mc_stats = kmalloc(efx->num_mac_stats * sizeof(__le64),
+                                    GFP_KERNEL);
+       if (!nic_data->mc_stats)
+               return -ENOMEM;
+
        if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
                rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
                if (rc == 0) {
@@ -1410,8 +1442,6 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
        { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
 #define EF10_OTHER_STAT(ext_name)                              \
        [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
-#define GENERIC_SW_STAT(ext_name)                              \
-       [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
 
 static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
        EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
@@ -1455,8 +1485,8 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
        EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
        EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
        EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
-       GENERIC_SW_STAT(rx_nodesc_trunc),
-       GENERIC_SW_STAT(rx_noskb_drops),
+       EFX_GENERIC_SW_STAT(rx_nodesc_trunc),
+       EFX_GENERIC_SW_STAT(rx_noskb_drops),
        EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
        EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
        EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
@@ -1765,55 +1795,42 @@ static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
        return stats_count;
 }
 
-static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
+static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
+                                      struct rtnl_link_stats64 *core_stats)
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        DECLARE_BITMAP(mask, EF10_STAT_COUNT);
-       __le64 generation_start, generation_end;
        u64 *stats = nic_data->stats;
-       __le64 *dma_stats;
 
        efx_ef10_get_stat_mask(efx, mask);
 
-       dma_stats = efx->stats_buffer.addr;
-
-       generation_end = dma_stats[efx->num_mac_stats - 1];
-       if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
-               return 0;
-       rmb();
-       efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
-                            stats, efx->stats_buffer.addr, false);
-       rmb();
-       generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
-       if (generation_end != generation_start)
-               return -EAGAIN;
+       efx_nic_copy_stats(efx, nic_data->mc_stats);
+       efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
+                            mask, stats, nic_data->mc_stats, false);
 
        /* Update derived statistics */
        efx_nic_fix_nodesc_drop_stat(efx,
                                     &stats[EF10_STAT_port_rx_nodesc_drops]);
+       /* MC Firmware reads RX_BYTES and RX_GOOD_BYTES from the MAC.
+        * It then calculates RX_BAD_BYTES and DMAs it to us with RX_BYTES.
+        * We report these as port_rx_ stats. We are not given RX_GOOD_BYTES.
+        * Here we calculate port_rx_good_bytes.
+        */
        stats[EF10_STAT_port_rx_good_bytes] =
                stats[EF10_STAT_port_rx_bytes] -
                stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
+
+       /* The asynchronous reads used to calculate RX_BAD_BYTES in
+        * MC Firmware are done such that we should not see an increase in
+        * RX_BAD_BYTES when a good packet has arrived. Unfortunately this
+        * does mean that the stat can decrease at times. Here we do not
+        * update the stat unless it has increased or has gone to zero
+        * (In the case of the NIC rebooting).
+        * Please see Bug 33781 for a discussion of why things work this way.
+        */
        efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
                             stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
        efx_update_sw_stats(efx, stats);
-       return 0;
-}
-
-
-static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
-                                      struct rtnl_link_stats64 *core_stats)
-{
-       int retry;
-
-       /* If we're unlucky enough to read statistics during the DMA, wait
-        * up to 10ms for it to finish (typically takes <500us)
-        */
-       for (retry = 0; retry < 100; ++retry) {
-               if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
-                       break;
-               udelay(100);
-       }
 
        return efx_ef10_update_stats_common(efx, full_stats, core_stats);
 }
@@ -2236,7 +2253,7 @@ static u32 efx_ef10_tso_versions(struct efx_nic *efx)
 
 static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
 {
-       bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+       bool csum_offload = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD;
        struct efx_channel *channel = tx_queue->channel;
        struct efx_nic *efx = tx_queue->efx;
        struct efx_ef10_nic_data *nic_data;
@@ -3109,52 +3126,6 @@ fail:
        netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
 }
 
-void efx_ef10_handle_drain_event(struct efx_nic *efx)
-{
-       if (atomic_dec_and_test(&efx->active_queues))
-               wake_up(&efx->flush_wq);
-
-       WARN_ON(atomic_read(&efx->active_queues) < 0);
-}
-
-static int efx_ef10_fini_dmaq(struct efx_nic *efx)
-{
-       struct efx_tx_queue *tx_queue;
-       struct efx_rx_queue *rx_queue;
-       struct efx_channel *channel;
-       int pending;
-
-       /* If the MC has just rebooted, the TX/RX queues will have already been
-        * torn down, but efx->active_queues needs to be set to zero.
-        */
-       if (efx->must_realloc_vis) {
-               atomic_set(&efx->active_queues, 0);
-               return 0;
-       }
-
-       /* Do not attempt to write to the NIC during EEH recovery */
-       if (efx->state != STATE_RECOVERY) {
-               efx_for_each_channel(channel, efx) {
-                       efx_for_each_channel_rx_queue(rx_queue, channel)
-                               efx_mcdi_rx_fini(rx_queue);
-                       efx_for_each_channel_tx_queue(tx_queue, channel)
-                               efx_mcdi_tx_fini(tx_queue);
-               }
-
-               wait_event_timeout(efx->flush_wq,
-                                  atomic_read(&efx->active_queues) == 0,
-                                  msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
-               pending = atomic_read(&efx->active_queues);
-               if (pending) {
-                       netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
-                                 pending);
-                       return -ETIMEDOUT;
-               }
-       }
-
-       return 0;
-}
-
 static void efx_ef10_prepare_flr(struct efx_nic *efx)
 {
        atomic_set(&efx->active_queues, 0);
@@ -3307,18 +3278,15 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
        return rc;
 }
 
-static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
+static int efx_ef10_mac_reconfigure(struct efx_nic *efx, bool mtu_only)
 {
-       efx_mcdi_filter_sync_rx_mode(efx);
-
-       return efx_mcdi_set_mac(efx);
-}
+       WARN_ON(!mutex_is_locked(&efx->mac_lock));
 
-static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
-{
        efx_mcdi_filter_sync_rx_mode(efx);
 
-       return 0;
+       if (mtu_only && efx_has_cap(efx, SET_MAC_ENHANCED))
+               return efx_mcdi_set_mtu(efx);
+       return efx_mcdi_set_mac(efx);
 }
 
 static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
@@ -3745,8 +3713,8 @@ static int efx_ef10_set_udp_tnl_ports(struct efx_nic *efx, bool unloading)
                     MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
 
        for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
-               if (nic_data->udp_tunnels[i].count &&
-                   nic_data->udp_tunnels[i].port) {
+               if (nic_data->udp_tunnels[i].type !=
+                   TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID) {
                        efx_dword_t entry;
 
                        EFX_POPULATE_DWORD_2(entry,
@@ -3832,79 +3800,34 @@ static int efx_ef10_udp_tnl_push_ports(struct efx_nic *efx)
        return rc;
 }
 
-static struct efx_udp_tunnel *__efx_ef10_udp_tnl_lookup_port(struct efx_nic *efx,
-                                                            __be16 port)
+static int efx_ef10_udp_tnl_set_port(struct net_device *dev,
+                                    unsigned int table, unsigned int entry,
+                                    struct udp_tunnel_info *ti)
 {
-       struct efx_ef10_nic_data *nic_data = efx->nic_data;
-       size_t i;
-
-       for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i) {
-               if (!nic_data->udp_tunnels[i].count)
-                       continue;
-               if (nic_data->udp_tunnels[i].port == port)
-                       return &nic_data->udp_tunnels[i];
-       }
-       return NULL;
-}
+       struct efx_nic *efx = netdev_priv(dev);
+       struct efx_ef10_nic_data *nic_data;
+       int efx_tunnel_type, rc;
 
-static int efx_ef10_udp_tnl_add_port(struct efx_nic *efx,
-                                    struct efx_udp_tunnel tnl)
-{
-       struct efx_ef10_nic_data *nic_data = efx->nic_data;
-       struct efx_udp_tunnel *match;
-       char typebuf[8];
-       size_t i;
-       int rc;
+       if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
+               efx_tunnel_type = TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN;
+       else
+               efx_tunnel_type = TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE;
 
+       nic_data = efx->nic_data;
        if (!(nic_data->datapath_caps &
              (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
-               return 0;
-
-       efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf));
-       netif_dbg(efx, drv, efx->net_dev, "Adding UDP tunnel (%s) port %d\n",
-                 typebuf, ntohs(tnl.port));
+               return -EOPNOTSUPP;
 
        mutex_lock(&nic_data->udp_tunnels_lock);
        /* Make sure all TX are stopped while we add to the table, else we
         * might race against an efx_features_check().
         */
        efx_device_detach_sync(efx);
-
-       match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port);
-       if (match != NULL) {
-               if (match->type == tnl.type) {
-                       netif_dbg(efx, drv, efx->net_dev,
-                                 "Referencing existing tunnel entry\n");
-                       match->count++;
-                       /* No need to cause an MCDI update */
-                       rc = 0;
-                       goto unlock_out;
-               }
-               efx_get_udp_tunnel_type_name(match->type,
-                                            typebuf, sizeof(typebuf));
-               netif_dbg(efx, drv, efx->net_dev,
-                         "UDP port %d is already in use by %s\n",
-                         ntohs(tnl.port), typebuf);
-               rc = -EEXIST;
-               goto unlock_out;
-       }
-
-       for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i)
-               if (!nic_data->udp_tunnels[i].count) {
-                       nic_data->udp_tunnels[i] = tnl;
-                       nic_data->udp_tunnels[i].count = 1;
-                       rc = efx_ef10_set_udp_tnl_ports(efx, false);
-                       goto unlock_out;
-               }
-
-       netif_dbg(efx, drv, efx->net_dev,
-                 "Unable to add UDP tunnel (%s) port %d; insufficient resources.\n",
-                 typebuf, ntohs(tnl.port));
-
-       rc = -ENOMEM;
-
-unlock_out:
+       nic_data->udp_tunnels[entry].type = efx_tunnel_type;
+       nic_data->udp_tunnels[entry].port = ti->port;
+       rc = efx_ef10_set_udp_tnl_ports(efx, false);
        mutex_unlock(&nic_data->udp_tunnels_lock);
+
        return rc;
 }
 
@@ -3916,6 +3839,7 @@ unlock_out:
 static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port)
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       size_t i;
 
        if (!(nic_data->datapath_caps &
              (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
@@ -3927,58 +3851,51 @@ static bool efx_ef10_udp_tnl_has_port(struct efx_nic *efx, __be16 port)
                 */
                return false;
 
-       return __efx_ef10_udp_tnl_lookup_port(efx, port) != NULL;
+       for (i = 0; i < ARRAY_SIZE(nic_data->udp_tunnels); ++i)
+               if (nic_data->udp_tunnels[i].type !=
+                   TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID &&
+                   nic_data->udp_tunnels[i].port == port)
+                       return true;
+
+       return false;
 }
 
-static int efx_ef10_udp_tnl_del_port(struct efx_nic *efx,
-                                    struct efx_udp_tunnel tnl)
+static int efx_ef10_udp_tnl_unset_port(struct net_device *dev,
+                                      unsigned int table, unsigned int entry,
+                                      struct udp_tunnel_info *ti)
 {
-       struct efx_ef10_nic_data *nic_data = efx->nic_data;
-       struct efx_udp_tunnel *match;
-       char typebuf[8];
+       struct efx_nic *efx = netdev_priv(dev);
+       struct efx_ef10_nic_data *nic_data;
        int rc;
 
-       if (!(nic_data->datapath_caps &
-             (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN)))
-               return 0;
-
-       efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf));
-       netif_dbg(efx, drv, efx->net_dev, "Removing UDP tunnel (%s) port %d\n",
-                 typebuf, ntohs(tnl.port));
+       nic_data = efx->nic_data;
 
        mutex_lock(&nic_data->udp_tunnels_lock);
        /* Make sure all TX are stopped while we remove from the table, else we
         * might race against an efx_features_check().
         */
        efx_device_detach_sync(efx);
-
-       match = __efx_ef10_udp_tnl_lookup_port(efx, tnl.port);
-       if (match != NULL) {
-               if (match->type == tnl.type) {
-                       if (--match->count) {
-                               /* Port is still in use, so nothing to do */
-                               netif_dbg(efx, drv, efx->net_dev,
-                                         "UDP tunnel port %d remains active\n",
-                                         ntohs(tnl.port));
-                               rc = 0;
-                               goto out_unlock;
-                       }
-                       rc = efx_ef10_set_udp_tnl_ports(efx, false);
-                       goto out_unlock;
-               }
-               efx_get_udp_tunnel_type_name(match->type,
-                                            typebuf, sizeof(typebuf));
-               netif_warn(efx, drv, efx->net_dev,
-                          "UDP port %d is actually in use by %s, not removing\n",
-                          ntohs(tnl.port), typebuf);
-       }
-       rc = -ENOENT;
-
-out_unlock:
+       nic_data->udp_tunnels[entry].type = TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID;
+       nic_data->udp_tunnels[entry].port = 0;
+       rc = efx_ef10_set_udp_tnl_ports(efx, false);
        mutex_unlock(&nic_data->udp_tunnels_lock);
+
        return rc;
 }
 
+static const struct udp_tunnel_nic_info efx_ef10_udp_tunnels = {
+       .set_port       = efx_ef10_udp_tnl_set_port,
+       .unset_port     = efx_ef10_udp_tnl_unset_port,
+       .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
+       .tables         = {
+               {
+                       .n_entries = 16,
+                       .tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
+                                       UDP_TUNNEL_TYPE_GENEVE,
+               },
+       },
+};
+
 /* EF10 may have multiple datapath firmware variants within a
  * single version.  Report which variants are running.
  */
@@ -4023,13 +3940,13 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
        .remove = efx_ef10_remove,
        .dimension_resources = efx_ef10_dimension_resources,
        .init = efx_ef10_init_nic,
-       .fini = efx_port_dummy_op_void,
+       .fini = efx_ef10_fini_nic,
        .map_reset_reason = efx_ef10_map_reset_reason,
        .map_reset_flags = efx_ef10_map_reset_flags,
        .reset = efx_ef10_reset,
        .probe_port = efx_mcdi_port_probe,
        .remove_port = efx_mcdi_port_remove,
-       .fini_dmaq = efx_ef10_fini_dmaq,
+       .fini_dmaq = efx_fini_dmaq,
        .prepare_flr = efx_ef10_prepare_flr,
        .finish_flr = efx_port_dummy_op_void,
        .describe_stats = efx_ef10_describe_stats,
@@ -4039,7 +3956,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
        .stop_stats = efx_port_dummy_op_void,
        .set_id_led = efx_mcdi_set_id_led,
        .push_irq_moderation = efx_ef10_push_irq_moderation,
-       .reconfigure_mac = efx_ef10_mac_reconfigure_vf,
+       .reconfigure_mac = efx_ef10_mac_reconfigure,
        .check_mac_fault = efx_mcdi_mac_check_fault,
        .reconfigure_port = efx_mcdi_port_reconfigure,
        .get_wol = efx_ef10_get_wol_vf,
@@ -4112,7 +4029,6 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
        .can_rx_scatter = true,
        .always_rx_scatter = true,
        .min_interrupt_mode = EFX_INT_MODE_MSIX,
-       .max_interrupt_mode = EFX_INT_MODE_MSIX,
        .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
        .offload_features = EF10_OFFLOAD_FEATURES,
        .mcdi_max_ver = 2,
@@ -4132,13 +4048,13 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
        .remove = efx_ef10_remove,
        .dimension_resources = efx_ef10_dimension_resources,
        .init = efx_ef10_init_nic,
-       .fini = efx_port_dummy_op_void,
+       .fini = efx_ef10_fini_nic,
        .map_reset_reason = efx_ef10_map_reset_reason,
        .map_reset_flags = efx_ef10_map_reset_flags,
        .reset = efx_ef10_reset,
        .probe_port = efx_mcdi_port_probe,
        .remove_port = efx_mcdi_port_remove,
-       .fini_dmaq = efx_ef10_fini_dmaq,
+       .fini_dmaq = efx_fini_dmaq,
        .prepare_flr = efx_ef10_prepare_flr,
        .finish_flr = efx_port_dummy_op_void,
        .describe_stats = efx_ef10_describe_stats,
@@ -4216,9 +4132,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
        .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
        .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
        .udp_tnl_push_ports = efx_ef10_udp_tnl_push_ports,
-       .udp_tnl_add_port = efx_ef10_udp_tnl_add_port,
        .udp_tnl_has_port = efx_ef10_udp_tnl_has_port,
-       .udp_tnl_del_port = efx_ef10_udp_tnl_del_port,
 #ifdef CONFIG_SFC_SRIOV
        .sriov_configure = efx_ef10_sriov_configure,
        .sriov_init = efx_ef10_sriov_init,
@@ -4249,7 +4163,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
        .always_rx_scatter = true,
        .option_descriptors = true,
        .min_interrupt_mode = EFX_INT_MODE_LEGACY,
-       .max_interrupt_mode = EFX_INT_MODE_MSIX,
        .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
        .offload_features = EF10_OFFLOAD_FEATURES,
        .mcdi_max_ver = 2,
index 256807c..f16b4f2 100644 (file)
 
 /**************************************************************************
  *
- * Type name strings
- *
- **************************************************************************
- */
-
-/* UDP tunnel type names */
-static const char *const efx_udp_tunnel_type_names[] = {
-       [TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan",
-       [TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE] = "geneve",
-};
-
-void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen)
-{
-       if (type < ARRAY_SIZE(efx_udp_tunnel_type_names) &&
-           efx_udp_tunnel_type_names[type] != NULL)
-               snprintf(buf, buflen, "%s", efx_udp_tunnel_type_names[type]);
-       else
-               snprintf(buf, buflen, "type %d", type);
-}
-
-/**************************************************************************
- *
  * Configurable values
  *
  *************************************************************************/
 
+module_param_named(interrupt_mode, efx_interrupt_mode, uint, 0444);
+MODULE_PARM_DESC(interrupt_mode,
+                "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
+
+module_param(rss_cpus, uint, 0444);
+MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
+
 /*
  * Use separate channels for TX and RX events
  *
@@ -133,30 +118,6 @@ static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
  *
  **************************************************************************/
 
-/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
- * force the Autoneg bit on.
- */
-void efx_link_clear_advertising(struct efx_nic *efx)
-{
-       bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
-       efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
-}
-
-void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
-{
-       efx->wanted_fc = wanted_fc;
-       if (efx->link_advertising[0]) {
-               if (wanted_fc & EFX_FC_RX)
-                       efx->link_advertising[0] |= (ADVERTISED_Pause |
-                                                    ADVERTISED_Asym_Pause);
-               else
-                       efx->link_advertising[0] &= ~(ADVERTISED_Pause |
-                                                     ADVERTISED_Asym_Pause);
-               if (wanted_fc & EFX_FC_TX)
-                       efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
-       }
-}
-
 static void efx_fini_port(struct efx_nic *efx);
 
 static int efx_probe_port(struct efx_nic *efx)
@@ -193,10 +154,6 @@ static int efx_init_port(struct efx_nic *efx)
 
        efx->port_initialized = true;
 
-       /* Reconfigure the MAC before creating dma queues (required for
-        * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
-       efx_mac_reconfigure(efx);
-
        /* Ensure the PHY advertises the correct flow control settings */
        rc = efx->phy_op->reconfigure(efx);
        if (rc && rc != -EPERM)
@@ -357,9 +314,6 @@ static int efx_probe_nic(struct efx_nic *efx)
                                    sizeof(efx->rss_context.rx_hash_key));
        efx_set_default_rx_indir_table(efx, &efx->rss_context);
 
-       netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
-       netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
-
        /* Initialise the interrupt moderation settings */
        efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
        efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
@@ -409,7 +363,6 @@ static int efx_probe_all(struct efx_nic *efx)
                rc = -EINVAL;
                goto fail3;
        }
-       efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
 
 #ifdef CONFIG_SFC_SRIOV
        rc = efx->type->vswitching_probe(efx);
@@ -617,109 +570,6 @@ int efx_net_stop(struct net_device *net_dev)
        return 0;
 }
 
-/* Context: netif_tx_lock held, BHs disabled. */
-static void efx_watchdog(struct net_device *net_dev, unsigned int txqueue)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-
-       netif_err(efx, tx_err, efx->net_dev,
-                 "TX stuck with port_enabled=%d: resetting channels\n",
-                 efx->port_enabled);
-
-       efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
-}
-
-static int efx_set_mac_address(struct net_device *net_dev, void *data)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-       struct sockaddr *addr = data;
-       u8 *new_addr = addr->sa_data;
-       u8 old_addr[6];
-       int rc;
-
-       if (!is_valid_ether_addr(new_addr)) {
-               netif_err(efx, drv, efx->net_dev,
-                         "invalid ethernet MAC address requested: %pM\n",
-                         new_addr);
-               return -EADDRNOTAVAIL;
-       }
-
-       /* save old address */
-       ether_addr_copy(old_addr, net_dev->dev_addr);
-       ether_addr_copy(net_dev->dev_addr, new_addr);
-       if (efx->type->set_mac_address) {
-               rc = efx->type->set_mac_address(efx);
-               if (rc) {
-                       ether_addr_copy(net_dev->dev_addr, old_addr);
-                       return rc;
-               }
-       }
-
-       /* Reconfigure the MAC */
-       mutex_lock(&efx->mac_lock);
-       efx_mac_reconfigure(efx);
-       mutex_unlock(&efx->mac_lock);
-
-       return 0;
-}
-
-/* Context: netif_addr_lock held, BHs disabled. */
-static void efx_set_rx_mode(struct net_device *net_dev)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-
-       if (efx->port_enabled)
-               queue_work(efx->workqueue, &efx->mac_work);
-       /* Otherwise efx_start_port() will do this */
-}
-
-static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-       int rc;
-
-       /* If disabling RX n-tuple filtering, clear existing filters */
-       if (net_dev->features & ~data & NETIF_F_NTUPLE) {
-               rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
-               if (rc)
-                       return rc;
-       }
-
-       /* If Rx VLAN filter is changed, update filters via mac_reconfigure.
-        * If rx-fcs is changed, mac_reconfigure updates that too.
-        */
-       if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
-                                         NETIF_F_RXFCS)) {
-               /* efx_set_rx_mode() will schedule MAC work to update filters
-                * when a new features are finally set in net_dev.
-                */
-               efx_set_rx_mode(net_dev);
-       }
-
-       return 0;
-}
-
-static int efx_get_phys_port_id(struct net_device *net_dev,
-                               struct netdev_phys_item_id *ppid)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-
-       if (efx->type->get_phys_port_id)
-               return efx->type->get_phys_port_id(efx, ppid);
-       else
-               return -EOPNOTSUPP;
-}
-
-static int efx_get_phys_port_name(struct net_device *net_dev,
-                                 char *name, size_t len)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-
-       if (snprintf(name, len, "p%u", efx->port_num) >= len)
-               return -EINVAL;
-       return 0;
-}
-
 static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
 {
        struct efx_nic *efx = netdev_priv(net_dev);
@@ -740,52 +590,6 @@ static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vi
                return -EOPNOTSUPP;
 }
 
-static int efx_udp_tunnel_type_map(enum udp_parsable_tunnel_type in)
-{
-       switch (in) {
-       case UDP_TUNNEL_TYPE_VXLAN:
-               return TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN;
-       case UDP_TUNNEL_TYPE_GENEVE:
-               return TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE;
-       default:
-               return -1;
-       }
-}
-
-static void efx_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
-{
-       struct efx_nic *efx = netdev_priv(dev);
-       struct efx_udp_tunnel tnl;
-       int efx_tunnel_type;
-
-       efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
-       if (efx_tunnel_type < 0)
-               return;
-
-       tnl.type = (u16)efx_tunnel_type;
-       tnl.port = ti->port;
-
-       if (efx->type->udp_tnl_add_port)
-               (void)efx->type->udp_tnl_add_port(efx, tnl);
-}
-
-static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
-{
-       struct efx_nic *efx = netdev_priv(dev);
-       struct efx_udp_tunnel tnl;
-       int efx_tunnel_type;
-
-       efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
-       if (efx_tunnel_type < 0)
-               return;
-
-       tnl.type = (u16)efx_tunnel_type;
-       tnl.port = ti->port;
-
-       if (efx->type->udp_tnl_del_port)
-               (void)efx->type->udp_tnl_del_port(efx, tnl);
-}
-
 static const struct net_device_ops efx_netdev_ops = {
        .ndo_open               = efx_net_open,
        .ndo_stop               = efx_net_stop,
@@ -813,8 +617,8 @@ static const struct net_device_ops efx_netdev_ops = {
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = efx_filter_rfs,
 #endif
-       .ndo_udp_tunnel_add     = efx_udp_tunnel_add,
-       .ndo_udp_tunnel_del     = efx_udp_tunnel_del,
+       .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+       .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
        .ndo_xdp_xmit           = efx_xdp_xmit,
        .ndo_bpf                = efx_xdp
 };
@@ -1098,7 +902,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
 
        efx_pci_remove_main(efx);
 
-       efx_fini_io(efx, efx->type->mem_bar(efx));
+       efx_fini_io(efx);
        netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
 
        efx_fini_struct(efx);
@@ -1366,7 +1170,7 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
        return 0;
 
  fail3:
-       efx_fini_io(efx, efx->type->mem_bar(efx));
+       efx_fini_io(efx);
  fail2:
        efx_fini_struct(efx);
  fail1:
@@ -1514,97 +1318,6 @@ static const struct dev_pm_ops efx_pm_ops = {
        .restore        = efx_pm_resume,
 };
 
-/* A PCI error affecting this device was detected.
- * At this point MMIO and DMA may be disabled.
- * Stop the software path and request a slot reset.
- */
-static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
-                                             enum pci_channel_state state)
-{
-       pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
-       struct efx_nic *efx = pci_get_drvdata(pdev);
-
-       if (state == pci_channel_io_perm_failure)
-               return PCI_ERS_RESULT_DISCONNECT;
-
-       rtnl_lock();
-
-       if (efx->state != STATE_DISABLED) {
-               efx->state = STATE_RECOVERY;
-               efx->reset_pending = 0;
-
-               efx_device_detach_sync(efx);
-
-               efx_stop_all(efx);
-               efx_disable_interrupts(efx);
-
-               status = PCI_ERS_RESULT_NEED_RESET;
-       } else {
-               /* If the interface is disabled we don't want to do anything
-                * with it.
-                */
-               status = PCI_ERS_RESULT_RECOVERED;
-       }
-
-       rtnl_unlock();
-
-       pci_disable_device(pdev);
-
-       return status;
-}
-
-/* Fake a successful reset, which will be performed later in efx_io_resume. */
-static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
-{
-       struct efx_nic *efx = pci_get_drvdata(pdev);
-       pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
-
-       if (pci_enable_device(pdev)) {
-               netif_err(efx, hw, efx->net_dev,
-                         "Cannot re-enable PCI device after reset.\n");
-               status =  PCI_ERS_RESULT_DISCONNECT;
-       }
-
-       return status;
-}
-
-/* Perform the actual reset and resume I/O operations. */
-static void efx_io_resume(struct pci_dev *pdev)
-{
-       struct efx_nic *efx = pci_get_drvdata(pdev);
-       int rc;
-
-       rtnl_lock();
-
-       if (efx->state == STATE_DISABLED)
-               goto out;
-
-       rc = efx_reset(efx, RESET_TYPE_ALL);
-       if (rc) {
-               netif_err(efx, hw, efx->net_dev,
-                         "efx_reset failed after PCI error (%d)\n", rc);
-       } else {
-               efx->state = STATE_READY;
-               netif_dbg(efx, hw, efx->net_dev,
-                         "Done resetting and resuming IO after PCI error.\n");
-       }
-
-out:
-       rtnl_unlock();
-}
-
-/* For simplicity and reliability, we always require a slot reset and try to
- * reset the hardware when a pci error affecting the device is detected.
- * We leave both the link_reset and mmio_enabled callback unimplemented:
- * with our request for slot reset the mmio_enabled callback will never be
- * called, and the link_reset callback is not used by AER or EEH mechanisms.
- */
-static const struct pci_error_handlers efx_err_handlers = {
-       .error_detected = efx_io_error_detected,
-       .slot_reset     = efx_io_slot_reset,
-       .resume         = efx_io_resume,
-};
-
 static struct pci_driver efx_pci_driver = {
        .name           = KBUILD_MODNAME,
        .id_table       = efx_pci_table,
index 66dcab1..e7e7d8d 100644 (file)
@@ -36,13 +36,6 @@ static inline void efx_rx_flush_packet(struct efx_channel *channel)
                __efx_rx_packet(channel);
 }
 
-#define EFX_MAX_DMAQ_SIZE 4096UL
-#define EFX_DEFAULT_DMAQ_SIZE 1024UL
-#define EFX_MIN_DMAQ_SIZE 512UL
-
-#define EFX_MAX_EVQ_SIZE 16384UL
-#define EFX_MIN_EVQ_SIZE 512UL
-
 /* Maximum number of TCP segments we support for soft-TSO */
 #define EFX_TSO_MAX_SEGS       100
 
@@ -147,11 +140,6 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
 {
        return efx->type->filter_get_rx_ids(efx, priority, buf, size);
 }
-#ifdef CONFIG_RFS_ACCEL
-int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
-                  u16 rxq_index, u32 flow_id);
-bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota);
-#endif
 
 /* RSS contexts */
 static inline bool efx_rss_active(struct efx_rss_context *ctx)
@@ -171,10 +159,6 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
 void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
                            unsigned int *rx_usecs, bool *rx_adaptive);
 
-/* Dummy PHY ops for PHY drivers */
-int efx_port_dummy_op_int(struct efx_nic *efx);
-void efx_port_dummy_op_void(struct efx_nic *efx);
-
 /* Update the generic software stats in the passed stats array */
 void efx_update_sw_stats(struct efx_nic *efx, u64 *stats);
 
@@ -201,24 +185,6 @@ static inline unsigned int efx_vf_size(struct efx_nic *efx)
 }
 #endif
 
-static inline void efx_schedule_channel(struct efx_channel *channel)
-{
-       netif_vdbg(channel->efx, intr, channel->efx->net_dev,
-                  "channel %d scheduling NAPI poll on CPU%d\n",
-                  channel->channel, raw_smp_processor_id());
-
-       napi_schedule(&channel->napi_str);
-}
-
-static inline void efx_schedule_channel_irq(struct efx_channel *channel)
-{
-       channel->event_test_cpu = raw_smp_processor_id();
-       efx_schedule_channel(channel);
-}
-
-void efx_link_clear_advertising(struct efx_nic *efx);
-void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
-
 static inline void efx_device_detach_sync(struct efx_nic *efx)
 {
        struct net_device *dev = efx->net_dev;
index c492523..dd4f30e 100644 (file)
  * 1 => MSI
  * 2 => legacy
  */
-static unsigned int interrupt_mode;
-module_param(interrupt_mode, uint, 0444);
-MODULE_PARM_DESC(interrupt_mode,
-                "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
+unsigned int efx_interrupt_mode = EFX_INT_MODE_MSIX;
 
 /* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
  * i.e. the number of CPUs among which we may distribute simultaneous
@@ -35,9 +32,7 @@ MODULE_PARM_DESC(interrupt_mode,
  * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
  * The default (0) means to assign an interrupt to each core.
  */
-static unsigned int rss_cpus;
-module_param(rss_cpus, uint, 0444);
-MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
+unsigned int rss_cpus;
 
 static unsigned int irq_adapt_low_thresh = 8000;
 module_param(irq_adapt_low_thresh, uint, 0644);
@@ -175,6 +170,13 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
                efx->n_xdp_channels = 0;
                efx->xdp_tx_per_channel = 0;
                efx->xdp_tx_queue_count = 0;
+       } else if (n_channels + n_xdp_tx > efx->max_vis) {
+               netif_err(efx, drv, efx->net_dev,
+                         "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
+                         n_xdp_tx, n_channels, efx->max_vis);
+               efx->n_xdp_channels = 0;
+               efx->xdp_tx_per_channel = 0;
+               efx->xdp_tx_queue_count = 0;
        } else {
                efx->n_xdp_channels = n_xdp_ev;
                efx->xdp_tx_per_channel = EFX_TXQ_TYPES;
@@ -522,7 +524,8 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
        for (j = 0; j < EFX_TXQ_TYPES; j++) {
                tx_queue = &channel->tx_queue[j];
                tx_queue->efx = efx;
-               tx_queue->queue = i * EFX_TXQ_TYPES + j;
+               tx_queue->queue = -1;
+               tx_queue->label = j;
                tx_queue->channel = channel;
        }
 
@@ -550,14 +553,11 @@ int efx_init_channels(struct efx_nic *efx)
        }
 
        /* Higher numbered interrupt modes are less capable! */
-       if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
-                        efx->type->min_interrupt_mode)) {
-               return -EIO;
-       }
-       efx->interrupt_mode = max(efx->type->max_interrupt_mode,
-                                 interrupt_mode);
        efx->interrupt_mode = min(efx->type->min_interrupt_mode,
-                                 interrupt_mode);
+                                 efx_interrupt_mode);
+
+       efx->max_channels = EFX_MAX_CHANNELS;
+       efx->max_tx_channels = EFX_MAX_CHANNELS;
 
        return 0;
 }
@@ -727,7 +727,7 @@ void efx_remove_channel(struct efx_channel *channel)
 
        efx_for_each_channel_rx_queue(rx_queue, channel)
                efx_remove_rx_queue(rx_queue);
-       efx_for_each_possible_channel_tx_queue(tx_queue, channel)
+       efx_for_each_channel_tx_queue(tx_queue, channel)
                efx_remove_tx_queue(tx_queue);
        efx_remove_eventq(channel);
        channel->type->post_remove(channel);
@@ -854,9 +854,11 @@ rollback:
 
 int efx_set_channels(struct efx_nic *efx)
 {
-       struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
+       struct efx_channel *channel;
+       unsigned int next_queue = 0;
        int xdp_queue_number;
+       int rc;
 
        efx->tx_channel_offset =
                efx_separate_tx_channels ?
@@ -884,18 +886,38 @@ int efx_set_channels(struct efx_nic *efx)
                else
                        channel->rx_queue.core_index = -1;
 
-               efx_for_each_channel_tx_queue(tx_queue, channel) {
-                       tx_queue->queue -= (efx->tx_channel_offset *
-                                           EFX_TXQ_TYPES);
-
-                       if (efx_channel_is_xdp_tx(channel) &&
-                           xdp_queue_number < efx->xdp_tx_queue_count) {
-                               efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
-                               xdp_queue_number++;
+               if (channel->channel >= efx->tx_channel_offset) {
+                       if (efx_channel_is_xdp_tx(channel)) {
+                               efx_for_each_channel_tx_queue(tx_queue, channel) {
+                                       tx_queue->queue = next_queue++;
+                                       netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
+                                                 channel->channel, tx_queue->label,
+                                                 xdp_queue_number, tx_queue->queue);
+                                       /* We may have a few left-over XDP TX
+                                        * queues owing to xdp_tx_queue_count
+                                        * not dividing evenly by EFX_TXQ_TYPES.
+                                        * We still allocate and probe those
+                                        * TXQs, but never use them.
+                                        */
+                                       if (xdp_queue_number < efx->xdp_tx_queue_count)
+                                               efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+                                       xdp_queue_number++;
+                               }
+                       } else {
+                               efx_for_each_channel_tx_queue(tx_queue, channel) {
+                                       tx_queue->queue = next_queue++;
+                                       netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n",
+                                                 channel->channel, tx_queue->label,
+                                                 tx_queue->queue);
+                               }
                        }
                }
        }
-       return 0;
+
+       rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
+       if (rc)
+               return rc;
+       return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
 }
 
 bool efx_default_channel_want_txqs(struct efx_channel *channel)
@@ -1091,7 +1113,7 @@ void efx_stop_channels(struct efx_nic *efx)
        efx_for_each_channel(channel, efx) {
                efx_for_each_channel_rx_queue(rx_queue, channel)
                        efx_fini_rx_queue(rx_queue);
-               efx_for_each_possible_channel_tx_queue(tx_queue, channel)
+               efx_for_each_channel_tx_queue(tx_queue, channel)
                        efx_fini_tx_queue(tx_queue);
        }
 }
index 8d7b8c4..2d71dc9 100644 (file)
@@ -11,6 +11,9 @@
 #ifndef EFX_CHANNELS_H
 #define EFX_CHANNELS_H
 
+extern unsigned int efx_interrupt_mode;
+extern unsigned int rss_cpus;
+
 int efx_probe_interrupts(struct efx_nic *efx);
 void efx_remove_interrupts(struct efx_nic *efx);
 int efx_soft_enable_interrupts(struct efx_nic *efx);
index 1799ff9..5667694 100644 (file)
@@ -139,11 +139,11 @@ void efx_destroy_reset_workqueue(void)
 /* We assume that efx->type->reconfigure_mac will always try to sync RX
  * filters and therefore needs to read-lock the filter table against freeing
  */
-void efx_mac_reconfigure(struct efx_nic *efx)
+void efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only)
 {
        if (efx->type->reconfigure_mac) {
                down_read(&efx->filter_sem);
-               efx->type->reconfigure_mac(efx);
+               efx->type->reconfigure_mac(efx, mtu_only);
                up_read(&efx->filter_sem);
        }
 }
@@ -158,10 +158,80 @@ static void efx_mac_work(struct work_struct *data)
 
        mutex_lock(&efx->mac_lock);
        if (efx->port_enabled)
-               efx_mac_reconfigure(efx);
+               efx_mac_reconfigure(efx, false);
        mutex_unlock(&efx->mac_lock);
 }
 
+int efx_set_mac_address(struct net_device *net_dev, void *data)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       struct sockaddr *addr = data;
+       u8 *new_addr = addr->sa_data;
+       u8 old_addr[6];
+       int rc;
+
+       if (!is_valid_ether_addr(new_addr)) {
+               netif_err(efx, drv, efx->net_dev,
+                         "invalid ethernet MAC address requested: %pM\n",
+                         new_addr);
+               return -EADDRNOTAVAIL;
+       }
+
+       /* save old address */
+       ether_addr_copy(old_addr, net_dev->dev_addr);
+       ether_addr_copy(net_dev->dev_addr, new_addr);
+       if (efx->type->set_mac_address) {
+               rc = efx->type->set_mac_address(efx);
+               if (rc) {
+                       ether_addr_copy(net_dev->dev_addr, old_addr);
+                       return rc;
+               }
+       }
+
+       /* Reconfigure the MAC */
+       mutex_lock(&efx->mac_lock);
+       efx_mac_reconfigure(efx, false);
+       mutex_unlock(&efx->mac_lock);
+
+       return 0;
+}
+
+/* Context: netif_addr_lock held, BHs disabled. */
+void efx_set_rx_mode(struct net_device *net_dev)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+
+       if (efx->port_enabled)
+               queue_work(efx->workqueue, &efx->mac_work);
+       /* Otherwise efx_start_port() will do this */
+}
+
+int efx_set_features(struct net_device *net_dev, netdev_features_t data)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       int rc;
+
+       /* If disabling RX n-tuple filtering, clear existing filters */
+       if (net_dev->features & ~data & NETIF_F_NTUPLE) {
+               rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+               if (rc)
+                       return rc;
+       }
+
+       /* If Rx VLAN filter is changed, update filters via mac_reconfigure.
+        * If rx-fcs is changed, mac_reconfigure updates that too.
+        */
+       if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
+                                         NETIF_F_RXFCS)) {
+               /* efx_set_rx_mode() will schedule MAC work to update filters
+                * when a new features are finally set in net_dev.
+                */
+               efx_set_rx_mode(net_dev);
+       }
+
+       return 0;
+}
+
 /* This ensures that the kernel is kept informed (via
  * netif_carrier_on/off) of the link status, and also maintains the
  * link status's stop on the port's TX queue.
@@ -234,7 +304,7 @@ int efx_change_mtu(struct net_device *net_dev, int new_mtu)
 
        mutex_lock(&efx->mac_lock);
        net_dev->mtu = new_mtu;
-       efx_mac_reconfigure(efx);
+       efx_mac_reconfigure(efx, true);
        mutex_unlock(&efx->mac_lock);
 
        efx_start_all(efx);
@@ -383,6 +453,30 @@ static void efx_stop_datapath(struct efx_nic *efx)
  *
  **************************************************************************/
 
+/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
+ * force the Autoneg bit on.
+ */
+void efx_link_clear_advertising(struct efx_nic *efx)
+{
+       bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
+       efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
+}
+
+void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
+{
+       efx->wanted_fc = wanted_fc;
+       if (efx->link_advertising[0]) {
+               if (wanted_fc & EFX_FC_RX)
+                       efx->link_advertising[0] |= (ADVERTISED_Pause |
+                                                    ADVERTISED_Asym_Pause);
+               else
+                       efx->link_advertising[0] &= ~(ADVERTISED_Pause |
+                                                     ADVERTISED_Asym_Pause);
+               if (wanted_fc & EFX_FC_TX)
+                       efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
+       }
+}
+
 static void efx_start_port(struct efx_nic *efx)
 {
        netif_dbg(efx, ifup, efx->net_dev, "start port\n");
@@ -392,7 +486,7 @@ static void efx_start_port(struct efx_nic *efx)
        efx->port_enabled = true;
 
        /* Ensure MAC ingress/egress is enabled */
-       efx_mac_reconfigure(efx);
+       efx_mac_reconfigure(efx, false);
 
        mutex_unlock(&efx->mac_lock);
 }
@@ -626,6 +720,18 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
        efx->type->fini(efx);
 }
 
+/* Context: netif_tx_lock held, BHs disabled. */
+void efx_watchdog(struct net_device *net_dev, unsigned int txqueue)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+
+       netif_err(efx, tx_err, efx->net_dev,
+                 "TX stuck with port_enabled=%d: resetting channels\n",
+                 efx->port_enabled);
+
+       efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
+}
+
 /* This function will always ensure that the locks acquired in
  * efx_reset_down() are released. A failure return code indicates
  * that we were unable to reinitialise the hardware, and the
@@ -911,7 +1017,9 @@ int efx_init_struct(struct efx_nic *efx,
        efx->rx_packet_ts_offset =
                efx->type->rx_ts_offset - efx->type->rx_prefix_size;
        INIT_LIST_HEAD(&efx->rss_context.list);
+       efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
        mutex_init(&efx->rss_lock);
+       efx->vport_id = EVB_PORT_ID_ASSIGNED;
        spin_lock_init(&efx->stats_lock);
        efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
        efx->num_mac_stats = MC_CMD_MAC_NSTATS;
@@ -929,6 +1037,12 @@ int efx_init_struct(struct efx_nic *efx,
        INIT_WORK(&efx->mac_work, efx_mac_work);
        init_waitqueue_head(&efx->flush_wq);
 
+       efx->tx_queues_per_channel = 1;
+       efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE;
+       efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
+
+       efx->mem_bar = UINT_MAX;
+
        rc = efx_init_channels(efx);
        if (rc)
                goto fail;
@@ -972,7 +1086,9 @@ int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
        struct pci_dev *pci_dev = efx->pci_dev;
        int rc;
 
-       netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
+       efx->mem_bar = UINT_MAX;
+
+       netif_dbg(efx, probe, efx->net_dev, "initialising I/O bar=%d\n", bar);
 
        rc = pci_enable_device(pci_dev);
        if (rc) {
@@ -1014,21 +1130,21 @@ int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
        rc = pci_request_region(pci_dev, bar, "sfc");
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
-                         "request for memory BAR failed\n");
+                         "request for memory BAR[%d] failed\n", bar);
                rc = -EIO;
                goto fail3;
        }
-
+       efx->mem_bar = bar;
        efx->membase = ioremap(efx->membase_phys, mem_map_size);
        if (!efx->membase) {
                netif_err(efx, probe, efx->net_dev,
-                         "could not map memory BAR at %llx+%x\n",
+                         "could not map memory BAR[%d] at %llx+%x\n", bar,
                          (unsigned long long)efx->membase_phys, mem_map_size);
                rc = -ENOMEM;
                goto fail4;
        }
        netif_dbg(efx, probe, efx->net_dev,
-                 "memory BAR at %llx+%x (virtual %p)\n",
+                 "memory BAR[%d] at %llx+%x (virtual %p)\n", bar,
                  (unsigned long long)efx->membase_phys, mem_map_size,
                  efx->membase);
 
@@ -1044,7 +1160,7 @@ fail1:
        return rc;
 }
 
-void efx_fini_io(struct efx_nic *efx, int bar)
+void efx_fini_io(struct efx_nic *efx)
 {
        netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
 
@@ -1054,8 +1170,9 @@ void efx_fini_io(struct efx_nic *efx, int bar)
        }
 
        if (efx->membase_phys) {
-               pci_release_region(efx->pci_dev, bar);
+               pci_release_region(efx->pci_dev, efx->mem_bar);
                efx->membase_phys = 0;
+               efx->mem_bar = UINT_MAX;
        }
 
        /* Don't disable bus-mastering if VFs are assigned */
@@ -1101,3 +1218,114 @@ void efx_fini_mcdi_logging(struct efx_nic *efx)
        device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
 }
 #endif
+
+/* A PCI error affecting this device was detected.
+ * At this point MMIO and DMA may be disabled.
+ * Stop the software path and request a slot reset.
+ */
+static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
+                                             enum pci_channel_state state)
+{
+       pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+       struct efx_nic *efx = pci_get_drvdata(pdev);
+
+       if (state == pci_channel_io_perm_failure)
+               return PCI_ERS_RESULT_DISCONNECT;
+
+       rtnl_lock();
+
+       if (efx->state != STATE_DISABLED) {
+               efx->state = STATE_RECOVERY;
+               efx->reset_pending = 0;
+
+               efx_device_detach_sync(efx);
+
+               efx_stop_all(efx);
+               efx_disable_interrupts(efx);
+
+               status = PCI_ERS_RESULT_NEED_RESET;
+       } else {
+               /* If the interface is disabled we don't want to do anything
+                * with it.
+                */
+               status = PCI_ERS_RESULT_RECOVERED;
+       }
+
+       rtnl_unlock();
+
+       pci_disable_device(pdev);
+
+       return status;
+}
+
+/* Fake a successful reset, which will be performed later in efx_io_resume. */
+static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
+{
+       struct efx_nic *efx = pci_get_drvdata(pdev);
+       pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+
+       if (pci_enable_device(pdev)) {
+               netif_err(efx, hw, efx->net_dev,
+                         "Cannot re-enable PCI device after reset.\n");
+               status =  PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       return status;
+}
+
+/* Perform the actual reset and resume I/O operations. */
+static void efx_io_resume(struct pci_dev *pdev)
+{
+       struct efx_nic *efx = pci_get_drvdata(pdev);
+       int rc;
+
+       rtnl_lock();
+
+       if (efx->state == STATE_DISABLED)
+               goto out;
+
+       rc = efx_reset(efx, RESET_TYPE_ALL);
+       if (rc) {
+               netif_err(efx, hw, efx->net_dev,
+                         "efx_reset failed after PCI error (%d)\n", rc);
+       } else {
+               efx->state = STATE_READY;
+               netif_dbg(efx, hw, efx->net_dev,
+                         "Done resetting and resuming IO after PCI error.\n");
+       }
+
+out:
+       rtnl_unlock();
+}
+
+/* For simplicity and reliability, we always require a slot reset and try to
+ * reset the hardware when a pci error affecting the device is detected.
+ * We leave both the link_reset and mmio_enabled callback unimplemented:
+ * with our request for slot reset the mmio_enabled callback will never be
+ * called, and the link_reset callback is not used by AER or EEH mechanisms.
+ */
+const struct pci_error_handlers efx_err_handlers = {
+       .error_detected = efx_io_error_detected,
+       .slot_reset     = efx_io_slot_reset,
+       .resume         = efx_io_resume,
+};
+
+int efx_get_phys_port_id(struct net_device *net_dev,
+                        struct netdev_phys_item_id *ppid)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+
+       if (efx->type->get_phys_port_id)
+               return efx->type->get_phys_port_id(efx, ppid);
+       else
+               return -EOPNOTSUPP;
+}
+
+int efx_get_phys_port_name(struct net_device *net_dev, char *name, size_t len)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+
+       if (snprintf(name, len, "p%u", efx->port_num) >= len)
+               return -EINVAL;
+       return 0;
+}
index fa2fc68..4056f68 100644 (file)
 
 int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
                unsigned int mem_map_size);
-void efx_fini_io(struct efx_nic *efx, int bar);
+void efx_fini_io(struct efx_nic *efx);
 int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev,
                    struct net_device *net_dev);
 void efx_fini_struct(struct efx_nic *efx);
 
+#define EFX_MAX_DMAQ_SIZE 4096UL
+#define EFX_DEFAULT_DMAQ_SIZE 1024UL
+#define EFX_MIN_DMAQ_SIZE 512UL
+
+#define EFX_MAX_EVQ_SIZE 16384UL
+#define EFX_MIN_EVQ_SIZE 512UL
+
+void efx_link_clear_advertising(struct efx_nic *efx);
+void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
+
 void efx_start_all(struct efx_nic *efx);
 void efx_stop_all(struct efx_nic *efx);
 
@@ -43,10 +53,15 @@ int efx_reconfigure_port(struct efx_nic *efx);
 
 int efx_try_recovery(struct efx_nic *efx);
 void efx_reset_down(struct efx_nic *efx, enum reset_type method);
+void efx_watchdog(struct net_device *net_dev, unsigned int txqueue);
 int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
 int efx_reset(struct efx_nic *efx, enum reset_type method);
 void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
 
+/* Dummy PHY ops for PHY drivers */
+int efx_port_dummy_op_int(struct efx_nic *efx);
+void efx_port_dummy_op_void(struct efx_nic *efx);
+
 static inline int efx_check_disabled(struct efx_nic *efx)
 {
        if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
@@ -57,6 +72,21 @@ static inline int efx_check_disabled(struct efx_nic *efx)
        return 0;
 }
 
+static inline void efx_schedule_channel(struct efx_channel *channel)
+{
+       netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+                  "channel %d scheduling NAPI poll on CPU%d\n",
+                  channel->channel, raw_smp_processor_id());
+
+       napi_schedule(&channel->napi_str);
+}
+
+static inline void efx_schedule_channel_irq(struct efx_channel *channel)
+{
+       channel->event_test_cpu = raw_smp_processor_id();
+       efx_schedule_channel(channel);
+}
+
 #ifdef CONFIG_SFC_MCDI_LOGGING
 void efx_init_mcdi_logging(struct efx_nic *efx);
 void efx_fini_mcdi_logging(struct efx_nic *efx);
@@ -65,9 +95,19 @@ static inline void efx_init_mcdi_logging(struct efx_nic *efx) {}
 static inline void efx_fini_mcdi_logging(struct efx_nic *efx) {}
 #endif
 
-void efx_mac_reconfigure(struct efx_nic *efx);
+void efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only);
+int efx_set_mac_address(struct net_device *net_dev, void *data);
+void efx_set_rx_mode(struct net_device *net_dev);
+int efx_set_features(struct net_device *net_dev, netdev_features_t data);
 void efx_link_status_changed(struct efx_nic *efx);
 unsigned int efx_xdp_max_mtu(struct efx_nic *efx);
 int efx_change_mtu(struct net_device *net_dev, int new_mtu);
 
+extern const struct pci_error_handlers efx_err_handlers;
+
+int efx_get_phys_port_id(struct net_device *net_dev,
+                        struct netdev_phys_item_id *ppid);
+
+int efx_get_phys_port_name(struct net_device *net_dev,
+                          char *name, size_t len);
 #endif
index 04e88d0..9828516 100644 (file)
@@ -54,58 +54,6 @@ static int efx_ethtool_phys_id(struct net_device *net_dev,
        return 0;
 }
 
-/* This must be called with rtnl_lock held. */
-static int
-efx_ethtool_get_link_ksettings(struct net_device *net_dev,
-                              struct ethtool_link_ksettings *cmd)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-       struct efx_link_state *link_state = &efx->link_state;
-       u32 supported;
-
-       mutex_lock(&efx->mac_lock);
-       efx->phy_op->get_link_ksettings(efx, cmd);
-       mutex_unlock(&efx->mac_lock);
-
-       /* Both MACs support pause frames (bidirectional and respond-only) */
-       ethtool_convert_link_mode_to_legacy_u32(&supported,
-                                               cmd->link_modes.supported);
-
-       supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-
-       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
-                                               supported);
-
-       if (LOOPBACK_INTERNAL(efx)) {
-               cmd->base.speed = link_state->speed;
-               cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
-       }
-
-       return 0;
-}
-
-/* This must be called with rtnl_lock held. */
-static int
-efx_ethtool_set_link_ksettings(struct net_device *net_dev,
-                              const struct ethtool_link_ksettings *cmd)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-       int rc;
-
-       /* GMAC does not support 1000Mbps HD */
-       if ((cmd->base.speed == SPEED_1000) &&
-           (cmd->base.duplex != DUPLEX_FULL)) {
-               netif_dbg(efx, drv, efx->net_dev,
-                         "rejecting unsupported 1000Mbps HD setting\n");
-               return -EINVAL;
-       }
-
-       mutex_lock(&efx->mac_lock);
-       rc = efx->phy_op->set_link_ksettings(efx, cmd);
-       mutex_unlock(&efx->mac_lock);
-       return rc;
-}
-
 static int efx_ethtool_get_regs_len(struct net_device *net_dev)
 {
        return efx_nic_get_regs_len(netdev_priv(net_dev));
@@ -120,62 +68,6 @@ static void efx_ethtool_get_regs(struct net_device *net_dev,
        efx_nic_get_regs(efx, buf);
 }
 
-static void efx_ethtool_self_test(struct net_device *net_dev,
-                                 struct ethtool_test *test, u64 *data)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-       struct efx_self_tests *efx_tests;
-       bool already_up;
-       int rc = -ENOMEM;
-
-       efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
-       if (!efx_tests)
-               goto fail;
-
-       if (efx->state != STATE_READY) {
-               rc = -EBUSY;
-               goto out;
-       }
-
-       netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
-                  (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
-
-       /* We need rx buffers and interrupts. */
-       already_up = (efx->net_dev->flags & IFF_UP);
-       if (!already_up) {
-               rc = dev_open(efx->net_dev, NULL);
-               if (rc) {
-                       netif_err(efx, drv, efx->net_dev,
-                                 "failed opening device.\n");
-                       goto out;
-               }
-       }
-
-       rc = efx_selftest(efx, efx_tests, test->flags);
-
-       if (!already_up)
-               dev_close(efx->net_dev);
-
-       netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
-                  rc == 0 ? "passed" : "failed",
-                  (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
-
-out:
-       efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
-       kfree(efx_tests);
-fail:
-       if (rc)
-               test->flags |= ETH_TEST_FL_FAILED;
-}
-
-/* Restart autonegotiation */
-static int efx_ethtool_nway_reset(struct net_device *net_dev)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-
-       return mdio45_nway_restart(&efx->mdio);
-}
-
 /*
  * Each channel has a single IRQ and moderation timer, started by any
  * completion (or other event).  Unless the module parameter
@@ -300,64 +192,6 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev,
        return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
 }
 
-static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
-                                     struct ethtool_pauseparam *pause)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-       u8 wanted_fc, old_fc;
-       u32 old_adv;
-       int rc = 0;
-
-       mutex_lock(&efx->mac_lock);
-
-       wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) |
-                    (pause->tx_pause ? EFX_FC_TX : 0) |
-                    (pause->autoneg ? EFX_FC_AUTO : 0));
-
-       if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
-               netif_dbg(efx, drv, efx->net_dev,
-                         "Flow control unsupported: tx ON rx OFF\n");
-               rc = -EINVAL;
-               goto out;
-       }
-
-       if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising[0]) {
-               netif_dbg(efx, drv, efx->net_dev,
-                         "Autonegotiation is disabled\n");
-               rc = -EINVAL;
-               goto out;
-       }
-
-       /* Hook for Falcon bug 11482 workaround */
-       if (efx->type->prepare_enable_fc_tx &&
-           (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
-               efx->type->prepare_enable_fc_tx(efx);
-
-       old_adv = efx->link_advertising[0];
-       old_fc = efx->wanted_fc;
-       efx_link_set_wanted_fc(efx, wanted_fc);
-       if (efx->link_advertising[0] != old_adv ||
-           (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
-               rc = efx->phy_op->reconfigure(efx);
-               if (rc) {
-                       netif_err(efx, drv, efx->net_dev,
-                                 "Unable to advertise requested flow "
-                                 "control setting\n");
-                       goto out;
-               }
-       }
-
-       /* Reconfigure the MAC. The PHY *may* generate a link state change event
-        * if the user just changed the advertised capabilities, but there's no
-        * harm doing this twice */
-       efx_mac_reconfigure(efx);
-
-out:
-       mutex_unlock(&efx->mac_lock);
-
-       return rc;
-}
-
 static void efx_ethtool_get_wol(struct net_device *net_dev,
                                struct ethtool_wolinfo *wol)
 {
@@ -373,690 +207,6 @@ static int efx_ethtool_set_wol(struct net_device *net_dev,
        return efx->type->set_wol(efx, wol->wolopts);
 }
 
-static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-       int rc;
-
-       rc = efx->type->map_reset_flags(flags);
-       if (rc < 0)
-               return rc;
-
-       return efx_reset(efx, rc);
-}
-
-/* MAC address mask including only I/G bit */
-static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0};
-
-#define IP4_ADDR_FULL_MASK     ((__force __be32)~0)
-#define IP_PROTO_FULL_MASK     0xFF
-#define PORT_FULL_MASK         ((__force __be16)~0)
-#define ETHER_TYPE_FULL_MASK   ((__force __be16)~0)
-
-static inline void ip6_fill_mask(__be32 *mask)
-{
-       mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0;
-}
-
-static int efx_ethtool_get_class_rule(struct efx_nic *efx,
-                                     struct ethtool_rx_flow_spec *rule,
-                                     u32 *rss_context)
-{
-       struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
-       struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
-       struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
-       struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
-       struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
-       struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
-       struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
-       struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
-       struct ethhdr *mac_entry = &rule->h_u.ether_spec;
-       struct ethhdr *mac_mask = &rule->m_u.ether_spec;
-       struct efx_filter_spec spec;
-       int rc;
-
-       rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
-                                       rule->location, &spec);
-       if (rc)
-               return rc;
-
-       if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
-               rule->ring_cookie = RX_CLS_FLOW_DISC;
-       else
-               rule->ring_cookie = spec.dmaq_id;
-
-       if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
-           spec.ether_type == htons(ETH_P_IP) &&
-           (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
-           (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
-           !(spec.match_flags &
-             ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
-               EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
-               EFX_FILTER_MATCH_IP_PROTO |
-               EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
-               rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
-                                  TCP_V4_FLOW : UDP_V4_FLOW);
-               if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
-                       ip_entry->ip4dst = spec.loc_host[0];
-                       ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
-               }
-               if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
-                       ip_entry->ip4src = spec.rem_host[0];
-                       ip_mask->ip4src = IP4_ADDR_FULL_MASK;
-               }
-               if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
-                       ip_entry->pdst = spec.loc_port;
-                       ip_mask->pdst = PORT_FULL_MASK;
-               }
-               if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
-                       ip_entry->psrc = spec.rem_port;
-                       ip_mask->psrc = PORT_FULL_MASK;
-               }
-       } else if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
-           spec.ether_type == htons(ETH_P_IPV6) &&
-           (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
-           (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
-           !(spec.match_flags &
-             ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
-               EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
-               EFX_FILTER_MATCH_IP_PROTO |
-               EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
-               rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
-                                  TCP_V6_FLOW : UDP_V6_FLOW);
-               if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
-                       memcpy(ip6_entry->ip6dst, spec.loc_host,
-                              sizeof(ip6_entry->ip6dst));
-                       ip6_fill_mask(ip6_mask->ip6dst);
-               }
-               if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
-                       memcpy(ip6_entry->ip6src, spec.rem_host,
-                              sizeof(ip6_entry->ip6src));
-                       ip6_fill_mask(ip6_mask->ip6src);
-               }
-               if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
-                       ip6_entry->pdst = spec.loc_port;
-                       ip6_mask->pdst = PORT_FULL_MASK;
-               }
-               if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
-                       ip6_entry->psrc = spec.rem_port;
-                       ip6_mask->psrc = PORT_FULL_MASK;
-               }
-       } else if (!(spec.match_flags &
-                    ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
-                      EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
-                      EFX_FILTER_MATCH_OUTER_VID))) {
-               rule->flow_type = ETHER_FLOW;
-               if (spec.match_flags &
-                   (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
-                       ether_addr_copy(mac_entry->h_dest, spec.loc_mac);
-                       if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
-                               eth_broadcast_addr(mac_mask->h_dest);
-                       else
-                               ether_addr_copy(mac_mask->h_dest,
-                                               mac_addr_ig_mask);
-               }
-               if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
-                       ether_addr_copy(mac_entry->h_source, spec.rem_mac);
-                       eth_broadcast_addr(mac_mask->h_source);
-               }
-               if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
-                       mac_entry->h_proto = spec.ether_type;
-                       mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
-               }
-       } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
-                  spec.ether_type == htons(ETH_P_IP) &&
-                  !(spec.match_flags &
-                    ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
-                      EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
-                      EFX_FILTER_MATCH_IP_PROTO))) {
-               rule->flow_type = IPV4_USER_FLOW;
-               uip_entry->ip_ver = ETH_RX_NFC_IP4;
-               if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) {
-                       uip_mask->proto = IP_PROTO_FULL_MASK;
-                       uip_entry->proto = spec.ip_proto;
-               }
-               if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
-                       uip_entry->ip4dst = spec.loc_host[0];
-                       uip_mask->ip4dst = IP4_ADDR_FULL_MASK;
-               }
-               if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
-                       uip_entry->ip4src = spec.rem_host[0];
-                       uip_mask->ip4src = IP4_ADDR_FULL_MASK;
-               }
-       } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
-                  spec.ether_type == htons(ETH_P_IPV6) &&
-                  !(spec.match_flags &
-                    ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
-                      EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
-                      EFX_FILTER_MATCH_IP_PROTO))) {
-               rule->flow_type = IPV6_USER_FLOW;
-               if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) {
-                       uip6_mask->l4_proto = IP_PROTO_FULL_MASK;
-                       uip6_entry->l4_proto = spec.ip_proto;
-               }
-               if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
-                       memcpy(uip6_entry->ip6dst, spec.loc_host,
-                              sizeof(uip6_entry->ip6dst));
-                       ip6_fill_mask(uip6_mask->ip6dst);
-               }
-               if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
-                       memcpy(uip6_entry->ip6src, spec.rem_host,
-                              sizeof(uip6_entry->ip6src));
-                       ip6_fill_mask(uip6_mask->ip6src);
-               }
-       } else {
-               /* The above should handle all filters that we insert */
-               WARN_ON(1);
-               return -EINVAL;
-       }
-
-       if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) {
-               rule->flow_type |= FLOW_EXT;
-               rule->h_ext.vlan_tci = spec.outer_vid;
-               rule->m_ext.vlan_tci = htons(0xfff);
-       }
-
-       if (spec.flags & EFX_FILTER_FLAG_RX_RSS) {
-               rule->flow_type |= FLOW_RSS;
-               *rss_context = spec.rss_context;
-       }
-
-       return rc;
-}
-
-static int
-efx_ethtool_get_rxnfc(struct net_device *net_dev,
-                     struct ethtool_rxnfc *info, u32 *rule_locs)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-       u32 rss_context = 0;
-       s32 rc = 0;
-
-       switch (info->cmd) {
-       case ETHTOOL_GRXRINGS:
-               info->data = efx->n_rx_channels;
-               return 0;
-
-       case ETHTOOL_GRXFH: {
-               struct efx_rss_context *ctx = &efx->rss_context;
-               __u64 data;
-
-               mutex_lock(&efx->rss_lock);
-               if (info->flow_type & FLOW_RSS && info->rss_context) {
-                       ctx = efx_find_rss_context_entry(efx, info->rss_context);
-                       if (!ctx) {
-                               rc = -ENOENT;
-                               goto out_unlock;
-                       }
-               }
-
-               data = 0;
-               if (!efx_rss_active(ctx)) /* No RSS */
-                       goto out_setdata_unlock;
-
-               switch (info->flow_type & ~FLOW_RSS) {
-               case UDP_V4_FLOW:
-               case UDP_V6_FLOW:
-                       if (ctx->rx_hash_udp_4tuple)
-                               data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
-                                       RXH_IP_SRC | RXH_IP_DST);
-                       else
-                               data = RXH_IP_SRC | RXH_IP_DST;
-                       break;
-               case TCP_V4_FLOW:
-               case TCP_V6_FLOW:
-                       data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
-                               RXH_IP_SRC | RXH_IP_DST);
-                       break;
-               case SCTP_V4_FLOW:
-               case SCTP_V6_FLOW:
-               case AH_ESP_V4_FLOW:
-               case AH_ESP_V6_FLOW:
-               case IPV4_FLOW:
-               case IPV6_FLOW:
-                       data = RXH_IP_SRC | RXH_IP_DST;
-                       break;
-               default:
-                       break;
-               }
-out_setdata_unlock:
-               info->data = data;
-out_unlock:
-               mutex_unlock(&efx->rss_lock);
-               return rc;
-       }
-
-       case ETHTOOL_GRXCLSRLCNT:
-               info->data = efx_filter_get_rx_id_limit(efx);
-               if (info->data == 0)
-                       return -EOPNOTSUPP;
-               info->data |= RX_CLS_LOC_SPECIAL;
-               info->rule_cnt =
-                       efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
-               return 0;
-
-       case ETHTOOL_GRXCLSRULE:
-               if (efx_filter_get_rx_id_limit(efx) == 0)
-                       return -EOPNOTSUPP;
-               rc = efx_ethtool_get_class_rule(efx, &info->fs, &rss_context);
-               if (rc < 0)
-                       return rc;
-               if (info->fs.flow_type & FLOW_RSS)
-                       info->rss_context = rss_context;
-               return 0;
-
-       case ETHTOOL_GRXCLSRLALL:
-               info->data = efx_filter_get_rx_id_limit(efx);
-               if (info->data == 0)
-                       return -EOPNOTSUPP;
-               rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL,
-                                          rule_locs, info->rule_cnt);
-               if (rc < 0)
-                       return rc;
-               info->rule_cnt = rc;
-               return 0;
-
-       default:
-               return -EOPNOTSUPP;
-       }
-}
-
-static inline bool ip6_mask_is_full(__be32 mask[4])
-{
-       return !~(mask[0] & mask[1] & mask[2] & mask[3]);
-}
-
-static inline bool ip6_mask_is_empty(__be32 mask[4])
-{
-       return !(mask[0] | mask[1] | mask[2] | mask[3]);
-}
-
-static int efx_ethtool_set_class_rule(struct efx_nic *efx,
-                                     struct ethtool_rx_flow_spec *rule,
-                                     u32 rss_context)
-{
-       struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
-       struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
-       struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
-       struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
-       struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
-       struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
-       struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
-       struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
-       u32 flow_type = rule->flow_type & ~(FLOW_EXT | FLOW_RSS);
-       struct ethhdr *mac_entry = &rule->h_u.ether_spec;
-       struct ethhdr *mac_mask = &rule->m_u.ether_spec;
-       enum efx_filter_flags flags = 0;
-       struct efx_filter_spec spec;
-       int rc;
-
-       /* Check that user wants us to choose the location */
-       if (rule->location != RX_CLS_LOC_ANY)
-               return -EINVAL;
-
-       /* Range-check ring_cookie */
-       if (rule->ring_cookie >= efx->n_rx_channels &&
-           rule->ring_cookie != RX_CLS_FLOW_DISC)
-               return -EINVAL;
-
-       /* Check for unsupported extensions */
-       if ((rule->flow_type & FLOW_EXT) &&
-           (rule->m_ext.vlan_etype || rule->m_ext.data[0] ||
-            rule->m_ext.data[1]))
-               return -EINVAL;
-
-       if (efx->rx_scatter)
-               flags |= EFX_FILTER_FLAG_RX_SCATTER;
-       if (rule->flow_type & FLOW_RSS)
-               flags |= EFX_FILTER_FLAG_RX_RSS;
-
-       efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, flags,
-                          (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
-                          EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);
-
-       if (rule->flow_type & FLOW_RSS)
-               spec.rss_context = rss_context;
-
-       switch (flow_type) {
-       case TCP_V4_FLOW:
-       case UDP_V4_FLOW:
-               spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
-                                   EFX_FILTER_MATCH_IP_PROTO);
-               spec.ether_type = htons(ETH_P_IP);
-               spec.ip_proto = flow_type == TCP_V4_FLOW ? IPPROTO_TCP
-                                                        : IPPROTO_UDP;
-               if (ip_mask->ip4dst) {
-                       if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
-                               return -EINVAL;
-                       spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
-                       spec.loc_host[0] = ip_entry->ip4dst;
-               }
-               if (ip_mask->ip4src) {
-                       if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
-                               return -EINVAL;
-                       spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
-                       spec.rem_host[0] = ip_entry->ip4src;
-               }
-               if (ip_mask->pdst) {
-                       if (ip_mask->pdst != PORT_FULL_MASK)
-                               return -EINVAL;
-                       spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
-                       spec.loc_port = ip_entry->pdst;
-               }
-               if (ip_mask->psrc) {
-                       if (ip_mask->psrc != PORT_FULL_MASK)
-                               return -EINVAL;
-                       spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
-                       spec.rem_port = ip_entry->psrc;
-               }
-               if (ip_mask->tos)
-                       return -EINVAL;
-               break;
-
-       case TCP_V6_FLOW:
-       case UDP_V6_FLOW:
-               spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
-                                   EFX_FILTER_MATCH_IP_PROTO);
-               spec.ether_type = htons(ETH_P_IPV6);
-               spec.ip_proto = flow_type == TCP_V6_FLOW ? IPPROTO_TCP
-                                                        : IPPROTO_UDP;
-               if (!ip6_mask_is_empty(ip6_mask->ip6dst)) {
-                       if (!ip6_mask_is_full(ip6_mask->ip6dst))
-                               return -EINVAL;
-                       spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
-                       memcpy(spec.loc_host, ip6_entry->ip6dst, sizeof(spec.loc_host));
-               }
-               if (!ip6_mask_is_empty(ip6_mask->ip6src)) {
-                       if (!ip6_mask_is_full(ip6_mask->ip6src))
-                               return -EINVAL;
-                       spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
-                       memcpy(spec.rem_host, ip6_entry->ip6src, sizeof(spec.rem_host));
-               }
-               if (ip6_mask->pdst) {
-                       if (ip6_mask->pdst != PORT_FULL_MASK)
-                               return -EINVAL;
-                       spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
-                       spec.loc_port = ip6_entry->pdst;
-               }
-               if (ip6_mask->psrc) {
-                       if (ip6_mask->psrc != PORT_FULL_MASK)
-                               return -EINVAL;
-                       spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
-                       spec.rem_port = ip6_entry->psrc;
-               }
-               if (ip6_mask->tclass)
-                       return -EINVAL;
-               break;
-
-       case IPV4_USER_FLOW:
-               if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver ||
-                   uip_entry->ip_ver != ETH_RX_NFC_IP4)
-                       return -EINVAL;
-               spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE;
-               spec.ether_type = htons(ETH_P_IP);
-               if (uip_mask->ip4dst) {
-                       if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK)
-                               return -EINVAL;
-                       spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
-                       spec.loc_host[0] = uip_entry->ip4dst;
-               }
-               if (uip_mask->ip4src) {
-                       if (uip_mask->ip4src != IP4_ADDR_FULL_MASK)
-                               return -EINVAL;
-                       spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
-                       spec.rem_host[0] = uip_entry->ip4src;
-               }
-               if (uip_mask->proto) {
-                       if (uip_mask->proto != IP_PROTO_FULL_MASK)
-                               return -EINVAL;
-                       spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO;
-                       spec.ip_proto = uip_entry->proto;
-               }
-               break;
-
-       case IPV6_USER_FLOW:
-               if (uip6_mask->l4_4_bytes || uip6_mask->tclass)
-                       return -EINVAL;
-               spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE;
-               spec.ether_type = htons(ETH_P_IPV6);
-               if (!ip6_mask_is_empty(uip6_mask->ip6dst)) {
-                       if (!ip6_mask_is_full(uip6_mask->ip6dst))
-                               return -EINVAL;
-                       spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
-                       memcpy(spec.loc_host, uip6_entry->ip6dst, sizeof(spec.loc_host));
-               }
-               if (!ip6_mask_is_empty(uip6_mask->ip6src)) {
-                       if (!ip6_mask_is_full(uip6_mask->ip6src))
-                               return -EINVAL;
-                       spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
-                       memcpy(spec.rem_host, uip6_entry->ip6src, sizeof(spec.rem_host));
-               }
-               if (uip6_mask->l4_proto) {
-                       if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK)
-                               return -EINVAL;
-                       spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO;
-                       spec.ip_proto = uip6_entry->l4_proto;
-               }
-               break;
-
-       case ETHER_FLOW:
-               if (!is_zero_ether_addr(mac_mask->h_dest)) {
-                       if (ether_addr_equal(mac_mask->h_dest,
-                                            mac_addr_ig_mask))
-                               spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
-                       else if (is_broadcast_ether_addr(mac_mask->h_dest))
-                               spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
-                       else
-                               return -EINVAL;
-                       ether_addr_copy(spec.loc_mac, mac_entry->h_dest);
-               }
-               if (!is_zero_ether_addr(mac_mask->h_source)) {
-                       if (!is_broadcast_ether_addr(mac_mask->h_source))
-                               return -EINVAL;
-                       spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
-                       ether_addr_copy(spec.rem_mac, mac_entry->h_source);
-               }
-               if (mac_mask->h_proto) {
-                       if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
-                               return -EINVAL;
-                       spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
-                       spec.ether_type = mac_entry->h_proto;
-               }
-               break;
-
-       default:
-               return -EINVAL;
-       }
-
-       if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
-               if (rule->m_ext.vlan_tci != htons(0xfff))
-                       return -EINVAL;
-               spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID;
-               spec.outer_vid = rule->h_ext.vlan_tci;
-       }
-
-       rc = efx_filter_insert_filter(efx, &spec, true);
-       if (rc < 0)
-               return rc;
-
-       rule->location = rc;
-       return 0;
-}
-
-static int efx_ethtool_set_rxnfc(struct net_device *net_dev,
-                                struct ethtool_rxnfc *info)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-
-       if (efx_filter_get_rx_id_limit(efx) == 0)
-               return -EOPNOTSUPP;
-
-       switch (info->cmd) {
-       case ETHTOOL_SRXCLSRLINS:
-               return efx_ethtool_set_class_rule(efx, &info->fs,
-                                                 info->rss_context);
-
-       case ETHTOOL_SRXCLSRLDEL:
-               return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL,
-                                                info->fs.location);
-
-       default:
-               return -EOPNOTSUPP;
-       }
-}
-
-static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-
-       if (efx->n_rx_channels == 1)
-               return 0;
-       return ARRAY_SIZE(efx->rss_context.rx_indir_table);
-}
-
-static u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-
-       return efx->type->rx_hash_key_size;
-}
-
-static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
-                               u8 *hfunc)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-       int rc;
-
-       rc = efx->type->rx_pull_rss_config(efx);
-       if (rc)
-               return rc;
-
-       if (hfunc)
-               *hfunc = ETH_RSS_HASH_TOP;
-       if (indir)
-               memcpy(indir, efx->rss_context.rx_indir_table,
-                      sizeof(efx->rss_context.rx_indir_table));
-       if (key)
-               memcpy(key, efx->rss_context.rx_hash_key,
-                      efx->type->rx_hash_key_size);
-       return 0;
-}
-
-static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
-                               const u8 *key, const u8 hfunc)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-
-       /* Hash function is Toeplitz, cannot be changed */
-       if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
-               return -EOPNOTSUPP;
-       if (!indir && !key)
-               return 0;
-
-       if (!key)
-               key = efx->rss_context.rx_hash_key;
-       if (!indir)
-               indir = efx->rss_context.rx_indir_table;
-
-       return efx->type->rx_push_rss_config(efx, true, indir, key);
-}
-
-static int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
-                                       u8 *key, u8 *hfunc, u32 rss_context)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-       struct efx_rss_context *ctx;
-       int rc = 0;
-
-       if (!efx->type->rx_pull_rss_context_config)
-               return -EOPNOTSUPP;
-
-       mutex_lock(&efx->rss_lock);
-       ctx = efx_find_rss_context_entry(efx, rss_context);
-       if (!ctx) {
-               rc = -ENOENT;
-               goto out_unlock;
-       }
-       rc = efx->type->rx_pull_rss_context_config(efx, ctx);
-       if (rc)
-               goto out_unlock;
-
-       if (hfunc)
-               *hfunc = ETH_RSS_HASH_TOP;
-       if (indir)
-               memcpy(indir, ctx->rx_indir_table, sizeof(ctx->rx_indir_table));
-       if (key)
-               memcpy(key, ctx->rx_hash_key, efx->type->rx_hash_key_size);
-out_unlock:
-       mutex_unlock(&efx->rss_lock);
-       return rc;
-}
-
-static int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
-                                       const u32 *indir, const u8 *key,
-                                       const u8 hfunc, u32 *rss_context,
-                                       bool delete)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-       struct efx_rss_context *ctx;
-       bool allocated = false;
-       int rc;
-
-       if (!efx->type->rx_push_rss_context_config)
-               return -EOPNOTSUPP;
-       /* Hash function is Toeplitz, cannot be changed */
-       if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
-               return -EOPNOTSUPP;
-
-       mutex_lock(&efx->rss_lock);
-
-       if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
-               if (delete) {
-                       /* alloc + delete == Nothing to do */
-                       rc = -EINVAL;
-                       goto out_unlock;
-               }
-               ctx = efx_alloc_rss_context_entry(efx);
-               if (!ctx) {
-                       rc = -ENOMEM;
-                       goto out_unlock;
-               }
-               ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
-               /* Initialise indir table and key to defaults */
-               efx_set_default_rx_indir_table(efx, ctx);
-               netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
-               allocated = true;
-       } else {
-               ctx = efx_find_rss_context_entry(efx, *rss_context);
-               if (!ctx) {
-                       rc = -ENOENT;
-                       goto out_unlock;
-               }
-       }
-
-       if (delete) {
-               /* delete this context */
-               rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
-               if (!rc)
-                       efx_free_rss_context_entry(ctx);
-               goto out_unlock;
-       }
-
-       if (!key)
-               key = ctx->rx_hash_key;
-       if (!indir)
-               indir = ctx->rx_indir_table;
-
-       rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key);
-       if (rc && allocated)
-               efx_free_rss_context_entry(ctx);
-       else
-               *rss_context = ctx->user_id;
-out_unlock:
-       mutex_unlock(&efx->rss_lock);
-       return rc;
-}
-
 static int efx_ethtool_get_ts_info(struct net_device *net_dev,
                                   struct ethtool_ts_info *ts_info)
 {
@@ -1071,68 +221,7 @@ static int efx_ethtool_get_ts_info(struct net_device *net_dev,
        return 0;
 }
 
-static int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
-                                        struct ethtool_eeprom *ee,
-                                        u8 *data)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-       int ret;
-
-       if (!efx->phy_op || !efx->phy_op->get_module_eeprom)
-               return -EOPNOTSUPP;
-
-       mutex_lock(&efx->mac_lock);
-       ret = efx->phy_op->get_module_eeprom(efx, ee, data);
-       mutex_unlock(&efx->mac_lock);
-
-       return ret;
-}
-
-static int efx_ethtool_get_module_info(struct net_device *net_dev,
-                                      struct ethtool_modinfo *modinfo)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-       int ret;
-
-       if (!efx->phy_op || !efx->phy_op->get_module_info)
-               return -EOPNOTSUPP;
-
-       mutex_lock(&efx->mac_lock);
-       ret = efx->phy_op->get_module_info(efx, modinfo);
-       mutex_unlock(&efx->mac_lock);
-
-       return ret;
-}
-
-static int efx_ethtool_get_fecparam(struct net_device *net_dev,
-                                   struct ethtool_fecparam *fecparam)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-       int rc;
-
-       if (!efx->phy_op || !efx->phy_op->get_fecparam)
-               return -EOPNOTSUPP;
-       mutex_lock(&efx->mac_lock);
-       rc = efx->phy_op->get_fecparam(efx, fecparam);
-       mutex_unlock(&efx->mac_lock);
-
-       return rc;
-}
-
-static int efx_ethtool_set_fecparam(struct net_device *net_dev,
-                                   struct ethtool_fecparam *fecparam)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-       int rc;
-
-       if (!efx->phy_op || !efx->phy_op->get_fecparam)
-               return -EOPNOTSUPP;
-       mutex_lock(&efx->mac_lock);
-       rc = efx->phy_op->set_fecparam(efx, fecparam);
-       mutex_unlock(&efx->mac_lock);
-
-       return rc;
-}
+const char *efx_driver_name = KBUILD_MODNAME;
 
 const struct ethtool_ops efx_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
index b8d281a..e9a5a66 100644 (file)
@@ -13,6 +13,7 @@
 #include "mcdi.h"
 #include "nic.h"
 #include "selftest.h"
+#include "rx_common.h"
 #include "ethtool_common.h"
 
 struct efx_sw_stat_desc {
@@ -103,7 +104,7 @@ void efx_ethtool_get_drvinfo(struct net_device *net_dev,
 {
        struct efx_nic *efx = netdev_priv(net_dev);
 
-       strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+       strlcpy(info->driver, efx_driver_name, sizeof(info->driver));
        strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
        efx_mcdi_print_fwver(efx, info->fw_version,
                             sizeof(info->fw_version));
@@ -124,6 +125,62 @@ void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
        efx->msg_enable = msg_enable;
 }
 
+void efx_ethtool_self_test(struct net_device *net_dev,
+                          struct ethtool_test *test, u64 *data)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       struct efx_self_tests *efx_tests;
+       bool already_up;
+       int rc = -ENOMEM;
+
+       efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
+       if (!efx_tests)
+               goto fail;
+
+       if (efx->state != STATE_READY) {
+               rc = -EBUSY;
+               goto out;
+       }
+
+       netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
+                  (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+
+       /* We need rx buffers and interrupts. */
+       already_up = (efx->net_dev->flags & IFF_UP);
+       if (!already_up) {
+               rc = dev_open(efx->net_dev, NULL);
+               if (rc) {
+                       netif_err(efx, drv, efx->net_dev,
+                                 "failed opening device.\n");
+                       goto out;
+               }
+       }
+
+       rc = efx_selftest(efx, efx_tests, test->flags);
+
+       if (!already_up)
+               dev_close(efx->net_dev);
+
+       netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
+                  rc == 0 ? "passed" : "failed",
+                  (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+
+out:
+       efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
+       kfree(efx_tests);
+fail:
+       if (rc)
+               test->flags |= ETH_TEST_FL_FAILED;
+}
+
+/* Restart autonegotiation */
+int efx_ethtool_nway_reset(struct net_device *net_dev)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+
+       return mdio45_nway_restart(&efx->mdio);
+}
+
 void efx_ethtool_get_pauseparam(struct net_device *net_dev,
                                struct ethtool_pauseparam *pause)
 {
@@ -134,6 +191,64 @@ void efx_ethtool_get_pauseparam(struct net_device *net_dev,
        pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
 }
 
+int efx_ethtool_set_pauseparam(struct net_device *net_dev,
+                              struct ethtool_pauseparam *pause)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       u8 wanted_fc, old_fc;
+       u32 old_adv;
+       int rc = 0;
+
+       mutex_lock(&efx->mac_lock);
+
+       wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) |
+                    (pause->tx_pause ? EFX_FC_TX : 0) |
+                    (pause->autoneg ? EFX_FC_AUTO : 0));
+
+       if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
+               netif_dbg(efx, drv, efx->net_dev,
+                         "Flow control unsupported: tx ON rx OFF\n");
+               rc = -EINVAL;
+               goto out;
+       }
+
+       if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising[0]) {
+               netif_dbg(efx, drv, efx->net_dev,
+                         "Autonegotiation is disabled\n");
+               rc = -EINVAL;
+               goto out;
+       }
+
+       /* Hook for Falcon bug 11482 workaround */
+       if (efx->type->prepare_enable_fc_tx &&
+           (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
+               efx->type->prepare_enable_fc_tx(efx);
+
+       old_adv = efx->link_advertising[0];
+       old_fc = efx->wanted_fc;
+       efx_link_set_wanted_fc(efx, wanted_fc);
+       if (efx->link_advertising[0] != old_adv ||
+           (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
+               rc = efx->phy_op->reconfigure(efx);
+               if (rc) {
+                       netif_err(efx, drv, efx->net_dev,
+                                 "Unable to advertise requested flow "
+                                 "control setting\n");
+                       goto out;
+               }
+       }
+
+       /* Reconfigure the MAC. The PHY *may* generate a link state change event
+        * if the user just changed the advertised capabilities, but there's no
+        * harm doing this twice */
+       efx_mac_reconfigure(efx, false);
+
+out:
+       mutex_unlock(&efx->mac_lock);
+
+       return rc;
+}
+
 /**
  * efx_fill_test - fill in an individual self-test entry
  * @test_index:                Index of the test
@@ -172,8 +287,7 @@ static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
 }
 
 #define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
-#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
-#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
+#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->label
 #define EFX_LOOPBACK_NAME(_mode, _counter)                     \
        "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
 
@@ -201,11 +315,11 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
 
        efx_for_each_channel_tx_queue(tx_queue, channel) {
                efx_fill_test(test_index++, strings, data,
-                             &lb_tests->tx_sent[tx_queue->queue],
+                             &lb_tests->tx_sent[tx_queue->label],
                              EFX_TX_QUEUE_NAME(tx_queue),
                              EFX_LOOPBACK_NAME(mode, "tx_sent"));
                efx_fill_test(test_index++, strings, data,
-                             &lb_tests->tx_done[tx_queue->queue],
+                             &lb_tests->tx_done[tx_queue->label],
                              EFX_TX_QUEUE_NAME(tx_queue),
                              EFX_LOOPBACK_NAME(mode, "tx_done"));
        }
@@ -455,3 +569,799 @@ void efx_ethtool_get_stats(struct net_device *net_dev,
 
        efx_ptp_update_stats(efx, data);
 }
+
+/* This must be called with rtnl_lock held. */
+int efx_ethtool_get_link_ksettings(struct net_device *net_dev,
+                                  struct ethtool_link_ksettings *cmd)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       struct efx_link_state *link_state = &efx->link_state;
+       u32 supported;
+
+       mutex_lock(&efx->mac_lock);
+       efx->phy_op->get_link_ksettings(efx, cmd);
+       mutex_unlock(&efx->mac_lock);
+
+       /* Both MACs support pause frames (bidirectional and respond-only) */
+       ethtool_convert_link_mode_to_legacy_u32(&supported,
+                                               cmd->link_modes.supported);
+
+       supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
+       ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+                                               supported);
+
+       if (LOOPBACK_INTERNAL(efx)) {
+               cmd->base.speed = link_state->speed;
+               cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
+       }
+
+       return 0;
+}
+
+/* This must be called with rtnl_lock held. */
+int efx_ethtool_set_link_ksettings(struct net_device *net_dev,
+                                  const struct ethtool_link_ksettings *cmd)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       int rc;
+
+       /* GMAC does not support 1000Mbps HD */
+       if ((cmd->base.speed == SPEED_1000) &&
+           (cmd->base.duplex != DUPLEX_FULL)) {
+               netif_dbg(efx, drv, efx->net_dev,
+                         "rejecting unsupported 1000Mbps HD setting\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&efx->mac_lock);
+       rc = efx->phy_op->set_link_ksettings(efx, cmd);
+       mutex_unlock(&efx->mac_lock);
+       return rc;
+}
+
+int efx_ethtool_get_fecparam(struct net_device *net_dev,
+                            struct ethtool_fecparam *fecparam)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       int rc;
+
+       if (!efx->phy_op || !efx->phy_op->get_fecparam)
+               return -EOPNOTSUPP;
+       mutex_lock(&efx->mac_lock);
+       rc = efx->phy_op->get_fecparam(efx, fecparam);
+       mutex_unlock(&efx->mac_lock);
+
+       return rc;
+}
+
+int efx_ethtool_set_fecparam(struct net_device *net_dev,
+                            struct ethtool_fecparam *fecparam)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       int rc;
+
+       if (!efx->phy_op || !efx->phy_op->get_fecparam)
+               return -EOPNOTSUPP;
+       mutex_lock(&efx->mac_lock);
+       rc = efx->phy_op->set_fecparam(efx, fecparam);
+       mutex_unlock(&efx->mac_lock);
+
+       return rc;
+}
+
+/* MAC address mask including only I/G bit */
+static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0};
+
+#define IP4_ADDR_FULL_MASK     ((__force __be32)~0)
+#define IP_PROTO_FULL_MASK     0xFF
+#define PORT_FULL_MASK         ((__force __be16)~0)
+#define ETHER_TYPE_FULL_MASK   ((__force __be16)~0)
+
+static inline void ip6_fill_mask(__be32 *mask)
+{
+       mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0;
+}
+
+static int efx_ethtool_get_class_rule(struct efx_nic *efx,
+                                     struct ethtool_rx_flow_spec *rule,
+                                     u32 *rss_context)
+{
+       struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
+       struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+       struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
+       struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
+       struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
+       struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
+       struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
+       struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
+       struct ethhdr *mac_entry = &rule->h_u.ether_spec;
+       struct ethhdr *mac_mask = &rule->m_u.ether_spec;
+       struct efx_filter_spec spec;
+       int rc;
+
+       rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
+                                       rule->location, &spec);
+       if (rc)
+               return rc;
+
+       if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
+               rule->ring_cookie = RX_CLS_FLOW_DISC;
+       else
+               rule->ring_cookie = spec.dmaq_id;
+
+       if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
+           spec.ether_type == htons(ETH_P_IP) &&
+           (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
+           (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
+           !(spec.match_flags &
+             ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+               EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+               EFX_FILTER_MATCH_IP_PROTO |
+               EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
+               rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
+                                  TCP_V4_FLOW : UDP_V4_FLOW);
+               if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+                       ip_entry->ip4dst = spec.loc_host[0];
+                       ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
+               }
+               if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+                       ip_entry->ip4src = spec.rem_host[0];
+                       ip_mask->ip4src = IP4_ADDR_FULL_MASK;
+               }
+               if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
+                       ip_entry->pdst = spec.loc_port;
+                       ip_mask->pdst = PORT_FULL_MASK;
+               }
+               if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
+                       ip_entry->psrc = spec.rem_port;
+                       ip_mask->psrc = PORT_FULL_MASK;
+               }
+       } else if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
+           spec.ether_type == htons(ETH_P_IPV6) &&
+           (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
+           (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
+           !(spec.match_flags &
+             ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+               EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+               EFX_FILTER_MATCH_IP_PROTO |
+               EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
+               rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
+                                  TCP_V6_FLOW : UDP_V6_FLOW);
+               if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+                       memcpy(ip6_entry->ip6dst, spec.loc_host,
+                              sizeof(ip6_entry->ip6dst));
+                       ip6_fill_mask(ip6_mask->ip6dst);
+               }
+               if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+                       memcpy(ip6_entry->ip6src, spec.rem_host,
+                              sizeof(ip6_entry->ip6src));
+                       ip6_fill_mask(ip6_mask->ip6src);
+               }
+               if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
+                       ip6_entry->pdst = spec.loc_port;
+                       ip6_mask->pdst = PORT_FULL_MASK;
+               }
+               if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
+                       ip6_entry->psrc = spec.rem_port;
+                       ip6_mask->psrc = PORT_FULL_MASK;
+               }
+       } else if (!(spec.match_flags &
+                    ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
+                      EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
+                      EFX_FILTER_MATCH_OUTER_VID))) {
+               rule->flow_type = ETHER_FLOW;
+               if (spec.match_flags &
+                   (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
+                       ether_addr_copy(mac_entry->h_dest, spec.loc_mac);
+                       if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
+                               eth_broadcast_addr(mac_mask->h_dest);
+                       else
+                               ether_addr_copy(mac_mask->h_dest,
+                                               mac_addr_ig_mask);
+               }
+               if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
+                       ether_addr_copy(mac_entry->h_source, spec.rem_mac);
+                       eth_broadcast_addr(mac_mask->h_source);
+               }
+               if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
+                       mac_entry->h_proto = spec.ether_type;
+                       mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
+               }
+       } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
+                  spec.ether_type == htons(ETH_P_IP) &&
+                  !(spec.match_flags &
+                    ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+                      EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+                      EFX_FILTER_MATCH_IP_PROTO))) {
+               rule->flow_type = IPV4_USER_FLOW;
+               uip_entry->ip_ver = ETH_RX_NFC_IP4;
+               if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) {
+                       uip_mask->proto = IP_PROTO_FULL_MASK;
+                       uip_entry->proto = spec.ip_proto;
+               }
+               if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+                       uip_entry->ip4dst = spec.loc_host[0];
+                       uip_mask->ip4dst = IP4_ADDR_FULL_MASK;
+               }
+               if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+                       uip_entry->ip4src = spec.rem_host[0];
+                       uip_mask->ip4src = IP4_ADDR_FULL_MASK;
+               }
+       } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
+                  spec.ether_type == htons(ETH_P_IPV6) &&
+                  !(spec.match_flags &
+                    ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+                      EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+                      EFX_FILTER_MATCH_IP_PROTO))) {
+               rule->flow_type = IPV6_USER_FLOW;
+               if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) {
+                       uip6_mask->l4_proto = IP_PROTO_FULL_MASK;
+                       uip6_entry->l4_proto = spec.ip_proto;
+               }
+               if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+                       memcpy(uip6_entry->ip6dst, spec.loc_host,
+                              sizeof(uip6_entry->ip6dst));
+                       ip6_fill_mask(uip6_mask->ip6dst);
+               }
+               if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+                       memcpy(uip6_entry->ip6src, spec.rem_host,
+                              sizeof(uip6_entry->ip6src));
+                       ip6_fill_mask(uip6_mask->ip6src);
+               }
+       } else {
+               /* The above should handle all filters that we insert */
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
+       if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) {
+               rule->flow_type |= FLOW_EXT;
+               rule->h_ext.vlan_tci = spec.outer_vid;
+               rule->m_ext.vlan_tci = htons(0xfff);
+       }
+
+       if (spec.flags & EFX_FILTER_FLAG_RX_RSS) {
+               rule->flow_type |= FLOW_RSS;
+               *rss_context = spec.rss_context;
+       }
+
+       return rc;
+}
+
+int efx_ethtool_get_rxnfc(struct net_device *net_dev,
+                         struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       u32 rss_context = 0;
+       s32 rc = 0;
+
+       switch (info->cmd) {
+       case ETHTOOL_GRXRINGS:
+               info->data = efx->n_rx_channels;
+               return 0;
+
+       case ETHTOOL_GRXFH: {
+               struct efx_rss_context *ctx = &efx->rss_context;
+               __u64 data;
+
+               mutex_lock(&efx->rss_lock);
+               if (info->flow_type & FLOW_RSS && info->rss_context) {
+                       ctx = efx_find_rss_context_entry(efx, info->rss_context);
+                       if (!ctx) {
+                               rc = -ENOENT;
+                               goto out_unlock;
+                       }
+               }
+
+               data = 0;
+               if (!efx_rss_active(ctx)) /* No RSS */
+                       goto out_setdata_unlock;
+
+               switch (info->flow_type & ~FLOW_RSS) {
+               case UDP_V4_FLOW:
+               case UDP_V6_FLOW:
+                       if (ctx->rx_hash_udp_4tuple)
+                               data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+                                       RXH_IP_SRC | RXH_IP_DST);
+                       else
+                               data = RXH_IP_SRC | RXH_IP_DST;
+                       break;
+               case TCP_V4_FLOW:
+               case TCP_V6_FLOW:
+                       data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+                               RXH_IP_SRC | RXH_IP_DST);
+                       break;
+               case SCTP_V4_FLOW:
+               case SCTP_V6_FLOW:
+               case AH_ESP_V4_FLOW:
+               case AH_ESP_V6_FLOW:
+               case IPV4_FLOW:
+               case IPV6_FLOW:
+                       data = RXH_IP_SRC | RXH_IP_DST;
+                       break;
+               default:
+                       break;
+               }
+out_setdata_unlock:
+               info->data = data;
+out_unlock:
+               mutex_unlock(&efx->rss_lock);
+               return rc;
+       }
+
+       case ETHTOOL_GRXCLSRLCNT:
+               info->data = efx_filter_get_rx_id_limit(efx);
+               if (info->data == 0)
+                       return -EOPNOTSUPP;
+               info->data |= RX_CLS_LOC_SPECIAL;
+               info->rule_cnt =
+                       efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
+               return 0;
+
+       case ETHTOOL_GRXCLSRULE:
+               if (efx_filter_get_rx_id_limit(efx) == 0)
+                       return -EOPNOTSUPP;
+               rc = efx_ethtool_get_class_rule(efx, &info->fs, &rss_context);
+               if (rc < 0)
+                       return rc;
+               if (info->fs.flow_type & FLOW_RSS)
+                       info->rss_context = rss_context;
+               return 0;
+
+       case ETHTOOL_GRXCLSRLALL:
+               info->data = efx_filter_get_rx_id_limit(efx);
+               if (info->data == 0)
+                       return -EOPNOTSUPP;
+               rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL,
+                                          rule_locs, info->rule_cnt);
+               if (rc < 0)
+                       return rc;
+               info->rule_cnt = rc;
+               return 0;
+
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static inline bool ip6_mask_is_full(__be32 mask[4])
+{
+       return !~(mask[0] & mask[1] & mask[2] & mask[3]);
+}
+
+static inline bool ip6_mask_is_empty(__be32 mask[4])
+{
+       return !(mask[0] | mask[1] | mask[2] | mask[3]);
+}
+
+static int efx_ethtool_set_class_rule(struct efx_nic *efx,
+                                     struct ethtool_rx_flow_spec *rule,
+                                     u32 rss_context)
+{
+       struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
+       struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+       struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
+       struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
+       struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
+       struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
+       struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
+       struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
+       u32 flow_type = rule->flow_type & ~(FLOW_EXT | FLOW_RSS);
+       struct ethhdr *mac_entry = &rule->h_u.ether_spec;
+       struct ethhdr *mac_mask = &rule->m_u.ether_spec;
+       enum efx_filter_flags flags = 0;
+       struct efx_filter_spec spec;
+       int rc;
+
+       /* Check that user wants us to choose the location */
+       if (rule->location != RX_CLS_LOC_ANY)
+               return -EINVAL;
+
+       /* Range-check ring_cookie */
+       if (rule->ring_cookie >= efx->n_rx_channels &&
+           rule->ring_cookie != RX_CLS_FLOW_DISC)
+               return -EINVAL;
+
+       /* Check for unsupported extensions */
+       if ((rule->flow_type & FLOW_EXT) &&
+           (rule->m_ext.vlan_etype || rule->m_ext.data[0] ||
+            rule->m_ext.data[1]))
+               return -EINVAL;
+
+       if (efx->rx_scatter)
+               flags |= EFX_FILTER_FLAG_RX_SCATTER;
+       if (rule->flow_type & FLOW_RSS)
+               flags |= EFX_FILTER_FLAG_RX_RSS;
+
+       efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, flags,
+                          (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
+                          EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);
+
+       if (rule->flow_type & FLOW_RSS)
+               spec.rss_context = rss_context;
+
+       switch (flow_type) {
+       case TCP_V4_FLOW:
+       case UDP_V4_FLOW:
+               spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
+                                   EFX_FILTER_MATCH_IP_PROTO);
+               spec.ether_type = htons(ETH_P_IP);
+               spec.ip_proto = flow_type == TCP_V4_FLOW ? IPPROTO_TCP
+                                                        : IPPROTO_UDP;
+               if (ip_mask->ip4dst) {
+                       if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
+                               return -EINVAL;
+                       spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+                       spec.loc_host[0] = ip_entry->ip4dst;
+               }
+               if (ip_mask->ip4src) {
+                       if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
+                               return -EINVAL;
+                       spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+                       spec.rem_host[0] = ip_entry->ip4src;
+               }
+               if (ip_mask->pdst) {
+                       if (ip_mask->pdst != PORT_FULL_MASK)
+                               return -EINVAL;
+                       spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+                       spec.loc_port = ip_entry->pdst;
+               }
+               if (ip_mask->psrc) {
+                       if (ip_mask->psrc != PORT_FULL_MASK)
+                               return -EINVAL;
+                       spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
+                       spec.rem_port = ip_entry->psrc;
+               }
+               if (ip_mask->tos)
+                       return -EINVAL;
+               break;
+
+       case TCP_V6_FLOW:
+       case UDP_V6_FLOW:
+               spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
+                                   EFX_FILTER_MATCH_IP_PROTO);
+               spec.ether_type = htons(ETH_P_IPV6);
+               spec.ip_proto = flow_type == TCP_V6_FLOW ? IPPROTO_TCP
+                                                        : IPPROTO_UDP;
+               if (!ip6_mask_is_empty(ip6_mask->ip6dst)) {
+                       if (!ip6_mask_is_full(ip6_mask->ip6dst))
+                               return -EINVAL;
+                       spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+                       memcpy(spec.loc_host, ip6_entry->ip6dst, sizeof(spec.loc_host));
+               }
+               if (!ip6_mask_is_empty(ip6_mask->ip6src)) {
+                       if (!ip6_mask_is_full(ip6_mask->ip6src))
+                               return -EINVAL;
+                       spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+                       memcpy(spec.rem_host, ip6_entry->ip6src, sizeof(spec.rem_host));
+               }
+               if (ip6_mask->pdst) {
+                       if (ip6_mask->pdst != PORT_FULL_MASK)
+                               return -EINVAL;
+                       spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+                       spec.loc_port = ip6_entry->pdst;
+               }
+               if (ip6_mask->psrc) {
+                       if (ip6_mask->psrc != PORT_FULL_MASK)
+                               return -EINVAL;
+                       spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
+                       spec.rem_port = ip6_entry->psrc;
+               }
+               if (ip6_mask->tclass)
+                       return -EINVAL;
+               break;
+
+       case IPV4_USER_FLOW:
+               if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver ||
+                   uip_entry->ip_ver != ETH_RX_NFC_IP4)
+                       return -EINVAL;
+               spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE;
+               spec.ether_type = htons(ETH_P_IP);
+               if (uip_mask->ip4dst) {
+                       if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK)
+                               return -EINVAL;
+                       spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+                       spec.loc_host[0] = uip_entry->ip4dst;
+               }
+               if (uip_mask->ip4src) {
+                       if (uip_mask->ip4src != IP4_ADDR_FULL_MASK)
+                               return -EINVAL;
+                       spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+                       spec.rem_host[0] = uip_entry->ip4src;
+               }
+               if (uip_mask->proto) {
+                       if (uip_mask->proto != IP_PROTO_FULL_MASK)
+                               return -EINVAL;
+                       spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+                       spec.ip_proto = uip_entry->proto;
+               }
+               break;
+
+       case IPV6_USER_FLOW:
+               if (uip6_mask->l4_4_bytes || uip6_mask->tclass)
+                       return -EINVAL;
+               spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE;
+               spec.ether_type = htons(ETH_P_IPV6);
+               if (!ip6_mask_is_empty(uip6_mask->ip6dst)) {
+                       if (!ip6_mask_is_full(uip6_mask->ip6dst))
+                               return -EINVAL;
+                       spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+                       memcpy(spec.loc_host, uip6_entry->ip6dst, sizeof(spec.loc_host));
+               }
+               if (!ip6_mask_is_empty(uip6_mask->ip6src)) {
+                       if (!ip6_mask_is_full(uip6_mask->ip6src))
+                               return -EINVAL;
+                       spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+                       memcpy(spec.rem_host, uip6_entry->ip6src, sizeof(spec.rem_host));
+               }
+               if (uip6_mask->l4_proto) {
+                       if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK)
+                               return -EINVAL;
+                       spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+                       spec.ip_proto = uip6_entry->l4_proto;
+               }
+               break;
+
+       case ETHER_FLOW:
+               if (!is_zero_ether_addr(mac_mask->h_dest)) {
+                       if (ether_addr_equal(mac_mask->h_dest,
+                                            mac_addr_ig_mask))
+                               spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+                       else if (is_broadcast_ether_addr(mac_mask->h_dest))
+                               spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+                       else
+                               return -EINVAL;
+                       ether_addr_copy(spec.loc_mac, mac_entry->h_dest);
+               }
+               if (!is_zero_ether_addr(mac_mask->h_source)) {
+                       if (!is_broadcast_ether_addr(mac_mask->h_source))
+                               return -EINVAL;
+                       spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
+                       ether_addr_copy(spec.rem_mac, mac_entry->h_source);
+               }
+               if (mac_mask->h_proto) {
+                       if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
+                               return -EINVAL;
+                       spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+                       spec.ether_type = mac_entry->h_proto;
+               }
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
+               if (rule->m_ext.vlan_tci != htons(0xfff))
+                       return -EINVAL;
+               spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+               spec.outer_vid = rule->h_ext.vlan_tci;
+       }
+
+       rc = efx_filter_insert_filter(efx, &spec, true);
+       if (rc < 0)
+               return rc;
+
+       rule->location = rc;
+       return 0;
+}
+
+int efx_ethtool_set_rxnfc(struct net_device *net_dev,
+                         struct ethtool_rxnfc *info)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+
+       if (efx_filter_get_rx_id_limit(efx) == 0)
+               return -EOPNOTSUPP;
+
+       switch (info->cmd) {
+       case ETHTOOL_SRXCLSRLINS:
+               return efx_ethtool_set_class_rule(efx, &info->fs,
+                                                 info->rss_context);
+
+       case ETHTOOL_SRXCLSRLDEL:
+               return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL,
+                                                info->fs.location);
+
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+
+       if (efx->n_rx_channels == 1)
+               return 0;
+       return ARRAY_SIZE(efx->rss_context.rx_indir_table);
+}
+
+u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+
+       return efx->type->rx_hash_key_size;
+}
+
+int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
+                        u8 *hfunc)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       int rc;
+
+       rc = efx->type->rx_pull_rss_config(efx);
+       if (rc)
+               return rc;
+
+       if (hfunc)
+               *hfunc = ETH_RSS_HASH_TOP;
+       if (indir)
+               memcpy(indir, efx->rss_context.rx_indir_table,
+                      sizeof(efx->rss_context.rx_indir_table));
+       if (key)
+               memcpy(key, efx->rss_context.rx_hash_key,
+                      efx->type->rx_hash_key_size);
+       return 0;
+}
+
+int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
+                        const u8 *key, const u8 hfunc)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+
+       /* Hash function is Toeplitz, cannot be changed */
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+               return -EOPNOTSUPP;
+       if (!indir && !key)
+               return 0;
+
+       if (!key)
+               key = efx->rss_context.rx_hash_key;
+       if (!indir)
+               indir = efx->rss_context.rx_indir_table;
+
+       return efx->type->rx_push_rss_config(efx, true, indir, key);
+}
+
+int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
+                                u8 *key, u8 *hfunc, u32 rss_context)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       struct efx_rss_context *ctx;
+       int rc = 0;
+
+       if (!efx->type->rx_pull_rss_context_config)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&efx->rss_lock);
+       ctx = efx_find_rss_context_entry(efx, rss_context);
+       if (!ctx) {
+               rc = -ENOENT;
+               goto out_unlock;
+       }
+       rc = efx->type->rx_pull_rss_context_config(efx, ctx);
+       if (rc)
+               goto out_unlock;
+
+       if (hfunc)
+               *hfunc = ETH_RSS_HASH_TOP;
+       if (indir)
+               memcpy(indir, ctx->rx_indir_table, sizeof(ctx->rx_indir_table));
+       if (key)
+               memcpy(key, ctx->rx_hash_key, efx->type->rx_hash_key_size);
+out_unlock:
+       mutex_unlock(&efx->rss_lock);
+       return rc;
+}
+
+int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
+                                const u32 *indir, const u8 *key,
+                                const u8 hfunc, u32 *rss_context,
+                                bool delete)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       struct efx_rss_context *ctx;
+       bool allocated = false;
+       int rc;
+
+       if (!efx->type->rx_push_rss_context_config)
+               return -EOPNOTSUPP;
+       /* Hash function is Toeplitz, cannot be changed */
+       if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&efx->rss_lock);
+
+       if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
+               if (delete) {
+                       /* alloc + delete == Nothing to do */
+                       rc = -EINVAL;
+                       goto out_unlock;
+               }
+               ctx = efx_alloc_rss_context_entry(efx);
+               if (!ctx) {
+                       rc = -ENOMEM;
+                       goto out_unlock;
+               }
+               ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+               /* Initialise indir table and key to defaults */
+               efx_set_default_rx_indir_table(efx, ctx);
+               netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
+               allocated = true;
+       } else {
+               ctx = efx_find_rss_context_entry(efx, *rss_context);
+               if (!ctx) {
+                       rc = -ENOENT;
+                       goto out_unlock;
+               }
+       }
+
+       if (delete) {
+               /* delete this context */
+               rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
+               if (!rc)
+                       efx_free_rss_context_entry(ctx);
+               goto out_unlock;
+       }
+
+       if (!key)
+               key = ctx->rx_hash_key;
+       if (!indir)
+               indir = ctx->rx_indir_table;
+
+       rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key);
+       if (rc && allocated)
+               efx_free_rss_context_entry(ctx);
+       else
+               *rss_context = ctx->user_id;
+out_unlock:
+       mutex_unlock(&efx->rss_lock);
+       return rc;
+}
+
+int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       int rc;
+
+       rc = efx->type->map_reset_flags(flags);
+       if (rc < 0)
+               return rc;
+
+       return efx_reset(efx, rc);
+}
+
+int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
+                                 struct ethtool_eeprom *ee,
+                                 u8 *data)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       int ret;
+
+       if (!efx->phy_op || !efx->phy_op->get_module_eeprom)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&efx->mac_lock);
+       ret = efx->phy_op->get_module_eeprom(efx, ee, data);
+       mutex_unlock(&efx->mac_lock);
+
+       return ret;
+}
+
+int efx_ethtool_get_module_info(struct net_device *net_dev,
+                               struct ethtool_modinfo *modinfo)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       int ret;
+
+       if (!efx->phy_op || !efx->phy_op->get_module_info)
+               return -EOPNOTSUPP;
+
+       mutex_lock(&efx->mac_lock);
+       ret = efx->phy_op->get_module_info(efx, modinfo);
+       mutex_unlock(&efx->mac_lock);
+
+       return ret;
+}
index fa62431..3f3aaa9 100644 (file)
 #ifndef EFX_ETHTOOL_COMMON_H
 #define EFX_ETHTOOL_COMMON_H
 
+extern const char *efx_driver_name;
+
 void efx_ethtool_get_drvinfo(struct net_device *net_dev,
                             struct ethtool_drvinfo *info);
 u32 efx_ethtool_get_msglevel(struct net_device *net_dev);
 void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable);
+void efx_ethtool_self_test(struct net_device *net_dev,
+                          struct ethtool_test *test, u64 *data);
+int efx_ethtool_nway_reset(struct net_device *net_dev);
 void efx_ethtool_get_pauseparam(struct net_device *net_dev,
                                struct ethtool_pauseparam *pause);
+int efx_ethtool_set_pauseparam(struct net_device *net_dev,
+                              struct ethtool_pauseparam *pause);
 int efx_ethtool_fill_self_tests(struct efx_nic *efx,
                                struct efx_self_tests *tests,
                                u8 *strings, u64 *data);
@@ -26,5 +33,34 @@ void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set,
 void efx_ethtool_get_stats(struct net_device *net_dev,
                           struct ethtool_stats *stats __attribute__ ((unused)),
                           u64 *data);
-
+int efx_ethtool_get_link_ksettings(struct net_device *net_dev,
+                                  struct ethtool_link_ksettings *out);
+int efx_ethtool_set_link_ksettings(struct net_device *net_dev,
+                                  const struct ethtool_link_ksettings *settings);
+int efx_ethtool_get_fecparam(struct net_device *net_dev,
+                            struct ethtool_fecparam *fecparam);
+int efx_ethtool_set_fecparam(struct net_device *net_dev,
+                            struct ethtool_fecparam *fecparam);
+int efx_ethtool_get_rxnfc(struct net_device *net_dev,
+                         struct ethtool_rxnfc *info, u32 *rule_locs);
+int efx_ethtool_set_rxnfc(struct net_device *net_dev,
+                         struct ethtool_rxnfc *info);
+u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev);
+u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev);
+int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
+                        u8 *hfunc);
+int efx_ethtool_set_rxfh(struct net_device *net_dev,
+                        const u32 *indir, const u8 *key, const u8 hfunc);
+int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
+                                u8 *key, u8 *hfunc, u32 rss_context);
+int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
+                                const u32 *indir, const u8 *key,
+                                const u8 hfunc, u32 *rss_context,
+                                bool delete);
+int efx_ethtool_reset(struct net_device *net_dev, u32 *flags);
+int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
+                                 struct ethtool_eeprom *ee,
+                                 u8 *data);
+int efx_ethtool_get_module_info(struct net_device *net_dev,
+                               struct ethtool_modinfo *modinfo);
 #endif
index dbbb898..d07eeaa 100644 (file)
@@ -379,7 +379,7 @@ int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
 
 void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
 {
-       int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+       int csum = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD;
        struct efx_nic *efx = tx_queue->efx;
        efx_oword_t reg;
 
@@ -395,7 +395,7 @@ void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
                              FRF_AZ_TX_DESCQ_EVQ_ID,
                              tx_queue->channel->channel,
                              FRF_AZ_TX_DESCQ_OWNER_ID, 0,
-                             FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
+                             FRF_AZ_TX_DESCQ_LABEL, tx_queue->label,
                              FRF_AZ_TX_DESCQ_SIZE,
                              __ffs(tx_queue->txd.entries),
                              FRF_AZ_TX_DESCQ_TYPE, 0,
@@ -409,7 +409,7 @@ void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
 
        EFX_POPULATE_OWORD_1(reg,
                             FRF_BZ_TX_PACE,
-                            (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+                            (tx_queue->label & EFX_TXQ_TYPE_HIGHPRI) ?
                             FFE_BZ_TX_PACE_OFF :
                             FFE_BZ_TX_PACE_RESERVED);
        efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue);
index a8cc388..6c49740 100644 (file)
@@ -1299,6 +1299,14 @@ static void efx_mcdi_abandon(struct efx_nic *efx)
        efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
 }
 
+static void efx_handle_drain_event(struct efx_nic *efx)
+{
+       if (atomic_dec_and_test(&efx->active_queues))
+               wake_up(&efx->flush_wq);
+
+       WARN_ON(atomic_read(&efx->active_queues) < 0);
+}
+
 /* Called from efx_farch_ev_process and efx_ef10_ev_process for MCDI events */
 void efx_mcdi_process_event(struct efx_channel *channel,
                            efx_qword_t *event)
@@ -1371,7 +1379,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
                BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
                             MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
                if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
-                       efx_ef10_handle_drain_event(efx);
+                       efx_handle_drain_event(efx);
                break;
        case MCDI_EVENT_CODE_TX_ERR:
        case MCDI_EVENT_CODE_RX_ERR:
@@ -1613,6 +1621,35 @@ fail:
        return rc;
 }
 
+/* This function finds types using the new NVRAM_PARTITIONS mcdi. */
+static int efx_new_mcdi_nvram_types(struct efx_nic *efx, u32 *number,
+                                   u32 *nvram_types)
+{
+       efx_dword_t *outbuf = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2,
+                                     GFP_KERNEL);
+       size_t outlen;
+       int rc;
+
+       if (!outbuf)
+               return -ENOMEM;
+
+       BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
+
+       rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
+                         outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2, &outlen);
+       if (rc)
+               goto fail;
+
+       *number = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
+
+       memcpy(nvram_types, MCDI_PTR(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID),
+              *number * sizeof(u32));
+
+fail:
+       kfree(outbuf);
+       return rc;
+}
+
 int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
                        size_t *size_out, size_t *erase_size_out,
                        bool *protected_out)
@@ -1666,6 +1703,39 @@ static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
        }
 }
 
+/* This function tests nvram partitions using the new mcdi partition lookup scheme */
+int efx_new_mcdi_nvram_test_all(struct efx_nic *efx)
+{
+       u32 *nvram_types = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2,
+                                  GFP_KERNEL);
+       unsigned int number;
+       int rc, i;
+
+       if (!nvram_types)
+               return -ENOMEM;
+
+       rc = efx_new_mcdi_nvram_types(efx, &number, nvram_types);
+       if (rc)
+               goto fail;
+
+       /* Require at least one check */
+       rc = -EAGAIN;
+
+       for (i = 0; i < number; i++) {
+               if (nvram_types[i] == NVRAM_PARTITION_TYPE_PARTITION_MAP ||
+                   nvram_types[i] == NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG)
+                       continue;
+
+               rc = efx_mcdi_nvram_test(efx, nvram_types[i]);
+               if (rc)
+                       goto fail;
+       }
+
+fail:
+       kfree(nvram_types);
+       return rc;
+}
+
 int efx_mcdi_nvram_test_all(struct efx_nic *efx)
 {
        u32 nvram_types;
index b107e4c..e053adf 100644 (file)
@@ -332,10 +332,9 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
 #define MCDI_CAPABILITY_OFST(field) \
        MC_CMD_GET_CAPABILITIES_V4_OUT_ ## field ## _OFST
 
-/* field is FLAGS1 or FLAGS2 */
-#define efx_has_cap(efx, flag, field) \
+#define efx_has_cap(efx, field) \
        efx->type->check_caps(efx, \
-                             MCDI_CAPABILITY(flag), \
+                             MCDI_CAPABILITY(field), \
                              MCDI_CAPABILITY_OFST(field))
 
 void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
@@ -346,6 +345,7 @@ int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
 int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
                        size_t *size_out, size_t *erase_size_out,
                        bool *protected_out);
+int efx_new_mcdi_nvram_test_all(struct efx_nic *efx);
 int efx_mcdi_nvram_test_all(struct efx_nic *efx);
 int efx_mcdi_handle_assertion(struct efx_nic *efx);
 void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
@@ -355,15 +355,11 @@ int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
 int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
 int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
 int efx_mcdi_flush_rxqs(struct efx_nic *efx);
-int efx_mcdi_port_probe(struct efx_nic *efx);
-void efx_mcdi_port_remove(struct efx_nic *efx);
 int efx_mcdi_port_reconfigure(struct efx_nic *efx);
-u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
 void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
 void efx_mcdi_mac_start_stats(struct efx_nic *efx);
 void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
 void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
-bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
 enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
 int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
 int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
index 455a628..5a74d88 100644 (file)
@@ -1,3 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
 #include "mcdi_filters.h"
 #include "mcdi.h"
 #include "nic.h"
@@ -828,7 +839,7 @@ static int efx_mcdi_filter_insert_def(struct efx_nic *efx,
                efx_filter_set_uc_def(&spec);
 
        if (encap_type) {
-               if (efx_has_cap(efx, VXLAN_NVGRE, FLAGS1))
+               if (efx_has_cap(efx, VXLAN_NVGRE))
                        efx_filter_set_encap_type(&spec, encap_type);
                else
                        /*
@@ -1304,7 +1315,7 @@ int efx_mcdi_filter_table_probe(struct efx_nic *efx, bool multicast_chaining)
        rc = efx_mcdi_filter_table_probe_matches(efx, table, false);
        if (rc)
                goto fail;
-       if (efx_has_cap(efx, VXLAN_NVGRE, FLAGS1))
+       if (efx_has_cap(efx, VXLAN_NVGRE))
                rc = efx_mcdi_filter_table_probe_matches(efx, table, true);
        if (rc)
                goto fail;
@@ -1448,7 +1459,7 @@ not_restored:
                table->must_restore_filters = false;
 }
 
-void efx_mcdi_filter_table_remove(struct efx_nic *efx)
+void efx_mcdi_filter_table_down(struct efx_nic *efx)
 {
        struct efx_mcdi_filter_table *table = efx->filter_state;
        MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
@@ -1456,21 +1467,11 @@ void efx_mcdi_filter_table_remove(struct efx_nic *efx)
        unsigned int filter_idx;
        int rc;
 
-       efx_mcdi_filter_cleanup_vlans(efx);
-       efx->filter_state = NULL;
-       /*
-        * If we were called without locking, then it's not safe to free
-        * the table as others might be using it.  So we just WARN, leak
-        * the memory, and potentially get an inconsistent filter table
-        * state.
-        * This should never actually happen.
-        */
-       if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
-               return;
-
        if (!table)
                return;
 
+       efx_mcdi_filter_cleanup_vlans(efx);
+
        for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
                spec = efx_mcdi_filter_entry_spec(table, filter_idx);
                if (!spec)
@@ -1490,6 +1491,27 @@ void efx_mcdi_filter_table_remove(struct efx_nic *efx)
                                   __func__, filter_idx);
                kfree(spec);
        }
+}
+
+void efx_mcdi_filter_table_remove(struct efx_nic *efx)
+{
+       struct efx_mcdi_filter_table *table = efx->filter_state;
+
+       efx_mcdi_filter_table_down(efx);
+
+       efx->filter_state = NULL;
+       /*
+        * If we were called without locking, then it's not safe to free
+        * the table as others might be using it.  So we just WARN, leak
+        * the memory, and potentially get an inconsistent filter table
+        * state.
+        * This should never actually happen.
+        */
+       if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+               return;
+
+       if (!table)
+               return;
 
        vfree(table->entry);
        kfree(table);
@@ -1927,7 +1949,7 @@ static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive
                return 0;
        }
 
-       if (efx_has_cap(efx, RX_RSS_LIMITED, FLAGS1))
+       if (efx_has_cap(efx, RX_RSS_LIMITED))
                return -EOPNOTSUPP;
 
        MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
@@ -1948,7 +1970,7 @@ static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive
        if (context_size)
                *context_size = rss_spread;
 
-       if (efx_has_cap(efx, ADDITIONAL_RSS_MODES, FLAGS1))
+       if (efx_has_cap(efx, ADDITIONAL_RSS_MODES))
                efx_mcdi_set_rss_context_flags(efx, ctx);
 
        return 0;
@@ -2254,3 +2276,24 @@ int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
                return 0;
        return efx_mcdi_filter_rx_push_shared_rss_config(efx, NULL);
 }
+
+int efx_mcdi_push_default_indir_table(struct efx_nic *efx,
+                                     unsigned int rss_spread)
+{
+       int rc = 0;
+
+       if (efx->rss_spread == rss_spread)
+               return 0;
+
+       efx->rss_spread = rss_spread;
+       if (!efx->filter_state)
+               return 0;
+
+       efx_mcdi_rx_free_indir_table(efx);
+       if (rss_spread > 1) {
+               efx_set_default_rx_indir_table(efx, &efx->rss_context);
+               rc = efx->type->rx_push_rss_config(efx, false,
+                                  efx->rss_context.rx_indir_table, NULL);
+       }
+       return rc;
+}
index 03a8bf7..06426aa 100644 (file)
@@ -93,6 +93,7 @@ struct efx_mcdi_filter_table {
 };
 
 int efx_mcdi_filter_table_probe(struct efx_nic *efx, bool multicast_chaining);
+void efx_mcdi_filter_table_down(struct efx_nic *efx);
 void efx_mcdi_filter_table_remove(struct efx_nic *efx);
 void efx_mcdi_filter_table_restore(struct efx_nic *efx);
 
@@ -154,6 +155,8 @@ int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
                                   __attribute__ ((unused)),
                                   const u8 *key
                                   __attribute__ ((unused)));
+int efx_mcdi_push_default_indir_table(struct efx_nic *efx,
+                                     unsigned int rss_spread);
 int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx);
 int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
                                        struct efx_rss_context *ctx);
index 962d839..d8a3af8 100644 (file)
@@ -164,7 +164,7 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
                                                       EFX_BUF_SIZE));
-       bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+       bool csum_offload = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD;
        size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
        struct efx_channel *channel = tx_queue->channel;
        struct efx_nic *efx = tx_queue->efx;
@@ -176,7 +176,7 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
 
        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
-       MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
+       MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->label);
        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
        MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, efx->vport_id);
@@ -267,20 +267,22 @@ int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue)
 
 void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
 {
-       MCDI_DECLARE_BUF(inbuf,
-                        MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
-                                               EFX_BUF_SIZE));
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_RXQ_V4_IN_LEN);
        struct efx_nic *efx = rx_queue->efx;
+       unsigned int buffer_size;
        dma_addr_t dma_addr;
-       size_t inlen;
        int rc;
        int i;
        BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
 
        rx_queue->scatter_n = 0;
        rx_queue->scatter_len = 0;
+       if (efx->type->revision == EFX_REV_EF100)
+               buffer_size = efx->rx_page_buf_step;
+       else
+               buffer_size = 0;
 
        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
@@ -292,6 +294,7 @@ void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
                              INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
        MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, efx->vport_id);
+       MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES, buffer_size);
 
        dma_addr = rx_queue->rxd.buf.dma_addr;
 
@@ -303,9 +306,7 @@ void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
                dma_addr += EFX_BUF_SIZE;
        }
 
-       inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
-
-       rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
+       rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, sizeof(inbuf),
                          NULL, 0, NULL);
        if (rc)
                netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
@@ -341,6 +342,44 @@ fail:
                               outbuf, outlen, rc);
 }
 
+int efx_fini_dmaq(struct efx_nic *efx)
+{
+       struct efx_tx_queue *tx_queue;
+       struct efx_rx_queue *rx_queue;
+       struct efx_channel *channel;
+       int pending;
+
+       /* If the MC has just rebooted, the TX/RX queues will have already been
+        * torn down, but efx->active_queues needs to be set to zero.
+        */
+       if (efx->must_realloc_vis) {
+               atomic_set(&efx->active_queues, 0);
+               return 0;
+       }
+
+       /* Do not attempt to write to the NIC during EEH recovery */
+       if (efx->state != STATE_RECOVERY) {
+               efx_for_each_channel(channel, efx) {
+                       efx_for_each_channel_rx_queue(rx_queue, channel)
+                               efx_mcdi_rx_fini(rx_queue);
+                       efx_for_each_channel_tx_queue(tx_queue, channel)
+                               efx_mcdi_tx_fini(tx_queue);
+               }
+
+               wait_event_timeout(efx->flush_wq,
+                                  atomic_read(&efx->active_queues) == 0,
+                                  msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
+               pending = atomic_read(&efx->active_queues);
+               if (pending) {
+                       netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
+                                 pending);
+                       return -ETIMEDOUT;
+               }
+       }
+
+       return 0;
+}
+
 int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode)
 {
        switch (vi_window_mode) {
index ca4a5ac..687be8b 100644 (file)
@@ -26,6 +26,7 @@ int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue);
 void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue);
 void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue);
 void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue);
+int efx_fini_dmaq(struct efx_nic *efx);
 int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode);
 int efx_get_pf_index(struct efx_nic *efx, unsigned int *pf_index);
 
index 79d834a..d3fcbf9 100644 (file)
@@ -1,7 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /****************************************************************************
  * Driver for Solarflare network controllers and boards
- * Copyright 2009-2013 Solarflare Communications Inc.
+ * Copyright 2009-2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
  */
 
 
 #define          MCDI_EVENT_LEVEL_FATAL 0x3
 #define       MCDI_EVENT_DATA_OFST 0
 #define       MCDI_EVENT_DATA_LEN 4
+#define        MCDI_EVENT_CMDDONE_SEQ_OFST 0
 #define        MCDI_EVENT_CMDDONE_SEQ_LBN 0
 #define        MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
+#define        MCDI_EVENT_CMDDONE_DATALEN_OFST 0
 #define        MCDI_EVENT_CMDDONE_DATALEN_LBN 8
 #define        MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8
+#define        MCDI_EVENT_CMDDONE_ERRNO_OFST 0
 #define        MCDI_EVENT_CMDDONE_ERRNO_LBN 16
 #define        MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8
+#define        MCDI_EVENT_LINKCHANGE_LP_CAP_OFST 0
 #define        MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0
 #define        MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
+#define        MCDI_EVENT_LINKCHANGE_SPEED_OFST 0
 #define        MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
 #define        MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
 /* enum: Link is down or link speed could not be determined */
 #define          MCDI_EVENT_LINKCHANGE_SPEED_50G 0x6
 /* enum: 100Gbs */
 #define          MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7
+#define        MCDI_EVENT_LINKCHANGE_FCNTL_OFST 0
 #define        MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
 #define        MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
+#define        MCDI_EVENT_LINKCHANGE_LINK_FLAGS_OFST 0
 #define        MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
 #define        MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
+#define        MCDI_EVENT_SENSOREVT_MONITOR_OFST 0
 #define        MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
 #define        MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
+#define        MCDI_EVENT_SENSOREVT_STATE_OFST 0
 #define        MCDI_EVENT_SENSOREVT_STATE_LBN 8
 #define        MCDI_EVENT_SENSOREVT_STATE_WIDTH 8
+#define        MCDI_EVENT_SENSOREVT_VALUE_OFST 0
 #define        MCDI_EVENT_SENSOREVT_VALUE_LBN 16
 #define        MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16
+#define        MCDI_EVENT_FWALERT_DATA_OFST 0
 #define        MCDI_EVENT_FWALERT_DATA_LBN 8
 #define        MCDI_EVENT_FWALERT_DATA_WIDTH 24
+#define        MCDI_EVENT_FWALERT_REASON_OFST 0
 #define        MCDI_EVENT_FWALERT_REASON_LBN 0
 #define        MCDI_EVENT_FWALERT_REASON_WIDTH 8
 /* enum: SRAM Access. */
 #define          MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1
+#define        MCDI_EVENT_FLR_VF_OFST 0
 #define        MCDI_EVENT_FLR_VF_LBN 0
 #define        MCDI_EVENT_FLR_VF_WIDTH 8
+#define        MCDI_EVENT_TX_ERR_TXQ_OFST 0
 #define        MCDI_EVENT_TX_ERR_TXQ_LBN 0
 #define        MCDI_EVENT_TX_ERR_TXQ_WIDTH 12
+#define        MCDI_EVENT_TX_ERR_TYPE_OFST 0
 #define        MCDI_EVENT_TX_ERR_TYPE_LBN 12
 #define        MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
 /* enum: Descriptor loader reported failure */
 #define          MCDI_EVENT_TX_OPT_IN_PKT 0x8
 /* enum: DMA or PIO data access error */
 #define          MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9
+#define        MCDI_EVENT_TX_ERR_INFO_OFST 0
 #define        MCDI_EVENT_TX_ERR_INFO_LBN 16
 #define        MCDI_EVENT_TX_ERR_INFO_WIDTH 16
+#define        MCDI_EVENT_TX_FLUSH_TO_DRIVER_OFST 0
 #define        MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12
 #define        MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1
+#define        MCDI_EVENT_TX_FLUSH_TXQ_OFST 0
 #define        MCDI_EVENT_TX_FLUSH_TXQ_LBN 0
 #define        MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12
+#define        MCDI_EVENT_PTP_ERR_TYPE_OFST 0
 #define        MCDI_EVENT_PTP_ERR_TYPE_LBN 0
 #define        MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8
 /* enum: PLL lost lock */
 #define          MCDI_EVENT_PTP_ERR_FIFO 0x3
 /* enum: Merge queue overflow */
 #define          MCDI_EVENT_PTP_ERR_QUEUE 0x4
+#define        MCDI_EVENT_AOE_ERR_TYPE_OFST 0
 #define        MCDI_EVENT_AOE_ERR_TYPE_LBN 0
 #define        MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8
 /* enum: AOE failed to load - no valid image? */
 #define          MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13
 /* enum: Notify that FPGA Controller is alive to serve MCDI requests */
 #define          MCDI_EVENT_AOE_FC_RUNNING 0x14
+#define        MCDI_EVENT_AOE_ERR_DATA_OFST 0
 #define        MCDI_EVENT_AOE_ERR_DATA_LBN 8
 #define        MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define        MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_OFST 0
 #define        MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8
 #define        MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
 /* enum: FC Assert happened, but the register information is not available */
 /* enum: The register information for FC Assert is ready for readinng by driver
  */
 #define          MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_OFST 0
 #define        MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8
 #define        MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8
 /* enum: Reading from NV failed */
 #define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7
 /* enum: Unsupported DDR rank */
 #define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8
+#define        MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_OFST 0
 #define        MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8
 #define        MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8
 /* enum: Primary boot flash */
 #define          MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0
 /* enum: Secondary boot flash */
 #define          MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_OFST 0
 #define        MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8
 #define        MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_OFST 0
 #define        MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8
 #define        MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8
+#define        MCDI_EVENT_RX_ERR_RXQ_OFST 0
 #define        MCDI_EVENT_RX_ERR_RXQ_LBN 0
 #define        MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
+#define        MCDI_EVENT_RX_ERR_TYPE_OFST 0
 #define        MCDI_EVENT_RX_ERR_TYPE_LBN 12
 #define        MCDI_EVENT_RX_ERR_TYPE_WIDTH 4
+#define        MCDI_EVENT_RX_ERR_INFO_OFST 0
 #define        MCDI_EVENT_RX_ERR_INFO_LBN 16
 #define        MCDI_EVENT_RX_ERR_INFO_WIDTH 16
+#define        MCDI_EVENT_RX_FLUSH_TO_DRIVER_OFST 0
 #define        MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12
 #define        MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1
+#define        MCDI_EVENT_RX_FLUSH_RXQ_OFST 0
 #define        MCDI_EVENT_RX_FLUSH_RXQ_LBN 0
 #define        MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
+#define        MCDI_EVENT_MC_REBOOT_COUNT_OFST 0
 #define        MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
 #define        MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
+#define        MCDI_EVENT_MUM_ERR_TYPE_OFST 0
 #define        MCDI_EVENT_MUM_ERR_TYPE_LBN 0
 #define        MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8
 /* enum: MUM failed to load - no valid image? */
 #define          MCDI_EVENT_MUM_ASSERT 0x2
 /* enum: MUM not kicking watchdog */
 #define          MCDI_EVENT_MUM_WATCHDOG 0x3
+#define        MCDI_EVENT_MUM_ERR_DATA_OFST 0
 #define        MCDI_EVENT_MUM_ERR_DATA_LBN 8
 #define        MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
+#define        MCDI_EVENT_DBRET_SEQ_OFST 0
 #define        MCDI_EVENT_DBRET_SEQ_LBN 0
 #define        MCDI_EVENT_DBRET_SEQ_WIDTH 8
+#define        MCDI_EVENT_SUC_ERR_TYPE_OFST 0
 #define        MCDI_EVENT_SUC_ERR_TYPE_LBN 0
 #define        MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8
 /* enum: Corrupted or bad SUC application. */
 #define          MCDI_EVENT_SUC_EXCEPTION 0x3
 /* enum: SUC watchdog timer expired. */
 #define          MCDI_EVENT_SUC_WATCHDOG 0x4
+#define        MCDI_EVENT_SUC_ERR_ADDRESS_OFST 0
 #define        MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8
 #define        MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24
+#define        MCDI_EVENT_SUC_ERR_DATA_OFST 0
 #define        MCDI_EVENT_SUC_ERR_DATA_LBN 8
 #define        MCDI_EVENT_SUC_ERR_DATA_WIDTH 24
+#define        MCDI_EVENT_LINKCHANGE_V2_LP_CAP_OFST 0
+#define        MCDI_EVENT_LINKCHANGE_V2_LP_CAP_LBN 0
+#define        MCDI_EVENT_LINKCHANGE_V2_LP_CAP_WIDTH 24
+#define        MCDI_EVENT_LINKCHANGE_V2_SPEED_OFST 0
+#define        MCDI_EVENT_LINKCHANGE_V2_SPEED_LBN 24
+#define        MCDI_EVENT_LINKCHANGE_V2_SPEED_WIDTH 4
+/*             Enum values, see field(s): */
+/*                MCDI_EVENT/LINKCHANGE_SPEED */
+#define        MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_OFST 0
+#define        MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_LBN 28
+#define        MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_WIDTH 1
+#define        MCDI_EVENT_LINKCHANGE_V2_FCNTL_OFST 0
+#define        MCDI_EVENT_LINKCHANGE_V2_FCNTL_LBN 29
+#define        MCDI_EVENT_LINKCHANGE_V2_FCNTL_WIDTH 3
+/*             Enum values, see field(s): */
+/*                MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
+#define        MCDI_EVENT_MODULECHANGE_LD_CAP_OFST 0
+#define        MCDI_EVENT_MODULECHANGE_LD_CAP_LBN 0
+#define        MCDI_EVENT_MODULECHANGE_LD_CAP_WIDTH 30
+#define        MCDI_EVENT_MODULECHANGE_SEQ_OFST 0
+#define        MCDI_EVENT_MODULECHANGE_SEQ_LBN 30
+#define        MCDI_EVENT_MODULECHANGE_SEQ_WIDTH 2
 #define       MCDI_EVENT_DATA_LBN 0
 #define       MCDI_EVENT_DATA_WIDTH 32
+/* Alias for PTP_DATA. */
 #define       MCDI_EVENT_SRC_LBN 36
 #define       MCDI_EVENT_SRC_WIDTH 8
+/* Data associated with PTP events which doesn't fit into the main DATA field
+ */
+#define       MCDI_EVENT_PTP_DATA_LBN 36
+#define       MCDI_EVENT_PTP_DATA_WIDTH 8
+/* EF100 specific. Defined by QDMA. The phase bit, changes each time round the
+ * event ring
+ */
+#define       MCDI_EVENT_EV_EVQ_PHASE_LBN 59
+#define       MCDI_EVENT_EV_EVQ_PHASE_WIDTH 1
 #define       MCDI_EVENT_EV_CODE_LBN 60
 #define       MCDI_EVENT_EV_CODE_WIDTH 4
 #define       MCDI_EVENT_CODE_LBN 44
 #define          MCDI_EVENT_CODE_DBRET 0x1e
 /* enum: The MC has detected a fault on the SUC */
 #define          MCDI_EVENT_CODE_SUC 0x1f
+/* enum: Link change. This event is sent instead of LINKCHANGE if
+ * WANT_V2_LINKCHANGES was set on driver attach.
+ */
+#define          MCDI_EVENT_CODE_LINKCHANGE_V2 0x20
+/* enum: This event is sent if WANT_V2_LINKCHANGES was set on driver attach
+ * when the local device capabilities changes. This will usually correspond to
+ * a module change.
+ */
+#define          MCDI_EVENT_CODE_MODULECHANGE 0x21
+/* enum: Notification that the sensors have been added and/or removed from the
+ * sensor table. This event includes the new sensor table generation count, if
+ * this does not match the driver's local copy it is expected to call
+ * DYNAMIC_SENSORS_LIST to refresh it.
+ */
+#define          MCDI_EVENT_CODE_DYNAMIC_SENSORS_CHANGE 0x22
+/* enum: Notification that a sensor has changed state as a result of a reading
+ * crossing a threshold. This is sent as two events, the first event contains
+ * the handle and the sensor's state (in the SRC field), and the second
+ * contains the value.
+ */
+#define          MCDI_EVENT_CODE_DYNAMIC_SENSORS_STATE_CHANGE 0x23
+/* enum: Notification that a descriptor proxy function configuration has been
+ * pushed to "live" status (visible to host). SRC field contains the handle of
+ * the affected descriptor proxy function. DATA field contains the generation
+ * count of configuration set applied. See MC_CMD_DESC_PROXY_FUNC_CONFIG_SET /
+ * MC_CMD_DESC_PROXY_FUNC_CONFIG_COMMIT and SF-122927-TC for details.
+ */
+#define          MCDI_EVENT_CODE_DESC_PROXY_FUNC_CONFIG_COMMITTED 0x24
+/* enum: Notification that a descriptor proxy function has been reset. SRC
+ * field contains the handle of the affected descriptor proxy function. See
+ * SF-122927-TC for details.
+ */
+#define          MCDI_EVENT_CODE_DESC_PROXY_FUNC_RESET 0x25
+/* enum: Notification that a driver attached to a descriptor proxy function.
+ * SRC field contains the handle of the affected descriptor proxy function. For
+ * Virtio proxy functions this message consists of two MCDI events, where the
+ * first event's (CONT=1) DATA field carries negotiated virtio feature bits 0
+ * to 31 and the second (CONT=0) carries bits 32 to 63. For EF100 proxy
+ * functions event length and meaning of DATA field is not yet defined. See
+ * SF-122927-TC for details.
+ */
+#define          MCDI_EVENT_CODE_DESC_PROXY_FUNC_DRIVER_ATTACH 0x26
 /* enum: Artificial event generated by host and posted via MC for test
  * purposes.
  */
 #define       MCDI_EVENT_DBRET_DATA_LEN 4
 #define       MCDI_EVENT_DBRET_DATA_LBN 0
 #define       MCDI_EVENT_DBRET_DATA_WIDTH 32
+#define       MCDI_EVENT_LINKCHANGE_V2_DATA_OFST 0
+#define       MCDI_EVENT_LINKCHANGE_V2_DATA_LEN 4
+#define       MCDI_EVENT_LINKCHANGE_V2_DATA_LBN 0
+#define       MCDI_EVENT_LINKCHANGE_V2_DATA_WIDTH 32
+#define       MCDI_EVENT_MODULECHANGE_DATA_OFST 0
+#define       MCDI_EVENT_MODULECHANGE_DATA_LEN 4
+#define       MCDI_EVENT_MODULECHANGE_DATA_LBN 0
+#define       MCDI_EVENT_MODULECHANGE_DATA_WIDTH 32
+/* The new generation count after a sensor has been added or deleted. */
+#define       MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_OFST 0
+#define       MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_LEN 4
+#define       MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_LBN 0
+#define       MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_WIDTH 32
+/* The handle of a dynamic sensor. */
+#define       MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_OFST 0
+#define       MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_LEN 4
+#define       MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_LBN 0
+#define       MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_WIDTH 32
+/* The current values of a sensor. */
+#define       MCDI_EVENT_DYNAMIC_SENSORS_VALUE_OFST 0
+#define       MCDI_EVENT_DYNAMIC_SENSORS_VALUE_LEN 4
+#define       MCDI_EVENT_DYNAMIC_SENSORS_VALUE_LBN 0
+#define       MCDI_EVENT_DYNAMIC_SENSORS_VALUE_WIDTH 32
+/* The current state of a sensor. */
+#define       MCDI_EVENT_DYNAMIC_SENSORS_STATE_LBN 36
+#define       MCDI_EVENT_DYNAMIC_SENSORS_STATE_WIDTH 8
+#define       MCDI_EVENT_DESC_PROXY_DATA_OFST 0
+#define       MCDI_EVENT_DESC_PROXY_DATA_LEN 4
+#define       MCDI_EVENT_DESC_PROXY_DATA_LBN 0
+#define       MCDI_EVENT_DESC_PROXY_DATA_WIDTH 32
+/* Generation count of applied configuration set */
+#define       MCDI_EVENT_DESC_PROXY_GENERATION_OFST 0
+#define       MCDI_EVENT_DESC_PROXY_GENERATION_LEN 4
+#define       MCDI_EVENT_DESC_PROXY_GENERATION_LBN 0
+#define       MCDI_EVENT_DESC_PROXY_GENERATION_WIDTH 32
+/* Virtio features negotiated with the host driver. First event (CONT=1)
+ * carries bits 0 to 31. Second event (CONT=0) carries bits 32 to 63.
+ */
+#define       MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_OFST 0
+#define       MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LEN 4
+#define       MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LBN 0
+#define       MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_WIDTH 32
 
 /* FCDI_EVENT structuredef */
 #define    FCDI_EVENT_LEN 8
 #define          FCDI_EVENT_LEVEL_FATAL 0x3
 #define       FCDI_EVENT_DATA_OFST 0
 #define       FCDI_EVENT_DATA_LEN 4
+#define        FCDI_EVENT_LINK_STATE_STATUS_OFST 0
 #define        FCDI_EVENT_LINK_STATE_STATUS_LBN 0
 #define        FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
 #define          FCDI_EVENT_LINK_DOWN 0x0 /* enum */
  */
 #define    FCDI_EXTENDED_EVENT_PPS_LENMIN 16
 #define    FCDI_EXTENDED_EVENT_PPS_LENMAX 248
+#define    FCDI_EXTENDED_EVENT_PPS_LENMAX_MCDI2 1016
 #define    FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
+#define    FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(len) (((len)-8)/8)
 /* Number of timestamps following */
 #define       FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
 #define       FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
 #define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
 #define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
 #define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
+#define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM_MCDI2 126
 #define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
 #define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
 
 #define          MUM_EVENT_LEVEL_FATAL 0x3
 #define       MUM_EVENT_DATA_OFST 0
 #define       MUM_EVENT_DATA_LEN 4
+#define        MUM_EVENT_SENSOR_ID_OFST 0
 #define        MUM_EVENT_SENSOR_ID_LBN 0
 #define        MUM_EVENT_SENSOR_ID_WIDTH 8
 /*             Enum values, see field(s): */
 /*                MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define        MUM_EVENT_SENSOR_STATE_OFST 0
 #define        MUM_EVENT_SENSOR_STATE_LBN 8
 #define        MUM_EVENT_SENSOR_STATE_WIDTH 8
+#define        MUM_EVENT_PORT_PHY_READY_OFST 0
 #define        MUM_EVENT_PORT_PHY_READY_LBN 0
 #define        MUM_EVENT_PORT_PHY_READY_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_LINK_UP_OFST 0
 #define        MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
 #define        MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_TX_LOL_OFST 0
 #define        MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
 #define        MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_RX_LOL_OFST 0
 #define        MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
 #define        MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_TX_LOS_OFST 0
 #define        MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
 #define        MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_RX_LOS_OFST 0
 #define        MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
 #define        MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_TX_FAULT_OFST 0
 #define        MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
 #define        MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
 #define       MUM_EVENT_DATA_LBN 0
  * has additional checks to reject insecure calls.
  */
 #define MC_CMD_READ32 0x1
+#undef MC_CMD_0x1_PRIVILEGE_CTG
 
 #define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_READ32_OUT msgresponse */
 #define    MC_CMD_READ32_OUT_LENMIN 4
 #define    MC_CMD_READ32_OUT_LENMAX 252
+#define    MC_CMD_READ32_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_READ32_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4)
 #define       MC_CMD_READ32_OUT_BUFFER_OFST 0
 #define       MC_CMD_READ32_OUT_BUFFER_LEN 4
 #define       MC_CMD_READ32_OUT_BUFFER_MINNUM 1
 #define       MC_CMD_READ32_OUT_BUFFER_MAXNUM 63
+#define       MC_CMD_READ32_OUT_BUFFER_MAXNUM_MCDI2 255
 
 
 /***********************************/
  * Write multiple 32byte words to MC memory.
  */
 #define MC_CMD_WRITE32 0x2
+#undef MC_CMD_0x2_PRIVILEGE_CTG
 
 #define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_WRITE32_IN msgrequest */
 #define    MC_CMD_WRITE32_IN_LENMIN 8
 #define    MC_CMD_WRITE32_IN_LENMAX 252
+#define    MC_CMD_WRITE32_IN_LENMAX_MCDI2 1020
 #define    MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
+#define    MC_CMD_WRITE32_IN_BUFFER_NUM(len) (((len)-4)/4)
 #define       MC_CMD_WRITE32_IN_ADDR_OFST 0
 #define       MC_CMD_WRITE32_IN_ADDR_LEN 4
 #define       MC_CMD_WRITE32_IN_BUFFER_OFST 4
 #define       MC_CMD_WRITE32_IN_BUFFER_LEN 4
 #define       MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
 #define       MC_CMD_WRITE32_IN_BUFFER_MAXNUM 62
+#define       MC_CMD_WRITE32_IN_BUFFER_MAXNUM_MCDI2 254
 
 /* MC_CMD_WRITE32_OUT msgresponse */
 #define    MC_CMD_WRITE32_OUT_LEN 0
  * has additional checks to reject insecure calls.
  */
 #define MC_CMD_COPYCODE 0x3
+#undef MC_CMD_0x3_PRIVILEGE_CTG
 
 #define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * below)
  */
 #define          MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_OFST 0
 #define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
 #define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_OFST 0
 #define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
 #define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_OFST 0
 #define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
 #define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_OFST 0
 #define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
 #define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_OFST 0
 #define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
 #define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_OFST 0
 #define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6
 #define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
 /* Destination address */
  * Select function for function-specific commands.
  */
 #define MC_CMD_SET_FUNC 0x4
+#undef MC_CMD_0x4_PRIVILEGE_CTG
 
 #define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
  * Get the instruction address from which the MC booted.
  */
 #define MC_CMD_GET_BOOT_STATUS 0x5
+#undef MC_CMD_0x5_PRIVILEGE_CTG
 
 #define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 #define          MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
 #define       MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
 #define       MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4
+#define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_OFST 4
 #define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
 #define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
+#define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_OFST 4
 #define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
 #define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_WIDTH 1
+#define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_OFST 4
 #define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_LBN 2
 #define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_WIDTH 1
 
  * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS
  */
 #define MC_CMD_GET_ASSERTS 0x6
+#undef MC_CMD_0x6_PRIVILEGE_CTG
 
 #define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 #define       MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
 #define       MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4
 
+/* MC_CMD_GET_ASSERTS_OUT_V2 msgresponse: Extended response for MicroBlaze CPUs
+ * found on Riverhead designs
+ */
+#define    MC_CMD_GET_ASSERTS_OUT_V2_LEN 240
+/* Assertion status flag. */
+#define       MC_CMD_GET_ASSERTS_OUT_V2_GLOBAL_FLAGS_OFST 0
+#define       MC_CMD_GET_ASSERTS_OUT_V2_GLOBAL_FLAGS_LEN 4
+/* enum: No assertions have failed. */
+/*               MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 */
+/* enum: A system-level assertion has failed. */
+/*               MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 */
+/* enum: A thread-level assertion has failed. */
+/*               MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 */
+/* enum: The system was reset by the watchdog. */
+/*               MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 */
+/* enum: An illegal address trap stopped the system (huntington and later) */
+/*               MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 */
+/* Failing PC value */
+#define       MC_CMD_GET_ASSERTS_OUT_V2_SAVED_PC_OFFS_OFST 4
+#define       MC_CMD_GET_ASSERTS_OUT_V2_SAVED_PC_OFFS_LEN 4
+/* Saved GP regs */
+#define       MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_OFST 8
+#define       MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_LEN 4
+#define       MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+/*               MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 */
+/* Failing thread address */
+#define       MC_CMD_GET_ASSERTS_OUT_V2_THREAD_OFFS_OFST 132
+#define       MC_CMD_GET_ASSERTS_OUT_V2_THREAD_OFFS_LEN 4
+#define       MC_CMD_GET_ASSERTS_OUT_V2_RESERVED_OFST 136
+#define       MC_CMD_GET_ASSERTS_OUT_V2_RESERVED_LEN 4
+/* Saved Special Function Registers */
+#define       MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_OFST 136
+#define       MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_LEN 4
+#define       MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_NUM 26
+
+/* MC_CMD_GET_ASSERTS_OUT_V3 msgresponse: Extended response with asserted
+ * firmware version information
+ */
+#define    MC_CMD_GET_ASSERTS_OUT_V3_LEN 360
+/* Assertion status flag. */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_GLOBAL_FLAGS_OFST 0
+#define       MC_CMD_GET_ASSERTS_OUT_V3_GLOBAL_FLAGS_LEN 4
+/* enum: No assertions have failed. */
+/*               MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 */
+/* enum: A system-level assertion has failed. */
+/*               MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 */
+/* enum: A thread-level assertion has failed. */
+/*               MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 */
+/* enum: The system was reset by the watchdog. */
+/*               MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 */
+/* enum: An illegal address trap stopped the system (huntington and later) */
+/*               MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 */
+/* Failing PC value */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_SAVED_PC_OFFS_OFST 4
+#define       MC_CMD_GET_ASSERTS_OUT_V3_SAVED_PC_OFFS_LEN 4
+/* Saved GP regs */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_OFST 8
+#define       MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_LEN 4
+#define       MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+/*               MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 */
+/* Failing thread address */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_THREAD_OFFS_OFST 132
+#define       MC_CMD_GET_ASSERTS_OUT_V3_THREAD_OFFS_LEN 4
+#define       MC_CMD_GET_ASSERTS_OUT_V3_RESERVED_OFST 136
+#define       MC_CMD_GET_ASSERTS_OUT_V3_RESERVED_LEN 4
+/* Saved Special Function Registers */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_OFST 136
+#define       MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_LEN 4
+#define       MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_NUM 26
+/* MC firmware unique build ID (as binary SHA-1 value) */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_ID_OFST 240
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_ID_LEN 20
+/* MC firmware build date (as Unix timestamp) */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_OFST 260
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LEN 8
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_OFST 260
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_OFST 264
+/* MC firmware version number */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_OFST 268
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LEN 8
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_OFST 268
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_OFST 272
+/* MC firmware security level */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_OFST 276
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_LEN 4
+/* MC firmware extra version info (as null-terminated US-ASCII string) */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_EXTRA_INFO_OFST 280
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_EXTRA_INFO_LEN 16
+/* MC firmware build name (as null-terminated US-ASCII string) */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_NAME_OFST 296
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_NAME_LEN 64
+
 
 /***********************************/
 /* MC_CMD_LOG_CTRL
  * sensor notifications and MCDI completions
  */
 #define MC_CMD_LOG_CTRL 0x7
+#undef MC_CMD_0x7_PRIVILEGE_CTG
 
 #define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 
 /***********************************/
 /* MC_CMD_GET_VERSION
- * Get version information about the MC firmware.
+ * Get version information about adapter components.
  */
 #define MC_CMD_GET_VERSION 0x8
+#undef MC_CMD_0x8_PRIVILEGE_CTG
 
 #define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 #define       MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32
 #define       MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16
 
+/* MC_CMD_GET_VERSION_V2_OUT msgresponse: Extended response providing version
+ * information for all adapter components. For Riverhead based designs, base MC
+ * firmware version fields refer to NMC firmware, while CMC firmware data is in
+ * dedicated CMC fields. Flags indicate which data is present in the response
+ * (depending on which components exist on a particular adapter)
+ */
+#define    MC_CMD_GET_VERSION_V2_OUT_LEN 304
+/*            MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/*            MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/*            Enum values, see field(s): */
+/*               MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define       MC_CMD_GET_VERSION_V2_OUT_PCOL_OFST 4
+#define       MC_CMD_GET_VERSION_V2_OUT_PCOL_LEN 4
+/* 128bit mask of functions supported by the current firmware */
+#define       MC_CMD_GET_VERSION_V2_OUT_SUPPORTED_FUNCS_OFST 8
+#define       MC_CMD_GET_VERSION_V2_OUT_SUPPORTED_FUNCS_LEN 16
+#define       MC_CMD_GET_VERSION_V2_OUT_VERSION_OFST 24
+#define       MC_CMD_GET_VERSION_V2_OUT_VERSION_LEN 8
+#define       MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_OFST 24
+#define       MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_OFST 28
+/* extra info */
+#define       MC_CMD_GET_VERSION_V2_OUT_EXTRA_OFST 32
+#define       MC_CMD_GET_VERSION_V2_OUT_EXTRA_LEN 16
+/* Flags indicating which extended fields are valid */
+#define       MC_CMD_GET_VERSION_V2_OUT_FLAGS_OFST 48
+#define       MC_CMD_GET_VERSION_V2_OUT_FLAGS_LEN 4
+#define        MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_OFST 48
+#define        MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_LBN 0
+#define        MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1
+#define        MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_OFST 48
+#define        MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1
+#define        MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1
+#define        MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_OFST 48
+#define        MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_LBN 2
+#define        MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1
+#define        MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_OFST 48
+#define        MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_LBN 3
+#define        MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1
+#define        MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_OFST 48
+#define        MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_LBN 4
+#define        MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1
+/* MC firmware unique build ID (as binary SHA-1 value) */
+#define       MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_OFST 52
+#define       MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_LEN 20
+/* MC firmware security level */
+#define       MC_CMD_GET_VERSION_V2_OUT_MCFW_SECURITY_LEVEL_OFST 72
+#define       MC_CMD_GET_VERSION_V2_OUT_MCFW_SECURITY_LEVEL_LEN 4
+/* MC firmware build name (as null-terminated US-ASCII string) */
+#define       MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_NAME_OFST 76
+#define       MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_NAME_LEN 64
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define       MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_OFST 140
+#define       MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_LEN 4
+#define       MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_NUM 4
+/* SUC firmware build date (as 64-bit Unix timestamp) */
+#define       MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_OFST 156
+#define       MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LEN 8
+#define       MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_OFST 156
+#define       MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_OFST 160
+/* The ID of the SUC chip. This is specific to the platform but typically
+ * indicates family, memory sizes etc. See SF-116728-SW for further details.
+ */
+#define       MC_CMD_GET_VERSION_V2_OUT_SUCFW_CHIP_ID_OFST 164
+#define       MC_CMD_GET_VERSION_V2_OUT_SUCFW_CHIP_ID_LEN 4
+/* The CMC firmware version as four numbers - a.b.c.d */
+#define       MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_OFST 168
+#define       MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_LEN 4
+#define       MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_NUM 4
+/* CMC firmware build date (as 64-bit Unix timestamp) */
+#define       MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_OFST 184
+#define       MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LEN 8
+#define       MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_OFST 184
+#define       MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_OFST 188
+/* FPGA version as three numbers. On Riverhead based systems this field uses
+ * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG):
+ * FPGA_VERSION[0]: x => Image H{x} FPGA_VERSION[1]: Revision letter (0 => A, 1
+ * => B, ...) FPGA_VERSION[2]: Sub-revision number
+ */
+#define       MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_OFST 192
+#define       MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_LEN 4
+#define       MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_NUM 3
+/* Extra FPGA revision information (as null-terminated US-ASCII string) */
+#define       MC_CMD_GET_VERSION_V2_OUT_FPGA_EXTRA_OFST 204
+#define       MC_CMD_GET_VERSION_V2_OUT_FPGA_EXTRA_LEN 16
+/* Board name / adapter model (as null-terminated US-ASCII string) */
+#define       MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_OFST 220
+#define       MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_LEN 16
+/* Board revision number */
+#define       MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_OFST 236
+#define       MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_LEN 4
+/* Board serial number (as null-terminated US-ASCII string) */
+#define       MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_OFST 240
+#define       MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_LEN 64
+
 
 /***********************************/
 /* MC_CMD_PTP
  * Perform PTP operation
  */
 #define MC_CMD_PTP 0xb
+#undef MC_CMD_0xb_PRIVILEGE_CTG
 
 #define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_PTP_IN_TRANSMIT msgrequest */
 #define    MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
 #define    MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
+#define    MC_CMD_PTP_IN_TRANSMIT_LENMAX_MCDI2 1020
 #define    MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
+#define    MC_CMD_PTP_IN_TRANSMIT_PACKET_NUM(len) (((len)-12)/1)
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
 /*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
 #define       MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
 #define       MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
 #define       MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240
+#define       MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM_MCDI2 1008
 
 /* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
 #define    MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
 /* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
 #define    MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
 #define    MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
+#define    MC_CMD_PTP_IN_FPGAWRITE_LENMAX_MCDI2 1020
 #define    MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
+#define    MC_CMD_PTP_IN_FPGAWRITE_BUFFER_NUM(len) (((len)-12)/1)
 /*            MC_CMD_PTP_IN_CMD_OFST 0 */
 /*            MC_CMD_PTP_IN_CMD_LEN 4 */
 /*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
 #define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
 #define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
 #define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240
+#define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM_MCDI2 1008
 
 /* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
 #define    MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
 /* Original field containing queue ID. Now extended to include flags. */
 #define       MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
 #define       MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_OFST 8
 #define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
 #define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_OFST 8
 #define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
 #define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1
 
 /* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
 #define    MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
 #define    MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240
+#define    MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX_MCDI2 1020
 #define    MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num))
+#define    MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_NUM(len) (((len)-0)/20)
 /* A set of host and NIC times */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM_MCDI2 51
 /* Host time immediately before NIC's hardware clock read */
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
 #define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4
 /* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
 #define    MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
 #define    MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252
+#define    MC_CMD_PTP_OUT_FPGAREAD_LENMAX_MCDI2 1020
 #define    MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num))
+#define    MC_CMD_PTP_OUT_FPGAREAD_BUFFER_NUM(len) (((len)-0)/1)
 #define       MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0
 #define       MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1
 #define       MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
 #define       MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
+#define       MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM_MCDI2 1020
 
 /* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */
 #define    MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4
 /* Various PTP capabilities */
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_OFST 8
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_OFST 8
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_OFST 8
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_OFST 8
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3
 #define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1
 #define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
  * Read 32bit words from the indirect memory map.
  */
 #define MC_CMD_CSR_READ32 0xc
+#undef MC_CMD_0xc_PRIVILEGE_CTG
 
 #define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_CSR_READ32_OUT msgresponse */
 #define    MC_CMD_CSR_READ32_OUT_LENMIN 4
 #define    MC_CMD_CSR_READ32_OUT_LENMAX 252
+#define    MC_CMD_CSR_READ32_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_CSR_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4)
 /* The last dword is the status, not a value read */
 #define       MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
 #define       MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
 #define       MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
 #define       MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
+#define       MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM_MCDI2 255
 
 
 /***********************************/
  * Write 32bit dwords to the indirect memory map.
  */
 #define MC_CMD_CSR_WRITE32 0xd
+#undef MC_CMD_0xd_PRIVILEGE_CTG
 
 #define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_CSR_WRITE32_IN msgrequest */
 #define    MC_CMD_CSR_WRITE32_IN_LENMIN 12
 #define    MC_CMD_CSR_WRITE32_IN_LENMAX 252
+#define    MC_CMD_CSR_WRITE32_IN_LENMAX_MCDI2 1020
 #define    MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
+#define    MC_CMD_CSR_WRITE32_IN_BUFFER_NUM(len) (((len)-8)/4)
 /* Address */
 #define       MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
 #define       MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
 #define       MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
 #define       MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
 #define       MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
+#define       MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM_MCDI2 253
 
 /* MC_CMD_CSR_WRITE32_OUT msgresponse */
 #define    MC_CMD_CSR_WRITE32_OUT_LEN 4
  * MCDI command to avoid creating too many MCDI commands.
  */
 #define MC_CMD_HP 0x54
+#undef MC_CMD_0x54_PRIVILEGE_CTG
 
 #define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * Get stack information.
  */
 #define MC_CMD_STACKINFO 0xf
+#undef MC_CMD_0xf_PRIVILEGE_CTG
 
 #define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_STACKINFO_OUT msgresponse */
 #define    MC_CMD_STACKINFO_OUT_LENMIN 12
 #define    MC_CMD_STACKINFO_OUT_LENMAX 252
+#define    MC_CMD_STACKINFO_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
+#define    MC_CMD_STACKINFO_OUT_THREAD_INFO_NUM(len) (((len)-0)/12)
 /* (thread ptr, stack size, free space) for each thread in system */
 #define       MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
 #define       MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
 #define       MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
 #define       MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
+#define       MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM_MCDI2 85
 
 
 /***********************************/
  * MDIO register read.
  */
 #define MC_CMD_MDIO_READ 0x10
+#undef MC_CMD_0x10_PRIVILEGE_CTG
 
 #define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * MDIO register write.
  */
 #define MC_CMD_MDIO_WRITE 0x11
+#undef MC_CMD_0x11_PRIVILEGE_CTG
 
 #define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * Write DBI register(s).
  */
 #define MC_CMD_DBI_WRITE 0x12
+#undef MC_CMD_0x12_PRIVILEGE_CTG
 
 #define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DBI_WRITE_IN msgrequest */
 #define    MC_CMD_DBI_WRITE_IN_LENMIN 12
 #define    MC_CMD_DBI_WRITE_IN_LENMAX 252
+#define    MC_CMD_DBI_WRITE_IN_LENMAX_MCDI2 1020
 #define    MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
+#define    MC_CMD_DBI_WRITE_IN_DBIWROP_NUM(len) (((len)-0)/12)
 /* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
  * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
  */
 #define       MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
 #define       MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
 #define       MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21
+#define       MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM_MCDI2 85
 
 /* MC_CMD_DBI_WRITE_OUT msgresponse */
 #define    MC_CMD_DBI_WRITE_OUT_LEN 0
 #define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
 #define       MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
 #define       MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4
+#define        MC_CMD_DBIWROP_TYPEDEF_VF_NUM_OFST 4
 #define        MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
 #define        MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
+#define        MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_OFST 4
 #define        MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
 #define        MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define        MC_CMD_DBIWROP_TYPEDEF_CS2_OFST 4
 #define        MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
 #define        MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
 #define       MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
  * Returns the MC firmware configuration structure.
  */
 #define MC_CMD_GET_BOARD_CFG 0x18
+#undef MC_CMD_0x18_PRIVILEGE_CTG
 
 #define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_GET_BOARD_CFG_OUT msgresponse */
 #define    MC_CMD_GET_BOARD_CFG_OUT_LENMIN 96
 #define    MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
+#define    MC_CMD_GET_BOARD_CFG_OUT_LENMAX_MCDI2 136
 #define    MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
+#define    MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_NUM(len) (((len)-72)/2)
 #define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
 #define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4
 #define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
 #define       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
 #define       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12
 #define       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM 32
+#define       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM_MCDI2 32
 
 
 /***********************************/
  * Read DBI register(s) -- extended functionality
  */
 #define MC_CMD_DBI_READX 0x19
+#undef MC_CMD_0x19_PRIVILEGE_CTG
 
 #define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DBI_READX_IN msgrequest */
 #define    MC_CMD_DBI_READX_IN_LENMIN 8
 #define    MC_CMD_DBI_READX_IN_LENMAX 248
+#define    MC_CMD_DBI_READX_IN_LENMAX_MCDI2 1016
 #define    MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
+#define    MC_CMD_DBI_READX_IN_DBIRDOP_NUM(len) (((len)-0)/8)
 /* Each Read op consists of an address (offset 0) and VF/CS2 parameters (see MC_CMD_DBIRDOP_TYPEDEF) */
 #define       MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
 #define       MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
 #define       MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
 #define       MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
 #define       MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
+#define       MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM_MCDI2 127
 
 /* MC_CMD_DBI_READX_OUT msgresponse */
 #define    MC_CMD_DBI_READX_OUT_LENMIN 4
 #define    MC_CMD_DBI_READX_OUT_LENMAX 252
+#define    MC_CMD_DBI_READX_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_DBI_READX_OUT_VALUE_NUM(len) (((len)-0)/4)
 /* Value */
 #define       MC_CMD_DBI_READX_OUT_VALUE_OFST 0
 #define       MC_CMD_DBI_READX_OUT_VALUE_LEN 4
 #define       MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
 #define       MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
+#define       MC_CMD_DBI_READX_OUT_VALUE_MAXNUM_MCDI2 255
 
 /* MC_CMD_DBIRDOP_TYPEDEF structuredef */
 #define    MC_CMD_DBIRDOP_TYPEDEF_LEN 8
 #define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
 #define       MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
 #define       MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
+#define        MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_OFST 4
 #define        MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
 #define        MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
+#define        MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_OFST 4
 #define        MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
 #define        MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define        MC_CMD_DBIRDOP_TYPEDEF_CS2_OFST 4
 #define        MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
 #define        MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
 #define       MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
  * Set the 16byte seed for the MC pseudo-random generator.
  */
 #define MC_CMD_SET_RAND_SEED 0x1a
+#undef MC_CMD_0x1a_PRIVILEGE_CTG
 
 #define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_LTSSM_HIST_OUT msgresponse */
 #define    MC_CMD_LTSSM_HIST_OUT_LENMIN 0
 #define    MC_CMD_LTSSM_HIST_OUT_LENMAX 252
+#define    MC_CMD_LTSSM_HIST_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_LTSSM_HIST_OUT_DATA_NUM(len) (((len)-0)/4)
 /* variable number of LTSSM values, as bytes. The history is read-to-clear. */
 #define       MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
 #define       MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
 #define       MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
 #define       MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63
+#define       MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM_MCDI2 255
 
 
 /***********************************/
  * platforms.
  */
 #define MC_CMD_DRV_ATTACH 0x1c
+#undef MC_CMD_0x1c_PRIVILEGE_CTG
 
 #define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* new state to set if UPDATE=1 */
 #define       MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
 #define       MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4
+#define        MC_CMD_DRV_ATTACH_OFST 0
 #define        MC_CMD_DRV_ATTACH_LBN 0
 #define        MC_CMD_DRV_ATTACH_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_ATTACH_OFST 0
 #define        MC_CMD_DRV_ATTACH_IN_ATTACH_LBN 0
 #define        MC_CMD_DRV_ATTACH_IN_ATTACH_WIDTH 1
+#define        MC_CMD_DRV_PREBOOT_OFST 0
 #define        MC_CMD_DRV_PREBOOT_LBN 1
 #define        MC_CMD_DRV_PREBOOT_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_PREBOOT_OFST 0
 #define        MC_CMD_DRV_ATTACH_IN_PREBOOT_LBN 1
 #define        MC_CMD_DRV_ATTACH_IN_PREBOOT_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_OFST 0
 #define        MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_LBN 2
 #define        MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_OFST 0
 #define        MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_LBN 3
 #define        MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_LBN 4
+#define        MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_LBN 5
+#define        MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_LBN 5
+#define        MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_WIDTH 1
 /* 1 to set new state, or 0 to just report the existing state */
 #define       MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
 #define       MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4
  * bug69716)
  */
 #define          MC_CMD_FW_L3XUDP 0x7
+/* enum: Requests that the MC keep whatever datapath firmware is currently
+ * running. It's used for test purposes, where we want to be able to shmboot
+ * special test firmware variants. This option is only recognised in eftest
+ * (i.e. non-production) builds.
+ */
+#define          MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe
 /* enum: Only this option is allowed for non-admin functions */
 #define          MC_CMD_FW_DONT_CARE 0xffffffff
 
+/* MC_CMD_DRV_ATTACH_IN_V2 msgrequest: Updated DRV_ATTACH to include driver
+ * version
+ */
+#define    MC_CMD_DRV_ATTACH_IN_V2_LEN 32
+/* new state to set if UPDATE=1 */
+#define       MC_CMD_DRV_ATTACH_IN_V2_NEW_STATE_OFST 0
+#define       MC_CMD_DRV_ATTACH_IN_V2_NEW_STATE_LEN 4
+/*             MC_CMD_DRV_ATTACH_OFST 0 */
+/*             MC_CMD_DRV_ATTACH_LBN 0 */
+/*             MC_CMD_DRV_ATTACH_WIDTH 1 */
+#define        MC_CMD_DRV_ATTACH_IN_V2_ATTACH_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_V2_ATTACH_LBN 0
+#define        MC_CMD_DRV_ATTACH_IN_V2_ATTACH_WIDTH 1
+/*             MC_CMD_DRV_PREBOOT_OFST 0 */
+/*             MC_CMD_DRV_PREBOOT_LBN 1 */
+/*             MC_CMD_DRV_PREBOOT_WIDTH 1 */
+#define        MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_LBN 1
+#define        MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_LBN 2
+#define        MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_LBN 3
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_LBN 4
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_LBN 5
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_LBN 5
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_WIDTH 1
+/* 1 to set new state, or 0 to just report the existing state */
+#define       MC_CMD_DRV_ATTACH_IN_V2_UPDATE_OFST 4
+#define       MC_CMD_DRV_ATTACH_IN_V2_UPDATE_LEN 4
+/* preferred datapath firmware (for Huntington; ignored for Siena) */
+#define       MC_CMD_DRV_ATTACH_IN_V2_FIRMWARE_ID_OFST 8
+#define       MC_CMD_DRV_ATTACH_IN_V2_FIRMWARE_ID_LEN 4
+/* enum: Prefer to use full featured firmware */
+/*               MC_CMD_FW_FULL_FEATURED 0x0 */
+/* enum: Prefer to use firmware with fewer features but lower latency */
+/*               MC_CMD_FW_LOW_LATENCY 0x1 */
+/* enum: Prefer to use firmware for SolarCapture packed stream mode */
+/*               MC_CMD_FW_PACKED_STREAM 0x2 */
+/* enum: Prefer to use firmware with fewer features and simpler TX event
+ * batching but higher TX packet rate
+ */
+/*               MC_CMD_FW_HIGH_TX_RATE 0x3 */
+/* enum: Reserved value */
+/*               MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4 */
+/* enum: Prefer to use firmware with additional "rules engine" filtering
+ * support
+ */
+/*               MC_CMD_FW_RULES_ENGINE 0x5 */
+/* enum: Prefer to use firmware with additional DPDK support */
+/*               MC_CMD_FW_DPDK 0x6 */
+/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and
+ * bug69716)
+ */
+/*               MC_CMD_FW_L3XUDP 0x7 */
+/* enum: Requests that the MC keep whatever datapath firmware is currently
+ * running. It's used for test purposes, where we want to be able to shmboot
+ * special test firmware variants. This option is only recognised in eftest
+ * (i.e. non-production) builds.
+ */
+/*               MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe */
+/* enum: Only this option is allowed for non-admin functions */
+/*               MC_CMD_FW_DONT_CARE 0xffffffff */
+/* Version of the driver to be reported by management protocols (e.g. NC-SI)
+ * handled by the NIC. This is a zero-terminated ASCII string.
+ */
+#define       MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_OFST 12
+#define       MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN 20
+
 /* MC_CMD_DRV_ATTACH_OUT msgresponse */
 #define    MC_CMD_DRV_ATTACH_OUT_LEN 4
 /* previous or existing state, see the bitmask at NEW_STATE */
  * input.
  */
 #define          MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_VI_SPREADING_ENABLED 0x4
+/* enum: Used during development only. Should no longer be used. */
+#define          MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_RX_VI_SPREADING_INHIBITED 0x5
+/* enum: If set, indicates that TX only spreading is enabled. Even-numbered
+ * TXQs will use one engine, and odd-numbered TXQs will use the other. This
+ * also has the effect that only even-numbered RXQs will receive traffic.
+ */
+#define          MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TX_ONLY_VI_SPREADING_ENABLED 0x5
 
 
 /***********************************/
  * use MC_CMD_ENTITY_RESET instead.
  */
 #define MC_CMD_PORT_RESET 0x20
+#undef MC_CMD_0x20_PRIVILEGE_CTG
 
 #define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  */
 #define       MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
 #define       MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4
+#define        MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_OFST 0
 #define        MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
 #define        MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
 
  * Copy the given ASCII string out onto UART and/or out of the network port.
  */
 #define MC_CMD_PUTS 0x23
+#undef MC_CMD_0x23_PRIVILEGE_CTG
 
 #define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_PUTS_IN msgrequest */
 #define    MC_CMD_PUTS_IN_LENMIN 13
 #define    MC_CMD_PUTS_IN_LENMAX 252
+#define    MC_CMD_PUTS_IN_LENMAX_MCDI2 1020
 #define    MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
+#define    MC_CMD_PUTS_IN_STRING_NUM(len) (((len)-12)/1)
 #define       MC_CMD_PUTS_IN_DEST_OFST 0
 #define       MC_CMD_PUTS_IN_DEST_LEN 4
+#define        MC_CMD_PUTS_IN_UART_OFST 0
 #define        MC_CMD_PUTS_IN_UART_LBN 0
 #define        MC_CMD_PUTS_IN_UART_WIDTH 1
+#define        MC_CMD_PUTS_IN_PORT_OFST 0
 #define        MC_CMD_PUTS_IN_PORT_LBN 1
 #define        MC_CMD_PUTS_IN_PORT_WIDTH 1
 #define       MC_CMD_PUTS_IN_DHOST_OFST 4
 #define       MC_CMD_PUTS_IN_STRING_LEN 1
 #define       MC_CMD_PUTS_IN_STRING_MINNUM 1
 #define       MC_CMD_PUTS_IN_STRING_MAXNUM 240
+#define       MC_CMD_PUTS_IN_STRING_MAXNUM_MCDI2 1008
 
 /* MC_CMD_PUTS_OUT msgresponse */
 #define    MC_CMD_PUTS_OUT_LEN 0
  * 'zombie' state. Locks required: None
  */
 #define MC_CMD_GET_PHY_CFG 0x24
+#undef MC_CMD_0x24_PRIVILEGE_CTG
 
 #define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* flags */
 #define       MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
 #define       MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4
+#define        MC_CMD_GET_PHY_CFG_OUT_PRESENT_OFST 0
 #define        MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
 #define        MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
+#define        MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_OFST 0
 #define        MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
 #define        MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1
+#define        MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_OFST 0
 #define        MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2
 #define        MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1
+#define        MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_OFST 0
 #define        MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3
 #define        MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1
+#define        MC_CMD_GET_PHY_CFG_OUT_POWEROFF_OFST 0
 #define        MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4
 #define        MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1
+#define        MC_CMD_GET_PHY_CFG_OUT_TXDIS_OFST 0
 #define        MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5
 #define        MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
+#define        MC_CMD_GET_PHY_CFG_OUT_BIST_OFST 0
 #define        MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
 #define        MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
 /* ?? */
 /* Bitmask of supported capabilities */
 #define       MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
 #define       MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4
+#define        MC_CMD_PHY_CAP_10HDX_OFST 8
 #define        MC_CMD_PHY_CAP_10HDX_LBN 1
 #define        MC_CMD_PHY_CAP_10HDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_10FDX_OFST 8
 #define        MC_CMD_PHY_CAP_10FDX_LBN 2
 #define        MC_CMD_PHY_CAP_10FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_100HDX_OFST 8
 #define        MC_CMD_PHY_CAP_100HDX_LBN 3
 #define        MC_CMD_PHY_CAP_100HDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_100FDX_OFST 8
 #define        MC_CMD_PHY_CAP_100FDX_LBN 4
 #define        MC_CMD_PHY_CAP_100FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_1000HDX_OFST 8
 #define        MC_CMD_PHY_CAP_1000HDX_LBN 5
 #define        MC_CMD_PHY_CAP_1000HDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_1000FDX_OFST 8
 #define        MC_CMD_PHY_CAP_1000FDX_LBN 6
 #define        MC_CMD_PHY_CAP_1000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_10000FDX_OFST 8
 #define        MC_CMD_PHY_CAP_10000FDX_LBN 7
 #define        MC_CMD_PHY_CAP_10000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_PAUSE_OFST 8
 #define        MC_CMD_PHY_CAP_PAUSE_LBN 8
 #define        MC_CMD_PHY_CAP_PAUSE_WIDTH 1
+#define        MC_CMD_PHY_CAP_ASYM_OFST 8
 #define        MC_CMD_PHY_CAP_ASYM_LBN 9
 #define        MC_CMD_PHY_CAP_ASYM_WIDTH 1
+#define        MC_CMD_PHY_CAP_AN_OFST 8
 #define        MC_CMD_PHY_CAP_AN_LBN 10
 #define        MC_CMD_PHY_CAP_AN_WIDTH 1
+#define        MC_CMD_PHY_CAP_40000FDX_OFST 8
 #define        MC_CMD_PHY_CAP_40000FDX_LBN 11
 #define        MC_CMD_PHY_CAP_40000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_DDM_OFST 8
 #define        MC_CMD_PHY_CAP_DDM_LBN 12
 #define        MC_CMD_PHY_CAP_DDM_WIDTH 1
+#define        MC_CMD_PHY_CAP_100000FDX_OFST 8
 #define        MC_CMD_PHY_CAP_100000FDX_LBN 13
 #define        MC_CMD_PHY_CAP_100000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_25000FDX_OFST 8
 #define        MC_CMD_PHY_CAP_25000FDX_LBN 14
 #define        MC_CMD_PHY_CAP_25000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_50000FDX_OFST 8
 #define        MC_CMD_PHY_CAP_50000FDX_LBN 15
 #define        MC_CMD_PHY_CAP_50000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_BASER_FEC_OFST 8
 #define        MC_CMD_PHY_CAP_BASER_FEC_LBN 16
 #define        MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1
+#define        MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_OFST 8
 #define        MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17
 #define        MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1
+#define        MC_CMD_PHY_CAP_RS_FEC_OFST 8
 #define        MC_CMD_PHY_CAP_RS_FEC_LBN 18
 #define        MC_CMD_PHY_CAP_RS_FEC_WIDTH 1
+#define        MC_CMD_PHY_CAP_RS_FEC_REQUESTED_OFST 8
 #define        MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19
 #define        MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_OFST 8
 #define        MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20
 #define        MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_OFST 8
 #define        MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
 #define        MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
 /* ?? */
  * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
  */
 #define MC_CMD_START_BIST 0x25
+#undef MC_CMD_0x25_PRIVILEGE_CTG
 
 #define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * EACCES (if PHY_LOCK is not held).
  */
 #define MC_CMD_POLL_BIST 0x26
+#undef MC_CMD_0x26_PRIVILEGE_CTG
 
 #define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
 #define    MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
 #define    MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
+#define    MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX_MCDI2 1020
 #define    MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
+#define    MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_NUM(len) (((len)-0)/4)
 #define       MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
 #define       MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
 #define       MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
 #define       MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
+#define       MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM_MCDI2 255
 
 /* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
 #define    MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
  * Returns a bitmask of loopback modes available at each speed.
  */
 #define MC_CMD_GET_LOOPBACK_MODES 0x28
+#undef MC_CMD_0x28_PRIVILEGE_CTG
 
 #define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * ETIME.
  */
 #define MC_CMD_GET_LINK 0x29
+#undef MC_CMD_0x29_PRIVILEGE_CTG
 
 #define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /*               MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
 #define       MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
 #define       MC_CMD_GET_LINK_OUT_FLAGS_LEN 4
+#define        MC_CMD_GET_LINK_OUT_LINK_UP_OFST 16
 #define        MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
 #define        MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_FULL_DUPLEX_OFST 16
 #define        MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
 #define        MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_BPX_LINK_OFST 16
 #define        MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2
 #define        MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_PHY_LINK_OFST 16
 #define        MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
 #define        MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_OFST 16
 #define        MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6
 #define        MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_OFST 16
 #define        MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7
 #define        MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_OFST 16
+#define        MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_LBN 8
+#define        MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_MODULE_UP_OFST 16
+#define        MC_CMD_GET_LINK_OUT_MODULE_UP_LBN 9
+#define        MC_CMD_GET_LINK_OUT_MODULE_UP_WIDTH 1
 /* This returns the negotiated flow control value. */
 #define       MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
 #define       MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
 /*               MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
 #define       MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
 #define       MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4
+#define        MC_CMD_MAC_FAULT_XGMII_LOCAL_OFST 24
 #define        MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
 #define        MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
+#define        MC_CMD_MAC_FAULT_XGMII_REMOTE_OFST 24
 #define        MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
 #define        MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1
+#define        MC_CMD_MAC_FAULT_SGMII_REMOTE_OFST 24
 #define        MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2
 #define        MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1
+#define        MC_CMD_MAC_FAULT_PENDING_RECONFIG_OFST 24
 #define        MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3
 #define        MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1
 
 /*               MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
 #define       MC_CMD_GET_LINK_OUT_V2_FLAGS_OFST 16
 #define       MC_CMD_GET_LINK_OUT_V2_FLAGS_LEN 4
+#define        MC_CMD_GET_LINK_OUT_V2_LINK_UP_OFST 16
 #define        MC_CMD_GET_LINK_OUT_V2_LINK_UP_LBN 0
 #define        MC_CMD_GET_LINK_OUT_V2_LINK_UP_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_OFST 16
 #define        MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_LBN 1
 #define        MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_BPX_LINK_OFST 16
 #define        MC_CMD_GET_LINK_OUT_V2_BPX_LINK_LBN 2
 #define        MC_CMD_GET_LINK_OUT_V2_BPX_LINK_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_PHY_LINK_OFST 16
 #define        MC_CMD_GET_LINK_OUT_V2_PHY_LINK_LBN 3
 #define        MC_CMD_GET_LINK_OUT_V2_PHY_LINK_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_OFST 16
 #define        MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_LBN 6
 #define        MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_OFST 16
 #define        MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_LBN 7
 #define        MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_OFST 16
+#define        MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_LBN 8
+#define        MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_MODULE_UP_OFST 16
+#define        MC_CMD_GET_LINK_OUT_V2_MODULE_UP_LBN 9
+#define        MC_CMD_GET_LINK_OUT_V2_MODULE_UP_WIDTH 1
 /* This returns the negotiated flow control value. */
 #define       MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20
 #define       MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4
 /*               MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
 #define       MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24
 #define       MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_LEN 4
+/*             MC_CMD_MAC_FAULT_XGMII_LOCAL_OFST 24 */
 /*             MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 */
 /*             MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 */
+/*             MC_CMD_MAC_FAULT_XGMII_REMOTE_OFST 24 */
 /*             MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 */
 /*             MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 */
+/*             MC_CMD_MAC_FAULT_SGMII_REMOTE_OFST 24 */
 /*             MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 */
 /*             MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 */
+/*             MC_CMD_MAC_FAULT_PENDING_RECONFIG_OFST 24 */
 /*             MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 */
 /*             MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 */
 /* True local device capabilities (taking into account currently used PMD/MDI,
 /*               FEC_TYPE/TYPE */
 #define       MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_OFST 40
 #define       MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_LEN 4
+#define        MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_OFST 40
 #define        MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_LBN 0
 #define        MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_PMD_READY_OFST 40
 #define        MC_CMD_GET_LINK_OUT_V2_PMD_READY_LBN 1
 #define        MC_CMD_GET_LINK_OUT_V2_PMD_READY_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_OFST 40
 #define        MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_LBN 2
 #define        MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_OFST 40
 #define        MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_LBN 3
 #define        MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_OFST 40
 #define        MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_LBN 4
 #define        MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_OFST 40
 #define        MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_LBN 5
 #define        MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_HI_BER_OFST 40
 #define        MC_CMD_GET_LINK_OUT_V2_HI_BER_LBN 6
 #define        MC_CMD_GET_LINK_OUT_V2_HI_BER_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_OFST 40
 #define        MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_LBN 7
 #define        MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_AN_DONE_OFST 40
 #define        MC_CMD_GET_LINK_OUT_V2_AN_DONE_LBN 8
 #define        MC_CMD_GET_LINK_OUT_V2_AN_DONE_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_OFST 40
+#define        MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_LBN 9
+#define        MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_WIDTH 1
 
 
 /***********************************/
 /* MC_CMD_SET_LINK
  * Write the unified MAC/PHY link configuration. Locks required: None. Return
- * code: 0, EINVAL, ETIME
+ * code: 0, EINVAL, ETIME, EAGAIN
  */
 #define MC_CMD_SET_LINK 0x2a
+#undef MC_CMD_0x2a_PRIVILEGE_CTG
 
 #define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK
 
 /* Flags */
 #define       MC_CMD_SET_LINK_IN_FLAGS_OFST 4
 #define       MC_CMD_SET_LINK_IN_FLAGS_LEN 4
+#define        MC_CMD_SET_LINK_IN_LOWPOWER_OFST 4
 #define        MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
 #define        MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
+#define        MC_CMD_SET_LINK_IN_POWEROFF_OFST 4
 #define        MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
 #define        MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
+#define        MC_CMD_SET_LINK_IN_TXDIS_OFST 4
 #define        MC_CMD_SET_LINK_IN_TXDIS_LBN 2
 #define        MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
+#define        MC_CMD_SET_LINK_IN_LINKDOWN_OFST 4
+#define        MC_CMD_SET_LINK_IN_LINKDOWN_LBN 3
+#define        MC_CMD_SET_LINK_IN_LINKDOWN_WIDTH 1
 /* Loopback mode. */
 #define       MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
 #define       MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4
 #define       MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
 #define       MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4
 
+/* MC_CMD_SET_LINK_IN_V2 msgrequest: Updated SET_LINK to include sequence
+ * number to ensure this SET_LINK command corresponds to the latest
+ * MODULECHANGE event.
+ */
+#define    MC_CMD_SET_LINK_IN_V2_LEN 17
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define       MC_CMD_SET_LINK_IN_V2_CAP_OFST 0
+#define       MC_CMD_SET_LINK_IN_V2_CAP_LEN 4
+/* Flags */
+#define       MC_CMD_SET_LINK_IN_V2_FLAGS_OFST 4
+#define       MC_CMD_SET_LINK_IN_V2_FLAGS_LEN 4
+#define        MC_CMD_SET_LINK_IN_V2_LOWPOWER_OFST 4
+#define        MC_CMD_SET_LINK_IN_V2_LOWPOWER_LBN 0
+#define        MC_CMD_SET_LINK_IN_V2_LOWPOWER_WIDTH 1
+#define        MC_CMD_SET_LINK_IN_V2_POWEROFF_OFST 4
+#define        MC_CMD_SET_LINK_IN_V2_POWEROFF_LBN 1
+#define        MC_CMD_SET_LINK_IN_V2_POWEROFF_WIDTH 1
+#define        MC_CMD_SET_LINK_IN_V2_TXDIS_OFST 4
+#define        MC_CMD_SET_LINK_IN_V2_TXDIS_LBN 2
+#define        MC_CMD_SET_LINK_IN_V2_TXDIS_WIDTH 1
+#define        MC_CMD_SET_LINK_IN_V2_LINKDOWN_OFST 4
+#define        MC_CMD_SET_LINK_IN_V2_LINKDOWN_LBN 3
+#define        MC_CMD_SET_LINK_IN_V2_LINKDOWN_WIDTH 1
+/* Loopback mode. */
+#define       MC_CMD_SET_LINK_IN_V2_LOOPBACK_MODE_OFST 8
+#define       MC_CMD_SET_LINK_IN_V2_LOOPBACK_MODE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means (choose any available
+ * speed).
+ */
+#define       MC_CMD_SET_LINK_IN_V2_LOOPBACK_SPEED_OFST 12
+#define       MC_CMD_SET_LINK_IN_V2_LOOPBACK_SPEED_LEN 4
+#define       MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_OFST 16
+#define       MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_LEN 1
+#define        MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_OFST 16
+#define        MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_LBN 0
+#define        MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_WIDTH 7
+#define        MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_OFST 16
+#define        MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN 7
+#define        MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_WIDTH 1
+
 /* MC_CMD_SET_LINK_OUT msgresponse */
 #define    MC_CMD_SET_LINK_OUT_LEN 0
 
  * Set identification LED state. Locks required: None. Return code: 0, EINVAL
  */
 #define MC_CMD_SET_ID_LED 0x2b
+#undef MC_CMD_0x2b_PRIVILEGE_CTG
 
 #define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK
 
  * Set MAC configuration. Locks required: None. Return code: 0, EINVAL
  */
 #define MC_CMD_SET_MAC 0x2c
+#undef MC_CMD_0x2c_PRIVILEGE_CTG
 
 #define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 #define       MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
 #define       MC_CMD_SET_MAC_IN_REJECT_OFST 16
 #define       MC_CMD_SET_MAC_IN_REJECT_LEN 4
+#define        MC_CMD_SET_MAC_IN_REJECT_UNCST_OFST 16
 #define        MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
 #define        MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
+#define        MC_CMD_SET_MAC_IN_REJECT_BRDCST_OFST 16
 #define        MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
 #define        MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
 #define       MC_CMD_SET_MAC_IN_FCNTL_OFST 20
 #define          MC_CMD_FCNTL_GENERATE 0x5
 #define       MC_CMD_SET_MAC_IN_FLAGS_OFST 24
 #define       MC_CMD_SET_MAC_IN_FLAGS_LEN 4
+#define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_OFST 24
 #define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
 #define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
 
 #define       MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
 #define       MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
 #define       MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4
+#define        MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_OFST 16
 #define        MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
 #define        MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
+#define        MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_OFST 16
 #define        MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
 #define        MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
 #define       MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
 /*               MC_CMD_FCNTL_GENERATE 0x5 */
 #define       MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
 #define       MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4
+#define        MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_OFST 24
 #define        MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
 #define        MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
 /* Select which parameters to configure. A parameter will only be modified if
  */
 #define       MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
 #define       MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4
+#define        MC_CMD_SET_MAC_EXT_IN_CFG_MTU_OFST 28
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
+#define        MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_OFST 28
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1
+#define        MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_OFST 28
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1
+#define        MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_OFST 28
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1
+#define        MC_CMD_SET_MAC_EXT_IN_CFG_FCS_OFST 28
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4
 #define        MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1
 
  * Returns: 0, ETIME
  */
 #define MC_CMD_PHY_STATS 0x2d
+#undef MC_CMD_0x2d_PRIVILEGE_CTG
 
 #define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK
 
  * effect. Returns: 0, ETIME
  */
 #define MC_CMD_MAC_STATS 0x2e
+#undef MC_CMD_0x2e_PRIVILEGE_CTG
 
 #define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 #define       MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
 #define       MC_CMD_MAC_STATS_IN_CMD_OFST 8
 #define       MC_CMD_MAC_STATS_IN_CMD_LEN 4
+#define        MC_CMD_MAC_STATS_IN_DMA_OFST 8
 #define        MC_CMD_MAC_STATS_IN_DMA_LBN 0
 #define        MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
+#define        MC_CMD_MAC_STATS_IN_CLEAR_OFST 8
 #define        MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
 #define        MC_CMD_MAC_STATS_IN_CLEAR_WIDTH 1
+#define        MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_OFST 8
 #define        MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN 2
 #define        MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_WIDTH 1
+#define        MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_OFST 8
 #define        MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN 3
 #define        MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_WIDTH 1
+#define        MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_OFST 8
 #define        MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_LBN 4
 #define        MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_WIDTH 1
+#define        MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_OFST 8
 #define        MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_LBN 5
 #define        MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define        MC_CMD_MAC_STATS_IN_PERIOD_MS_OFST 8
 #define        MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
 #define        MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
 /* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
 /*            Other enum values, see field(s): */
 /*               MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */
 
+/* MC_CMD_MAC_STATS_V4_OUT_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V4_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V4_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V4*64))>>3)
+#define       MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_OFST 0
+#define       MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LEN 8
+#define       MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define       MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define       MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4
+/* enum: Start of V4 stats buffer space */
+#define          MC_CMD_MAC_V4_DMABUF_START 0x79
+/* enum: RXDP counter: Number of packets truncated because scattering was
+ * disabled.
+ */
+#define          MC_CMD_MAC_RXDP_SCATTER_DISABLED_TRUNC 0x79
+/* enum: RXDP counter: Number of times the RXDP head of line blocked waiting
+ * for descriptors. Will be zero unless RXDP_HLB_IDLE capability is set.
+ */
+#define          MC_CMD_MAC_RXDP_HLB_IDLE 0x7a
+/* enum: RXDP counter: Number of times the RXDP timed out while head of line
+ * blocking. Will be zero unless RXDP_HLB_IDLE capability is set.
+ */
+#define          MC_CMD_MAC_RXDP_HLB_TIMEOUT 0x7b
+/* enum: This includes the space at offset 124 which is the final
+ * GENERATION_END in a MAC_STATS_V4 response and otherwise unused.
+ */
+#define          MC_CMD_MAC_NSTATS_V4 0x7d
+/*            Other enum values, see field(s): */
+/*               MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */
+
 
 /***********************************/
 /* MC_CMD_SRIOV
 /* MC_CMD_MEMCPY_IN msgrequest */
 #define    MC_CMD_MEMCPY_IN_LENMIN 32
 #define    MC_CMD_MEMCPY_IN_LENMAX 224
+#define    MC_CMD_MEMCPY_IN_LENMAX_MCDI2 992
 #define    MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
+#define    MC_CMD_MEMCPY_IN_RECORD_NUM(len) (((len)-0)/32)
 /* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
 #define       MC_CMD_MEMCPY_IN_RECORD_OFST 0
 #define       MC_CMD_MEMCPY_IN_RECORD_LEN 32
 #define       MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
 #define       MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7
+#define       MC_CMD_MEMCPY_IN_RECORD_MAXNUM_MCDI2 31
 
 /* MC_CMD_MEMCPY_OUT msgresponse */
 #define    MC_CMD_MEMCPY_OUT_LEN 0
  * Set a WoL filter.
  */
 #define MC_CMD_WOL_FILTER_SET 0x32
+#undef MC_CMD_0x32_PRIVILEGE_CTG
 
 #define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK
 
 /*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
 #define       MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
 #define       MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4
+#define        MC_CMD_WOL_FILTER_SET_IN_LINK_UP_OFST 8
 #define        MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
 #define        MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
+#define        MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_OFST 8
 #define        MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
 #define        MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
 
  * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS
  */
 #define MC_CMD_WOL_FILTER_REMOVE 0x33
+#undef MC_CMD_0x33_PRIVILEGE_CTG
 
 #define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK
 
  * ENOSYS
  */
 #define MC_CMD_WOL_FILTER_RESET 0x34
+#undef MC_CMD_0x34_PRIVILEGE_CTG
 
 #define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK
 
  * Locks required: none. Returns: 0
  */
 #define MC_CMD_NVRAM_TYPES 0x36
+#undef MC_CMD_0x36_PRIVILEGE_CTG
 
 #define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * EINVAL (bad type).
  */
 #define MC_CMD_NVRAM_INFO 0x37
+#undef MC_CMD_0x37_PRIVILEGE_CTG
 
 #define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 #define       MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4
 #define       MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
 #define       MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4
+#define        MC_CMD_NVRAM_INFO_OUT_PROTECTED_OFST 12
 #define        MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
 #define        MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_OUT_TLV_OFST 12
 #define        MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
 #define        MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_OFST 12
 #define        MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2
 #define        MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_OUT_CRC_OFST 12
+#define        MC_CMD_NVRAM_INFO_OUT_CRC_LBN 3
+#define        MC_CMD_NVRAM_INFO_OUT_CRC_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_OUT_READ_ONLY_OFST 12
 #define        MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5
 #define        MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_OUT_CMAC_OFST 12
 #define        MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6
 #define        MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_OUT_A_B_OFST 12
 #define        MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
 #define        MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
 #define       MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
 #define       MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4
 #define       MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
 #define       MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4
+#define        MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_OFST 12
 #define        MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
 #define        MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_V2_OUT_TLV_OFST 12
 #define        MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
 #define        MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_OFST 12
 #define        MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2
 #define        MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_OFST 12
 #define        MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5
 #define        MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_V2_OUT_A_B_OFST 12
 #define        MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
 #define        MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
 #define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
  * EPERM.
  */
 #define MC_CMD_NVRAM_UPDATE_START 0x38
+#undef MC_CMD_0x38_PRIVILEGE_CTG
 
 #define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
 #define       MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4
 #define       MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4
+#define        MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_OFST 4
 #define        MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
 #define        MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
 
  * PHY_LOCK required and not held)
  */
 #define MC_CMD_NVRAM_READ 0x39
+#undef MC_CMD_0x39_PRIVILEGE_CTG
 
 #define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_NVRAM_READ_OUT msgresponse */
 #define    MC_CMD_NVRAM_READ_OUT_LENMIN 1
 #define    MC_CMD_NVRAM_READ_OUT_LENMAX 252
+#define    MC_CMD_NVRAM_READ_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num))
+#define    MC_CMD_NVRAM_READ_OUT_READ_BUFFER_NUM(len) (((len)-0)/1)
 #define       MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
 #define       MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1
 #define       MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1
 #define       MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252
+#define       MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM_MCDI2 1020
 
 
 /***********************************/
  * PHY_LOCK required and not held)
  */
 #define MC_CMD_NVRAM_WRITE 0x3a
+#undef MC_CMD_0x3a_PRIVILEGE_CTG
 
 #define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_NVRAM_WRITE_IN msgrequest */
 #define    MC_CMD_NVRAM_WRITE_IN_LENMIN 13
 #define    MC_CMD_NVRAM_WRITE_IN_LENMAX 252
+#define    MC_CMD_NVRAM_WRITE_IN_LENMAX_MCDI2 1020
 #define    MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
+#define    MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_NUM(len) (((len)-12)/1)
 #define       MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
 #define       MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4
 /*            Enum values, see field(s): */
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
 #define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240
+#define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM_MCDI2 1008
 
 /* MC_CMD_NVRAM_WRITE_OUT msgresponse */
 #define    MC_CMD_NVRAM_WRITE_OUT_LEN 0
  * PHY_LOCK required and not held)
  */
 #define MC_CMD_NVRAM_ERASE 0x3b
+#undef MC_CMD_0x3b_PRIVILEGE_CTG
 
 #define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * the error EPERM.
  */
 #define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
+#undef MC_CMD_0x3c_PRIVILEGE_CTG
 
 #define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4
+#define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_OFST 8
 #define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
 #define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_OFST 8
+#define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_LBN 1
+#define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_WIDTH 1
+#define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_OFST 8
+#define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_LBN 2
+#define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_WIDTH 1
 
 /* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse: Legacy NVRAM_UPDATE_FINISH
  * response. Use NVRAM_UPDATE_FINISH_V2_OUT in new code
  * has completed.
  */
 #define    MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4
-/* Result of nvram update completion processing */
+/* Result of nvram update completion processing. Result codes that indicate an
+ * internal build failure and therefore not expected to be seen by customers in
+ * the field are marked with a prefix 'Internal-error'.
+ */
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0
 #define       MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4
 /* enum: Invalid return code; only non-zero values are defined. Defined as
 #define          MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc
 /* enum: The image has a lower security level than the current firmware. */
 #define          MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd
+/* enum: Internal-error. The signed image is missing the 'contents' section,
+ * where the 'contents' section holds the actual image payload to be applied.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_CONTENT_NOT_FOUND 0xe
+/* enum: Internal-error. The bundle header is invalid. */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_CONTENT_HEADER_INVALID 0xf
+/* enum: Internal-error. The bundle does not have a valid reflash image layout.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_REFLASH_IMAGE_INVALID 0x10
+/* enum: Internal-error. The bundle has an inconsistent layout of components or
+ * incorrect checksum.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_IMAGE_LAYOUT_INVALID 0x11
+/* enum: Internal-error. The bundle manifest is inconsistent with components in
+ * the bundle.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_INVALID 0x12
+/* enum: Internal-error. The number of components in a bundle do not match the
+ * number of components advertised by the bundle manifest.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_NUM_COMPONENTS_MISMATCH 0x13
+/* enum: Internal-error. The bundle contains too many components for the MC
+ * firmware to process
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_TOO_MANY_COMPONENTS 0x14
+/* enum: Internal-error. The bundle manifest has an invalid/inconsistent
+ * component.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_INVALID 0x15
+/* enum: Internal-error. The hash of a component does not match the hash stored
+ * in the bundle manifest.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_MISMATCH 0x16
+/* enum: Internal-error. Component hash calculation failed. */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_FAILED 0x17
+/* enum: Internal-error. The component does not have a valid reflash image
+ * layout.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_REFLASH_IMAGE_INVALID 0x18
+/* enum: The bundle processing code failed to copy a component to its target
+ * partition.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19
+/* enum: The update operation is in-progress. */
+#define          MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a
 
 
 /***********************************/
  * DATALEN=0
  */
 #define MC_CMD_REBOOT 0x3d
+#undef MC_CMD_0x3d_PRIVILEGE_CTG
 
 #define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * thread address.
  */
 #define MC_CMD_SCHEDINFO 0x3e
+#undef MC_CMD_0x3e_PRIVILEGE_CTG
 
 #define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_SCHEDINFO_OUT msgresponse */
 #define    MC_CMD_SCHEDINFO_OUT_LENMIN 4
 #define    MC_CMD_SCHEDINFO_OUT_LENMAX 252
+#define    MC_CMD_SCHEDINFO_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_SCHEDINFO_OUT_DATA_NUM(len) (((len)-0)/4)
 #define       MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
 #define       MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
 #define       MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
 #define       MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
+#define       MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM_MCDI2 255
 
 
 /***********************************/
  * mode to the specified value. Returns the old mode.
  */
 #define MC_CMD_REBOOT_MODE 0x3f
+#undef MC_CMD_0x3f_PRIVILEGE_CTG
 
 #define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 #define          MC_CMD_REBOOT_MODE_SNAPPER 0x3
 /* enum: snapper fake POR */
 #define          MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
+#define        MC_CMD_REBOOT_MODE_IN_FAKE_OFST 0
 #define        MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
 #define        MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
 
  * Locks required: None Returns: 0
  */
 #define MC_CMD_SENSOR_INFO 0x41
+#undef MC_CMD_0x41_PRIVILEGE_CTG
 
 #define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 #define       MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
 #define       MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4
 
+/* MC_CMD_SENSOR_INFO_EXT_IN_V2 msgrequest */
+#define    MC_CMD_SENSOR_INFO_EXT_IN_V2_LEN 8
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc.
+ */
+#define       MC_CMD_SENSOR_INFO_EXT_IN_V2_PAGE_OFST 0
+#define       MC_CMD_SENSOR_INFO_EXT_IN_V2_PAGE_LEN 4
+/* Flags controlling information retrieved */
+#define       MC_CMD_SENSOR_INFO_EXT_IN_V2_FLAGS_OFST 4
+#define       MC_CMD_SENSOR_INFO_EXT_IN_V2_FLAGS_LEN 4
+#define        MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_OFST 4
+#define        MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_LBN 0
+#define        MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_WIDTH 1
+
 /* MC_CMD_SENSOR_INFO_OUT msgresponse */
 #define    MC_CMD_SENSOR_INFO_OUT_LENMIN 4
 #define    MC_CMD_SENSOR_INFO_OUT_LENMAX 252
+#define    MC_CMD_SENSOR_INFO_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
+#define    MC_CMD_SENSOR_INFO_OUT_MC_CMD_SENSOR_ENTRY_NUM(len) (((len)-4)/8)
 #define       MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
 #define       MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4
 /* enum: Controller temperature: degC */
 #define          MC_CMD_SENSOR_IN_1V3 0x55
 /* enum: 1.3v power current: mA */
 #define          MC_CMD_SENSOR_IN_I1V3 0x56
+/* enum: Engineering sensor 1 */
+#define          MC_CMD_SENSOR_ENGINEERING_1 0x57
+/* enum: Engineering sensor 2 */
+#define          MC_CMD_SENSOR_ENGINEERING_2 0x58
+/* enum: Engineering sensor 3 */
+#define          MC_CMD_SENSOR_ENGINEERING_3 0x59
+/* enum: Engineering sensor 4 */
+#define          MC_CMD_SENSOR_ENGINEERING_4 0x5a
+/* enum: Engineering sensor 5 */
+#define          MC_CMD_SENSOR_ENGINEERING_5 0x5b
+/* enum: Engineering sensor 6 */
+#define          MC_CMD_SENSOR_ENGINEERING_6 0x5c
+/* enum: Engineering sensor 7 */
+#define          MC_CMD_SENSOR_ENGINEERING_7 0x5d
+/* enum: Engineering sensor 8 */
+#define          MC_CMD_SENSOR_ENGINEERING_8 0x5e
 /* enum: Not a sensor: reserved for the next page flag */
 #define          MC_CMD_SENSOR_PAGE2_NEXT 0x5f
 /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
 #define       MC_CMD_SENSOR_ENTRY_HI_OFST 8
 #define       MC_CMD_SENSOR_ENTRY_MINNUM 0
 #define       MC_CMD_SENSOR_ENTRY_MAXNUM 31
+#define       MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127
 
 /* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
 #define    MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4
 #define    MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
+#define    MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
+#define    MC_CMD_SENSOR_INFO_EXT_OUT_MC_CMD_SENSOR_ENTRY_NUM(len) (((len)-4)/8)
 #define       MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
 #define       MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4
 /*            Enum values, see field(s): */
 /*               MC_CMD_SENSOR_INFO_OUT */
+#define        MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_OFST 0
 #define        MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
 #define        MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1
 /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
 /*            MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
 /*            MC_CMD_SENSOR_ENTRY_MINNUM 0 */
 /*            MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
+/*            MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127 */
 
 /* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
 #define    MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
  * STATE_WARNING. Otherwise the board should not be expected to function.
  */
 #define MC_CMD_READ_SENSORS 0x42
+#undef MC_CMD_0x42_PRIVILEGE_CTG
 
 #define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_READ_SENSORS_IN msgrequest */
 #define    MC_CMD_READ_SENSORS_IN_LEN 8
-/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned).
+ *
+ * If the address is 0xffffffffffffffff send the readings in the response (used
+ * by cmdclient).
+ */
 #define       MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
 #define       MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
 #define       MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
 
 /* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
 #define    MC_CMD_READ_SENSORS_EXT_IN_LEN 12
-/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned).
+ *
+ * If the address is 0xffffffffffffffff send the readings in the response (used
+ * by cmdclient).
+ */
 #define       MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
 #define       MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
 #define       MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
 #define       MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
 #define       MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4
 
+/* MC_CMD_READ_SENSORS_EXT_IN_V2 msgrequest */
+#define    MC_CMD_READ_SENSORS_EXT_IN_V2_LEN 16
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned).
+ *
+ * If the address is 0xffffffffffffffff send the readings in the response (used
+ * by cmdclient).
+ */
+#define       MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_OFST 0
+#define       MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LEN 8
+#define       MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_OFST 0
+#define       MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_OFST 4
+/* Size in bytes of host buffer. */
+#define       MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_OFST 8
+#define       MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_LEN 4
+/* Flags controlling information retrieved */
+#define       MC_CMD_READ_SENSORS_EXT_IN_V2_FLAGS_OFST 12
+#define       MC_CMD_READ_SENSORS_EXT_IN_V2_FLAGS_LEN 4
+#define        MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_OFST 12
+#define        MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_LBN 0
+#define        MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_WIDTH 1
+
 /* MC_CMD_READ_SENSORS_OUT msgresponse */
 #define    MC_CMD_READ_SENSORS_OUT_LEN 0
 
  * code: 0
  */
 #define MC_CMD_GET_PHY_STATE 0x43
+#undef MC_CMD_0x43_PRIVILEGE_CTG
 
 #define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
  */
 #define MC_CMD_WOL_FILTER_GET 0x45
+#undef MC_CMD_0x45_PRIVILEGE_CTG
 
 #define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK
 
  * Returns: 0, ENOSYS
  */
 #define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
+#undef MC_CMD_0x46_PRIVILEGE_CTG
 
 #define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
 
 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
+#define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX_MCDI2 1020
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
+#define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_NUM(len) (((len)-4)/4)
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
 #define          MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
 #define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM_MCDI2 254
 
 /* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
 #define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
  * None. Returns: 0, ENOSYS
  */
 #define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
+#undef MC_CMD_0x47_PRIVILEGE_CTG
 
 #define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
 
  * required: None Returns: 0
  */
 #define MC_CMD_TESTASSERT 0x49
+#undef MC_CMD_0x49_PRIVILEGE_CTG
 
 #define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * basis. Locks required: None. Returns: 0, EINVAL .
  */
 #define MC_CMD_WORKAROUND 0x4a
+#undef MC_CMD_0x4a_PRIVILEGE_CTG
 
 #define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 #define    MC_CMD_WORKAROUND_EXT_OUT_LEN 4
 #define       MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
 #define       MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4
+#define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_OFST 0
 #define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
 #define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
 
  * Anything else: currently undefined. Locks required: None. Return code: 0.
  */
 #define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
+#undef MC_CMD_0x4b_PRIVILEGE_CTG
 
 #define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
 #define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
 #define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
+#define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
+#define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_NUM(len) (((len)-4)/1)
 /* in bytes */
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
 #define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248
+#define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM_MCDI2 1016
 
 
 /***********************************/
  * on the type of partition).
  */
 #define MC_CMD_NVRAM_TEST 0x4c
+#undef MC_CMD_0x4c_PRIVILEGE_CTG
 
 #define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * of range.
  */
 #define MC_CMD_SENSOR_SET_LIMS 0x4e
+#undef MC_CMD_0x4e_PRIVILEGE_CTG
 
 #define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
  * none. Returns: 0, EINVAL (bad type).
  */
 #define MC_CMD_NVRAM_PARTITIONS 0x51
+#undef MC_CMD_0x51_PRIVILEGE_CTG
 
 #define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */
 #define    MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4
 #define    MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252
+#define    MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
+#define    MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_NUM(len) (((len)-4)/4)
 /* total number of partitions */
 #define       MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
 #define       MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4
 #define       MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
 #define       MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0
 #define       MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62
+#define       MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM_MCDI2 254
 
 
 /***********************************/
  * none. Returns: 0, EINVAL (bad type).
  */
 #define MC_CMD_NVRAM_METADATA 0x52
+#undef MC_CMD_0x52_PRIVILEGE_CTG
 
 #define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_NVRAM_METADATA_OUT msgresponse */
 #define    MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
 #define    MC_CMD_NVRAM_METADATA_OUT_LENMAX 252
+#define    MC_CMD_NVRAM_METADATA_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
+#define    MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(len) (((len)-20)/1)
 /* Partition type ID code */
 #define       MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
 #define       MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4
 #define       MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
 #define       MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4
+#define        MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_OFST 4
 #define        MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
 #define        MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
+#define        MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_OFST 4
 #define        MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
 #define        MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1
+#define        MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_OFST 4
 #define        MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2
 #define        MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
 /* Subtype ID code for content of this partition */
 #define       MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1
 #define       MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0
 #define       MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232
+#define       MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM_MCDI2 1000
 
 
 /***********************************/
  * Returns the base MAC, count and stride for the requesting function
  */
 #define MC_CMD_GET_MAC_ADDRESSES 0x55
+#undef MC_CMD_0x55_PRIVILEGE_CTG
 
 #define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 
 /***********************************/
 /* MC_CMD_CLP
- * Perform a CLP related operation
+ * Perform a CLP related operation, see SF-110495-PS for details of CLP
+ * processing. This command has been extended to accommodate the requirements of
+ * different manufacturers which are to be found in SF-119187-TC, SF-119186-TC,
+ * SF-120509-TC and SF-117282-PS.
  */
 #define MC_CMD_CLP 0x56
+#undef MC_CMD_0x56_PRIVILEGE_CTG
 
 #define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 #define    MC_CMD_CLP_IN_SET_MAC_LEN 12
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
 /*            MC_CMD_CLP_IN_OP_LEN 4 */
-/* MAC address assigned to port */
+/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
+ * restores the permanent (factory-programmed) MAC address associated with the
+ * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
+ */
 #define       MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
 #define       MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
 /* Padding */
 /* MC_CMD_CLP_OUT_SET_MAC msgresponse */
 #define    MC_CMD_CLP_OUT_SET_MAC_LEN 0
 
+/* MC_CMD_CLP_IN_SET_MAC_V2 msgrequest */
+#define    MC_CMD_CLP_IN_SET_MAC_V2_LEN 16
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
+/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
+ * restores the permanent (factory-programmed) MAC address associated with the
+ * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
+ */
+#define       MC_CMD_CLP_IN_SET_MAC_V2_ADDR_OFST 4
+#define       MC_CMD_CLP_IN_SET_MAC_V2_ADDR_LEN 6
+/* Padding */
+#define       MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_OFST 10
+#define       MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_LEN 2
+#define       MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_OFST 12
+#define       MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_LEN 4
+#define        MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_OFST 12
+#define        MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_LBN 0
+#define        MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_WIDTH 1
+
 /* MC_CMD_CLP_IN_GET_MAC msgrequest */
 #define    MC_CMD_CLP_IN_GET_MAC_LEN 4
 /*            MC_CMD_CLP_IN_OP_OFST 0 */
 /*            MC_CMD_CLP_IN_OP_LEN 4 */
 
+/* MC_CMD_CLP_IN_GET_MAC_V2 msgrequest */
+#define    MC_CMD_CLP_IN_GET_MAC_V2_LEN 8
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
+#define       MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_OFST 4
+#define       MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_LEN 4
+#define        MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_OFST 4
+#define        MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_LBN 0
+#define        MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_WIDTH 1
+
 /* MC_CMD_CLP_OUT_GET_MAC msgresponse */
 #define    MC_CMD_CLP_OUT_GET_MAC_LEN 8
 /* MAC address assigned to port */
  * Perform a MUM operation
  */
 #define MC_CMD_MUM 0x57
+#undef MC_CMD_0x57_PRIVILEGE_CTG
 
 #define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 #define    MC_CMD_MUM_IN_LEN 4
 #define       MC_CMD_MUM_IN_OP_HDR_OFST 0
 #define       MC_CMD_MUM_IN_OP_HDR_LEN 4
+#define        MC_CMD_MUM_IN_OP_OFST 0
 #define        MC_CMD_MUM_IN_OP_LBN 0
 #define        MC_CMD_MUM_IN_OP_WIDTH 8
 /* enum: NULL MCDI command to MUM */
 /* MC_CMD_MUM_IN_WRITE msgrequest */
 #define    MC_CMD_MUM_IN_WRITE_LENMIN 16
 #define    MC_CMD_MUM_IN_WRITE_LENMAX 252
+#define    MC_CMD_MUM_IN_WRITE_LENMAX_MCDI2 1020
 #define    MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
+#define    MC_CMD_MUM_IN_WRITE_BUFFER_NUM(len) (((len)-12)/4)
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
 /*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
 #define       MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
 #define       MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM_MCDI2 252
 
 /* MC_CMD_MUM_IN_RAW_CMD msgrequest */
 #define    MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
 #define    MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
+#define    MC_CMD_MUM_IN_RAW_CMD_LENMAX_MCDI2 1020
 #define    MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
+#define    MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_NUM(len) (((len)-16)/1)
 /* MUM cmd header */
 /*            MC_CMD_MUM_IN_CMD_OFST 0 */
 /*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
 #define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
 #define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM_MCDI2 1004
 
 /* MC_CMD_MUM_IN_LOG msgrequest */
 #define    MC_CMD_MUM_IN_LOG_LEN 8
 /*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_HDR_OFST 4
 #define       MC_CMD_MUM_IN_GPIO_HDR_LEN 4
+#define        MC_CMD_MUM_IN_GPIO_OPCODE_OFST 4
 #define        MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
 #define        MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
 #define          MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
 /*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
 #define       MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4
+#define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_OFST 4
 #define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
 #define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
 #define          MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
 #define          MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
 #define          MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
 #define          MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
+#define        MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_OFST 4
 #define        MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
 #define        MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
 
 /*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_OFST 4
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
 
 /*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_OFST 4
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
 
 /*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
 #define       MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_OFST 4
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
 #define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
 
 /*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
 #define       MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4
+#define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_OFST 4
 #define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
 #define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
+#define        MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_OFST 4
 #define        MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
 #define        MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
 
 /* Control flags for clock programming */
 #define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
 #define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4
+#define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_OFST 8
 #define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
 #define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
+#define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_OFST 8
 #define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
 #define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1
+#define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_OFST 8
 #define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2
 #define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1
 
 /*            MC_CMD_MUM_IN_CMD_LEN 4 */
 #define       MC_CMD_MUM_IN_QSFP_HDR_OFST 4
 #define       MC_CMD_MUM_IN_QSFP_HDR_LEN 4
+#define        MC_CMD_MUM_IN_QSFP_OPCODE_OFST 4
 #define        MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
 #define        MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
 #define          MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
 /* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
 #define    MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
 #define    MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
+#define    MC_CMD_MUM_OUT_RAW_CMD_LENMAX_MCDI2 1020
 #define    MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
+#define    MC_CMD_MUM_OUT_RAW_CMD_DATA_NUM(len) (((len)-0)/1)
 /* returned data */
 #define       MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
 #define       MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
 #define       MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
 #define       MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM_MCDI2 1020
 
 /* MC_CMD_MUM_OUT_READ msgresponse */
 #define    MC_CMD_MUM_OUT_READ_LENMIN 4
 #define    MC_CMD_MUM_OUT_READ_LENMAX 252
+#define    MC_CMD_MUM_OUT_READ_LENMAX_MCDI2 1020
 #define    MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
+#define    MC_CMD_MUM_OUT_READ_BUFFER_NUM(len) (((len)-0)/4)
 #define       MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
 #define       MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
 #define       MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
 #define       MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
+#define       MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM_MCDI2 255
 
 /* MC_CMD_MUM_OUT_WRITE msgresponse */
 #define    MC_CMD_MUM_OUT_WRITE_LEN 0
 /* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
 #define    MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
 #define    MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
+#define    MC_CMD_MUM_OUT_READ_SENSORS_LENMAX_MCDI2 1020
 #define    MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
+#define    MC_CMD_MUM_OUT_READ_SENSORS_DATA_NUM(len) (((len)-0)/4)
 #define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
 #define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
 #define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
 #define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM_MCDI2 255
+#define        MC_CMD_MUM_OUT_READ_SENSORS_READING_OFST 0
 #define        MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
 #define        MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
+#define        MC_CMD_MUM_OUT_READ_SENSORS_STATE_OFST 0
 #define        MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
 #define        MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
+#define        MC_CMD_MUM_OUT_READ_SENSORS_TYPE_OFST 0
 #define        MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
 #define        MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
 
 #define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
 #define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_OFST 4
 #define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
 #define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_OFST 4
 #define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
 #define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
 
 /* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
 #define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
+#define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX_MCDI2 1020
 #define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
+#define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_NUM(len) (((len)-4)/1)
 /* in bytes */
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
 #define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM_MCDI2 1016
 
 /* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
 #define    MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
 /* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
 #define    MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
 #define    MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248
+#define    MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX_MCDI2 1016
 #define    MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
+#define    MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_NUM(len) (((len)-8)/8)
 /* Discrete (soldered) DDR resistor strap info */
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_OFST 0
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_OFST 0
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
 /* Number of SODIMM info records */
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
 #define       MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
+#define       MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM_MCDI2 126
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_OFST 8
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
 /* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
 #define          MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
 /* enum: Total number of SODIMM banks */
 #define          MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_OFST 8
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_OFST 8
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_OFST 8
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4
 #define          MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */
 #define          MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */
 /* enum: Values 5-15 are reserved for future usage */
 #define          MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_OFST 8
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_OFST 8
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_OFST 8
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4
 /* enum: No module present */
 /* enum: Modules may or may not be present, but cannot establish contact by I2C
  */
 #define          MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_OFST 8
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52
 #define        MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12
 
-/* MC_CMD_RESOURCE_SPECIFIER enum */
-/* enum: Any */
-#define          MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
-/* enum: None */
-#define          MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe
+/* MC_CMD_DYNAMIC_SENSORS_LIMITS structuredef: Set of sensor limits. This
+ * should match the equivalent structure in the sensor_query SPHINX service.
+ */
+#define    MC_CMD_DYNAMIC_SENSORS_LIMITS_LEN 24
+/* A value below this will trigger a warning event. */
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_OFST 0
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_LBN 0
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_WIDTH 32
+/* A value below this will trigger a critical event. */
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_OFST 4
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_LBN 32
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_WIDTH 32
+/* A value below this will shut down the card. */
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_OFST 8
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_LBN 64
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_WIDTH 32
+/* A value above this will trigger a warning event. */
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_OFST 12
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_LBN 96
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_WIDTH 32
+/* A value above this will trigger a critical event. */
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_OFST 16
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_LBN 128
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_WIDTH 32
+/* A value above this will shut down the card. */
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_OFST 20
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_LBN 160
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_WIDTH 32
+
+/* MC_CMD_DYNAMIC_SENSORS_DESCRIPTION structuredef: Description of a sensor.
+ * This should match the equivalent structure in the sensor_query SPHINX
+ * service.
+ */
+#define    MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LEN 64
+/* The handle used to identify the sensor in calls to
+ * MC_CMD_DYNAMIC_SENSORS_GET_VALUES
+ */
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_OFST 0
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_LBN 0
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_WIDTH 32
+/* A human-readable name for the sensor (zero terminated string, max 32 bytes)
+ */
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_OFST 4
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LEN 32
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LBN 32
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_WIDTH 256
+/* The type of the sensor device, and by implication the unit of that the
+ * values will be reported in
+ */
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_OFST 36
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_LEN 4
+/* enum: A voltage sensor. Unit is mV */
+#define          MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_VOLTAGE 0x0
+/* enum: A current sensor. Unit is mA */
+#define          MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_CURRENT 0x1
+/* enum: A power sensor. Unit is mW */
+#define          MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_POWER 0x2
+/* enum: A temperature sensor. Unit is Celsius */
+#define          MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TEMPERATURE 0x3
+/* enum: A cooling fan sensor. Unit is RPM */
+#define          MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_FAN 0x4
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_LBN 288
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_WIDTH 32
+/* A single MC_CMD_DYNAMIC_SENSORS_LIMITS structure */
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_OFST 40
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_LEN 24
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_LBN 320
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_WIDTH 192
+
+/* MC_CMD_DYNAMIC_SENSORS_READING structuredef: State and value of a sensor.
+ * This should match the equivalent structure in the sensor_query SPHINX
+ * service.
+ */
+#define    MC_CMD_DYNAMIC_SENSORS_READING_LEN 12
+/* The handle used to identify the sensor */
+#define       MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_OFST 0
+#define       MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_LBN 0
+#define       MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_WIDTH 32
+/* The current value of the sensor */
+#define       MC_CMD_DYNAMIC_SENSORS_READING_VALUE_OFST 4
+#define       MC_CMD_DYNAMIC_SENSORS_READING_VALUE_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_READING_VALUE_LBN 32
+#define       MC_CMD_DYNAMIC_SENSORS_READING_VALUE_WIDTH 32
+/* The sensor's condition, e.g. good, broken or removed */
+#define       MC_CMD_DYNAMIC_SENSORS_READING_STATE_OFST 8
+#define       MC_CMD_DYNAMIC_SENSORS_READING_STATE_LEN 4
+/* enum: Sensor working normally within limits */
+#define          MC_CMD_DYNAMIC_SENSORS_READING_OK 0x0
+/* enum: Warning threshold breached */
+#define          MC_CMD_DYNAMIC_SENSORS_READING_WARNING 0x1
+/* enum: Critical threshold breached */
+#define          MC_CMD_DYNAMIC_SENSORS_READING_CRITICAL 0x2
+/* enum: Fatal threshold breached */
+#define          MC_CMD_DYNAMIC_SENSORS_READING_FATAL 0x3
+/* enum: Sensor not working */
+#define          MC_CMD_DYNAMIC_SENSORS_READING_BROKEN 0x4
+/* enum: Sensor working but no reading available */
+#define          MC_CMD_DYNAMIC_SENSORS_READING_NO_READING 0x5
+/* enum: Sensor initialization failed */
+#define          MC_CMD_DYNAMIC_SENSORS_READING_INIT_FAILED 0x6
+#define       MC_CMD_DYNAMIC_SENSORS_READING_STATE_LBN 64
+#define       MC_CMD_DYNAMIC_SENSORS_READING_STATE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_DYNAMIC_SENSORS_LIST
+ * Return a complete list of handles for sensors currently managed by the MC,
+ * and a generation count for this version of the sensor table. On systems
+ * advertising the DYNAMIC_SENSORS capability bit, this replaces the
+ * MC_CMD_READ_SENSORS command. On multi-MC systems this may include sensors
+ * added by the NMC.
+ *
+ * Sensor handles are persistent for the lifetime of the sensor and are used to
+ * identify sensors in MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and
+ * MC_CMD_DYNAMIC_SENSORS_GET_VALUES.
+ *
+ * The generation count is maintained by the MC, is persistent across reboots
+ * and will be incremented each time the sensor table is modified. When the
+ * table is modified, a CODE_DYNAMIC_SENSORS_CHANGE event will be generated
+ * containing the new generation count. The driver should compare this against
+ * the current generation count, and if it is different, call
+ * MC_CMD_DYNAMIC_SENSORS_LIST again to update its copy of the sensor table.
+ *
+ * The sensor count is provided to allow a future path to supporting more than
+ * MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 sensors, i.e.
+ * the maximum number that will fit in a single response. As this is a fairly
+ * large number (253) it is not anticipated that this will be needed in the
+ * near future, so can currently be ignored.
+ *
+ * On Riverhead this command is implemented as a wrapper for `list` in the
+ * sensor_query SPHINX service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66
+#undef MC_CMD_0x66_PRIVILEGE_CTG
+
+#define MC_CMD_0x66_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DYNAMIC_SENSORS_LIST_IN msgrequest */
+#define    MC_CMD_DYNAMIC_SENSORS_LIST_IN_LEN 0
+
+/* MC_CMD_DYNAMIC_SENSORS_LIST_OUT msgresponse */
+#define    MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMIN 8
+#define    MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX 252
+#define    MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LEN(num) (8+4*(num))
+#define    MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_NUM(len) (((len)-8)/4)
+/* Generation count, which will be updated each time a sensor is added to or
+ * removed from the MC sensor table.
+ */
+#define       MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_OFST 0
+#define       MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_LEN 4
+/* Number of sensors managed by the MC. Note that in principle, this can be
+ * larger than the size of the HANDLES array.
+ */
+#define       MC_CMD_DYNAMIC_SENSORS_LIST_OUT_COUNT_OFST 4
+#define       MC_CMD_DYNAMIC_SENSORS_LIST_OUT_COUNT_LEN 4
+/* Array of sensor handles */
+#define       MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_OFST 8
+#define       MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MINNUM 0
+#define       MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MAXNUM 61
+#define       MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MAXNUM_MCDI2 253
+
+
+/***********************************/
+/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS
+ * Get descriptions for a set of sensors, specified as an array of sensor
+ * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST
+ *
+ * Any handles which do not correspond to a sensor currently managed by the MC
+ * will be dropped from the response. This may happen when a sensor table
+ * update is in progress, and effectively means the set of usable sensors is
+ * the intersection between the sets of sensors known to the driver and the MC.
+ *
+ * On Riverhead this command is implemented as a wrapper for
+ * `get_descriptions` in the sensor_query SPHINX service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67
+#undef MC_CMD_0x67_PRIVILEGE_CTG
+
+#define MC_CMD_0x67_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN msgrequest */
+#define    MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMIN 0
+#define    MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMAX 252
+#define    MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LEN(num) (0+4*(num))
+#define    MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_NUM(len) (((len)-0)/4)
+/* Array of sensor handles */
+#define       MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_OFST 0
+#define       MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MINNUM 0
+#define       MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MAXNUM 63
+#define       MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MAXNUM_MCDI2 255
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT msgresponse */
+#define    MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMIN 0
+#define    MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMAX 192
+#define    MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMAX_MCDI2 960
+#define    MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LEN(num) (0+64*(num))
+#define    MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_NUM(len) (((len)-0)/64)
+/* Array of MC_CMD_DYNAMIC_SENSORS_DESCRIPTION structures */
+#define       MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_OFST 0
+#define       MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_LEN 64
+#define       MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MINNUM 0
+#define       MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MAXNUM 3
+#define       MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MAXNUM_MCDI2 15
+
+
+/***********************************/
+/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS
+ * Read the state and value for a set of sensors, specified as an array of
+ * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST.
+ *
+ * In the case of a broken sensor, then the state of the response's
+ * MC_CMD_DYNAMIC_SENSORS_VALUE entry will be set to BROKEN, and any value
+ * provided should be treated as erroneous.
+ *
+ * Any handles which do not correspond to a sensor currently managed by the MC
+ * will be dropped from the response. This may happen when a sensor table
+ * update is in progress, and effectively means the set of usable sensors is
+ * the intersection between the sets of sensors known to the driver and the MC.
+ *
+ * On Riverhead this command is implemented as a wrapper for `get_readings`
+ * in the sensor_query SPHINX service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68
+#undef MC_CMD_0x68_PRIVILEGE_CTG
+
+#define MC_CMD_0x68_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN msgrequest */
+#define    MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMIN 0
+#define    MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMAX 252
+#define    MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LEN(num) (0+4*(num))
+#define    MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_NUM(len) (((len)-0)/4)
+/* Array of sensor handles */
+#define       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_OFST 0
+#define       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MINNUM 0
+#define       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM 63
+#define       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 255
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT msgresponse */
+#define    MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMIN 0
+#define    MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMAX 252
+#define    MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LEN(num) (0+12*(num))
+#define    MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_NUM(len) (((len)-0)/12)
+/* Array of MC_CMD_DYNAMIC_SENSORS_READING structures */
+#define       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_OFST 0
+#define       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_LEN 12
+#define       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MINNUM 0
+#define       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM 21
+#define       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM_MCDI2 85
+
+
+/***********************************/
+/* MC_CMD_EVENT_CTRL
+ * Configure which categories of unsolicited events the driver expects to
+ * receive (Riverhead).
+ */
+#define MC_CMD_EVENT_CTRL 0x69
+#undef MC_CMD_0x69_PRIVILEGE_CTG
+
+#define MC_CMD_0x69_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVENT_CTRL_IN msgrequest */
+#define    MC_CMD_EVENT_CTRL_IN_LENMIN 0
+#define    MC_CMD_EVENT_CTRL_IN_LENMAX 252
+#define    MC_CMD_EVENT_CTRL_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_EVENT_CTRL_IN_LEN(num) (0+4*(num))
+#define    MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_NUM(len) (((len)-0)/4)
+/* Array of event categories for which the driver wishes to receive events. */
+#define       MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_OFST 0
+#define       MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_LEN 4
+#define       MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MINNUM 0
+#define       MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM 63
+#define       MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM_MCDI2 255
+/* enum: Driver wishes to receive LINKCHANGE events. */
+#define          MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_LINKCHANGE 0x0
+/* enum: Driver wishes to receive SENSOR_CHANGE and SENSOR_STATE_CHANGE events.
+ */
+#define          MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_SENSOREVT 0x1
+/* enum: Driver wishes to receive receive errors. */
+#define          MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_RX_ERR 0x2
+/* enum: Driver wishes to receive transmit errors. */
+#define          MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_TX_ERR 0x3
+/* enum: Driver wishes to receive firmware alerts. */
+#define          MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_FWALERT 0x4
+/* enum: Driver wishes to receive reboot events. */
+#define          MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_MC_REBOOT 0x5
+
+/* MC_CMD_EVENT_CTRL_OUT msgresponse */
+#define    MC_CMD_EVENT_CTRL_OUT_LEN 0
 
 /* EVB_PORT_ID structuredef */
 #define    EVB_PORT_ID_LEN 4
 #define          NVRAM_PARTITION_TYPE_BUNDLE_METADATA 0x1e01
 /* enum: Bundle update non-volatile log output partition */
 #define          NVRAM_PARTITION_TYPE_BUNDLE_LOG 0x1e02
+/* enum: Partition for Solarflare gPXE bootrom installed via Bundle update. */
+#define          NVRAM_PARTITION_TYPE_EXPANSION_ROM_INTERNAL 0x1e03
 /* enum: Start of reserved value range (firmware may use for any purpose) */
 #define          NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
 /* enum: End of reserved value range (firmware may use for any purpose) */
 #define       LICENSED_FEATURES_MASK_LEN 8
 #define       LICENSED_FEATURES_MASK_LO_OFST 0
 #define       LICENSED_FEATURES_MASK_HI_OFST 4
+#define        LICENSED_FEATURES_RX_CUT_THROUGH_OFST 0
 #define        LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
 #define        LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define        LICENSED_FEATURES_PIO_OFST 0
 #define        LICENSED_FEATURES_PIO_LBN 1
 #define        LICENSED_FEATURES_PIO_WIDTH 1
+#define        LICENSED_FEATURES_EVQ_TIMER_OFST 0
 #define        LICENSED_FEATURES_EVQ_TIMER_LBN 2
 #define        LICENSED_FEATURES_EVQ_TIMER_WIDTH 1
+#define        LICENSED_FEATURES_CLOCK_OFST 0
 #define        LICENSED_FEATURES_CLOCK_LBN 3
 #define        LICENSED_FEATURES_CLOCK_WIDTH 1
+#define        LICENSED_FEATURES_RX_TIMESTAMPS_OFST 0
 #define        LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4
 #define        LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define        LICENSED_FEATURES_TX_TIMESTAMPS_OFST 0
 #define        LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5
 #define        LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define        LICENSED_FEATURES_RX_SNIFF_OFST 0
 #define        LICENSED_FEATURES_RX_SNIFF_LBN 6
 #define        LICENSED_FEATURES_RX_SNIFF_WIDTH 1
+#define        LICENSED_FEATURES_TX_SNIFF_OFST 0
 #define        LICENSED_FEATURES_TX_SNIFF_LBN 7
 #define        LICENSED_FEATURES_TX_SNIFF_WIDTH 1
+#define        LICENSED_FEATURES_PROXY_FILTER_OPS_OFST 0
 #define        LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8
 #define        LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define        LICENSED_FEATURES_EVENT_CUT_THROUGH_OFST 0
 #define        LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9
 #define        LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
 #define       LICENSED_FEATURES_MASK_LBN 0
 #define       LICENSED_V3_APPS_MASK_LEN 8
 #define       LICENSED_V3_APPS_MASK_LO_OFST 0
 #define       LICENSED_V3_APPS_MASK_HI_OFST 4
+#define        LICENSED_V3_APPS_ONLOAD_OFST 0
 #define        LICENSED_V3_APPS_ONLOAD_LBN 0
 #define        LICENSED_V3_APPS_ONLOAD_WIDTH 1
+#define        LICENSED_V3_APPS_PTP_OFST 0
 #define        LICENSED_V3_APPS_PTP_LBN 1
 #define        LICENSED_V3_APPS_PTP_WIDTH 1
+#define        LICENSED_V3_APPS_SOLARCAPTURE_PRO_OFST 0
 #define        LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2
 #define        LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1
+#define        LICENSED_V3_APPS_SOLARSECURE_OFST 0
 #define        LICENSED_V3_APPS_SOLARSECURE_LBN 3
 #define        LICENSED_V3_APPS_SOLARSECURE_WIDTH 1
+#define        LICENSED_V3_APPS_PERF_MONITOR_OFST 0
 #define        LICENSED_V3_APPS_PERF_MONITOR_LBN 4
 #define        LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1
+#define        LICENSED_V3_APPS_SOLARCAPTURE_LIVE_OFST 0
 #define        LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5
 #define        LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1
+#define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_OFST 0
 #define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6
 #define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1
+#define        LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_OFST 0
 #define        LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7
 #define        LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1
+#define        LICENSED_V3_APPS_TCP_DIRECT_OFST 0
 #define        LICENSED_V3_APPS_TCP_DIRECT_LBN 8
 #define        LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1
+#define        LICENSED_V3_APPS_LOW_LATENCY_OFST 0
 #define        LICENSED_V3_APPS_LOW_LATENCY_LBN 9
 #define        LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1
+#define        LICENSED_V3_APPS_SOLARCAPTURE_TAP_OFST 0
 #define        LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10
 #define        LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
+#define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_OFST 0
 #define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
 #define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
+#define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_OFST 0
 #define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
 #define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
+#define        LICENSED_V3_APPS_SCALEOUT_ONLOAD_OFST 0
 #define        LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13
 #define        LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1
+#define        LICENSED_V3_APPS_DSHBRD_OFST 0
 #define        LICENSED_V3_APPS_DSHBRD_LBN 14
 #define        LICENSED_V3_APPS_DSHBRD_WIDTH 1
+#define        LICENSED_V3_APPS_SCATRD_OFST 0
 #define        LICENSED_V3_APPS_SCATRD_LBN 15
 #define        LICENSED_V3_APPS_SCATRD_WIDTH 1
 #define       LICENSED_V3_APPS_MASK_LBN 0
 #define       LICENSED_V3_FEATURES_MASK_LEN 8
 #define       LICENSED_V3_FEATURES_MASK_LO_OFST 0
 #define       LICENSED_V3_FEATURES_MASK_HI_OFST 4
+#define        LICENSED_V3_FEATURES_RX_CUT_THROUGH_OFST 0
 #define        LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0
 #define        LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define        LICENSED_V3_FEATURES_PIO_OFST 0
 #define        LICENSED_V3_FEATURES_PIO_LBN 1
 #define        LICENSED_V3_FEATURES_PIO_WIDTH 1
+#define        LICENSED_V3_FEATURES_EVQ_TIMER_OFST 0
 #define        LICENSED_V3_FEATURES_EVQ_TIMER_LBN 2
 #define        LICENSED_V3_FEATURES_EVQ_TIMER_WIDTH 1
+#define        LICENSED_V3_FEATURES_CLOCK_OFST 0
 #define        LICENSED_V3_FEATURES_CLOCK_LBN 3
 #define        LICENSED_V3_FEATURES_CLOCK_WIDTH 1
+#define        LICENSED_V3_FEATURES_RX_TIMESTAMPS_OFST 0
 #define        LICENSED_V3_FEATURES_RX_TIMESTAMPS_LBN 4
 #define        LICENSED_V3_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define        LICENSED_V3_FEATURES_TX_TIMESTAMPS_OFST 0
 #define        LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN 5
 #define        LICENSED_V3_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define        LICENSED_V3_FEATURES_RX_SNIFF_OFST 0
 #define        LICENSED_V3_FEATURES_RX_SNIFF_LBN 6
 #define        LICENSED_V3_FEATURES_RX_SNIFF_WIDTH 1
+#define        LICENSED_V3_FEATURES_TX_SNIFF_OFST 0
 #define        LICENSED_V3_FEATURES_TX_SNIFF_LBN 7
 #define        LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1
+#define        LICENSED_V3_FEATURES_PROXY_FILTER_OPS_OFST 0
 #define        LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8
 #define        LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define        LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_OFST 0
 #define        LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_LBN 9
 #define        LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
 #define       LICENSED_V3_FEATURES_MASK_LBN 0
  */
 #define       RSS_MODE_HASH_SELECTOR_OFST 0
 #define       RSS_MODE_HASH_SELECTOR_LEN 1
+#define        RSS_MODE_HASH_SRC_ADDR_OFST 0
 #define        RSS_MODE_HASH_SRC_ADDR_LBN 0
 #define        RSS_MODE_HASH_SRC_ADDR_WIDTH 1
+#define        RSS_MODE_HASH_DST_ADDR_OFST 0
 #define        RSS_MODE_HASH_DST_ADDR_LBN 1
 #define        RSS_MODE_HASH_DST_ADDR_WIDTH 1
+#define        RSS_MODE_HASH_SRC_PORT_OFST 0
 #define        RSS_MODE_HASH_SRC_PORT_LBN 2
 #define        RSS_MODE_HASH_SRC_PORT_WIDTH 1
+#define        RSS_MODE_HASH_DST_PORT_OFST 0
 #define        RSS_MODE_HASH_DST_PORT_LBN 3
 #define        RSS_MODE_HASH_DST_PORT_WIDTH 1
 #define       RSS_MODE_HASH_SELECTOR_LBN 0
  * Get a dump of the MCPU registers
  */
 #define MC_CMD_READ_REGS 0x50
+#undef MC_CMD_0x50_PRIVILEGE_CTG
 
 #define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
  * end with an address for each 4k of host memory required to back the EVQ.
  */
 #define MC_CMD_INIT_EVQ 0x80
+#undef MC_CMD_0x80_PRIVILEGE_CTG
 
 #define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_INIT_EVQ_IN msgrequest */
 #define    MC_CMD_INIT_EVQ_IN_LENMIN 44
 #define    MC_CMD_INIT_EVQ_IN_LENMAX 548
+#define    MC_CMD_INIT_EVQ_IN_LENMAX_MCDI2 548
 #define    MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
+#define    MC_CMD_INIT_EVQ_IN_DMA_ADDR_NUM(len) (((len)-36)/8)
 /* Size, in entries */
 #define       MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
 #define       MC_CMD_INIT_EVQ_IN_SIZE_LEN 4
 /* tbd */
 #define       MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
 #define       MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4
+#define        MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_OFST 16
 #define        MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
 #define        MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
+#define        MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_OFST 16
 #define        MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
 #define        MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1
+#define        MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_OFST 16
 #define        MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2
 #define        MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1
+#define        MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_OFST 16
 #define        MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3
 #define        MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1
+#define        MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_OFST 16
 #define        MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4
 #define        MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
+#define        MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_OFST 16
 #define        MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
 #define        MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define        MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_OFST 16
 #define        MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
 #define        MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
 #define       MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
 #define       MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40
 #define       MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
 #define       MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64
+#define       MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM_MCDI2 64
 
 /* MC_CMD_INIT_EVQ_OUT msgresponse */
 #define    MC_CMD_INIT_EVQ_OUT_LEN 4
 /* MC_CMD_INIT_EVQ_V2_IN msgrequest */
 #define    MC_CMD_INIT_EVQ_V2_IN_LENMIN 44
 #define    MC_CMD_INIT_EVQ_V2_IN_LENMAX 548
+#define    MC_CMD_INIT_EVQ_V2_IN_LENMAX_MCDI2 548
 #define    MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num))
+#define    MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_NUM(len) (((len)-36)/8)
 /* Size, in entries */
 #define       MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
 #define       MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4
 /* tbd */
 #define       MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16
 #define       MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_OFST 16
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_OFST 16
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_WIDTH 1
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_OFST 16
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_LBN 2
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_WIDTH 1
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_OFST 16
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_LBN 3
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_WIDTH 1
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_OFST 16
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_LBN 4
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_WIDTH 1
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_OFST 16
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_LBN 5
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_WIDTH 1
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_OFST 16
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_LBN 6
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_WIDTH 1
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_OFST 16
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LBN 7
 #define        MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_WIDTH 4
 /* enum: All initialisation flags specified by host. */
  * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
  */
 #define          MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_OFST 16
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_LBN 11
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_WIDTH 1
 #define       MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
 #define       MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4
 /* enum: Disabled */
 #define       MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40
 #define       MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1
 #define       MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64
+#define       MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM_MCDI2 64
 
 /* MC_CMD_INIT_EVQ_V2_OUT msgresponse */
 #define    MC_CMD_INIT_EVQ_V2_OUT_LEN 8
 /* Actual configuration applied on the card */
 #define       MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4
 #define       MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4
+#define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_OFST 4
 #define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0
 #define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1
+#define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_OFST 4
 #define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1
 #define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_WIDTH 1
+#define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_OFST 4
 #define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_LBN 2
 #define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_WIDTH 1
+#define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_OFST 4
 #define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
 #define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
 
  * the RXQ.
  */
 #define MC_CMD_INIT_RXQ 0x81
+#undef MC_CMD_0x81_PRIVILEGE_CTG
 
 #define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  */
 #define    MC_CMD_INIT_RXQ_IN_LENMIN 36
 #define    MC_CMD_INIT_RXQ_IN_LENMAX 252
+#define    MC_CMD_INIT_RXQ_IN_LENMAX_MCDI2 1020
 #define    MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
+#define    MC_CMD_INIT_RXQ_IN_DMA_ADDR_NUM(len) (((len)-28)/8)
 /* Size, in entries */
 #define       MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
 #define       MC_CMD_INIT_RXQ_IN_SIZE_LEN 4
 /* There will be more flags here. */
 #define       MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
 #define       MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4
+#define        MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_OFST 16
 #define        MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_OFST 16
 #define        MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
 #define        MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define        MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_OFST 16
 #define        MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2
 #define        MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_RXQ_IN_CRC_MODE_OFST 16
 #define        MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3
 #define        MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_OFST 16
 #define        MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7
 #define        MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
+#define        MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_OFST 16
 #define        MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
 #define        MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+#define        MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_OFST 16
 #define        MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
 #define        MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_INIT_RXQ_IN_UNUSED_OFST 16
 #define        MC_CMD_INIT_RXQ_IN_UNUSED_LBN 10
 #define        MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32
 #define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
 #define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
+#define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124
 
 /* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode
  * flags
 #define       MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
 #define       MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4
 /* The value to put in the event data. Check hardware spec. for valid range.
- * This field is ignored if DMA_MODE == EQUAL_STRIDE_PACKED_STREAM or DMA_MODE
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
  * == PACKED_STREAM.
  */
 #define       MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
 /* There will be more flags here. */
 #define       MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
 #define       MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_OFST 16
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_OFST 16
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_OFST 16
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_OFST 16
 #define        MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3
 #define        MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_OFST 16
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_OFST 16
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_OFST 16
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_OFST 16
 #define        MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10
 #define        MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4
 /* enum: One packet per descriptor (for normal networking) */
  * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
  * firmware.
  */
+#define          MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
 #define          MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_OFST 16
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_OFST 16
 #define        MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
 #define        MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
 #define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */
 #define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */
 #define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */
 #define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_OFST 16
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_OFST 16
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19
 #define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_OFST 16
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_LBN 20
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
 #define       MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4
 #define       MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_OFST 4
 #define       MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_LEN 4
 /* The value to put in the event data. Check hardware spec. for valid range.
- * This field is ignored if DMA_MODE == EQUAL_STRIDE_PACKED_STREAM or DMA_MODE
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
  * == PACKED_STREAM.
  */
 #define       MC_CMD_INIT_RXQ_V3_IN_LABEL_OFST 8
 /* There will be more flags here. */
 #define       MC_CMD_INIT_RXQ_V3_IN_FLAGS_OFST 16
 #define       MC_CMD_INIT_RXQ_V3_IN_FLAGS_LEN 4
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_OFST 16
 #define        MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_OFST 16
 #define        MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_LBN 1
 #define        MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_OFST 16
 #define        MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_LBN 2
 #define        MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_OFST 16
 #define        MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_LBN 3
 #define        MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_OFST 16
 #define        MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_LBN 7
 #define        MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_OFST 16
 #define        MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_LBN 8
 #define        MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_OFST 16
 #define        MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_LBN 9
 #define        MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_OFST 16
 #define        MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_LBN 10
 #define        MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_WIDTH 4
 /* enum: One packet per descriptor (for normal networking) */
  * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
  * firmware.
  */
+#define          MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
 #define          MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_OFST 16
 #define        MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_LBN 14
 #define        MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_OFST 16
 #define        MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
 #define        MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
 #define          MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_1M 0x0 /* enum */
 #define          MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_256K 0x2 /* enum */
 #define          MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_128K 0x3 /* enum */
 #define          MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_64K 0x4 /* enum */
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_OFST 16
 #define        MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
 #define        MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_OFST 16
 #define        MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_LBN 19
 #define        MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_OFST 16
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_LBN 20
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20
 #define       MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4
 #define       MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_OFST 540
 #define       MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_LEN 4
 /* The number of packet buffers that will be contained within each
- * EQUAL_STRIDE_PACKED_STREAM format bucket supplied by the driver. This field
- * is ignored unless DMA_MODE == EQUAL_STRIDE_PACKED_STREAM.
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field
+ * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
  */
 #define       MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544
 #define       MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4
 /* The length in bytes of the area in each packet buffer that can be written to
  * by the adapter. This is used to store the packet prefix and the packet
  * payload. This length does not include any end padding added by the driver.
- * This field is ignored unless DMA_MODE == EQUAL_STRIDE_PACKED_STREAM.
+ * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
  */
 #define       MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_OFST 548
 #define       MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_LEN 4
 /* The length in bytes of a single packet buffer within a
- * EQUAL_STRIDE_PACKED_STREAM format bucket. This field is ignored unless
- * DMA_MODE == EQUAL_STRIDE_PACKED_STREAM.
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
  */
 #define       MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_OFST 552
 #define       MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_LEN 4
  * there are no RX descriptors available. If the timeout is reached and there
  * are still no descriptors then the packet will be dropped. A timeout of 0
  * means the datapath will never be blocked. This field is ignored unless
- * DMA_MODE == EQUAL_STRIDE_PACKED_STREAM.
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
  */
 #define       MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556
 #define       MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4
 
+/* MC_CMD_INIT_RXQ_V4_IN msgrequest: INIT_RXQ request with new field required
+ * for systems with a QDMA (currently, Riverhead)
+ */
+#define    MC_CMD_INIT_RXQ_V4_IN_LEN 564
+/* Size, in entries */
+#define       MC_CMD_INIT_RXQ_V4_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_RXQ_V4_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
+ */
+#define       MC_CMD_INIT_RXQ_V4_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_RXQ_V4_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
+#define       MC_CMD_INIT_RXQ_V4_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_RXQ_V4_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define       MC_CMD_INIT_RXQ_V4_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_RXQ_V4_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define       MC_CMD_INIT_RXQ_V4_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_RXQ_V4_IN_FLAGS_LEN 4
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_LBN 0
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_LBN 1
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_LBN 2
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_LBN 3
+#define        MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_LBN 7
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_LBN 8
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_LBN 10
+#define        MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define          MC_CMD_INIT_RXQ_V4_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define          MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define          MC_CMD_INIT_RXQ_V4_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define          MC_CMD_INIT_RXQ_V4_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define        MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define          MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_1M 0x0 /* enum */
+#define          MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_512K 0x1 /* enum */
+#define          MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_256K 0x2 /* enum */
+#define          MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_128K 0x3 /* enum */
+#define          MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_64K 0x4 /* enum */
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_LBN 20
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define       MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define       MC_CMD_INIT_RXQ_V4_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_RXQ_V4_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define       MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_OFST 28
+#define       MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_OFST 28
+#define       MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_OFST 32
+#define       MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define       MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_OFST 540
+#define       MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_LEN 4
+/* The number of packet buffers that will be contained within each
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field
+ * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define       MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544
+#define       MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4
+/* The length in bytes of the area in each packet buffer that can be written to
+ * by the adapter. This is used to store the packet prefix and the packet
+ * payload. This length does not include any end padding added by the driver.
+ * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define       MC_CMD_INIT_RXQ_V4_IN_ES_MAX_DMA_LEN_OFST 548
+#define       MC_CMD_INIT_RXQ_V4_IN_ES_MAX_DMA_LEN_LEN 4
+/* The length in bytes of a single packet buffer within a
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define       MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_STRIDE_OFST 552
+#define       MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_STRIDE_LEN 4
+/* The maximum time in nanoseconds that the datapath will be backpressured if
+ * there are no RX descriptors available. If the timeout is reached and there
+ * are still no descriptors then the packet will be dropped. A timeout of 0
+ * means the datapath will never be blocked. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define       MC_CMD_INIT_RXQ_V4_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556
+#define       MC_CMD_INIT_RXQ_V4_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4
+/* V4 message data */
+#define       MC_CMD_INIT_RXQ_V4_IN_V4_DATA_OFST 560
+#define       MC_CMD_INIT_RXQ_V4_IN_V4_DATA_LEN 4
+/* Size in bytes of buffers attached to descriptors posted to this queue. Set
+ * to zero if using this message on non-QDMA based platforms. Currently in
+ * Riverhead there is a global limit of eight different buffer sizes across all
+ * active queues. A 2KB and 4KB buffer is guaranteed to be available, but a
+ * request for a different buffer size will fail if there are already eight
+ * other buffer sizes in use. In future Riverhead this limit will go away and
+ * any size will be accepted.
+ */
+#define       MC_CMD_INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES_OFST 560
+#define       MC_CMD_INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES_LEN 4
+
+/* MC_CMD_INIT_RXQ_V5_IN msgrequest: INIT_RXQ request with ability to request a
+ * different RX packet prefix
+ */
+#define    MC_CMD_INIT_RXQ_V5_IN_LEN 568
+/* Size, in entries */
+#define       MC_CMD_INIT_RXQ_V5_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_RXQ_V5_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
+ */
+#define       MC_CMD_INIT_RXQ_V5_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_RXQ_V5_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
+#define       MC_CMD_INIT_RXQ_V5_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_RXQ_V5_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define       MC_CMD_INIT_RXQ_V5_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_RXQ_V5_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define       MC_CMD_INIT_RXQ_V5_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_RXQ_V5_IN_FLAGS_LEN 4
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_LBN 0
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_LBN 1
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_LBN 2
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_LBN 3
+#define        MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_LBN 7
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_LBN 8
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_LBN 10
+#define        MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define          MC_CMD_INIT_RXQ_V5_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define          MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define          MC_CMD_INIT_RXQ_V5_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define          MC_CMD_INIT_RXQ_V5_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define        MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define          MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_1M 0x0 /* enum */
+#define          MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_512K 0x1 /* enum */
+#define          MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_256K 0x2 /* enum */
+#define          MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_128K 0x3 /* enum */
+#define          MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_64K 0x4 /* enum */
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_LBN 20
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define       MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define       MC_CMD_INIT_RXQ_V5_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_RXQ_V5_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define       MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_OFST 28
+#define       MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_OFST 28
+#define       MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_OFST 32
+#define       MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define       MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_OFST 540
+#define       MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_LEN 4
+/* The number of packet buffers that will be contained within each
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field
+ * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define       MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544
+#define       MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4
+/* The length in bytes of the area in each packet buffer that can be written to
+ * by the adapter. This is used to store the packet prefix and the packet
+ * payload. This length does not include any end padding added by the driver.
+ * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define       MC_CMD_INIT_RXQ_V5_IN_ES_MAX_DMA_LEN_OFST 548
+#define       MC_CMD_INIT_RXQ_V5_IN_ES_MAX_DMA_LEN_LEN 4
+/* The length in bytes of a single packet buffer within a
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define       MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_STRIDE_OFST 552
+#define       MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_STRIDE_LEN 4
+/* The maximum time in nanoseconds that the datapath will be backpressured if
+ * there are no RX descriptors available. If the timeout is reached and there
+ * are still no descriptors then the packet will be dropped. A timeout of 0
+ * means the datapath will never be blocked. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define       MC_CMD_INIT_RXQ_V5_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556
+#define       MC_CMD_INIT_RXQ_V5_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4
+/* V4 message data */
+#define       MC_CMD_INIT_RXQ_V5_IN_V4_DATA_OFST 560
+#define       MC_CMD_INIT_RXQ_V5_IN_V4_DATA_LEN 4
+/* Size in bytes of buffers attached to descriptors posted to this queue. Set
+ * to zero if using this message on non-QDMA based platforms. Currently in
+ * Riverhead there is a global limit of eight different buffer sizes across all
+ * active queues. A 2KB and 4KB buffer is guaranteed to be available, but a
+ * request for a different buffer size will fail if there are already eight
+ * other buffer sizes in use. In future Riverhead this limit will go away and
+ * any size will be accepted.
+ */
+#define       MC_CMD_INIT_RXQ_V5_IN_BUFFER_SIZE_BYTES_OFST 560
+#define       MC_CMD_INIT_RXQ_V5_IN_BUFFER_SIZE_BYTES_LEN 4
+/* Prefix id for the RX prefix format to use on packets delivered this queue.
+ * Zero is always a valid prefix id and means the default prefix format
+ * documented for the platform. Other prefix ids can be obtained by calling
+ * MC_CMD_GET_RX_PREFIX_ID with a requested set of prefix fields.
+ */
+#define       MC_CMD_INIT_RXQ_V5_IN_RX_PREFIX_ID_OFST 564
+#define       MC_CMD_INIT_RXQ_V5_IN_RX_PREFIX_ID_LEN 4
+
 /* MC_CMD_INIT_RXQ_OUT msgresponse */
 #define    MC_CMD_INIT_RXQ_OUT_LEN 0
 
 /* MC_CMD_INIT_RXQ_V3_OUT msgresponse */
 #define    MC_CMD_INIT_RXQ_V3_OUT_LEN 0
 
+/* MC_CMD_INIT_RXQ_V4_OUT msgresponse */
+#define    MC_CMD_INIT_RXQ_V4_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_V5_OUT msgresponse */
+#define    MC_CMD_INIT_RXQ_V5_OUT_LEN 0
+
 
 /***********************************/
 /* MC_CMD_INIT_TXQ
  */
 #define MC_CMD_INIT_TXQ 0x82
+#undef MC_CMD_0x82_PRIVILEGE_CTG
 
 #define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  */
 #define    MC_CMD_INIT_TXQ_IN_LENMIN 36
 #define    MC_CMD_INIT_TXQ_IN_LENMAX 252
+#define    MC_CMD_INIT_TXQ_IN_LENMAX_MCDI2 1020
 #define    MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
+#define    MC_CMD_INIT_TXQ_IN_DMA_ADDR_NUM(len) (((len)-28)/8)
 /* Size, in entries */
 #define       MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
 #define       MC_CMD_INIT_TXQ_IN_SIZE_LEN 4
 /* There will be more flags here. */
 #define       MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
 #define       MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4
+#define        MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_OFST 16
 #define        MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_OFST 16
 #define        MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
 #define        MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_OFST 16
 #define        MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2
 #define        MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_OFST 16
 #define        MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3
 #define        MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_CRC_MODE_OFST 16
 #define        MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4
 #define        MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_OFST 16
 #define        MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8
 #define        MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_OFST 16
 #define        MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
 #define        MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_OFST 16
 #define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
 #define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_OFST 16
 #define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
 #define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32
 #define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
 #define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
+#define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124
 
 /* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode
  * flags
 /* There will be more flags here. */
 #define       MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
 #define       MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_OFST 16
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_OFST 16
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_OFST 16
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_OFST 16
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_OFST 16
 #define        MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4
 #define        MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_OFST 16
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_OFST 16
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_OFST 16
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_OFST 16
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_OFST 16
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_OFST 16
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_OFST 16
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14
 #define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_OFST 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_LBN 15
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_OFST 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_LBN 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_WIDTH 1
 /* Owner ID to use if in buffer mode (zero if physical) */
 #define       MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
 #define       MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4
 #define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
 #define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1
 #define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM_MCDI2 64
 /* Flags related to Qbb flow control mode. */
 #define       MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
 #define       MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_OFST 540
 #define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
 #define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_OFST 540
 #define        MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
 #define        MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3
 
  * or the operation will fail with EBUSY
  */
 #define MC_CMD_FINI_EVQ 0x83
+#undef MC_CMD_0x83_PRIVILEGE_CTG
 
 #define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Teardown a RXQ.
  */
 #define MC_CMD_FINI_RXQ 0x84
+#undef MC_CMD_0x84_PRIVILEGE_CTG
 
 #define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Teardown a TXQ.
  */
 #define MC_CMD_FINI_TXQ 0x85
+#undef MC_CMD_0x85_PRIVILEGE_CTG
 
 #define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Generate an event on an EVQ belonging to the function issuing the command.
  */
 #define MC_CMD_DRIVER_EVENT 0x86
+#undef MC_CMD_0x86_PRIVILEGE_CTG
 
 #define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * MC_CMD_SET_FUNC, which remains available for Siena but now deprecated.
  */
 #define MC_CMD_PROXY_CMD 0x5b
+#undef MC_CMD_0x5b_PRIVILEGE_CTG
 
 #define MC_CMD_0x5b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* The handle of the target function. */
 #define       MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
 #define       MC_CMD_PROXY_CMD_IN_TARGET_LEN 4
+#define        MC_CMD_PROXY_CMD_IN_TARGET_PF_OFST 0
 #define        MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
 #define        MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
+#define        MC_CMD_PROXY_CMD_IN_TARGET_VF_OFST 0
 #define        MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
 #define        MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
 #define          MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
  * a designated admin function
  */
 #define MC_CMD_PROXY_CONFIGURE 0x58
+#undef MC_CMD_0x58_PRIVILEGE_CTG
 
 #define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 #define    MC_CMD_PROXY_CONFIGURE_IN_LEN 108
 #define       MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
 #define       MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4
+#define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_OFST 0
 #define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
 #define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
 #define    MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
 #define       MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4
+#define        MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_OFST 0
 #define        MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
 #define        MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
 /* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
  * MC_CMD_PROXY_CONFIGURE).
  */
 #define MC_CMD_PROXY_COMPLETE 0x5f
+#undef MC_CMD_0x5f_PRIVILEGE_CTG
 
 #define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * cannot do so). The buffer table entries will initially be zeroed.
  */
 #define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+#undef MC_CMD_0x87_PRIVILEGE_CTG
 
 #define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
 
  * Reprogram a set of buffer table entries in the specified chunk.
  */
 #define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
+#undef MC_CMD_0x88_PRIVILEGE_CTG
 
 #define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
 
 /* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
 #define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
 #define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
+#define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX_MCDI2 268
 #define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
+#define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_NUM(len) (((len)-12)/8)
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
 /* ID */
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
 #define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM_MCDI2 32
 
 /* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
 #define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
 /* MC_CMD_FREE_BUFTBL_CHUNK
  */
 #define MC_CMD_FREE_BUFTBL_CHUNK 0x89
+#undef MC_CMD_0x89_PRIVILEGE_CTG
 
 #define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
 
  * Multiplexed MCDI call for filter operations
  */
 #define MC_CMD_FILTER_OP 0x8a
+#undef MC_CMD_0x8a_PRIVILEGE_CTG
 
 #define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* fields to include in match criteria */
 #define       MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
 #define       MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4
+#define        MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_OFST 16
 #define        MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
 #define        MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_DST_IP_OFST 16
 #define        MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
 #define        MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_OFST 16
 #define        MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2
 #define        MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_OFST 16
 #define        MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3
 #define        MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_OFST 16
 #define        MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4
 #define        MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_OFST 16
 #define        MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5
 #define        MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_OFST 16
 #define        MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6
 #define        MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_OFST 16
 #define        MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7
 #define        MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_OFST 16
 #define        MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8
 #define        MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_OFST 16
 #define        MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9
 #define        MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_OFST 16
 #define        MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10
 #define        MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_OFST 16
 #define        MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
 #define        MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
 #define        MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
 #define        MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16
 #define        MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
 #define        MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
 /* receive destination */
 #define       MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4
 /* enum: request default behaviour (based on filter type) */
 #define          MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
+#define        MC_CMD_FILTER_OP_IN_TX_DEST_MAC_OFST 40
 #define        MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
 #define        MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_TX_DEST_PM_OFST 40
 #define        MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1
 #define        MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1
 /* source MAC address to match (as bytes in network order) */
 /* fields to include in match criteria */
 #define       MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
 #define       MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
 #define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
 /* receive destination */
 #define       MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4
 /* enum: request default behaviour (based on filter type) */
 #define          MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_OFST 40
 #define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
 #define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_OFST 40
 #define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1
 #define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1
 /* source MAC address to match (as bytes in network order) */
  */
 #define       MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
 #define       MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_OFST 72
 #define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
 #define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_OFST 72
 #define        MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
 #define        MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8
 /* enum: Match VXLAN traffic with this VNI */
 #define          MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1
 /* enum: Reserved for experimental development use */
 #define          MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_OFST 72
 #define        MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0
 #define        MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_OFST 72
 #define        MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24
 #define        MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8
 /* enum: Match NVGRE traffic with this VSID */
 /* fields to include in match criteria */
 #define       MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_OFST 16
 #define       MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_LEN 4
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_LBN 0
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_LBN 1
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_LBN 2
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_LBN 3
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_LBN 4
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_LBN 5
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_LBN 6
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_LBN 7
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_LBN 8
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_LBN 9
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_LBN 10
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_LBN 11
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_LBN 12
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_LBN 13
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_LBN 14
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_LBN 15
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_LBN 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_LBN 17
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_LBN 19
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_LBN 21
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_LBN 22
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_LBN 23
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
 #define        MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
 /* receive destination */
 #define       MC_CMD_FILTER_OP_V3_IN_TX_DEST_LEN 4
 /* enum: request default behaviour (based on filter type) */
 #define          MC_CMD_FILTER_OP_V3_IN_TX_DEST_DEFAULT 0xffffffff
+#define        MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_OFST 40
 #define        MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_LBN 0
 #define        MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_OFST 40
 #define        MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_LBN 1
 #define        MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_WIDTH 1
 /* source MAC address to match (as bytes in network order) */
  */
 #define       MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_OFST 72
 #define       MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_LEN 4
+#define        MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_OFST 72
 #define        MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_LBN 0
 #define        MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_WIDTH 24
+#define        MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_OFST 72
 #define        MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_LBN 24
 #define        MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_WIDTH 8
 /* enum: Match VXLAN traffic with this VNI */
 #define          MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_GENEVE 0x1
 /* enum: Reserved for experimental development use */
 #define          MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define        MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_OFST 72
 #define        MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_LBN 0
 #define        MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_WIDTH 24
+#define        MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_OFST 72
 #define        MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_LBN 24
 #define        MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_WIDTH 8
 /* enum: Match NVGRE traffic with this VSID */
  * support the DPDK rte_flow "MARK" action.
  */
 #define          MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK 0x2
-/* the mark value for MATCH_ACTION_MARK */
+/* the mark value for MATCH_ACTION_MARK. Requesting a value larger than the
+ * maximum (obtained from MC_CMD_GET_CAPABILITIES_V5/FILTER_ACTION_MARK_MAX)
+ * will cause the filter insertion to fail with EINVAL.
+ */
 #define       MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_OFST 176
 #define       MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_LEN 4
 
  * Get information related to the parser-dispatcher subsystem
  */
 #define MC_CMD_GET_PARSER_DISP_INFO 0xe4
+#undef MC_CMD_0xe4_PRIVILEGE_CTG
 
 #define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * frames (Medford only)
  */
 #define          MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4
+/* enum: read the list of supported matches for the encapsulation detection
+ * rules inserted by MC_CMD_VNIC_ENCAP_RULE_ADD. (ef100 and later)
+ */
+#define          MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES 0x5
 
 /* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
 #define    MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
 #define    MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252
+#define    MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
+#define    MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_NUM(len) (((len)-8)/4)
 /* identifies the type of operation requested */
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
 #define       MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
+#define       MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM_MCDI2 253
 
 /* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */
 #define    MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
 /* bitfield of filter insertion restrictions */
 #define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
 #define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4
+#define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_OFST 4
 #define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
 #define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
 
+/* MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT msgresponse: This response is
+ * returned if a MC_CMD_GET_PARSER_DISP_INFO_IN request is sent with OP value
+ * OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES. It contains information about the
+ * supported match types that can be used in the encapsulation detection rules
+ * inserted by MC_CMD_VNIC_ENCAP_RULE_ADD.
+ */
+#define    MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMIN 8
+#define    MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMAX 252
+#define    MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LEN(num) (8+4*(num))
+#define    MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_NUM(len) (((len)-8)/4)
+/* The op code OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES is returned. */
+#define       MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_OP_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_OP_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* number of supported match types */
+#define       MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+#define       MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_NUM_SUPPORTED_MATCHES_LEN 4
+/* array of supported match types (valid MATCH_FLAGS values for
+ * MC_CMD_VNIC_ENCAP_RULE_ADD) sorted in decreasing priority order
+ */
+#define       MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_OFST 8
+#define       MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_LEN 4
+#define       MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MINNUM 0
+#define       MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM 61
+#define       MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM_MCDI2 253
+
 
 /***********************************/
 /* MC_CMD_PARSER_DISP_RW
  * permitted.
  */
 #define MC_CMD_PARSER_DISP_RW 0xe5
+#undef MC_CMD_0xe5_PRIVILEGE_CTG
 
 #define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * Get number of PFs on the device.
  */
 #define MC_CMD_GET_PF_COUNT 0xb6
+#undef MC_CMD_0xb6_PRIVILEGE_CTG
 
 #define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Get port assignment for current PCI function.
  */
 #define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
+#undef MC_CMD_0xb8_PRIVILEGE_CTG
 
 #define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Set port assignment for current PCI function.
  */
 #define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
+#undef MC_CMD_0xb9_PRIVILEGE_CTG
 
 #define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * Allocate VIs for current PCI function.
  */
 #define MC_CMD_ALLOC_VIS 0x8b
+#undef MC_CMD_0x8b_PRIVILEGE_CTG
 
 #define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * but not freed.
  */
 #define MC_CMD_FREE_VIS 0x8c
+#undef MC_CMD_0x8c_PRIVILEGE_CTG
 
 #define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Get SRIOV config for this PF.
  */
 #define MC_CMD_GET_SRIOV_CFG 0xba
+#undef MC_CMD_0xba_PRIVILEGE_CTG
 
 #define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 #define       MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4
 #define       MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
 #define       MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4
+#define        MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_OFST 8
 #define        MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
 #define        MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
 /* RID offset of first VF from PF. */
  * Set SRIOV config for this PF.
  */
 #define MC_CMD_SET_SRIOV_CFG 0xbb
+#undef MC_CMD_0xbb_PRIVILEGE_CTG
 
 #define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 #define       MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
 #define       MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
 #define       MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
+#define        MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_OFST 8
 #define        MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
 #define        MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
 /* RID offset of first VF from PF, or 0 for no change, or
  * function.
  */
 #define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+#undef MC_CMD_0x8d_PRIVILEGE_CTG
 
 #define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * For CmdClient use. Dump pertinent information on a specific absolute VI.
  */
 #define MC_CMD_DUMP_VI_STATE 0x8e
+#undef MC_CMD_0x8e_PRIVILEGE_CTG
 
 #define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* Combined metadata field. */
 #define       MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
 #define       MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_OFST 28
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_OFST 28
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_OFST 28
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
 /* TXDPCPU raw table data for queue. */
 #define       MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
 #define       MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
 #define       MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_OFST 56
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_OFST 56
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_OFST 56
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_OFST 56
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_OFST 56
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
 /* RXDPCPU raw table data for queue. */
 #define       MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
 #define       MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
 #define       MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_OFST 88
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_OFST 88
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_OFST 88
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_OFST 88
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
 #define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
 
  * Allocate a push I/O buffer for later use with a tx queue.
  */
 #define MC_CMD_ALLOC_PIOBUF 0x8f
+#undef MC_CMD_0x8f_PRIVILEGE_CTG
 
 #define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
 
  * Free a push I/O buffer.
  */
 #define MC_CMD_FREE_PIOBUF 0x90
+#undef MC_CMD_0x90_PRIVILEGE_CTG
 
 #define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
 
  * Get TLP steering and ordering information for a VI.
  */
 #define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
+#undef MC_CMD_0xb0_PRIVILEGE_CTG
 
 #define MC_CMD_0xb0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Set TLP steering and ordering information for a VI.
  */
 #define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
+#undef MC_CMD_0xb1_PRIVILEGE_CTG
 
 #define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Get global PCIe steering and transaction processing configuration.
  */
 #define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
+#undef MC_CMD_0xbc_PRIVILEGE_CTG
 
 #define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* Amalgamated TLP info word. */
 #define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
 #define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_OFST 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_OFST 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_OFST 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_OFST 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_OFST 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_OFST 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_OFST 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_OFST 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_OFST 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_OFST 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_OFST 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_OFST 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_OFST 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_OFST 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_OFST 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_OFST 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_OFST 4
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
 #define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23
 
  * Set global PCIe steering and transaction processing configuration.
  */
 #define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
+#undef MC_CMD_0xbd_PRIVILEGE_CTG
 
 #define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* Amalgamated TLP info word. */
 #define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
 #define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_OFST 4
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_OFST 4
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_OFST 4
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_OFST 4
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_OFST 4
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_OFST 4
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_OFST 4
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_OFST 4
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_OFST 4
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_OFST 4
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_OFST 4
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_OFST 4
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_OFST 4
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_OFST 4
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
 #define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22
 
  * Download a new set of images to the satellite CPUs from the host.
  */
 #define MC_CMD_SATELLITE_DOWNLOAD 0x91
+#undef MC_CMD_0x91_PRIVILEGE_CTG
 
 #define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  */
 #define    MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
 #define    MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
+#define    MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX_MCDI2 1020
 #define    MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
+#define    MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_NUM(len) (((len)-16)/4)
 /* Download phase. (Note: the IDLE phase is used internally and is never valid
  * in a command from the host.)
  */
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
 #define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM_MCDI2 251
 
 /* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
 #define    MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
  * reference inherent device capabilities as opposed to current NVRAM config.
  */
 #define MC_CMD_GET_CAPABILITIES 0xbe
+#undef MC_CMD_0xbe_PRIVILEGE_CTG
 
 #define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* First word of flags. */
 #define       MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
 #define       MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
 #define        MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN 5
 #define        MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
 #define        MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_LBN 7
 #define        MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_LBN 8
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN 9
 #define        MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
 #define        MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
 #define        MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
 #define        MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_QBB_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14
 #define        MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
 #define        MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
 #define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28
 #define        MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
 #define        MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_EVB_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
 #define        MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31
 #define        MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1
 /* RxDPCPU firmware id. */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103
 #define       MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
 #define       MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_OFST 8
 #define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_OFST 8
 #define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
 /* enum: reserved value - do not use (may indicate alternative interpretation
  * development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
 /* enum: RX PD firmware with approximately Siena-compatible behaviour
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
 #define       MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
 #define       MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_OFST 10
 #define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_OFST 10
 #define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
 /* enum: reserved value - do not use (may indicate alternative interpretation
  * development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
 /* enum: TX PD firmware with approximately Siena-compatible behaviour
  * (Huntington development only)
  */
 /* First word of flags. */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_LBN 5
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_LBN 7
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_LBN 8
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_LBN 9
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_LBN 13
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_LBN 14
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_LBN 16
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_LBN 17
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_LBN 18
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_LBN 19
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_LBN 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_LBN 21
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_LBN 22
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN 23
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_LBN 24
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_LBN 25
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_LBN 26
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_LBN 27
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_LBN 28
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_LBN 30
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1
 /* RxDPCPU firmware id. */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_OFST 8
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_OFST 8
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
 /* enum: reserved value - do not use (may indicate alternative interpretation
  * development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
 /* enum: RX PD firmware with approximately Siena-compatible behaviour
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_OFST 10
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_OFST 10
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
 /* enum: reserved value - do not use (may indicate alternative interpretation
  * development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
 /* enum: TX PD firmware with approximately Siena-compatible behaviour
  * (Huntington development only)
  */
 /* Second word of flags. Not present on older firmware (check the length). */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_LBN 5
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN 7
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN 8
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_LBN 9
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_LBN 10
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_LBN 11
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_LBN 19
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_LBN 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_LBN 22
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_LBN 24
 #define        MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_WIDTH 1
-/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
- * on older firmware (check the length).
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
  */
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
 #define       MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
 /* First word of flags. */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_LBN 5
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_LBN 7
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_LBN 8
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_LBN 9
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_LBN 13
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_LBN 14
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_LBN 16
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_LBN 17
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_LBN 18
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_LBN 19
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_LBN 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_LBN 21
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_LBN 22
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_LBN 23
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_LBN 24
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_LBN 25
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_LBN 26
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_LBN 27
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_LBN 28
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_LBN 30
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_LBN 31
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_WIDTH 1
 /* RxDPCPU firmware id. */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_OFST 8
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_OFST 8
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
 /* enum: reserved value - do not use (may indicate alternative interpretation
  * development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
 /* enum: RX PD firmware with approximately Siena-compatible behaviour
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_OFST 10
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_OFST 10
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
 /* enum: reserved value - do not use (may indicate alternative interpretation
  * development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
 /* enum: TX PD firmware with approximately Siena-compatible behaviour
  * (Huntington development only)
  */
 /* Second word of flags. Not present on older firmware (check the length). */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_LBN 2
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_LBN 3
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_LBN 4
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_LBN 5
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_LBN 7
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_LBN 8
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_LBN 9
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_LBN 10
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_LBN 11
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_LBN 19
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_LBN 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_LBN 22
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_LBN 24
 #define        MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_WIDTH 1
-/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
- * on older firmware (check the length).
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
  */
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
 #define       MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
 /* First word of flags. */
 #define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0
 #define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_OFST 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1
 /* RxDPCPU firmware id. */
 #define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103
 #define       MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8
 #define       MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_OFST 8
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_OFST 8
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
 /* enum: reserved value - do not use (may indicate alternative interpretation
  * development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
 /* enum: RX PD firmware with approximately Siena-compatible behaviour
  * (Huntington development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
 #define       MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10
 #define       MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_OFST 10
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_OFST 10
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
 /* enum: reserved value - do not use (may indicate alternative interpretation
  * development only)
  */
 #define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
 /* enum: TX PD firmware with approximately Siena-compatible behaviour
  * (Huntington development only)
  */
 /* Second word of flags. Not present on older firmware (check the length). */
 #define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20
 #define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_LBN 19
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_LBN 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_LBN 22
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_OFST 20
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_LBN 24
 #define        MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_WIDTH 1
-/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
- * on older firmware (check the length).
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
  */
 #define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
 #define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
 #define       MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76
 #define       MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2
 
-
-/***********************************/
-/* MC_CMD_V2_EXTN
- * Encapsulation for a v2 extended command
- */
-#define MC_CMD_V2_EXTN 0x7f
-
-/* MC_CMD_V2_EXTN_IN msgrequest */
-#define    MC_CMD_V2_EXTN_IN_LEN 4
-/* the extended command number */
-#define       MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0
-#define       MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15
-#define       MC_CMD_V2_EXTN_IN_UNUSED_LBN 15
-#define       MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1
-/* the actual length of the encapsulated command (which is not in the v1
- * header)
- */
-#define       MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
-#define       MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
-#define       MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
-#define       MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 2
-/* Type of command/response */
-#define       MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN 28
-#define       MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_WIDTH 4
-/* enum: MCDI command directed to or response originating from the MC. */
-#define          MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_MC 0x0
-/* enum: MCDI command directed to a TSA controller. MCDI responses of this type
- * are not defined.
+/* MC_CMD_GET_CAPABILITIES_V5_OUT msgresponse */
+#define    MC_CMD_GET_CAPABILITIES_V5_OUT_LEN 84
+/* First word of flags. */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_OFST 4
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_OFST 6
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_CSR 0x103
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_OFST 8
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_OFST 10
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_OFST 42
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/*               MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/*               MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_OFST 68
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_OFST 70
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_OFST 72
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_LEN 2
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+
+/* MC_CMD_GET_CAPABILITIES_V6_OUT msgresponse */
+#define    MC_CMD_GET_CAPABILITIES_V6_OUT_LEN 148
+/* First word of flags. */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_OFST 4
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_OFST 6
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_CSR 0x103
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_OFST 8
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_OFST 10
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_OFST 42
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/*               MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/*               MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_OFST 58
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_LEN 2
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_OFST 68
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_OFST 70
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_OFST 72
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_LEN 2
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
+
+/* MC_CMD_GET_CAPABILITIES_V7_OUT msgresponse */
+#define    MC_CMD_GET_CAPABILITIES_V7_OUT_LEN 152
+/* First word of flags. */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_OFST 4
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DPCPU_FW_ID_OFST 6
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_CSR 0x103
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_OFST 8
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_OFST 10
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_OFST 42
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/*               MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/*               MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_OFST 58
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_LEN 2
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_PIO_BUFFS_OFST 68
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_SIZE_PIO_BUFF_OFST 70
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_OFST 72
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_NUM_STATS_LEN 2
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
+/* Third word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_OFST 148
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+
+/* MC_CMD_GET_CAPABILITIES_V8_OUT msgresponse */
+#define    MC_CMD_GET_CAPABILITIES_V8_OUT_LEN 160
+/* First word of flags. */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DPCPU_FW_ID_OFST 4
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DPCPU_FW_ID_OFST 6
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_CSR 0x103
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_OFST 8
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_OFST 10
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_OFST 42
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/*               MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/*               MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_OFST 58
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_LEN 2
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_PIO_BUFFS_OFST 68
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_SIZE_PIO_BUFF_OFST 70
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_OFST 72
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_LEN 2
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
+/* Third word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_OFST 148
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_OFST 152
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LEN 8
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_OFST 152
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_OFST 156
+
+/* MC_CMD_GET_CAPABILITIES_V9_OUT msgresponse */
+#define    MC_CMD_GET_CAPABILITIES_V9_OUT_LEN 184
+/* First word of flags. */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_OFST 4
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DPCPU_FW_ID_OFST 6
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_CSR 0x103
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_OFST 8
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_OFST 10
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_OFST 42
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/*               MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/*               MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_OFST 58
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_LEN 2
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_PIO_BUFFS_OFST 68
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_SIZE_PIO_BUFF_OFST 70
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_OFST 72
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_NUM_STATS_LEN 2
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
+/* Third word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_OFST 148
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_OFST 152
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LEN 8
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_OFST 152
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_OFST 156
+/* The minimum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum number of queues that can be used by an RSS context in exclusive
+ * mode. In exclusive mode the context has a configurable indirection table and
+ * a configurable RSS key.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4
+/* The maximum number of queues that can be used by an RSS context in even-
+ * spreading mode. In even-spreading mode the context has no indirection table
+ * but it does have a configurable RSS key.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4
+/* The total number of RSS contexts supported. Note that the number of
+ * available contexts using indirection tables is also limited by the
+ * availability of indirection table space allocated from a common pool.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_NUM_CONTEXTS_OFST 176
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_NUM_CONTEXTS_LEN 4
+/* The total amount of indirection table space that can be shared between RSS
+ * contexts.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_OFST 180
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_V2_EXTN
+ * Encapsulation for a v2 extended command
+ */
+#define MC_CMD_V2_EXTN 0x7f
+
+/* MC_CMD_V2_EXTN_IN msgrequest */
+#define    MC_CMD_V2_EXTN_IN_LEN 4
+/* the extended command number */
+#define       MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0
+#define       MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15
+#define       MC_CMD_V2_EXTN_IN_UNUSED_LBN 15
+#define       MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1
+/* the actual length of the encapsulated command (which is not in the v1
+ * header)
+ */
+#define       MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
+#define       MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
+#define       MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
+#define       MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 2
+/* Type of command/response */
+#define       MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN 28
+#define       MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_WIDTH 4
+/* enum: MCDI command directed to or response originating from the MC. */
+#define          MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_MC 0x0
+/* enum: MCDI command directed to a TSA controller. MCDI responses of this type
+ * are not defined.
  */
 #define          MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1
 
  * Allocate a pacer bucket (for qau rp or a snapper test)
  */
 #define MC_CMD_TCM_BUCKET_ALLOC 0xb2
+#undef MC_CMD_0xb2_PRIVILEGE_CTG
 
 #define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Free a pacer bucket
  */
 #define MC_CMD_TCM_BUCKET_FREE 0xb3
+#undef MC_CMD_0xb3_PRIVILEGE_CTG
 
 #define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Initialise pacer bucket with a given rate
  */
 #define MC_CMD_TCM_BUCKET_INIT 0xb4
+#undef MC_CMD_0xb4_PRIVILEGE_CTG
 
 #define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Initialise txq in pacer with given options or set options
  */
 #define MC_CMD_TCM_TXQ_INIT 0xb5
+#undef MC_CMD_0xb5_PRIVILEGE_CTG
 
 #define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* bitmask of the priority queues this txq is inserted into when inserted. */
 #define       MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
 #define       MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_OFST 8
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_OFST 8
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_OFST 8
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
 #define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
 /* the reaction point (RP) bucket */
 /* bitmask of the priority queues this txq is inserted into when inserted. */
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
 #define       MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_OFST 8
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_OFST 8
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_OFST 8
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
 #define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
 /* the reaction point (RP) bucket */
  * Link a push I/O buffer to a TxQ
  */
 #define MC_CMD_LINK_PIOBUF 0x92
+#undef MC_CMD_0x92_PRIVILEGE_CTG
 
 #define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
 
  * Unlink a push I/O buffer from a TxQ
  */
 #define MC_CMD_UNLINK_PIOBUF 0x93
+#undef MC_CMD_0x93_PRIVILEGE_CTG
 
 #define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
 
  * allocate and initialise a v-switch.
  */
 #define MC_CMD_VSWITCH_ALLOC 0x94
+#undef MC_CMD_0x94_PRIVILEGE_CTG
 
 #define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* Flags controlling v-port creation */
 #define       MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
 #define       MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4
+#define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_OFST 8
 #define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
 #define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
 /* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
  * de-allocate a v-switch.
  */
 #define MC_CMD_VSWITCH_FREE 0x95
+#undef MC_CMD_0x95_PRIVILEGE_CTG
 
 #define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * not, then the command returns ENOENT).
  */
 #define MC_CMD_VSWITCH_QUERY 0x63
+#undef MC_CMD_0x63_PRIVILEGE_CTG
 
 #define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * allocate a v-port.
  */
 #define MC_CMD_VPORT_ALLOC 0x96
+#undef MC_CMD_0x96_PRIVILEGE_CTG
 
 #define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* Flags controlling v-port creation */
 #define       MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
 #define       MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4
+#define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_OFST 8
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+#define        MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_OFST 8
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
 #define        MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_WIDTH 1
 /* The number of VLAN tags to insert/remove. An error will be returned if
 /* The actual VLAN tags to insert/remove */
 #define       MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
 #define       MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4
+#define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_OFST 16
 #define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
 #define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_OFST 16
 #define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
 #define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16
 
  * de-allocate a v-port.
  */
 #define MC_CMD_VPORT_FREE 0x97
+#undef MC_CMD_0x97_PRIVILEGE_CTG
 
 #define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * allocate a v-adaptor.
  */
 #define MC_CMD_VADAPTOR_ALLOC 0x98
+#undef MC_CMD_0x98_PRIVILEGE_CTG
 
 #define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* Flags controlling v-adaptor creation */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
 #define       MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4
+#define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_OFST 8
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
+#define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 8
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
 #define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
 /* The number of VLAN tags to strip on receive */
 /* The actual VLAN tags to insert/remove */
 #define       MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
 #define       MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4
+#define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_OFST 20
 #define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
 #define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_OFST 20
 #define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
 #define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16
 /* The MAC address to assign to this v-adaptor */
  * de-allocate a v-adaptor.
  */
 #define MC_CMD_VADAPTOR_FREE 0x99
+#undef MC_CMD_0x99_PRIVILEGE_CTG
 
 #define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * assign a new MAC address to a v-adaptor.
  */
 #define MC_CMD_VADAPTOR_SET_MAC 0x5d
+#undef MC_CMD_0x5d_PRIVILEGE_CTG
 
 #define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * read the MAC address assigned to a v-adaptor.
  */
 #define MC_CMD_VADAPTOR_GET_MAC 0x5e
+#undef MC_CMD_0x5e_PRIVILEGE_CTG
 
 #define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * read some config of v-adaptor.
  */
 #define MC_CMD_VADAPTOR_QUERY 0x61
+#undef MC_CMD_0x61_PRIVILEGE_CTG
 
 #define MC_CMD_0x61_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * assign a port to a PCI function.
  */
 #define MC_CMD_EVB_PORT_ASSIGN 0x9a
+#undef MC_CMD_0x9a_PRIVILEGE_CTG
 
 #define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* The target function to modify. */
 #define       MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
 #define       MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4
+#define        MC_CMD_EVB_PORT_ASSIGN_IN_PF_OFST 4
 #define        MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
 #define        MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
+#define        MC_CMD_EVB_PORT_ASSIGN_IN_VF_OFST 4
 #define        MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
 #define        MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16
 
  * Assign the 64 bit region addresses.
  */
 #define MC_CMD_RDWR_A64_REGIONS 0x9b
+#undef MC_CMD_0x9b_PRIVILEGE_CTG
 
 #define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * Allocate an Onload stack ID.
  */
 #define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+#undef MC_CMD_0x9c_PRIVILEGE_CTG
 
 #define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
 
  * Free an Onload stack ID.
  */
 #define MC_CMD_ONLOAD_STACK_FREE 0x9d
+#undef MC_CMD_0x9d_PRIVILEGE_CTG
 
 #define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
 
  * Allocate an RSS context.
  */
 #define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+#undef MC_CMD_0x9e_PRIVILEGE_CTG
 
 #define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
  */
 #define          MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
-/* Number of queues spanned by this context, in the range 1-64; valid offsets
- * in the indirection table will be in the range 0 to NUM_QUEUES-1.
+/* enum: Allocate a context to spread evenly across an arbitrary number of
+ * queues. No indirection table space is allocated for this context. (EF100 and
+ * later)
+ */
+#define          MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EVEN_SPREADING 0x2
+/* Number of queues spanned by this context. For exclusive contexts this must
+ * be in the range 1 to RSS_MAX_INDIRECTION_QUEUES, where
+ * RSS_MAX_INDIRECTION_QUEUES is queried from MC_CMD_GET_CAPABILITIES_V9 or if
+ * V9 is not supported then RSS_MAX_INDIRECTION_QUEUES is 64. Valid entries in
+ * the indirection table will be in the range 0 to NUM_QUEUES-1. For even-
+ * spreading contexts this must be in the range 1 to
+ * RSS_MAX_EVEN_SPREADING_QUEUES as queried from MC_CMD_GET_CAPABILITIES. Note
+ * that specifying NUM_QUEUES = 1 will not perform any spreading but may still
+ * be useful as a way of obtaining the Toeplitz hash.
  */
 #define       MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
 #define       MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4
 
+/* MC_CMD_RSS_CONTEXT_ALLOC_V2_IN msgrequest */
+#define    MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_LEN 16
+/* The handle of the owning upstream port */
+#define       MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of context to allocate */
+#define       MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_OFST 4
+#define       MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_LEN 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define          MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define          MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_SHARED 0x1
+/* enum: Allocate a context to spread evenly across an arbitrary number of
+ * queues. No indirection table space is allocated for this context. (EF100 and
+ * later)
+ */
+#define          MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_EVEN_SPREADING 0x2
+/* Number of queues spanned by this context. For exclusive contexts this must
+ * be in the range 1 to RSS_MAX_INDIRECTION_QUEUES, where
+ * RSS_MAX_INDIRECTION_QUEUES is queried from MC_CMD_GET_CAPABILITIES_V9 or if
+ * V9 is not supported then RSS_MAX_INDIRECTION_QUEUES is 64. Valid entries in
+ * the indirection table will be in the range 0 to NUM_QUEUES-1. For even-
+ * spreading contexts this must be in the range 1 to
+ * RSS_MAX_EVEN_SPREADING_QUEUES as queried from MC_CMD_GET_CAPABILITIES. Note
+ * that specifying NUM_QUEUES = 1 will not perform any spreading but may still
+ * be useful as a way of obtaining the Toeplitz hash.
+ */
+#define       MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_NUM_QUEUES_OFST 8
+#define       MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_NUM_QUEUES_LEN 4
+/* Size of indirection table to be allocated to this context from the pool.
+ * Must be a power of 2. The minimum and maximum table size can be queried
+ * using MC_CMD_GET_CAPABILITIES_V9. If there is not enough space remaining in
+ * the common pool to allocate the requested table size, due to allocating
+ * table space to other RSS contexts, then the command will fail with
+ * MC_CMD_ERR_ENOSPC.
+ */
+#define       MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_INDIRECTION_TABLE_SIZE_OFST 12
+#define       MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_INDIRECTION_TABLE_SIZE_LEN 4
+
 /* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
 #define    MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
 /* The handle of the new RSS context. This should be considered opaque to the
  * Free an RSS context.
  */
 #define MC_CMD_RSS_CONTEXT_FREE 0x9f
+#undef MC_CMD_0x9f_PRIVILEGE_CTG
 
 #define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Set the Toeplitz hash key for an RSS context.
  */
 #define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
+#undef MC_CMD_0xa0_PRIVILEGE_CTG
 
 #define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Get the Toeplitz hash key for an RSS context.
  */
 #define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
+#undef MC_CMD_0xa1_PRIVILEGE_CTG
 
 #define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 
 /***********************************/
 /* MC_CMD_RSS_CONTEXT_SET_TABLE
- * Set the indirection table for an RSS context.
+ * Set the indirection table for an RSS context. This command should only be
+ * used with indirection tables containing 128 entries, which is the default
+ * when the RSS context is allocated without specifying a table size.
  */
 #define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2
+#undef MC_CMD_0xa2_PRIVILEGE_CTG
 
 #define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 
 /***********************************/
 /* MC_CMD_RSS_CONTEXT_GET_TABLE
- * Get the indirection table for an RSS context.
+ * Get the indirection table for an RSS context. This command should only be
+ * used with indirection tables containing 128 entries, which is the default
+ * when the RSS context is allocated without specifying a table size.
  */
 #define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3
+#undef MC_CMD_0xa3_PRIVILEGE_CTG
 
 #define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 
 
 /***********************************/
+/* MC_CMD_RSS_CONTEXT_WRITE_TABLE
+ * Write a portion of a selectable-size indirection table for an RSS context.
+ * This command must be used instead of MC_CMD_RSS_CONTEXT_SET_TABLE if the
+ * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES.
+ */
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE 0x13e
+#undef MC_CMD_0x13e_PRIVILEGE_CTG
+
+#define MC_CMD_0x13e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN msgrequest */
+#define    MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMIN 8
+#define    MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX 252
+#define    MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LEN(num) (4+4*(num))
+#define    MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_NUM(len) (((len)-4)/4)
+/* The handle of the RSS context */
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_LEN 4
+/* An array of index-value pairs to be written to the table. Structure is
+ * MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY.
+ */
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_OFST 4
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_LEN 4
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MINNUM 1
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM 62
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM_MCDI2 254
+
+/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT msgresponse */
+#define    MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT_LEN 0
+
+/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY structuredef */
+#define    MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_LEN 4
+/* The index of the table entry to be written. */
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_OFST 0
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LEN 2
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LBN 0
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_WIDTH 16
+/* The value to write into the table entry. */
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_OFST 2
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LEN 2
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LBN 16
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_READ_TABLE
+ * Read a portion of a selectable-size indirection table for an RSS context.
+ * This command must be used instead of MC_CMD_RSS_CONTEXT_GET_TABLE if the
+ * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES.
+ */
+#define MC_CMD_RSS_CONTEXT_READ_TABLE 0x13f
+#undef MC_CMD_0x13f_PRIVILEGE_CTG
+
+#define MC_CMD_0x13f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_READ_TABLE_IN msgrequest */
+#define    MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMIN 6
+#define    MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX 252
+#define    MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LEN(num) (4+2*(num))
+#define    MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_NUM(len) (((len)-4)/2)
+/* The handle of the RSS context */
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_LEN 4
+/* An array containing the indices of the entries to be read. */
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_OFST 4
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_LEN 2
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MINNUM 1
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM 124
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM_MCDI2 508
+
+/* MC_CMD_RSS_CONTEXT_READ_TABLE_OUT msgresponse */
+#define    MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMIN 2
+#define    MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX 252
+#define    MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LEN(num) (0+2*(num))
+#define    MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_NUM(len) (((len)-0)/2)
+/* A buffer containing the requested entries read from the table. */
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_OFST 0
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_LEN 2
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MINNUM 1
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM 126
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM_MCDI2 510
+
+
+/***********************************/
 /* MC_CMD_RSS_CONTEXT_SET_FLAGS
  * Set various control flags for an RSS context.
  */
 #define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1
+#undef MC_CMD_0xe1_PRIVILEGE_CTG
 
 #define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  */
 #define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
 #define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_OFST 4
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_OFST 4
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_OFST 4
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_OFST 4
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_OFST 4
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_OFST 4
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_OFST 4
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_OFST 4
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_OFST 4
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_OFST 4
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_OFST 4
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28
 #define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4
 
  * Get various control flags for an RSS context.
  */
 #define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
+#undef MC_CMD_0xe2_PRIVILEGE_CTG
 
 #define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  */
 #define       MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
 #define       MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_OFST 4
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_OFST 4
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_OFST 4
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_OFST 4
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_OFST 4
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_OFST 4
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_OFST 4
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_OFST 4
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_OFST 4
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_OFST 4
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_OFST 4
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28
 #define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4
 
  * Allocate a .1p mapping.
  */
 #define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
+#undef MC_CMD_0xa4_PRIVILEGE_CTG
 
 #define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * Free a .1p mapping.
  */
 #define MC_CMD_DOT1P_MAPPING_FREE 0xa5
+#undef MC_CMD_0xa5_PRIVILEGE_CTG
 
 #define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * Set the mapping table for a .1p mapping.
  */
 #define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
+#undef MC_CMD_0xa6_PRIVILEGE_CTG
 
 #define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * Get the mapping table for a .1p mapping.
  */
 #define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
+#undef MC_CMD_0xa7_PRIVILEGE_CTG
 
 #define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * Get Interrupt Vector config for this PF.
  */
 #define MC_CMD_GET_VECTOR_CFG 0xbf
+#undef MC_CMD_0xbf_PRIVILEGE_CTG
 
 #define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Set Interrupt Vector config for this PF.
  */
 #define MC_CMD_SET_VECTOR_CFG 0xc0
+#undef MC_CMD_0xc0_PRIVILEGE_CTG
 
 #define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Add a MAC address to a v-port
  */
 #define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+#undef MC_CMD_0xa8_PRIVILEGE_CTG
 
 #define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Delete a MAC address from a v-port
  */
 #define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+#undef MC_CMD_0xa9_PRIVILEGE_CTG
 
 #define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Get the MAC addresses assigned to a v-port
  */
 #define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+#undef MC_CMD_0xaa_PRIVILEGE_CTG
 
 #define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
 #define    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
 #define    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
+#define    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX_MCDI2 1018
 #define    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
+#define    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_NUM(len) (((len)-4)/6)
 /* The number of MAC addresses returned */
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
 #define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41
+#define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM_MCDI2 169
 
 
 /***********************************/
  * function will be reset before applying the changes.
  */
 #define MC_CMD_VPORT_RECONFIGURE 0xeb
+#undef MC_CMD_0xeb_PRIVILEGE_CTG
 
 #define MC_CMD_0xeb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* Flags requesting what should be changed. */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
 #define       MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4
+#define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_OFST 4
 #define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
 #define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
+#define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_OFST 4
 #define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
 #define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_WIDTH 1
 /* The number of VLAN tags to insert/remove. An error will be returned if
 /* The actual VLAN tags to insert/remove */
 #define       MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
 #define       MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4
+#define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_OFST 12
 #define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
 #define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
+#define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_OFST 12
 #define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
 #define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
 /* The number of MAC addresses to add */
 #define    MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
 #define       MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
 #define       MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4
+#define        MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_OFST 0
 #define        MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
 #define        MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
 
  * read some config of v-port.
  */
 #define MC_CMD_EVB_PORT_QUERY 0x62
+#undef MC_CMD_0x62_PRIVILEGE_CTG
 
 #define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * lifted in future.
  */
 #define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
+#undef MC_CMD_0xab_PRIVILEGE_CTG
 
 #define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
+#define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
+#define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_NUM(len) (((len)-0)/12)
 /* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
 #define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
+#define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM_MCDI2 85
 
 
 /***********************************/
  * Set global RXDP configuration settings
  */
 #define MC_CMD_SET_RXDP_CONFIG 0xc1
+#undef MC_CMD_0xc1_PRIVILEGE_CTG
 
 #define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 #define    MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
 #define       MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
 #define       MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4
+#define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_OFST 0
 #define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
 #define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
+#define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_OFST 0
 #define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
 #define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
 /* enum: pad to 64 bytes */
  * Get global RXDP configuration settings
  */
 #define MC_CMD_GET_RXDP_CONFIG 0xc2
+#undef MC_CMD_0xc2_PRIVILEGE_CTG
 
 #define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 #define    MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
 #define       MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
 #define       MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4
+#define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_OFST 0
 #define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
 #define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
+#define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_OFST 0
 #define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
 #define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2
 /*             Enum values, see field(s): */
  * Return the system and DPCPU clock frequencies.
  */
 #define MC_CMD_GET_CLOCK 0xac
+#undef MC_CMD_0xac_PRIVILEGE_CTG
 
 #define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Control the system and DPCPU clock frequencies. Changes are lost on reboot.
  */
 #define MC_CMD_SET_CLOCK 0xad
+#undef MC_CMD_0xad_PRIVILEGE_CTG
 
 #define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
  * Send an arbitrary DPCPU message.
  */
 #define MC_CMD_DPCPU_RPC 0xae
+#undef MC_CMD_0xae_PRIVILEGE_CTG
 
 #define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
  */
 #define       MC_CMD_DPCPU_RPC_IN_DATA_OFST 4
 #define       MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
+#define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_OFST 4
 #define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
 #define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
 #define          MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
 #define          MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
 #define          MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
 #define          MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
+#define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_OFST 4
 #define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
 #define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
+#define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_OFST 4
 #define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
 #define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16
+#define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_OFST 4
 #define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48
 #define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16
+#define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_OFST 4
 #define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16
 #define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
+#define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_OFST 4
 #define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
 #define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
 #define          MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
 #define          MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
 #define          MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
 #define          MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
+#define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_OFST 4
 #define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
 #define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
+#define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_OFST 4
 #define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
 #define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16
+#define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_OFST 4
 #define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80
 #define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
+#define        MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_OFST 4
 #define        MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
 #define        MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
 #define          MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
 #define          MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
 #define          MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
+#define        MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_OFST 4
 #define        MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
 #define        MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
 #define       MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
 /* DATA */
 #define       MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
 #define       MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
+#define        MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_OFST 4
 #define        MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32
 #define        MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16
+#define        MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_OFST 4
 #define        MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48
 #define        MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16
 #define       MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
  * Trigger an interrupt by prodding the BIU.
  */
 #define MC_CMD_TRIGGER_INTERRUPT 0xe3
+#undef MC_CMD_0xe3_PRIVILEGE_CTG
 
 #define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Special operations to support (for now) shmboot.
  */
 #define MC_CMD_SHMBOOT_OP 0xe6
+#undef MC_CMD_0xe6_PRIVILEGE_CTG
 
 #define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * Read multiple 64bit words from capture block memory
  */
 #define MC_CMD_CAP_BLK_READ 0xe7
+#undef MC_CMD_0xe7_PRIVILEGE_CTG
 
 #define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_CAP_BLK_READ_OUT msgresponse */
 #define    MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
 #define    MC_CMD_CAP_BLK_READ_OUT_LENMAX 248
+#define    MC_CMD_CAP_BLK_READ_OUT_LENMAX_MCDI2 1016
 #define    MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num))
+#define    MC_CMD_CAP_BLK_READ_OUT_BUFFER_NUM(len) (((len)-0)/8)
 #define       MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
 #define       MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
 #define       MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
 #define       MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
 #define       MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
 #define       MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
+#define       MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM_MCDI2 127
 
 
 /***********************************/
  * Take a dump of the DUT state
  */
 #define MC_CMD_DUMP_DO 0xe8
+#undef MC_CMD_0xe8_PRIVILEGE_CTG
 
 #define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
  * Configure unsolicited dumps
  */
 #define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
+#undef MC_CMD_0xe9_PRIVILEGE_CTG
 
 #define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
  * the parameter is out of range.
  */
 #define MC_CMD_SET_PSU 0xea
+#undef MC_CMD_0xea_PRIVILEGE_CTG
 
 #define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
  * Get function information. PF and VF number.
  */
 #define MC_CMD_GET_FUNCTION_INFO 0xec
+#undef MC_CMD_0xec_PRIVILEGE_CTG
 
 #define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * reboot.
  */
 #define MC_CMD_ENABLE_OFFLINE_BIST 0xed
+#undef MC_CMD_0xed_PRIVILEGE_CTG
 
 #define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * forget.
  */
 #define MC_CMD_UART_SEND_DATA 0xee
+#undef MC_CMD_0xee_PRIVILEGE_CTG
 
 #define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_UART_SEND_DATA_OUT msgrequest */
 #define    MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
 #define    MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
+#define    MC_CMD_UART_SEND_DATA_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
+#define    MC_CMD_UART_SEND_DATA_OUT_DATA_NUM(len) (((len)-16)/1)
 /* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
 #define       MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
 #define       MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4
 #define       MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
 #define       MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
 #define       MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236
+#define       MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM_MCDI2 1004
 
 /* MC_CMD_UART_SEND_DATA_IN msgresponse */
 #define    MC_CMD_UART_SEND_DATA_IN_LEN 0
  * subject to change and not currently implemented.
  */
 #define MC_CMD_UART_RECV_DATA 0xef
+#undef MC_CMD_0xef_PRIVILEGE_CTG
 
 #define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_UART_RECV_DATA_IN msgresponse */
 #define    MC_CMD_UART_RECV_DATA_IN_LENMIN 16
 #define    MC_CMD_UART_RECV_DATA_IN_LENMAX 252
+#define    MC_CMD_UART_RECV_DATA_IN_LENMAX_MCDI2 1020
 #define    MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
+#define    MC_CMD_UART_RECV_DATA_IN_DATA_NUM(len) (((len)-16)/1)
 /* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
 #define       MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
 #define       MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4
 #define       MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
 #define       MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
 #define       MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
+#define       MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM_MCDI2 1004
 
 
 /***********************************/
  * Read data programmed into the device One-Time-Programmable (OTP) Fuses
  */
 #define MC_CMD_READ_FUSES 0xf0
+#undef MC_CMD_0xf0_PRIVILEGE_CTG
 
 #define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_READ_FUSES_OUT msgresponse */
 #define    MC_CMD_READ_FUSES_OUT_LENMIN 4
 #define    MC_CMD_READ_FUSES_OUT_LENMAX 252
+#define    MC_CMD_READ_FUSES_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
+#define    MC_CMD_READ_FUSES_OUT_DATA_NUM(len) (((len)-4)/1)
 /* Length of returned OTP data in bytes */
 #define       MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
 #define       MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
 #define       MC_CMD_READ_FUSES_OUT_DATA_LEN 1
 #define       MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
 #define       MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
+#define       MC_CMD_READ_FUSES_OUT_DATA_MAXNUM_MCDI2 1016
 
 
 /***********************************/
  * Get or set KR Serdes RXEQ and TX Driver settings
  */
 #define MC_CMD_KR_TUNE 0xf1
+#undef MC_CMD_0xf1_PRIVILEGE_CTG
 
 #define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_KR_TUNE_IN msgrequest */
 #define    MC_CMD_KR_TUNE_IN_LENMIN 4
 #define    MC_CMD_KR_TUNE_IN_LENMAX 252
+#define    MC_CMD_KR_TUNE_IN_LENMAX_MCDI2 1020
 #define    MC_CMD_KR_TUNE_IN_LEN(num) (4+4*(num))
+#define    MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_NUM(len) (((len)-4)/4)
 /* Requested operation */
 #define       MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0
 #define       MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1
 #define       MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_LEN 4
 #define       MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MINNUM 0
 #define       MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM 62
+#define       MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM_MCDI2 254
 
 /* MC_CMD_KR_TUNE_OUT msgresponse */
 #define    MC_CMD_KR_TUNE_OUT_LEN 0
 /* MC_CMD_KR_TUNE_RXEQ_GET_OUT msgresponse */
 #define    MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMIN 4
 #define    MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define    MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_KR_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
 /* RXEQ Parameter */
 #define       MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
 #define       MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
 #define       MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
 #define       MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define       MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_OFST 0
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
 /* enum: Attenuation (0-15, Huntington) */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_PVT 0x20
 /* enum: CDR integral loop code (Medford2) */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_INTEG 0x21
+/* enum: CTLE Boost stages - retimer lineside (Medford2 with DS250x retimer - 4
+ * stages, 2 bits per stage)
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST_RT_LS 0x22
+/* enum: DFE Tap1 - retimer lineside (Medford2 with DS250x retimer (-31 - 31))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_RT_LS 0x23
+/* enum: DFE Tap2 - retimer lineside (Medford2 with DS250x retimer (-15 - 15))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2_RT_LS 0x24
+/* enum: DFE Tap3 - retimer lineside (Medford2 with DS250x retimer (-15 - 15))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3_RT_LS 0x25
+/* enum: DFE Tap4 - retimer lineside (Medford2 with DS250x retimer (-15 - 15))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4_RT_LS 0x26
+/* enum: DFE Tap5 - retimer lineside (Medford2 with DS250x retimer (-15 - 15))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5_RT_LS 0x27
+/* enum: CTLE Boost stages - retimer hostside (Medford2 with DS250x retimer - 4
+ * stages, 2 bits per stage)
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST_RT_HS 0x28
+/* enum: DFE Tap1 - retimer hostside (Medford2 with DS250x retimer (-31 - 31))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_RT_HS 0x29
+/* enum: DFE Tap2 - retimer hostside (Medford2 with DS250x retimer (-15 - 15))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2_RT_HS 0x2a
+/* enum: DFE Tap3 - retimer hostside (Medford2 with DS250x retimer (-15 - 15))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3_RT_HS 0x2b
+/* enum: DFE Tap4 - retimer hostside (Medford2 with DS250x retimer (-15 - 15))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4_RT_HS 0x2c
+/* enum: DFE Tap5 - retimer hostside (Medford2 with DS250x retimer (-15 - 15))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5_RT_HS 0x2d
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_OFST 0
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
 #define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_OFST 0
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_OFST 0
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 4
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_OFST 0
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN 16
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_OFST 0
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
 #define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
 
 /* MC_CMD_KR_TUNE_RXEQ_SET_IN msgrequest */
 #define    MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMIN 8
 #define    MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX 252
+#define    MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX_MCDI2 1020
 #define    MC_CMD_KR_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+#define    MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4)
 /* Requested operation */
 #define       MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_OFST 0
 #define       MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_LEN 1
 #define       MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LEN 4
 #define       MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
 #define       MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define       MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_OFST 4
 #define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
 #define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
 /*             Enum values, see field(s): */
 /*                MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_OFST 4
 #define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
 #define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 3
 /*             Enum values, see field(s): */
 /*                MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_OFST 4
 #define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 11
 #define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_OFST 4
 #define        MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_LBN 12
 #define        MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 4
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_OFST 4
 #define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
 #define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_OFST 4
 #define        MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
 #define        MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
 
 /* MC_CMD_KR_TUNE_TXEQ_GET_OUT msgresponse */
 #define    MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMIN 4
 #define    MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define    MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_KR_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
 /* TXEQ Parameter */
 #define       MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
 #define       MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
 #define       MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
 #define       MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define       MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_OFST 0
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
 /* enum: TX Amplitude (Huntington, Medford, Medford2) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
 /* enum: TX Amplitude Fine control (Medford) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa
-/* enum: Pre-shoot Tap (Medford, Medford2) */
+/* enum: Pre-cursor Tap (Medford, Medford2) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb
-/* enum: De-emphasis Tap (Medford, Medford2) */
+/* enum: Post-cursor Tap (Medford, Medford2) */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc
+/* enum: TX Amplitude (Retimer Lineside) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_RT_LS 0xd
+/* enum: Pre-cursor Tap (Retimer Lineside) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV_RT_LS 0xe
+/* enum: Post-cursor Tap (Retimer Lineside) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY_RT_LS 0xf
+/* enum: TX Amplitude (Retimer Hostside) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_RT_HS 0x10
+/* enum: Pre-cursor Tap (Retimer Hostside) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV_RT_HS 0x11
+/* enum: Post-cursor Tap (Retimer Hostside) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY_RT_HS 0x12
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_OFST 0
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */
 #define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_OFST 0
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_OFST 0
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_OFST 0
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_LBN 24
 #define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_WIDTH 8
 
 /* MC_CMD_KR_TUNE_TXEQ_SET_IN msgrequest */
 #define    MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMIN 8
 #define    MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX 252
+#define    MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX_MCDI2 1020
 #define    MC_CMD_KR_TUNE_TXEQ_SET_IN_LEN(num) (4+4*(num))
+#define    MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4)
 /* Requested operation */
 #define       MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_OFST 0
 #define       MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_LEN 1
 #define       MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LEN 4
 #define       MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MINNUM 1
 #define       MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM 62
+#define       MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_OFST 4
 #define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_LBN 0
 #define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_WIDTH 8
 /*             Enum values, see field(s): */
 /*                MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_ID */
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_OFST 4
 #define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_LBN 8
 #define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_WIDTH 3
 /*             Enum values, see field(s): */
 /*                MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_LANE */
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_OFST 4
 #define        MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_LBN 11
 #define        MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_WIDTH 5
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_OFST 4
 #define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_LBN 16
 #define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_OFST 4
 #define        MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_LBN 24
 #define        MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_WIDTH 8
 
 #define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_LEN 3
 #define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST 4
 #define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_LEN 4
+#define        MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_OFST 4
 #define        MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_LBN 0
 #define        MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_WIDTH 8
+#define        MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_OFST 4
 #define        MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_LBN 31
 #define        MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_WIDTH 1
 /* Scan duration / cycle count */
 /* MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT msgresponse */
 #define    MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
 #define    MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define    MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define    MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_NUM(len) (((len)-0)/2)
 #define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
 #define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
 #define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
 #define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+#define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM_MCDI2 510
 
 /* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */
 #define    MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8
 #define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
 #define       MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
 #define       MC_CMD_KR_TUNE_READ_FOM_IN_LANE_LEN 4
+#define        MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_OFST 4
 #define        MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_LBN 0
 #define        MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_WIDTH 8
+#define        MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_OFST 4
 #define        MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_LBN 31
 #define        MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_WIDTH 1
 
  * Get or set PCIE Serdes RXEQ and TX Driver settings
  */
 #define MC_CMD_PCIE_TUNE 0xf2
+#undef MC_CMD_0xf2_PRIVILEGE_CTG
 
 #define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_PCIE_TUNE_IN msgrequest */
 #define    MC_CMD_PCIE_TUNE_IN_LENMIN 4
 #define    MC_CMD_PCIE_TUNE_IN_LENMAX 252
+#define    MC_CMD_PCIE_TUNE_IN_LENMAX_MCDI2 1020
 #define    MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
+#define    MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_NUM(len) (((len)-4)/4)
 /* Requested operation */
 #define       MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
 #define       MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
 #define       MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4
 #define       MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0
 #define       MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62
+#define       MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM_MCDI2 254
 
 /* MC_CMD_PCIE_TUNE_OUT msgresponse */
 #define    MC_CMD_PCIE_TUNE_OUT_LEN 0
 /* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */
 #define    MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4
 #define    MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define    MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
 /* RXEQ Parameter */
 #define       MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
 #define       MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
 #define       MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
 #define       MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define       MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
+#define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_OFST 0
 #define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
 #define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
 /* enum: Attenuation (0-15) */
 #define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
 /* enum: CTLE EQ Resistor (DC Gain) */
 #define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+#define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_OFST 0
 #define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
 #define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5
 #define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
 #define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
 #define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
 #define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
+#define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_OFST 0
 #define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13
 #define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_OFST 0
 #define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14
 #define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10
+#define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_OFST 0
 #define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
 #define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
 
 /* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */
 #define    MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8
 #define    MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252
+#define    MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX_MCDI2 1020
 #define    MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+#define    MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4)
 /* Requested operation */
 #define       MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0
 #define       MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1
 #define       MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4
 #define       MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
 #define       MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define       MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_OFST 4
 #define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
 #define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
 /*             Enum values, see field(s): */
 /*                MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_OFST 4
 #define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
 #define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5
 /*             Enum values, see field(s): */
 /*                MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_OFST 4
 #define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13
 #define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_OFST 4
 #define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14
 #define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_OFST 4
 #define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
 #define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_OFST 4
 #define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
 #define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
 
 /* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
 #define    MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
 #define    MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define    MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
 /* TXEQ Parameter */
 #define       MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
 #define       MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
 #define       MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
 #define       MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define       MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
+#define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_OFST 0
 #define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
 #define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
 /* enum: TxMargin (PIPE) */
 #define          MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
 /* enum: De-emphasis coefficient C(+1) (PIPE) */
 #define          MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
+#define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_OFST 0
 #define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
 #define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
 /*             Enum values, see field(s): */
 /*                MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_OFST 0
 #define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
 #define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
+#define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_OFST 0
 #define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
 #define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
 
 /* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
 #define    MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
 #define    MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define    MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define    MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_NUM(len) (((len)-0)/2)
 #define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
 #define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
 #define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
 #define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+#define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM_MCDI2 510
 
 /* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN msgrequest */
 #define    MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN_LEN 0
  * - not used for V3 licensing
  */
 #define MC_CMD_LICENSING 0xf3
+#undef MC_CMD_0xf3_PRIVILEGE_CTG
 
 #define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * - V3 licensing (Medford)
  */
 #define MC_CMD_LICENSING_V3 0xd0
+#undef MC_CMD_0xd0_PRIVILEGE_CTG
 
 #define MC_CMD_0xd0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * partition - V3 licensing (Medford)
  */
 #define MC_CMD_LICENSING_GET_ID_V3 0xd1
+#undef MC_CMD_0xd1_PRIVILEGE_CTG
 
 #define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
 #define    MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
 #define    MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
+#define    MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
+#define    MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_NUM(len) (((len)-8)/1)
 /* type of license (eg 3) */
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
 #define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
+#define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM_MCDI2 1012
 
 
 /***********************************/
  * This will fail on a single-core system.
  */
 #define MC_CMD_MC2MC_PROXY 0xf4
+#undef MC_CMD_0xf4_PRIVILEGE_CTG
 
 #define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * or a reboot of the MC.) Not used for V3 licensing
  */
 #define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+#undef MC_CMD_0xf5_PRIVILEGE_CTG
 
 #define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * operation or a reboot of the MC.) Used for V3 licensing (Medford)
  */
 #define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
+#undef MC_CMD_0xd2_PRIVILEGE_CTG
 
 #define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 
 /***********************************/
 /* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
- * Query the state of one or more licensed features. (Note that the actual
+ * Query the state of one or more licensed features. (Note that the actual
  * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
  * operation or a reboot of the MC.) Used for V3 licensing (Medford)
  */
 #define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
+#undef MC_CMD_0xd3_PRIVILEGE_CTG
 
 #define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * licensing.
  */
 #define MC_CMD_LICENSED_APP_OP 0xf6
+#undef MC_CMD_0xf6_PRIVILEGE_CTG
 
 #define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_LICENSED_APP_OP_IN msgrequest */
 #define    MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
 #define    MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
+#define    MC_CMD_LICENSED_APP_OP_IN_LENMAX_MCDI2 1020
 #define    MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
+#define    MC_CMD_LICENSED_APP_OP_IN_ARGS_NUM(len) (((len)-8)/4)
 /* application ID */
 #define       MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
 #define       MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
 #define       MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
 #define       MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
 #define       MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
+#define       MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM_MCDI2 253
 
 /* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
 #define    MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
 #define    MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
+#define    MC_CMD_LICENSED_APP_OP_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_LICENSED_APP_OP_OUT_RESULT_NUM(len) (((len)-0)/4)
 /* result specific to this particular operation */
 #define       MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
 #define       MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
 #define       MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
 #define       MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
+#define       MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM_MCDI2 255
 
 /* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
 #define    MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
  * (Medford)
  */
 #define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
+#undef MC_CMD_0xd4_PRIVILEGE_CTG
 
 #define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Mask features - V3 licensing (Medford)
  */
 #define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
+#undef MC_CMD_0xd5_PRIVILEGE_CTG
 
 #define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * erased when the adapter is power cycled
  */
 #define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
+#undef MC_CMD_0xd6_PRIVILEGE_CTG
 
 #define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * delivered to a specific queue, or a set of queues with RSS.
  */
 #define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
+#undef MC_CMD_0xf7_PRIVILEGE_CTG
 
 #define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* configuration flags */
 #define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
 #define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
+#define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0
 #define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
 #define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+#define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_OFST 0
 #define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
 #define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
 /* receive queue handle (for RSS mode, this is the base queue) */
  * the configuration.
  */
 #define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
+#undef MC_CMD_0xf8_PRIVILEGE_CTG
 
 #define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* configuration flags */
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
 #define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
+#define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0
 #define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
 #define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+#define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_OFST 0
 #define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
 #define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
 /* receiving queue handle (for RSS mode, this is the base queue) */
  * Change configuration related to the parser-dispatcher subsystem.
  */
 #define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9
+#undef MC_CMD_0xf9_PRIVILEGE_CTG
 
 #define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */
 #define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12
 #define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX_MCDI2 1020
 #define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_NUM(len) (((len)-8)/4)
 /* the type of configuration setting to change */
 #define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
 #define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
 #define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4
 #define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1
 #define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM_MCDI2 253
 
 /* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */
 #define    MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0
  * Read configuration related to the parser-dispatcher subsystem.
  */
 #define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
+#undef MC_CMD_0xfa_PRIVILEGE_CTG
 
 #define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
 #define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
 #define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_NUM(len) (((len)-0)/4)
 /* current value: the details depend on the type of configuration setting being
  * read
  */
 #define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
 #define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
 #define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM_MCDI2 255
 
 
 /***********************************/
  * dedicated as TX sniff receivers.
  */
 #define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb
+#undef MC_CMD_0xfb_PRIVILEGE_CTG
 
 #define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* configuration flags */
 #define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
 #define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
+#define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0
 #define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
 #define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
 /* receive queue handle (for RSS mode, this is the base queue) */
  * the configuration.
  */
 #define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
+#undef MC_CMD_0xfc_PRIVILEGE_CTG
 
 #define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 /* configuration flags */
 #define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
 #define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
+#define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0
 #define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
 #define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
 /* receiving queue handle (for RSS mode, this is the base queue) */
  * Per queue rx error stats.
  */
 #define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
+#undef MC_CMD_0xfe_PRIVILEGE_CTG
 
 #define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 #define       MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4
 #define       MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
 #define       MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4
+#define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_OFST 4
 #define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
 #define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
 
  * Find out about available PCIE resources
  */
 #define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+#undef MC_CMD_0xfd_PRIVILEGE_CTG
 
 #define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Find out about available port modes
  */
 #define MC_CMD_GET_PORT_MODES 0xff
+#undef MC_CMD_0xff_PRIVILEGE_CTG
 
 #define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 
 /* MC_CMD_GET_PORT_MODES_OUT msgresponse */
 #define    MC_CMD_GET_PORT_MODES_OUT_LEN 12
-/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
+ * that are supported for customer use in production firmware.
+ */
 #define       MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
 #define       MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
 /* Default (canonical) board mode */
 #define       MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
 #define       MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
 
+/* MC_CMD_GET_PORT_MODES_OUT_V2 msgresponse */
+#define    MC_CMD_GET_PORT_MODES_OUT_V2_LEN 16
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
+ * that are supported for customer use in production firmware.
+ */
+#define       MC_CMD_GET_PORT_MODES_OUT_V2_MODES_OFST 0
+#define       MC_CMD_GET_PORT_MODES_OUT_V2_MODES_LEN 4
+/* Default (canonical) board mode */
+#define       MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_OFST 4
+#define       MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_LEN 4
+/* Current board mode */
+#define       MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_OFST 8
+#define       MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_LEN 4
+/* Bitmask of engineering port modes available on the board (indexed by
+ * TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that
+ * contains all modes implemented in firmware for a particular board. Modes
+ * listed in MODES are considered production modes and should be exposed in
+ * userland tools. Modes listed in in ENGINEERING_MODES, but not in MODES
+ * should be considered hidden (not to be exposed in userland tools) and for
+ * engineering use only. There are no other semantic differences and any mode
+ * listed in either MODES or ENGINEERING_MODES can be set on the board.
+ */
+#define       MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_OFST 12
+#define       MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_LEN 4
+
+
+/***********************************/
+/* MC_CMD_OVERRIDE_PORT_MODE
+ * Override flash config port mode for subsequent MC reboot(s). Override data
+ * is stored in the presistent data section of DMEM and activated on next MC
+ * warm reboot. A cold reboot resets the override. It is assumed that a
+ * sufficient number of PFs are available and that port mapping is valid for
+ * the new port mode, as the override does not affect PF configuration.
+ */
+#define MC_CMD_OVERRIDE_PORT_MODE 0x137
+#undef MC_CMD_0x137_PRIVILEGE_CTG
+
+#define MC_CMD_0x137_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_OVERRIDE_PORT_MODE_IN msgrequest */
+#define    MC_CMD_OVERRIDE_PORT_MODE_IN_LEN 8
+#define       MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_OFST 0
+#define       MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_LEN 4
+#define        MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_OFST 0
+#define        MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_LBN 0
+#define        MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_WIDTH 1
+/* New mode (TLV_PORT_MODE_*) to set, if override enabled */
+#define       MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_OFST 4
+#define       MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_LEN 4
+
+/* MC_CMD_OVERRIDE_PORT_MODE_OUT msgresponse */
+#define    MC_CMD_OVERRIDE_PORT_MODE_OUT_LEN 0
+
 
 /***********************************/
 /* MC_CMD_READ_ATB
  * Sample voltages on the ATB
  */
 #define MC_CMD_READ_ATB 0x100
+#undef MC_CMD_0x100_PRIVILEGE_CTG
 
 #define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
  * enums here must correspond with those in MC_CMD_WORKAROUND.
  */
 #define MC_CMD_GET_WORKAROUNDS 0x59
+#undef MC_CMD_0x59_PRIVILEGE_CTG
 
 #define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Read/set privileges of an arbitrary PCIe function
  */
 #define MC_CMD_PRIVILEGE_MASK 0x5a
+#undef MC_CMD_0x5a_PRIVILEGE_CTG
 
 #define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  */
 #define       MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
 #define       MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_OFST 0
 #define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
 #define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_OFST 0
 #define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
 #define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
 #define          MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
  * are not permitted on secure adapters regardless of the privilege mask.
  */
 #define          MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE 0x4000
+/* enum: Trusted Server Adapter (TSA) / ServerLock. Privilege for
+ * administrator-level operations that are not allowed from the local host once
+ * an adapter has Bound to a remote ServerLock Controller (see doxbox
+ * SF-117064-DG for background).
+ */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN_TSA_UNBOUND 0x8000
 /* enum: Set this bit to indicate that a new privilege mask is to be set,
  * otherwise the command will only read the existing mask.
  */
  * Read/set link state mode of a VF
  */
 #define MC_CMD_LINK_STATE_MODE 0x5c
+#undef MC_CMD_0x5c_PRIVILEGE_CTG
 
 #define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  */
 #define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
 #define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_OFST 0
 #define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
 #define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_OFST 0
 #define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
 #define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
 /* New link state mode to be set */
  * parameter to MC_CMD_INIT_RXQ.
  */
 #define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
+#undef MC_CMD_0x101_PRIVILEGE_CTG
 
 #define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Additional fuse diagnostics
  */
 #define MC_CMD_FUSE_DIAGS 0x102
+#undef MC_CMD_0x102_PRIVILEGE_CTG
 
 #define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
  * included in one of the masks provided.
  */
 #define MC_CMD_PRIVILEGE_MODIFY 0x60
+#undef MC_CMD_0x60_PRIVILEGE_CTG
 
 #define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* For VFS_OF_PF specify the PF, for ONE specify the target function */
 #define       MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
 #define       MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_OFST 4
 #define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
 #define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_OFST 4
 #define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
 #define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
 /* Privileges to be added to the target functions. For privilege definitions
  * Read XPM memory
  */
 #define MC_CMD_XPM_READ_BYTES 0x103
+#undef MC_CMD_0x103_PRIVILEGE_CTG
 
 #define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
 #define    MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
 #define    MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252
+#define    MC_CMD_XPM_READ_BYTES_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num))
+#define    MC_CMD_XPM_READ_BYTES_OUT_DATA_NUM(len) (((len)-0)/1)
 /* Data */
 #define       MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0
 #define       MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1
 #define       MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0
 #define       MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM_MCDI2 1020
 
 
 /***********************************/
  * Write XPM memory
  */
 #define MC_CMD_XPM_WRITE_BYTES 0x104
+#undef MC_CMD_0x104_PRIVILEGE_CTG
 
 #define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
 #define    MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
 #define    MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252
+#define    MC_CMD_XPM_WRITE_BYTES_IN_LENMAX_MCDI2 1020
 #define    MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
+#define    MC_CMD_XPM_WRITE_BYTES_IN_DATA_NUM(len) (((len)-8)/1)
 /* Start address (byte) */
 #define       MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
 #define       MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4
 #define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
 #define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
 #define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM_MCDI2 1012
 
 /* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
 #define    MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
  * Read XPM sector
  */
 #define MC_CMD_XPM_READ_SECTOR 0x105
+#undef MC_CMD_0x105_PRIVILEGE_CTG
 
 #define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
 #define    MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
 #define    MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
+#define    MC_CMD_XPM_READ_SECTOR_OUT_LENMAX_MCDI2 36
 #define    MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
+#define    MC_CMD_XPM_READ_SECTOR_OUT_DATA_NUM(len) (((len)-4)/1)
 /* Sector type */
 #define       MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
 #define       MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4
 #define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
 #define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
 #define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM_MCDI2 32
 
 
 /***********************************/
  * Write XPM sector
  */
 #define MC_CMD_XPM_WRITE_SECTOR 0x106
+#undef MC_CMD_0x106_PRIVILEGE_CTG
 
 #define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
 #define    MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
 #define    MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
+#define    MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX_MCDI2 44
 #define    MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
+#define    MC_CMD_XPM_WRITE_SECTOR_IN_DATA_NUM(len) (((len)-12)/1)
 /* If writing fails due to an uncorrectable error, try up to RETRIES following
  * sectors (or until no more space available). If 0, only one write attempt is
  * made. Note that uncorrectable errors are unlikely, thanks to XPM self-repair
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0
 #define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM_MCDI2 32
 
 /* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */
 #define    MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
  * Invalidate XPM sector
  */
 #define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
+#undef MC_CMD_0x107_PRIVILEGE_CTG
 
 #define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
  * Blank-check XPM memory and report bad locations
  */
 #define MC_CMD_XPM_BLANK_CHECK 0x108
+#undef MC_CMD_0x108_PRIVILEGE_CTG
 
 #define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
 /* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
 #define    MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
 #define    MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252
+#define    MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
+#define    MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_NUM(len) (((len)-4)/2)
 /* Total number of bad (non-blank) locations */
 #define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
 #define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4
 #define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2
 #define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0
 #define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM_MCDI2 508
 
 
 /***********************************/
  * Blank-check and repair XPM memory
  */
 #define MC_CMD_XPM_REPAIR 0x109
+#undef MC_CMD_0x109_PRIVILEGE_CTG
 
 #define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
  * be performed on an unprogrammed part.
  */
 #define MC_CMD_XPM_DECODER_TEST 0x10a
+#undef MC_CMD_0x10a_PRIVILEGE_CTG
 
 #define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
  * first available location to use, or fail with ENOSPC if none left.
  */
 #define MC_CMD_XPM_WRITE_TEST 0x10b
+#undef MC_CMD_0x10b_PRIVILEGE_CTG
 
 #define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE
 
  * does match, otherwise it will respond with success before it jumps to IMEM.
  */
 #define MC_CMD_EXEC_SIGNED 0x10c
+#undef MC_CMD_0x10c_PRIVILEGE_CTG
 
 #define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * MC_CMD_EXEC_SIGNED.
  */
 #define MC_CMD_PREPARE_SIGNED 0x10d
+#undef MC_CMD_0x10d_PRIVILEGE_CTG
 
 #define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * cause all functions to see a reset. (Available on Medford only.)
  */
 #define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117
+#undef MC_CMD_0x117_PRIVILEGE_CTG
 
 #define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */
 #define    MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4
 #define    MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68
+#define    MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX_MCDI2 68
 #define    MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num))
+#define    MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_NUM(len) (((len)-4)/4)
 /* Flags */
 #define       MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0
 #define       MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2
+#define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_OFST 0
 #define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0
 #define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1
 /* The number of entries in the ENTRIES array */
 #define       MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4
 #define       MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 0
 #define       MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16
+#define       MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM_MCDI2 16
 
 /* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */
 #define    MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2
 /* Flags */
 #define       MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0
 #define       MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2
+#define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_OFST 0
 #define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
 #define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
 
  * priority.
  */
 #define MC_CMD_RX_BALANCING 0x118
+#undef MC_CMD_0x118_PRIVILEGE_CTG
 
 #define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
  * if the tag is already present.
  */
 #define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c
+#undef MC_CMD_0x11c_PRIVILEGE_CTG
 
 #define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */
 #define    MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9
 #define    MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX 252
+#define    MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX_MCDI2 1020
 #define    MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
+#define    MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_NUM(len) (((len)-8)/1)
 /* The tag to be appended */
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MINNUM 1
 #define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM 244
+#define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM_MCDI2 1012
 
 /* MC_CMD_NVRAM_PRIVATE_APPEND_OUT msgresponse */
 #define    MC_CMD_NVRAM_PRIVATE_APPEND_OUT_LEN 0
  * correctly at ATE.
  */
 #define MC_CMD_XPM_VERIFY_CONTENTS 0x11b
+#undef MC_CMD_0x11b_PRIVILEGE_CTG
 
 #define MC_CMD_0x11b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
 
 /* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
 #define    MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
 #define    MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX 252
+#define    MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX_MCDI2 1020
 #define    MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
+#define    MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_NUM(len) (((len)-12)/1)
 /* Number of sectors found (test builds only) */
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MINNUM 0
 #define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM 240
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM_MCDI2 1008
 
 
 /***********************************/
  * and TMR_RELOAD_ACT_NS).
  */
 #define MC_CMD_SET_EVQ_TMR 0x120
+#undef MC_CMD_0x120_PRIVILEGE_CTG
 
 #define MC_CMD_0x120_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * Query properties about the event queue timers.
  */
 #define MC_CMD_GET_EVQ_TMR_PROPERTIES 0x122
+#undef MC_CMD_0x122_PRIVILEGE_CTG
 
 #define MC_CMD_0x122_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * non used switch buffers.
  */
 #define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d
+#undef MC_CMD_0x11d_PRIVILEGE_CTG
 
 #define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * previously allocated common pools.
  */
 #define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e
+#undef MC_CMD_0x11e_PRIVILEGE_CTG
 
 #define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * ready to be re-used.
  */
 #define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f
+#undef MC_CMD_0x11f_PRIVILEGE_CTG
 
 #define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * it ready to be re-used.
  */
 #define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121
+#undef MC_CMD_0x121_PRIVILEGE_CTG
 
 #define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
  * not yet assigned.
  */
 #define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124
+#undef MC_CMD_0x124_PRIVILEGE_CTG
 
 #define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_GENERAL
 
 #define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4
 
 
+/***********************************/
+/* MC_CMD_SUC_VERSION
+ * Get the version of the SUC
+ */
+#define MC_CMD_SUC_VERSION 0x134
+#undef MC_CMD_0x134_PRIVILEGE_CTG
+
+#define MC_CMD_0x134_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SUC_VERSION_IN msgrequest */
+#define    MC_CMD_SUC_VERSION_IN_LEN 0
+
+/* MC_CMD_SUC_VERSION_OUT msgresponse */
+#define    MC_CMD_SUC_VERSION_OUT_LEN 24
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define       MC_CMD_SUC_VERSION_OUT_VERSION_OFST 0
+#define       MC_CMD_SUC_VERSION_OUT_VERSION_LEN 4
+#define       MC_CMD_SUC_VERSION_OUT_VERSION_NUM 4
+/* The date, in seconds since the Unix epoch, when the firmware image was
+ * built.
+ */
+#define       MC_CMD_SUC_VERSION_OUT_BUILD_DATE_OFST 16
+#define       MC_CMD_SUC_VERSION_OUT_BUILD_DATE_LEN 4
+/* The ID of the SUC chip. This is specific to the platform but typically
+ * indicates family, memory sizes etc. See SF-116728-SW for further details.
+ */
+#define       MC_CMD_SUC_VERSION_OUT_CHIP_ID_OFST 20
+#define       MC_CMD_SUC_VERSION_OUT_CHIP_ID_LEN 4
+
+/* MC_CMD_SUC_BOOT_VERSION_IN msgrequest: Get the version of the SUC boot
+ * loader.
+ */
+#define    MC_CMD_SUC_BOOT_VERSION_IN_LEN 4
+#define       MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_OFST 0
+#define       MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_LEN 4
+/* enum: Requests the SUC boot version. */
+#define          MC_CMD_SUC_VERSION_GET_BOOT_VERSION 0xb007700b
+
+/* MC_CMD_SUC_BOOT_VERSION_OUT msgresponse */
+#define    MC_CMD_SUC_BOOT_VERSION_OUT_LEN 4
+/* The SUC boot version */
+#define       MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_OFST 0
+#define       MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_LEN 4
+
+
+/***********************************/
+/* MC_CMD_GET_RX_PREFIX_ID
+ * This command is part of the mechanism for configuring the format of the RX
+ * packet prefix. It takes as input a bitmask of the fields the host would like
+ * to be in the prefix. If the hardware supports RX prefixes with that
+ * combination of fields, then this command returns a list of prefix-ids,
+ * opaque identifiers suitable for use in the RX_PREFIX_ID field of a
+ * MC_CMD_INIT_RXQ_V5_IN message. If the combination of fields is not
+ * supported, returns ENOTSUP. If the firmware can't create any new prefix-ids
+ * due to resource constraints, returns ENOSPC.
+ */
+#define MC_CMD_GET_RX_PREFIX_ID 0x13b
+#undef MC_CMD_0x13b_PRIVILEGE_CTG
+
+#define MC_CMD_0x13b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_RX_PREFIX_ID_IN msgrequest */
+#define    MC_CMD_GET_RX_PREFIX_ID_IN_LEN 8
+/* Field bitmask. */
+#define       MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_OFST 0
+#define       MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LEN 8
+#define       MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_OFST 0
+#define       MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_OFST 4
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_OFST 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_LBN 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_WIDTH 1
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_OFST 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_LBN 1
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_WIDTH 1
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_OFST 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_LBN 2
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_WIDTH 1
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_OFST 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_LBN 3
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_WIDTH 1
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_OFST 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_LBN 4
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_WIDTH 1
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_OFST 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_LBN 5
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_WIDTH 1
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_OFST 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_LBN 6
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_WIDTH 1
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_OFST 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_LBN 7
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_WIDTH 1
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_OFST 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_LBN 8
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_WIDTH 1
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_OFST 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_LBN 9
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_WIDTH 1
+
+/* MC_CMD_GET_RX_PREFIX_ID_OUT msgresponse */
+#define    MC_CMD_GET_RX_PREFIX_ID_OUT_LENMIN 8
+#define    MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX 252
+#define    MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_GET_RX_PREFIX_ID_OUT_LEN(num) (4+4*(num))
+#define    MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_NUM(len) (((len)-4)/4)
+/* Number of prefix-ids returned */
+#define       MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_OFST 0
+#define       MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_LEN 4
+/* Opaque prefix identifiers which can be passed into MC_CMD_INIT_RXQ_V5 or
+ * MC_CMD_QUERY_PREFIX_ID
+ */
+#define       MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_OFST 4
+#define       MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_LEN 4
+#define       MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MINNUM 1
+#define       MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM 62
+#define       MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM_MCDI2 254
+
+/* RX_PREFIX_FIELD_INFO structuredef: Information about a single RX prefix
+ * field
+ */
+#define    RX_PREFIX_FIELD_INFO_LEN 4
+/* The offset of the field from the start of the prefix, in bits */
+#define       RX_PREFIX_FIELD_INFO_OFFSET_BITS_OFST 0
+#define       RX_PREFIX_FIELD_INFO_OFFSET_BITS_LEN 2
+#define       RX_PREFIX_FIELD_INFO_OFFSET_BITS_LBN 0
+#define       RX_PREFIX_FIELD_INFO_OFFSET_BITS_WIDTH 16
+/* The width of the field, in bits */
+#define       RX_PREFIX_FIELD_INFO_WIDTH_BITS_OFST 2
+#define       RX_PREFIX_FIELD_INFO_WIDTH_BITS_LEN 1
+#define       RX_PREFIX_FIELD_INFO_WIDTH_BITS_LBN 16
+#define       RX_PREFIX_FIELD_INFO_WIDTH_BITS_WIDTH 8
+/* The type of the field. These enum values are in the same order as the fields
+ * in the MC_CMD_GET_RX_PREFIX_ID_IN bitmask
+ */
+#define       RX_PREFIX_FIELD_INFO_TYPE_OFST 3
+#define       RX_PREFIX_FIELD_INFO_TYPE_LEN 1
+#define          RX_PREFIX_FIELD_INFO_LENGTH 0x0 /* enum */
+#define          RX_PREFIX_FIELD_INFO_RSS_HASH_VALID 0x1 /* enum */
+#define          RX_PREFIX_FIELD_INFO_USER_FLAG 0x2 /* enum */
+#define          RX_PREFIX_FIELD_INFO_CLASS 0x3 /* enum */
+#define          RX_PREFIX_FIELD_INFO_PARTIAL_TSTAMP 0x4 /* enum */
+#define          RX_PREFIX_FIELD_INFO_RSS_HASH 0x5 /* enum */
+#define          RX_PREFIX_FIELD_INFO_USER_MARK 0x6 /* enum */
+#define          RX_PREFIX_FIELD_INFO_INGRESS_VPORT 0x7 /* enum */
+#define          RX_PREFIX_FIELD_INFO_CSUM_FRAME 0x8 /* enum */
+#define          RX_PREFIX_FIELD_INFO_VLAN_STRIP_TCI 0x9 /* enum */
+#define       RX_PREFIX_FIELD_INFO_TYPE_LBN 24
+#define       RX_PREFIX_FIELD_INFO_TYPE_WIDTH 8
+
+/* RX_PREFIX_FIXED_RESPONSE structuredef: Information about an RX prefix in
+ * which every field has a fixed offset and width
+ */
+#define    RX_PREFIX_FIXED_RESPONSE_LENMIN 4
+#define    RX_PREFIX_FIXED_RESPONSE_LENMAX 252
+#define    RX_PREFIX_FIXED_RESPONSE_LENMAX_MCDI2 1020
+#define    RX_PREFIX_FIXED_RESPONSE_LEN(num) (4+4*(num))
+#define    RX_PREFIX_FIXED_RESPONSE_FIELDS_NUM(len) (((len)-4)/4)
+/* Length of the RX prefix in bytes */
+#define       RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_OFST 0
+#define       RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LEN 1
+#define       RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LBN 0
+#define       RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_WIDTH 8
+/* Number of fields present in the prefix */
+#define       RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_OFST 1
+#define       RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LEN 1
+#define       RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LBN 8
+#define       RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_WIDTH 8
+#define       RX_PREFIX_FIXED_RESPONSE_RESERVED_OFST 2
+#define       RX_PREFIX_FIXED_RESPONSE_RESERVED_LEN 2
+#define       RX_PREFIX_FIXED_RESPONSE_RESERVED_LBN 16
+#define       RX_PREFIX_FIXED_RESPONSE_RESERVED_WIDTH 16
+/* Array of RX_PREFIX_FIELD_INFO structures, of length FIELD_COUNT */
+#define       RX_PREFIX_FIXED_RESPONSE_FIELDS_OFST 4
+#define       RX_PREFIX_FIXED_RESPONSE_FIELDS_LEN 4
+#define       RX_PREFIX_FIXED_RESPONSE_FIELDS_MINNUM 0
+#define       RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM 62
+#define       RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM_MCDI2 254
+#define       RX_PREFIX_FIXED_RESPONSE_FIELDS_LBN 32
+#define       RX_PREFIX_FIXED_RESPONSE_FIELDS_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_QUERY_RX_PREFIX_ID
+ * This command takes an RX prefix id (obtained from MC_CMD_GET_RX_PREFIX_ID)
+ * and returns a description of the RX prefix of packets delievered to an RXQ
+ * created with that prefix id
+ */
+#define MC_CMD_QUERY_RX_PREFIX_ID 0x13c
+#undef MC_CMD_0x13c_PRIVILEGE_CTG
+
+#define MC_CMD_0x13c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_QUERY_RX_PREFIX_ID_IN msgrequest */
+#define    MC_CMD_QUERY_RX_PREFIX_ID_IN_LEN 4
+/* Prefix id to query */
+#define       MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_OFST 0
+#define       MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_LEN 4
+
+/* MC_CMD_QUERY_RX_PREFIX_ID_OUT msgresponse */
+#define    MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMIN 4
+#define    MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX 252
+#define    MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_QUERY_RX_PREFIX_ID_OUT_LEN(num) (4+1*(num))
+#define    MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_NUM(len) (((len)-4)/1)
+/* An enum describing the structure of this response. */
+#define       MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_OFST 0
+#define       MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_LEN 1
+/* enum: The response is of format RX_PREFIX_FIXED_RESPONSE */
+#define          MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_FIXED 0x0
+#define       MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_OFST 1
+#define       MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_LEN 3
+/* The response. Its format is as defined by the RESPONSE_TYPE value */
+#define       MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_OFST 4
+#define       MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_LEN 1
+#define       MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MINNUM 0
+#define       MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM 248
+#define       MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM_MCDI2 1016
+
+
+/***********************************/
+/* MC_CMD_BUNDLE
+ * A command to perform various bundle-related operations on insecure cards.
+ */
+#define MC_CMD_BUNDLE 0x13d
+#undef MC_CMD_0x13d_PRIVILEGE_CTG
+
+#define MC_CMD_0x13d_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_BUNDLE_IN msgrequest */
+#define    MC_CMD_BUNDLE_IN_LEN 4
+/* Sub-command code */
+#define       MC_CMD_BUNDLE_IN_OP_OFST 0
+#define       MC_CMD_BUNDLE_IN_OP_LEN 4
+/* enum: Get the current host access mode set on component partitions. */
+#define          MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_GET 0x0
+/* enum: Set the host access mode set on component partitions. */
+#define          MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_SET 0x1
+
+/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN msgrequest: Retrieve the current
+ * access mode on component partitions such as MC_FIRMWARE, SUC_FIRMWARE and
+ * EXPANSION_UEFI. This command only works on engineering (insecure) cards. On
+ * secure adapters, this command returns MC_CMD_ERR_EPERM.
+ */
+#define    MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_LEN 4
+/* Sub-command code. Must be OP_COMPONENT_ACCESS_GET. */
+#define       MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_OFST 0
+#define       MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_LEN 4
+
+/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT msgresponse: Returns the access
+ * control mode.
+ */
+#define    MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_LEN 4
+/* Access mode of component partitions. */
+#define       MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_OFST 0
+#define       MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_LEN 4
+/* enum: Component partitions are read-only from the host. */
+#define          MC_CMD_BUNDLE_COMPONENTS_READ_ONLY 0x0
+/* enum: Component partitions can be read from and written to by the host. */
+#define          MC_CMD_BUNDLE_COMPONENTS_READ_WRITE 0x1
+
+/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN msgrequest: The component
+ * partitions such as MC_FIRMWARE, SUC_FIRMWARE, EXPANSION_UEFI are set as
+ * read-only on firmware built with bundle support. This command marks these
+ * partitions as read/writeable. The access status set by this command does not
+ * persist across MC reboots. This command only works on engineering (insecure)
+ * cards. On secure adapters, this command returns MC_CMD_ERR_EPERM.
+ */
+#define    MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_LEN 8
+/* Sub-command code. Must be OP_COMPONENT_ACCESS_SET. */
+#define       MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_OFST 0
+#define       MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_LEN 4
+/* Access mode of component partitions. */
+#define       MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_OFST 4
+#define       MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT/ACCESS_MODE */
+
+/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT msgresponse */
+#define    MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VPD
+ * Read all VPD starting from a given address
+ */
+#define MC_CMD_GET_VPD 0x165
+#undef MC_CMD_0x165_PRIVILEGE_CTG
+
+#define MC_CMD_0x165_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VPD_IN msgrequest */
+#define    MC_CMD_GET_VPD_IN_LEN 4
+/* VPD address to start from. In case VPD is longer than MCDI buffer
+ * (unlikely), user can make multiple calls with different starting addresses.
+ */
+#define       MC_CMD_GET_VPD_IN_ADDR_OFST 0
+#define       MC_CMD_GET_VPD_IN_ADDR_LEN 4
+
+/* MC_CMD_GET_VPD_OUT msgresponse */
+#define    MC_CMD_GET_VPD_OUT_LENMIN 0
+#define    MC_CMD_GET_VPD_OUT_LENMAX 252
+#define    MC_CMD_GET_VPD_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_GET_VPD_OUT_LEN(num) (0+1*(num))
+#define    MC_CMD_GET_VPD_OUT_DATA_NUM(len) (((len)-0)/1)
+/* VPD data returned. */
+#define       MC_CMD_GET_VPD_OUT_DATA_OFST 0
+#define       MC_CMD_GET_VPD_OUT_DATA_LEN 1
+#define       MC_CMD_GET_VPD_OUT_DATA_MINNUM 0
+#define       MC_CMD_GET_VPD_OUT_DATA_MAXNUM 252
+#define       MC_CMD_GET_VPD_OUT_DATA_MAXNUM_MCDI2 1020
+
+
+/***********************************/
+/* MC_CMD_GET_NCSI_INFO
+ * Provide information about the NC-SI stack
+ */
+#define MC_CMD_GET_NCSI_INFO 0x167
+#undef MC_CMD_0x167_PRIVILEGE_CTG
+
+#define MC_CMD_0x167_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_NCSI_INFO_IN msgrequest */
+#define    MC_CMD_GET_NCSI_INFO_IN_LEN 8
+/* Operation to be performed */
+#define       MC_CMD_GET_NCSI_INFO_IN_OP_OFST 0
+#define       MC_CMD_GET_NCSI_INFO_IN_OP_LEN 4
+/* enum: Information on the link settings. */
+#define          MC_CMD_GET_NCSI_INFO_IN_OP_LINK 0x0
+/* enum: Statistics associated with the channel */
+#define          MC_CMD_GET_NCSI_INFO_IN_OP_STATISTICS 0x1
+/* The NC-SI channel on which the operation is to be performed */
+#define       MC_CMD_GET_NCSI_INFO_IN_CHANNEL_OFST 4
+#define       MC_CMD_GET_NCSI_INFO_IN_CHANNEL_LEN 4
+
+/* MC_CMD_GET_NCSI_INFO_LINK_OUT msgresponse */
+#define    MC_CMD_GET_NCSI_INFO_LINK_OUT_LEN 12
+/* Settings as received from BMC. */
+#define       MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_OFST 0
+#define       MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_LEN 4
+/* Advertised capabilities applied to channel. */
+#define       MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_OFST 4
+#define       MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_LEN 4
+/* General status */
+#define       MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_OFST 8
+#define       MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_LEN 4
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_OFST 8
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_LBN 0
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_WIDTH 2
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_OFST 8
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_LBN 2
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_WIDTH 1
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_OFST 8
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_LBN 3
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_WIDTH 1
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_OFST 8
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_LBN 4
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_WIDTH 1
+
+/* MC_CMD_GET_NCSI_INFO_STATISTICS_OUT msgresponse */
+#define    MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_LEN 28
+/* The number of NC-SI commands received. */
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_OFST 0
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_LEN 4
+/* The number of NC-SI commands dropped. */
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_OFST 4
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_LEN 4
+/* The number of invalid NC-SI commands received. */
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_OFST 8
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_LEN 4
+/* The number of checksum errors seen. */
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_OFST 12
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_LEN 4
+/* The number of NC-SI requests received. */
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_OFST 16
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_LEN 4
+/* The number of NC-SI responses sent (includes AENs) */
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_OFST 20
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_LEN 4
+/* The number of NC-SI AENs sent */
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_OFST 24
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_LEN 4
+
+
+/* CLOCK_INFO structuredef: Information about a single hardware clock */
+#define    CLOCK_INFO_LEN 28
+/* Enumeration that uniquely identifies the clock */
+#define       CLOCK_INFO_CLOCK_ID_OFST 0
+#define       CLOCK_INFO_CLOCK_ID_LEN 2
+/* enum: The Riverhead CMC (card MC) */
+#define          CLOCK_INFO_CLOCK_CMC 0x0
+/* enum: The Riverhead NMC (network MC) */
+#define          CLOCK_INFO_CLOCK_NMC 0x1
+/* enum: The Riverhead SDNET slice main logic */
+#define          CLOCK_INFO_CLOCK_SDNET 0x2
+/* enum: The Riverhead SDNET LUT */
+#define          CLOCK_INFO_CLOCK_SDNET_LUT 0x3
+/* enum: The Riverhead SDNET control logic */
+#define          CLOCK_INFO_CLOCK_SDNET_CTRL 0x4
+/* enum: The Riverhead Streaming SubSystem */
+#define          CLOCK_INFO_CLOCK_SSS 0x5
+/* enum: The Riverhead network MAC and associated CSR registers */
+#define          CLOCK_INFO_CLOCK_MAC 0x6
+#define       CLOCK_INFO_CLOCK_ID_LBN 0
+#define       CLOCK_INFO_CLOCK_ID_WIDTH 16
+/* Assorted flags */
+#define       CLOCK_INFO_FLAGS_OFST 2
+#define       CLOCK_INFO_FLAGS_LEN 2
+#define        CLOCK_INFO_SETTABLE_OFST 2
+#define        CLOCK_INFO_SETTABLE_LBN 0
+#define        CLOCK_INFO_SETTABLE_WIDTH 1
+#define       CLOCK_INFO_FLAGS_LBN 16
+#define       CLOCK_INFO_FLAGS_WIDTH 16
+/* The frequency in HZ */
+#define       CLOCK_INFO_FREQUENCY_OFST 4
+#define       CLOCK_INFO_FREQUENCY_LEN 8
+#define       CLOCK_INFO_FREQUENCY_LO_OFST 4
+#define       CLOCK_INFO_FREQUENCY_HI_OFST 8
+#define       CLOCK_INFO_FREQUENCY_LBN 32
+#define       CLOCK_INFO_FREQUENCY_WIDTH 64
+/* Human-readable ASCII name for clock, with NUL termination */
+#define       CLOCK_INFO_NAME_OFST 12
+#define       CLOCK_INFO_NAME_LEN 1
+#define       CLOCK_INFO_NAME_NUM 16
+#define       CLOCK_INFO_NAME_LBN 96
+#define       CLOCK_INFO_NAME_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCKS_INFO
+ * Get information about the device clocks
+ */
+#define MC_CMD_GET_CLOCKS_INFO 0x166
+#undef MC_CMD_0x166_PRIVILEGE_CTG
+
+#define MC_CMD_0x166_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CLOCKS_INFO_IN msgrequest */
+#define    MC_CMD_GET_CLOCKS_INFO_IN_LEN 0
+
+/* MC_CMD_GET_CLOCKS_INFO_OUT msgresponse */
+#define    MC_CMD_GET_CLOCKS_INFO_OUT_LENMIN 0
+#define    MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX 252
+#define    MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX_MCDI2 1008
+#define    MC_CMD_GET_CLOCKS_INFO_OUT_LEN(num) (0+28*(num))
+#define    MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_NUM(len) (((len)-0)/28)
+/* An array of CLOCK_INFO structures. */
+#define       MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_OFST 0
+#define       MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_LEN 28
+#define       MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MINNUM 0
+#define       MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM 9
+#define       MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM_MCDI2 36
+
+
+/***********************************/
+/* MC_CMD_VNIC_ENCAP_RULE_ADD
+ * Add a rule for detecting encapsulations in the VNIC stage. Currently this only affects checksum validation in VNIC RX - on TX the send descriptor explicitly specifies encapsulation. These rules are per-VNIC, i.e. only apply to the current driver. If a rule matches, then the packet is considered to have the corresponding encapsulation type, and the inner packet is parsed. It is up to the driver to ensure that overlapping rules are not inserted. (If a packet would match multiple rules, a random one of them will be used.) A rule with the exact same match criteria may not be inserted twice (EALREADY). Only a limited number MATCH_FLAGS values are supported, use MC_CMD_GET_PARSER_DISP_INFO with OP OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported combinations. Each driver may only have a limited set of active rules - returns ENOSPC if the caller's table is full.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD 0x16d
+#undef MC_CMD_0x16d_PRIVILEGE_CTG
+
+#define MC_CMD_0x16d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VNIC_ENCAP_RULE_ADD_IN msgrequest */
+#define    MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN 36
+/* Set to MAE_MPORT_SELECTOR_ASSIGNED. In the future this may be relaxed. */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_OFST 0
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_LEN 4
+/* Any non-zero bits other than the ones named below or an unsupported
+ * combination will cause the NIC to return EOPNOTSUPP. In the future more
+ * flags may be added.
+ */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_OFST 4
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_LEN 4
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_OFST 4
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_LBN 0
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_OFST 4
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_LBN 1
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_OFST 4
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_LBN 2
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_WIDTH 1
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_OFST 4
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_LBN 3
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_WIDTH 1
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_OFST 4
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_LBN 4
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_WIDTH 1
+/* Only if MATCH_ETHER_TYPE is set. Ethertype value as bytes in network order.
+ * Currently only IPv4 (0x0800) and IPv6 (0x86DD) ethertypes may be used.
+ */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_OFST 8
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_LEN 2
+/* Only if MATCH_OUTER_VLAN is set. VID value as bytes in network order.
+ * (Deprecated)
+ */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_LBN 80
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WIDTH 12
+/* Only if MATCH_OUTER_VLAN is set. Aligned wrapper for OUTER_VLAN_VID. */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_OFST 10
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_LEN 2
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_OFST 10
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_LBN 0
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_WIDTH 12
+/* Only if MATCH_DST_IP is set. IP address as bytes in network order. In the
+ * case of IPv4, the IP should be in the first 4 bytes and all other bytes
+ * should be zero.
+ */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_OFST 12
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_LEN 16
+/* Only if MATCH_IP_PROTO is set. Currently only UDP proto (17) may be used. */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST 28
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_LEN 1
+/* Actions that should be applied to packets that match the rule. */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_OFST 29
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_LEN 1
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_OFST 29
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_LBN 0
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_WIDTH 1
+/* Only if MATCH_DST_PORT is set. Port number as bytes in network order. */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST 30
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_LEN 2
+/* Resulting encapsulation type, as per MAE_MCDI_ENCAP_TYPE enumeration. */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_OFST 32
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_LEN 4
+
+/* MC_CMD_VNIC_ENCAP_RULE_ADD_OUT msgresponse */
+#define    MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_LEN 4
+/* Handle to inserted rule. Used for removing the rule. */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_OFST 0
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_VNIC_ENCAP_RULE_REMOVE
+ * Remove a VNIC encapsulation rule. Packets which would have previously matched the rule will then be considered as unencapsulated. Returns EALREADY if the input HANDLE doesn't correspond to an existing rule.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_REMOVE 0x16e
+#undef MC_CMD_0x16e_PRIVILEGE_CTG
+
+#define MC_CMD_0x16e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN msgrequest */
+#define    MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_LEN 4
+/* Handle which was returned by MC_CMD_VNIC_ENCAP_RULE_ADD. */
+#define       MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_OFST 0
+#define       MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_LEN 4
+
+/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT msgresponse */
+#define    MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT_LEN 0
+
+/* FUNCTION_PERSONALITY structuredef: The meanings of the personalities are
+ * defined in SF-120734-TC with more information in SF-122717-TC.
+ */
+#define    FUNCTION_PERSONALITY_LEN 4
+#define       FUNCTION_PERSONALITY_ID_OFST 0
+#define       FUNCTION_PERSONALITY_ID_LEN 4
+/* enum: Function has no assigned personality */
+#define          FUNCTION_PERSONALITY_NULL 0x0
+/* enum: Function has an EF100-style function control window and VI windows
+ * with both EF100 and vDPA doorbells.
+ */
+#define          FUNCTION_PERSONALITY_EF100 0x1
+/* enum: Function has virtio net device configuration registers and doorbells
+ * for virtio queue pairs.
+ */
+#define          FUNCTION_PERSONALITY_VIRTIO_NET 0x2
+/* enum: Function has virtio block device configuration registers and a
+ * doorbell for a single virtqueue.
+ */
+#define          FUNCTION_PERSONALITY_VIRTIO_BLK 0x3
+/* enum: Function is a Xilinx acceleration device - management function */
+#define          FUNCTION_PERSONALITY_ACCEL_MGMT 0x4
+/* enum: Function is a Xilinx acceleration device - user function */
+#define          FUNCTION_PERSONALITY_ACCEL_USR 0x5
+#define       FUNCTION_PERSONALITY_ID_LBN 0
+#define       FUNCTION_PERSONALITY_ID_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_VIRTIO_GET_FEATURES
+ * Get a list of the virtio features supported by the device.
+ */
+#define MC_CMD_VIRTIO_GET_FEATURES 0x168
+#undef MC_CMD_0x168_PRIVILEGE_CTG
+
+#define MC_CMD_0x168_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VIRTIO_GET_FEATURES_IN msgrequest */
+#define    MC_CMD_VIRTIO_GET_FEATURES_IN_LEN 4
+/* Type of device to get features for. Matches the device id as defined by the
+ * virtio spec.
+ */
+#define       MC_CMD_VIRTIO_GET_FEATURES_IN_DEVICE_ID_OFST 0
+#define       MC_CMD_VIRTIO_GET_FEATURES_IN_DEVICE_ID_LEN 4
+/* enum: Reserved. Do not use. */
+#define          MC_CMD_VIRTIO_GET_FEATURES_IN_RESERVED 0x0
+/* enum: Net device. */
+#define          MC_CMD_VIRTIO_GET_FEATURES_IN_NET 0x1
+/* enum: Block device. */
+#define          MC_CMD_VIRTIO_GET_FEATURES_IN_BLOCK 0x2
+
+/* MC_CMD_VIRTIO_GET_FEATURES_OUT msgresponse */
+#define    MC_CMD_VIRTIO_GET_FEATURES_OUT_LEN 8
+/* Features supported by the device. The result is a bitfield in the format of
+ * the feature bits of the specified device type as defined in the virtIO 1.1
+ * specification ( https://docs.oasis-
+ * open.org/virtio/virtio/v1.1/csprd01/virtio-v1.1-csprd01.pdf )
+ */
+#define       MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_OFST 0
+#define       MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LEN 8
+#define       MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LO_OFST 0
+#define       MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_HI_OFST 4
+
+
+/***********************************/
+/* MC_CMD_VIRTIO_TEST_FEATURES
+ * Query whether a given set of features is supported. Fails with ENOSUP if the
+ * driver requests a feature the device doesn't support. Fails with EINVAL if
+ * the driver fails to request a feature which the device requires.
+ */
+#define MC_CMD_VIRTIO_TEST_FEATURES 0x169
+#undef MC_CMD_0x169_PRIVILEGE_CTG
+
+#define MC_CMD_0x169_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VIRTIO_TEST_FEATURES_IN msgrequest */
+#define    MC_CMD_VIRTIO_TEST_FEATURES_IN_LEN 16
+/* Type of device to test features for. Matches the device id as defined by the
+ * virtio spec.
+ */
+#define       MC_CMD_VIRTIO_TEST_FEATURES_IN_DEVICE_ID_OFST 0
+#define       MC_CMD_VIRTIO_TEST_FEATURES_IN_DEVICE_ID_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_IN/DEVICE_ID */
+#define       MC_CMD_VIRTIO_TEST_FEATURES_IN_RESERVED_OFST 4
+#define       MC_CMD_VIRTIO_TEST_FEATURES_IN_RESERVED_LEN 4
+/* Features requested. Same format as the returned value from
+ * MC_CMD_VIRTIO_GET_FEATURES.
+ */
+#define       MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_OFST 8
+#define       MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LEN 8
+#define       MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LO_OFST 8
+#define       MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_HI_OFST 12
+
+/* MC_CMD_VIRTIO_TEST_FEATURES_OUT msgresponse */
+#define    MC_CMD_VIRTIO_TEST_FEATURES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VIRTIO_INIT_QUEUE
+ * Create a virtio virtqueue. Fails with EALREADY if the queue already exists.
+ * Fails with ENOSUP if a feature is requested that isn't supported. Fails with
+ * EINVAL if a required feature isn't requested, or any other parameter is
+ * invalid.
+ */
+#define MC_CMD_VIRTIO_INIT_QUEUE 0x16a
+#undef MC_CMD_0x16a_PRIVILEGE_CTG
+
+#define MC_CMD_0x16a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VIRTIO_INIT_QUEUE_REQ msgrequest */
+#define    MC_CMD_VIRTIO_INIT_QUEUE_REQ_LEN 68
+/* Type of virtqueue to create. A network rxq and a txq can exist at the same
+ * time on a single VI.
+ */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_QUEUE_TYPE_OFST 0
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_QUEUE_TYPE_LEN 1
+/* enum: A network device receive queue */
+#define          MC_CMD_VIRTIO_INIT_QUEUE_REQ_NET_RXQ 0x0
+/* enum: A network device transmit queue */
+#define          MC_CMD_VIRTIO_INIT_QUEUE_REQ_NET_TXQ 0x1
+/* enum: A block device request queue */
+#define          MC_CMD_VIRTIO_INIT_QUEUE_REQ_BLOCK 0x2
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_RESERVED_OFST 1
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_RESERVED_LEN 1
+/* If the calling function is a PF and this field is not VF_NULL, create the
+ * queue on the specified child VF instead of on the PF.
+ */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_TARGET_VF_OFST 2
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_TARGET_VF_LEN 2
+/* enum: No VF, create queue on the PF. */
+#define          MC_CMD_VIRTIO_INIT_QUEUE_REQ_VF_NULL 0xffff
+/* Desired instance. This is the function-local index of the associated VI, not
+ * the virtqueue number as counted by the virtqueue spec.
+ */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_INSTANCE_OFST 4
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_INSTANCE_LEN 4
+/* Queue size, in entries. Must be a power of two. */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_SIZE_OFST 8
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_SIZE_LEN 4
+/* Flags */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_FLAGS_OFST 12
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_FLAGS_LEN 4
+#define        MC_CMD_VIRTIO_INIT_QUEUE_REQ_USE_PASID_OFST 12
+#define        MC_CMD_VIRTIO_INIT_QUEUE_REQ_USE_PASID_LBN 0
+#define        MC_CMD_VIRTIO_INIT_QUEUE_REQ_USE_PASID_WIDTH 1
+/* Address of the descriptor table in the virtqueue. */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_OFST 16
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LEN 8
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO_OFST 16
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI_OFST 20
+/* Address of the available ring in the virtqueue. */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_OFST 24
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LEN 8
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO_OFST 24
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI_OFST 28
+/* Address of the used ring in the virtqueue. */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_OFST 32
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LEN 8
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO_OFST 32
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI_OFST 36
+/* PASID to use on PCIe transactions involving this queue. Ignored if the
+ * USE_PASID flag is not set.
+ */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_PASID_OFST 40
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_PASID_LEN 4
+/* Which MSIX vector to use for this virtqueue, or NO_VECTOR if MSIX should not
+ * be used.
+ */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_MSIX_VECTOR_OFST 44
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_MSIX_VECTOR_LEN 2
+/* enum: Do not enable interrupts for this virtqueue */
+#define          MC_CMD_VIRTIO_INIT_QUEUE_REQ_NO_VECTOR 0xffff
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_RESERVED2_OFST 46
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_RESERVED2_LEN 2
+/* Virtio features to apply to this queue. Same format as the in the virtio
+ * spec and in the return from MC_CMD_VIRTIO_GET_FEATURES. Must be a subset of
+ * the features returned from MC_CMD_VIRTIO_GET_FEATURES. Features are per-
+ * queue because with vDPA multiple queues on the same function can be passed
+ * through to different virtual hosts as independent devices.
+ */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_OFST 48
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LEN 8
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LO_OFST 48
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_HI_OFST 52
+/*            Enum values, see field(s): */
+/*               MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_OUT/FEATURES */
+/* The initial producer index for this queue's used ring. If this queue is being
+ * created to be migrated into, this should be the FINAL_PIDX value returned by
+ * MC_CMD_VIRTIO_FINI_QUEUE of the queue being migrated from. Otherwise, it
+ * should be zero.
+ */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_PIDX_OFST 56
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_PIDX_LEN 4
+/* The initial consumer index for this queue's available ring. If this queue is
+ * being created to be migrated into, this should be the FINAL_CIDX value
+ * returned by MC_CMD_VIRTIO_FINI_QUEUE of the queue being migrated from.
+ * Otherwise, it should be zero.
+ */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_CIDX_OFST 60
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_CIDX_LEN 4
+/* A MAE_MPORT_SELECTOR defining which mport this queue should be associated
+ * with. Use MAE_MPORT_SELECTOR_ASSIGNED to request the default mport for the
+ * function this queue is being created on.
+ */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_MPORT_SELECTOR_OFST 64
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_MPORT_SELECTOR_LEN 4
+
+/* MC_CMD_VIRTIO_INIT_QUEUE_RESP msgresponse */
+#define    MC_CMD_VIRTIO_INIT_QUEUE_RESP_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VIRTIO_FINI_QUEUE
+ * Destroy a virtio virtqueue
+ */
+#define MC_CMD_VIRTIO_FINI_QUEUE 0x16b
+#undef MC_CMD_0x16b_PRIVILEGE_CTG
+
+#define MC_CMD_0x16b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VIRTIO_FINI_QUEUE_REQ msgrequest */
+#define    MC_CMD_VIRTIO_FINI_QUEUE_REQ_LEN 8
+/* Type of virtqueue to destroy. */
+#define       MC_CMD_VIRTIO_FINI_QUEUE_REQ_QUEUE_TYPE_OFST 0
+#define       MC_CMD_VIRTIO_FINI_QUEUE_REQ_QUEUE_TYPE_LEN 1
+/*            Enum values, see field(s): */
+/*               MC_CMD_VIRTIO_INIT_QUEUE/MC_CMD_VIRTIO_INIT_QUEUE_REQ/QUEUE_TYPE */
+#define       MC_CMD_VIRTIO_FINI_QUEUE_REQ_RESERVED_OFST 1
+#define       MC_CMD_VIRTIO_FINI_QUEUE_REQ_RESERVED_LEN 1
+/* If the calling function is a PF and this field is not VF_NULL, destroy the
+ * queue on the specified child VF instead of on the PF.
+ */
+#define       MC_CMD_VIRTIO_FINI_QUEUE_REQ_TARGET_VF_OFST 2
+#define       MC_CMD_VIRTIO_FINI_QUEUE_REQ_TARGET_VF_LEN 2
+/* enum: No VF, destroy the queue on the PF. */
+#define          MC_CMD_VIRTIO_FINI_QUEUE_REQ_VF_NULL 0xffff
+/* Instance to destroy */
+#define       MC_CMD_VIRTIO_FINI_QUEUE_REQ_INSTANCE_OFST 4
+#define       MC_CMD_VIRTIO_FINI_QUEUE_REQ_INSTANCE_LEN 4
+
+/* MC_CMD_VIRTIO_FINI_QUEUE_RESP msgresponse */
+#define    MC_CMD_VIRTIO_FINI_QUEUE_RESP_LEN 8
+/* The producer index of the used ring when the queue was stopped. */
+#define       MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_PIDX_OFST 0
+#define       MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_PIDX_LEN 4
+/* The consumer index of the available ring when the queue was stopped. */
+#define       MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_CIDX_OFST 4
+#define       MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_CIDX_LEN 4
+
+
+/***********************************/
+/* MC_CMD_VIRTIO_GET_DOORBELL_OFFSET
+ * Get the offset in the BAR of the doorbells for a VI. Doesn't require the
+ * queue(s) to be allocated.
+ */
+#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET 0x16c
+#undef MC_CMD_0x16c_PRIVILEGE_CTG
+
+#define MC_CMD_0x16c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ msgrequest */
+#define    MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_LEN 8
+/* Type of device to get information for. Matches the device id as defined by
+ * the virtio spec.
+ */
+#define       MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_DEVICE_ID_OFST 0
+#define       MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_DEVICE_ID_LEN 1
+/*            Enum values, see field(s): */
+/*               MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_IN/DEVICE_ID */
+#define       MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_RESERVED_OFST 1
+#define       MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_RESERVED_LEN 1
+/* If the calling function is a PF and this field is not VF_NULL, query the VI
+ * on the specified child VF instead of on the PF.
+ */
+#define       MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_TARGET_VF_OFST 2
+#define       MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_TARGET_VF_LEN 2
+/* enum: No VF, query the PF. */
+#define          MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_VF_NULL 0xffff
+/* VI instance to query */
+#define       MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_INSTANCE_OFST 4
+#define       MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_INSTANCE_LEN 4
+
+/* MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP msgresponse */
+#define    MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_LEN 8
+/* Offset of RX doorbell in BAR */
+#define       MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_RX_DBL_OFFSET_OFST 0
+#define       MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_RX_DBL_OFFSET_LEN 4
+/* Offset of TX doorbell in BAR */
+#define       MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_TX_DBL_OFFSET_OFST 4
+#define       MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_TX_DBL_OFFSET_LEN 4
+
+/* MC_CMD_VIRTIO_GET_BLOCK_DOORBELL_OFFSET_RESP msgresponse */
+#define    MC_CMD_VIRTIO_GET_BLOCK_DOORBELL_OFFSET_RESP_LEN 4
+/* Offset of request doorbell in BAR */
+#define       MC_CMD_VIRTIO_GET_BLOCK_DOORBELL_OFFSET_RESP_DBL_OFFSET_OFST 0
+#define       MC_CMD_VIRTIO_GET_BLOCK_DOORBELL_OFFSET_RESP_DBL_OFFSET_LEN 4
+
+/* PCIE_FUNCTION structuredef: Structure representing a PCIe function ID
+ * (interface/PF/VF tuple)
+ */
+#define    PCIE_FUNCTION_LEN 8
+/* PCIe PF function number */
+#define       PCIE_FUNCTION_PF_OFST 0
+#define       PCIE_FUNCTION_PF_LEN 2
+/* enum: Wildcard value representing any available function (e.g in resource
+ * allocation requests)
+ */
+#define          PCIE_FUNCTION_PF_ANY 0xfffe
+/* enum: Value representing invalid (null) function */
+#define          PCIE_FUNCTION_PF_NULL 0xffff
+#define       PCIE_FUNCTION_PF_LBN 0
+#define       PCIE_FUNCTION_PF_WIDTH 16
+/* PCIe VF Function number (PF relative) */
+#define       PCIE_FUNCTION_VF_OFST 2
+#define       PCIE_FUNCTION_VF_LEN 2
+/* enum: Wildcard value representing any available function (e.g in resource
+ * allocation requests)
+ */
+#define          PCIE_FUNCTION_VF_ANY 0xfffe
+/* enum: Function is a PF (when PF != PF_NULL) or invalid function (when PF ==
+ * PF_NULL)
+ */
+#define          PCIE_FUNCTION_VF_NULL 0xffff
+#define       PCIE_FUNCTION_VF_LBN 16
+#define       PCIE_FUNCTION_VF_WIDTH 16
+/* PCIe interface of the function */
+#define       PCIE_FUNCTION_INTF_OFST 4
+#define       PCIE_FUNCTION_INTF_LEN 4
+/* enum: Host PCIe interface */
+#define          PCIE_FUNCTION_INTF_HOST 0x0
+/* enum: Application Processor interface */
+#define          PCIE_FUNCTION_INTF_AP 0x1
+#define       PCIE_FUNCTION_INTF_LBN 32
+#define       PCIE_FUNCTION_INTF_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_CREATE
+ * Descriptor proxy functions are abstract devices that forward all request
+ * submitted to the host PCIe function (descriptors submitted to Virtio or
+ * EF100 queues) to be handled on another function (most commonly on the
+ * embedded Application Processor), via EF100 descriptor proxy, memory-to-
+ * memory and descriptor-to-completion mechanisms. Primary user is Virtio-blk
+ * subsystem, see SF-122927-TC. This function allocates a new descriptor proxy
+ * function on the host and assigns a user-defined label. The actual function
+ * configuration is not persisted until the caller configures it with
+ * MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN and commits with
+ * MC_CMD_DESC_PROXY_FUNC_COMMIT_IN.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_CREATE 0x172
+#undef MC_CMD_0x172_PRIVILEGE_CTG
+
+#define MC_CMD_0x172_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_CREATE_IN msgrequest */
+#define    MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LEN 52
+/* PCIe Function ID to allocate (as struct PCIE_FUNCTION). Set to
+ * {PF_ANY,VF_ANY,interface} for "any available function" Set to
+ * {PF_ANY,VF_NULL,interface} for "any available PF"
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LEN 8
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_OFST 4
+/* The personality to set. The meanings of the personalities are defined in
+ * SF-120734-TC with more information in SF-122717-TC. At present, we only
+ * support proxying for VIRTIO_BLK
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_OFST 8
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_LEN 4
+/*            Enum values, see field(s): */
+/*               FUNCTION_PERSONALITY/ID */
+/* User-defined label (zero-terminated ASCII string) to uniquely identify the
+ * function
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_OFST 12
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_LEN 40
+
+/* MC_CMD_DESC_PROXY_FUNC_CREATE_OUT msgresponse */
+#define    MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_LEN 12
+/* Handle to the descriptor proxy function */
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_LEN 4
+/* Allocated function ID (as struct PCIE_FUNCTION) */
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_OFST 4
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LEN 8
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_OFST 4
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_OFST 8
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_DESTROY
+ * Remove an existing descriptor proxy function. Underlying function
+ * personality and configuration reverts back to factory default. Function
+ * configuration is committed immediately to specified store and any function
+ * ownership is released.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_DESTROY 0x173
+#undef MC_CMD_0x173_PRIVILEGE_CTG
+
+#define MC_CMD_0x173_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_DESTROY_IN msgrequest */
+#define    MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LEN 44
+/* User-defined label (zero-terminated ASCII string) to uniquely identify the
+ * function
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LABEL_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LABEL_LEN 40
+/* Store from which to remove function configuration */
+#define       MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_STORE_OFST 40
+#define       MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_STORE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_DESC_PROXY_FUNC_COMMIT/MC_CMD_DESC_PROXY_FUNC_COMMIT_IN/STORE */
+
+/* MC_CMD_DESC_PROXY_FUNC_DESTROY_OUT msgresponse */
+#define    MC_CMD_DESC_PROXY_FUNC_DESTROY_OUT_LEN 0
+
+/* VIRTIO_BLK_CONFIG structuredef: Virtio block device configuration. See
+ * Virtio specification v1.1, Sections 5.2.3 and 6 for definition of feature
+ * bits. See Virtio specification v1.1, Section 5.2.4 (struct
+ * virtio_blk_config) for definition of remaining configuration fields
+ */
+#define    VIRTIO_BLK_CONFIG_LEN 68
+/* Virtio block device features to advertise, per Virtio 1.1, 5.2.3 and 6 */
+#define       VIRTIO_BLK_CONFIG_FEATURES_OFST 0
+#define       VIRTIO_BLK_CONFIG_FEATURES_LEN 8
+#define       VIRTIO_BLK_CONFIG_FEATURES_LO_OFST 0
+#define       VIRTIO_BLK_CONFIG_FEATURES_HI_OFST 4
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_LBN 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_LBN 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_LBN 2
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_LBN 4
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_LBN 5
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_LBN 6
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_LBN 7
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_LBN 9
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_LBN 10
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_LBN 11
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_LBN 12
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_LBN 13
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_LBN 14
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_LBN 28
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_LBN 29
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_LBN 32
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_LBN 33
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_LBN 34
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_LBN 35
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_LBN 36
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_LBN 37
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_LBN 38
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_WIDTH 1
+#define       VIRTIO_BLK_CONFIG_FEATURES_LBN 0
+#define       VIRTIO_BLK_CONFIG_FEATURES_WIDTH 64
+/* The capacity of the device (expressed in 512-byte sectors) */
+#define       VIRTIO_BLK_CONFIG_CAPACITY_OFST 8
+#define       VIRTIO_BLK_CONFIG_CAPACITY_LEN 8
+#define       VIRTIO_BLK_CONFIG_CAPACITY_LO_OFST 8
+#define       VIRTIO_BLK_CONFIG_CAPACITY_HI_OFST 12
+#define       VIRTIO_BLK_CONFIG_CAPACITY_LBN 64
+#define       VIRTIO_BLK_CONFIG_CAPACITY_WIDTH 64
+/* Maximum size of any single segment. Only valid when VIRTIO_BLK_F_SIZE_MAX is
+ * set.
+ */
+#define       VIRTIO_BLK_CONFIG_SIZE_MAX_OFST 16
+#define       VIRTIO_BLK_CONFIG_SIZE_MAX_LEN 4
+#define       VIRTIO_BLK_CONFIG_SIZE_MAX_LBN 128
+#define       VIRTIO_BLK_CONFIG_SIZE_MAX_WIDTH 32
+/* Maximum number of segments in a request. Only valid when
+ * VIRTIO_BLK_F_SEG_MAX is set.
+ */
+#define       VIRTIO_BLK_CONFIG_SEG_MAX_OFST 20
+#define       VIRTIO_BLK_CONFIG_SEG_MAX_LEN 4
+#define       VIRTIO_BLK_CONFIG_SEG_MAX_LBN 160
+#define       VIRTIO_BLK_CONFIG_SEG_MAX_WIDTH 32
+/* Disk-style geometry - cylinders. Only valid when VIRTIO_BLK_F_GEOMETRY is
+ * set.
+ */
+#define       VIRTIO_BLK_CONFIG_CYLINDERS_OFST 24
+#define       VIRTIO_BLK_CONFIG_CYLINDERS_LEN 2
+#define       VIRTIO_BLK_CONFIG_CYLINDERS_LBN 192
+#define       VIRTIO_BLK_CONFIG_CYLINDERS_WIDTH 16
+/* Disk-style geometry - heads. Only valid when VIRTIO_BLK_F_GEOMETRY is set.
+ */
+#define       VIRTIO_BLK_CONFIG_HEADS_OFST 26
+#define       VIRTIO_BLK_CONFIG_HEADS_LEN 1
+#define       VIRTIO_BLK_CONFIG_HEADS_LBN 208
+#define       VIRTIO_BLK_CONFIG_HEADS_WIDTH 8
+/* Disk-style geometry - sectors. Only valid when VIRTIO_BLK_F_GEOMETRY is set.
+ */
+#define       VIRTIO_BLK_CONFIG_SECTORS_OFST 27
+#define       VIRTIO_BLK_CONFIG_SECTORS_LEN 1
+#define       VIRTIO_BLK_CONFIG_SECTORS_LBN 216
+#define       VIRTIO_BLK_CONFIG_SECTORS_WIDTH 8
+/* Block size of disk. Only valid when VIRTIO_BLK_F_BLK_SIZE is set. */
+#define       VIRTIO_BLK_CONFIG_BLK_SIZE_OFST 28
+#define       VIRTIO_BLK_CONFIG_BLK_SIZE_LEN 4
+#define       VIRTIO_BLK_CONFIG_BLK_SIZE_LBN 224
+#define       VIRTIO_BLK_CONFIG_BLK_SIZE_WIDTH 32
+/* Block topology - number of logical blocks per physical block (log2). Only
+ * valid when VIRTIO_BLK_F_TOPOLOGY is set.
+ */
+#define       VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_OFST 32
+#define       VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_LEN 1
+#define       VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_LBN 256
+#define       VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_WIDTH 8
+/* Block topology - offset of first aligned logical block. Only valid when
+ * VIRTIO_BLK_F_TOPOLOGY is set.
+ */
+#define       VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_OFST 33
+#define       VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_LEN 1
+#define       VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_LBN 264
+#define       VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_WIDTH 8
+/* Block topology - suggested minimum I/O size in blocks. Only valid when
+ * VIRTIO_BLK_F_TOPOLOGY is set.
+ */
+#define       VIRTIO_BLK_CONFIG_MIN_IO_SIZE_OFST 34
+#define       VIRTIO_BLK_CONFIG_MIN_IO_SIZE_LEN 2
+#define       VIRTIO_BLK_CONFIG_MIN_IO_SIZE_LBN 272
+#define       VIRTIO_BLK_CONFIG_MIN_IO_SIZE_WIDTH 16
+/* Block topology - optimal (suggested maximum) I/O size in blocks. Only valid
+ * when VIRTIO_BLK_F_TOPOLOGY is set.
+ */
+#define       VIRTIO_BLK_CONFIG_OPT_IO_SIZE_OFST 36
+#define       VIRTIO_BLK_CONFIG_OPT_IO_SIZE_LEN 4
+#define       VIRTIO_BLK_CONFIG_OPT_IO_SIZE_LBN 288
+#define       VIRTIO_BLK_CONFIG_OPT_IO_SIZE_WIDTH 32
+/* Unused, set to zero. Note that virtio_blk_config.writeback is volatile and
+ * not carried in config data.
+ */
+#define       VIRTIO_BLK_CONFIG_UNUSED0_OFST 40
+#define       VIRTIO_BLK_CONFIG_UNUSED0_LEN 2
+#define       VIRTIO_BLK_CONFIG_UNUSED0_LBN 320
+#define       VIRTIO_BLK_CONFIG_UNUSED0_WIDTH 16
+/* Number of queues. Only valid if the VIRTIO_BLK_F_MQ feature is negotiated.
+ */
+#define       VIRTIO_BLK_CONFIG_NUM_QUEUES_OFST 42
+#define       VIRTIO_BLK_CONFIG_NUM_QUEUES_LEN 2
+#define       VIRTIO_BLK_CONFIG_NUM_QUEUES_LBN 336
+#define       VIRTIO_BLK_CONFIG_NUM_QUEUES_WIDTH 16
+/* Maximum discard sectors size, in 512-byte units. Only valid if
+ * VIRTIO_BLK_F_DISCARD is set.
+ */
+#define       VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_OFST 44
+#define       VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_LEN 4
+#define       VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_LBN 352
+#define       VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_WIDTH 32
+/* Maximum discard segment number. Only valid if VIRTIO_BLK_F_DISCARD is set.
+ */
+#define       VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_OFST 48
+#define       VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_LEN 4
+#define       VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_LBN 384
+#define       VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_WIDTH 32
+/* Discard sector alignment, in 512-byte units. Only valid if
+ * VIRTIO_BLK_F_DISCARD is set.
+ */
+#define       VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_OFST 52
+#define       VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_LEN 4
+#define       VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_LBN 416
+#define       VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_WIDTH 32
+/* Maximum write zeroes sectors size, in 512-byte units. Only valid if
+ * VIRTIO_BLK_F_WRITE_ZEROES is set.
+ */
+#define       VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_OFST 56
+#define       VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_LEN 4
+#define       VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_LBN 448
+#define       VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_WIDTH 32
+/* Maximum write zeroes segment number. Only valid if VIRTIO_BLK_F_WRITE_ZEROES
+ * is set.
+ */
+#define       VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_OFST 60
+#define       VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_LEN 4
+#define       VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_LBN 480
+#define       VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_WIDTH 32
+/* Write zeroes request can result in deallocating one or more sectors. Only
+ * valid if VIRTIO_BLK_F_WRITE_ZEROES is set.
+ */
+#define       VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_OFST 64
+#define       VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_LEN 1
+#define       VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_LBN 512
+#define       VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_WIDTH 8
+/* Unused, set to zero. */
+#define       VIRTIO_BLK_CONFIG_UNUSED1_OFST 65
+#define       VIRTIO_BLK_CONFIG_UNUSED1_LEN 3
+#define       VIRTIO_BLK_CONFIG_UNUSED1_LBN 520
+#define       VIRTIO_BLK_CONFIG_UNUSED1_WIDTH 24
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET
+ * Set configuration for an existing descriptor proxy function. Configuration
+ * data must match function personality. The actual function configuration is
+ * not persisted until the caller commits with MC_CMD_DESC_PROXY_FUNC_COMMIT_IN
+ */
+#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET 0x174
+#undef MC_CMD_0x174_PRIVILEGE_CTG
+
+#define MC_CMD_0x174_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN msgrequest */
+#define    MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMIN 20
+#define    MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX 252
+#define    MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LEN(num) (20+1*(num))
+#define    MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_NUM(len) (((len)-20)/1)
+/* Handle to descriptor proxy function (as returned by
+ * MC_CMD_DESC_PROXY_FUNC_OPEN)
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_LEN 4
+/* Reserved for future extension, set to zero. */
+#define       MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_RESERVED_OFST 4
+#define       MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_RESERVED_LEN 16
+/* Configuration data. Format of configuration data is determined implicitly
+ * from function personality referred to by HANDLE. Currently, only supported
+ * format is VIRTIO_BLK_CONFIG.
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_OFST 20
+#define       MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_LEN 1
+#define       MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MINNUM 0
+#define       MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM 232
+#define       MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM_MCDI2 1000
+
+/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_OUT msgresponse */
+#define    MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_COMMIT
+ * Commit function configuration to non-volatile or volatile store. Once
+ * configuration is applied to hardware (which may happen immediately or on
+ * next function/device reset) a DESC_PROXY_FUNC_CONFIG_SET MCDI event will be
+ * delivered to callers MCDI event queue.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_COMMIT 0x175
+#undef MC_CMD_0x175_PRIVILEGE_CTG
+
+#define MC_CMD_0x175_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_COMMIT_IN msgrequest */
+#define    MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_LEN 8
+/* Handle to descriptor proxy function (as returned by
+ * MC_CMD_DESC_PROXY_FUNC_OPEN)
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_LEN 4
+#define       MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_OFST 4
+#define       MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_LEN 4
+/* enum: Store into non-volatile (dynamic) config */
+#define          MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_NON_VOLATILE 0x0
+/* enum: Store into volatile (ephemeral) config */
+#define          MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_VOLATILE 0x1
+
+/* MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT msgresponse */
+#define    MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_LEN 4
+/* Generation count to be delivered in an event once configuration becomes live
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_LEN 4
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_OPEN
+ * Retrieve a handle for an existing descriptor proxy function. Returns an
+ * integer handle, valid until function is deallocated, MC rebooted or power-
+ * cycle. Returns ENODEV if no function with given label exists.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_OPEN 0x176
+#undef MC_CMD_0x176_PRIVILEGE_CTG
+
+#define MC_CMD_0x176_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_OPEN_IN msgrequest */
+#define    MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LEN 40
+/* User-defined label (zero-terminated ASCII string) to uniquely identify the
+ * function
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_LEN 40
+
+/* MC_CMD_DESC_PROXY_FUNC_OPEN_OUT msgresponse */
+#define    MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMIN 40
+#define    MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX 252
+#define    MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LEN(num) (40+1*(num))
+#define    MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_NUM(len) (((len)-40)/1)
+/* Handle to the descriptor proxy function */
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_LEN 4
+/* PCIe Function ID (as struct PCIE_FUNCTION) */
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_OFST 4
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LEN 8
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_OFST 4
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_OFST 8
+/* Function personality */
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_OFST 12
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_LEN 4
+/*            Enum values, see field(s): */
+/*               FUNCTION_PERSONALITY/ID */
+/* Function configuration state */
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_OFST 16
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_LEN 4
+/* enum: Function configuration is visible to the host (live) */
+#define          MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LIVE 0x0
+/* enum: Function configuration is pending reset */
+#define          MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PENDING 0x1
+/* Generation count to be delivered in an event once the configuration becomes
+ * live (if status is "pending")
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_GENERATION_OFST 20
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_GENERATION_LEN 4
+/* Reserved for future extension, set to zero. */
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_OFST 24
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_LEN 16
+/* Configuration data corresponding to function personality. Currently, only
+ * supported format is VIRTIO_BLK_CONFIG
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_OFST 40
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_LEN 1
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MINNUM 0
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MAXNUM 212
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MAXNUM_MCDI2 980
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_CLOSE
+ * Releases a handle for an open descriptor proxy function. If proxying was
+ * enabled on the device, the caller is expected to gracefully stop it using
+ * MC_CMD_DESC_PROXY_FUNC_DISABLE prior to calling this function. Closing an
+ * active device without disabling proxying will result in forced close, which
+ * will put the device into a failed state and signal the host driver of the
+ * error (for virtio, DEVICE_NEEDS_RESET flag would be set on the host side)
+ */
+#define MC_CMD_DESC_PROXY_FUNC_CLOSE 0x1a1
+#undef MC_CMD_0x1a1_PRIVILEGE_CTG
+
+#define MC_CMD_0x1a1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_CLOSE_IN msgrequest */
+#define    MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_LEN 4
+/* Handle to the descriptor proxy function */
+#define       MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_HANDLE_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_HANDLE_LEN 4
+
+/* MC_CMD_DESC_PROXY_FUNC_CLOSE_OUT msgresponse */
+#define    MC_CMD_DESC_PROXY_FUNC_CLOSE_OUT_LEN 0
+
+/* DESC_PROXY_FUNC_MAP structuredef */
+#define    DESC_PROXY_FUNC_MAP_LEN 52
+/* PCIe function ID (as struct PCIE_FUNCTION) */
+#define       DESC_PROXY_FUNC_MAP_FUNC_OFST 0
+#define       DESC_PROXY_FUNC_MAP_FUNC_LEN 8
+#define       DESC_PROXY_FUNC_MAP_FUNC_LO_OFST 0
+#define       DESC_PROXY_FUNC_MAP_FUNC_HI_OFST 4
+#define       DESC_PROXY_FUNC_MAP_FUNC_LBN 0
+#define       DESC_PROXY_FUNC_MAP_FUNC_WIDTH 64
+/* Function personality */
+#define       DESC_PROXY_FUNC_MAP_PERSONALITY_OFST 8
+#define       DESC_PROXY_FUNC_MAP_PERSONALITY_LEN 4
+/*            Enum values, see field(s): */
+/*               FUNCTION_PERSONALITY/ID */
+#define       DESC_PROXY_FUNC_MAP_PERSONALITY_LBN 64
+#define       DESC_PROXY_FUNC_MAP_PERSONALITY_WIDTH 32
+/* User-defined label (zero-terminated ASCII string) to uniquely identify the
+ * function
+ */
+#define       DESC_PROXY_FUNC_MAP_LABEL_OFST 12
+#define       DESC_PROXY_FUNC_MAP_LABEL_LEN 40
+#define       DESC_PROXY_FUNC_MAP_LABEL_LBN 96
+#define       DESC_PROXY_FUNC_MAP_LABEL_WIDTH 320
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_ENUM
+ * Enumerate existing descriptor proxy functions
+ */
+#define MC_CMD_DESC_PROXY_FUNC_ENUM 0x177
+#undef MC_CMD_0x177_PRIVILEGE_CTG
+
+#define MC_CMD_0x177_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_ENUM_IN msgrequest */
+#define    MC_CMD_DESC_PROXY_FUNC_ENUM_IN_LEN 4
+/* Starting index, set to 0 on first request. See
+ * MC_CMD_DESC_PROXY_FUNC_ENUM_OUT/FLAGS.
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_LEN 4
+
+/* MC_CMD_DESC_PROXY_FUNC_ENUM_OUT msgresponse */
+#define    MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMIN 4
+#define    MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX 212
+#define    MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX_MCDI2 992
+#define    MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LEN(num) (4+52*(num))
+#define    MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_NUM(len) (((len)-4)/52)
+#define       MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_LEN 4
+#define        MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_OFST 0
+#define        MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_LBN 0
+#define        MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_WIDTH 1
+/* Function map, as array of DESC_PROXY_FUNC_MAP */
+#define       MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_OFST 4
+#define       MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_LEN 52
+#define       MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MINNUM 0
+#define       MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MAXNUM 4
+#define       MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MAXNUM_MCDI2 19
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_ENABLE
+ * Enable descriptor proxying for function into target event queue. Returns VI
+ * allocation info for the proxy source function, so that the caller can map
+ * absolute VI IDs from descriptor proxy events back to the originating
+ * function.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE 0x178
+#undef MC_CMD_0x178_PRIVILEGE_CTG
+
+#define MC_CMD_0x178_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_ENABLE_IN msgrequest */
+#define    MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_LEN 8
+/* Handle to descriptor proxy function (as returned by
+ * MC_CMD_DESC_PROXY_FUNC_OPEN)
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_HANDLE_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_HANDLE_LEN 4
+/* Descriptor proxy sink queue (caller function relative). Must be extended
+ * width event queue
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_TARGET_EVQ_LEN 4
+
+/* MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT msgresponse */
+#define    MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_LEN 8
+/* The number of VIs allocated on the function */
+#define       MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_COUNT_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_COUNT_LEN 4
+/* The base absolute VI number allocated to the function. */
+#define       MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_BASE_OFST 4
+#define       MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_BASE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_DISABLE
+ * Disable descriptor proxying for function
+ */
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE 0x179
+#undef MC_CMD_0x179_PRIVILEGE_CTG
+
+#define MC_CMD_0x179_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_DISABLE_IN msgrequest */
+#define    MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_LEN 4
+/* Handle to descriptor proxy function (as returned by
+ * MC_CMD_DESC_PROXY_FUNC_OPEN)
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_HANDLE_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_HANDLE_LEN 4
+
+/* MC_CMD_DESC_PROXY_FUNC_DISABLE_OUT msgresponse */
+#define    MC_CMD_DESC_PROXY_FUNC_DISABLE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_ADDR_SPC_ID
+ * Get Address space identifier for use in mem2mem descriptors for a given
+ * target. See SF-120734-TC for details on ADDR_SPC_IDs and mem2mem
+ * descriptors.
+ */
+#define MC_CMD_GET_ADDR_SPC_ID 0x1a0
+#undef MC_CMD_0x1a0_PRIVILEGE_CTG
+
+#define MC_CMD_0x1a0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_ADDR_SPC_ID_IN msgrequest */
+#define    MC_CMD_GET_ADDR_SPC_ID_IN_LEN 16
+/* Resource type to get ADDR_SPC_ID for */
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_OFST 0
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_LEN 4
+/* enum: Address space ID for host/AP memory DMA over the same interface this
+ * MCDI was called on
+ */
+#define          MC_CMD_GET_ADDR_SPC_ID_IN_SELF 0x0
+/* enum: Address space ID for host/AP memory DMA via PCI interface and function
+ * specified by FUNC
+ */
+#define          MC_CMD_GET_ADDR_SPC_ID_IN_PCI_FUNC 0x1
+/* enum: Address space ID for host/AP memory DMA via PCI interface and function
+ * specified by FUNC with PASID value specified by PASID
+ */
+#define          MC_CMD_GET_ADDR_SPC_ID_IN_PCI_FUNC_PASID 0x2
+/* enum: Address space ID for host/AP memory DMA via PCI interface and function
+ * specified by FUNC with PASID value of relative VI specified by VI
+ */
+#define          MC_CMD_GET_ADDR_SPC_ID_IN_REL_VI 0x3
+/* enum: Address space ID for host/AP memory DMA via PCI interface, function
+ * and PASID value of absolute VI specified by VI
+ */
+#define          MC_CMD_GET_ADDR_SPC_ID_IN_ABS_VI 0x4
+/* enum: Address space ID for host memory DMA via PCI interface and function of
+ * descriptor proxy function specified by HANDLE
+ */
+#define          MC_CMD_GET_ADDR_SPC_ID_IN_DESC_PROXY_HANDLE 0x5
+/* enum: Address space ID for DMA to/from MC memory */
+#define          MC_CMD_GET_ADDR_SPC_ID_IN_MC_MEM 0x6
+/* enum: Address space ID for DMA to/from other SmartNIC memory (on-chip, DDR)
+ */
+#define          MC_CMD_GET_ADDR_SPC_ID_IN_NIC_MEM 0x7
+/* PCIe Function ID (as struct PCIE_FUNCTION). Only valid if TYPE is PCI_FUNC,
+ * PCI_FUNC_PASID or REL_VI.
+ */
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_OFST 4
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LEN 8
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_OFST 4
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_OFST 8
+/* PASID value. Only valid if TYPE is PCI_FUNC_PASID. */
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_PASID_OFST 12
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_PASID_LEN 4
+/* Relative or absolute VI number. Only valid if TYPE is REL_VI or ABS_VI */
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_VI_OFST 12
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_VI_LEN 4
+/* Descriptor proxy function handle. Only valid if TYPE is DESC_PROXY_HANDLE.
+ */
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_HANDLE_OFST 4
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_HANDLE_LEN 4
+
+/* MC_CMD_GET_ADDR_SPC_ID_OUT msgresponse */
+#define    MC_CMD_GET_ADDR_SPC_ID_OUT_LEN 8
+/* Address Space ID for the requested target. Only the lower 36 bits are valid
+ * in the current SmartNIC implementation.
+ */
+#define       MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_OFST 0
+#define       MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LEN 8
+#define       MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_OFST 0
+#define       MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_OFST 4
+
+
 #endif /* MCDI_PCOL_H */
index b807871..98eeb40 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/slab.h>
 #include "efx.h"
+#include "mcdi_port.h"
 #include "mcdi.h"
 #include "mcdi_pcol.h"
 #include "nic.h"
@@ -175,19 +176,6 @@ fail:
        return rc;
 }
 
-int efx_mcdi_port_reconfigure(struct efx_nic *efx)
-{
-       struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
-       u32 caps = (efx->link_advertising[0] ?
-                   ethtool_linkset_to_mcdi_cap(efx->link_advertising) :
-                   phy_cfg->forced_cap);
-
-       caps |= ethtool_fec_caps_to_mcdi(efx->fec_config);
-
-       return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
-                                efx->loopback_mode, 0);
-}
-
 static void efx_mcdi_phy_remove(struct efx_nic *efx)
 {
        struct efx_mcdi_phy_data *phy_data = efx->phy_data;
@@ -691,80 +679,6 @@ bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
        return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
 }
 
-enum efx_stats_action {
-       EFX_STATS_ENABLE,
-       EFX_STATS_DISABLE,
-       EFX_STATS_PULL,
-};
-
-static int efx_mcdi_mac_stats(struct efx_nic *efx,
-                             enum efx_stats_action action, int clear)
-{
-       MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
-       int rc;
-       int change = action == EFX_STATS_PULL ? 0 : 1;
-       int enable = action == EFX_STATS_ENABLE ? 1 : 0;
-       int period = action == EFX_STATS_ENABLE ? 1000 : 0;
-       dma_addr_t dma_addr = efx->stats_buffer.dma_addr;
-       u32 dma_len = action != EFX_STATS_DISABLE ?
-               efx->num_mac_stats * sizeof(u64) : 0;
-
-       BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
-
-       MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr);
-       MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
-                             MAC_STATS_IN_DMA, !!enable,
-                             MAC_STATS_IN_CLEAR, clear,
-                             MAC_STATS_IN_PERIODIC_CHANGE, change,
-                             MAC_STATS_IN_PERIODIC_ENABLE, enable,
-                             MAC_STATS_IN_PERIODIC_CLEAR, 0,
-                             MAC_STATS_IN_PERIODIC_NOEVENT, 1,
-                             MAC_STATS_IN_PERIOD_MS, period);
-       MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
-
-       if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
-               MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, efx->vport_id);
-
-       rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
-                               NULL, 0, NULL);
-       /* Expect ENOENT if DMA queues have not been set up */
-       if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues)))
-               efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf),
-                                      NULL, 0, rc);
-       return rc;
-}
-
-void efx_mcdi_mac_start_stats(struct efx_nic *efx)
-{
-       __le64 *dma_stats = efx->stats_buffer.addr;
-
-       dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
-
-       efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0);
-}
-
-void efx_mcdi_mac_stop_stats(struct efx_nic *efx)
-{
-       efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0);
-}
-
-#define EFX_MAC_STATS_WAIT_US 100
-#define EFX_MAC_STATS_WAIT_ATTEMPTS 10
-
-void efx_mcdi_mac_pull_stats(struct efx_nic *efx)
-{
-       __le64 *dma_stats = efx->stats_buffer.addr;
-       int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS;
-
-       dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
-       efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0);
-
-       while (dma_stats[efx->num_mac_stats - 1] ==
-                               EFX_MC_STATS_GENERATION_INVALID &&
-                       attempts-- != 0)
-               udelay(EFX_MAC_STATS_WAIT_US);
-}
-
 int efx_mcdi_port_probe(struct efx_nic *efx)
 {
        int rc;
@@ -782,24 +696,11 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
        if (rc != 0)
                return rc;
 
-       /* Allocate buffer for stats */
-       rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
-                                 efx->num_mac_stats * sizeof(u64), GFP_KERNEL);
-       if (rc)
-               return rc;
-       netif_dbg(efx, probe, efx->net_dev,
-                 "stats buffer at %llx (virt %p phys %llx)\n",
-                 (u64)efx->stats_buffer.dma_addr,
-                 efx->stats_buffer.addr,
-                 (u64)virt_to_phys(efx->stats_buffer.addr));
-
-       efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 1);
-
-       return 0;
+       return efx_mcdi_mac_init_stats(efx);
 }
 
 void efx_mcdi_port_remove(struct efx_nic *efx)
 {
        efx->phy_op->remove(efx);
-       efx_nic_free_buffer(efx, &efx->stats_buffer);
+       efx_mcdi_mac_fini_stats(efx);
 }
diff --git a/drivers/net/ethernet/sfc/mcdi_port.h b/drivers/net/ethernet/sfc/mcdi_port.h
new file mode 100644 (file)
index 0000000..07863dd
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ */
+
+#ifndef EFX_MCDI_PORT_H
+#define EFX_MCDI_PORT_H
+
+#include "net_driver.h"
+
+u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
+bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
+int efx_mcdi_port_probe(struct efx_nic *efx);
+void efx_mcdi_port_remove(struct efx_nic *efx);
+
+#endif /* EFX_MCDI_PORT_H */
index a6a072b..56af8b5 100644 (file)
@@ -10,6 +10,7 @@
 
 #include "mcdi_port_common.h"
 #include "efx_common.h"
+#include "nic.h"
 
 int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
 {
@@ -475,6 +476,24 @@ int efx_mcdi_phy_test_alive(struct efx_nic *efx)
        return 0;
 }
 
+int efx_mcdi_port_reconfigure(struct efx_nic *efx)
+{
+       struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+       u32 caps = (efx->link_advertising[0] ?
+                   ethtool_linkset_to_mcdi_cap(efx->link_advertising) :
+                   phy_cfg->forced_cap);
+
+       caps |= ethtool_fec_caps_to_mcdi(efx->fec_config);
+
+       return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
+                                efx->loopback_mode, 0);
+}
+
+static unsigned int efx_calc_mac_mtu(struct efx_nic *efx)
+{
+       return EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
+}
+
 int efx_mcdi_set_mac(struct efx_nic *efx)
 {
        u32 fcntl;
@@ -486,8 +505,7 @@ int efx_mcdi_set_mac(struct efx_nic *efx)
        ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
                        efx->net_dev->dev_addr);
 
-       MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
-                      EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
+       MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, efx_calc_mac_mtu(efx));
        MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
 
        /* Set simple MAC filter for Siena */
@@ -520,6 +538,125 @@ int efx_mcdi_set_mac(struct efx_nic *efx)
                            NULL, 0, NULL);
 }
 
+int efx_mcdi_set_mtu(struct efx_nic *efx)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MAC_EXT_IN_LEN);
+
+       BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
+
+       MCDI_SET_DWORD(inbuf, SET_MAC_EXT_IN_MTU, efx_calc_mac_mtu(efx));
+
+       MCDI_POPULATE_DWORD_1(inbuf, SET_MAC_EXT_IN_CONTROL,
+                             SET_MAC_EXT_IN_CFG_MTU, 1);
+
+       return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, inbuf, sizeof(inbuf),
+                           NULL, 0, NULL);
+}
+
+enum efx_stats_action {
+       EFX_STATS_ENABLE,
+       EFX_STATS_DISABLE,
+       EFX_STATS_PULL,
+};
+
+static int efx_mcdi_mac_stats(struct efx_nic *efx,
+                             enum efx_stats_action action, int clear)
+{
+       MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
+       int rc;
+       int change = action == EFX_STATS_PULL ? 0 : 1;
+       int enable = action == EFX_STATS_ENABLE ? 1 : 0;
+       int period = action == EFX_STATS_ENABLE ? 1000 : 0;
+       dma_addr_t dma_addr = efx->stats_buffer.dma_addr;
+       u32 dma_len = action != EFX_STATS_DISABLE ?
+               efx->num_mac_stats * sizeof(u64) : 0;
+
+       BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
+
+       MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr);
+       MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
+                             MAC_STATS_IN_DMA, !!enable,
+                             MAC_STATS_IN_CLEAR, clear,
+                             MAC_STATS_IN_PERIODIC_CHANGE, change,
+                             MAC_STATS_IN_PERIODIC_ENABLE, enable,
+                             MAC_STATS_IN_PERIODIC_CLEAR, 0,
+                             MAC_STATS_IN_PERIODIC_NOEVENT, 1,
+                             MAC_STATS_IN_PERIOD_MS, period);
+       MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+
+       if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+               MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, efx->vport_id);
+
+       rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+                               NULL, 0, NULL);
+       /* Expect ENOENT if DMA queues have not been set up */
+       if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues)))
+               efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf),
+                                      NULL, 0, rc);
+       return rc;
+}
+
+void efx_mcdi_mac_start_stats(struct efx_nic *efx)
+{
+       __le64 *dma_stats = efx->stats_buffer.addr;
+
+       dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
+
+       efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0);
+}
+
+void efx_mcdi_mac_stop_stats(struct efx_nic *efx)
+{
+       efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0);
+}
+
+#define EFX_MAC_STATS_WAIT_US 100
+#define EFX_MAC_STATS_WAIT_ATTEMPTS 10
+
+void efx_mcdi_mac_pull_stats(struct efx_nic *efx)
+{
+       __le64 *dma_stats = efx->stats_buffer.addr;
+       int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS;
+
+       dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
+       efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0);
+
+       while (dma_stats[efx->num_mac_stats - 1] ==
+                               EFX_MC_STATS_GENERATION_INVALID &&
+                       attempts-- != 0)
+               udelay(EFX_MAC_STATS_WAIT_US);
+}
+
+int efx_mcdi_mac_init_stats(struct efx_nic *efx)
+{
+       int rc;
+
+       if (!efx->num_mac_stats)
+               return 0;
+
+       /* Allocate buffer for stats */
+       rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
+                                 efx->num_mac_stats * sizeof(u64), GFP_KERNEL);
+       if (rc) {
+               netif_warn(efx, probe, efx->net_dev,
+                          "failed to allocate DMA buffer: %d\n", rc);
+               return rc;
+       }
+
+       netif_dbg(efx, probe, efx->net_dev,
+                 "stats buffer at %llx (virt %p phys %llx)\n",
+                 (u64) efx->stats_buffer.dma_addr,
+                 efx->stats_buffer.addr,
+                 (u64) virt_to_phys(efx->stats_buffer.addr));
+
+       return 0;
+}
+
+void efx_mcdi_mac_fini_stats(struct efx_nic *efx)
+{
+       efx_nic_free_buffer(efx, &efx->stats_buffer);
+}
+
 /* Get physical port number (EF10 only; on Siena it is same as PF number) */
 int efx_mcdi_port_get_number(struct efx_nic *efx)
 {
index b16f112..9dbeee8 100644 (file)
@@ -28,8 +28,6 @@ struct efx_mcdi_phy_data {
        u32 forced_cap;
 };
 
-#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
-
 int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg);
 void efx_link_set_advertising(struct efx_nic *efx,
                              const unsigned long *advertising);
@@ -51,6 +49,9 @@ int efx_mcdi_phy_get_fecparam(struct efx_nic *efx,
                              struct ethtool_fecparam *fec);
 int efx_mcdi_phy_test_alive(struct efx_nic *efx);
 int efx_mcdi_set_mac(struct efx_nic *efx);
+int efx_mcdi_set_mtu(struct efx_nic *efx);
+int efx_mcdi_mac_init_stats(struct efx_nic *efx);
+void efx_mcdi_mac_fini_stats(struct efx_nic *efx);
 int efx_mcdi_port_get_number(struct efx_nic *efx);
 void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
 
index 1afb58f..786fda5 100644 (file)
@@ -189,6 +189,8 @@ struct efx_tx_buffer {
  *
  * @efx: The associated Efx NIC
  * @queue: DMA queue number
+ * @label: Label for TX completion events.
+ *     Is our index within @channel->tx_queue array.
  * @tso_version: Version of TSO in use for this queue.
  * @channel: The associated channel
  * @core_txq: The networking core TX queue structure
@@ -250,7 +252,8 @@ struct efx_tx_buffer {
 struct efx_tx_queue {
        /* Members which don't change on the fast path */
        struct efx_nic *efx ____cacheline_aligned_in_smp;
-       unsigned queue;
+       unsigned int queue;
+       unsigned int label;
        unsigned int tso_version;
        struct efx_channel *channel;
        struct netdev_queue *core_txq;
@@ -606,8 +609,6 @@ extern const unsigned int efx_reset_type_max;
 #define RESET_TYPE(type) \
        STRING_TABLE_LOOKUP(type, efx_reset_type)
 
-void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen);
-
 enum efx_int_mode {
        /* Be careful if altering to correct macro below */
        EFX_INT_MODE_MSIX = 0,
@@ -867,6 +868,7 @@ struct efx_async_filter_insertion {
  * @n_rx_channels: Number of channels used for RX (= number of RX queues)
  * @n_tx_channels: Number of channels used for TX
  * @n_extra_tx_channels: Number of extra channels with TX queues
+ * @tx_queues_per_channel: number of TX queues probed on each channel
  * @n_xdp_channels: Number of channels used for XDP TX
  * @xdp_channel_offset: Offset of zeroth channel used for XPD TX.
  * @xdp_tx_per_channel: Max number of TX queues on an XDP TX channel.
@@ -961,6 +963,7 @@ struct efx_async_filter_insertion {
  * @vpd_sn: Serial number read from VPD
  * @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their
  *      xdp_rxq_info structures?
+ * @mem_bar: The BAR that is mapped into membase.
  * @monitor_work: Hardware monitor workitem
  * @biu_lock: BIU (bus interface unit) lock
  * @last_irq_cpu: Last CPU to handle a possible test interrupt.  This
@@ -1022,6 +1025,7 @@ struct efx_nic {
        unsigned next_buffer_table;
 
        unsigned int max_channels;
+       unsigned int max_vis;
        unsigned int max_tx_channels;
        unsigned n_channels;
        unsigned n_rx_channels;
@@ -1029,6 +1033,7 @@ struct efx_nic {
        unsigned tx_channel_offset;
        unsigned n_tx_channels;
        unsigned n_extra_tx_channels;
+       unsigned int tx_queues_per_channel;
        unsigned int n_xdp_channels;
        unsigned int xdp_channel_offset;
        unsigned int xdp_tx_per_channel;
@@ -1136,6 +1141,8 @@ struct efx_nic {
        char *vpd_sn;
        bool xdp_rxq_info_failed;
 
+       unsigned int mem_bar;
+
        /* The following fields may be written more often */
 
        struct delayed_work monitor_work ____cacheline_aligned_in_smp;
@@ -1164,12 +1171,9 @@ struct efx_mtd_partition {
 };
 
 struct efx_udp_tunnel {
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID    0xffff
        u16 type; /* TUNNEL_ENCAP_UDP_PORT_ENTRY_foo, see mcdi_pcol.h */
        __be16 port;
-       /* Count of repeated adds of the same port.  Used only inside the list,
-        * not in request arguments.
-        */
-       u16 count;
 };
 
 /**
@@ -1293,9 +1297,7 @@ struct efx_udp_tunnel {
  * @tso_versions: Returns mask of firmware-assisted TSO versions supported.
  *     If %NULL, then device does not support any TSO version.
  * @udp_tnl_push_ports: Push the list of UDP tunnel ports to the NIC if required.
- * @udp_tnl_add_port: Add a UDP tunnel port
  * @udp_tnl_has_port: Check if a port has been added as UDP tunnel
- * @udp_tnl_del_port: Remove a UDP tunnel port
  * @print_additional_fwver: Dump NIC-specific additional FW version info
  * @revision: Hardware architecture revision
  * @txd_ptr_tbl_base: TX descriptor ring base address
@@ -1313,8 +1315,6 @@ struct efx_udp_tunnel {
  * @option_descriptors: NIC supports TX option descriptors
  * @min_interrupt_mode: Lowest capability interrupt mode supported
  *     from &enum efx_int_mode.
- * @max_interrupt_mode: Highest capability interrupt mode supported
- *     from &enum efx_int_mode.
  * @timer_period_max: Maximum period of interrupt timer (in ticks)
  * @offload_features: net_device feature flags for protocol offload
  *     features implemented in hardware
@@ -1352,7 +1352,7 @@ struct efx_nic_type {
        void (*push_irq_moderation)(struct efx_channel *channel);
        int (*reconfigure_port)(struct efx_nic *efx);
        void (*prepare_enable_fc_tx)(struct efx_nic *efx);
-       int (*reconfigure_mac)(struct efx_nic *efx);
+       int (*reconfigure_mac)(struct efx_nic *efx, bool mtu_only);
        bool (*check_mac_fault)(struct efx_nic *efx);
        void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
        int (*set_wol)(struct efx_nic *efx, u32 type);
@@ -1467,9 +1467,7 @@ struct efx_nic_type {
        int (*set_mac_address)(struct efx_nic *efx);
        u32 (*tso_versions)(struct efx_nic *efx);
        int (*udp_tnl_push_ports)(struct efx_nic *efx);
-       int (*udp_tnl_add_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl);
        bool (*udp_tnl_has_port)(struct efx_nic *efx, __be16 port);
-       int (*udp_tnl_del_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl);
        size_t (*print_additional_fwver)(struct efx_nic *efx, char *buf,
                                         size_t len);
 
@@ -1488,7 +1486,6 @@ struct efx_nic_type {
        bool always_rx_scatter;
        bool option_descriptors;
        unsigned int min_interrupt_mode;
-       unsigned int max_interrupt_mode;
        unsigned int timer_period_max;
        netdev_features_t offload_features;
        int mcdi_max_ver;
@@ -1528,7 +1525,7 @@ static inline struct efx_tx_queue *
 efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
 {
        EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_tx_channels ||
-                                 type >= EFX_TXQ_TYPES);
+                                 type >= efx->tx_queues_per_channel);
        return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
 }
 
@@ -1550,18 +1547,18 @@ static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
        return true;
 }
 
-static inline struct efx_tx_queue *
-efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
+static inline unsigned int efx_channel_num_tx_queues(struct efx_channel *channel)
 {
-       EFX_WARN_ON_ONCE_PARANOID(!efx_channel_has_tx_queues(channel) ||
-                                 type >= EFX_TXQ_TYPES);
-       return &channel->tx_queue[type];
+       if (efx_channel_is_xdp_tx(channel))
+               return channel->efx->xdp_tx_per_channel;
+       return channel->efx->tx_queues_per_channel;
 }
 
-static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
+static inline struct efx_tx_queue *
+efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
 {
-       return !(tx_queue->efx->net_dev->num_tc < 2 &&
-                tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
+       EFX_WARN_ON_ONCE_PARANOID(type >= efx_channel_num_tx_queues(channel));
+       return &channel->tx_queue[type];
 }
 
 /* Iterate over all TX queues belonging to a channel */
@@ -1570,18 +1567,8 @@ static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
                ;                                                       \
        else                                                            \
                for (_tx_queue = (_channel)->tx_queue;                  \
-                    _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
-                            (efx_tx_queue_used(_tx_queue) ||            \
-                             efx_channel_is_xdp_tx(_channel));         \
-                    _tx_queue++)
-
-/* Iterate over all possible TX queues belonging to a channel */
-#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)    \
-       if (!efx_channel_has_tx_queues(_channel))                       \
-               ;                                                       \
-       else                                                            \
-               for (_tx_queue = (_channel)->tx_queue;                  \
-                    _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;  \
+                    _tx_queue < (_channel)->tx_queue +                 \
+                                efx_channel_num_tx_queues(_channel);           \
                     _tx_queue++)
 
 static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
index b0baa70..d994d13 100644 (file)
@@ -20,6 +20,7 @@
 #include "farch_regs.h"
 #include "io.h"
 #include "workarounds.h"
+#include "mcdi_pcol.h"
 
 /**************************************************************************
  *
@@ -471,6 +472,49 @@ size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
 }
 
 /**
+ * efx_nic_copy_stats - Copy stats from the DMA buffer in to an
+ *     intermediate buffer. This is used to get a consistent
+ *     set of stats while the DMA buffer can be written at any time
+ *     by the NIC.
+ * @efx: The associated NIC.
+ * @dest: Destination buffer. Must be the same size as the DMA buffer.
+ */
+int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest)
+{
+       __le64 *dma_stats = efx->stats_buffer.addr;
+       __le64 generation_start, generation_end;
+       int rc = 0, retry;
+
+       if (!dest)
+               return 0;
+
+       if (!dma_stats)
+               goto return_zeroes;
+
+       /* If we're unlucky enough to read statistics during the DMA, wait
+        * up to 10ms for it to finish (typically takes <500us)
+        */
+       for (retry = 0; retry < 100; ++retry) {
+               generation_end = dma_stats[efx->num_mac_stats - 1];
+               if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
+                       goto return_zeroes;
+               rmb();
+               memcpy(dest, dma_stats, efx->num_mac_stats * sizeof(__le64));
+               rmb();
+               generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+               if (generation_end == generation_start)
+                       return 0; /* return good data */
+               udelay(100);
+       }
+
+       rc = -EIO;
+
+return_zeroes:
+       memset(dest, 0, efx->num_mac_stats * sizeof(u64));
+       return rc;
+}
+
+/**
  * efx_nic_update_stats - Convert statistics DMA buffer to array of u64
  * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer
  *     layout.  DMA widths of 0, 16, 32 and 64 are supported; where
index 8f73c5d..724e277 100644 (file)
 #ifndef EFX_NIC_H
 #define EFX_NIC_H
 
-#include <linux/net_tstamp.h>
-#include "net_driver.h"
+#include "nic_common.h"
 #include "efx.h"
-#include "efx_common.h"
-#include "mcdi.h"
-
-enum {
-       /* Revisions 0-2 were Falcon A0, A1 and B0 respectively.
-        * They are not supported by this driver but these revision numbers
-        * form part of the ethtool API for register dumping.
-        */
-       EFX_REV_SIENA_A0 = 3,
-       EFX_REV_HUNT_A0 = 4,
-};
-
-static inline int efx_nic_rev(struct efx_nic *efx)
-{
-       return efx->type->revision;
-}
 
 u32 efx_farch_fpga_ver(struct efx_nic *efx);
 
-/* Read the current event from the event queue */
-static inline efx_qword_t *efx_event(struct efx_channel *channel,
-                                    unsigned int index)
-{
-       return ((efx_qword_t *) (channel->eventq.buf.addr)) +
-               (index & channel->eventq_mask);
-}
-
-/* See if an event is present
- *
- * We check both the high and low dword of the event for all ones.  We
- * wrote all ones when we cleared the event, and no valid event can
- * have all ones in either its high or low dwords.  This approach is
- * robust against reordering.
- *
- * Note that using a single 64-bit comparison is incorrect; even
- * though the CPU read will be atomic, the DMA write may not be.
- */
-static inline int efx_event_present(efx_qword_t *event)
-{
-       return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
-                 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
-}
-
-/* Returns a pointer to the specified transmit descriptor in the TX
- * descriptor queue belonging to the specified channel.
- */
-static inline efx_qword_t *
-efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
-{
-       return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
-}
-
-/* Get partner of a TX queue, seen as part of the same net core queue */
-static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
-{
-       if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
-               return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
-       else
-               return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
-}
-
-/* Report whether this TX queue would be empty for the given write_count.
- * May return false negative.
- */
-static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
-                                        unsigned int write_count)
-{
-       unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
-
-       if (empty_read_count == 0)
-               return false;
-
-       return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
-}
-
-/* Report whether the NIC considers this TX queue empty, using
- * packet_write_count (the write count recorded for the last completable
- * doorbell push).  May return false negative.  EF10 only, which is OK
- * because only EF10 supports PIO.
- */
-static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
-{
-       EFX_WARN_ON_ONCE_PARANOID(!tx_queue->efx->type->option_descriptors);
-       return __efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count);
-}
-
-/* Decide whether we can use TX PIO, ie. write packet data directly into
- * a buffer on the device.  This can reduce latency at the expense of
- * throughput, so we only do this if both hardware and software TX rings
- * are empty.  This also ensures that only one packet at a time can be
- * using the PIO buffer.
- */
-static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue)
-{
-       struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue);
-
-       return tx_queue->piobuf && efx_nic_tx_is_empty(tx_queue) &&
-              efx_nic_tx_is_empty(partner);
-}
-
-/* Decide whether to push a TX descriptor to the NIC vs merely writing
- * the doorbell.  This can reduce latency when we are adding a single
- * descriptor to an empty queue, but is otherwise pointless.  Further,
- * Falcon and Siena have hardware bugs (SF bug 33851) that may be
- * triggered if we don't check this.
- * We use the write_count used for the last doorbell push, to get the
- * NIC's view of the tx queue.
- */
-static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
-                                           unsigned int write_count)
-{
-       bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count);
-
-       tx_queue->empty_read_count = 0;
-       return was_empty && tx_queue->write_count - write_count == 1;
-}
-
-/* Returns a pointer to the specified descriptor in the RX descriptor queue */
-static inline efx_qword_t *
-efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
-{
-       return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
-}
-
 enum {
        PHY_TYPE_NONE = 0,
        PHY_TYPE_TXC43128 = 1,
@@ -147,18 +25,6 @@ enum {
        PHY_TYPE_SFT9001B = 10,
 };
 
-/* Alignment of PCIe DMA boundaries (4KB) */
-#define EFX_PAGE_SIZE  4096
-/* Size and alignment of buffer table entries (same) */
-#define EFX_BUF_SIZE   EFX_PAGE_SIZE
-
-/* NIC-generic software stats */
-enum {
-       GENERIC_STAT_rx_noskb_drops,
-       GENERIC_STAT_rx_nodesc_trunc,
-       GENERIC_STAT_COUNT
-};
-
 enum {
        SIENA_STAT_tx_bytes = GENERIC_STAT_COUNT,
        SIENA_STAT_tx_good_bytes,
@@ -368,6 +234,7 @@ enum {
  * @piobuf_size: size of a single PIO buffer
  * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
  *     reboot
+ * @mc_stats: Scratch buffer for converting statistics to the kernel's format
  * @stats: Hardware statistics
  * @workaround_35388: Flag: firmware supports workaround for bug 35388
  * @workaround_26807: Flag: firmware supports workaround for bug 26807
@@ -404,6 +271,7 @@ struct efx_ef10_nic_data {
        unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
        u16 piobuf_size;
        bool must_restore_piobufs;
+       __le64 *mc_stats;
        u64 stats[EF10_STAT_COUNT];
        bool workaround_35388;
        bool workaround_26807;
@@ -432,123 +300,12 @@ struct efx_ef10_nic_data {
 int efx_init_sriov(void);
 void efx_fini_sriov(void);
 
-struct ethtool_ts_info;
-int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
-void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
-struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
-void efx_ptp_remove(struct efx_nic *efx);
-int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
-int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
-void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
-bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-int efx_ptp_get_mode(struct efx_nic *efx);
-int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
-                       unsigned int new_mode);
-int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
-void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
-size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
-size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats);
-void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
-void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
-                                  struct sk_buff *skb);
-static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
-                                              struct sk_buff *skb)
-{
-       if (channel->sync_events_state == SYNC_EVENTS_VALID)
-               __efx_rx_skb_attach_timestamp(channel, skb);
-}
-void efx_ptp_start_datapath(struct efx_nic *efx);
-void efx_ptp_stop_datapath(struct efx_nic *efx);
-bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx);
-ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue);
-
-extern const struct efx_nic_type falcon_a1_nic_type;
-extern const struct efx_nic_type falcon_b0_nic_type;
 extern const struct efx_nic_type siena_a0_nic_type;
 extern const struct efx_nic_type efx_hunt_a0_nic_type;
 extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;
 
-/**************************************************************************
- *
- * Externs
- *
- **************************************************************************
- */
-
 int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
 
-/* TX data path */
-static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
-{
-       return tx_queue->efx->type->tx_probe(tx_queue);
-}
-static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
-{
-       tx_queue->efx->type->tx_init(tx_queue);
-}
-static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
-{
-       tx_queue->efx->type->tx_remove(tx_queue);
-}
-static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
-{
-       tx_queue->efx->type->tx_write(tx_queue);
-}
-
-int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
-                       bool *data_mapped);
-
-/* RX data path */
-static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
-{
-       return rx_queue->efx->type->rx_probe(rx_queue);
-}
-static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
-{
-       rx_queue->efx->type->rx_init(rx_queue);
-}
-static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
-{
-       rx_queue->efx->type->rx_remove(rx_queue);
-}
-static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
-{
-       rx_queue->efx->type->rx_write(rx_queue);
-}
-static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
-{
-       rx_queue->efx->type->rx_defer_refill(rx_queue);
-}
-
-/* Event data path */
-static inline int efx_nic_probe_eventq(struct efx_channel *channel)
-{
-       return channel->efx->type->ev_probe(channel);
-}
-static inline int efx_nic_init_eventq(struct efx_channel *channel)
-{
-       return channel->efx->type->ev_init(channel);
-}
-static inline void efx_nic_fini_eventq(struct efx_channel *channel)
-{
-       channel->efx->type->ev_fini(channel);
-}
-static inline void efx_nic_remove_eventq(struct efx_channel *channel)
-{
-       channel->efx->type->ev_remove(channel);
-}
-static inline int
-efx_nic_process_eventq(struct efx_channel *channel, int quota)
-{
-       return channel->efx->type->ev_process(channel, quota);
-}
-static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
-{
-       channel->efx->type->ev_read_ack(channel);
-}
-
-void efx_nic_event_test_start(struct efx_channel *channel);
-
 /* Falcon/Siena queue operations */
 int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
 void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
@@ -598,31 +355,6 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
 #endif
 void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
 
-bool efx_nic_event_present(struct efx_channel *channel);
-
-/* Some statistics are computed as A - B where A and B each increase
- * linearly with some hardware counter(s) and the counters are read
- * asynchronously.  If the counters contributing to B are always read
- * after those contributing to A, the computed value may be lower than
- * the true value by some variable amount, and may decrease between
- * subsequent computations.
- *
- * We should never allow statistics to decrease or to exceed the true
- * value.  Since the computed value will never be greater than the
- * true value, we can achieve this by only storing the computed value
- * when it increases.
- */
-static inline void efx_update_diff_stat(u64 *stat, u64 diff)
-{
-       if ((s64)(diff - *stat) > 0)
-               *stat = diff;
-}
-
-/* Interrupts */
-int efx_nic_init_interrupt(struct efx_nic *efx);
-int efx_nic_irq_test_start(struct efx_nic *efx);
-void efx_nic_fini_interrupt(struct efx_nic *efx);
-
 /* Falcon/Siena interrupts */
 void efx_farch_irq_enable_master(struct efx_nic *efx);
 int efx_farch_irq_test_generate(struct efx_nic *efx);
@@ -631,17 +363,7 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
 irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
 irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
 
-static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
-{
-       return READ_ONCE(channel->event_test_cpu);
-}
-static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
-{
-       return READ_ONCE(efx->last_irq_cpu);
-}
-
 /* Global Resources */
-int efx_nic_flush_queues(struct efx_nic *efx);
 void siena_prepare_flush(struct efx_nic *efx);
 int efx_farch_fini_dmaq(struct efx_nic *efx);
 void efx_farch_finish_flr(struct efx_nic *efx);
@@ -651,14 +373,9 @@ void falcon_stop_nic_stats(struct efx_nic *efx);
 int falcon_reset_xaui(struct efx_nic *efx);
 void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
 void efx_farch_init_common(struct efx_nic *efx);
-void efx_ef10_handle_drain_event(struct efx_nic *efx);
 void efx_farch_rx_push_indir_table(struct efx_nic *efx);
 void efx_farch_rx_pull_indir_table(struct efx_nic *efx);
 
-int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
-                        unsigned int len, gfp_t gfp_flags);
-void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
-
 /* Tests */
 struct efx_farch_register_test {
        unsigned address;
@@ -669,18 +386,6 @@ int efx_farch_test_registers(struct efx_nic *efx,
                             const struct efx_farch_register_test *regs,
                             size_t n_regs);
 
-size_t efx_nic_get_regs_len(struct efx_nic *efx);
-void efx_nic_get_regs(struct efx_nic *efx, void *buf);
-
-size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
-                             const unsigned long *mask, u8 *names);
-void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
-                         const unsigned long *mask, u64 *stats,
-                         const void *dma_buf, bool accumulate);
-void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);
-
-#define EFX_MAX_FLUSH_TIME 5000
-
 void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
                              efx_qword_t *event);
 
diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h
new file mode 100644 (file)
index 0000000..e04b681
--- /dev/null
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ */
+
+#ifndef EFX_NIC_COMMON_H
+#define EFX_NIC_COMMON_H
+
+#include "net_driver.h"
+#include "efx_common.h"
+#include "mcdi.h"
+#include "ptp.h"
+
+enum {
+       /* Revisions 0-2 were Falcon A0, A1 and B0 respectively.
+        * They are not supported by this driver but these revision numbers
+        * form part of the ethtool API for register dumping.
+        */
+       EFX_REV_SIENA_A0 = 3,
+       EFX_REV_HUNT_A0 = 4,
+       EFX_REV_EF100 = 5,
+};
+
+static inline int efx_nic_rev(struct efx_nic *efx)
+{
+       return efx->type->revision;
+}
+
+/* Read the current event from the event queue */
+static inline efx_qword_t *efx_event(struct efx_channel *channel,
+                                    unsigned int index)
+{
+       return ((efx_qword_t *) (channel->eventq.buf.addr)) +
+               (index & channel->eventq_mask);
+}
+
+/* See if an event is present
+ *
+ * We check both the high and low dword of the event for all ones.  We
+ * wrote all ones when we cleared the event, and no valid event can
+ * have all ones in either its high or low dwords.  This approach is
+ * robust against reordering.
+ *
+ * Note that using a single 64-bit comparison is incorrect; even
+ * though the CPU read will be atomic, the DMA write may not be.
+ */
+static inline int efx_event_present(efx_qword_t *event)
+{
+       return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
+                 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
+}
+
+/* Returns a pointer to the specified transmit descriptor in the TX
+ * descriptor queue belonging to the specified channel.
+ */
+static inline efx_qword_t *
+efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+       return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
+}
+
+/* Report whether this TX queue would be empty for the given write_count.
+ * May return false negative.
+ */
+static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
+                                        unsigned int write_count)
+{
+       unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
+
+       if (empty_read_count == 0)
+               return false;
+
+       return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
+}
+
+/* Report whether the NIC considers this TX queue empty, using
+ * packet_write_count (the write count recorded for the last completable
+ * doorbell push).  May return false negative.  EF10 only, which is OK
+ * because only EF10 supports PIO.
+ */
+static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
+{
+       EFX_WARN_ON_ONCE_PARANOID(!tx_queue->efx->type->option_descriptors);
+       return __efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count);
+}
+
+/* Get partner of a TX queue, seen as part of the same net core queue */
+/* XXX is this a thing on EF100? */
+static inline struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
+{
+       if (tx_queue->label & EFX_TXQ_TYPE_OFFLOAD)
+               return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
+       else
+               return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
+}
+
+/* Decide whether we can use TX PIO, ie. write packet data directly into
+ * a buffer on the device.  This can reduce latency at the expense of
+ * throughput, so we only do this if both hardware and software TX rings
+ * are empty.  This also ensures that only one packet at a time can be
+ * using the PIO buffer.
+ */
+static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue)
+{
+       struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue);
+
+       return tx_queue->piobuf && efx_nic_tx_is_empty(tx_queue) &&
+              efx_nic_tx_is_empty(partner);
+}
+
+int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+                       bool *data_mapped);
+
+/* Decide whether to push a TX descriptor to the NIC vs merely writing
+ * the doorbell.  This can reduce latency when we are adding a single
+ * descriptor to an empty queue, but is otherwise pointless.  Further,
+ * Falcon and Siena have hardware bugs (SF bug 33851) that may be
+ * triggered if we don't check this.
+ * We use the write_count used for the last doorbell push, to get the
+ * NIC's view of the tx queue.
+ */
+static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
+                                           unsigned int write_count)
+{
+       bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count);
+
+       tx_queue->empty_read_count = 0;
+       return was_empty && tx_queue->write_count - write_count == 1;
+}
+
+/* Returns a pointer to the specified descriptor in the RX descriptor queue */
+static inline efx_qword_t *
+efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
+{
+       return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
+}
+
+/* Alignment of PCIe DMA boundaries (4KB) */
+#define EFX_PAGE_SIZE  4096
+/* Size and alignment of buffer table entries (same) */
+#define EFX_BUF_SIZE   EFX_PAGE_SIZE
+
+/* NIC-generic software stats */
+enum {
+       GENERIC_STAT_rx_noskb_drops,
+       GENERIC_STAT_rx_nodesc_trunc,
+       GENERIC_STAT_COUNT
+};
+
+#define EFX_GENERIC_SW_STAT(ext_name)                          \
+       [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+
+/* TX data path */
+static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
+{
+       return tx_queue->efx->type->tx_probe(tx_queue);
+}
+static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
+{
+       tx_queue->efx->type->tx_init(tx_queue);
+}
+static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
+{
+       if (tx_queue->efx->type->tx_remove)
+               tx_queue->efx->type->tx_remove(tx_queue);
+}
+static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
+{
+       tx_queue->efx->type->tx_write(tx_queue);
+}
+
+/* RX data path */
+static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
+{
+       return rx_queue->efx->type->rx_probe(rx_queue);
+}
+static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
+{
+       rx_queue->efx->type->rx_init(rx_queue);
+}
+static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
+{
+       rx_queue->efx->type->rx_remove(rx_queue);
+}
+static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
+{
+       rx_queue->efx->type->rx_write(rx_queue);
+}
+static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
+{
+       rx_queue->efx->type->rx_defer_refill(rx_queue);
+}
+
+/* Event data path */
+static inline int efx_nic_probe_eventq(struct efx_channel *channel)
+{
+       return channel->efx->type->ev_probe(channel);
+}
+static inline int efx_nic_init_eventq(struct efx_channel *channel)
+{
+       return channel->efx->type->ev_init(channel);
+}
+static inline void efx_nic_fini_eventq(struct efx_channel *channel)
+{
+       channel->efx->type->ev_fini(channel);
+}
+static inline void efx_nic_remove_eventq(struct efx_channel *channel)
+{
+       channel->efx->type->ev_remove(channel);
+}
+static inline int
+efx_nic_process_eventq(struct efx_channel *channel, int quota)
+{
+       return channel->efx->type->ev_process(channel, quota);
+}
+static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
+{
+       channel->efx->type->ev_read_ack(channel);
+}
+
+void efx_nic_event_test_start(struct efx_channel *channel);
+
+bool efx_nic_event_present(struct efx_channel *channel);
+
+/* Some statistics are computed as A - B where A and B each increase
+ * linearly with some hardware counter(s) and the counters are read
+ * asynchronously.  If the counters contributing to B are always read
+ * after those contributing to A, the computed value may be lower than
+ * the true value by some variable amount, and may decrease between
+ * subsequent computations.
+ *
+ * We should never allow statistics to decrease or to exceed the true
+ * value.  Since the computed value will never be greater than the
+ * true value, we can achieve this by only storing the computed value
+ * when it increases.
+ */
+static inline void efx_update_diff_stat(u64 *stat, u64 diff)
+{
+       if ((s64)(diff - *stat) > 0)
+               *stat = diff;
+}
+
+/* Interrupts */
+int efx_nic_init_interrupt(struct efx_nic *efx);
+int efx_nic_irq_test_start(struct efx_nic *efx);
+void efx_nic_fini_interrupt(struct efx_nic *efx);
+
+static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
+{
+       return READ_ONCE(channel->event_test_cpu);
+}
+static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
+{
+       return READ_ONCE(efx->last_irq_cpu);
+}
+
+/* Global Resources */
+int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
+                        unsigned int len, gfp_t gfp_flags);
+void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
+
+size_t efx_nic_get_regs_len(struct efx_nic *efx);
+void efx_nic_get_regs(struct efx_nic *efx, void *buf);
+
+#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
+
+size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+                             const unsigned long *mask, u8 *names);
+int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest);
+void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+                         const unsigned long *mask, u64 *stats,
+                         const void *dma_buf, bool accumulate);
+void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);
+
+#define EFX_MAX_FLUSH_TIME 5000
+
+#endif /* EFX_NIC_COMMON_H */
index 04c7283..393b7cb 100644 (file)
@@ -35,7 +35,6 @@
 #include <linux/time.h>
 #include <linux/ktime.h>
 #include <linux/module.h>
-#include <linux/net_tstamp.h>
 #include <linux/pps_kernel.h>
 #include <linux/ptp_clock_kernel.h>
 #include "net_driver.h"
@@ -44,7 +43,7 @@
 #include "mcdi_pcol.h"
 #include "io.h"
 #include "farch_regs.h"
-#include "nic.h"
+#include "nic.h" /* indirectly includes ptp.h */
 
 /* Maximum number of events expected to make up a PTP event */
 #define        MAX_EVENT_FRAGS                 3
@@ -352,7 +351,7 @@ static int efx_phc_enable(struct ptp_clock_info *ptp,
 
 bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx)
 {
-       return efx_has_cap(efx, TX_MAC_TIMESTAMPING, FLAGS2);
+       return efx_has_cap(efx, TX_MAC_TIMESTAMPING);
 }
 
 /* PTP 'extra' channel is still a traffic channel, but we only create TX queues
diff --git a/drivers/net/ethernet/sfc/ptp.h b/drivers/net/ethernet/sfc/ptp.h
new file mode 100644 (file)
index 0000000..9855e8c
--- /dev/null
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ */
+
+#ifndef EFX_PTP_H
+#define EFX_PTP_H
+
+#include <linux/net_tstamp.h>
+#include "net_driver.h"
+
+struct ethtool_ts_info;
+int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
+void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
+struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
+void efx_ptp_remove(struct efx_nic *efx);
+int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
+bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+int efx_ptp_get_mode(struct efx_nic *efx);
+int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+                       unsigned int new_mode);
+int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
+size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats);
+void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
+void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+                                  struct sk_buff *skb);
+static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+                                              struct sk_buff *skb)
+{
+       if (channel->sync_events_state == SYNC_EVENTS_VALID)
+               __efx_rx_skb_attach_timestamp(channel, skb);
+}
+void efx_ptp_start_datapath(struct efx_nic *efx);
+void efx_ptp_stop_datapath(struct efx_nic *efx);
+bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx);
+ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue);
+
+#endif /* EFX_PTP_H */
index c01916c..59a43d5 100644 (file)
 #define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
                                      EFX_RX_USR_BUF_SIZE)
 
-static inline void efx_sync_rx_buffer(struct efx_nic *efx,
-                                     struct efx_rx_buffer *rx_buf,
-                                     unsigned int len)
-{
-       dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
-                               DMA_FROM_DEVICE);
-}
-
 static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
                                     struct efx_rx_buffer *rx_buf,
                                     int len)
@@ -411,243 +403,9 @@ void __efx_rx_packet(struct efx_channel *channel)
                rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
 
        if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
-               efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
+               efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, 0);
        else
                efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
 out:
        channel->rx_pkt_n_frags = 0;
 }
-
-#ifdef CONFIG_RFS_ACCEL
-
-static void efx_filter_rfs_work(struct work_struct *data)
-{
-       struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
-                                                             work);
-       struct efx_nic *efx = netdev_priv(req->net_dev);
-       struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
-       int slot_idx = req - efx->rps_slot;
-       struct efx_arfs_rule *rule;
-       u16 arfs_id = 0;
-       int rc;
-
-       rc = efx->type->filter_insert(efx, &req->spec, true);
-       if (rc >= 0)
-               /* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
-               rc %= efx->type->max_rx_ip_filters;
-       if (efx->rps_hash_table) {
-               spin_lock_bh(&efx->rps_hash_lock);
-               rule = efx_rps_hash_find(efx, &req->spec);
-               /* The rule might have already gone, if someone else's request
-                * for the same spec was already worked and then expired before
-                * we got around to our work.  In that case we have nothing
-                * tying us to an arfs_id, meaning that as soon as the filter
-                * is considered for expiry it will be removed.
-                */
-               if (rule) {
-                       if (rc < 0)
-                               rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
-                       else
-                               rule->filter_id = rc;
-                       arfs_id = rule->arfs_id;
-               }
-               spin_unlock_bh(&efx->rps_hash_lock);
-       }
-       if (rc >= 0) {
-               /* Remember this so we can check whether to expire the filter
-                * later.
-                */
-               mutex_lock(&efx->rps_mutex);
-               if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
-                       channel->rfs_filter_count++;
-               channel->rps_flow_id[rc] = req->flow_id;
-               mutex_unlock(&efx->rps_mutex);
-
-               if (req->spec.ether_type == htons(ETH_P_IP))
-                       netif_info(efx, rx_status, efx->net_dev,
-                                  "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
-                                  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
-                                  req->spec.rem_host, ntohs(req->spec.rem_port),
-                                  req->spec.loc_host, ntohs(req->spec.loc_port),
-                                  req->rxq_index, req->flow_id, rc, arfs_id);
-               else
-                       netif_info(efx, rx_status, efx->net_dev,
-                                  "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
-                                  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
-                                  req->spec.rem_host, ntohs(req->spec.rem_port),
-                                  req->spec.loc_host, ntohs(req->spec.loc_port),
-                                  req->rxq_index, req->flow_id, rc, arfs_id);
-               channel->n_rfs_succeeded++;
-       } else {
-               if (req->spec.ether_type == htons(ETH_P_IP))
-                       netif_dbg(efx, rx_status, efx->net_dev,
-                                 "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
-                                 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
-                                 req->spec.rem_host, ntohs(req->spec.rem_port),
-                                 req->spec.loc_host, ntohs(req->spec.loc_port),
-                                 req->rxq_index, req->flow_id, rc, arfs_id);
-               else
-                       netif_dbg(efx, rx_status, efx->net_dev,
-                                 "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
-                                 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
-                                 req->spec.rem_host, ntohs(req->spec.rem_port),
-                                 req->spec.loc_host, ntohs(req->spec.loc_port),
-                                 req->rxq_index, req->flow_id, rc, arfs_id);
-               channel->n_rfs_failed++;
-               /* We're overloading the NIC's filter tables, so let's do a
-                * chunk of extra expiry work.
-                */
-               __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
-                                                    100u));
-       }
-
-       /* Release references */
-       clear_bit(slot_idx, &efx->rps_slot_map);
-       dev_put(req->net_dev);
-}
-
-int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
-                  u16 rxq_index, u32 flow_id)
-{
-       struct efx_nic *efx = netdev_priv(net_dev);
-       struct efx_async_filter_insertion *req;
-       struct efx_arfs_rule *rule;
-       struct flow_keys fk;
-       int slot_idx;
-       bool new;
-       int rc;
-
-       /* find a free slot */
-       for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
-               if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
-                       break;
-       if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
-               return -EBUSY;
-
-       if (flow_id == RPS_FLOW_ID_INVALID) {
-               rc = -EINVAL;
-               goto out_clear;
-       }
-
-       if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
-               rc = -EPROTONOSUPPORT;
-               goto out_clear;
-       }
-
-       if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
-               rc = -EPROTONOSUPPORT;
-               goto out_clear;
-       }
-       if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
-               rc = -EPROTONOSUPPORT;
-               goto out_clear;
-       }
-
-       req = efx->rps_slot + slot_idx;
-       efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
-                          efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
-                          rxq_index);
-       req->spec.match_flags =
-               EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
-               EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
-               EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
-       req->spec.ether_type = fk.basic.n_proto;
-       req->spec.ip_proto = fk.basic.ip_proto;
-
-       if (fk.basic.n_proto == htons(ETH_P_IP)) {
-               req->spec.rem_host[0] = fk.addrs.v4addrs.src;
-               req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
-       } else {
-               memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
-                      sizeof(struct in6_addr));
-               memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
-                      sizeof(struct in6_addr));
-       }
-
-       req->spec.rem_port = fk.ports.src;
-       req->spec.loc_port = fk.ports.dst;
-
-       if (efx->rps_hash_table) {
-               /* Add it to ARFS hash table */
-               spin_lock(&efx->rps_hash_lock);
-               rule = efx_rps_hash_add(efx, &req->spec, &new);
-               if (!rule) {
-                       rc = -ENOMEM;
-                       goto out_unlock;
-               }
-               if (new)
-                       rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
-               rc = rule->arfs_id;
-               /* Skip if existing or pending filter already does the right thing */
-               if (!new && rule->rxq_index == rxq_index &&
-                   rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
-                       goto out_unlock;
-               rule->rxq_index = rxq_index;
-               rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
-               spin_unlock(&efx->rps_hash_lock);
-       } else {
-               /* Without an ARFS hash table, we just use arfs_id 0 for all
-                * filters.  This means if multiple flows hash to the same
-                * flow_id, all but the most recently touched will be eligible
-                * for expiry.
-                */
-               rc = 0;
-       }
-
-       /* Queue the request */
-       dev_hold(req->net_dev = net_dev);
-       INIT_WORK(&req->work, efx_filter_rfs_work);
-       req->rxq_index = rxq_index;
-       req->flow_id = flow_id;
-       schedule_work(&req->work);
-       return rc;
-out_unlock:
-       spin_unlock(&efx->rps_hash_lock);
-out_clear:
-       clear_bit(slot_idx, &efx->rps_slot_map);
-       return rc;
-}
-
-bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
-{
-       bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
-       struct efx_nic *efx = channel->efx;
-       unsigned int index, size, start;
-       u32 flow_id;
-
-       if (!mutex_trylock(&efx->rps_mutex))
-               return false;
-       expire_one = efx->type->filter_rfs_expire_one;
-       index = channel->rfs_expire_index;
-       start = index;
-       size = efx->type->max_rx_ip_filters;
-       while (quota) {
-               flow_id = channel->rps_flow_id[index];
-
-               if (flow_id != RPS_FLOW_ID_INVALID) {
-                       quota--;
-                       if (expire_one(efx, flow_id, index)) {
-                               netif_info(efx, rx_status, efx->net_dev,
-                                          "expired filter %d [channel %u flow %u]\n",
-                                          index, channel->channel, flow_id);
-                               channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
-                               channel->rfs_filter_count--;
-                       }
-               }
-               if (++index == size)
-                       index = 0;
-               /* If we were called with a quota that exceeds the total number
-                * of filters in the table (which shouldn't happen, but could
-                * if two callers race), ensure that we don't loop forever -
-                * stop when we've examined every row of the table.
-                */
-               if (index == start)
-                       break;
-       }
-
-       channel->rfs_expire_index = index;
-       mutex_unlock(&efx->rps_mutex);
-       return true;
-}
-
-#endif /* CONFIG_RFS_ACCEL */
index e10c238..fb77c7b 100644 (file)
@@ -510,7 +510,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
  */
 void
 efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
-                 unsigned int n_frags, u8 *eh)
+                 unsigned int n_frags, u8 *eh, __wsum csum)
 {
        struct napi_struct *napi = &channel->napi_str;
        struct efx_nic *efx = channel->efx;
@@ -528,8 +528,13 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
        if (efx->net_dev->features & NETIF_F_RXHASH)
                skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
                             PKT_HASH_TYPE_L3);
-       skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
-                         CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
+       if (csum) {
+               skb->csum = csum;
+               skb->ip_summed = CHECKSUM_COMPLETE;
+       } else {
+               skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
+                                 CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
+       }
        skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
 
        for (;;) {
@@ -849,3 +854,237 @@ void efx_remove_filters(struct efx_nic *efx)
        efx->type->filter_table_remove(efx);
        up_write(&efx->filter_sem);
 }
+
+#ifdef CONFIG_RFS_ACCEL
+
+static void efx_filter_rfs_work(struct work_struct *data)
+{
+       struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
+                                                             work);
+       struct efx_nic *efx = netdev_priv(req->net_dev);
+       struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
+       int slot_idx = req - efx->rps_slot;
+       struct efx_arfs_rule *rule;
+       u16 arfs_id = 0;
+       int rc;
+
+       rc = efx->type->filter_insert(efx, &req->spec, true);
+       if (rc >= 0)
+               /* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
+               rc %= efx->type->max_rx_ip_filters;
+       if (efx->rps_hash_table) {
+               spin_lock_bh(&efx->rps_hash_lock);
+               rule = efx_rps_hash_find(efx, &req->spec);
+               /* The rule might have already gone, if someone else's request
+                * for the same spec was already worked and then expired before
+                * we got around to our work.  In that case we have nothing
+                * tying us to an arfs_id, meaning that as soon as the filter
+                * is considered for expiry it will be removed.
+                */
+               if (rule) {
+                       if (rc < 0)
+                               rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
+                       else
+                               rule->filter_id = rc;
+                       arfs_id = rule->arfs_id;
+               }
+               spin_unlock_bh(&efx->rps_hash_lock);
+       }
+       if (rc >= 0) {
+               /* Remember this so we can check whether to expire the filter
+                * later.
+                */
+               mutex_lock(&efx->rps_mutex);
+               if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
+                       channel->rfs_filter_count++;
+               channel->rps_flow_id[rc] = req->flow_id;
+               mutex_unlock(&efx->rps_mutex);
+
+               if (req->spec.ether_type == htons(ETH_P_IP))
+                       netif_info(efx, rx_status, efx->net_dev,
+                                  "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
+                                  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+                                  req->spec.rem_host, ntohs(req->spec.rem_port),
+                                  req->spec.loc_host, ntohs(req->spec.loc_port),
+                                  req->rxq_index, req->flow_id, rc, arfs_id);
+               else
+                       netif_info(efx, rx_status, efx->net_dev,
+                                  "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
+                                  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+                                  req->spec.rem_host, ntohs(req->spec.rem_port),
+                                  req->spec.loc_host, ntohs(req->spec.loc_port),
+                                  req->rxq_index, req->flow_id, rc, arfs_id);
+               channel->n_rfs_succeeded++;
+       } else {
+               if (req->spec.ether_type == htons(ETH_P_IP))
+                       netif_dbg(efx, rx_status, efx->net_dev,
+                                 "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
+                                 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+                                 req->spec.rem_host, ntohs(req->spec.rem_port),
+                                 req->spec.loc_host, ntohs(req->spec.loc_port),
+                                 req->rxq_index, req->flow_id, rc, arfs_id);
+               else
+                       netif_dbg(efx, rx_status, efx->net_dev,
+                                 "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
+                                 (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+                                 req->spec.rem_host, ntohs(req->spec.rem_port),
+                                 req->spec.loc_host, ntohs(req->spec.loc_port),
+                                 req->rxq_index, req->flow_id, rc, arfs_id);
+               channel->n_rfs_failed++;
+               /* We're overloading the NIC's filter tables, so let's do a
+                * chunk of extra expiry work.
+                */
+               __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
+                                                    100u));
+       }
+
+       /* Release references */
+       clear_bit(slot_idx, &efx->rps_slot_map);
+       dev_put(req->net_dev);
+}
+
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+                  u16 rxq_index, u32 flow_id)
+{
+       struct efx_nic *efx = netdev_priv(net_dev);
+       struct efx_async_filter_insertion *req;
+       struct efx_arfs_rule *rule;
+       struct flow_keys fk;
+       int slot_idx;
+       bool new;
+       int rc;
+
+       /* find a free slot */
+       for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
+               if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
+                       break;
+       if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
+               return -EBUSY;
+
+       if (flow_id == RPS_FLOW_ID_INVALID) {
+               rc = -EINVAL;
+               goto out_clear;
+       }
+
+       if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
+               rc = -EPROTONOSUPPORT;
+               goto out_clear;
+       }
+
+       if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
+               rc = -EPROTONOSUPPORT;
+               goto out_clear;
+       }
+       if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
+               rc = -EPROTONOSUPPORT;
+               goto out_clear;
+       }
+
+       req = efx->rps_slot + slot_idx;
+       efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
+                          efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
+                          rxq_index);
+       req->spec.match_flags =
+               EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+               EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+               EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+       req->spec.ether_type = fk.basic.n_proto;
+       req->spec.ip_proto = fk.basic.ip_proto;
+
+       if (fk.basic.n_proto == htons(ETH_P_IP)) {
+               req->spec.rem_host[0] = fk.addrs.v4addrs.src;
+               req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
+       } else {
+               memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
+                      sizeof(struct in6_addr));
+               memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
+                      sizeof(struct in6_addr));
+       }
+
+       req->spec.rem_port = fk.ports.src;
+       req->spec.loc_port = fk.ports.dst;
+
+       if (efx->rps_hash_table) {
+               /* Add it to ARFS hash table */
+               spin_lock(&efx->rps_hash_lock);
+               rule = efx_rps_hash_add(efx, &req->spec, &new);
+               if (!rule) {
+                       rc = -ENOMEM;
+                       goto out_unlock;
+               }
+               if (new)
+                       rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
+               rc = rule->arfs_id;
+               /* Skip if existing or pending filter already does the right thing */
+               if (!new && rule->rxq_index == rxq_index &&
+                   rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
+                       goto out_unlock;
+               rule->rxq_index = rxq_index;
+               rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
+               spin_unlock(&efx->rps_hash_lock);
+       } else {
+               /* Without an ARFS hash table, we just use arfs_id 0 for all
+                * filters.  This means if multiple flows hash to the same
+                * flow_id, all but the most recently touched will be eligible
+                * for expiry.
+                */
+               rc = 0;
+       }
+
+       /* Queue the request */
+       dev_hold(req->net_dev = net_dev);
+       INIT_WORK(&req->work, efx_filter_rfs_work);
+       req->rxq_index = rxq_index;
+       req->flow_id = flow_id;
+       schedule_work(&req->work);
+       return rc;
+out_unlock:
+       spin_unlock(&efx->rps_hash_lock);
+out_clear:
+       clear_bit(slot_idx, &efx->rps_slot_map);
+       return rc;
+}
+
+bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
+{
+       bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
+       struct efx_nic *efx = channel->efx;
+       unsigned int index, size, start;
+       u32 flow_id;
+
+       if (!mutex_trylock(&efx->rps_mutex))
+               return false;
+       expire_one = efx->type->filter_rfs_expire_one;
+       index = channel->rfs_expire_index;
+       start = index;
+       size = efx->type->max_rx_ip_filters;
+       while (quota) {
+               flow_id = channel->rps_flow_id[index];
+
+               if (flow_id != RPS_FLOW_ID_INVALID) {
+                       quota--;
+                       if (expire_one(efx, flow_id, index)) {
+                               netif_info(efx, rx_status, efx->net_dev,
+                                          "expired filter %d [channel %u flow %u]\n",
+                                          index, channel->channel, flow_id);
+                               channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
+                               channel->rfs_filter_count--;
+                       }
+               }
+               if (++index == size)
+                       index = 0;
+               /* If we were called with a quota that exceeds the total number
+                * of filters in the table (which shouldn't happen, but could
+                * if two callers race), ensure that we don't loop forever -
+                * stop when we've examined every row of the table.
+                */
+               if (index == start)
+                       break;
+       }
+
+       channel->rfs_expire_index = index;
+       mutex_unlock(&efx->rps_mutex);
+       return true;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
index c41f12a..207ccd8 100644 (file)
@@ -57,6 +57,15 @@ void efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
                        unsigned int page_offset,
                        u16 flags);
 void efx_unmap_rx_buffer(struct efx_nic *efx, struct efx_rx_buffer *rx_buf);
+
+static inline void efx_sync_rx_buffer(struct efx_nic *efx,
+                                     struct efx_rx_buffer *rx_buf,
+                                     unsigned int len)
+{
+       dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
+                               DMA_FROM_DEVICE);
+}
+
 void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
                         struct efx_rx_buffer *rx_buf,
                         unsigned int num_bufs);
@@ -67,7 +76,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
 
 void
 efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
-                 unsigned int n_frags, u8 *eh);
+                 unsigned int n_frags, u8 *eh, __wsum csum);
 
 struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
 struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
@@ -89,6 +98,10 @@ struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
                                       const struct efx_filter_spec *spec,
                                       bool *new);
 void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
+
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+                  u16 rxq_index, u32 flow_id);
+bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota);
 #endif
 
 int efx_probe_filters(struct efx_nic *efx);
index 1ae3690..e71d6d3 100644 (file)
@@ -445,7 +445,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
                if (rc != NETDEV_TX_OK) {
                        netif_err(efx, drv, efx->net_dev,
                                  "TX queue %d could not transmit packet %d of "
-                                 "%d in %s loopback test\n", tx_queue->queue,
+                                 "%d in %s loopback test\n", tx_queue->label,
                                  i + 1, state->packet_count,
                                  LOOPBACK_MODE(efx));
 
@@ -497,7 +497,7 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
                netif_err(efx, drv, efx->net_dev,
                          "TX queue %d saw only %d out of an expected %d "
                          "TX completion events in %s loopback test\n",
-                         tx_queue->queue, tx_done, state->packet_count,
+                         tx_queue->label, tx_done, state->packet_count,
                          LOOPBACK_MODE(efx));
                rc = -ETIMEDOUT;
                /* Allow to fall through so we see the RX errors as well */
@@ -508,15 +508,15 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
                netif_dbg(efx, drv, efx->net_dev,
                          "TX queue %d saw only %d out of an expected %d "
                          "received packets in %s loopback test\n",
-                         tx_queue->queue, rx_good, state->packet_count,
+                         tx_queue->label, rx_good, state->packet_count,
                          LOOPBACK_MODE(efx));
                rc = -ETIMEDOUT;
                /* Fall through */
        }
 
        /* Update loopback test structure */
-       lb_tests->tx_sent[tx_queue->queue] += state->packet_count;
-       lb_tests->tx_done[tx_queue->queue] += tx_done;
+       lb_tests->tx_sent[tx_queue->label] += state->packet_count;
+       lb_tests->tx_done[tx_queue->label] += tx_done;
        lb_tests->rx_good += rx_good;
        lb_tests->rx_bad += rx_bad;
 
@@ -542,8 +542,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
                state->flush = false;
 
                netif_dbg(efx, drv, efx->net_dev,
-                         "TX queue %d testing %s loopback with %d packets\n",
-                         tx_queue->queue, LOOPBACK_MODE(efx),
+                         "TX queue %d (hw %d) testing %s loopback with %d packets\n",
+                         tx_queue->label, tx_queue->queue, LOOPBACK_MODE(efx),
                          state->packet_count);
 
                efx_iterate_state(efx);
@@ -570,7 +570,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
 
        netif_dbg(efx, drv, efx->net_dev,
                  "TX queue %d passed %s loopback test with a burst length "
-                 "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx),
+                 "of %d packets\n", tx_queue->label, LOOPBACK_MODE(efx),
                  state->packet_count);
 
        return 0;
@@ -660,7 +660,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
 
                /* Test all enabled types of TX queue */
                efx_for_each_channel_tx_queue(tx_queue, channel) {
-                       state->offload_csum = (tx_queue->queue &
+                       state->offload_csum = (tx_queue->label &
                                               EFX_TXQ_TYPE_OFFLOAD);
                        rc = efx_test_loopback(tx_queue,
                                               &tests->loopback[mode]);
index 891e9fb..219fb3a 100644 (file)
@@ -21,6 +21,7 @@
 #include "workarounds.h"
 #include "mcdi.h"
 #include "mcdi_pcol.h"
+#include "mcdi_port.h"
 #include "mcdi_port_common.h"
 #include "selftest.h"
 #include "siena_sriov.h"
@@ -276,7 +277,9 @@ static int siena_probe_nic(struct efx_nic *efx)
        }
 
        efx->max_channels = EFX_MAX_CHANNELS;
+       efx->max_vis = EFX_MAX_CHANNELS;
        efx->max_tx_channels = EFX_MAX_CHANNELS;
+       efx->tx_queues_per_channel = 4;
 
        efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
        efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
@@ -631,7 +634,7 @@ static size_t siena_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
        return SIENA_STAT_COUNT;
 }
 
-static int siena_mac_reconfigure(struct efx_nic *efx)
+static int siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only __always_unused)
 {
        MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MCAST_HASH_IN_LEN);
        int rc;
@@ -1083,7 +1086,6 @@ const struct efx_nic_type siena_a0_nic_type = {
        .can_rx_scatter = true,
        .option_descriptors = false,
        .min_interrupt_mode = EFX_INT_MODE_LEGACY,
-       .max_interrupt_mode = EFX_INT_MODE_MSIX,
        .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
        .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                             NETIF_F_RXHASH | NETIF_F_NTUPLE),
index 19b5856..1bcf50a 100644 (file)
@@ -269,34 +269,6 @@ static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
 #endif /* EFX_USE_PIO */
 
 /*
- * Fallback to software TSO.
- *
- * This is used if we are unable to send a GSO packet through hardware TSO.
- * This should only ever happen due to per-queue restrictions - unsupported
- * packets should first be filtered by the feature flags.
- *
- * Returns 0 on success, error code otherwise.
- */
-static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
-                              struct sk_buff *skb)
-{
-       struct sk_buff *segments, *next;
-
-       segments = skb_gso_segment(skb, 0);
-       if (IS_ERR(segments))
-               return PTR_ERR(segments);
-
-       dev_consume_skb_any(skb);
-
-       skb_list_walk_safe(segments, skb, next) {
-               skb_mark_not_on_list(skb);
-               efx_enqueue_skb(tx_queue, skb);
-       }
-
-       return 0;
-}
-
-/*
  * Add a socket buffer to a TX queue
  *
  * This maps all fragments of a socket buffer for DMA and adds them to
@@ -579,8 +551,8 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
        /* Must be inverse of queue lookup in efx_hard_start_xmit() */
        tx_queue->core_txq =
                netdev_get_tx_queue(efx->net_dev,
-                                   tx_queue->queue / EFX_TXQ_TYPES +
-                                   ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+                                   tx_queue->channel->channel +
+                                   ((tx_queue->label & EFX_TXQ_TYPE_HIGHPRI) ?
                                     efx->n_tx_channels : 0));
 }
 
@@ -589,14 +561,15 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
 {
        struct efx_nic *efx = netdev_priv(net_dev);
        struct tc_mqprio_qopt *mqprio = type_data;
-       struct efx_channel *channel;
-       struct efx_tx_queue *tx_queue;
        unsigned tc, num_tc;
-       int rc;
 
        if (type != TC_SETUP_QDISC_MQPRIO)
                return -EOPNOTSUPP;
 
+       /* Only Siena supported highpri queues */
+       if (efx_nic_rev(efx) > EFX_REV_SIENA_A0)
+               return -EOPNOTSUPP;
+
        num_tc = mqprio->num_tc;
 
        if (num_tc > EFX_MAX_TX_TC)
@@ -612,40 +585,9 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
                net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
        }
 
-       if (num_tc > net_dev->num_tc) {
-               /* Initialise high-priority queues as necessary */
-               efx_for_each_channel(channel, efx) {
-                       efx_for_each_possible_channel_tx_queue(tx_queue,
-                                                              channel) {
-                               if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
-                                       continue;
-                               if (!tx_queue->buffer) {
-                                       rc = efx_probe_tx_queue(tx_queue);
-                                       if (rc)
-                                               return rc;
-                               }
-                               if (!tx_queue->initialised)
-                                       efx_init_tx_queue(tx_queue);
-                               efx_init_tx_queue_core_txq(tx_queue);
-                       }
-               }
-       } else {
-               /* Reduce number of classes before number of queues */
-               net_dev->num_tc = num_tc;
-       }
-
-       rc = netif_set_real_num_tx_queues(net_dev,
-                                         max_t(int, num_tc, 1) *
-                                         efx->n_tx_channels);
-       if (rc)
-               return rc;
-
-       /* Do not destroy high-priority queues when they become
-        * unused.  We would have to flush them first, and it is
-        * fairly difficult to flush a subset of TX queues.  Leave
-        * it to efx_fini_channels().
-        */
-
        net_dev->num_tc = num_tc;
-       return 0;
+
+       return netif_set_real_num_tx_queues(net_dev,
+                                           max_t(int, num_tc, 1) *
+                                           efx->n_tx_channels);
 }
index e04d5dd..a3cf06c 100644 (file)
@@ -18,7 +18,4 @@ unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue,
 u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
                                   struct efx_tx_buffer *buffer, size_t len);
 
-int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
-                       bool *data_mapped);
-
 #endif /* EFX_TX_H */
index 70876df..11b64c6 100644 (file)
@@ -10,7 +10,7 @@
 
 #include "net_driver.h"
 #include "efx.h"
-#include "nic.h"
+#include "nic_common.h"
 #include "tx_common.h"
 
 static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
@@ -298,7 +298,11 @@ struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
        /* Map the fragment taking account of NIC-dependent DMA limits. */
        do {
                buffer = efx_tx_queue_get_insert_buffer(tx_queue);
-               dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
+
+               if (nic_type->tx_limit_len)
+                       dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
+               else
+                       dma_len = len;
 
                buffer->len = dma_len;
                buffer->dma_addr = dma_addr;
@@ -311,6 +315,20 @@ struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
        return buffer;
 }
 
+int efx_tx_tso_header_length(struct sk_buff *skb)
+{
+       size_t header_len;
+
+       if (skb->encapsulation)
+               header_len = skb_inner_transport_header(skb) -
+                               skb->data +
+                               (inner_tcp_hdr(skb)->doff << 2u);
+       else
+               header_len = skb_transport_header(skb) - skb->data +
+                               (tcp_hdr(skb)->doff << 2u);
+       return header_len;
+}
+
 /* Map all data from an SKB for DMA and create descriptors on the queue. */
 int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
                    unsigned int segment_count)
@@ -339,8 +357,7 @@ int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
                /* For TSO we need to put the header in to a separate
                 * descriptor. Map this separately if necessary.
                 */
-               size_t header_len = skb_transport_header(skb) - skb->data +
-                               (tcp_hdr(skb)->doff << 2u);
+               size_t header_len = efx_tx_tso_header_length(skb);
 
                if (header_len != len) {
                        tx_queue->tso_long_headers++;
@@ -405,3 +422,30 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
 
        return max_descs;
 }
+
+/*
+ * Fallback to software TSO.
+ *
+ * This is used if we are unable to send a GSO packet through hardware TSO.
+ * This should only ever happen due to per-queue restrictions - unsupported
+ * packets should first be filtered by the feature flags.
+ *
+ * Returns 0 on success, error code otherwise.
+ */
+int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+       struct sk_buff *segments, *next;
+
+       segments = skb_gso_segment(skb, 0);
+       if (IS_ERR(segments))
+               return PTR_ERR(segments);
+
+       dev_consume_skb_any(skb);
+
+       skb_list_walk_safe(segments, skb, next) {
+               skb_mark_not_on_list(skb);
+               efx_enqueue_skb(tx_queue, skb);
+       }
+
+       return 0;
+}
index 99cf7ce..cbe995b 100644 (file)
@@ -34,9 +34,10 @@ void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
 
 struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
                                       dma_addr_t dma_addr, size_t len);
+int efx_tx_tso_header_length(struct sk_buff *skb);
 int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
                    unsigned int segment_count);
 
 unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
-
+int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 #endif
index 5a4b6e3..676b193 100644 (file)
@@ -494,9 +494,9 @@ static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
        skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
        if (unlikely(!skb))
                goto skb_alloc_failed;
-       mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
-                       PCI_DMA_FROMDEVICE);
-       if (pci_dma_mapping_error(tp->pci_dev, mapping))
+       mapping = dma_map_single(&tp->pci_dev->dev, skb->data, tp->rx_buf_sz,
+                                DMA_FROM_DEVICE);
+       if (dma_mapping_error(&tp->pci_dev->dev, mapping))
                goto out;
        sis190_map_to_asic(desc, mapping, rx_buf_sz);
 
@@ -542,8 +542,8 @@ static bool sis190_try_rx_copy(struct sis190_private *tp,
        if (!skb)
                goto out;
 
-       pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
-                               PCI_DMA_FROMDEVICE);
+       dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, tp->rx_buf_sz,
+                               DMA_FROM_DEVICE);
        skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
        *sk_buff = skb;
        done = true;
@@ -612,12 +612,14 @@ static int sis190_rx_interrupt(struct net_device *dev,
 
 
                        if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
-                               pci_dma_sync_single_for_device(pdev, addr,
-                                       tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+                               dma_sync_single_for_device(&pdev->dev, addr,
+                                                          tp->rx_buf_sz,
+                                                          DMA_FROM_DEVICE);
                                sis190_give_to_asic(desc, tp->rx_buf_sz);
                        } else {
-                               pci_unmap_single(pdev, addr, tp->rx_buf_sz,
-                                                PCI_DMA_FROMDEVICE);
+                               dma_unmap_single(&pdev->dev, addr,
+                                                tp->rx_buf_sz,
+                                                DMA_FROM_DEVICE);
                                tp->Rx_skbuff[entry] = NULL;
                                sis190_make_unusable_by_asic(desc);
                        }
@@ -654,7 +656,8 @@ static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
 
        len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
 
-       pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
+       dma_unmap_single(&pdev->dev, le32_to_cpu(desc->addr), len,
+                        DMA_TO_DEVICE);
 
        memset(desc, 0x00, sizeof(*desc));
 }
@@ -785,8 +788,8 @@ static void sis190_free_rx_skb(struct sis190_private *tp,
 {
        struct pci_dev *pdev = tp->pci_dev;
 
-       pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
-                        PCI_DMA_FROMDEVICE);
+       dma_unmap_single(&pdev->dev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
+                        DMA_FROM_DEVICE);
        dev_kfree_skb(*sk_buff);
        *sk_buff = NULL;
        sis190_make_unusable_by_asic(desc);
@@ -1069,11 +1072,13 @@ static int sis190_open(struct net_device *dev)
         * Rx and Tx descriptors need 256 bytes alignment.
         * pci_alloc_consistent() guarantees a stronger alignment.
         */
-       tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
+       tp->TxDescRing = dma_alloc_coherent(&pdev->dev, TX_RING_BYTES,
+                                           &tp->tx_dma, GFP_KERNEL);
        if (!tp->TxDescRing)
                goto out;
 
-       tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
+       tp->RxDescRing = dma_alloc_coherent(&pdev->dev, RX_RING_BYTES,
+                                           &tp->rx_dma, GFP_KERNEL);
        if (!tp->RxDescRing)
                goto err_free_tx_0;
 
@@ -1095,9 +1100,11 @@ err_release_timer_2:
        sis190_delete_timer(dev);
        sis190_rx_clear(tp);
 err_free_rx_1:
-       pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
+       dma_free_coherent(&pdev->dev, RX_RING_BYTES, tp->RxDescRing,
+                         tp->rx_dma);
 err_free_tx_0:
-       pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
+       dma_free_coherent(&pdev->dev, TX_RING_BYTES, tp->TxDescRing,
+                         tp->tx_dma);
        goto out;
 }
 
@@ -1159,8 +1166,10 @@ static int sis190_close(struct net_device *dev)
 
        free_irq(pdev->irq, dev);
 
-       pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
-       pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
+       dma_free_coherent(&pdev->dev, TX_RING_BYTES, tp->TxDescRing,
+                         tp->tx_dma);
+       dma_free_coherent(&pdev->dev, RX_RING_BYTES, tp->RxDescRing,
+                         tp->rx_dma);
 
        tp->TxDescRing = NULL;
        tp->RxDescRing = NULL;
@@ -1197,8 +1206,9 @@ static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
                return NETDEV_TX_BUSY;
        }
 
-       mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(tp->pci_dev, mapping)) {
+       mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len,
+                                DMA_TO_DEVICE);
+       if (dma_mapping_error(&tp->pci_dev->dev, mapping)) {
                netif_err(tp, tx_err, dev,
                                "PCI mapping failed, dropping packet");
                return NETDEV_TX_BUSY;
@@ -1498,7 +1508,7 @@ static struct net_device *sis190_init_board(struct pci_dev *pdev)
                goto err_pci_disable_2;
        }
 
-       rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (rc < 0) {
                if (netif_msg_probe(tp))
                        pr_err("%s: DMA configuration failed\n",
index 81ed758..82e020a 100644 (file)
@@ -446,7 +446,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
        ret = pci_enable_device(pci_dev);
        if(ret) return ret;
 
-       i = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+       i = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
        if(i){
                printk(KERN_ERR "sis900.c: architecture does not support "
                        "32bit PCI busmaster DMA\n");
@@ -481,7 +481,8 @@ static int sis900_probe(struct pci_dev *pci_dev,
 
        pci_set_drvdata(pci_dev, net_dev);
 
-       ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma);
+       ring_space = dma_alloc_coherent(&pci_dev->dev, TX_TOTAL_SIZE,
+                                       &ring_dma, GFP_KERNEL);
        if (!ring_space) {
                ret = -ENOMEM;
                goto err_out_unmap;
@@ -489,7 +490,8 @@ static int sis900_probe(struct pci_dev *pci_dev,
        sis_priv->tx_ring = ring_space;
        sis_priv->tx_ring_dma = ring_dma;
 
-       ring_space = pci_alloc_consistent(pci_dev, RX_TOTAL_SIZE, &ring_dma);
+       ring_space = dma_alloc_coherent(&pci_dev->dev, RX_TOTAL_SIZE,
+                                       &ring_dma, GFP_KERNEL);
        if (!ring_space) {
                ret = -ENOMEM;
                goto err_unmap_tx;
@@ -572,11 +574,11 @@ static int sis900_probe(struct pci_dev *pci_dev,
        return 0;
 
 err_unmap_rx:
-       pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
-               sis_priv->rx_ring_dma);
+       dma_free_coherent(&pci_dev->dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
+                         sis_priv->rx_ring_dma);
 err_unmap_tx:
-       pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
-               sis_priv->tx_ring_dma);
+       dma_free_coherent(&pci_dev->dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
+                         sis_priv->tx_ring_dma);
 err_out_unmap:
        pci_iounmap(pci_dev, ioaddr);
 err_out_cleardev:
@@ -1188,10 +1190,12 @@ sis900_init_rx_ring(struct net_device *net_dev)
                }
                sis_priv->rx_skbuff[i] = skb;
                sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE;
-               sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev,
-                               skb->data, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
-               if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
-                               sis_priv->rx_ring[i].bufptr))) {
+               sis_priv->rx_ring[i].bufptr = dma_map_single(&sis_priv->pci_dev->dev,
+                                                            skb->data,
+                                                            RX_BUF_SIZE,
+                                                            DMA_FROM_DEVICE);
+               if (unlikely(dma_mapping_error(&sis_priv->pci_dev->dev,
+                                              sis_priv->rx_ring[i].bufptr))) {
                        dev_kfree_skb(skb);
                        sis_priv->rx_skbuff[i] = NULL;
                        break;
@@ -1561,9 +1565,9 @@ static void sis900_tx_timeout(struct net_device *net_dev, unsigned int txqueue)
                struct sk_buff *skb = sis_priv->tx_skbuff[i];
 
                if (skb) {
-                       pci_unmap_single(sis_priv->pci_dev,
-                               sis_priv->tx_ring[i].bufptr, skb->len,
-                               PCI_DMA_TODEVICE);
+                       dma_unmap_single(&sis_priv->pci_dev->dev,
+                                        sis_priv->tx_ring[i].bufptr,
+                                        skb->len, DMA_TO_DEVICE);
                        dev_kfree_skb_irq(skb);
                        sis_priv->tx_skbuff[i] = NULL;
                        sis_priv->tx_ring[i].cmdsts = 0;
@@ -1612,10 +1616,11 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
        sis_priv->tx_skbuff[entry] = skb;
 
        /* set the transmit buffer descriptor and enable Transmit State Machine */
-       sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev,
-               skb->data, skb->len, PCI_DMA_TODEVICE);
-       if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
-               sis_priv->tx_ring[entry].bufptr))) {
+       sis_priv->tx_ring[entry].bufptr = dma_map_single(&sis_priv->pci_dev->dev,
+                                                        skb->data, skb->len,
+                                                        DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(&sis_priv->pci_dev->dev,
+                                      sis_priv->tx_ring[entry].bufptr))) {
                        dev_kfree_skb_any(skb);
                        sis_priv->tx_skbuff[entry] = NULL;
                        net_dev->stats.tx_dropped++;
@@ -1778,9 +1783,9 @@ static int sis900_rx(struct net_device *net_dev)
                        struct sk_buff * skb;
                        struct sk_buff * rx_skb;
 
-                       pci_unmap_single(sis_priv->pci_dev,
-                               sis_priv->rx_ring[entry].bufptr, RX_BUF_SIZE,
-                               PCI_DMA_FROMDEVICE);
+                       dma_unmap_single(&sis_priv->pci_dev->dev,
+                                        sis_priv->rx_ring[entry].bufptr,
+                                        RX_BUF_SIZE, DMA_FROM_DEVICE);
 
                        /* refill the Rx buffer, what if there is not enough
                         * memory for new socket buffer ?? */
@@ -1826,10 +1831,11 @@ refill_rx_ring:
                        sis_priv->rx_skbuff[entry] = skb;
                        sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
                        sis_priv->rx_ring[entry].bufptr =
-                               pci_map_single(sis_priv->pci_dev, skb->data,
-                                       RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
-                       if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
-                               sis_priv->rx_ring[entry].bufptr))) {
+                               dma_map_single(&sis_priv->pci_dev->dev,
+                                              skb->data, RX_BUF_SIZE,
+                                              DMA_FROM_DEVICE);
+                       if (unlikely(dma_mapping_error(&sis_priv->pci_dev->dev,
+                                                      sis_priv->rx_ring[entry].bufptr))) {
                                dev_kfree_skb_irq(skb);
                                sis_priv->rx_skbuff[entry] = NULL;
                                break;
@@ -1860,10 +1866,11 @@ refill_rx_ring:
                        sis_priv->rx_skbuff[entry] = skb;
                        sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
                        sis_priv->rx_ring[entry].bufptr =
-                               pci_map_single(sis_priv->pci_dev, skb->data,
-                                       RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
-                       if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
-                                       sis_priv->rx_ring[entry].bufptr))) {
+                               dma_map_single(&sis_priv->pci_dev->dev,
+                                              skb->data, RX_BUF_SIZE,
+                                              DMA_FROM_DEVICE);
+                       if (unlikely(dma_mapping_error(&sis_priv->pci_dev->dev,
+                                                      sis_priv->rx_ring[entry].bufptr))) {
                                dev_kfree_skb_irq(skb);
                                sis_priv->rx_skbuff[entry] = NULL;
                                break;
@@ -1928,9 +1935,9 @@ static void sis900_finish_xmit (struct net_device *net_dev)
                }
                /* Free the original skb. */
                skb = sis_priv->tx_skbuff[entry];
-               pci_unmap_single(sis_priv->pci_dev,
-                       sis_priv->tx_ring[entry].bufptr, skb->len,
-                       PCI_DMA_TODEVICE);
+               dma_unmap_single(&sis_priv->pci_dev->dev,
+                                sis_priv->tx_ring[entry].bufptr, skb->len,
+                                DMA_TO_DEVICE);
                dev_consume_skb_irq(skb);
                sis_priv->tx_skbuff[entry] = NULL;
                sis_priv->tx_ring[entry].bufptr = 0;
@@ -1979,8 +1986,9 @@ static int sis900_close(struct net_device *net_dev)
        for (i = 0; i < NUM_RX_DESC; i++) {
                skb = sis_priv->rx_skbuff[i];
                if (skb) {
-                       pci_unmap_single(pdev, sis_priv->rx_ring[i].bufptr,
-                                        RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+                       dma_unmap_single(&pdev->dev,
+                                        sis_priv->rx_ring[i].bufptr,
+                                        RX_BUF_SIZE, DMA_FROM_DEVICE);
                        dev_kfree_skb(skb);
                        sis_priv->rx_skbuff[i] = NULL;
                }
@@ -1988,8 +1996,9 @@ static int sis900_close(struct net_device *net_dev)
        for (i = 0; i < NUM_TX_DESC; i++) {
                skb = sis_priv->tx_skbuff[i];
                if (skb) {
-                       pci_unmap_single(pdev, sis_priv->tx_ring[i].bufptr,
-                                        skb->len, PCI_DMA_TODEVICE);
+                       dma_unmap_single(&pdev->dev,
+                                        sis_priv->tx_ring[i].bufptr,
+                                        skb->len, DMA_TO_DEVICE);
                        dev_kfree_skb(skb);
                        sis_priv->tx_skbuff[i] = NULL;
                }
@@ -2484,10 +2493,10 @@ static void sis900_remove(struct pci_dev *pci_dev)
                kfree(phy);
        }
 
-       pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
-               sis_priv->rx_ring_dma);
-       pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
-               sis_priv->tx_ring_dma);
+       dma_free_coherent(&pci_dev->dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
+                         sis_priv->rx_ring_dma);
+       dma_free_coherent(&pci_dev->dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
+                         sis_priv->tx_ring_dma);
        pci_iounmap(pci_dev, sis_priv->ioaddr);
        free_netdev(net_dev);
        pci_release_regions(pci_dev);
index 6293b1e..b77e427 100644 (file)
@@ -28,7 +28,7 @@ config SMC9194
          option if you have a DELL laptop with the docking station, or
          another SMC9192/9194 based chipset.  Say Y if you want it compiled
          into the kernel, and read the file
-         <file:Documentation/networking/device_drivers/smsc/smc9.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/smsc/smc9.rst>.
 
          To compile this driver as a module, choose M here. The module
          will be called smc9194.
@@ -44,7 +44,7 @@ config SMC91X
          This is a driver for SMC's 91x series of Ethernet chipsets,
          including the SMC91C94 and the SMC91C111. Say Y if you want it
          compiled into the kernel, and read the file
-         <file:Documentation/networking/device_drivers/smsc/smc9.rst>.
+         <file:Documentation/networking/device_drivers/ethernet/smsc/smc9.rst>.
 
          This driver is also available as a module ( = code which can be
          inserted in and removed from the running kernel whenever you want).
index 61ddee0..d950b31 100644 (file)
@@ -1512,12 +1512,9 @@ static void epic_remove_one(struct pci_dev *pdev)
        /* pci_power_off(pdev, -1); */
 }
 
-
-#ifdef CONFIG_PM
-
-static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused epic_suspend(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct epic_private *ep = netdev_priv(dev);
        void __iomem *ioaddr = ep->ioaddr;
 
@@ -1531,9 +1528,9 @@ static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
 }
 
 
-static int epic_resume (struct pci_dev *pdev)
+static int __maybe_unused epic_resume(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
 
        if (!netif_running(dev))
                return 0;
@@ -1542,18 +1539,14 @@ static int epic_resume (struct pci_dev *pdev)
        return 0;
 }
 
-#endif /* CONFIG_PM */
-
+static SIMPLE_DEV_PM_OPS(epic_pm_ops, epic_suspend, epic_resume);
 
 static struct pci_driver epic_driver = {
        .name           = DRV_NAME,
        .id_table       = epic_pci_tbl,
        .probe          = epic_init_one,
        .remove         = epic_remove_one,
-#ifdef CONFIG_PM
-       .suspend        = epic_suspend,
-       .resume         = epic_resume,
-#endif /* CONFIG_PM */
+       .driver.pm      = &epic_pm_ops,
 };
 
 
index 7312e52..42bef04 100644 (file)
@@ -1422,11 +1422,9 @@ out_0:
        return result;
 }
 
-#ifdef CONFIG_PM
-
-static int smsc9420_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused smsc9420_suspend(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct smsc9420_pdata *pd = netdev_priv(dev);
        u32 int_cfg;
        ulong flags;
@@ -1451,34 +1449,21 @@ static int smsc9420_suspend(struct pci_dev *pdev, pm_message_t state)
                netif_device_detach(dev);
        }
 
-       pci_save_state(pdev);
-       pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
-       pci_disable_device(pdev);
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
+       device_wakeup_disable(dev_d);
 
        return 0;
 }
 
-static int smsc9420_resume(struct pci_dev *pdev)
+static int __maybe_unused smsc9420_resume(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
-       struct smsc9420_pdata *pd = netdev_priv(dev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        int err;
 
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-
-       err = pci_enable_device(pdev);
-       if (err)
-               return err;
+       pci_set_master(to_pci_dev(dev_d));
 
-       pci_set_master(pdev);
-
-       err = pci_enable_wake(pdev, PCI_D0, 0);
-       if (err)
-               netif_warn(pd, ifup, pd->dev, "pci_enable_wake failed: %d\n",
-                          err);
+       device_wakeup_disable(dev_d);
 
+       err = 0;
        if (netif_running(dev)) {
                /* FIXME: gross. It looks like ancient PM relic.*/
                err = smsc9420_open(dev);
@@ -1487,8 +1472,6 @@ static int smsc9420_resume(struct pci_dev *pdev)
        return err;
 }
 
-#endif /* CONFIG_PM */
-
 static const struct net_device_ops smsc9420_netdev_ops = {
        .ndo_open               = smsc9420_open,
        .ndo_stop               = smsc9420_stop,
@@ -1658,15 +1641,14 @@ static void smsc9420_remove(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
+static SIMPLE_DEV_PM_OPS(smsc9420_pm_ops, smsc9420_suspend, smsc9420_resume);
+
 static struct pci_driver smsc9420_driver = {
        .name = DRV_NAME,
        .id_table = smsc9420_id_table,
        .probe = smsc9420_probe,
        .remove = smsc9420_remove,
-#ifdef CONFIG_PM
-       .suspend = smsc9420_suspend,
-       .resume = smsc9420_resume,
-#endif /* CONFIG_PM */
+       .driver.pm = &smsc9420_pm_ops,
 };
 
 static int __init smsc9420_init_module(void)
index 328bc38..0f366cc 100644 (file)
@@ -1044,8 +1044,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 next:
-               if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) ||
-                   xdp_result) {
+               if (skb)
+                       napi_gro_receive(&priv->napi, skb);
+               if (skb || xdp_result) {
                        ndev->stats.rx_packets++;
                        ndev->stats.rx_bytes += xdp.data_end - xdp.data;
                }
index 234e8b6..5afcf05 100644 (file)
@@ -69,8 +69,6 @@
  */
 #define PRG_ETH0_ADJ_SKEW              GENMASK(24, 20)
 
-#define MUX_CLK_NUM_PARENTS            2
-
 struct meson8b_dwmac;
 
 struct meson8b_dwmac_data {
@@ -110,12 +108,12 @@ static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg,
 
 static struct clk *meson8b_dwmac_register_clk(struct meson8b_dwmac *dwmac,
                                              const char *name_suffix,
-                                             const char **parent_names,
+                                             const struct clk_parent_data *parents,
                                              int num_parents,
                                              const struct clk_ops *ops,
                                              struct clk_hw *hw)
 {
-       struct clk_init_data init;
+       struct clk_init_data init = { };
        char clk_name[32];
 
        snprintf(clk_name, sizeof(clk_name), "%s#%s", dev_name(dwmac->dev),
@@ -124,7 +122,7 @@ static struct clk *meson8b_dwmac_register_clk(struct meson8b_dwmac *dwmac,
        init.name = clk_name;
        init.ops = ops;
        init.flags = CLK_SET_RATE_PARENT;
-       init.parent_names = parent_names;
+       init.parent_data = parents;
        init.num_parents = num_parents;
 
        hw->init = &init;
@@ -134,11 +132,12 @@ static struct clk *meson8b_dwmac_register_clk(struct meson8b_dwmac *dwmac,
 
 static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
 {
-       int i, ret;
        struct clk *clk;
        struct device *dev = dwmac->dev;
-       const char *parent_name, *mux_parent_names[MUX_CLK_NUM_PARENTS];
-       struct meson8b_dwmac_clk_configs *clk_configs;
+       static const struct clk_parent_data mux_parents[] = {
+               { .fw_name = "clkin0", },
+               { .fw_name = "clkin1", },
+       };
        static const struct clk_div_table div_table[] = {
                { .div = 2, .val = 2, },
                { .div = 3, .val = 3, },
@@ -148,62 +147,48 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
                { .div = 7, .val = 7, },
                { /* end of array */ }
        };
+       struct meson8b_dwmac_clk_configs *clk_configs;
+       struct clk_parent_data parent_data = { };
 
        clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL);
        if (!clk_configs)
                return -ENOMEM;
 
-       /* get the mux parents from DT */
-       for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
-               char name[16];
-
-               snprintf(name, sizeof(name), "clkin%d", i);
-               clk = devm_clk_get(dev, name);
-               if (IS_ERR(clk)) {
-                       ret = PTR_ERR(clk);
-                       if (ret != -EPROBE_DEFER)
-                               dev_err(dev, "Missing clock %s\n", name);
-                       return ret;
-               }
-
-               mux_parent_names[i] = __clk_get_name(clk);
-       }
-
        clk_configs->m250_mux.reg = dwmac->regs + PRG_ETH0;
        clk_configs->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT;
        clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK;
-       clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parent_names,
-                                        MUX_CLK_NUM_PARENTS, &clk_mux_ops,
+       clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parents,
+                                        ARRAY_SIZE(mux_parents), &clk_mux_ops,
                                         &clk_configs->m250_mux.hw);
        if (WARN_ON(IS_ERR(clk)))
                return PTR_ERR(clk);
 
-       parent_name = __clk_get_name(clk);
+       parent_data.hw = &clk_configs->m250_mux.hw;
        clk_configs->m250_div.reg = dwmac->regs + PRG_ETH0;
        clk_configs->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
        clk_configs->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
        clk_configs->m250_div.table = div_table;
        clk_configs->m250_div.flags = CLK_DIVIDER_ALLOW_ZERO |
                                      CLK_DIVIDER_ROUND_CLOSEST;
-       clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_name, 1,
+       clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_data, 1,
                                         &clk_divider_ops,
                                         &clk_configs->m250_div.hw);
        if (WARN_ON(IS_ERR(clk)))
                return PTR_ERR(clk);
 
-       parent_name = __clk_get_name(clk);
+       parent_data.hw = &clk_configs->m250_div.hw;
        clk_configs->fixed_div2.mult = 1;
        clk_configs->fixed_div2.div = 2;
-       clk = meson8b_dwmac_register_clk(dwmac, "fixed_div2", &parent_name, 1,
+       clk = meson8b_dwmac_register_clk(dwmac, "fixed_div2", &parent_data, 1,
                                         &clk_fixed_factor_ops,
                                         &clk_configs->fixed_div2.hw);
        if (WARN_ON(IS_ERR(clk)))
                return PTR_ERR(clk);
 
-       parent_name = __clk_get_name(clk);
+       parent_data.hw = &clk_configs->fixed_div2.hw;
        clk_configs->rgmii_tx_en.reg = dwmac->regs + PRG_ETH0;
        clk_configs->rgmii_tx_en.bit_idx = PRG_ETH0_RGMII_TX_CLK_EN;
-       clk = meson8b_dwmac_register_clk(dwmac, "rgmii_tx_en", &parent_name, 1,
+       clk = meson8b_dwmac_register_clk(dwmac, "rgmii_tx_en", &parent_data, 1,
                                         &clk_gate_ops,
                                         &clk_configs->rgmii_tx_en.hw);
        if (WARN_ON(IS_ERR(clk)))
@@ -491,6 +476,10 @@ static const struct of_device_id meson8b_dwmac_match[] = {
                .compatible = "amlogic,meson-axg-dwmac",
                .data = &meson_axg_dwmac_data,
        },
+       {
+               .compatible = "amlogic,meson-g12a-dwmac",
+               .data = &meson_axg_dwmac_data,
+       },
        { }
 };
 MODULE_DEVICE_TABLE(of, meson8b_dwmac_match);
index e669649..e113b13 100644 (file)
@@ -1094,7 +1094,7 @@ static int stmmac_test_rxp(struct stmmac_priv *priv)
        if (!priv->dma_cap.frpsel)
                return -EOPNOTSUPP;
 
-       sel = kzalloc(sizeof(*sel) + nk * sizeof(struct tc_u32_key), GFP_KERNEL);
+       sel = kzalloc(struct_size(sel, keys, nk), GFP_KERNEL);
        if (!sel)
                return -ENOMEM;
 
index debd3c3..4536e97 100644 (file)
@@ -443,8 +443,8 @@ static void cas_phy_powerdown(struct cas *cp)
 /* cp->lock held. note: the last put_page will free the buffer */
 static int cas_page_free(struct cas *cp, cas_page_t *page)
 {
-       pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
-                      PCI_DMA_FROMDEVICE);
+       dma_unmap_page(&cp->pdev->dev, page->dma_addr, cp->page_size,
+                      DMA_FROM_DEVICE);
        __free_pages(page->buffer, cp->page_order);
        kfree(page);
        return 0;
@@ -474,8 +474,8 @@ static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
        page->buffer = alloc_pages(flags, cp->page_order);
        if (!page->buffer)
                goto page_err;
-       page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
-                                     cp->page_size, PCI_DMA_FROMDEVICE);
+       page->dma_addr = dma_map_page(&cp->pdev->dev, page->buffer, 0,
+                                     cp->page_size, DMA_FROM_DEVICE);
        return page;
 
 page_err:
@@ -1863,8 +1863,8 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
                        daddr = le64_to_cpu(txd->buffer);
                        dlen = CAS_VAL(TX_DESC_BUFLEN,
                                       le64_to_cpu(txd->control));
-                       pci_unmap_page(cp->pdev, daddr, dlen,
-                                      PCI_DMA_TODEVICE);
+                       dma_unmap_page(&cp->pdev->dev, daddr, dlen,
+                                      DMA_TO_DEVICE);
                        entry = TX_DESC_NEXT(ring, entry);
 
                        /* tiny buffer may follow */
@@ -1957,12 +1957,13 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                i = hlen;
                if (!dlen) /* attach FCS */
                        i += cp->crc_size;
-               pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
-                                   PCI_DMA_FROMDEVICE);
+               dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
+                                       i, DMA_FROM_DEVICE);
                addr = cas_page_map(page->buffer);
                memcpy(p, addr + off, i);
-               pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
-                                   PCI_DMA_FROMDEVICE);
+               dma_sync_single_for_device(&cp->pdev->dev,
+                                          page->dma_addr + off, i,
+                                          DMA_FROM_DEVICE);
                cas_page_unmap(addr);
                RX_USED_ADD(page, 0x100);
                p += hlen;
@@ -1988,16 +1989,17 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                i = hlen;
                if (i == dlen)  /* attach FCS */
                        i += cp->crc_size;
-               pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
-                                   PCI_DMA_FROMDEVICE);
+               dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
+                                       i, DMA_FROM_DEVICE);
 
                /* make sure we always copy a header */
                swivel = 0;
                if (p == (char *) skb->data) { /* not split */
                        addr = cas_page_map(page->buffer);
                        memcpy(p, addr + off, RX_COPY_MIN);
-                       pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
-                                       PCI_DMA_FROMDEVICE);
+                       dma_sync_single_for_device(&cp->pdev->dev,
+                                                  page->dma_addr + off, i,
+                                                  DMA_FROM_DEVICE);
                        cas_page_unmap(addr);
                        off += RX_COPY_MIN;
                        swivel = RX_COPY_MIN;
@@ -2024,12 +2026,14 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 
                        i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
                        page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
-                       pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
-                                           hlen + cp->crc_size,
-                                           PCI_DMA_FROMDEVICE);
-                       pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
-                                           hlen + cp->crc_size,
-                                           PCI_DMA_FROMDEVICE);
+                       dma_sync_single_for_cpu(&cp->pdev->dev,
+                                               page->dma_addr,
+                                               hlen + cp->crc_size,
+                                               DMA_FROM_DEVICE);
+                       dma_sync_single_for_device(&cp->pdev->dev,
+                                                  page->dma_addr,
+                                                  hlen + cp->crc_size,
+                                                  DMA_FROM_DEVICE);
 
                        skb_shinfo(skb)->nr_frags++;
                        skb->data_len += hlen;
@@ -2066,12 +2070,13 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                i = hlen;
                if (i == dlen) /* attach FCS */
                        i += cp->crc_size;
-               pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
-                                   PCI_DMA_FROMDEVICE);
+               dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
+                                       i, DMA_FROM_DEVICE);
                addr = cas_page_map(page->buffer);
                memcpy(p, addr + off, i);
-               pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off, i,
-                                   PCI_DMA_FROMDEVICE);
+               dma_sync_single_for_device(&cp->pdev->dev,
+                                          page->dma_addr + off, i,
+                                          DMA_FROM_DEVICE);
                cas_page_unmap(addr);
                if (p == (char *) skb->data) /* not split */
                        RX_USED_ADD(page, cp->mtu_stride);
@@ -2083,14 +2088,16 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                        p += hlen;
                        i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
                        page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
-                       pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
-                                           dlen + cp->crc_size,
-                                           PCI_DMA_FROMDEVICE);
+                       dma_sync_single_for_cpu(&cp->pdev->dev,
+                                               page->dma_addr,
+                                               dlen + cp->crc_size,
+                                               DMA_FROM_DEVICE);
                        addr = cas_page_map(page->buffer);
                        memcpy(p, addr, dlen + cp->crc_size);
-                       pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
-                                           dlen + cp->crc_size,
-                                           PCI_DMA_FROMDEVICE);
+                       dma_sync_single_for_device(&cp->pdev->dev,
+                                                  page->dma_addr,
+                                                  dlen + cp->crc_size,
+                                                  DMA_FROM_DEVICE);
                        cas_page_unmap(addr);
                        RX_USED_ADD(page, dlen + cp->crc_size);
                }
@@ -2766,9 +2773,8 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
 
        nr_frags = skb_shinfo(skb)->nr_frags;
        len = skb_headlen(skb);
-       mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
-                              offset_in_page(skb->data), len,
-                              PCI_DMA_TODEVICE);
+       mapping = dma_map_page(&cp->pdev->dev, virt_to_page(skb->data),
+                              offset_in_page(skb->data), len, DMA_TO_DEVICE);
 
        tentry = entry;
        tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
@@ -3882,8 +3888,8 @@ static void cas_clean_txd(struct cas *cp, int ring)
                        daddr = le64_to_cpu(txd[ent].buffer);
                        dlen  =  CAS_VAL(TX_DESC_BUFLEN,
                                         le64_to_cpu(txd[ent].control));
-                       pci_unmap_page(cp->pdev, daddr, dlen,
-                                      PCI_DMA_TODEVICE);
+                       dma_unmap_page(&cp->pdev->dev, daddr, dlen,
+                                      DMA_TO_DEVICE);
 
                        if (frag != skb_shinfo(skb)->nr_frags) {
                                i++;
@@ -4181,9 +4187,8 @@ static void cas_tx_tiny_free(struct cas *cp)
                if (!cp->tx_tiny_bufs[i])
                        continue;
 
-               pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
-                                   cp->tx_tiny_bufs[i],
-                                   cp->tx_tiny_dvma[i]);
+               dma_free_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
+                                 cp->tx_tiny_bufs[i], cp->tx_tiny_dvma[i]);
                cp->tx_tiny_bufs[i] = NULL;
        }
 }
@@ -4195,8 +4200,8 @@ static int cas_tx_tiny_alloc(struct cas *cp)
 
        for (i = 0; i < N_TX_RINGS; i++) {
                cp->tx_tiny_bufs[i] =
-                       pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
-                                            &cp->tx_tiny_dvma[i]);
+                       dma_alloc_coherent(&pdev->dev, TX_TINY_BUF_BLOCK,
+                                          &cp->tx_tiny_dvma[i], GFP_KERNEL);
                if (!cp->tx_tiny_bufs[i]) {
                        cas_tx_tiny_free(cp);
                        return -1;
@@ -4958,10 +4963,9 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 
        /* Configure DMA attributes. */
-       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+       if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
-               err = pci_set_consistent_dma_mask(pdev,
-                                                 DMA_BIT_MASK(64));
+               err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
                if (err < 0) {
                        dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
                               "for consistent allocations\n");
@@ -4969,7 +4973,7 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                }
 
        } else {
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "No usable DMA configuration, "
                               "aborting\n");
@@ -5048,8 +5052,8 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                cas_saturn_firmware_init(cp);
 
        cp->init_block =
-               pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
-                                    &cp->block_dvma);
+               dma_alloc_coherent(&pdev->dev, sizeof(struct cas_init_block),
+                                  &cp->block_dvma, GFP_KERNEL);
        if (!cp->init_block) {
                dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
                goto err_out_iounmap;
@@ -5109,8 +5113,8 @@ static int cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        return 0;
 
 err_out_free_consistent:
-       pci_free_consistent(pdev, sizeof(struct cas_init_block),
-                           cp->init_block, cp->block_dvma);
+       dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
+                         cp->init_block, cp->block_dvma);
 
 err_out_iounmap:
        mutex_lock(&cp->pm_mutex);
@@ -5164,18 +5168,17 @@ static void cas_remove_one(struct pci_dev *pdev)
                                      cp->orig_cacheline_size);
        }
 #endif
-       pci_free_consistent(pdev, sizeof(struct cas_init_block),
-                           cp->init_block, cp->block_dvma);
+       dma_free_coherent(&pdev->dev, sizeof(struct cas_init_block),
+                         cp->init_block, cp->block_dvma);
        pci_iounmap(pdev, cp->regs);
        free_netdev(dev);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
 }
 
-#ifdef CONFIG_PM
-static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused cas_suspend(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct cas *cp = netdev_priv(dev);
        unsigned long flags;
 
@@ -5204,9 +5207,9 @@ static int cas_suspend(struct pci_dev *pdev, pm_message_t state)
        return 0;
 }
 
-static int cas_resume(struct pci_dev *pdev)
+static int __maybe_unused cas_resume(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct cas *cp = netdev_priv(dev);
 
        netdev_info(dev, "resuming\n");
@@ -5227,17 +5230,15 @@ static int cas_resume(struct pci_dev *pdev)
        mutex_unlock(&cp->pm_mutex);
        return 0;
 }
-#endif /* CONFIG_PM */
+
+static SIMPLE_DEV_PM_OPS(cas_pm_ops, cas_suspend, cas_resume);
 
 static struct pci_driver cas_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = cas_pci_tbl,
        .probe          = cas_init_one,
        .remove         = cas_remove_one,
-#ifdef CONFIG_PM
-       .suspend        = cas_suspend,
-       .resume         = cas_resume
-#endif
+       .driver.pm      = &cas_pm_ops,
 };
 
 static int __init cas_init(void)
index 9a5004f..b4e20d1 100644 (file)
@@ -9873,9 +9873,9 @@ static void niu_pci_remove_one(struct pci_dev *pdev)
        }
 }
 
-static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused niu_suspend(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct niu *np = netdev_priv(dev);
        unsigned long flags;
 
@@ -9897,14 +9897,12 @@ static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
        niu_stop_hw(np);
        spin_unlock_irqrestore(&np->lock, flags);
 
-       pci_save_state(pdev);
-
        return 0;
 }
 
-static int niu_resume(struct pci_dev *pdev)
+static int __maybe_unused niu_resume(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct niu *np = netdev_priv(dev);
        unsigned long flags;
        int err;
@@ -9912,8 +9910,6 @@ static int niu_resume(struct pci_dev *pdev)
        if (!netif_running(dev))
                return 0;
 
-       pci_restore_state(pdev);
-
        netif_device_attach(dev);
 
        spin_lock_irqsave(&np->lock, flags);
@@ -9930,13 +9926,14 @@ static int niu_resume(struct pci_dev *pdev)
        return err;
 }
 
+static SIMPLE_DEV_PM_OPS(niu_pm_ops, niu_suspend, niu_resume);
+
 static struct pci_driver niu_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = niu_pci_tbl,
        .probe          = niu_pci_init_one,
        .remove         = niu_pci_remove_one,
-       .suspend        = niu_suspend,
-       .resume         = niu_resume,
+       .driver.pm      = &niu_pm_ops,
 };
 
 #ifdef CONFIG_SPARC64
index 2d392a7..eeb8518 100644 (file)
@@ -670,7 +670,8 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
                        dma_addr = le64_to_cpu(txd->buffer);
                        dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ;
 
-                       pci_unmap_page(gp->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE);
+                       dma_unmap_page(&gp->pdev->dev, dma_addr, dma_len,
+                                      DMA_TO_DEVICE);
                        entry = NEXT_TX(entry);
                }
 
@@ -809,16 +810,15 @@ static int gem_rx(struct gem *gp, int work_to_do)
                                drops++;
                                goto drop_it;
                        }
-                       pci_unmap_page(gp->pdev, dma_addr,
-                                      RX_BUF_ALLOC_SIZE(gp),
-                                      PCI_DMA_FROMDEVICE);
+                       dma_unmap_page(&gp->pdev->dev, dma_addr,
+                                      RX_BUF_ALLOC_SIZE(gp), DMA_FROM_DEVICE);
                        gp->rx_skbs[entry] = new_skb;
                        skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET));
-                       rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev,
+                       rxd->buffer = cpu_to_le64(dma_map_page(&gp->pdev->dev,
                                                               virt_to_page(new_skb->data),
                                                               offset_in_page(new_skb->data),
                                                               RX_BUF_ALLOC_SIZE(gp),
-                                                              PCI_DMA_FROMDEVICE));
+                                                              DMA_FROM_DEVICE));
                        skb_reserve(new_skb, RX_OFFSET);
 
                        /* Trim the original skb for the netif. */
@@ -833,9 +833,11 @@ static int gem_rx(struct gem *gp, int work_to_do)
 
                        skb_reserve(copy_skb, 2);
                        skb_put(copy_skb, len);
-                       pci_dma_sync_single_for_cpu(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+                       dma_sync_single_for_cpu(&gp->pdev->dev, dma_addr, len,
+                                               DMA_FROM_DEVICE);
                        skb_copy_from_linear_data(skb, copy_skb->data, len);
-                       pci_dma_sync_single_for_device(gp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+                       dma_sync_single_for_device(&gp->pdev->dev, dma_addr,
+                                                  len, DMA_FROM_DEVICE);
 
                        /* We'll reuse the original ring buffer. */
                        skb = copy_skb;
@@ -1020,10 +1022,10 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
                u32 len;
 
                len = skb->len;
-               mapping = pci_map_page(gp->pdev,
+               mapping = dma_map_page(&gp->pdev->dev,
                                       virt_to_page(skb->data),
                                       offset_in_page(skb->data),
-                                      len, PCI_DMA_TODEVICE);
+                                      len, DMA_TO_DEVICE);
                ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len;
                if (gem_intme(entry))
                        ctrl |= TXDCTRL_INTME;
@@ -1046,9 +1048,10 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
                 * Otherwise we could race with the device.
                 */
                first_len = skb_headlen(skb);
-               first_mapping = pci_map_page(gp->pdev, virt_to_page(skb->data),
+               first_mapping = dma_map_page(&gp->pdev->dev,
+                                            virt_to_page(skb->data),
                                             offset_in_page(skb->data),
-                                            first_len, PCI_DMA_TODEVICE);
+                                            first_len, DMA_TO_DEVICE);
                entry = NEXT_TX(entry);
 
                for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -1574,9 +1577,9 @@ static void gem_clean_rings(struct gem *gp)
                if (gp->rx_skbs[i] != NULL) {
                        skb = gp->rx_skbs[i];
                        dma_addr = le64_to_cpu(rxd->buffer);
-                       pci_unmap_page(gp->pdev, dma_addr,
+                       dma_unmap_page(&gp->pdev->dev, dma_addr,
                                       RX_BUF_ALLOC_SIZE(gp),
-                                      PCI_DMA_FROMDEVICE);
+                                      DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                        gp->rx_skbs[i] = NULL;
                }
@@ -1598,9 +1601,9 @@ static void gem_clean_rings(struct gem *gp)
 
                                txd = &gb->txd[ent];
                                dma_addr = le64_to_cpu(txd->buffer);
-                               pci_unmap_page(gp->pdev, dma_addr,
+                               dma_unmap_page(&gp->pdev->dev, dma_addr,
                                               le64_to_cpu(txd->control_word) &
-                                              TXDCTRL_BUFSZ, PCI_DMA_TODEVICE);
+                                              TXDCTRL_BUFSZ, DMA_TO_DEVICE);
 
                                if (frag != skb_shinfo(skb)->nr_frags)
                                        i++;
@@ -1637,11 +1640,11 @@ static void gem_init_rings(struct gem *gp)
 
                gp->rx_skbs[i] = skb;
                skb_put(skb, (gp->rx_buf_sz + RX_OFFSET));
-               dma_addr = pci_map_page(gp->pdev,
+               dma_addr = dma_map_page(&gp->pdev->dev,
                                        virt_to_page(skb->data),
                                        offset_in_page(skb->data),
                                        RX_BUF_ALLOC_SIZE(gp),
-                                       PCI_DMA_FROMDEVICE);
+                                       DMA_FROM_DEVICE);
                rxd->buffer = cpu_to_le64(dma_addr);
                dma_wmb();
                rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
@@ -2139,20 +2142,6 @@ static int gem_do_start(struct net_device *dev)
        struct gem *gp = netdev_priv(dev);
        int rc;
 
-       /* Enable the cell */
-       gem_get_cell(gp);
-
-       /* Make sure PCI access and bus master are enabled */
-       rc = pci_enable_device(gp->pdev);
-       if (rc) {
-               netdev_err(dev, "Failed to enable chip on PCI bus !\n");
-
-               /* Put cell and forget it for now, it will be considered as
-                * still asleep, a new sleep cycle may bring it back
-                */
-               gem_put_cell(gp);
-               return -ENXIO;
-       }
        pci_set_master(gp->pdev);
 
        /* Init & setup chip hardware */
@@ -2230,13 +2219,6 @@ static void gem_do_stop(struct net_device *dev, int wol)
 
        /* Shut the PHY down eventually and setup WOL */
        gem_stop_phy(gp, wol);
-
-       /* Make sure bus master is disabled */
-       pci_disable_device(gp->pdev);
-
-       /* Cell not needed neither if no WOL */
-       if (!wol)
-               gem_put_cell(gp);
 }
 
 static void gem_reset_task(struct work_struct *work)
@@ -2288,26 +2270,53 @@ static void gem_reset_task(struct work_struct *work)
 
 static int gem_open(struct net_device *dev)
 {
+       struct gem *gp = netdev_priv(dev);
+       int rc;
+
        /* We allow open while suspended, we just do nothing,
         * the chip will be initialized in resume()
         */
-       if (netif_device_present(dev))
+       if (netif_device_present(dev)) {
+               /* Enable the cell */
+               gem_get_cell(gp);
+
+               /* Make sure PCI access and bus master are enabled */
+               rc = pci_enable_device(gp->pdev);
+               if (rc) {
+                       netdev_err(dev, "Failed to enable chip on PCI bus !\n");
+
+                       /* Put cell and forget it for now, it will be considered
+                        * as still asleep, a new sleep cycle may bring it back
+                        */
+                       gem_put_cell(gp);
+                       return -ENXIO;
+               }
                return gem_do_start(dev);
+       }
+
        return 0;
 }
 
 static int gem_close(struct net_device *dev)
 {
-       if (netif_device_present(dev))
+       struct gem *gp = netdev_priv(dev);
+
+       if (netif_device_present(dev)) {
                gem_do_stop(dev, 0);
 
+               /* Make sure bus master is disabled */
+               pci_disable_device(gp->pdev);
+
+               /* Cell not needed either if no WOL */
+               if (!gp->asleep_wol)
+                       gem_put_cell(gp);
+       }
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused gem_suspend(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct gem *gp = netdev_priv(dev);
 
        /* Lock the network stack first to avoid racing with open/close,
@@ -2336,15 +2345,19 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
        gp->asleep_wol = !!gp->wake_on_lan;
        gem_do_stop(dev, gp->asleep_wol);
 
+       /* Cell not needed either if no WOL */
+       if (!gp->asleep_wol)
+               gem_put_cell(gp);
+
        /* Unlock the network stack */
        rtnl_unlock();
 
        return 0;
 }
 
-static int gem_resume(struct pci_dev *pdev)
+static int __maybe_unused gem_resume(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct gem *gp = netdev_priv(dev);
 
        /* See locking comment in gem_suspend */
@@ -2359,6 +2372,9 @@ static int gem_resume(struct pci_dev *pdev)
                return 0;
        }
 
+       /* Enable the cell */
+       gem_get_cell(gp);
+
        /* Restart chip. If that fails there isn't much we can do, we
         * leave things stopped.
         */
@@ -2375,7 +2391,6 @@ static int gem_resume(struct pci_dev *pdev)
 
        return 0;
 }
-#endif /* CONFIG_PM */
 
 static struct net_device_stats *gem_get_stats(struct net_device *dev)
 {
@@ -2802,10 +2817,8 @@ static void gem_remove_one(struct pci_dev *pdev)
                cancel_work_sync(&gp->reset_task);
 
                /* Free resources */
-               pci_free_consistent(pdev,
-                                   sizeof(struct gem_init_block),
-                                   gp->init_block,
-                                   gp->gblock_dvma);
+               dma_free_coherent(&pdev->dev, sizeof(struct gem_init_block),
+                                 gp->init_block, gp->gblock_dvma);
                iounmap(gp->regs);
                pci_release_regions(pdev);
                free_netdev(dev);
@@ -2861,10 +2874,10 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         */
        if (pdev->vendor == PCI_VENDOR_ID_SUN &&
            pdev->device == PCI_DEVICE_ID_SUN_GEM &&
-           !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+           !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                pci_using_dac = 1;
        } else {
-               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        pr_err("No usable DMA configuration, aborting\n");
                        goto err_disable_device;
@@ -2953,8 +2966,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         * PAGE_SIZE aligned.
         */
        gp->init_block = (struct gem_init_block *)
-               pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
-                                    &gp->gblock_dvma);
+               dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block),
+                                  &gp->gblock_dvma, GFP_KERNEL);
        if (!gp->init_block) {
                pr_err("Cannot allocate init block, aborting\n");
                err = -ENOMEM;
@@ -3019,16 +3032,14 @@ err_disable_device:
 
 }
 
+static SIMPLE_DEV_PM_OPS(gem_pm_ops, gem_suspend, gem_resume);
 
 static struct pci_driver gem_driver = {
        .name           = GEM_MODULE_NAME,
        .id_table       = gem_pci_tbl,
        .probe          = gem_init_one,
        .remove         = gem_remove_one,
-#ifdef CONFIG_PM
-       .suspend        = gem_suspend,
-       .resume         = gem_resume,
-#endif /* CONFIG_PM */
+       .driver.pm      = &gem_pm_ops,
 };
 
 module_pci_driver(gem_driver);
index 07046a2..26aa7f3 100644 (file)
@@ -697,7 +697,7 @@ static void xlgmac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
        schedule_work(&pdata->restart_work);
 }
 
-static int xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t xlgmac_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
        struct xlgmac_pdata *pdata = netdev_priv(netdev);
        struct xlgmac_pkt_info *tx_pkt_info;
index 50f5536..abfc4c4 100644 (file)
@@ -156,7 +156,7 @@ config TLAN
 
          Devices currently supported by this driver are Compaq Netelligent,
          Compaq NetFlex and Olicom cards.  Please read the file
-         <file:Documentation/networking/device_drivers/ti/tlan.rst>
+         <file:Documentation/networking/device_drivers/ethernet/ti/tlan.rst>
          for more details.
 
          To compile this driver as a module, choose M here. The module
index 8c4690f..496dafb 100644 (file)
@@ -445,7 +445,7 @@ static int am65_cpsw_set_channels(struct net_device *ndev,
        /* Check if interface is up. Can change the num queues when
         * the interface is down.
         */
-       if (netif_running(ndev))
+       if (common->usage_count)
                return -EBUSY;
 
        am65_cpsw_nuss_remove_tx_chns(common);
@@ -734,6 +734,9 @@ static int am65_cpsw_set_ethtool_priv_flags(struct net_device *ndev, u32 flags)
 
        rrobin = !!(flags & AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN);
 
+       if (common->usage_count)
+               return -EBUSY;
+
        if (common->est_enabled && rrobin) {
                netdev_err(ndev,
                           "p0-rx-ptype-rrobin flag conflicts with QOS\n");
@@ -741,7 +744,6 @@ static int am65_cpsw_set_ethtool_priv_flags(struct net_device *ndev, u32 flags)
        }
 
        common->pf_p0_rx_ptype_rrobin = rrobin;
-       am65_cpsw_nuss_set_p0_ptype(common);
 
        return 0;
 }
index 1492648..9fdcd90 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
 #include <linux/mfd/syscon.h>
+#include <linux/sys_soc.h>
 #include <linux/dma/ti-cppi5.h>
 #include <linux/dma/k3-udma-glue.h>
 
@@ -148,10 +149,11 @@ static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
        common->nuss_ver = readl(common->ss_base);
        common->cpsw_ver = readl(common->cpsw_base);
        dev_info(common->dev,
-                "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X Ports: %u\n",
+                "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X Ports: %u quirks:%08x\n",
                common->nuss_ver,
                common->cpsw_ver,
-               common->port_num + 1);
+               common->port_num + 1,
+               common->pdata.quirks);
 }
 
 void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
@@ -223,6 +225,9 @@ static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
        u32 port_mask, unreg_mcast = 0;
        int ret;
 
+       if (!netif_running(ndev) || !vid)
+               return 0;
+
        ret = pm_runtime_get_sync(common->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(common->dev);
@@ -246,6 +251,9 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
        struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
        int ret;
 
+       if (!netif_running(ndev) || !vid)
+               return 0;
+
        ret = pm_runtime_get_sync(common->dev);
        if (ret < 0) {
                pm_runtime_put_noidle(common->dev);
@@ -571,6 +579,16 @@ static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
        return 0;
 }
 
+static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
+{
+       struct am65_cpsw_port *port = arg;
+
+       if (!vdev)
+               return 0;
+
+       return am65_cpsw_nuss_ndo_slave_add_vid(port->ndev, 0, vid);
+}
+
 static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
 {
        struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
@@ -644,6 +662,9 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
                }
        }
 
+       /* restore vlan configurations */
+       vlan_for_each(ndev, cpsw_restore_vlans, port);
+
        phy_attached_info(port->slave.phy);
        phy_start(port->slave.phy);
 
@@ -1749,6 +1770,10 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
                                common->cpsw_base + AM65_CPSW_NU_FRAM_BASE +
                                (AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));
 
+               port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
+               if (IS_ERR(port->slave.mac_sl))
+                       return PTR_ERR(port->slave.mac_sl);
+
                port->disabled = !of_device_is_available(port_np);
                if (port->disabled)
                        continue;
@@ -1792,10 +1817,6 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
                        return ret;
                }
 
-               port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
-               if (IS_ERR(port->slave.mac_sl))
-                       return PTR_ERR(port->slave.mac_sl);
-
                mac_addr = of_get_mac_address(port_np);
                if (!IS_ERR(mac_addr)) {
                        ether_addr_copy(port->slave.mac_addr, mac_addr);
@@ -1858,7 +1879,7 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
        port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
 
        /* Disable TX checksum offload by default due to HW bug */
-       if (common->pdata->quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
+       if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
                port->ndev->features &= ~NETIF_F_HW_CSUM;
 
        ndev_priv->stats = netdev_alloc_pcpu_stats(struct am65_cpsw_ndev_stats);
@@ -1875,8 +1896,6 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
        netif_napi_add(port->ndev, &common->napi_rx,
                       am65_cpsw_nuss_rx_poll, NAPI_POLL_WEIGHT);
 
-       common->pf_p0_rx_ptype_rrobin = false;
-
        return ret;
 }
 
@@ -1964,21 +1983,50 @@ static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
        }
 }
 
+struct am65_cpsw_soc_pdata {
+       u32     quirks_dis;
+};
+
+static const struct am65_cpsw_soc_pdata am65x_soc_sr2_0 = {
+       .quirks_dis = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
+};
+
+static const struct soc_device_attribute am65_cpsw_socinfo[] = {
+       { .family = "AM65X",
+         .revision = "SR2.0",
+         .data = &am65x_soc_sr2_0
+       },
+       {/* sentinel */}
+};
+
 static const struct am65_cpsw_pdata am65x_sr1_0 = {
        .quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
 };
 
-static const struct am65_cpsw_pdata j721e_sr1_0 = {
+static const struct am65_cpsw_pdata j721e_pdata = {
        .quirks = 0,
 };
 
 static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
-       { .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0 },
-       { .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_sr1_0 },
+       { .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0},
+       { .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata},
        { /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);
 
+static void am65_cpsw_nuss_apply_socinfo(struct am65_cpsw_common *common)
+{
+       const struct soc_device_attribute *soc;
+
+       soc = soc_device_match(am65_cpsw_socinfo);
+       if (soc && soc->data) {
+               const struct am65_cpsw_soc_pdata *socdata = soc->data;
+
+               /* disable quirks */
+               common->pdata.quirks &= ~socdata->quirks_dis;
+       }
+}
+
 static int am65_cpsw_nuss_probe(struct platform_device *pdev)
 {
        struct cpsw_ale_params ale_params = { 0 };
@@ -1997,7 +2045,9 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
        of_id = of_match_device(am65_cpsw_nuss_of_mtable, dev);
        if (!of_id)
                return -EINVAL;
-       common->pdata = of_id->data;
+       common->pdata = *(const struct am65_cpsw_pdata *)of_id->data;
+
+       am65_cpsw_nuss_apply_socinfo(common);
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpsw_nuss");
        common->ss_base = devm_ioremap_resource(&pdev->dev, res);
@@ -2019,6 +2069,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
        common->rx_flow_id_base = -1;
        init_completion(&common->tdown_complete);
        common->tx_ch_num = 1;
+       common->pf_p0_rx_ptype_rrobin = false;
 
        ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
        if (ret) {
index 9faf4fb..94f666e 100644 (file)
@@ -82,7 +82,7 @@ struct am65_cpsw_pdata {
 struct am65_cpsw_common {
        struct device           *dev;
        struct device           *mdio_dev;
-       const struct am65_cpsw_pdata *pdata;
+       struct am65_cpsw_pdata  pdata;
 
        void __iomem            *ss_base;
        void __iomem            *cpsw_base;
index 32eac04..3bdd4db 100644 (file)
@@ -505,7 +505,6 @@ static int am65_cpsw_set_taprio(struct net_device *ndev, void *type_data)
        struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
        struct tc_taprio_qopt_offload *taprio = type_data;
        struct am65_cpsw_est *est_new;
-       size_t size;
        int ret = 0;
 
        if (taprio->cycle_time_extension) {
@@ -513,10 +512,9 @@ static int am65_cpsw_set_taprio(struct net_device *ndev, void *type_data)
                return -EOPNOTSUPP;
        }
 
-       size = sizeof(struct tc_taprio_sched_entry) * taprio->num_entries +
-              sizeof(struct am65_cpsw_est);
-
-       est_new = devm_kzalloc(&ndev->dev, size, GFP_KERNEL);
+       est_new = devm_kzalloc(&ndev->dev,
+                              struct_size(est_new, taprio.entries, taprio->num_entries),
+                              GFP_KERNEL);
        if (!est_new)
                return -ENOMEM;
 
index 8577098..583cd2e 100644 (file)
@@ -70,7 +70,7 @@ MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
 MODULE_LICENSE("GPL");
 
 /* Turn on debugging.
- * See Documentation/networking/device_drivers/ti/tlan.rst for details
+ * See Documentation/networking/device_drivers/ethernet/ti/tlan.rst for details
  */
 static  int            debug;
 module_param(debug, int, 0);
index fbaf3c9..f34c790 100644 (file)
 #define XAE_RAF_TXVSTRPMODE_MASK       0x00000180 /* Tx VLAN STRIP mode */
 #define XAE_RAF_RXVSTRPMODE_MASK       0x00000600 /* Rx VLAN STRIP mode */
 #define XAE_RAF_NEWFNCENBL_MASK                0x00000800 /* New function mode */
-/* Exteneded Multicast Filtering mode */
+/* Extended Multicast Filtering mode */
 #define XAE_RAF_EMULTIFLTRENBL_MASK    0x00001000
 #define XAE_RAF_STATSRST_MASK          0x00002000 /* Stats. Counter Reset */
 #define XAE_RAF_RXBADFRMEN_MASK                0x00004000 /* Recv Bad Frame Enable */
index 480ab72..3e3883a 100644 (file)
@@ -1473,7 +1473,7 @@ do_reset(struct net_device *dev, int full)
     unsigned int ioaddr = dev->base_addr;
     unsigned value;
 
-    pr_debug("%s: do_reset(%p,%d)\n", dev? dev->name:"eth?", dev, full);
+    pr_debug("%s: do_reset(%p,%d)\n", dev->name, dev, full);
 
     hardreset(dev);
     PutByte(XIRCREG_CR, SoftReset); /* set */
index 60cc752..f722079 100644 (file)
@@ -77,8 +77,8 @@ config SKFP
          - Netelligent 100 FDDI SAS UTP
          - Netelligent 100 FDDI SAS Fibre MIC
 
-         Read <file:Documentation/networking/skfp.rst> for information about
-         the driver.
+         Read <file:Documentation/networking/device_drivers/fddi/skfp.rst>
+         for information about the driver.
 
          Questions concerning this driver can be addressed to:
          <linux@syskonnect.de>
index a546eaf..afd5ca3 100644 (file)
@@ -148,7 +148,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
 
        DB_ESSN(2, "fc %x       ft %x", sm->smt_class, sm->smt_type);
        DB_ESSN(2, "ver %x      tran %x", sm->smt_version, sm->smt_tid);
-       DB_ESSN(2, "stn_id %s", addr_to_string(&sm->smt_source));
+       DB_ESSN(2, "stn_id %pM", &sm->smt_source);
 
        DB_ESSN(2, "infolen %x  res %lx", sm->smt_len, msg_res_type);
        DB_ESSN(2, "sbacmd %x", cmd->sba_cmd);
@@ -308,8 +308,8 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
                p = (void *) sm_to_para(smc,sm,SMT_P3210) ;
                overhead = ((struct smt_p_3210 *)p)->mib_overhead ;
 
-               DB_ESSN(2, "ESS: Change Request from %s",
-                       addr_to_string(&sm->smt_source));
+               DB_ESSN(2, "ESS: Change Request from %pM",
+                       &sm->smt_source);
                DB_ESSN(2, "payload= %lx        overhead= %lx",
                        payload, overhead);
 
@@ -339,8 +339,8 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
                        return fs;
                }
 
-               DB_ESSN(2, "ESS: Report Request from %s",
-                       addr_to_string(&sm->smt_source));
+               DB_ESSN(2, "ESS: Report Request from %pM",
+                       &sm->smt_source);
 
                /*
                 * verify that the resource type is sync bw only
index 3a1ceb7..4dd590d 100644 (file)
@@ -640,7 +640,6 @@ void dump_smt(struct s_smc *smc, struct smt_header *sm, char *text);
 #define        dump_smt(smc,sm,text)
 #endif
 
-char* addr_to_string(struct fddi_addr *addr);
 #ifdef DEBUG
 void dump_hex(char *p, int len);
 #endif
index 47c4820..b8c59d8 100644 (file)
@@ -520,8 +520,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
         * ignore any packet with NSA and A-indicator set
         */
        if ( (fs & A_INDICATOR) && m_fc(mb) == FC_SMT_NSA) {
-               DB_SMT("SMT : ignoring NSA with A-indicator set from %s",
-                      addr_to_string(&sm->smt_source));
+               DB_SMT("SMT : ignoring NSA with A-indicator set from %pM",
+                      &sm->smt_source);
                smt_free_mbuf(smc,mb) ;
                return ;
        }
@@ -552,8 +552,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
                break ;
        }
        if (illegal) {
-               DB_SMT("SMT : version = %d, dest = %s",
-                      sm->smt_version, addr_to_string(&sm->smt_source));
+               DB_SMT("SMT : version = %d, dest = %pM",
+                      sm->smt_version, &sm->smt_source);
                smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_VERSION,local) ;
                smt_free_mbuf(smc,mb) ;
                return ;
@@ -582,8 +582,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
                                if (!is_equal(
                                        &smc->mib.m[MAC0].fddiMACUpstreamNbr,
                                        &sm->smt_source)) {
-                                       DB_SMT("SMT : updated my UNA = %s",
-                                              addr_to_string(&sm->smt_source));
+                                       DB_SMT("SMT : updated my UNA = %pM",
+                                              &sm->smt_source);
                                        if (!is_equal(&smc->mib.m[MAC0].
                                            fddiMACUpstreamNbr,&SMT_Unknown)){
                                         /* Do not update unknown address */
@@ -612,8 +612,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
                            is_individual(&sm->smt_source) &&
                            ((!(fs & A_INDICATOR) && m_fc(mb) == FC_SMT_NSA) ||
                             (m_fc(mb) != FC_SMT_NSA))) {
-                               DB_SMT("SMT : replying to NIF request %s",
-                                      addr_to_string(&sm->smt_source));
+                               DB_SMT("SMT : replying to NIF request %pM",
+                                      &sm->smt_source);
                                smt_send_nif(smc,&sm->smt_source,
                                        FC_SMT_INFO,
                                        sm->smt_tid,
@@ -621,8 +621,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
                        }
                        break ;
                case SMT_REPLY :
-                       DB_SMT("SMT : received NIF response from %s",
-                              addr_to_string(&sm->smt_source));
+                       DB_SMT("SMT : received NIF response from %pM",
+                              &sm->smt_source);
                        if (fs & A_INDICATOR) {
                                smc->sm.pend[SMT_TID_NIF] = 0 ;
                                DB_SMT("SMT : duplicate address");
@@ -682,23 +682,23 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
        case SMT_SIF_CONFIG :   /* station information */
                if (sm->smt_type != SMT_REQUEST)
                        break ;
-               DB_SMT("SMT : replying to SIF Config request from %s",
-                      addr_to_string(&sm->smt_source));
+               DB_SMT("SMT : replying to SIF Config request from %pM",
+                      &sm->smt_source);
                smt_send_sif_config(smc,&sm->smt_source,sm->smt_tid,local) ;
                break ;
        case SMT_SIF_OPER :     /* station information */
                if (sm->smt_type != SMT_REQUEST)
                        break ;
-               DB_SMT("SMT : replying to SIF Operation request from %s",
-                      addr_to_string(&sm->smt_source));
+               DB_SMT("SMT : replying to SIF Operation request from %pM",
+                      &sm->smt_source);
                smt_send_sif_operation(smc,&sm->smt_source,sm->smt_tid,local) ;
                break ;
        case SMT_ECF :          /* echo frame */
                switch (sm->smt_type) {
                case SMT_REPLY :
                        smc->mib.priv.fddiPRIVECF_Reply_Rx++ ;
-                       DB_SMT("SMT: received ECF reply from %s",
-                              addr_to_string(&sm->smt_source));
+                       DB_SMT("SMT: received ECF reply from %pM",
+                              &sm->smt_source);
                        if (sm_to_para(smc,sm,SMT_P_ECHODATA) == NULL) {
                                DB_SMT("SMT: ECHODATA missing");
                                break ;
@@ -727,8 +727,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
                                        local) ;
                                break ;
                        }
-                       DB_SMT("SMT - sending ECF reply to %s",
-                              addr_to_string(&sm->smt_source));
+                       DB_SMT("SMT - sending ECF reply to %pM",
+                              &sm->smt_source);
 
                        /* set destination addr.  & reply */
                        sm->smt_dest = sm->smt_source ;
@@ -794,8 +794,8 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
                 * we need to send a RDF frame according to 8.1.3.1.1,
                 * only if it is a REQUEST.
                 */
-               DB_SMT("SMT : class = %d, send RDF to %s",
-                      sm->smt_class, addr_to_string(&sm->smt_source));
+               DB_SMT("SMT : class = %d, send RDF to %pM",
+                      sm->smt_class, &sm->smt_source);
 
                smt_send_rdf(smc,mb,m_fc(mb),SMT_RDF_CLASS,local) ;
                break ;
@@ -864,8 +864,8 @@ static void smt_send_rdf(struct s_smc *smc, SMbuf *rej, int fc, int reason,
        if (sm->smt_type != SMT_REQUEST)
                return ;
 
-       DB_SMT("SMT: sending RDF to %s,reason = 0x%x",
-              addr_to_string(&sm->smt_source), reason);
+       DB_SMT("SMT: sending RDF to %pM,reason = 0x%x",
+              &sm->smt_source, reason);
 
 
        /*
@@ -1715,22 +1715,6 @@ void fddi_send_antc(struct s_smc *smc, struct fddi_addr *dest)
 }
 #endif
 
-#ifdef DEBUG
-char *addr_to_string(struct fddi_addr *addr)
-{
-       int     i ;
-       static char     string[6*3] = "****" ;
-
-       for (i = 0 ; i < 6 ; i++) {
-               string[i * 3] = hex_asc_hi(addr->a[i]);
-               string[i * 3 + 1] = hex_asc_lo(addr->a[i]);
-               string[i * 3 + 2] = ':';
-       }
-       string[5 * 3 + 2] = 0;
-       return string;
-}
-#endif
-
 /*
  * return static mac index
  */
index 7526658..49b00de 100644 (file)
@@ -48,6 +48,14 @@ struct geneve_dev_node {
        struct geneve_dev *geneve;
 };
 
+struct geneve_config {
+       struct ip_tunnel_info   info;
+       bool                    collect_md;
+       bool                    use_udp6_rx_checksums;
+       bool                    ttl_inherit;
+       enum ifla_geneve_df     df;
+};
+
 /* Pseudo network device */
 struct geneve_dev {
        struct geneve_dev_node hlist4;  /* vni hash table for IPv4 socket */
@@ -56,17 +64,13 @@ struct geneve_dev {
 #endif
        struct net         *net;        /* netns for packet i/o */
        struct net_device  *dev;        /* netdev for geneve tunnel */
-       struct ip_tunnel_info info;
        struct geneve_sock __rcu *sock4;        /* IPv4 socket used for geneve tunnel */
 #if IS_ENABLED(CONFIG_IPV6)
        struct geneve_sock __rcu *sock6;        /* IPv6 socket used for geneve tunnel */
 #endif
        struct list_head   next;        /* geneve's per namespace list */
        struct gro_cells   gro_cells;
-       bool               collect_md;
-       bool               use_udp6_rx_checksums;
-       bool               ttl_inherit;
-       enum ifla_geneve_df df;
+       struct geneve_config cfg;
 };
 
 struct geneve_sock {
@@ -132,8 +136,8 @@ static struct geneve_dev *geneve_lookup(struct geneve_sock *gs,
        hash = geneve_net_vni_hash(vni);
        vni_list_head = &gs->vni_list[hash];
        hlist_for_each_entry_rcu(node, vni_list_head, hlist) {
-               if (eq_tun_id_and_vni((u8 *)&node->geneve->info.key.tun_id, vni) &&
-                   addr == node->geneve->info.key.u.ipv4.dst)
+               if (eq_tun_id_and_vni((u8 *)&node->geneve->cfg.info.key.tun_id, vni) &&
+                   addr == node->geneve->cfg.info.key.u.ipv4.dst)
                        return node->geneve;
        }
        return NULL;
@@ -151,8 +155,8 @@ static struct geneve_dev *geneve6_lookup(struct geneve_sock *gs,
        hash = geneve_net_vni_hash(vni);
        vni_list_head = &gs->vni_list[hash];
        hlist_for_each_entry_rcu(node, vni_list_head, hlist) {
-               if (eq_tun_id_and_vni((u8 *)&node->geneve->info.key.tun_id, vni) &&
-                   ipv6_addr_equal(&addr6, &node->geneve->info.key.u.ipv6.dst))
+               if (eq_tun_id_and_vni((u8 *)&node->geneve->cfg.info.key.tun_id, vni) &&
+                   ipv6_addr_equal(&addr6, &node->geneve->cfg.info.key.u.ipv6.dst))
                        return node->geneve;
        }
        return NULL;
@@ -321,7 +325,7 @@ static int geneve_init(struct net_device *dev)
                return err;
        }
 
-       err = dst_cache_init(&geneve->info.dst_cache, GFP_KERNEL);
+       err = dst_cache_init(&geneve->cfg.info.dst_cache, GFP_KERNEL);
        if (err) {
                free_percpu(dev->tstats);
                gro_cells_destroy(&geneve->gro_cells);
@@ -334,7 +338,7 @@ static void geneve_uninit(struct net_device *dev)
 {
        struct geneve_dev *geneve = netdev_priv(dev);
 
-       dst_cache_destroy(&geneve->info.dst_cache);
+       dst_cache_destroy(&geneve->cfg.info.dst_cache);
        gro_cells_destroy(&geneve->gro_cells);
        free_percpu(dev->tstats);
 }
@@ -654,19 +658,19 @@ static int geneve_sock_add(struct geneve_dev *geneve, bool ipv6)
        __u8 vni[3];
        __u32 hash;
 
-       gs = geneve_find_sock(gn, ipv6 ? AF_INET6 : AF_INET, geneve->info.key.tp_dst);
+       gs = geneve_find_sock(gn, ipv6 ? AF_INET6 : AF_INET, geneve->cfg.info.key.tp_dst);
        if (gs) {
                gs->refcnt++;
                goto out;
        }
 
-       gs = geneve_socket_create(net, geneve->info.key.tp_dst, ipv6,
-                                 geneve->use_udp6_rx_checksums);
+       gs = geneve_socket_create(net, geneve->cfg.info.key.tp_dst, ipv6,
+                                 geneve->cfg.use_udp6_rx_checksums);
        if (IS_ERR(gs))
                return PTR_ERR(gs);
 
 out:
-       gs->collect_md = geneve->collect_md;
+       gs->collect_md = geneve->cfg.collect_md;
 #if IS_ENABLED(CONFIG_IPV6)
        if (ipv6) {
                rcu_assign_pointer(geneve->sock6, gs);
@@ -679,7 +683,7 @@ out:
        }
        node->geneve = geneve;
 
-       tunnel_id_to_vni(geneve->info.key.tun_id, vni);
+       tunnel_id_to_vni(geneve->cfg.info.key.tun_id, vni);
        hash = geneve_net_vni_hash(vni);
        hlist_add_head_rcu(&node->hlist, &gs->vni_list[hash]);
        return 0;
@@ -688,11 +692,11 @@ out:
 static int geneve_open(struct net_device *dev)
 {
        struct geneve_dev *geneve = netdev_priv(dev);
-       bool metadata = geneve->collect_md;
+       bool metadata = geneve->cfg.collect_md;
        bool ipv4, ipv6;
        int ret = 0;
 
-       ipv6 = geneve->info.mode & IP_TUNNEL_INFO_IPV6 || metadata;
+       ipv6 = geneve->cfg.info.mode & IP_TUNNEL_INFO_IPV6 || metadata;
        ipv4 = !ipv6 || metadata;
 #if IS_ENABLED(CONFIG_IPV6)
        if (ipv6) {
@@ -791,7 +795,7 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb,
        fl4->saddr = info->key.u.ipv4.src;
 
        tos = info->key.tos;
-       if ((tos == 1) && !geneve->collect_md) {
+       if ((tos == 1) && !geneve->cfg.collect_md) {
                tos = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
                use_cache = false;
        }
@@ -840,7 +844,7 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb,
        fl6->daddr = info->key.u.ipv6.dst;
        fl6->saddr = info->key.u.ipv6.src;
        prio = info->key.tos;
-       if ((prio == 1) && !geneve->collect_md) {
+       if ((prio == 1) && !geneve->cfg.collect_md) {
                prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
                use_cache = false;
        }
@@ -893,22 +897,22 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                              GENEVE_IPV4_HLEN + info->options_len);
 
        sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
-       if (geneve->collect_md) {
+       if (geneve->cfg.collect_md) {
                tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
                ttl = key->ttl;
 
                df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
        } else {
                tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
-               if (geneve->ttl_inherit)
+               if (geneve->cfg.ttl_inherit)
                        ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
                else
                        ttl = key->ttl;
                ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
 
-               if (geneve->df == GENEVE_DF_SET) {
+               if (geneve->cfg.df == GENEVE_DF_SET) {
                        df = htons(IP_DF);
-               } else if (geneve->df == GENEVE_DF_INHERIT) {
+               } else if (geneve->cfg.df == GENEVE_DF_INHERIT) {
                        struct ethhdr *eth = eth_hdr(skb);
 
                        if (ntohs(eth->h_proto) == ETH_P_IPV6) {
@@ -927,7 +931,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                return err;
 
        udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr,
-                           tos, ttl, df, sport, geneve->info.key.tp_dst,
+                           tos, ttl, df, sport, geneve->cfg.info.key.tp_dst,
                            !net_eq(geneve->net, dev_net(geneve->dev)),
                            !(info->key.tun_flags & TUNNEL_CSUM));
        return 0;
@@ -954,13 +958,13 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
        skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len);
 
        sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
-       if (geneve->collect_md) {
+       if (geneve->cfg.collect_md) {
                prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
                ttl = key->ttl;
        } else {
                prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
                                           ip_hdr(skb), skb);
-               if (geneve->ttl_inherit)
+               if (geneve->cfg.ttl_inherit)
                        ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
                else
                        ttl = key->ttl;
@@ -972,7 +976,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 
        udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
                             &fl6.saddr, &fl6.daddr, prio, ttl,
-                            info->key.label, sport, geneve->info.key.tp_dst,
+                            info->key.label, sport, geneve->cfg.info.key.tp_dst,
                             !(info->key.tun_flags & TUNNEL_CSUM));
        return 0;
 }
@@ -984,7 +988,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
        struct ip_tunnel_info *info = NULL;
        int err;
 
-       if (geneve->collect_md) {
+       if (geneve->cfg.collect_md) {
                info = skb_tunnel_info(skb);
                if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
                        netdev_dbg(dev, "no tunnel metadata\n");
@@ -993,7 +997,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
                        return NETDEV_TX_OK;
                }
        } else {
-               info = &geneve->info;
+               info = &geneve->cfg.info;
        }
 
        rcu_read_lock();
@@ -1065,7 +1069,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 
        info->key.tp_src = udp_flow_src_port(geneve->net, skb,
                                             1, USHRT_MAX, true);
-       info->key.tp_dst = geneve->info.key.tp_dst;
+       info->key.tp_dst = geneve->cfg.info.key.tp_dst;
        return 0;
 }
 
@@ -1227,13 +1231,13 @@ static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
        *tun_on_same_port = false;
        *tun_collect_md = false;
        list_for_each_entry(geneve, &gn->geneve_list, next) {
-               if (info->key.tp_dst == geneve->info.key.tp_dst) {
-                       *tun_collect_md = geneve->collect_md;
+               if (info->key.tp_dst == geneve->cfg.info.key.tp_dst) {
+                       *tun_collect_md = geneve->cfg.collect_md;
                        *tun_on_same_port = true;
                }
-               if (info->key.tun_id == geneve->info.key.tun_id &&
-                   info->key.tp_dst == geneve->info.key.tp_dst &&
-                   !memcmp(&info->key.u, &geneve->info.key.u, sizeof(info->key.u)))
+               if (info->key.tun_id == geneve->cfg.info.key.tun_id &&
+                   info->key.tp_dst == geneve->cfg.info.key.tp_dst &&
+                   !memcmp(&info->key.u, &geneve->cfg.info.key.u, sizeof(info->key.u)))
                        t = geneve;
        }
        return t;
@@ -1257,16 +1261,15 @@ static bool geneve_dst_addr_equal(struct ip_tunnel_info *a,
 
 static int geneve_configure(struct net *net, struct net_device *dev,
                            struct netlink_ext_ack *extack,
-                           const struct ip_tunnel_info *info,
-                           bool metadata, bool ipv6_rx_csum,
-                           bool ttl_inherit, enum ifla_geneve_df df)
+                           const struct geneve_config *cfg)
 {
        struct geneve_net *gn = net_generic(net, geneve_net_id);
        struct geneve_dev *t, *geneve = netdev_priv(dev);
+       const struct ip_tunnel_info *info = &cfg->info;
        bool tun_collect_md, tun_on_same_port;
        int err, encap_len;
 
-       if (metadata && !is_tnl_info_zero(info)) {
+       if (cfg->collect_md && !is_tnl_info_zero(info)) {
                NL_SET_ERR_MSG(extack,
                               "Device is externally controlled, so attributes (VNI, Port, and so on) must not be specified");
                return -EINVAL;
@@ -1281,7 +1284,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
 
        /* make enough headroom for basic scenario */
        encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
-       if (!metadata && ip_tunnel_info_af(info) == AF_INET) {
+       if (!cfg->collect_md && ip_tunnel_info_af(info) == AF_INET) {
                encap_len += sizeof(struct iphdr);
                dev->max_mtu -= sizeof(struct iphdr);
        } else {
@@ -1290,7 +1293,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
        }
        dev->needed_headroom = encap_len + ETH_HLEN;
 
-       if (metadata) {
+       if (cfg->collect_md) {
                if (tun_on_same_port) {
                        NL_SET_ERR_MSG(extack,
                                       "There can be only one externally controlled device on a destination port");
@@ -1304,12 +1307,8 @@ static int geneve_configure(struct net *net, struct net_device *dev,
                }
        }
 
-       dst_cache_reset(&geneve->info.dst_cache);
-       geneve->info = *info;
-       geneve->collect_md = metadata;
-       geneve->use_udp6_rx_checksums = ipv6_rx_csum;
-       geneve->ttl_inherit = ttl_inherit;
-       geneve->df = df;
+       dst_cache_reset(&geneve->cfg.info.dst_cache);
+       memcpy(&geneve->cfg, cfg, sizeof(*cfg));
 
        err = register_netdevice(dev);
        if (err)
@@ -1327,11 +1326,10 @@ static void init_tnl_info(struct ip_tunnel_info *info, __u16 dst_port)
 
 static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
                          struct netlink_ext_ack *extack,
-                         struct ip_tunnel_info *info, bool *metadata,
-                         bool *use_udp6_rx_checksums, bool *ttl_inherit,
-                         enum ifla_geneve_df *df, bool changelink)
+                         struct geneve_config *cfg, bool changelink)
 {
        int attrtype;
+       struct ip_tunnel_info *info = &cfg->info;
 
        if (data[IFLA_GENEVE_REMOTE] && data[IFLA_GENEVE_REMOTE6]) {
                NL_SET_ERR_MSG(extack,
@@ -1378,7 +1376,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
                        return -EINVAL;
                }
                info->key.tun_flags |= TUNNEL_CSUM;
-               *use_udp6_rx_checksums = true;
+               cfg->use_udp6_rx_checksums = true;
 #else
                NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6],
                                    "IPv6 support not enabled in the kernel");
@@ -1406,19 +1404,19 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
 
        if (data[IFLA_GENEVE_TTL_INHERIT]) {
                if (nla_get_u8(data[IFLA_GENEVE_TTL_INHERIT]))
-                       *ttl_inherit = true;
+                       cfg->ttl_inherit = true;
                else
-                       *ttl_inherit = false;
+                       cfg->ttl_inherit = false;
        } else if (data[IFLA_GENEVE_TTL]) {
                info->key.ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
-               *ttl_inherit = false;
+               cfg->ttl_inherit = false;
        }
 
        if (data[IFLA_GENEVE_TOS])
                info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
 
        if (data[IFLA_GENEVE_DF])
-               *df = nla_get_u8(data[IFLA_GENEVE_DF]);
+               cfg->df = nla_get_u8(data[IFLA_GENEVE_DF]);
 
        if (data[IFLA_GENEVE_LABEL]) {
                info->key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) &
@@ -1443,7 +1441,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
                        attrtype = IFLA_GENEVE_COLLECT_METADATA;
                        goto change_notsup;
                }
-               *metadata = true;
+               cfg->collect_md = true;
        }
 
        if (data[IFLA_GENEVE_UDP_CSUM]) {
@@ -1477,7 +1475,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
                        goto change_notsup;
                }
                if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]))
-                       *use_udp6_rx_checksums = false;
+                       cfg->use_udp6_rx_checksums = false;
 #else
                NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX],
                                    "IPv6 support not enabled in the kernel");
@@ -1542,25 +1540,24 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
                          struct nlattr *tb[], struct nlattr *data[],
                          struct netlink_ext_ack *extack)
 {
-       enum ifla_geneve_df df = GENEVE_DF_UNSET;
-       bool use_udp6_rx_checksums = false;
-       struct ip_tunnel_info info;
-       bool ttl_inherit = false;
-       bool metadata = false;
+       struct geneve_config cfg = {
+               .df = GENEVE_DF_UNSET,
+               .use_udp6_rx_checksums = false,
+               .ttl_inherit = false,
+               .collect_md = false,
+       };
        int err;
 
-       init_tnl_info(&info, GENEVE_UDP_PORT);
-       err = geneve_nl2info(tb, data, extack, &info, &metadata,
-                            &use_udp6_rx_checksums, &ttl_inherit, &df, false);
+       init_tnl_info(&cfg.info, GENEVE_UDP_PORT);
+       err = geneve_nl2info(tb, data, extack, &cfg, false);
        if (err)
                return err;
 
-       err = geneve_configure(net, dev, extack, &info, metadata,
-                              use_udp6_rx_checksums, ttl_inherit, df);
+       err = geneve_configure(net, dev, extack, &cfg);
        if (err)
                return err;
 
-       geneve_link_config(dev, &info, tb);
+       geneve_link_config(dev, &cfg.info, tb);
 
        return 0;
 }
@@ -1616,39 +1613,28 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
 {
        struct geneve_dev *geneve = netdev_priv(dev);
        struct geneve_sock *gs4, *gs6;
-       struct ip_tunnel_info info;
-       bool metadata;
-       bool use_udp6_rx_checksums;
-       enum ifla_geneve_df df;
-       bool ttl_inherit;
+       struct geneve_config cfg;
        int err;
 
        /* If the geneve device is configured for metadata (or externally
         * controlled, for example, OVS), then nothing can be changed.
         */
-       if (geneve->collect_md)
+       if (geneve->cfg.collect_md)
                return -EOPNOTSUPP;
 
        /* Start with the existing info. */
-       memcpy(&info, &geneve->info, sizeof(info));
-       metadata = geneve->collect_md;
-       use_udp6_rx_checksums = geneve->use_udp6_rx_checksums;
-       ttl_inherit = geneve->ttl_inherit;
-       err = geneve_nl2info(tb, data, extack, &info, &metadata,
-                            &use_udp6_rx_checksums, &ttl_inherit, &df, true);
+       memcpy(&cfg, &geneve->cfg, sizeof(cfg));
+       err = geneve_nl2info(tb, data, extack, &cfg, true);
        if (err)
                return err;
 
-       if (!geneve_dst_addr_equal(&geneve->info, &info)) {
-               dst_cache_reset(&info.dst_cache);
-               geneve_link_config(dev, &info, tb);
+       if (!geneve_dst_addr_equal(&geneve->cfg.info, &cfg.info)) {
+               dst_cache_reset(&cfg.info.dst_cache);
+               geneve_link_config(dev, &cfg.info, tb);
        }
 
        geneve_quiesce(geneve, &gs4, &gs6);
-       geneve->info = info;
-       geneve->collect_md = metadata;
-       geneve->use_udp6_rx_checksums = use_udp6_rx_checksums;
-       geneve->ttl_inherit = ttl_inherit;
+       memcpy(&geneve->cfg, &cfg, sizeof(cfg));
        geneve_unquiesce(geneve, gs4, gs6);
 
        return 0;
@@ -1682,9 +1668,9 @@ static size_t geneve_get_size(const struct net_device *dev)
 static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
        struct geneve_dev *geneve = netdev_priv(dev);
-       struct ip_tunnel_info *info = &geneve->info;
-       bool ttl_inherit = geneve->ttl_inherit;
-       bool metadata = geneve->collect_md;
+       struct ip_tunnel_info *info = &geneve->cfg.info;
+       bool ttl_inherit = geneve->cfg.ttl_inherit;
+       bool metadata = geneve->cfg.collect_md;
        __u8 tmp_vni[3];
        __u32 vni;
 
@@ -1717,7 +1703,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
            nla_put_be32(skb, IFLA_GENEVE_LABEL, info->key.label))
                goto nla_put_failure;
 
-       if (nla_put_u8(skb, IFLA_GENEVE_DF, geneve->df))
+       if (nla_put_u8(skb, IFLA_GENEVE_DF, geneve->cfg.df))
                goto nla_put_failure;
 
        if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst))
@@ -1728,7 +1714,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
 
 #if IS_ENABLED(CONFIG_IPV6)
        if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
-                      !geneve->use_udp6_rx_checksums))
+                      !geneve->cfg.use_udp6_rx_checksums))
                goto nla_put_failure;
 #endif
 
@@ -1759,10 +1745,15 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
                                        u8 name_assign_type, u16 dst_port)
 {
        struct nlattr *tb[IFLA_MAX + 1];
-       struct ip_tunnel_info info;
        struct net_device *dev;
        LIST_HEAD(list_kill);
        int err;
+       struct geneve_config cfg = {
+               .df = GENEVE_DF_UNSET,
+               .use_udp6_rx_checksums = true,
+               .ttl_inherit = false,
+               .collect_md = true,
+       };
 
        memset(tb, 0, sizeof(tb));
        dev = rtnl_create_link(net, name, name_assign_type,
@@ -1770,9 +1761,8 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
        if (IS_ERR(dev))
                return dev;
 
-       init_tnl_info(&info, dst_port);
-       err = geneve_configure(net, dev, NULL, &info,
-                              true, true, false, GENEVE_DF_UNSET);
+       init_tnl_info(&cfg.info, dst_port);
+       err = geneve_configure(net, dev, NULL, &cfg);
        if (err) {
                free_netdev(dev);
                return ERR_PTR(err);
@@ -1806,9 +1796,11 @@ static int geneve_netdevice_event(struct notifier_block *unused,
            event == NETDEV_UDP_TUNNEL_DROP_INFO) {
                geneve_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO);
        } else if (event == NETDEV_UNREGISTER) {
-               geneve_offload_rx_ports(dev, false);
+               if (!dev->udp_tunnel_nic_info)
+                       geneve_offload_rx_ports(dev, false);
        } else if (event == NETDEV_REGISTER) {
-               geneve_offload_rx_ports(dev, true);
+               if (!dev->udp_tunnel_nic_info)
+                       geneve_offload_rx_ports(dev, true);
        }
 
        return NOTIFY_DONE;
index 70f754a..f4843f9 100644 (file)
@@ -84,8 +84,9 @@ config SCC
        help
          These cards are used to connect your Linux box to an amateur radio
          in order to communicate with other computers. If you want to use
-         this, read <file:Documentation/networking/z8530drv.rst> and the
-         AX25-HOWTO, available from
+         this, read
+         <file:Documentation/networking/device_drivers/hamradio/z8530drv.rst>
+         and the AX25-HOWTO, available from
          <http://www.tldp.org/docs.html#howto>. Also make sure to say Y
          to "Amateur Radio AX.25 Level 2" support.
 
@@ -98,7 +99,8 @@ config SCC_DELAY
        help
          Say Y here if you experience problems with the SCC driver not
          working properly; please read
-         <file:Documentation/networking/z8530drv.rst> for details.
+         <file:Documentation/networking/device_drivers/hamradio/z8530drv.rst>
+         for details.
 
          If unsure, say N.
 
@@ -127,7 +129,7 @@ config BAYCOM_SER_FDX
          your serial interface chip. To configure the driver, use the sethdlc
          utility available in the standard ax25 utilities package. For
          information on the modems, see <http://www.baycom.de/> and
-         <file:Documentation/networking/baycom.rst>.
+         <file:Documentation/networking/device_drivers/hamradio/baycom.rst>.
 
          To compile this driver as a module, choose M here: the module
          will be called baycom_ser_fdx.  This is recommended.
@@ -145,7 +147,7 @@ config BAYCOM_SER_HDX
          the driver, use the sethdlc utility available in the standard ax25
          utilities package. For information on the modems, see
          <http://www.baycom.de/> and
-         <file:Documentation/networking/baycom.rst>.
+         <file:Documentation/networking/device_drivers/hamradio/baycom.rst>.
 
          To compile this driver as a module, choose M here: the module
          will be called baycom_ser_hdx.  This is recommended.
@@ -160,7 +162,7 @@ config BAYCOM_PAR
          par96 designs. To configure the driver, use the sethdlc utility
          available in the standard ax25 utilities package. For information on
          the modems, see <http://www.baycom.de/> and the file
-         <file:Documentation/networking/baycom.rst>.
+         <file:Documentation/networking/device_drivers/hamradio/baycom.rst>.
 
          To compile this driver as a module, choose M here: the module
          will be called baycom_par.  This is recommended.
@@ -175,7 +177,7 @@ config BAYCOM_EPP
          designs. To configure the driver, use the sethdlc utility available
          in the standard ax25 utilities package. For information on the
          modems, see <http://www.baycom.de/> and the file
-         <file:Documentation/networking/baycom.rst>.
+         <file:Documentation/networking/device_drivers/hamradio/baycom.rst>.
 
          To compile this driver as a module, choose M here: the module
          will be called baycom_epp.  This is recommended.
index 33fdd55..1e91587 100644 (file)
@@ -7,7 +7,7 @@
  *            ------------------
  *
  * You can find a subset of the documentation in 
- * Documentation/networking/z8530drv.rst.
+ * Documentation/networking/device_drivers/hamradio/z8530drv.rst.
  */
 
 /*
index abda736..2181d45 100644 (file)
@@ -897,6 +897,7 @@ struct netvsc_ethtool_stats {
        unsigned long rx_no_memory;
        unsigned long stop_queue;
        unsigned long wake_queue;
+       unsigned long vlan_error;
 };
 
 struct netvsc_ethtool_pcpu_stats {
index 6267f70..d27b90b 100644 (file)
@@ -605,6 +605,29 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx)
                *hash_info = hash;
        }
 
+       /* When using AF_PACKET we need to drop VLAN header from
+        * the frame and update the SKB to allow the HOST OS
+        * to transmit the 802.1Q packet
+        */
+       if (skb->protocol == htons(ETH_P_8021Q)) {
+               u16 vlan_tci;
+
+               skb_reset_mac_header(skb);
+               if (eth_type_vlan(eth_hdr(skb)->h_proto)) {
+                       if (unlikely(__skb_vlan_pop(skb, &vlan_tci) != 0)) {
+                               ++net_device_ctx->eth_stats.vlan_error;
+                               goto drop;
+                       }
+
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
+                       /* Update the NDIS header pkt lengths */
+                       packet->total_data_buflen -= VLAN_HLEN;
+                       packet->total_bytes -= VLAN_HLEN;
+                       rndis_msg->msg_len = packet->total_data_buflen;
+                       rndis_msg->msg.pkt.data_len = packet->total_data_buflen;
+               }
+       }
+
        if (skb_vlan_tag_present(skb)) {
                struct ndis_pkt_8021q_info *vlan;
 
@@ -1427,6 +1450,7 @@ static const struct {
        { "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
        { "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
        { "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
+       { "vlan_error", offsetof(struct netvsc_ethtool_stats, vlan_error) },
 }, pcpu_stats[] = {
        { "cpu%u_rx_packets",
                offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
@@ -1934,6 +1958,23 @@ syncvf:
        return ret;
 }
 
+static int netvsc_get_regs_len(struct net_device *netdev)
+{
+       return VRSS_SEND_TAB_SIZE * sizeof(u32);
+}
+
+static void netvsc_get_regs(struct net_device *netdev,
+                           struct ethtool_regs *regs, void *p)
+{
+       struct net_device_context *ndc = netdev_priv(netdev);
+       u32 *regs_buff = p;
+
+       /* increase the version, if buffer format is changed. */
+       regs->version = 1;
+
+       memcpy(regs_buff, ndc->tx_table, VRSS_SEND_TAB_SIZE * sizeof(u32));
+}
+
 static u32 netvsc_get_msglevel(struct net_device *ndev)
 {
        struct net_device_context *ndev_ctx = netdev_priv(ndev);
@@ -1950,6 +1991,8 @@ static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
 
 static const struct ethtool_ops ethtool_ops = {
        .get_drvinfo    = netvsc_get_drvinfo,
+       .get_regs_len   = netvsc_get_regs_len,
+       .get_regs       = netvsc_get_regs,
        .get_msglevel   = netvsc_get_msglevel,
        .set_msglevel   = netvsc_set_msglevel,
        .get_link       = ethtool_op_get_link,
index 55226b2..0e63d35 100644 (file)
@@ -336,6 +336,7 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
 {
        struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
        struct completion *completion = &evt_ring->completion;
+       struct device *dev = gsi->dev;
        u32 val;
 
        val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
@@ -344,8 +345,8 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
        if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion))
                return 0;       /* Success! */
 
-       dev_err(gsi->dev, "GSI command %u to event ring %u timed out "
-               "(state is %u)\n", opcode, evt_ring_id, evt_ring->state);
+       dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
+               opcode, evt_ring_id, evt_ring->state);
 
        return -ETIMEDOUT;
 }
@@ -358,13 +359,15 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
 
        /* Get initial event ring state */
        evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
-
-       if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
+       if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
+               dev_err(gsi->dev, "bad event ring state %u before alloc\n",
+                       evt_ring->state);
                return -EINVAL;
+       }
 
        ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
        if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
-               dev_err(gsi->dev, "bad event ring state (%u) after alloc\n",
+               dev_err(gsi->dev, "bad event ring state %u after alloc\n",
                        evt_ring->state);
                ret = -EIO;
        }
@@ -381,14 +384,14 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
 
        if (state != GSI_EVT_RING_STATE_ALLOCATED &&
            state != GSI_EVT_RING_STATE_ERROR) {
-               dev_err(gsi->dev, "bad event ring state (%u) before reset\n",
+               dev_err(gsi->dev, "bad event ring state %u before reset\n",
                        evt_ring->state);
                return;
        }
 
        ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
        if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
-               dev_err(gsi->dev, "bad event ring state (%u) after reset\n",
+               dev_err(gsi->dev, "bad event ring state %u after reset\n",
                        evt_ring->state);
 }
 
@@ -399,14 +402,14 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
        int ret;
 
        if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
-               dev_err(gsi->dev, "bad event ring state (%u) before dealloc\n",
+               dev_err(gsi->dev, "bad event ring state %u before dealloc\n",
                        evt_ring->state);
                return;
        }
 
        ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
        if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
-               dev_err(gsi->dev, "bad event ring state (%u) after dealloc\n",
+               dev_err(gsi->dev, "bad event ring state %u after dealloc\n",
                        evt_ring->state);
 }
 
@@ -429,6 +432,7 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
        struct completion *completion = &channel->completion;
        u32 channel_id = gsi_channel_id(channel);
        struct gsi *gsi = channel->gsi;
+       struct device *dev = gsi->dev;
        u32 val;
 
        val = u32_encode_bits(channel_id, CH_CHID_FMASK);
@@ -437,8 +441,7 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
        if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion))
                return 0;       /* Success! */
 
-       dev_err(gsi->dev,
-               "GSI command %u to channel %u timed out (state is %u)\n",
+       dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
                opcode, channel_id, gsi_channel_state(channel));
 
        return -ETIMEDOUT;
@@ -448,21 +451,23 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
 static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
 {
        struct gsi_channel *channel = &gsi->channel[channel_id];
+       struct device *dev = gsi->dev;
        enum gsi_channel_state state;
        int ret;
 
        /* Get initial channel state */
        state = gsi_channel_state(channel);
-       if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+       if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
+               dev_err(dev, "bad channel state %u before alloc\n", state);
                return -EINVAL;
+       }
 
        ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);
 
        /* Channel state will normally have been updated */
        state = gsi_channel_state(channel);
        if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
-               dev_err(gsi->dev, "bad channel state (%u) after alloc\n",
-                       state);
+               dev_err(dev, "bad channel state %u after alloc\n", state);
                ret = -EIO;
        }
 
@@ -472,21 +477,23 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
 /* Start an ALLOCATED channel */
 static int gsi_channel_start_command(struct gsi_channel *channel)
 {
+       struct device *dev = channel->gsi->dev;
        enum gsi_channel_state state;
        int ret;
 
        state = gsi_channel_state(channel);
        if (state != GSI_CHANNEL_STATE_ALLOCATED &&
-           state != GSI_CHANNEL_STATE_STOPPED)
+           state != GSI_CHANNEL_STATE_STOPPED) {
+               dev_err(dev, "bad channel state %u before start\n", state);
                return -EINVAL;
+       }
 
        ret = gsi_channel_command(channel, GSI_CH_START);
 
        /* Channel state will normally have been updated */
        state = gsi_channel_state(channel);
        if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
-               dev_err(channel->gsi->dev,
-                       "bad channel state (%u) after start\n", state);
+               dev_err(dev, "bad channel state %u after start\n", state);
                ret = -EIO;
        }
 
@@ -496,13 +503,23 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
 /* Stop a GSI channel in STARTED state */
 static int gsi_channel_stop_command(struct gsi_channel *channel)
 {
+       struct device *dev = channel->gsi->dev;
        enum gsi_channel_state state;
        int ret;
 
        state = gsi_channel_state(channel);
+
+       /* Channel could have entered STOPPED state since last call
+        * if it timed out.  If so, we're done.
+        */
+       if (state == GSI_CHANNEL_STATE_STOPPED)
+               return 0;
+
        if (state != GSI_CHANNEL_STATE_STARTED &&
-           state != GSI_CHANNEL_STATE_STOP_IN_PROC)
+           state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
+               dev_err(dev, "bad channel state %u before stop\n", state);
                return -EINVAL;
+       }
 
        ret = gsi_channel_command(channel, GSI_CH_STOP);
 
@@ -515,8 +532,7 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
        if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
                return -EAGAIN;
 
-       dev_err(channel->gsi->dev,
-               "bad channel state (%u) after stop\n", state);
+       dev_err(dev, "bad channel state %u after stop\n", state);
 
        return -EIO;
 }
@@ -524,6 +540,7 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
 /* Reset a GSI channel in ALLOCATED or ERROR state. */
 static void gsi_channel_reset_command(struct gsi_channel *channel)
 {
+       struct device *dev = channel->gsi->dev;
        enum gsi_channel_state state;
        int ret;
 
@@ -532,8 +549,7 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
        state = gsi_channel_state(channel);
        if (state != GSI_CHANNEL_STATE_STOPPED &&
            state != GSI_CHANNEL_STATE_ERROR) {
-               dev_err(channel->gsi->dev,
-                       "bad channel state (%u) before reset\n", state);
+               dev_err(dev, "bad channel state %u before reset\n", state);
                return;
        }
 
@@ -542,21 +558,20 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
        /* Channel state will normally have been updated */
        state = gsi_channel_state(channel);
        if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
-               dev_err(channel->gsi->dev,
-                       "bad channel state (%u) after reset\n", state);
+               dev_err(dev, "bad channel state %u after reset\n", state);
 }
 
 /* Deallocate an ALLOCATED GSI channel */
 static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
 {
        struct gsi_channel *channel = &gsi->channel[channel_id];
+       struct device *dev = gsi->dev;
        enum gsi_channel_state state;
        int ret;
 
        state = gsi_channel_state(channel);
        if (state != GSI_CHANNEL_STATE_ALLOCATED) {
-               dev_err(gsi->dev,
-                       "bad channel state (%u) before dealloc\n", state);
+               dev_err(dev, "bad channel state %u before dealloc\n", state);
                return;
        }
 
@@ -565,8 +580,7 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
        /* Channel state will normally have been updated */
        state = gsi_channel_state(channel);
        if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
-               dev_err(gsi->dev,
-                       "bad channel state (%u) after dealloc\n", state);
+               dev_err(dev, "bad channel state %u after dealloc\n", state);
 }
 
 /* Ring an event ring doorbell, reporting the last entry processed by the AP.
@@ -789,20 +803,11 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id)
 int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
 {
        struct gsi_channel *channel = &gsi->channel[channel_id];
-       enum gsi_channel_state state;
        u32 retries;
        int ret;
 
        gsi_channel_freeze(channel);
 
-       /* Channel could have entered STOPPED state since last call if the
-        * STOP command timed out.  We won't stop a channel if stopping it
-        * was successful previously (so we still want the freeze above).
-        */
-       state = gsi_channel_state(channel);
-       if (state == GSI_CHANNEL_STATE_STOPPED)
-               return 0;
-
        /* RX channels might require a little time to enter STOPPED state */
        retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;
 
@@ -1148,8 +1153,8 @@ static irqreturn_t gsi_isr(int irq, void *dev_id)
                                break;
                        default:
                                dev_err(gsi->dev,
-                                       "%s: unrecognized type 0x%08x\n",
-                                       __func__, gsi_intr);
+                                       "unrecognized interrupt type 0x%08x\n",
+                                       gsi_intr);
                                break;
                        }
                } while (intr_mask);
@@ -1253,7 +1258,7 @@ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
        if (ring->virt && addr % size) {
                dma_free_coherent(dev, size, ring->virt, ring->addr);
                dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
-                               size);
+                       size);
                return -EINVAL; /* Not a good error value, but distinct */
        } else if (!ring->virt) {
                return -ENOMEM;
@@ -1358,7 +1363,7 @@ static void gsi_channel_update(struct gsi_channel *channel)
  * gsi_channel_poll_one() - Return a single completed transaction on a channel
  * @channel:   Channel to be polled
  *
- * @Return:    Transaction pointer, or null if none are available
+ * Return:     Transaction pointer, or null if none are available
  *
  * This function returns the first entry on a channel's completed transaction
  * list.  If that list is empty, the hardware is consulted to determine
@@ -1388,8 +1393,8 @@ static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
  * gsi_channel_poll() - NAPI poll function for a channel
  * @napi:      NAPI structure for the channel
  * @budget:    Budget supplied by NAPI core
-
- * @Return:     Number of items polled (<= budget)
+ *
+ * Return:     Number of items polled (<= budget)
  *
  * Single transactions completed by hardware are polled until either
  * the budget is exhausted, or there are no more.  Each transaction
@@ -1644,12 +1649,13 @@ static void gsi_channel_teardown(struct gsi *gsi)
 /* Setup function for GSI.  GSI firmware must be loaded and initialized */
 int gsi_setup(struct gsi *gsi, bool legacy)
 {
+       struct device *dev = gsi->dev;
        u32 val;
 
        /* Here is where we first touch the GSI hardware */
        val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
        if (!(val & ENABLED_FMASK)) {
-               dev_err(gsi->dev, "GSI has not been enabled\n");
+               dev_err(dev, "GSI has not been enabled\n");
                return -EIO;
        }
 
@@ -1657,24 +1663,24 @@ int gsi_setup(struct gsi *gsi, bool legacy)
 
        gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
        if (!gsi->channel_count) {
-               dev_err(gsi->dev, "GSI reports zero channels supported\n");
+               dev_err(dev, "GSI reports zero channels supported\n");
                return -EINVAL;
        }
        if (gsi->channel_count > GSI_CHANNEL_COUNT_MAX) {
-               dev_warn(gsi->dev,
-                       "limiting to %u channels (hardware supports %u)\n",
+               dev_warn(dev,
+                        "limiting to %u channels; hardware supports %u\n",
                         GSI_CHANNEL_COUNT_MAX, gsi->channel_count);
                gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
        }
 
        gsi->evt_ring_count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
        if (!gsi->evt_ring_count) {
-               dev_err(gsi->dev, "GSI reports zero event rings supported\n");
+               dev_err(dev, "GSI reports zero event rings supported\n");
                return -EINVAL;
        }
        if (gsi->evt_ring_count > GSI_EVT_RING_COUNT_MAX) {
-               dev_warn(gsi->dev,
-                       "limiting to %u event rings (hardware supports %u)\n",
+               dev_warn(dev,
+                        "limiting to %u event rings; hardware supports %u\n",
                         GSI_EVT_RING_COUNT_MAX, gsi->evt_ring_count);
                gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
        }
@@ -1760,19 +1766,19 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
 
        /* Make sure channel ids are in the range driver supports */
        if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
-               dev_err(dev, "bad channel id %u (must be less than %u)\n",
+               dev_err(dev, "bad channel id %u; must be less than %u\n",
                        channel_id, GSI_CHANNEL_COUNT_MAX);
                return false;
        }
 
        if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
-               dev_err(dev, "bad EE id %u (AP or modem)\n", data->ee_id);
+               dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
                return false;
        }
 
        if (!data->channel.tlv_count ||
            data->channel.tlv_count > GSI_TLV_MAX) {
-               dev_err(dev, "channel %u bad tlv_count %u (must be 1..%u)\n",
+               dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
                        channel_id, data->channel.tlv_count, GSI_TLV_MAX);
                return false;
        }
@@ -1790,13 +1796,13 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
        }
 
        if (!is_power_of_2(data->channel.tre_count)) {
-               dev_err(dev, "channel %u bad tre_count %u (not power of 2)\n",
+               dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
                        channel_id, data->channel.tre_count);
                return false;
        }
 
        if (!is_power_of_2(data->channel.event_count)) {
-               dev_err(dev, "channel %u bad event_count %u (not power of 2)\n",
+               dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
                        channel_id, data->channel.event_count);
                return false;
        }
@@ -1950,6 +1956,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
             u32 count, const struct ipa_gsi_endpoint_data *data,
             bool modem_alloc)
 {
+       struct device *dev = &pdev->dev;
        struct resource *res;
        resource_size_t size;
        unsigned int irq;
@@ -1957,7 +1964,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
 
        gsi_validate_build();
 
-       gsi->dev = &pdev->dev;
+       gsi->dev = dev;
 
        /* The GSI layer performs NAPI on all endpoints.  NAPI requires a
         * network device structure, but the GSI layer does not have one,
@@ -1968,43 +1975,41 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
        /* Get the GSI IRQ and request for it to wake the system */
        ret = platform_get_irq_byname(pdev, "gsi");
        if (ret <= 0) {
-               dev_err(gsi->dev,
-                       "DT error %d getting \"gsi\" IRQ property\n", ret);
+               dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
                return ret ? : -EINVAL;
        }
        irq = ret;
 
        ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
        if (ret) {
-               dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);
+               dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
                return ret;
        }
        gsi->irq = irq;
 
        ret = enable_irq_wake(gsi->irq);
        if (ret)
-               dev_warn(gsi->dev, "error %d enabling gsi wake irq\n", ret);
+               dev_warn(dev, "error %d enabling gsi wake irq\n", ret);
        gsi->irq_wake_enabled = !ret;
 
        /* Get GSI memory range and map it */
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
        if (!res) {
-               dev_err(gsi->dev,
-                       "DT error getting \"gsi\" memory property\n");
+               dev_err(dev, "DT error getting \"gsi\" memory property\n");
                ret = -ENODEV;
                goto err_disable_irq_wake;
        }
 
        size = resource_size(res);
        if (res->start > U32_MAX || size > U32_MAX - res->start) {
-               dev_err(gsi->dev, "DT memory resource \"gsi\" out of range\n");
+               dev_err(dev, "DT memory resource \"gsi\" out of range\n");
                ret = -EINVAL;
                goto err_disable_irq_wake;
        }
 
        gsi->virt = ioremap(res->start, size);
        if (!gsi->virt) {
-               dev_err(gsi->dev, "unable to remap \"gsi\" memory\n");
+               dev_err(dev, "unable to remap \"gsi\" memory\n");
                ret = -ENOMEM;
                goto err_disable_irq_wake;
        }
index 90a0219..0613127 100644 (file)
@@ -167,7 +167,7 @@ struct gsi {
  * @gsi:       Address of GSI structure embedded in an IPA structure
  * @legacy:    Set up for legacy hardware
  *
- * @Return:    0 if successful, or a negative error code
+ * Return:     0 if successful, or a negative error code
  *
  * Performs initialization that must wait until the GSI hardware is
  * ready (including firmware loaded).
@@ -185,7 +185,7 @@ void gsi_teardown(struct gsi *gsi);
  * @gsi:       GSI pointer
  * @channel_id:        Channel whose limit is to be returned
  *
- * @Return:     The maximum number of TREs oustanding on the channel
+ * Return:      The maximum number of TREs oustanding on the channel
  */
 u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id);
 
@@ -194,7 +194,7 @@ u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id);
  * @gsi:       GSI pointer
  * @channel_id:        Channel whose limit is to be returned
  *
- * @Return:     The maximum TRE count per transaction on the channel
+ * Return:      The maximum TRE count per transaction on the channel
  */
 u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id);
 
@@ -203,7 +203,7 @@ u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id);
  * @gsi:       GSI pointer
  * @channel_id:        Channel to start
  *
- * @Return:    0 if successful, or a negative error code
+ * Return:     0 if successful, or a negative error code
  */
 int gsi_channel_start(struct gsi *gsi, u32 channel_id);
 
@@ -212,7 +212,7 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id);
  * @gsi:       GSI pointer returned by gsi_setup()
  * @channel_id:        Channel to stop
  *
- * @Return:    0 if successful, or a negative error code
+ * Return:     0 if successful, or a negative error code
  */
 int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
 
@@ -238,7 +238,7 @@ int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start);
  * @gsi:       Address of GSI structure embedded in an IPA structure
  * @pdev:      IPA platform device
  *
- * @Return:    0 if successful, or a negative error code
+ * Return:     0 if successful, or a negative error code
  *
  * Early stage initialization of the GSI subsystem, performing tasks
  * that can be done before the GSI hardware is ready to use.
index b57d019..1785c9d 100644 (file)
@@ -44,7 +44,7 @@ void gsi_trans_complete(struct gsi_trans *trans);
  * @channel:   Channel associated with the transaction
  * @index:     Index of the TRE having a transaction
  *
- * @Return:    The GSI transaction pointer associated with the TRE index
+ * Return:     The GSI transaction pointer associated with the TRE index
  */
 struct gsi_trans *gsi_channel_trans_mapped(struct gsi_channel *channel,
                                           u32 index);
@@ -53,7 +53,7 @@ struct gsi_trans *gsi_channel_trans_mapped(struct gsi_channel *channel,
  * gsi_channel_trans_complete() - Return a channel's next completed transaction
  * @channel:   Channel whose next transaction is to be returned
  *
- * @Return:    The next completed transaction, or NULL if nothing new
+ * Return:     The next completed transaction, or NULL if nothing new
  */
 struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel);
 
@@ -76,7 +76,7 @@ void gsi_channel_trans_cancel_pending(struct gsi_channel *channel);
  * @gsi:       GSI pointer
  * @channel_id:        Channel number
  *
- * @Return:    0 if successful, or -ENOMEM on allocation failure
+ * Return:     0 if successful, or -ENOMEM on allocation failure
  *
  * Creates and sets up information for managing transactions on a channel
  */
index 1477fc1..4d4606b 100644 (file)
@@ -75,7 +75,7 @@ struct gsi_trans {
  * @count:     Minimum number of elements in the pool
  * @max_alloc: Maximum number of elements allocated at a time from pool
  *
- * @Return:    0 if successful, or a negative error code
+ * Return:     0 if successful, or a negative error code
  */
 int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
                        u32 max_alloc);
@@ -85,7 +85,7 @@ int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
  * @pool:      Pool pointer
  * @count:     Number of elements to allocate from the pool
  *
- * @Return:    Virtual address of element(s) allocated from the pool
+ * Return:     Virtual address of element(s) allocated from the pool
  */
 void *gsi_trans_pool_alloc(struct gsi_trans_pool *pool, u32 count);
 
@@ -103,7 +103,7 @@ void gsi_trans_pool_exit(struct gsi_trans_pool *pool);
  * @count:     Minimum number of elements in the pool
  * @max_alloc: Maximum number of elements allocated at a time from pool
  *
- * @Return:    0 if successful, or a negative error code
+ * Return:     0 if successful, or a negative error code
  *
  * Structures in this pool reside in DMA-coherent memory.
  */
@@ -115,7 +115,7 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
  * @pool:      DMA pool pointer
  * @addr:      DMA address "handle" associated with the allocation
  *
- * @Return:    Virtual address of element allocated from the pool
+ * Return:     Virtual address of element allocated from the pool
  *
  * Only one element at a time may be allocated from a DMA pool.
  */
@@ -134,7 +134,7 @@ void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool);
  * @tre_count: Number of elements in the transaction
  * @direction: DMA direction for entire SGL (or DMA_NONE)
  *
- * @Return:    A GSI transaction structure, or a null pointer if all
+ * Return:     A GSI transaction structure, or a null pointer if all
  *             available transactions are in use
  */
 struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
@@ -175,7 +175,7 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
  * @trans:     Transaction
  * @skb:       Socket buffer for transfer (outbound)
  *
- * @Return:    0, or -EMSGSIZE if socket data won't fit in transaction.
+ * Return:     0, or -EMSGSIZE if socket data won't fit in transaction.
  */
 int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb);
 
index c5204fd..398f2e4 100644 (file)
@@ -44,7 +44,7 @@
 /**
  * struct ipa_clock - IPA clocking information
  * @count:             Clocking reference count
- * @mutex;             Protects clock enable/disable
+ * @mutex:             Protects clock enable/disable
  * @core:              IPA core clock
  * @memory_path:       Memory interconnect
  * @imem_path:         Internal memory interconnect
@@ -256,6 +256,12 @@ void ipa_clock_put(struct ipa *ipa)
        mutex_unlock(&clock->mutex);
 }
 
+/* Return the current IPA core clock rate */
+u32 ipa_clock_rate(struct ipa *ipa)
+{
+       return ipa->clock ? (u32)clk_get_rate(ipa->clock->core) : 0;
+}
+
 /* Initialize IPA clocking */
 struct ipa_clock *ipa_clock_init(struct device *dev)
 {
index bc52b35..1d70f1d 100644 (file)
@@ -11,10 +11,18 @@ struct device;
 struct ipa;
 
 /**
+ * ipa_clock_rate() - Return the current IPA core clock rate
+ * @ipa:       IPA structure
+ *
+ * Return: The current clock rate (in Hz), or 0.
+ */
+u32 ipa_clock_rate(struct ipa *ipa);
+
+/**
  * ipa_clock_init() - Initialize IPA clocking
  * @dev:       IPA device
  *
- * @Return:    A pointer to an ipa_clock structure, or a pointer-coded error
+ * Return:     A pointer to an ipa_clock structure, or a pointer-coded error
  */
 struct ipa_clock *ipa_clock_init(struct device *dev);
 
index c9ab865..d92dd3f 100644 (file)
@@ -586,6 +586,21 @@ u32 ipa_cmd_tag_process_count(void)
        return 4;
 }
 
+void ipa_cmd_tag_process(struct ipa *ipa)
+{
+       u32 count = ipa_cmd_tag_process_count();
+       struct gsi_trans *trans;
+
+       trans = ipa_cmd_trans_alloc(ipa, count);
+       if (trans) {
+               ipa_cmd_tag_process_add(trans);
+               gsi_trans_commit_wait(trans);
+       } else {
+               dev_err(&ipa->pdev->dev,
+                       "error allocating %u entry tag transaction\n", count);
+       }
+}
+
 static struct ipa_cmd_info *
 ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
 {
index e440aa6..f7e6f87 100644 (file)
@@ -61,7 +61,7 @@ struct ipa_cmd_info {
  * @ipv6:      - Whether the table is for IPv6 or IPv4
  * @hashed:    - Whether the table is hashed or non-hashed
  *
- * @Return:    true if region is valid, false otherwise
+ * Return:     true if region is valid, false otherwise
  */
 bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
                            bool route, bool ipv6, bool hashed);
@@ -70,7 +70,7 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
  * ipa_cmd_data_valid() - Validate command-related configuration is valid
  * @ipa:       - IPA pointer
  *
- * @Return:    true if assumptions required for command are valid
+ * Return:     true if assumptions required for command are valid
  */
 bool ipa_cmd_data_valid(struct ipa *ipa);
 
@@ -95,7 +95,7 @@ static inline bool ipa_cmd_data_valid(struct ipa *ipa)
  * @channel:   AP->IPA command TX GSI channel pointer
  * @tre_count: Number of pool elements to allocate
  *
- * @Return:    0 if successful, or a negative error code
+ * Return:     0 if successful, or a negative error code
  */
 int ipa_cmd_pool_init(struct gsi_channel *gsi_channel, u32 tre_count);
 
@@ -166,17 +166,25 @@ void ipa_cmd_tag_process_add(struct gsi_trans *trans);
 /**
  * ipa_cmd_tag_process_count() - Number of commands in a tag process
  *
- * @Return:    The number of elements to allocate in a transaction
+ * Return:     The number of elements to allocate in a transaction
  *             to hold tag process commands
  */
 u32 ipa_cmd_tag_process_count(void);
 
 /**
+ * ipa_cmd_tag_process() - Perform a tag process
+ * @ipa:       IPA pointer
+ *
+ * Allocate a transaction, add tag process commands to it, and commit it.
+ */
+void ipa_cmd_tag_process(struct ipa *ipa);
+
+/**
  * ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint
  * @ipa:       IPA pointer
  * @tre_count: Number of elements in the transaction
  *
- * @Return:    A GSI transaction structure, or a null pointer if all
+ * Return:     A GSI transaction structure, or a null pointer if all
  *             available transactions are in use
  */
 struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count);
index 52d4b84..de2768d 100644 (file)
@@ -44,7 +44,6 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
                .endpoint = {
                        .seq_type       = IPA_SEQ_INVALID,
                        .config = {
-                               .checksum       = true,
                                .aggregation    = true,
                                .status_enable  = true,
                                .rx = {
index 9f50d0d..b7efd7c 100644 (file)
@@ -21,6 +21,7 @@
 #include "ipa_modem.h"
 #include "ipa_table.h"
 #include "ipa_gsi.h"
+#include "ipa_clock.h"
 
 #define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
 
@@ -36,7 +37,7 @@
 #define IPA_ENDPOINT_QMAP_METADATA_MASK                0x000000ff /* host byte order */
 
 #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX      3
-#define IPA_AGGR_TIME_LIMIT_DEFAULT            1000    /* microseconds */
+#define IPA_AGGR_TIME_LIMIT_DEFAULT            500     /* microseconds */
 
 /** enum ipa_status_opcode - status element opcode hardware values */
 enum ipa_status_opcode {
@@ -318,41 +319,102 @@ ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
 {
        /* assert(endpoint->toward_ipa); */
 
-       (void)ipa_endpoint_init_ctrl(endpoint, enable);
+       /* Delay mode doesn't work properly for IPA v4.2 */
+       if (endpoint->ipa->version != IPA_VERSION_4_2)
+               (void)ipa_endpoint_init_ctrl(endpoint, enable);
 }
 
-/* Returns previous suspend state (true means it was enabled) */
+static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
+{
+       u32 mask = BIT(endpoint->endpoint_id);
+       struct ipa *ipa = endpoint->ipa;
+       u32 offset;
+       u32 val;
+
+       /* assert(mask & ipa->available); */
+       offset = ipa_reg_state_aggr_active_offset(ipa->version);
+       val = ioread32(ipa->reg_virt + offset);
+
+       return !!(val & mask);
+}
+
+static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
+{
+       u32 mask = BIT(endpoint->endpoint_id);
+       struct ipa *ipa = endpoint->ipa;
+
+       /* assert(mask & ipa->available); */
+       iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
+}
+
+/**
+ * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
+ * @endpoint:  Endpoint on which to emulate a suspend
+ *
+ *  Emulate suspend IPA interrupt to unsuspend an endpoint suspended
+ *  with an open aggregation frame.  This is to work around a hardware
+ *  issue in IPA version 3.5.1 where the suspend interrupt will not be
+ *  generated when it should be.
+ */
+static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
+{
+       struct ipa *ipa = endpoint->ipa;
+
+       if (!endpoint->data->aggregation)
+               return;
+
+       /* Nothing to do if the endpoint doesn't have aggregation open */
+       if (!ipa_endpoint_aggr_active(endpoint))
+               return;
+
+       /* Force close aggregation */
+       ipa_endpoint_force_close(endpoint);
+
+       ipa_interrupt_simulate_suspend(ipa->interrupt);
+}
+
+/* Returns previous suspend state (true means suspend was enabled) */
 static bool
 ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
 {
+       bool suspended;
+
+       if (endpoint->ipa->version != IPA_VERSION_3_5_1)
+               return enable;  /* For IPA v4.0+, no change made */
+
        /* assert(!endpoint->toward_ipa); */
 
-       return ipa_endpoint_init_ctrl(endpoint, enable);
+       suspended = ipa_endpoint_init_ctrl(endpoint, enable);
+
+       /* A client suspended with an open aggregation frame will not
+        * generate a SUSPEND IPA interrupt.  If enabling suspend, have
+        * ipa_endpoint_suspend_aggr() handle this.
+        */
+       if (enable && !suspended)
+               ipa_endpoint_suspend_aggr(endpoint);
+
+       return suspended;
 }
 
 /* Enable or disable delay or suspend mode on all modem endpoints */
 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
 {
-       bool support_suspend;
        u32 endpoint_id;
 
        /* DELAY mode doesn't work correctly on IPA v4.2 */
        if (ipa->version == IPA_VERSION_4_2)
                return;
 
-       /* Only IPA v3.5.1 supports SUSPEND mode on RX endpoints */
-       support_suspend = ipa->version == IPA_VERSION_3_5_1;
-
        for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
                struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
 
                if (endpoint->ee_id != GSI_EE_MODEM)
                        continue;
 
-               /* Set TX delay mode, or for IPA v3.5.1 RX suspend mode */
+               /* Set TX delay mode or RX suspend mode */
                if (endpoint->toward_ipa)
                        ipa_endpoint_program_delay(endpoint, enable);
-               else if (support_suspend)
+               else
                        (void)ipa_endpoint_program_suspend(endpoint, enable);
        }
 }
@@ -437,6 +499,9 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
 }
 
 /**
+ * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
+ * @endpoint:  Endpoint pointer
+ *
  * We program QMAP endpoints so each packet received is preceded by a QMAP
  * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
  * packet size field, and we have the IPA hardware populate both for each
@@ -527,10 +592,13 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
        u32 val = 0;
        u32 offset;
 
+       if (endpoint->toward_ipa)
+               return;         /* Register not valid for TX endpoints */
+
        offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
 
        /* Note that HDR_ENDIANNESS indicates big endian header fields */
-       if (!endpoint->toward_ipa && endpoint->data->qmap)
+       if (endpoint->data->qmap)
                val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
 
        iowrite32(val, endpoint->ipa->reg_virt + offset);
@@ -541,7 +609,10 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
        u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
        u32 val;
 
-       if (endpoint->toward_ipa && endpoint->data->dma_mode) {
+       if (!endpoint->toward_ipa)
+               return;         /* Register not valid for RX endpoints */
+
+       if (endpoint->data->dma_mode) {
                enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
                u32 dma_endpoint_id;
 
@@ -552,7 +623,7 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
        } else {
                val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
        }
-       /* Other bitfields unspecified (and 0) */
+       /* All other bits unspecified (and 0) */
 
        iowrite32(val, endpoint->ipa->reg_virt + offset);
 }
@@ -576,17 +647,20 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
 
        if (endpoint->data->aggregation) {
                if (!endpoint->toward_ipa) {
-                       u32 aggr_size = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
                        u32 limit;
 
                        val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
                        val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
-                       val |= u32_encode_bits(aggr_size,
-                                              AGGR_BYTE_LIMIT_FMASK);
+
+                       limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
+                       val |= u32_encode_bits(limit, AGGR_BYTE_LIMIT_FMASK);
+
                        limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
-                       val |= u32_encode_bits(limit / IPA_AGGR_GRANULARITY,
-                                              AGGR_TIME_LIMIT_FMASK);
-                       val |= u32_encode_bits(0, AGGR_PKT_LIMIT_FMASK);
+                       limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
+                       val |= u32_encode_bits(limit, AGGR_TIME_LIMIT_FMASK);
+
+                       /* AGGR_PKT_LIMIT is 0 (unlimited) */
+
                        if (endpoint->data->rx.aggr_close_eof)
                                val |= AGGR_SW_EOF_ACTIVE_FMASK;
                        /* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
@@ -605,63 +679,70 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
        iowrite32(val, endpoint->ipa->reg_virt + offset);
 }
 
-/* A return value of 0 indicates an error */
+/* The head-of-line blocking timer is defined as a tick count, where each
+ * tick represents 128 cycles of the IPA core clock.  Return the value
+ * that should be written to that register that represents the timeout
+ * period provided.
+ */
 static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
 {
+       u32 width;
        u32 scale;
-       u32 base;
+       u64 ticks;
+       u64 rate;
+       u32 high;
        u32 val;
 
        if (!microseconds)
-               return 0;       /* invalid delay */
-
-       /* Timer is represented in units of clock ticks. */
-       if (ipa->version < IPA_VERSION_4_2)
-               return microseconds;    /* XXX Needs to be computed */
-
-       /* IPA v4.2 represents the tick count as base * scale */
-       scale = 1;                      /* XXX Needs to be computed */
-       if (scale > field_max(SCALE_FMASK))
-               return 0;               /* scale too big */
-
-       base = DIV_ROUND_CLOSEST(microseconds, scale);
-       if (base > field_max(BASE_VALUE_FMASK))
-               return 0;               /* microseconds too big */
+               return 0;       /* Nothing to compute if timer period is 0 */
+
+       /* Use 64 bit arithmetic to avoid overflow... */
+       rate = ipa_clock_rate(ipa);
+       ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
+       /* ...but we still need to fit into a 32-bit register */
+       WARN_ON(ticks > U32_MAX);
+
+       /* IPA v3.5.1 just records the tick count */
+       if (ipa->version == IPA_VERSION_3_5_1)
+               return (u32)ticks;
+
+       /* For IPA v4.2, the tick count is represented by base and
+        * scale fields within the 32-bit timer register, where:
+        *     ticks = base << scale;
+        * The best precision is achieved when the base value is as
+        * large as possible.  Find the highest set bit in the tick
+        * count, and extract the number of bits in the base field
+        * such that that high bit is included.
+        */
+       high = fls(ticks);              /* 1..32 */
+       width = HWEIGHT32(BASE_VALUE_FMASK);
+       scale = high > width ? high - width : 0;
+       if (scale) {
+               /* If we're scaling, round up to get a closer result */
+               ticks += 1 << (scale - 1);
+               /* High bit was set, so rounding might have affected it */
+               if (fls(ticks) != high)
+                       scale++;
+       }
 
        val = u32_encode_bits(scale, SCALE_FMASK);
-       val |= u32_encode_bits(base, BASE_VALUE_FMASK);
+       val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);
 
        return val;
 }
 
-static int ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
-                                            u32 microseconds)
+/* If microseconds is 0, timeout is immediate */
+static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
+                                             u32 microseconds)
 {
        u32 endpoint_id = endpoint->endpoint_id;
        struct ipa *ipa = endpoint->ipa;
        u32 offset;
        u32 val;
 
-       /* XXX We'll fix this when the register definition is clear */
-       if (microseconds) {
-               struct device *dev = &ipa->pdev->dev;
-
-               dev_err(dev, "endpoint %u non-zero HOLB period (ignoring)\n",
-                       endpoint_id);
-               microseconds = 0;
-       }
-
-       if (microseconds) {
-               val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
-               if (!val)
-                       return -EINVAL;
-       } else {
-               val = 0;        /* timeout is immediate */
-       }
        offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
+       val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
        iowrite32(val, ipa->reg_virt + offset);
-
-       return 0;
 }
 
 static void
@@ -671,7 +752,7 @@ ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
        u32 offset;
        u32 val;
 
-       val = u32_encode_bits(enable ? 1 : 0, HOL_BLOCK_EN_FMASK);
+       val = enable ? HOL_BLOCK_EN_FMASK : 0;
        offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
        iowrite32(val, endpoint->ipa->reg_virt + offset);
 }
@@ -683,10 +764,10 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
        for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
                struct ipa_endpoint *endpoint = &ipa->endpoint[i];
 
-               if (endpoint->ee_id != GSI_EE_MODEM)
+               if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
                        continue;
 
-               (void)ipa_endpoint_init_hol_block_timer(endpoint, 0);
+               ipa_endpoint_init_hol_block_timer(endpoint, 0);
                ipa_endpoint_init_hol_block_enable(endpoint, true);
        }
 }
@@ -696,6 +777,9 @@ static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
        u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
        u32 val = 0;
 
+       if (!endpoint->toward_ipa)
+               return;         /* Register not valid for RX endpoints */
+
        /* DEAGGR_HDR_LEN is 0 */
        /* PACKET_OFFSET_VALID is 0 */
        /* PACKET_OFFSET_LOCATION is ignored (not valid) */
@@ -710,6 +794,9 @@ static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
        u32 seq_type = endpoint->seq_type;
        u32 val = 0;
 
+       if (!endpoint->toward_ipa)
+               return;         /* Register not valid for RX endpoints */
+
        /* Sequencer type is made up of four nibbles */
        val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
        val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
@@ -837,6 +924,8 @@ err_free_pages:
 
 /**
  * ipa_endpoint_replenish() - Replenish the Rx packets cache.
+ * @endpoint:  Endpoint to be replenished
+ * @count:     Number of buffers to send to hardware
  *
  * Allocate RX packet wrapper structures with maximal socket buffers
  * for an endpoint.  These are supplied to the hardware, which fills
@@ -1139,29 +1228,6 @@ void ipa_endpoint_default_route_clear(struct ipa *ipa)
        ipa_endpoint_default_route_set(ipa, 0);
 }
 
-static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
-{
-       u32 mask = BIT(endpoint->endpoint_id);
-       struct ipa *ipa = endpoint->ipa;
-       u32 offset;
-       u32 val;
-
-       /* assert(mask & ipa->available); */
-       offset = ipa_reg_state_aggr_active_offset(ipa->version);
-       val = ioread32(ipa->reg_virt + offset);
-
-       return !!(val & mask);
-}
-
-static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
-{
-       u32 mask = BIT(endpoint->endpoint_id);
-       struct ipa *ipa = endpoint->ipa;
-
-       /* assert(mask & ipa->available); */
-       iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
-}
-
 /**
  * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
  * @endpoint:  Endpoint to be reset
@@ -1170,7 +1236,7 @@ static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
  * on its underlying GSI channel, a special sequence of actions must be
  * taken to ensure the IPA pipeline is properly cleared.
  *
- * @Return:    0 if successful, or a negative error code
+ * Return:     0 if successful, or a negative error code
  */
 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
 {
@@ -1206,8 +1272,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
        gsi_channel_reset(gsi, endpoint->channel_id, false);
 
        /* Make sure the channel isn't suspended */
-       if (endpoint->ipa->version == IPA_VERSION_3_5_1)
-               suspended = ipa_endpoint_program_suspend(endpoint, false);
+       suspended = ipa_endpoint_program_suspend(endpoint, false);
 
        /* Start channel and do a 1 byte read */
        ret = gsi_channel_start(gsi, endpoint->channel_id);
@@ -1290,23 +1355,18 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
 
 static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
 {
-       if (endpoint->toward_ipa) {
-               if (endpoint->ipa->version != IPA_VERSION_4_2)
-                       ipa_endpoint_program_delay(endpoint, false);
-               ipa_endpoint_init_hdr_ext(endpoint);
-               ipa_endpoint_init_aggr(endpoint);
-               ipa_endpoint_init_deaggr(endpoint);
-               ipa_endpoint_init_seq(endpoint);
-       } else {
-               if (endpoint->ipa->version == IPA_VERSION_3_5_1)
-                       (void)ipa_endpoint_program_suspend(endpoint, false);
-               ipa_endpoint_init_hdr_ext(endpoint);
-               ipa_endpoint_init_aggr(endpoint);
-       }
+       if (endpoint->toward_ipa)
+               ipa_endpoint_program_delay(endpoint, false);
+       else
+               (void)ipa_endpoint_program_suspend(endpoint, false);
        ipa_endpoint_init_cfg(endpoint);
        ipa_endpoint_init_hdr(endpoint);
+       ipa_endpoint_init_hdr_ext(endpoint);
        ipa_endpoint_init_hdr_metadata_mask(endpoint);
        ipa_endpoint_init_mode(endpoint);
+       ipa_endpoint_init_aggr(endpoint);
+       ipa_endpoint_init_deaggr(endpoint);
+       ipa_endpoint_init_seq(endpoint);
        ipa_endpoint_status(endpoint);
 }
 
@@ -1362,34 +1422,6 @@ void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
                        endpoint->endpoint_id);
 }
 
-/**
- * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
- * @endpoint_id:       Endpoint on which to emulate a suspend
- *
- *  Emulate suspend IPA interrupt to unsuspend an endpoint suspended
- *  with an open aggregation frame.  This is to work around a hardware
- *  issue in IPA version 3.5.1 where the suspend interrupt will not be
- *  generated when it should be.
- */
-static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
-{
-       struct ipa *ipa = endpoint->ipa;
-
-       /* assert(ipa->version == IPA_VERSION_3_5_1); */
-
-       if (!endpoint->data->aggregation)
-               return;
-
-       /* Nothing to do if the endpoint doesn't have aggregation open */
-       if (!ipa_endpoint_aggr_active(endpoint))
-               return;
-
-       /* Force close aggregation */
-       ipa_endpoint_force_close(endpoint);
-
-       ipa_interrupt_simulate_suspend(ipa->interrupt);
-}
-
 void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
 {
        struct device *dev = &endpoint->ipa->pdev->dev;
@@ -1403,19 +1435,11 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
        if (!endpoint->toward_ipa)
                ipa_endpoint_replenish_disable(endpoint);
 
-       /* IPA v3.5.1 doesn't use channel stop for suspend */
-       stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
-       if (!endpoint->toward_ipa && !stop_channel) {
-               /* Due to a hardware bug, a client suspended with an open
-                * aggregation frame will not generate a SUSPEND IPA
-                * interrupt.  We work around this by force-closing the
-                * aggregation frame, then simulating the arrival of such
-                * an interrupt.
-                */
+       if (!endpoint->toward_ipa)
                (void)ipa_endpoint_program_suspend(endpoint, true);
-               ipa_endpoint_suspend_aggr(endpoint);
-       }
 
+       /* IPA v3.5.1 doesn't use channel stop for suspend */
+       stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
        ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
        if (ret)
                dev_err(dev, "error %d suspending channel %u\n", ret,
@@ -1432,11 +1456,11 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
        if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
                return;
 
-       /* IPA v3.5.1 doesn't use channel start for resume */
-       start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
-       if (!endpoint->toward_ipa && !start_channel)
+       if (!endpoint->toward_ipa)
                (void)ipa_endpoint_program_suspend(endpoint, false);
 
+       /* IPA v3.5.1 doesn't use channel start for resume */
+       start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
        ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
        if (ret)
                dev_err(dev, "error %d resuming channel %u\n", ret,
@@ -1450,6 +1474,8 @@ void ipa_endpoint_suspend(struct ipa *ipa)
        if (ipa->modem_netdev)
                ipa_modem_suspend(ipa->modem_netdev);
 
+       ipa_cmd_tag_process(ipa);
+
        ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
        ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
 }
index dc4a5c2..d323adb 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <linux/types.h>
 
+#include "ipa_gsi.h"
 #include "gsi_trans.h"
 #include "ipa.h"
 #include "ipa_endpoint.h"
index 3cf1860..c02cb6f 100644 (file)
@@ -8,7 +8,9 @@
 
 #include <linux/types.h>
 
+struct gsi;
 struct gsi_trans;
+struct ipa_gsi_endpoint_data;
 
 /**
  * ipa_gsi_trans_complete() - GSI transaction completion callback
@@ -41,9 +43,9 @@ void ipa_gsi_trans_release(struct gsi_trans *trans);
  */
 void ipa_gsi_channel_tx_queued(struct gsi *gsi, u32 channel_id, u32 count,
                               u32 byte_count);
+
 /**
- * ipa_gsi_trans_complete() - GSI transaction completion callback
-ipa_gsi_channel_tx_completed()
+ * ipa_gsi_channel_tx_completed() - GSI transaction completion callback
  * @gsi:       GSI pointer
  * @channel_id:        Channel number
  * @count:     Number of transactions completed since last report
@@ -55,6 +57,15 @@ ipa_gsi_channel_tx_completed()
 void ipa_gsi_channel_tx_completed(struct gsi *gsi, u32 channel_id, u32 count,
                                  u32 byte_count);
 
+/* ipa_gsi_endpoint_data_empty() - Empty endpoint config data test
+ * @data:      endpoint configuration data
+ *
+ * Determines whether an endpoint configuration data entry is empty,
+ * meaning it contains no valid configuration information and should
+ * be ignored.
+ *
+ * Return:     true if empty; false otherwise
+ */
 bool ipa_gsi_endpoint_data_empty(const struct ipa_gsi_endpoint_data *data);
 
 #endif /* _IPA_GSI_TRANS_H_ */
index d4f4c1c..727e9c5 100644 (file)
@@ -104,7 +104,7 @@ void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt);
  * ipa_interrupt_setup() - Set up the IPA interrupt framework
  * @ipa:       IPA pointer
  *
- * @Return:    Pointer to IPA SMP2P info, or a pointer-coded error
+ * Return:     Pointer to IPA SMP2P info, or a pointer-coded error
  */
 struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa);
 
index 76d5108..1fdfec4 100644 (file)
@@ -277,6 +277,7 @@ static void ipa_idle_indication_cfg(struct ipa *ipa,
 
 /**
  * ipa_hardware_dcd_config() - Enable dynamic clock division on IPA
+ * @ipa:       IPA pointer
  *
  * Configures when the IPA signals it is idle to the global clock
  * controller, which can respond by scaling down the clock to
@@ -495,6 +496,7 @@ static void ipa_resource_deconfig(struct ipa *ipa)
 /**
  * ipa_config() - Configure IPA hardware
  * @ipa:       IPA pointer
+ * @data:      IPA configuration data
  *
  * Perform initialization requiring IPA clock to be enabled.
  */
@@ -674,6 +676,11 @@ static void ipa_validate_build(void)
 
        /* This is used as a divisor */
        BUILD_BUG_ON(!IPA_AGGR_GRANULARITY);
+
+       /* Aggregation granularity value can't be 0, and must fit */
+       BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
+       BUILD_BUG_ON(ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY) >
+                       field_max(AGGR_GRANULARITY));
 #endif /* IPA_VALIDATE */
 }
 
@@ -681,7 +688,7 @@ static void ipa_validate_build(void)
  * ipa_probe() - IPA platform driver probe function
  * @pdev:      Platform device pointer
  *
- * @Return:    0 if successful, or a negative error code (possibly
+ * Return:     0 if successful, or a negative error code (possibly
  *             EPROBE_DEFER)
  *
  * This is the main entry point for the IPA driver.  Initialization proceeds
@@ -897,7 +904,7 @@ static int ipa_remove(struct platform_device *pdev)
  * ipa_suspend() - Power management system suspend callback
  * @dev:       IPA device structure
  *
- * @Return:    Zero
+ * Return:     Always returns zero
  *
  * Called by the PM framework when a system suspend operation is invoked.
  */
@@ -915,7 +922,7 @@ static int ipa_suspend(struct device *dev)
  * ipa_resume() - Power management system resume callback
  * @dev:       IPA device structure
  *
- * @Return:    Always returns 0
+ * Return:     Always returns 0
  *
  * Called by the PM framework when a system resume operation is invoked.
  */
index 3ef8141..2d45c44 100644 (file)
@@ -41,6 +41,7 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
 
 /**
  * ipa_mem_setup() - Set up IPA AP and modem shared memory areas
+ * @ipa:       IPA pointer
  *
  * Set up the shared memory regions in IPA local memory.  This involves
  * zero-filling memory regions, and in the case of header memory, telling
@@ -52,7 +53,7 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
  * The AP informs the modem where its portions of memory are located
  * in a QMI exchange that occurs at modem startup.
  *
- * @Return:    0 if successful, or a negative error code
+ * Return:     0 if successful, or a negative error code
  */
 int ipa_mem_setup(struct ipa *ipa)
 {
@@ -137,8 +138,9 @@ static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
 
 /**
  * ipa_mem_config() - Configure IPA shared memory
+ * @ipa:       IPA pointer
  *
- * @Return:    0 if successful, or a negative error code
+ * Return:     0 if successful, or a negative error code
  */
 int ipa_mem_config(struct ipa *ipa)
 {
@@ -238,6 +240,7 @@ void ipa_mem_deconfig(struct ipa *ipa)
 
 /**
  * ipa_mem_zero_modem() - Zero IPA-local memory regions owned by the modem
+ * @ipa:       IPA pointer
  *
  * Zero regions of IPA-local memory used by the modem.  These are configured
  * (and initially zeroed) by ipa_mem_setup(), but if the modem crashes and
index 03a1d0e..7341337 100644 (file)
@@ -119,7 +119,7 @@ struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = {
                        sizeof_field(struct ipa_driver_init_complete_rsp,
                                     rsp),
                .tlv_type       = 0x02,
-               .elem_size      = offsetof(struct ipa_driver_init_complete_rsp,
+               .offset         = offsetof(struct ipa_driver_init_complete_rsp,
                                           rsp),
                .ei_array       = qmi_response_type_v01_ei,
        },
@@ -137,7 +137,7 @@ struct qmi_elem_info ipa_init_complete_ind_ei[] = {
                        sizeof_field(struct ipa_init_complete_ind,
                                     status),
                .tlv_type       = 0x02,
-               .elem_size      = offsetof(struct ipa_init_complete_ind,
+               .offset         = offsetof(struct ipa_init_complete_ind,
                                           status),
                .ei_array       = qmi_response_type_v01_ei,
        },
@@ -218,7 +218,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
                        sizeof_field(struct ipa_init_modem_driver_req,
                                     platform_type_valid),
                .tlv_type       = 0x10,
-               .elem_size      = offsetof(struct ipa_init_modem_driver_req,
+               .offset         = offsetof(struct ipa_init_modem_driver_req,
                                           platform_type_valid),
        },
        {
index 0a688d8..eb4e39f 100644 (file)
@@ -32,10 +32,12 @@ struct ipa;
  * parameter is supplied to the offset macro.  The "ee" value is a member of
  * the gsi_ee enumerated type.
  *
- * The offset of a register dependent on endpoint id is computed by a macro
- * that is supplied a parameter "ep".  The "ep" value is assumed to be less
- * than the maximum endpoint value for the current hardware, and that will
- * not exceed IPA_ENDPOINT_MAX.
+ * The offset of a register dependent on endpoint ID is computed by a macro
+ * that is supplied a parameter "ep", "txep", or "rxep".  A register with an
+ * "ep" parameter is valid for any endpoint; a register with a "txep" or
+ * "rxep" parameter is valid only for TX or RX endpoints, respectively.  The
+ * "*ep" value is assumed to be less than the maximum valid endpoint ID
+ * for the current hardware, and that will not exceed IPA_ENDPOINT_MAX.
  *
  * The offset of registers related to filter and route tables is computed
  * by a macro that is supplied a parameter "er".  The "er" represents an
@@ -190,24 +192,23 @@ static inline u32 ipa_reg_bcr_val(enum ipa_version version)
        return 0x00000000;
 }
 
-
 #define IPA_REG_LOCAL_PKT_PROC_CNTXT_BASE_OFFSET       0x000001e8
 
 #define IPA_REG_AGGR_FORCE_CLOSE_OFFSET                        0x000001ec
 /* ipa->available defines the valid bits in the AGGR_FORCE_CLOSE register */
 
+/* The internal inactivity timer clock is used for the aggregation timer */
+#define TIMER_FREQUENCY        32000   /* 32 KHz inactivity timer clock */
+
 #define IPA_REG_COUNTER_CFG_OFFSET                     0x000001f0
 #define AGGR_GRANULARITY                       GENMASK(8, 4)
-/* Compute the value to use in the AGGR_GRANULARITY field representing
- * the given number of microseconds (up to 1 millisecond).
- *     x = (32 * usec) / 1000 - 1
+/* Compute the value to use in the AGGR_GRANULARITY field representing the
+ * given number of microseconds.  The value is one less than the number of
+ * timer ticks in the requested period.  Zero is not a valid granularity value.
  */
-static inline u32 ipa_aggr_granularity_val(u32 microseconds)
+static inline u32 ipa_aggr_granularity_val(u32 usec)
 {
-       /* assert(microseconds >= 16); (?) */
-       /* assert(microseconds <= 1015); */
-
-       return DIV_ROUND_CLOSEST(32 * microseconds, 1000) - 1;
+       return DIV_ROUND_CLOSEST(usec * TIMER_FREQUENCY, USEC_PER_SEC) - 1;
 }
 
 #define IPA_REG_TX_CFG_OFFSET                          0x000001fc
@@ -293,11 +294,13 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
 #define HDR_TOTAL_LEN_OR_PAD_OFFSET_FMASK      GENMASK(9, 4)
 #define HDR_PAD_TO_ALIGNMENT_FMASK             GENMASK(13, 10)
 
-#define IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(ep) \
-                                       (0x00000818 + 0x0070 * (ep))
+/* Valid only for RX (IPA producer) endpoints */
+#define IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(rxep) \
+                                       (0x00000818 + 0x0070 * (rxep))
 
-#define IPA_REG_ENDP_INIT_MODE_N_OFFSET(ep) \
-                                       (0x00000820 + 0x0070 * (ep))
+/* Valid only for TX (IPA consumer) endpoints */
+#define IPA_REG_ENDP_INIT_MODE_N_OFFSET(txep) \
+                                       (0x00000820 + 0x0070 * (txep))
 #define MODE_FMASK                             GENMASK(2, 0)
 #define DEST_PIPE_INDEX_FMASK                  GENMASK(8, 4)
 #define BYTE_THRESHOLD_FMASK                   GENMASK(27, 12)
@@ -316,19 +319,21 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
 #define AGGR_FORCE_CLOSE_FMASK                 GENMASK(22, 22)
 #define AGGR_HARD_BYTE_LIMIT_ENABLE_FMASK      GENMASK(24, 24)
 
-#define IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(ep) \
-                                       (0x0000082c +  0x0070 * (ep))
+/* Valid only for RX (IPA producer) endpoints */
+#define IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(rxep) \
+                                       (0x0000082c +  0x0070 * (rxep))
 #define HOL_BLOCK_EN_FMASK                     GENMASK(0, 0)
 
-/* The next register is valid only for RX (IPA producer) endpoints */
-#define IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(ep) \
-                                       (0x00000830 +  0x0070 * (ep))
+/* Valid only for RX (IPA producer) endpoints */
+#define IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(rxep) \
+                                       (0x00000830 +  0x0070 * (rxep))
 /* The next fields are present for IPA v4.2 only */
 #define BASE_VALUE_FMASK                       GENMASK(4, 0)
 #define SCALE_FMASK                            GENMASK(12, 8)
 
-#define IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(ep) \
-                                       (0x00000834 + 0x0070 * (ep))
+/* Valid only for TX (IPA consumer) endpoints */
+#define IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(txep) \
+                                       (0x00000834 + 0x0070 * (txep))
 #define DEAGGR_HDR_LEN_FMASK                   GENMASK(5, 0)
 #define PACKET_OFFSET_VALID_FMASK              GENMASK(7, 7)
 #define PACKET_OFFSET_LOCATION_FMASK           GENMASK(13, 8)
@@ -338,8 +343,9 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
                                        (0x00000838 + 0x0070 * (ep))
 #define RSRC_GRP_FMASK                         GENMASK(1, 0)
 
-#define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(ep) \
-                                       (0x0000083c + 0x0070 * (ep))
+/* Valid only for TX (IPA consumer) endpoints */
+#define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(txep) \
+                                       (0x0000083c + 0x0070 * (txep))
 #define HPS_SEQ_TYPE_FMASK                     GENMASK(3, 0)
 #define DPS_SEQ_TYPE_FMASK                     GENMASK(7, 4)
 #define HPS_REP_SEQ_TYPE_FMASK                 GENMASK(11, 8)
@@ -353,7 +359,7 @@ static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
 /* The next field is present for IPA v4.0 and above */
 #define STATUS_PKT_SUPPRESS_FMASK              GENMASK(9, 9)
 
-/* "er" is either an endpoint id (for filters) or a route id (for routes) */
+/* "er" is either an endpoint ID (for filters) or a route ID (for routes) */
 #define IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(er) \
                                        (0x0000085c + 0x0070 * (er))
 #define FILTER_HASH_MSK_SRC_ID_FMASK           GENMASK(0, 0)
index 1f65cdc..bf0e406 100644 (file)
@@ -15,7 +15,7 @@ struct ipa;
  * @ipa:       IPA pointer
  * @modem_init:        Whether the modem is responsible for GSI initialization
  *
- * @Return:    0 if successful, or a negative error code
+ * Return:     0 if successful, or a negative error code
  *
  */
 int ipa_smp2p_init(struct ipa *ipa, bool modem_init);
index 9df2a3e..2098ca2 100644 (file)
@@ -505,7 +505,7 @@ void ipa_table_teardown(struct ipa *ipa)
 
 /**
  * ipa_filter_tuple_zero() - Zero an endpoint's hashed filter tuple
- * @endpoint_id:       Endpoint whose filter hash tuple should be zeroed
+ * @endpoint:  Endpoint whose filter hash tuple should be zeroed
  *
  * Endpoint must be for the AP (not modem) and support filtering. Updates
  * the filter hash values without changing route ones.
@@ -560,6 +560,7 @@ static bool ipa_route_id_modem(u32 route_id)
 
 /**
  * ipa_route_tuple_zero() - Zero a hashed route table entry tuple
+ * @ipa:       IPA pointer
  * @route_id:  Route table entry whose hash tuple should be zeroed
  *
  * Updates the route hash values without changing filter ones.
index 64ea022..78038d1 100644 (file)
@@ -25,7 +25,7 @@ struct ipa;
  * ipa_table_valid() - Validate route and filter table memory regions
  * @ipa:       IPA pointer
 
- * @Return:    true if all regions are valid, false otherwise
+ * Return:     true if all regions are valid, false otherwise
  */
 bool ipa_table_valid(struct ipa *ipa);
 
@@ -33,7 +33,7 @@ bool ipa_table_valid(struct ipa *ipa);
  * ipa_filter_map_valid() - Validate a filter table endpoint bitmap
  * @ipa:       IPA pointer
  *
- * @Return:    true if all regions are valid, false otherwise
+ * Return:     true if all regions are valid, false otherwise
  */
 bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask);
 
index a1f8db0..1a0b04e 100644 (file)
  */
 /* Supports hardware interface version 0x2000 */
 
-/* Offset relative to the base of the IPA shared address space of the
- * shared region used for communication with the microcontroller.  The
- * region is 128 bytes in size, but only the first 40 bytes are used.
- */
-#define IPA_MEM_UC_OFFSET      0x0000
-
 /* Delay to allow the microcontroller to save state when crashing */
 #define IPA_SEND_DELAY         100     /* microseconds */
 
 /**
  * struct ipa_uc_mem_area - AP/microcontroller shared memory area
  * @command:           command code (AP->microcontroller)
+ * @reserved0:         reserved bytes; avoid reading or writing
  * @command_param:     low 32 bits of command parameter (AP->microcontroller)
  * @command_param_hi:  high 32 bits of command parameter (AP->microcontroller)
  *
  * @response:          response code (microcontroller->AP)
+ * @reserved1:         reserved bytes; avoid reading or writing
  * @response_param:    response parameter (microcontroller->AP)
  *
  * @event:             event code (microcontroller->AP)
+ * @reserved2:         reserved bytes; avoid reading or writing
  * @event_param:       event parameter (microcontroller->AP)
  *
  * @first_error_address: address of first error-source on SNOC
  * @hw_state:          state of hardware (including error type information)
  * @warning_counter:   counter of non-fatal hardware errors
+ * @reserved3:         reserved bytes; avoid reading or writing
  * @interface_version: hardware-reported interface version
+ * @reserved4:         reserved bytes; avoid reading or writing
+ *
+ * A shared memory area at the base of IPA resident memory is used for
+ * communication with the microcontroller.  The region is 128 bytes in
+ * size, but only the first 40 bytes (structured this way) are used.
  */
 struct ipa_uc_mem_area {
        u8 command;             /* enum ipa_uc_command */
index e56547b..9159846 100644 (file)
@@ -4052,9 +4052,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
                return err;
 
        netdev_lockdep_set_classes(dev);
-       lockdep_set_class_and_subclass(&dev->addr_list_lock,
-                                      &macsec_netdev_addr_lock_key,
-                                      dev->lower_level);
+       lockdep_set_class(&dev->addr_list_lock,
+                         &macsec_netdev_addr_lock_key);
 
        err = netdev_upper_dev_link(real_dev, dev, extack);
        if (err < 0)
index 6a6cc9f..4942f61 100644 (file)
@@ -880,9 +880,8 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
 static void macvlan_set_lockdep_class(struct net_device *dev)
 {
        netdev_lockdep_set_classes(dev);
-       lockdep_set_class_and_subclass(&dev->addr_list_lock,
-                                      &macvlan_netdev_addr_lock_key,
-                                      dev->lower_level);
+       lockdep_set_class(&dev->addr_list_lock,
+                         &macvlan_netdev_addr_lock_key);
 }
 
 static int macvlan_init(struct net_device *dev)
index f4d8f62..4dfb389 100644 (file)
@@ -3,7 +3,7 @@
 obj-$(CONFIG_NETDEVSIM) += netdevsim.o
 
 netdevsim-objs := \
-       netdev.o dev.o fib.o bus.o health.o
+       netdev.o dev.o fib.o bus.o health.o udp_tunnels.o
 
 ifeq ($(CONFIG_BPF_SYSCALL),y)
 netdevsim-objs += \
index ec6b6f7..ce719c8 100644 (file)
@@ -225,6 +225,7 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
        debugfs_create_bool("fail_trap_policer_counter_get", 0600,
                            nsim_dev->ddir,
                            &nsim_dev->fail_trap_policer_counter_get);
+       nsim_udp_tunnels_debugfs_create(nsim_dev);
        return 0;
 }
 
@@ -889,6 +890,7 @@ static const struct devlink_ops nsim_dev_devlink_ops = {
 static int __nsim_dev_port_add(struct nsim_dev *nsim_dev,
                               unsigned int port_index)
 {
+       struct devlink_port_attrs attrs = {};
        struct nsim_dev_port *nsim_dev_port;
        struct devlink_port *devlink_port;
        int err;
@@ -899,10 +901,11 @@ static int __nsim_dev_port_add(struct nsim_dev *nsim_dev,
        nsim_dev_port->port_index = port_index;
 
        devlink_port = &nsim_dev_port->devlink_port;
-       devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
-                              port_index + 1, 0, 0,
-                              nsim_dev->switch_id.id,
-                              nsim_dev->switch_id.id_len);
+       attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+       attrs.phys.port_number = port_index + 1;
+       memcpy(attrs.switch_id.id, nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
+       attrs.switch_id.id_len = nsim_dev->switch_id.id_len;
+       devlink_port_attrs_set(devlink_port, &attrs);
        err = devlink_port_register(priv_to_devlink(nsim_dev), devlink_port,
                                    port_index);
        if (err)
index 2908e0a..9d0d180 100644 (file)
@@ -22,6 +22,7 @@
 #include <net/netlink.h>
 #include <net/pkt_cls.h>
 #include <net/rtnetlink.h>
+#include <net/udp_tunnel.h>
 
 #include "netdevsim.h"
 
@@ -257,6 +258,8 @@ static const struct net_device_ops nsim_netdev_ops = {
        .ndo_setup_tc           = nsim_setup_tc,
        .ndo_set_features       = nsim_set_features,
        .ndo_bpf                = nsim_bpf,
+       .ndo_udp_tunnel_add     = udp_tunnel_nic_add_port,
+       .ndo_udp_tunnel_del     = udp_tunnel_nic_del_port,
        .ndo_get_devlink_port   = nsim_get_devlink_port,
 };
 
@@ -299,10 +302,14 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
        SET_NETDEV_DEV(dev, &ns->nsim_bus_dev->dev);
        dev->netdev_ops = &nsim_netdev_ops;
 
+       err = nsim_udp_tunnels_info_create(nsim_dev, dev);
+       if (err)
+               goto err_free_netdev;
+
        rtnl_lock();
        err = nsim_bpf_init(ns);
        if (err)
-               goto err_free_netdev;
+               goto err_utn_destroy;
 
        nsim_ipsec_init(ns);
 
@@ -317,6 +324,8 @@ err_ipsec_teardown:
        nsim_ipsec_teardown(ns);
        nsim_bpf_uninit(ns);
        rtnl_unlock();
+err_utn_destroy:
+       nsim_udp_tunnels_info_destroy(dev);
 err_free_netdev:
        free_netdev(dev);
        return ERR_PTR(err);
@@ -331,6 +340,7 @@ void nsim_destroy(struct netdevsim *ns)
        nsim_ipsec_teardown(ns);
        nsim_bpf_uninit(ns);
        rtnl_unlock();
+       nsim_udp_tunnels_info_destroy(dev);
        free_netdev(dev);
 }
 
index 4ded54a..d164052 100644 (file)
@@ -13,6 +13,7 @@
  * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
  */
 
+#include <linux/debugfs.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
@@ -29,6 +30,7 @@
 
 #define NSIM_IPSEC_MAX_SA_COUNT                33
 #define NSIM_IPSEC_VALID               BIT(31)
+#define NSIM_UDP_TUNNEL_N_PORTS                4
 
 struct nsim_sa {
        struct xfrm_state *xs;
@@ -72,12 +74,23 @@ struct netdevsim {
 
        bool bpf_map_accept;
        struct nsim_ipsec ipsec;
+       struct {
+               u32 inject_error;
+               u32 sleep;
+               u32 ports[2][NSIM_UDP_TUNNEL_N_PORTS];
+               struct debugfs_u32_array dfs_ports[2];
+       } udp_ports;
 };
 
 struct netdevsim *
 nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port);
 void nsim_destroy(struct netdevsim *ns);
 
+void nsim_udp_tunnels_debugfs_create(struct nsim_dev *nsim_dev);
+int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
+                                struct net_device *dev);
+void nsim_udp_tunnels_info_destroy(struct net_device *dev);
+
 #ifdef CONFIG_BPF_SYSCALL
 int nsim_bpf_dev_init(struct nsim_dev *nsim_dev);
 void nsim_bpf_dev_exit(struct nsim_dev *nsim_dev);
@@ -183,6 +196,12 @@ struct nsim_dev {
        bool fail_trap_group_set;
        bool fail_trap_policer_set;
        bool fail_trap_policer_counter_get;
+       struct {
+               bool sync_all;
+               bool open_only;
+               bool ipv4_only;
+               u32 sleep;
+       } udp_ports;
 };
 
 static inline struct net *nsim_dev_net(struct nsim_dev *nsim_dev)
diff --git a/drivers/net/netdevsim/udp_tunnels.c b/drivers/net/netdevsim/udp_tunnels.c
new file mode 100644 (file)
index 0000000..22c06a7
--- /dev/null
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2020 Facebook Inc.
+
+#include <linux/debugfs.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <net/udp_tunnel.h>
+
+#include "netdevsim.h"
+
+static int
+nsim_udp_tunnel_set_port(struct net_device *dev, unsigned int table,
+                        unsigned int entry, struct udp_tunnel_info *ti)
+{
+       struct netdevsim *ns = netdev_priv(dev);
+       int ret;
+
+       ret = -ns->udp_ports.inject_error;
+       ns->udp_ports.inject_error = 0;
+
+       if (ns->udp_ports.sleep)
+               msleep(ns->udp_ports.sleep);
+
+       if (!ret) {
+               if (ns->udp_ports.ports[table][entry])
+                       ret = -EBUSY;
+               else
+                       ns->udp_ports.ports[table][entry] =
+                               be16_to_cpu(ti->port) << 16 | ti->type;
+       }
+
+       netdev_info(dev, "set [%d, %d] type %d family %d port %d - %d\n",
+                   table, entry, ti->type, ti->sa_family, ntohs(ti->port),
+                   ret);
+       return ret;
+}
+
+static int
+nsim_udp_tunnel_unset_port(struct net_device *dev, unsigned int table,
+                          unsigned int entry, struct udp_tunnel_info *ti)
+{
+       struct netdevsim *ns = netdev_priv(dev);
+       int ret;
+
+       ret = -ns->udp_ports.inject_error;
+       ns->udp_ports.inject_error = 0;
+
+       if (ns->udp_ports.sleep)
+               msleep(ns->udp_ports.sleep);
+       if (!ret) {
+               u32 val = be16_to_cpu(ti->port) << 16 | ti->type;
+
+               if (val == ns->udp_ports.ports[table][entry])
+                       ns->udp_ports.ports[table][entry] = 0;
+               else
+                       ret = -ENOENT;
+       }
+
+       netdev_info(dev, "unset [%d, %d] type %d family %d port %d - %d\n",
+                   table, entry, ti->type, ti->sa_family, ntohs(ti->port),
+                   ret);
+       return ret;
+}
+
+static int
+nsim_udp_tunnel_sync_table(struct net_device *dev, unsigned int table)
+{
+       struct netdevsim *ns = netdev_priv(dev);
+       struct udp_tunnel_info ti;
+       unsigned int i;
+       int ret;
+
+       ret = -ns->udp_ports.inject_error;
+       ns->udp_ports.inject_error = 0;
+
+       for (i = 0; i < NSIM_UDP_TUNNEL_N_PORTS; i++) {
+               udp_tunnel_nic_get_port(dev, table, i, &ti);
+               ns->udp_ports.ports[table][i] =
+                       be16_to_cpu(ti.port) << 16 | ti.type;
+       }
+
+       return ret;
+}
+
+static const struct udp_tunnel_nic_info nsim_udp_tunnel_info = {
+       .set_port       = nsim_udp_tunnel_set_port,
+       .unset_port     = nsim_udp_tunnel_unset_port,
+       .sync_table     = nsim_udp_tunnel_sync_table,
+
+       .tables = {
+               {
+                       .n_entries      = NSIM_UDP_TUNNEL_N_PORTS,
+                       .tunnel_types   = UDP_TUNNEL_TYPE_VXLAN,
+               },
+               {
+                       .n_entries      = NSIM_UDP_TUNNEL_N_PORTS,
+                       .tunnel_types   = UDP_TUNNEL_TYPE_GENEVE |
+                                         UDP_TUNNEL_TYPE_VXLAN_GPE,
+               },
+       },
+};
+
+static ssize_t
+nsim_udp_tunnels_info_reset_write(struct file *file, const char __user *data,
+                                 size_t count, loff_t *ppos)
+{
+       struct net_device *dev = file->private_data;
+       struct netdevsim *ns = netdev_priv(dev);
+
+       memset(&ns->udp_ports.ports, 0, sizeof(ns->udp_ports.ports));
+       rtnl_lock();
+       udp_tunnel_nic_reset_ntf(dev);
+       rtnl_unlock();
+
+       return count;
+}
+
+static const struct file_operations nsim_udp_tunnels_info_reset_fops = {
+       .open = simple_open,
+       .write = nsim_udp_tunnels_info_reset_write,
+       .llseek = generic_file_llseek,
+};
+
+int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
+                                struct net_device *dev)
+{
+       struct netdevsim *ns = netdev_priv(dev);
+       struct udp_tunnel_nic_info *info;
+
+       debugfs_create_u32("udp_ports_inject_error", 0600,
+                          ns->nsim_dev_port->ddir,
+                          &ns->udp_ports.inject_error);
+
+       ns->udp_ports.dfs_ports[0].array = ns->udp_ports.ports[0];
+       ns->udp_ports.dfs_ports[0].n_elements = NSIM_UDP_TUNNEL_N_PORTS;
+       debugfs_create_u32_array("udp_ports_table0", 0400,
+                                ns->nsim_dev_port->ddir,
+                                &ns->udp_ports.dfs_ports[0]);
+
+       ns->udp_ports.dfs_ports[1].array = ns->udp_ports.ports[1];
+       ns->udp_ports.dfs_ports[1].n_elements = NSIM_UDP_TUNNEL_N_PORTS;
+       debugfs_create_u32_array("udp_ports_table1", 0400,
+                                ns->nsim_dev_port->ddir,
+                                &ns->udp_ports.dfs_ports[1]);
+
+       debugfs_create_file("udp_ports_reset", 0200, ns->nsim_dev_port->ddir,
+                           dev, &nsim_udp_tunnels_info_reset_fops);
+
+       /* Note: it's not normal to allocate the info struct like this!
+        * Drivers are expected to use a static const one, here we're testing.
+        */
+       info = kmemdup(&nsim_udp_tunnel_info, sizeof(nsim_udp_tunnel_info),
+                      GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+       ns->udp_ports.sleep = nsim_dev->udp_ports.sleep;
+
+       if (nsim_dev->udp_ports.sync_all) {
+               info->set_port = NULL;
+               info->unset_port = NULL;
+       } else {
+               info->sync_table = NULL;
+       }
+
+       if (ns->udp_ports.sleep)
+               info->flags |= UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
+       if (nsim_dev->udp_ports.open_only)
+               info->flags |= UDP_TUNNEL_NIC_INFO_OPEN_ONLY;
+       if (nsim_dev->udp_ports.ipv4_only)
+               info->flags |= UDP_TUNNEL_NIC_INFO_IPV4_ONLY;
+
+       dev->udp_tunnel_nic_info = info;
+       return 0;
+}
+
+void nsim_udp_tunnels_info_destroy(struct net_device *dev)
+{
+       kfree(dev->udp_tunnel_nic_info);
+       dev->udp_tunnel_nic_info = NULL;
+}
+
+void nsim_udp_tunnels_debugfs_create(struct nsim_dev *nsim_dev)
+{
+       debugfs_create_bool("udp_ports_sync_all", 0600, nsim_dev->ddir,
+                           &nsim_dev->udp_ports.sync_all);
+       debugfs_create_bool("udp_ports_open_only", 0600, nsim_dev->ddir,
+                           &nsim_dev->udp_ports.open_only);
+       debugfs_create_bool("udp_ports_ipv4_only", 0600, nsim_dev->ddir,
+                           &nsim_dev->udp_ports.ipv4_only);
+       debugfs_create_u32("udp_ports_sleep", 0600, nsim_dev->ddir,
+                          &nsim_dev->udp_ports.sleep);
+}
index f257023..dd20c2c 100644 (file)
@@ -21,6 +21,9 @@ config MDIO_BUS
 
 if MDIO_BUS
 
+config MDIO_DEVRES
+       tristate
+
 config MDIO_ASPEED
        tristate "ASPEED MDIO bus controller"
        depends on ARCH_ASPEED || COMPILE_TEST
@@ -182,6 +185,7 @@ config MDIO_MOXART
 config MDIO_MSCC_MIIM
        tristate "Microsemi MIIM interface support"
        depends on HAS_IOMEM
+       select MDIO_DEVRES
        help
          This driver supports the MIIM (MDIO) interface found in the network
          switches of the Microsemi SoCs; it is recommended to switch on
@@ -252,6 +256,7 @@ menuconfig PHYLIB
        tristate "PHY Device support and infrastructure"
        depends on NETDEVICES
        select MDIO_DEVICE
+       select MDIO_DEVRES
        help
          Ethernet controllers are usually attached to PHY
          devices.  This option provides infrastructure for
@@ -480,8 +485,7 @@ config MICROCHIP_T1_PHY
 config MICROSEMI_PHY
        tristate "Microsemi PHYs"
        depends on MACSEC || MACSEC=n
-       select CRYPTO_AES
-       select CRYPTO_ECB
+       select CRYPTO_LIB_AES if MACSEC
        help
          Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
 
index dc9e53b..d84bab4 100644 (file)
@@ -17,6 +17,7 @@ libphy-y                      += $(mdio-bus-y)
 else
 obj-$(CONFIG_MDIO_DEVICE)      += mdio-bus.o
 endif
+obj-$(CONFIG_MDIO_DEVRES)      += mdio_devres.o
 libphy-$(CONFIG_SWPHY)         += swphy.o
 libphy-$(CONFIG_LED_TRIGGER_PHY)       += phy_led_triggers.o
 
index c7eabe4..7471a8b 100644 (file)
 
 /**
  * struct adin_cfg_reg_map - map a config value to a register value
- * @cfg                value in device configuration
- * @reg                value in the register
+ * @cfg:       value in device configuration
+ * @reg:       value in the register
  */
 struct adin_cfg_reg_map {
        int cfg;
@@ -135,9 +135,9 @@ static const struct adin_cfg_reg_map adin_rmii_fifo_depths[] = {
 
 /**
  * struct adin_clause45_mmd_map - map to convert Clause 45 regs to Clause 22
- * @devad              device address used in Clause 45 access
- * @cl45_regnum                register address defined by Clause 45
- * @adin_regnum                equivalent register address accessible via Clause 22
+ * @devad:             device address used in Clause 45 access
+ * @cl45_regnum:       register address defined by Clause 45
+ * @adin_regnum:       equivalent register address accessible via Clause 22
  */
 struct adin_clause45_mmd_map {
        int devad;
@@ -174,7 +174,7 @@ static const struct adin_hw_stat adin_hw_stats[] = {
 
 /**
  * struct adin_priv - ADIN PHY driver private data
- * stats               statistic counters for the PHY
+ * @stats:             statistic counters for the PHY
  */
 struct adin_priv {
        u64                     stats[ARRAY_SIZE(adin_hw_stats)];
index 97cbe59..101651b 100644 (file)
 #include <linux/regulator/consumer.h>
 #include <dt-bindings/net/qca-ar803x.h>
 
+#define AT803X_SPECIFIC_FUNCTION_CONTROL       0x10
+#define AT803X_SFC_ASSERT_CRS                  BIT(11)
+#define AT803X_SFC_FORCE_LINK                  BIT(10)
+#define AT803X_SFC_MDI_CROSSOVER_MODE_M                GENMASK(6, 5)
+#define AT803X_SFC_AUTOMATIC_CROSSOVER         0x3
+#define AT803X_SFC_MANUAL_MDIX                 0x1
+#define AT803X_SFC_MANUAL_MDI                  0x0
+#define AT803X_SFC_SQE_TEST                    BIT(2)
+#define AT803X_SFC_POLARITY_REVERSAL           BIT(1)
+#define AT803X_SFC_DISABLE_JABBER              BIT(0)
+
 #define AT803X_SPECIFIC_STATUS                 0x11
 #define AT803X_SS_SPEED_MASK                   (3 << 14)
 #define AT803X_SS_SPEED_1000                   (2 << 14)
@@ -400,8 +411,8 @@ static int at803x_parse_dt(struct phy_device *phydev)
 {
        struct device_node *node = phydev->mdio.dev.of_node;
        struct at803x_priv *priv = phydev->priv;
-       unsigned int sel, mask;
        u32 freq, strength;
+       unsigned int sel;
        int ret;
 
        if (!IS_ENABLED(CONFIG_OF_MDIO))
@@ -409,7 +420,6 @@ static int at803x_parse_dt(struct phy_device *phydev)
 
        ret = of_property_read_u32(node, "qca,clk-out-frequency", &freq);
        if (!ret) {
-               mask = AT803X_CLK_OUT_MASK;
                switch (freq) {
                case 25000000:
                        sel = AT803X_CLK_OUT_25MHZ_XTAL;
@@ -428,8 +438,8 @@ static int at803x_parse_dt(struct phy_device *phydev)
                        return -EINVAL;
                }
 
-               priv->clk_25m_reg |= FIELD_PREP(mask, sel);
-               priv->clk_25m_mask |= mask;
+               priv->clk_25m_reg |= FIELD_PREP(AT803X_CLK_OUT_MASK, sel);
+               priv->clk_25m_mask |= AT803X_CLK_OUT_MASK;
 
                /* Fixup for the AR8030/AR8035. This chip has another mask and
                 * doesn't support the DSP reference. Eg. the lowest bit of the
@@ -704,6 +714,12 @@ static int at803x_read_status(struct phy_device *phydev)
                return ss;
 
        if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
+               int sfc;
+
+               sfc = phy_read(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL);
+               if (sfc < 0)
+                       return sfc;
+
                switch (ss & AT803X_SS_SPEED_MASK) {
                case AT803X_SS_SPEED_10:
                        phydev->speed = SPEED_10;
@@ -719,10 +735,23 @@ static int at803x_read_status(struct phy_device *phydev)
                        phydev->duplex = DUPLEX_FULL;
                else
                        phydev->duplex = DUPLEX_HALF;
+
                if (ss & AT803X_SS_MDIX)
                        phydev->mdix = ETH_TP_MDI_X;
                else
                        phydev->mdix = ETH_TP_MDI;
+
+               switch (FIELD_GET(AT803X_SFC_MDI_CROSSOVER_MODE_M, sfc)) {
+               case AT803X_SFC_MANUAL_MDI:
+                       phydev->mdix_ctrl = ETH_TP_MDI;
+                       break;
+               case AT803X_SFC_MANUAL_MDIX:
+                       phydev->mdix_ctrl = ETH_TP_MDI_X;
+                       break;
+               case AT803X_SFC_AUTOMATIC_CROSSOVER:
+                       phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+                       break;
+               }
        }
 
        if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
@@ -731,6 +760,50 @@ static int at803x_read_status(struct phy_device *phydev)
        return 0;
 }
 
+static int at803x_config_mdix(struct phy_device *phydev, u8 ctrl)
+{
+       u16 val;
+
+       switch (ctrl) {
+       case ETH_TP_MDI:
+               val = AT803X_SFC_MANUAL_MDI;
+               break;
+       case ETH_TP_MDI_X:
+               val = AT803X_SFC_MANUAL_MDIX;
+               break;
+       case ETH_TP_MDI_AUTO:
+               val = AT803X_SFC_AUTOMATIC_CROSSOVER;
+               break;
+       default:
+               return 0;
+       }
+
+       return phy_modify_changed(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL,
+                         AT803X_SFC_MDI_CROSSOVER_MODE_M,
+                         FIELD_PREP(AT803X_SFC_MDI_CROSSOVER_MODE_M, val));
+}
+
+static int at803x_config_aneg(struct phy_device *phydev)
+{
+       int ret;
+
+       ret = at803x_config_mdix(phydev, phydev->mdix_ctrl);
+       if (ret < 0)
+               return ret;
+
+       /* Changes of the mdix bits are disruptive to the normal operation;
+        * therefore any changes to these registers must be followed by a
+        * software reset to take effect.
+        */
+       if (ret == 1) {
+               ret = genphy_soft_reset(phydev);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return genphy_config_aneg(phydev);
+}
+
 static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
 {
        int val;
@@ -980,6 +1053,7 @@ static struct phy_driver at803x_driver[] = {
        .flags                  = PHY_POLL_CABLE_TEST,
        .probe                  = at803x_probe,
        .remove                 = at803x_remove,
+       .config_aneg            = at803x_config_aneg,
        .config_init            = at803x_config_init,
        .soft_reset             = genphy_soft_reset,
        .set_wol                = at803x_set_wol,
@@ -1062,6 +1136,9 @@ static struct phy_driver at803x_driver[] = {
        .config_intr            = &at803x_config_intr,
        .cable_test_start       = at803x_cable_test_start,
        .cable_test_get_status  = at803x_cable_test_get_status,
+       .read_status            = at803x_read_status,
+       .soft_reset             = genphy_soft_reset,
+       .config_aneg            = at803x_config_aneg,
 } };
 
 module_phy_driver(at803x_driver);
index ecbd5e0..da31756 100644 (file)
@@ -803,9 +803,10 @@ static int decode_evnt(struct dp83640_private *dp83640,
 
 static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
 {
-       u16 *seqid, hash;
        unsigned int offset = 0;
        u8 *msgtype, *data = skb_mac_header(skb);
+       __be16 *seqid;
+       u16 hash;
 
        /* check sequenceID, messageType, 12 bit hash of offset 20-29 */
 
@@ -836,7 +837,7 @@ static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
        if (rxts->msgtype != (*msgtype & 0xf))
                return 0;
 
-       seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+       seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
        if (rxts->seqid != ntohs(*seqid))
                return 0;
 
index 1dd19d0..37643c4 100644 (file)
@@ -26,7 +26,9 @@
 #define MII_DP83822_PHYSCR     0x11
 #define MII_DP83822_MISR1      0x12
 #define MII_DP83822_MISR2      0x13
+#define MII_DP83822_RCSR       0x17
 #define MII_DP83822_RESET_CTRL 0x1f
+#define MII_DP83822_GENCFG     0x465
 
 #define DP83822_HW_RESET       BIT(15)
 #define DP83822_SW_RESET       BIT(14)
 #define DP83822_WOL_INDICATION_SEL BIT(8)
 #define DP83822_WOL_CLR_INDICATION BIT(11)
 
+/* RSCR bits */
+#define DP83822_RX_CLK_SHIFT   BIT(12)
+#define DP83822_TX_CLK_SHIFT   BIT(11)
+
 static int dp83822_ack_interrupt(struct phy_device *phydev)
 {
        int err;
@@ -255,7 +261,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
        return phy_write(phydev, MII_DP83822_PHYSCR, physcr_status);
 }
 
-static int dp83822_config_init(struct phy_device *phydev)
+static int dp8382x_disable_wol(struct phy_device *phydev)
 {
        int value = DP83822_WOL_EN | DP83822_WOL_MAGIC_EN |
                    DP83822_WOL_SECURE_ON;
@@ -264,6 +270,46 @@ static int dp83822_config_init(struct phy_device *phydev)
                                  MII_DP83822_WOL_CFG, value);
 }
 
+static int dp83822_config_init(struct phy_device *phydev)
+{
+       struct device *dev = &phydev->mdio.dev;
+       int rgmii_delay;
+       s32 rx_int_delay;
+       s32 tx_int_delay;
+       int err = 0;
+
+       if (phy_interface_is_rgmii(phydev)) {
+               rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
+                                                     true);
+
+               if (rx_int_delay <= 0)
+                       rgmii_delay = 0;
+               else
+                       rgmii_delay = DP83822_RX_CLK_SHIFT;
+
+               tx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
+                                                     false);
+               if (tx_int_delay <= 0)
+                       rgmii_delay &= ~DP83822_TX_CLK_SHIFT;
+               else
+                       rgmii_delay |= DP83822_TX_CLK_SHIFT;
+
+               if (rgmii_delay) {
+                       err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+                                              MII_DP83822_RCSR, rgmii_delay);
+                       if (err)
+                               return err;
+               }
+       }
+
+       return dp8382x_disable_wol(phydev);
+}
+
+static int dp8382x_config_init(struct phy_device *phydev)
+{
+       return dp8382x_disable_wol(phydev);
+}
+
 static int dp83822_phy_reset(struct phy_device *phydev)
 {
        int err;
@@ -272,9 +318,7 @@ static int dp83822_phy_reset(struct phy_device *phydev)
        if (err < 0)
                return err;
 
-       dp83822_config_init(phydev);
-
-       return 0;
+       return phydev->drv->config_init(phydev);
 }
 
 static int dp83822_suspend(struct phy_device *phydev)
@@ -318,14 +362,29 @@ static int dp83822_resume(struct phy_device *phydev)
                .resume = dp83822_resume,                       \
        }
 
+#define DP8382X_PHY_DRIVER(_id, _name)                         \
+       {                                                       \
+               PHY_ID_MATCH_MODEL(_id),                        \
+               .name           = (_name),                      \
+               /* PHY_BASIC_FEATURES */                        \
+               .soft_reset     = dp83822_phy_reset,            \
+               .config_init    = dp8382x_config_init,          \
+               .get_wol = dp83822_get_wol,                     \
+               .set_wol = dp83822_set_wol,                     \
+               .ack_interrupt = dp83822_ack_interrupt,         \
+               .config_intr = dp83822_config_intr,             \
+               .suspend = dp83822_suspend,                     \
+               .resume = dp83822_resume,                       \
+       }
+
 static struct phy_driver dp83822_driver[] = {
        DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"),
-       DP83822_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"),
-       DP83822_PHY_DRIVER(DP83826C_PHY_ID, "TI DP83826C"),
-       DP83822_PHY_DRIVER(DP83826NC_PHY_ID, "TI DP83826NC"),
-       DP83822_PHY_DRIVER(DP83825S_PHY_ID, "TI DP83825S"),
-       DP83822_PHY_DRIVER(DP83825CM_PHY_ID, "TI DP83825M"),
-       DP83822_PHY_DRIVER(DP83825CS_PHY_ID, "TI DP83825CS"),
+       DP8382X_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"),
+       DP8382X_PHY_DRIVER(DP83826C_PHY_ID, "TI DP83826C"),
+       DP8382X_PHY_DRIVER(DP83826NC_PHY_ID, "TI DP83826NC"),
+       DP8382X_PHY_DRIVER(DP83825S_PHY_ID, "TI DP83825S"),
+       DP8382X_PHY_DRIVER(DP83825CM_PHY_ID, "TI DP83825M"),
+       DP8382X_PHY_DRIVER(DP83825CS_PHY_ID, "TI DP83825CS"),
 };
 module_phy_driver(dp83822_driver);
 
index 53ed3ab..5810315 100644 (file)
 #define DP83869_RGMII_TX_CLK_DELAY_EN          BIT(1)
 #define DP83869_RGMII_RX_CLK_DELAY_EN          BIT(0)
 
+/* RGMIIDCTL */
+#define DP83869_RGMII_CLK_DELAY_SHIFT          4
+#define DP83869_CLK_DELAY_DEF                  7
+
 /* STRAP_STS1 bits */
 #define DP83869_STRAP_OP_MODE_MASK             GENMASK(2, 0)
 #define DP83869_STRAP_STS1_RESERVED            BIT(11)
@@ -78,9 +82,6 @@
 #define DP83869_PHYCR_FIFO_DEPTH_MASK  GENMASK(15, 12)
 #define DP83869_PHYCR_RESERVED_MASK    BIT(11)
 
-/* RGMIIDCTL bits */
-#define DP83869_RGMII_TX_CLK_DELAY_SHIFT       4
-
 /* IO_MUX_CFG bits */
 #define DP83869_IO_MUX_CFG_IO_IMPEDANCE_CTRL   0x1f
 
@@ -108,6 +109,8 @@ enum {
 struct dp83869_private {
        int tx_fifo_depth;
        int rx_fifo_depth;
+       s32 rx_int_delay;
+       s32 tx_int_delay;
        int io_impedance;
        int port_mirroring;
        bool rxctrl_strap_quirk;
@@ -177,11 +180,16 @@ static int dp83869_set_strapped_mode(struct phy_device *phydev)
 }
 
 #if IS_ENABLED(CONFIG_OF_MDIO)
+static const int dp83869_internal_delay[] = {250, 500, 750, 1000, 1250, 1500,
+                                            1750, 2000, 2250, 2500, 2750, 3000,
+                                            3250, 3500, 3750, 4000};
+
 static int dp83869_of_init(struct phy_device *phydev)
 {
        struct dp83869_private *dp83869 = phydev->priv;
        struct device *dev = &phydev->mdio.dev;
        struct device_node *of_node = dev->of_node;
+       int delay_size = ARRAY_SIZE(dp83869_internal_delay);
        int ret;
 
        if (!of_node)
@@ -235,6 +243,20 @@ static int dp83869_of_init(struct phy_device *phydev)
                                 &dp83869->tx_fifo_depth))
                dp83869->tx_fifo_depth = DP83869_PHYCR_FIFO_DEPTH_4_B_NIB;
 
+       dp83869->rx_int_delay = phy_get_internal_delay(phydev, dev,
+                                                      &dp83869_internal_delay[0],
+                                                      delay_size, true);
+       if (dp83869->rx_int_delay < 0)
+               dp83869->rx_int_delay =
+                               dp83869_internal_delay[DP83869_CLK_DELAY_DEF];
+
+       dp83869->tx_int_delay = phy_get_internal_delay(phydev, dev,
+                                                      &dp83869_internal_delay[0],
+                                                      delay_size, false);
+       if (dp83869->tx_int_delay < 0)
+               dp83869->tx_int_delay =
+                               dp83869_internal_delay[DP83869_CLK_DELAY_DEF];
+
        return ret;
 }
 #else
@@ -397,6 +419,31 @@ static int dp83869_config_init(struct phy_device *phydev)
                                     dp83869->clk_output_sel <<
                                     DP83869_IO_MUX_CFG_CLK_O_SEL_SHIFT);
 
+       if (phy_interface_is_rgmii(phydev)) {
+               ret = phy_write_mmd(phydev, DP83869_DEVADDR, DP83869_RGMIIDCTL,
+                                   dp83869->rx_int_delay |
+                       dp83869->tx_int_delay << DP83869_RGMII_CLK_DELAY_SHIFT);
+               if (ret)
+                       return ret;
+
+               val = phy_read_mmd(phydev, DP83869_DEVADDR, DP83869_RGMIICTL);
+               val &= ~(DP83869_RGMII_TX_CLK_DELAY_EN |
+                        DP83869_RGMII_RX_CLK_DELAY_EN);
+
+               if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+                       val |= (DP83869_RGMII_TX_CLK_DELAY_EN |
+                               DP83869_RGMII_RX_CLK_DELAY_EN);
+
+               if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+                       val |= DP83869_RGMII_TX_CLK_DELAY_EN;
+
+               if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+                       val |= DP83869_RGMII_RX_CLK_DELAY_EN;
+
+               ret = phy_write_mmd(phydev, DP83869_DEVADDR, DP83869_RGMIICTL,
+                                   val);
+       }
+
        return ret;
 }
 
index c9ecf3c..bb86ac0 100644 (file)
@@ -2625,12 +2625,12 @@ static struct phy_driver marvell_drivers[] = {
                .name = "Marvell 88E1101",
                /* PHY_GBIT_FEATURES */
                .probe = marvell_probe,
-               .config_init = &marvell_config_init,
-               .config_aneg = &m88e1101_config_aneg,
-               .ack_interrupt = &marvell_ack_interrupt,
-               .config_intr = &marvell_config_intr,
-               .resume = &genphy_resume,
-               .suspend = &genphy_suspend,
+               .config_init = marvell_config_init,
+               .config_aneg = m88e1101_config_aneg,
+               .ack_interrupt = marvell_ack_interrupt,
+               .config_intr = marvell_config_intr,
+               .resume = genphy_resume,
+               .suspend = genphy_suspend,
                .read_page = marvell_read_page,
                .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
@@ -2643,12 +2643,12 @@ static struct phy_driver marvell_drivers[] = {
                .name = "Marvell 88E1112",
                /* PHY_GBIT_FEATURES */
                .probe = marvell_probe,
-               .config_init = &m88e1111_config_init,
-               .config_aneg = &marvell_config_aneg,
-               .ack_interrupt = &marvell_ack_interrupt,
-               .config_intr = &marvell_config_intr,
-               .resume = &genphy_resume,
-               .suspend = &genphy_suspend,
+               .config_init = m88e1111_config_init,
+               .config_aneg = marvell_config_aneg,
+               .ack_interrupt = marvell_ack_interrupt,
+               .config_intr = marvell_config_intr,
+               .resume = genphy_resume,
+               .suspend = genphy_suspend,
                .read_page = marvell_read_page,
                .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
@@ -2663,13 +2663,13 @@ static struct phy_driver marvell_drivers[] = {
                .name = "Marvell 88E1111",
                /* PHY_GBIT_FEATURES */
                .probe = marvell_probe,
-               .config_init = &m88e1111_config_init,
-               .config_aneg = &marvell_config_aneg,
-               .read_status = &marvell_read_status,
-               .ack_interrupt = &marvell_ack_interrupt,
-               .config_intr = &marvell_config_intr,
-               .resume = &genphy_resume,
-               .suspend = &genphy_suspend,
+               .config_init = m88e1111_config_init,
+               .config_aneg = marvell_config_aneg,
+               .read_status = marvell_read_status,
+               .ack_interrupt = marvell_ack_interrupt,
+               .config_intr = marvell_config_intr,
+               .resume = genphy_resume,
+               .suspend = genphy_suspend,
                .read_page = marvell_read_page,
                .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
@@ -2684,12 +2684,12 @@ static struct phy_driver marvell_drivers[] = {
                .name = "Marvell 88E1118",
                /* PHY_GBIT_FEATURES */
                .probe = marvell_probe,
-               .config_init = &m88e1118_config_init,
-               .config_aneg = &m88e1118_config_aneg,
-               .ack_interrupt = &marvell_ack_interrupt,
-               .config_intr = &marvell_config_intr,
-               .resume = &genphy_resume,
-               .suspend = &genphy_suspend,
+               .config_init = m88e1118_config_init,
+               .config_aneg = m88e1118_config_aneg,
+               .ack_interrupt = marvell_ack_interrupt,
+               .config_intr = marvell_config_intr,
+               .resume = genphy_resume,
+               .suspend = genphy_suspend,
                .read_page = marvell_read_page,
                .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
@@ -2701,15 +2701,15 @@ static struct phy_driver marvell_drivers[] = {
                .phy_id_mask = MARVELL_PHY_ID_MASK,
                .name = "Marvell 88E1121R",
                /* PHY_GBIT_FEATURES */
-               .probe = &m88e1121_probe,
-               .config_init = &marvell_config_init,
-               .config_aneg = &m88e1121_config_aneg,
-               .read_status = &marvell_read_status,
-               .ack_interrupt = &marvell_ack_interrupt,
-               .config_intr = &marvell_config_intr,
-               .did_interrupt = &m88e1121_did_interrupt,
-               .resume = &genphy_resume,
-               .suspend = &genphy_suspend,
+               .probe = m88e1121_probe,
+               .config_init = marvell_config_init,
+               .config_aneg = m88e1121_config_aneg,
+               .read_status = marvell_read_status,
+               .ack_interrupt = marvell_ack_interrupt,
+               .config_intr = marvell_config_intr,
+               .did_interrupt = m88e1121_did_interrupt,
+               .resume = genphy_resume,
+               .suspend = genphy_suspend,
                .read_page = marvell_read_page,
                .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
@@ -2724,16 +2724,16 @@ static struct phy_driver marvell_drivers[] = {
                .name = "Marvell 88E1318S",
                /* PHY_GBIT_FEATURES */
                .probe = marvell_probe,
-               .config_init = &m88e1318_config_init,
-               .config_aneg = &m88e1318_config_aneg,
-               .read_status = &marvell_read_status,
-               .ack_interrupt = &marvell_ack_interrupt,
-               .config_intr = &marvell_config_intr,
-               .did_interrupt = &m88e1121_did_interrupt,
-               .get_wol = &m88e1318_get_wol,
-               .set_wol = &m88e1318_set_wol,
-               .resume = &genphy_resume,
-               .suspend = &genphy_suspend,
+               .config_init = m88e1318_config_init,
+               .config_aneg = m88e1318_config_aneg,
+               .read_status = marvell_read_status,
+               .ack_interrupt = marvell_ack_interrupt,
+               .config_intr = marvell_config_intr,
+               .did_interrupt = m88e1121_did_interrupt,
+               .get_wol = m88e1318_get_wol,
+               .set_wol = m88e1318_set_wol,
+               .resume = genphy_resume,
+               .suspend = genphy_suspend,
                .read_page = marvell_read_page,
                .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
@@ -2746,13 +2746,13 @@ static struct phy_driver marvell_drivers[] = {
                .name = "Marvell 88E1145",
                /* PHY_GBIT_FEATURES */
                .probe = marvell_probe,
-               .config_init = &m88e1145_config_init,
-               .config_aneg = &m88e1101_config_aneg,
-               .read_status = &genphy_read_status,
-               .ack_interrupt = &marvell_ack_interrupt,
-               .config_intr = &marvell_config_intr,
-               .resume = &genphy_resume,
-               .suspend = &genphy_suspend,
+               .config_init = m88e1145_config_init,
+               .config_aneg = m88e1101_config_aneg,
+               .read_status = genphy_read_status,
+               .ack_interrupt = marvell_ack_interrupt,
+               .config_intr = marvell_config_intr,
+               .resume = genphy_resume,
+               .suspend = genphy_suspend,
                .read_page = marvell_read_page,
                .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
@@ -2767,12 +2767,12 @@ static struct phy_driver marvell_drivers[] = {
                .name = "Marvell 88E1149R",
                /* PHY_GBIT_FEATURES */
                .probe = marvell_probe,
-               .config_init = &m88e1149_config_init,
-               .config_aneg = &m88e1118_config_aneg,
-               .ack_interrupt = &marvell_ack_interrupt,
-               .config_intr = &marvell_config_intr,
-               .resume = &genphy_resume,
-               .suspend = &genphy_suspend,
+               .config_init = m88e1149_config_init,
+               .config_aneg = m88e1118_config_aneg,
+               .ack_interrupt = marvell_ack_interrupt,
+               .config_intr = marvell_config_intr,
+               .resume = genphy_resume,
+               .suspend = genphy_suspend,
                .read_page = marvell_read_page,
                .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
@@ -2785,12 +2785,12 @@ static struct phy_driver marvell_drivers[] = {
                .name = "Marvell 88E1240",
                /* PHY_GBIT_FEATURES */
                .probe = marvell_probe,
-               .config_init = &m88e1111_config_init,
-               .config_aneg = &marvell_config_aneg,
-               .ack_interrupt = &marvell_ack_interrupt,
-               .config_intr = &marvell_config_intr,
-               .resume = &genphy_resume,
-               .suspend = &genphy_suspend,
+               .config_init = m88e1111_config_init,
+               .config_aneg = marvell_config_aneg,
+               .ack_interrupt = marvell_ack_interrupt,
+               .config_intr = marvell_config_intr,
+               .resume = genphy_resume,
+               .suspend = genphy_suspend,
                .read_page = marvell_read_page,
                .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
@@ -2803,11 +2803,11 @@ static struct phy_driver marvell_drivers[] = {
                .name = "Marvell 88E1116R",
                /* PHY_GBIT_FEATURES */
                .probe = marvell_probe,
-               .config_init = &m88e1116r_config_init,
-               .ack_interrupt = &marvell_ack_interrupt,
-               .config_intr = &marvell_config_intr,
-               .resume = &genphy_resume,
-               .suspend = &genphy_suspend,
+               .config_init = m88e1116r_config_init,
+               .ack_interrupt = marvell_ack_interrupt,
+               .config_intr = marvell_config_intr,
+               .resume = genphy_resume,
+               .suspend = genphy_suspend,
                .read_page = marvell_read_page,
                .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
@@ -2822,17 +2822,17 @@ static struct phy_driver marvell_drivers[] = {
                .name = "Marvell 88E1510",
                .features = PHY_GBIT_FIBRE_FEATURES,
                .flags = PHY_POLL_CABLE_TEST,
-               .probe = &m88e1510_probe,
-               .config_init = &m88e1510_config_init,
-               .config_aneg = &m88e1510_config_aneg,
-               .read_status = &marvell_read_status,
-               .ack_interrupt = &marvell_ack_interrupt,
-               .config_intr = &marvell_config_intr,
-               .did_interrupt = &m88e1121_did_interrupt,
-               .get_wol = &m88e1318_get_wol,
-               .set_wol = &m88e1318_set_wol,
-               .resume = &marvell_resume,
-               .suspend = &marvell_suspend,
+               .probe = m88e1510_probe,
+               .config_init = m88e1510_config_init,
+               .config_aneg = m88e1510_config_aneg,
+               .read_status = marvell_read_status,
+               .ack_interrupt = marvell_ack_interrupt,
+               .config_intr = marvell_config_intr,
+               .did_interrupt = m88e1121_did_interrupt,
+               .get_wol = m88e1318_get_wol,
+               .set_wol = m88e1318_set_wol,
+               .resume = marvell_resume,
+               .suspend = marvell_suspend,
                .read_page = marvell_read_page,
                .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
@@ -2852,14 +2852,14 @@ static struct phy_driver marvell_drivers[] = {
                /* PHY_GBIT_FEATURES */
                .flags = PHY_POLL_CABLE_TEST,
                .probe = m88e1510_probe,
-               .config_init = &marvell_config_init,
-               .config_aneg = &m88e1510_config_aneg,
-               .read_status = &marvell_read_status,
-               .ack_interrupt = &marvell_ack_interrupt,
-               .config_intr = &marvell_config_intr,
-               .did_interrupt = &m88e1121_did_interrupt,
-               .resume = &genphy_resume,
-               .suspend = &genphy_suspend,
+               .config_init = marvell_config_init,
+               .config_aneg = m88e1510_config_aneg,
+               .read_status = marvell_read_status,
+               .ack_interrupt = marvell_ack_interrupt,
+               .config_intr = marvell_config_intr,
+               .did_interrupt = m88e1121_did_interrupt,
+               .resume = genphy_resume,
+               .suspend = genphy_suspend,
                .read_page = marvell_read_page,
                .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
@@ -2878,14 +2878,14 @@ static struct phy_driver marvell_drivers[] = {
                .probe = m88e1510_probe,
                /* PHY_GBIT_FEATURES */
                .flags = PHY_POLL_CABLE_TEST,
-               .config_init = &marvell_config_init,
-               .config_aneg = &m88e1510_config_aneg,
-               .read_status = &marvell_read_status,
-               .ack_interrupt = &marvell_ack_interrupt,
-               .config_intr = &marvell_config_intr,
-               .did_interrupt = &m88e1121_did_interrupt,
-               .resume = &genphy_resume,
-               .suspend = &genphy_suspend,
+               .config_init = marvell_config_init,
+               .config_aneg = m88e1510_config_aneg,
+               .read_status = marvell_read_status,
+               .ack_interrupt = marvell_ack_interrupt,
+               .config_intr = marvell_config_intr,
+               .did_interrupt = m88e1121_did_interrupt,
+               .resume = genphy_resume,
+               .suspend = genphy_suspend,
                .read_page = marvell_read_page,
                .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
@@ -2903,14 +2903,14 @@ static struct phy_driver marvell_drivers[] = {
                .name = "Marvell 88E3016",
                /* PHY_BASIC_FEATURES */
                .probe = marvell_probe,
-               .config_init = &m88e3016_config_init,
-               .aneg_done = &marvell_aneg_done,
-               .read_status = &marvell_read_status,
-               .ack_interrupt = &marvell_ack_interrupt,
-               .config_intr = &marvell_config_intr,
-               .did_interrupt = &m88e1121_did_interrupt,
-               .resume = &genphy_resume,
-               .suspend = &genphy_suspend,
+               .config_init = m88e3016_config_init,
+               .aneg_done = marvell_aneg_done,
+               .read_status = marvell_read_status,
+               .ack_interrupt = marvell_ack_interrupt,
+               .config_intr = marvell_config_intr,
+               .did_interrupt = m88e1121_did_interrupt,
+               .resume = genphy_resume,
+               .suspend = genphy_suspend,
                .read_page = marvell_read_page,
                .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
@@ -2924,14 +2924,14 @@ static struct phy_driver marvell_drivers[] = {
                /* PHY_GBIT_FEATURES */
                .flags = PHY_POLL_CABLE_TEST,
                .probe = m88e6390_probe,
-               .config_init = &marvell_config_init,
-               .config_aneg = &m88e6390_config_aneg,
-               .read_status = &marvell_read_status,
-               .ack_interrupt = &marvell_ack_interrupt,
-               .config_intr = &marvell_config_intr,
-               .did_interrupt = &m88e1121_did_interrupt,
-               .resume = &genphy_resume,
-               .suspend = &genphy_suspend,
+               .config_init = marvell_config_init,
+               .config_aneg = m88e6390_config_aneg,
+               .read_status = marvell_read_status,
+               .ack_interrupt = marvell_ack_interrupt,
+               .config_intr = marvell_config_intr,
+               .did_interrupt = m88e1121_did_interrupt,
+               .resume = genphy_resume,
+               .suspend = genphy_suspend,
                .read_page = marvell_read_page,
                .write_page = marvell_write_page,
                .get_sset_count = marvell_get_sset_count,
@@ -2943,6 +2943,50 @@ static struct phy_driver marvell_drivers[] = {
                .cable_test_tdr_start = marvell_vct5_cable_test_tdr_start,
                .cable_test_get_status = marvell_vct7_cable_test_get_status,
        },
+       {
+               .phy_id = MARVELL_PHY_ID_88E1340S,
+               .phy_id_mask = MARVELL_PHY_ID_MASK,
+               .name = "Marvell 88E1340S",
+               .probe = m88e1510_probe,
+               /* PHY_GBIT_FEATURES */
+               .config_init = marvell_config_init,
+               .config_aneg = m88e1510_config_aneg,
+               .read_status = marvell_read_status,
+               .ack_interrupt = marvell_ack_interrupt,
+               .config_intr = marvell_config_intr,
+               .did_interrupt = m88e1121_did_interrupt,
+               .resume = genphy_resume,
+               .suspend = genphy_suspend,
+               .read_page = marvell_read_page,
+               .write_page = marvell_write_page,
+               .get_sset_count = marvell_get_sset_count,
+               .get_strings = marvell_get_strings,
+               .get_stats = marvell_get_stats,
+               .get_tunable = m88e1540_get_tunable,
+               .set_tunable = m88e1540_set_tunable,
+       },
+       {
+               .phy_id = MARVELL_PHY_ID_88E1548P,
+               .phy_id_mask = MARVELL_PHY_ID_MASK,
+               .name = "Marvell 88E1548P",
+               .probe = m88e1510_probe,
+               .features = PHY_GBIT_FIBRE_FEATURES,
+               .config_init = marvell_config_init,
+               .config_aneg = m88e1510_config_aneg,
+               .read_status = marvell_read_status,
+               .ack_interrupt = marvell_ack_interrupt,
+               .config_intr = marvell_config_intr,
+               .did_interrupt = m88e1121_did_interrupt,
+               .resume = genphy_resume,
+               .suspend = genphy_suspend,
+               .read_page = marvell_read_page,
+               .write_page = marvell_write_page,
+               .get_sset_count = marvell_get_sset_count,
+               .get_strings = marvell_get_strings,
+               .get_stats = marvell_get_stats,
+               .get_tunable = m88e1540_get_tunable,
+               .set_tunable = m88e1540_set_tunable,
+       },
 };
 
 module_phy_driver(marvell_drivers);
@@ -2963,6 +3007,8 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
        { MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK },
        { MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
        { MARVELL_PHY_ID_88E6390, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E1340S, MARVELL_PHY_ID_MASK },
+       { MARVELL_PHY_ID_88E1548P, MARVELL_PHY_ID_MASK },
        { }
 };
 
index d4c2e62..a7610eb 100644 (file)
@@ -80,6 +80,8 @@ enum {
        MV_V2_PORT_CTRL         = 0xf001,
        MV_V2_PORT_CTRL_SWRST   = BIT(15),
        MV_V2_PORT_CTRL_PWRDOWN = BIT(11),
+       MV_V2_PORT_MAC_TYPE_MASK = 0x7,
+       MV_V2_PORT_MAC_TYPE_RATE_MATCH = 0x6,
        /* Temperature control/read registers (88X3310 only) */
        MV_V2_TEMP_CTRL         = 0xf08a,
        MV_V2_TEMP_CTRL_MASK    = 0xc000,
@@ -91,6 +93,7 @@ enum {
 
 struct mv3310_priv {
        u32 firmware_ver;
+       bool rate_match;
 
        struct device *hwmon_dev;
        char *hwmon_name;
@@ -458,7 +461,9 @@ static bool mv3310_has_pma_ngbaset_quirk(struct phy_device *phydev)
 
 static int mv3310_config_init(struct phy_device *phydev)
 {
+       struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
        int err;
+       int val;
 
        /* Check that the PHY interface type is compatible */
        if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
@@ -475,6 +480,12 @@ static int mv3310_config_init(struct phy_device *phydev)
        if (err)
                return err;
 
+       val = phy_read_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL);
+       if (val < 0)
+               return val;
+       priv->rate_match = ((val & MV_V2_PORT_MAC_TYPE_MASK) ==
+                       MV_V2_PORT_MAC_TYPE_RATE_MATCH);
+
        /* Enable EDPD mode - saving 600mW */
        return mv3310_set_edpd(phydev, ETHTOOL_PHY_EDPD_DFLT_TX_MSECS);
 }
@@ -581,6 +592,17 @@ static int mv3310_aneg_done(struct phy_device *phydev)
 
 static void mv3310_update_interface(struct phy_device *phydev)
 {
+       struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
+
+       /* In "XFI with Rate Matching" mode the PHY interface is fixed at
+        * 10Gb. The PHY adapts the rate to actual wire speed with help of
+        * internal 16KB buffer.
+        */
+       if (priv->rate_match) {
+               phydev->interface = PHY_INTERFACE_MODE_10GBASER;
+               return;
+       }
+
        if ((phydev->interface == PHY_INTERFACE_MODE_SGMII ||
             phydev->interface == PHY_INTERFACE_MODE_2500BASEX ||
             phydev->interface == PHY_INTERFACE_MODE_10GBASER) &&
index d9b54c6..033df43 100644 (file)
@@ -17,7 +17,8 @@ static DEFINE_MUTEX(mdio_board_lock);
 /**
  * mdiobus_setup_mdiodev_from_board_info - create and setup MDIO devices
  * from pre-collected board specific MDIO information
- * @mdiodev: MDIO device pointer
+ * @bus: Bus the board_info belongs to
+ * @cb: Callback to create device on bus
  * Context: can sleep
  */
 void mdiobus_setup_mdiodev_from_board_info(struct mii_bus *bus,
index e33d3ea..a2245d4 100644 (file)
@@ -90,7 +90,7 @@ union cvmx_smix_wr_dat {
 
 struct cavium_mdiobus {
        struct mii_bus *mii_bus;
-       u64 register_base;
+       void __iomem *register_base;
        enum cavium_mdiobus_mode mode;
 };
 
@@ -98,20 +98,20 @@ struct cavium_mdiobus {
 
 #include <asm/octeon/octeon.h>
 
-static inline void oct_mdio_writeq(u64 val, u64 addr)
+static inline void oct_mdio_writeq(u64 val, void __iomem *addr)
 {
-       cvmx_write_csr(addr, val);
+       cvmx_write_csr((u64 __force)addr, val);
 }
 
-static inline u64 oct_mdio_readq(u64 addr)
+static inline u64 oct_mdio_readq(void __iomem *addr)
 {
-       return cvmx_read_csr(addr);
+       return cvmx_read_csr((u64 __force)addr);
 }
 #else
 #include <linux/io-64-nonatomic-lo-hi.h>
 
-#define oct_mdio_writeq(val, addr)     writeq(val, (void *)addr)
-#define oct_mdio_readq(addr)           readq((void *)addr)
+#define oct_mdio_writeq(val, addr)     writeq(val, addr)
+#define oct_mdio_readq(addr)           readq(addr)
 #endif
 
 int cavium_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum);
index 6c8960d..10a758f 100644 (file)
@@ -42,25 +42,21 @@ static int mdio_mux_gpio_probe(struct platform_device *pdev)
        struct gpio_descs *gpios;
        int r;
 
-       gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW);
+       gpios = devm_gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW);
        if (IS_ERR(gpios))
                return PTR_ERR(gpios);
 
        s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
-       if (!s) {
-               gpiod_put_array(gpios);
+       if (!s)
                return -ENOMEM;
-       }
 
        s->gpios = gpios;
 
        r = mdio_mux_init(&pdev->dev, pdev->dev.of_node,
                          mdio_mux_gpio_switch_fn, &s->mux_handle, s, NULL);
 
-       if (r != 0) {
-               gpiod_put_array(s->gpios);
+       if (r != 0)
                return r;
-       }
 
        pdev->dev.platform_data = s;
        return 0;
@@ -70,7 +66,6 @@ static int mdio_mux_gpio_remove(struct platform_device *pdev)
 {
        struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev);
        mdio_mux_uninit(s->mux_handle);
-       gpiod_put_array(s->gpios);
        return 0;
 }
 
index 8327382..d1e1009 100644 (file)
@@ -44,8 +44,7 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
                return -ENXIO;
        }
 
-       bus->register_base =
-               (u64)devm_ioremap(&pdev->dev, mdio_phys, regsize);
+       bus->register_base = devm_ioremap(&pdev->dev, mdio_phys, regsize);
        if (!bus->register_base) {
                dev_err(&pdev->dev, "dev_ioremap failed\n");
                return -ENOMEM;
@@ -56,7 +55,7 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
        oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
 
        bus->mii_bus->name = KBUILD_MODNAME;
-       snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%llx", bus->register_base);
+       snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%px", bus->register_base);
        bus->mii_bus->parent = &pdev->dev;
 
        bus->mii_bus->read = cavium_mdiobus_read;
@@ -109,12 +108,6 @@ static struct platform_driver octeon_mdiobus_driver = {
        .remove         = octeon_mdiobus_remove,
 };
 
-void octeon_mdiobus_force_mod_depencency(void)
-{
-       /* Let ethernet drivers force us to be loaded.  */
-}
-EXPORT_SYMBOL(octeon_mdiobus_force_mod_depencency);
-
 module_platform_driver(octeon_mdiobus_driver);
 
 MODULE_DESCRIPTION("Cavium OCTEON MDIO bus driver");
index 2a97938..3d7eda9 100644 (file)
@@ -84,7 +84,7 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
                nexus->buses[i] = bus;
                i++;
 
-               bus->register_base = (u64)nexus->bar0 +
+               bus->register_base = nexus->bar0 +
                        r.start - pci_resource_start(pdev, 0);
 
                smi_en.u64 = 0;
index 6ceee82..46b3370 100644 (file)
@@ -8,32 +8,32 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
 #include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
 #include <linux/of_device.h>
-#include <linux/of_mdio.h>
 #include <linux/of_gpio.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
 #include <linux/reset.h>
 #include <linux/skbuff.h>
+#include <linux/slab.h>
 #include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/mii.h>
-#include <linux/ethtool.h>
-#include <linux/phy.h>
-#include <linux/io.h>
+#include <linux/string.h>
 #include <linux/uaccess.h>
+#include <linux/unistd.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/mdio.h>
@@ -165,79 +165,6 @@ struct mii_bus *mdiobus_alloc_size(size_t size)
 }
 EXPORT_SYMBOL(mdiobus_alloc_size);
 
-static void _devm_mdiobus_free(struct device *dev, void *res)
-{
-       struct mii_bus *bus = *(struct mii_bus **)res;
-
-       if (bus->is_managed_registered && bus->state == MDIOBUS_REGISTERED)
-               mdiobus_unregister(bus);
-
-       mdiobus_free(bus);
-}
-
-static int devm_mdiobus_match(struct device *dev, void *res, void *data)
-{
-       struct mii_bus **r = res;
-
-       if (WARN_ON(!r || !*r))
-               return 0;
-
-       return *r == data;
-}
-
-/**
- * devm_mdiobus_alloc_size - Resource-managed mdiobus_alloc_size()
- * @dev:               Device to allocate mii_bus for
- * @sizeof_priv:       Space to allocate for private structure.
- *
- * Managed mdiobus_alloc_size. mii_bus allocated with this function is
- * automatically freed on driver detach.
- *
- * If an mii_bus allocated with this function needs to be freed separately,
- * devm_mdiobus_free() must be used.
- *
- * RETURNS:
- * Pointer to allocated mii_bus on success, NULL on failure.
- */
-struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv)
-{
-       struct mii_bus **ptr, *bus;
-
-       ptr = devres_alloc(_devm_mdiobus_free, sizeof(*ptr), GFP_KERNEL);
-       if (!ptr)
-               return NULL;
-
-       /* use raw alloc_dr for kmalloc caller tracing */
-       bus = mdiobus_alloc_size(sizeof_priv);
-       if (bus) {
-               *ptr = bus;
-               devres_add(dev, ptr);
-               bus->is_managed = 1;
-       } else {
-               devres_free(ptr);
-       }
-
-       return bus;
-}
-EXPORT_SYMBOL_GPL(devm_mdiobus_alloc_size);
-
-/**
- * devm_mdiobus_free - Resource-managed mdiobus_free()
- * @dev:               Device this mii_bus belongs to
- * @bus:               the mii_bus associated with the device
- *
- * Free mii_bus allocated with devm_mdiobus_alloc_size().
- */
-void devm_mdiobus_free(struct device *dev, struct mii_bus *bus)
-{
-       int rc;
-
-       rc = devres_release(dev, _devm_mdiobus_free,
-                           devm_mdiobus_match, bus);
-       WARN_ON(rc);
-}
-EXPORT_SYMBOL_GPL(devm_mdiobus_free);
-
 /**
  * mdiobus_release - mii_bus device release callback
  * @d: the target struct device that contains the mii_bus
@@ -739,10 +666,24 @@ EXPORT_SYMBOL(mdiobus_free);
  */
 struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
 {
-       struct phy_device *phydev;
+       struct phy_device *phydev = ERR_PTR(-ENODEV);
        int err;
 
-       phydev = get_phy_device(bus, addr, false);
+       switch (bus->probe_capabilities) {
+       case MDIOBUS_NO_CAP:
+       case MDIOBUS_C22:
+               phydev = get_phy_device(bus, addr, false);
+               break;
+       case MDIOBUS_C45:
+               phydev = get_phy_device(bus, addr, true);
+               break;
+       case MDIOBUS_C22_C45:
+               phydev = get_phy_device(bus, addr, false);
+               if (IS_ERR(phydev))
+                       phydev = get_phy_device(bus, addr, true);
+               break;
+       }
+
        if (IS_ERR(phydev))
                return phydev;
 
index c1d345c..0f625a1 100644 (file)
@@ -6,6 +6,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/gpio.h>
 #include <linux/gpio/consumer.h>
@@ -20,7 +21,6 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/unistd.h>
-#include <linux/delay.h>
 
 void mdio_device_free(struct mdio_device *mdiodev)
 {
@@ -150,10 +150,10 @@ static int mdio_probe(struct device *dev)
        struct mdio_driver *mdiodrv = to_mdio_driver(drv);
        int err = 0;
 
-       if (mdiodrv->probe) {
-               /* Deassert the reset signal */
-               mdio_device_reset(mdiodev, 0);
+       /* Deassert the reset signal */
+       mdio_device_reset(mdiodev, 0);
 
+       if (mdiodrv->probe) {
                err = mdiodrv->probe(mdiodev);
                if (err) {
                        /* Assert the reset signal */
@@ -170,19 +170,18 @@ static int mdio_remove(struct device *dev)
        struct device_driver *drv = mdiodev->dev.driver;
        struct mdio_driver *mdiodrv = to_mdio_driver(drv);
 
-       if (mdiodrv->remove) {
+       if (mdiodrv->remove)
                mdiodrv->remove(mdiodev);
 
-               /* Assert the reset signal */
-               mdio_device_reset(mdiodev, 1);
-       }
+       /* Assert the reset signal */
+       mdio_device_reset(mdiodev, 1);
 
        return 0;
 }
 
 /**
  * mdio_driver_register - register an mdio_driver with the MDIO layer
- * @new_driver: new mdio_driver to register
+ * @drv: new mdio_driver to register
  */
 int mdio_driver_register(struct mdio_driver *drv)
 {
diff --git a/drivers/net/phy/mdio_devres.c b/drivers/net/phy/mdio_devres.c
new file mode 100644 (file)
index 0000000..b560e99
--- /dev/null
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/device.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/stddef.h>
+
+struct mdiobus_devres {
+       struct mii_bus *mii;
+};
+
+static void devm_mdiobus_free(struct device *dev, void *this)
+{
+       struct mdiobus_devres *dr = this;
+
+       mdiobus_free(dr->mii);
+}
+
+/**
+ * devm_mdiobus_alloc_size - Resource-managed mdiobus_alloc_size()
+ * @dev:               Device to allocate mii_bus for
+ * @sizeof_priv:       Space to allocate for private structure
+ *
+ * Managed mdiobus_alloc_size. mii_bus allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Pointer to allocated mii_bus on success, NULL on out-of-memory error.
+ */
+struct mii_bus *devm_mdiobus_alloc_size(struct device *dev, int sizeof_priv)
+{
+       struct mdiobus_devres *dr;
+
+       dr = devres_alloc(devm_mdiobus_free, sizeof(*dr), GFP_KERNEL);
+       if (!dr)
+               return NULL;
+
+       dr->mii = mdiobus_alloc_size(sizeof_priv);
+       if (!dr->mii) {
+               devres_free(dr);
+               return NULL;
+       }
+
+       devres_add(dev, dr);
+       return dr->mii;
+}
+EXPORT_SYMBOL(devm_mdiobus_alloc_size);
+
+static void devm_mdiobus_unregister(struct device *dev, void *this)
+{
+       struct mdiobus_devres *dr = this;
+
+       mdiobus_unregister(dr->mii);
+}
+
+static int mdiobus_devres_match(struct device *dev,
+                               void *this, void *match_data)
+{
+       struct mdiobus_devres *res = this;
+       struct mii_bus *mii = match_data;
+
+       return mii == res->mii;
+}
+
+/**
+ * __devm_mdiobus_register - Resource-managed variant of mdiobus_register()
+ * @dev:       Device to register mii_bus for
+ * @bus:       MII bus structure to register
+ * @owner:     Owning module
+ *
+ * Returns 0 on success, negative error number on failure.
+ */
+int __devm_mdiobus_register(struct device *dev, struct mii_bus *bus,
+                           struct module *owner)
+{
+       struct mdiobus_devres *dr;
+       int ret;
+
+       if (WARN_ON(!devres_find(dev, devm_mdiobus_free,
+                                mdiobus_devres_match, bus)))
+               return -EINVAL;
+
+       dr = devres_alloc(devm_mdiobus_unregister, sizeof(*dr), GFP_KERNEL);
+       if (!dr)
+               return -ENOMEM;
+
+       ret = __mdiobus_register(bus, owner);
+       if (ret) {
+               devres_free(dr);
+               return ret;
+       }
+
+       dr->mii = bus;
+       devres_add(dev, dr);
+       return 0;
+}
+EXPORT_SYMBOL(__devm_mdiobus_register);
+
+#if IS_ENABLED(CONFIG_OF_MDIO)
+/**
+ * devm_of_mdiobus_register - Resource managed variant of of_mdiobus_register()
+ * @dev:       Device to register mii_bus for
+ * @mdio:      MII bus structure to register
+ * @np:                Device node to parse
+ */
+int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+                            struct device_node *np)
+{
+       struct mdiobus_devres *dr;
+       int ret;
+
+       if (WARN_ON(!devres_find(dev, devm_mdiobus_free,
+                                mdiobus_devres_match, mdio)))
+               return -EINVAL;
+
+       dr = devres_alloc(devm_mdiobus_unregister, sizeof(*dr), GFP_KERNEL);
+       if (!dr)
+               return -ENOMEM;
+
+       ret = of_mdiobus_register(mdio, np);
+       if (ret) {
+               devres_free(dr);
+               return ret;
+       }
+
+       dr->mii = mdio;
+       devres_add(dev, dr);
+       return 0;
+}
+EXPORT_SYMBOL(devm_of_mdiobus_register);
+#endif /* CONFIG_OF_MDIO */
+
+MODULE_LICENSE("GPL");
index 10af42c..d8e22a4 100644 (file)
@@ -8,3 +8,7 @@ mscc-objs := mscc_main.o
 ifdef CONFIG_MACSEC
 mscc-objs += mscc_macsec.o
 endif
+
+ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
+mscc-objs += mscc_ptp.o
+endif
index fbcee5f..9481bce 100644 (file)
@@ -133,6 +133,7 @@ enum rgmii_clock_delay {
  * in the same package.
  */
 #define MSCC_PHY_PAGE_EXTENDED_GPIO      0x0010 /* Extended reg - GPIO */
+#define MSCC_PHY_PAGE_1588               0x1588 /* PTP (1588) */
 #define MSCC_PHY_PAGE_TEST               0x2a30 /* Test reg */
 #define MSCC_PHY_PAGE_TR                 0x52b5 /* Token ring registers */
 
@@ -252,6 +253,7 @@ enum rgmii_clock_delay {
 /* Test page Registers */
 #define MSCC_PHY_TEST_PAGE_5             5
 #define MSCC_PHY_TEST_PAGE_8             8
+#define TR_CLK_DISABLE                   0x8000
 #define MSCC_PHY_TEST_PAGE_9             9
 #define MSCC_PHY_TEST_PAGE_20            20
 #define MSCC_PHY_TEST_PAGE_24            24
@@ -372,6 +374,35 @@ struct vsc8531_private {
        unsigned long ingr_flows;
        unsigned long egr_flows;
 #endif
+
+       struct mii_timestamper mii_ts;
+
+       bool input_clk_init;
+       struct vsc85xx_ptp *ptp;
+       /* LOAD/SAVE GPIO pin, used for retrieving or setting time to the PHC. */
+       struct gpio_desc *load_save;
+
+       /* For multiple port PHYs; the MDIO address of the base PHY in the
+        * pair of two PHYs that share a 1588 engine. PHY0 and PHY2 are coupled.
+        * PHY1 and PHY3 as well. PHY0 and PHY1 are base PHYs for their
+        * respective pair.
+        */
+       unsigned int ts_base_addr;
+       u8 ts_base_phy;
+
+       /* ts_lock: used for per-PHY timestamping operations.
+        * phc_lock: used for per-PHY PHC opertations.
+        */
+       struct mutex ts_lock;
+       struct mutex phc_lock;
+};
+
+/* Shared structure between the PHYs of the same package.
+ * gpio_lock: used for PHC operations. Common for all PHYs as the load/save GPIO
+ * is shared.
+ */
+struct vsc85xx_shared_private {
+       struct mutex gpio_lock;
 };
 
 #if IS_ENABLED(CONFIG_OF_MDIO)
@@ -398,4 +429,36 @@ static inline void vsc8584_config_macsec_intr(struct phy_device *phydev)
 }
 #endif
 
+#if IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)
+void vsc85xx_link_change_notify(struct phy_device *phydev);
+void vsc8584_config_ts_intr(struct phy_device *phydev);
+int vsc8584_ptp_init(struct phy_device *phydev);
+int vsc8584_ptp_probe_once(struct phy_device *phydev);
+int vsc8584_ptp_probe(struct phy_device *phydev);
+irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev);
+#else
+static inline void vsc85xx_link_change_notify(struct phy_device *phydev)
+{
+}
+static inline void vsc8584_config_ts_intr(struct phy_device *phydev)
+{
+}
+static inline int vsc8584_ptp_init(struct phy_device *phydev)
+{
+       return 0;
+}
+static inline int vsc8584_ptp_probe_once(struct phy_device *phydev)
+{
+       return 0;
+}
+static inline int vsc8584_ptp_probe(struct phy_device *phydev)
+{
+       return 0;
+}
+static inline irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev)
+{
+       return IRQ_NONE;
+}
+#endif
+
 #endif /* _MSCC_PHY_H_ */
index 3803e82..399e803 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Driver for Microsemi VSC85xx PHYs
  *
- * Copyright (C) 2019 Microsemi Corporation
+ * Copyright (C) 2020 Microsemi Corporation
  */
 
 #ifndef _MSCC_PHY_FC_BUFFER_H_
index 59b6837..8dd38dc 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Driver for Microsemi VSC85xx PHYs
  *
- * Copyright (c) 2017 Microsemi Corporation
+ * Copyright (c) 2020 Microsemi Corporation
  */
 
 #ifndef _MSCC_PHY_LINE_MAC_H_
index b4d3dc4..1d4c012 100644 (file)
@@ -1,16 +1,16 @@
 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
 /*
- * Driver for Microsemi VSC85xx PHYs
+ * Driver for Microsemi VSC85xx PHYs - MACsec support
  *
- * Author: Nagaraju Lakkaraju
+ * Author: Antoine Tenart
  * License: Dual MIT/GPL
- * Copyright (c) 2016 Microsemi Corporation
+ * Copyright (c) 2020 Microsemi Corporation
  */
 
 #include <linux/phy.h>
 #include <dt-bindings/net/mscc-phy-vsc8531.h>
 
-#include <crypto/skcipher.h>
+#include <crypto/aes.h>
 
 #include <net/macsec.h>
 
@@ -285,7 +285,9 @@ static void vsc8584_macsec_mac_init(struct phy_device *phydev,
                                 MSCC_MAC_CFG_PKTINF_CFG_STRIP_PREAMBLE_ENA |
                                 MSCC_MAC_CFG_PKTINF_CFG_INSERT_PREAMBLE_ENA |
                                 (bank == HOST_MAC ?
-                                 MSCC_MAC_CFG_PKTINF_CFG_ENABLE_TX_PADDING : 0));
+                                 MSCC_MAC_CFG_PKTINF_CFG_ENABLE_TX_PADDING : 0) |
+                                (IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING) ?
+                                 MSCC_MAC_CFG_PKTINF_CFG_MACSEC_BYPASS_NUM_PTP_STALL_CLKS(0x8) : 0));
 
        val = vsc8584_macsec_phy_read(phydev, bank, MSCC_MAC_CFG_MODE_CFG);
        val &= ~MSCC_MAC_CFG_MODE_CFG_DISABLE_DIC;
@@ -383,21 +385,23 @@ static void vsc8584_macsec_flow(struct phy_device *phydev,
        }
 
        if (bank == MACSEC_INGR && flow->match.sci && flow->rx_sa->sc->sci) {
+               u64 sci = (__force u64)flow->rx_sa->sc->sci;
+
                match |= MSCC_MS_SAM_MISC_MATCH_TCI(BIT(3));
                mask |= MSCC_MS_SAM_MASK_TCI_MASK(BIT(3)) |
                        MSCC_MS_SAM_MASK_SCI_MASK;
 
                vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_LO(idx),
-                                        lower_32_bits(flow->rx_sa->sc->sci));
+                                        lower_32_bits(sci));
                vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MATCH_SCI_HI(idx),
-                                        upper_32_bits(flow->rx_sa->sc->sci));
+                                        upper_32_bits(sci));
        }
 
        if (flow->match.etype) {
                mask |= MSCC_MS_SAM_MASK_MAC_ETYPE_MASK;
 
                vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_SAM_MAC_SA_MATCH_HI(idx),
-                                        MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE(htons(flow->etype)));
+                                        MSCC_MS_SAM_MAC_SA_MATCH_HI_ETYPE((__force u32)htons(flow->etype)));
        }
 
        match |= MSCC_MS_SAM_MISC_MATCH_PRIORITY(flow->priority);
@@ -500,39 +504,17 @@ static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
 static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN],
                                     u16 key_len, u8 hkey[16])
 {
-       struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
-       struct skcipher_request *req = NULL;
-       struct scatterlist src, dst;
-       DECLARE_CRYPTO_WAIT(wait);
-       u32 input[4] = {0};
+       const u8 input[AES_BLOCK_SIZE] = {0};
+       struct crypto_aes_ctx ctx;
        int ret;
 
-       if (IS_ERR(tfm))
-               return PTR_ERR(tfm);
-
-       req = skcipher_request_alloc(tfm, GFP_KERNEL);
-       if (!req) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
-                                     CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done,
-                                     &wait);
-       ret = crypto_skcipher_setkey(tfm, key, key_len);
-       if (ret < 0)
-               goto out;
-
-       sg_init_one(&src, input, 16);
-       sg_init_one(&dst, hkey, 16);
-       skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
-
-       ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+       ret = aes_expandkey(&ctx, key, key_len);
+       if (ret)
+               return ret;
 
-out:
-       skcipher_request_free(req);
-       crypto_free_skcipher(tfm);
-       return ret;
+       aes_encrypt(&ctx, hkey, input);
+       memzero_explicit(&ctx, sizeof(ctx));
+       return 0;
 }
 
 static int vsc8584_macsec_transformation(struct phy_device *phydev,
@@ -543,7 +525,7 @@ static int vsc8584_macsec_transformation(struct phy_device *phydev,
        int i, ret, index = flow->index;
        u32 rec = 0, control = 0;
        u8 hkey[16];
-       sci_t sci;
+       u64 sci;
 
        ret = vsc8584_macsec_derive_key(flow->key, priv->secy->key_len, hkey);
        if (ret)
@@ -601,7 +583,7 @@ static int vsc8584_macsec_transformation(struct phy_device *phydev,
                                         priv->secy->replay_window);
 
        /* Set the input vectors */
-       sci = bank == MACSEC_INGR ? flow->rx_sa->sc->sci : priv->secy->sci;
+       sci = (__force u64)(bank == MACSEC_INGR ? flow->rx_sa->sc->sci : priv->secy->sci);
        vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
                                 lower_32_bits(sci));
        vsc8584_macsec_phy_write(phydev, bank, MSCC_MS_XFORM_REC(index, rec++),
index d751f29..9c6d25e 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Driver for Microsemi VSC85xx PHYs
  *
- * Copyright (c) 2018 Microsemi Corporation
+ * Copyright (c) 2020 Microsemi Corporation
  */
 
 #ifndef _MSCC_PHY_MACSEC_H_
index 5ddc44f..a4fbf3a 100644 (file)
@@ -629,7 +629,7 @@ static int vsc8531_pre_init_seq_set(struct phy_device *phydev)
        if (rc < 0)
                return rc;
        rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_TEST,
-                             MSCC_PHY_TEST_PAGE_8, 0x8000, 0x8000);
+                             MSCC_PHY_TEST_PAGE_8, TR_CLK_DISABLE, TR_CLK_DISABLE);
        if (rc < 0)
                return rc;
 
@@ -1026,7 +1026,7 @@ static int vsc8574_config_pre_init(struct phy_device *phydev)
        phy_base_write(phydev, MSCC_PHY_TEST_PAGE_5, 0x1b20);
 
        reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
-       reg |= 0x8000;
+       reg |= TR_CLK_DISABLE;
        phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
 
        phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
@@ -1046,7 +1046,7 @@ static int vsc8574_config_pre_init(struct phy_device *phydev)
        phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
 
        reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
-       reg &= ~0x8000;
+       reg &= ~TR_CLK_DISABLE;
        phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
 
        phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
@@ -1196,7 +1196,7 @@ static int vsc8584_config_pre_init(struct phy_device *phydev)
        phy_base_write(phydev, MSCC_PHY_TEST_PAGE_5, 0x1f20);
 
        reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
-       reg |= 0x8000;
+       reg |= TR_CLK_DISABLE;
        phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
 
        phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
@@ -1225,7 +1225,7 @@ static int vsc8584_config_pre_init(struct phy_device *phydev)
        phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
 
        reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
-       reg &= ~0x8000;
+       reg &= ~TR_CLK_DISABLE;
        phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
 
        phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
@@ -1288,7 +1288,7 @@ static void vsc8584_get_base_addr(struct phy_device *phydev)
        struct vsc8531_private *vsc8531 = phydev->priv;
        u16 val, addr;
 
-       mutex_lock(&phydev->mdio.bus->mdio_lock);
+       phy_lock_mdio_bus(phydev);
        __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
 
        addr = __phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_4);
@@ -1297,12 +1297,28 @@ static void vsc8584_get_base_addr(struct phy_device *phydev)
        val = __phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL);
 
        __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
-       mutex_unlock(&phydev->mdio.bus->mdio_lock);
+       phy_unlock_mdio_bus(phydev);
 
-       if (val & PHY_ADDR_REVERSED)
+       /* In the package, there are two pairs of PHYs (PHY0 + PHY2 and
+        * PHY1 + PHY3). The first PHY of each pair (PHY0 and PHY1) is
+        * the base PHY for timestamping operations.
+        */
+       vsc8531->ts_base_addr = phydev->mdio.addr;
+       vsc8531->ts_base_phy = addr;
+
+       if (val & PHY_ADDR_REVERSED) {
                vsc8531->base_addr = phydev->mdio.addr + addr;
-       else
+               if (addr > 1) {
+                       vsc8531->ts_base_addr += 2;
+                       vsc8531->ts_base_phy += 2;
+               }
+       } else {
                vsc8531->base_addr = phydev->mdio.addr - addr;
+               if (addr > 1) {
+                       vsc8531->ts_base_addr -= 2;
+                       vsc8531->ts_base_phy -= 2;
+               }
+       }
 
        vsc8531->addr = addr;
 }
@@ -1315,7 +1331,7 @@ static int vsc8584_config_init(struct phy_device *phydev)
 
        phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
 
-       mutex_lock(&phydev->mdio.bus->mdio_lock);
+       phy_lock_mdio_bus(phydev);
 
        /* Some parts of the init sequence are identical for every PHY in the
         * package. Some parts are modifying the GPIO register bank which is a
@@ -1359,8 +1375,10 @@ static int vsc8584_config_init(struct phy_device *phydev)
                        goto err;
        }
 
-       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
-                      MSCC_PHY_PAGE_EXTENDED_GPIO);
+       ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+                            MSCC_PHY_PAGE_EXTENDED_GPIO);
+       if (ret)
+               goto err;
 
        val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
        val &= ~MAC_CFG_MASK;
@@ -1379,6 +1397,11 @@ static int vsc8584_config_init(struct phy_device *phydev)
        if (ret)
                goto err;
 
+       ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+                            MSCC_PHY_PAGE_STANDARD);
+       if (ret)
+               goto err;
+
        if (!phy_interface_is_rgmii(phydev)) {
                val = PROC_CMD_MCB_ACCESS_MAC_CONF | PROC_CMD_RST_CONF_PORT |
                        PROC_CMD_READ_MOD_WRITE_PORT;
@@ -1412,13 +1435,15 @@ static int vsc8584_config_init(struct phy_device *phydev)
        if (ret)
                goto err;
 
-       mutex_unlock(&phydev->mdio.bus->mdio_lock);
+       phy_unlock_mdio_bus(phydev);
 
        ret = vsc8584_macsec_init(phydev);
        if (ret)
                return ret;
 
-       phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+       ret = vsc8584_ptp_init(phydev);
+       if (ret)
+               return ret;
 
        val = phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_1);
        val &= ~(MEDIA_OP_MODE_MASK | VSC8584_MAC_IF_SELECTION_MASK);
@@ -1449,18 +1474,26 @@ static int vsc8584_config_init(struct phy_device *phydev)
        return 0;
 
 err:
-       mutex_unlock(&phydev->mdio.bus->mdio_lock);
+       phy_unlock_mdio_bus(phydev);
        return ret;
 }
 
 static irqreturn_t vsc8584_handle_interrupt(struct phy_device *phydev)
 {
+       irqreturn_t ret;
        int irq_status;
 
        irq_status = phy_read(phydev, MII_VSC85XX_INT_STATUS);
-       if (irq_status < 0 || !(irq_status & MII_VSC85XX_INT_MASK_MASK))
+       if (irq_status < 0)
                return IRQ_NONE;
 
+       /* Timestamping IRQ does not set a bit in the global INT_STATUS, so
+        * irq_status would be 0.
+        */
+       ret = vsc8584_handle_ts_interrupt(phydev);
+       if (!(irq_status & MII_VSC85XX_INT_MASK_MASK))
+               return ret;
+
        if (irq_status & MII_VSC85XX_INT_MASK_EXT)
                vsc8584_handle_macsec_interrupt(phydev);
 
@@ -1727,7 +1760,7 @@ static int vsc8514_config_init(struct phy_device *phydev)
 
        phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
 
-       mutex_lock(&phydev->mdio.bus->mdio_lock);
+       phy_lock_mdio_bus(phydev);
 
        /* Some parts of the init sequence are identical for every PHY in the
         * package. Some parts are modifying the GPIO register bank which is a
@@ -1743,15 +1776,21 @@ static int vsc8514_config_init(struct phy_device *phydev)
        if (phy_package_init_once(phydev))
                vsc8514_config_pre_init(phydev);
 
-       phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
-                      MSCC_PHY_PAGE_EXTENDED_GPIO);
+       ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+                            MSCC_PHY_PAGE_EXTENDED_GPIO);
+       if (ret)
+               goto err;
 
        val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
 
        val &= ~MAC_CFG_MASK;
        val |= MAC_CFG_QSGMII;
        ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val);
+       if (ret)
+               goto err;
 
+       ret = phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+                            MSCC_PHY_PAGE_STANDARD);
        if (ret)
                goto err;
 
@@ -1815,14 +1854,14 @@ static int vsc8514_config_init(struct phy_device *phydev)
                reg = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET,
                                                PHY_S6G_PLL_STATUS);
                if (reg == 0xffffffff) {
-                       mutex_unlock(&phydev->mdio.bus->mdio_lock);
+                       phy_unlock_mdio_bus(phydev);
                        return -EIO;
                }
 
        } while (time_before(jiffies, deadline) && (reg & BIT(12)));
 
        if (reg & BIT(12)) {
-               mutex_unlock(&phydev->mdio.bus->mdio_lock);
+               phy_unlock_mdio_bus(phydev);
                return -ETIMEDOUT;
        }
 
@@ -1842,23 +1881,18 @@ static int vsc8514_config_init(struct phy_device *phydev)
                reg = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET,
                                                PHY_S6G_IB_STATUS0);
                if (reg == 0xffffffff) {
-                       mutex_unlock(&phydev->mdio.bus->mdio_lock);
+                       phy_unlock_mdio_bus(phydev);
                        return -EIO;
                }
 
        } while (time_before(jiffies, deadline) && !(reg & BIT(8)));
 
        if (!(reg & BIT(8))) {
-               mutex_unlock(&phydev->mdio.bus->mdio_lock);
+               phy_unlock_mdio_bus(phydev);
                return -ETIMEDOUT;
        }
 
-       mutex_unlock(&phydev->mdio.bus->mdio_lock);
-
-       ret = phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
-
-       if (ret)
-               return ret;
+       phy_unlock_mdio_bus(phydev);
 
        ret = phy_modify(phydev, MSCC_PHY_EXT_PHY_CNTL_1, MEDIA_OP_MODE_MASK,
                         MEDIA_OP_MODE_COPPER << MEDIA_OP_MODE_POS);
@@ -1880,7 +1914,7 @@ static int vsc8514_config_init(struct phy_device *phydev)
        return ret;
 
 err:
-       mutex_unlock(&phydev->mdio.bus->mdio_lock);
+       phy_unlock_mdio_bus(phydev);
        return ret;
 }
 
@@ -1900,6 +1934,7 @@ static int vsc85xx_config_intr(struct phy_device *phydev)
 
        if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
                vsc8584_config_macsec_intr(phydev);
+               vsc8584_config_ts_intr(phydev);
 
                rc = phy_write(phydev, MII_VSC85XX_INT_MASK,
                               MII_VSC85XX_INT_MASK_MASK);
@@ -1999,6 +2034,7 @@ static int vsc8584_probe(struct phy_device *phydev)
        u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY,
           VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY,
           VSC8531_DUPLEX_COLLISION};
+       int ret;
 
        if ((phydev->phy_id & MSCC_DEV_REV_MASK) != VSC8584_REVB) {
                dev_err(&phydev->mdio.dev, "Only VSC8584 revB is supported.\n");
@@ -2012,8 +2048,8 @@ static int vsc8584_probe(struct phy_device *phydev)
        phydev->priv = vsc8531;
 
        vsc8584_get_base_addr(phydev);
-       devm_phy_package_join(&phydev->mdio.dev, phydev,
-                             vsc8531->base_addr, 0);
+       devm_phy_package_join(&phydev->mdio.dev, phydev, vsc8531->base_addr,
+                             sizeof(struct vsc85xx_shared_private));
 
        vsc8531->nleds = 4;
        vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
@@ -2024,6 +2060,16 @@ static int vsc8584_probe(struct phy_device *phydev)
        if (!vsc8531->stats)
                return -ENOMEM;
 
+       if (phy_package_probe_once(phydev)) {
+               ret = vsc8584_ptp_probe_once(phydev);
+               if (ret)
+                       return ret;
+       }
+
+       ret = vsc8584_ptp_probe(phydev);
+       if (ret)
+               return ret;
+
        return vsc85xx_dt_led_modes_get(phydev, default_mode);
 }
 
@@ -2403,6 +2449,7 @@ static struct phy_driver vsc85xx_driver[] = {
        .get_sset_count = &vsc85xx_get_sset_count,
        .get_strings    = &vsc85xx_get_strings,
        .get_stats      = &vsc85xx_get_stats,
+       .link_change_notify = &vsc85xx_link_change_notify,
 }
 
 };
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
new file mode 100644 (file)
index 0000000..b97ee79
--- /dev/null
@@ -0,0 +1,1590 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Driver for Microsemi VSC85xx PHYs - timestamping and PHC support
+ *
+ * Authors: Quentin Schulz & Antoine Tenart
+ * License: Dual MIT/GPL
+ * Copyright (c) 2020 Microsemi Corporation
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/ip.h>
+#include <linux/net_tstamp.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/ptp_classify.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/udp.h>
+#include <asm/unaligned.h>
+
+#include "mscc.h"
+#include "mscc_ptp.h"
+
+/* Two PHYs share the same 1588 processor and it's to be entirely configured
+ * through the base PHY of this processor.
+ */
+/* phydev->bus->mdio_lock should be locked when using this function */
+static int phy_ts_base_write(struct phy_device *phydev, u32 regnum, u16 val)
+{
+       struct vsc8531_private *priv = phydev->priv;
+
+       WARN_ON_ONCE(!mutex_is_locked(&phydev->mdio.bus->mdio_lock));
+       return __mdiobus_write(phydev->mdio.bus, priv->ts_base_addr, regnum,
+                              val);
+}
+
+/* phydev->bus->mdio_lock should be locked when using this function */
+static int phy_ts_base_read(struct phy_device *phydev, u32 regnum)
+{
+       struct vsc8531_private *priv = phydev->priv;
+
+       WARN_ON_ONCE(!mutex_is_locked(&phydev->mdio.bus->mdio_lock));
+       return __mdiobus_read(phydev->mdio.bus, priv->ts_base_addr, regnum);
+}
+
+enum ts_blk_hw {
+       INGRESS_ENGINE_0,
+       EGRESS_ENGINE_0,
+       INGRESS_ENGINE_1,
+       EGRESS_ENGINE_1,
+       INGRESS_ENGINE_2,
+       EGRESS_ENGINE_2,
+       PROCESSOR_0,
+       PROCESSOR_1,
+};
+
+enum ts_blk {
+       INGRESS,
+       EGRESS,
+       PROCESSOR,
+};
+
+static u32 vsc85xx_ts_read_csr(struct phy_device *phydev, enum ts_blk blk,
+                              u16 addr)
+{
+       struct vsc8531_private *priv = phydev->priv;
+       bool base_port = phydev->mdio.addr == priv->ts_base_addr;
+       u32 val, cnt = 0;
+       enum ts_blk_hw blk_hw;
+
+       switch (blk) {
+       case INGRESS:
+               blk_hw = base_port ? INGRESS_ENGINE_0 : INGRESS_ENGINE_1;
+               break;
+       case EGRESS:
+               blk_hw = base_port ? EGRESS_ENGINE_0 : EGRESS_ENGINE_1;
+               break;
+       case PROCESSOR:
+       default:
+               blk_hw = base_port ? PROCESSOR_0 : PROCESSOR_1;
+               break;
+       }
+
+       phy_lock_mdio_bus(phydev);
+
+       phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588);
+
+       phy_ts_base_write(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL, BIU_ADDR_EXE |
+                         BIU_ADDR_READ | BIU_BLK_ID(blk_hw) |
+                         BIU_CSR_ADDR(addr));
+
+       do {
+               val = phy_ts_base_read(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL);
+       } while (!(val & BIU_ADDR_EXE) && cnt++ < BIU_ADDR_CNT_MAX);
+
+       val = phy_ts_base_read(phydev, MSCC_PHY_TS_CSR_DATA_MSB);
+       val <<= 16;
+       val |= phy_ts_base_read(phydev, MSCC_PHY_TS_CSR_DATA_LSB);
+
+       phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+       phy_unlock_mdio_bus(phydev);
+
+       return val;
+}
+
+static void vsc85xx_ts_write_csr(struct phy_device *phydev, enum ts_blk blk,
+                                u16 addr, u32 val)
+{
+       struct vsc8531_private *priv = phydev->priv;
+       bool base_port = phydev->mdio.addr == priv->ts_base_addr;
+       u32 reg, bypass, cnt = 0, lower = val & 0xffff, upper = val >> 16;
+       bool cond = (addr == MSCC_PHY_PTP_LTC_CTRL ||
+                    addr == MSCC_PHY_1588_INGR_VSC85XX_INT_MASK ||
+                    addr == MSCC_PHY_1588_VSC85XX_INT_MASK ||
+                    addr == MSCC_PHY_1588_INGR_VSC85XX_INT_STATUS ||
+                    addr == MSCC_PHY_1588_VSC85XX_INT_STATUS) &&
+                   blk == PROCESSOR;
+       enum ts_blk_hw blk_hw;
+
+       switch (blk) {
+       case INGRESS:
+               blk_hw = base_port ? INGRESS_ENGINE_0 : INGRESS_ENGINE_1;
+               break;
+       case EGRESS:
+               blk_hw = base_port ? EGRESS_ENGINE_0 : EGRESS_ENGINE_1;
+               break;
+       case PROCESSOR:
+       default:
+               blk_hw = base_port ? PROCESSOR_0 : PROCESSOR_1;
+               break;
+       }
+
+       phy_lock_mdio_bus(phydev);
+
+       bypass = phy_ts_base_read(phydev, MSCC_PHY_BYPASS_CONTROL);
+
+       phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588);
+
+       if (!cond || (cond && upper))
+               phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_MSB, upper);
+
+       phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_LSB, lower);
+
+       phy_ts_base_write(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL, BIU_ADDR_EXE |
+                         BIU_ADDR_WRITE | BIU_BLK_ID(blk_hw) |
+                         BIU_CSR_ADDR(addr));
+
+       do {
+               reg = phy_ts_base_read(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL);
+       } while (!(reg & BIU_ADDR_EXE) && cnt++ < BIU_ADDR_CNT_MAX);
+
+       phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+       if (cond && upper)
+               phy_ts_base_write(phydev, MSCC_PHY_BYPASS_CONTROL, bypass);
+
+       phy_unlock_mdio_bus(phydev);
+}
+
+/* Pick bytes from PTP header */
+#define PTP_HEADER_TRNSP_MSG           26
+#define PTP_HEADER_DOMAIN_NUM          25
+#define PTP_HEADER_BYTE_8_31(x)                (31 - (x))
+#define MAC_ADDRESS_BYTE(x)            ((x) + (35 - ETH_ALEN + 1))
+
+static int vsc85xx_ts_fsb_init(struct phy_device *phydev)
+{
+       u8 sig_sel[16] = {};
+       signed char i, pos = 0;
+
+       /* Seq ID is 2B long and starts at 30th byte */
+       for (i = 1; i >= 0; i--)
+               sig_sel[pos++] = PTP_HEADER_BYTE_8_31(30 + i);
+
+       /* DomainNum */
+       sig_sel[pos++] = PTP_HEADER_DOMAIN_NUM;
+
+       /* MsgType */
+       sig_sel[pos++] = PTP_HEADER_TRNSP_MSG;
+
+       /* MAC address is 6B long */
+       for (i = ETH_ALEN - 1; i >= 0; i--)
+               sig_sel[pos++] = MAC_ADDRESS_BYTE(i);
+
+       /* Fill the last bytes of the signature to reach a 16B signature */
+       for (; pos < ARRAY_SIZE(sig_sel); pos++)
+               sig_sel[pos] = PTP_HEADER_TRNSP_MSG;
+
+       for (i = 0; i <= 2; i++) {
+               u32 val = 0;
+
+               for (pos = i * 5 + 4; pos >= i * 5; pos--)
+                       val = (val << 6) | sig_sel[pos];
+
+               vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_REG(i),
+                                    val);
+       }
+
+       vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_REG(3),
+                            sig_sel[15]);
+
+       return 0;
+}
+
+static const u32 vsc85xx_egr_latency[] = {
+       /* Copper Egress */
+       1272, /* 1000Mbps */
+       12516, /* 100Mbps */
+       125444, /* 10Mbps */
+       /* Fiber Egress */
+       1277, /* 1000Mbps */
+       12537, /* 100Mbps */
+};
+
+static const u32 vsc85xx_egr_latency_macsec[] = {
+       /* Copper Egress ON */
+       3496, /* 1000Mbps */
+       34760, /* 100Mbps */
+       347844, /* 10Mbps */
+       /* Fiber Egress ON */
+       3502, /* 1000Mbps */
+       34780, /* 100Mbps */
+};
+
+static const u32 vsc85xx_ingr_latency[] = {
+       /* Copper Ingress */
+       208, /* 1000Mbps */
+       304, /* 100Mbps */
+       2023, /* 10Mbps */
+       /* Fiber Ingress */
+       98, /* 1000Mbps */
+       197, /* 100Mbps */
+};
+
+static const u32 vsc85xx_ingr_latency_macsec[] = {
+       /* Copper Ingress */
+       2408, /* 1000Mbps */
+       22300, /* 100Mbps */
+       222009, /* 10Mbps */
+       /* Fiber Ingress */
+       2299, /* 1000Mbps */
+       22192, /* 100Mbps */
+};
+
+static void vsc85xx_ts_set_latencies(struct phy_device *phydev)
+{
+       u32 val, ingr_latency, egr_latency;
+       u8 idx;
+
+       /* No need to set latencies of packets if the PHY is not connected */
+       if (!phydev->link)
+               return;
+
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_STALL_LATENCY,
+                            STALL_EGR_LATENCY(phydev->speed));
+
+       switch (phydev->speed) {
+       case SPEED_100:
+               idx = 1;
+               break;
+       case SPEED_1000:
+               idx = 0;
+               break;
+       default:
+               idx = 2;
+               break;
+       }
+
+       ingr_latency = IS_ENABLED(CONFIG_MACSEC) ?
+               vsc85xx_ingr_latency_macsec[idx] : vsc85xx_ingr_latency[idx];
+       egr_latency = IS_ENABLED(CONFIG_MACSEC) ?
+               vsc85xx_egr_latency_macsec[idx] : vsc85xx_egr_latency[idx];
+
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_LOCAL_LATENCY,
+                            PTP_INGR_LOCAL_LATENCY(ingr_latency));
+
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_INGR_TSP_CTRL);
+       val |= PHY_PTP_INGR_TSP_CTRL_LOAD_DELAYS;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_TSP_CTRL,
+                            val);
+
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_LOCAL_LATENCY,
+                            PTP_EGR_LOCAL_LATENCY(egr_latency));
+
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL);
+       val |= PHY_PTP_EGR_TSP_CTRL_LOAD_DELAYS;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL, val);
+}
+
+static int vsc85xx_ts_disable_flows(struct phy_device *phydev, enum ts_blk blk)
+{
+       u8 i;
+
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_NXT_COMP, 0);
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
+                            IP1_NXT_PROT_UDP_CHKSUM_WIDTH(2));
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_NXT_PROT_NXT_COMP, 0);
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_NXT_PROT_UDP_CHKSUM,
+                            IP2_NXT_PROT_UDP_CHKSUM_WIDTH(2));
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_MPLS_COMP_NXT_COMP, 0);
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT, 0);
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH2_NTX_PROT, 0);
+
+       for (i = 0; i < COMP_MAX_FLOWS; i++) {
+               vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(i),
+                                    IP1_FLOW_VALID_CH0 | IP1_FLOW_VALID_CH1);
+               vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_FLOW_ENA(i),
+                                    IP2_FLOW_VALID_CH0 | IP2_FLOW_VALID_CH1);
+               vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(i),
+                                    ETH1_FLOW_VALID_CH0 | ETH1_FLOW_VALID_CH1);
+               vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH2_FLOW_ENA(i),
+                                    ETH2_FLOW_VALID_CH0 | ETH2_FLOW_VALID_CH1);
+               vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_MPLS_FLOW_CTRL(i),
+                                    MPLS_FLOW_VALID_CH0 | MPLS_FLOW_VALID_CH1);
+
+               if (i >= PTP_COMP_MAX_FLOWS)
+                       continue;
+
+               vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i), 0);
+               vsc85xx_ts_write_csr(phydev, blk,
+                                    MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i), 0);
+               vsc85xx_ts_write_csr(phydev, blk,
+                                    MSCC_ANA_PTP_FLOW_MASK_UPPER(i), 0);
+               vsc85xx_ts_write_csr(phydev, blk,
+                                    MSCC_ANA_PTP_FLOW_MASK_LOWER(i), 0);
+               vsc85xx_ts_write_csr(phydev, blk,
+                                    MSCC_ANA_PTP_FLOW_MATCH_UPPER(i), 0);
+               vsc85xx_ts_write_csr(phydev, blk,
+                                    MSCC_ANA_PTP_FLOW_MATCH_LOWER(i), 0);
+               vsc85xx_ts_write_csr(phydev, blk,
+                                    MSCC_ANA_PTP_FLOW_PTP_ACTION(i), 0);
+               vsc85xx_ts_write_csr(phydev, blk,
+                                    MSCC_ANA_PTP_FLOW_PTP_ACTION2(i), 0);
+               vsc85xx_ts_write_csr(phydev, blk,
+                                    MSCC_ANA_PTP_FLOW_PTP_0_FIELD(i), 0);
+               vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_OAM_PTP_FLOW_ENA(i),
+                                    0);
+       }
+
+       return 0;
+}
+
+static int vsc85xx_ts_eth_cmp1_sig(struct phy_device *phydev)
+{
+       u32 val;
+
+       val = vsc85xx_ts_read_csr(phydev, EGRESS, MSCC_PHY_ANA_ETH1_NTX_PROT);
+       val &= ~ANA_ETH1_NTX_PROT_SIG_OFF_MASK;
+       val |= ANA_ETH1_NTX_PROT_SIG_OFF(0);
+       vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_ETH1_NTX_PROT, val);
+
+       val = vsc85xx_ts_read_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_CFG);
+       val &= ~ANA_FSB_ADDR_FROM_BLOCK_SEL_MASK;
+       val |= ANA_FSB_ADDR_FROM_ETH1;
+       vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_CFG, val);
+
+       return 0;
+}
+
+static struct vsc85xx_ptphdr *get_ptp_header_l4(struct sk_buff *skb,
+                                               struct iphdr *iphdr,
+                                               struct udphdr *udphdr)
+{
+       if (iphdr->version != 4 || iphdr->protocol != IPPROTO_UDP)
+               return NULL;
+
+       return (struct vsc85xx_ptphdr *)(((unsigned char *)udphdr) + UDP_HLEN);
+}
+
+static struct vsc85xx_ptphdr *get_ptp_header_tx(struct sk_buff *skb)
+{
+       struct ethhdr *ethhdr = eth_hdr(skb);
+       struct udphdr *udphdr;
+       struct iphdr *iphdr;
+
+       if (ethhdr->h_proto == htons(ETH_P_1588))
+               return (struct vsc85xx_ptphdr *)(((unsigned char *)ethhdr) +
+                                                skb_mac_header_len(skb));
+
+       if (ethhdr->h_proto != htons(ETH_P_IP))
+               return NULL;
+
+       iphdr = ip_hdr(skb);
+       udphdr = udp_hdr(skb);
+
+       return get_ptp_header_l4(skb, iphdr, udphdr);
+}
+
+static struct vsc85xx_ptphdr *get_ptp_header_rx(struct sk_buff *skb,
+                                               enum hwtstamp_rx_filters rx_filter)
+{
+       struct udphdr *udphdr;
+       struct iphdr *iphdr;
+
+       if (rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT)
+               return (struct vsc85xx_ptphdr *)skb->data;
+
+       iphdr = (struct iphdr *)skb->data;
+       udphdr = (struct udphdr *)(skb->data + iphdr->ihl * 4);
+
+       return get_ptp_header_l4(skb, iphdr, udphdr);
+}
+
+static int get_sig(struct sk_buff *skb, u8 *sig)
+{
+       struct vsc85xx_ptphdr *ptphdr = get_ptp_header_tx(skb);
+       struct ethhdr *ethhdr = eth_hdr(skb);
+       unsigned int i;
+
+       if (!ptphdr)
+               return -EOPNOTSUPP;
+
+       sig[0] = (__force u16)ptphdr->seq_id >> 8;
+       sig[1] = (__force u16)ptphdr->seq_id & GENMASK(7, 0);
+       sig[2] = ptphdr->domain;
+       sig[3] = ptphdr->tsmt & GENMASK(3, 0);
+
+       memcpy(&sig[4], ethhdr->h_dest, ETH_ALEN);
+
+       /* Fill the last bytes of the signature to reach a 16B signature */
+       for (i = 10; i < 16; i++)
+               sig[i] = ptphdr->tsmt & GENMASK(3, 0);
+
+       return 0;
+}
+
+static void vsc85xx_dequeue_skb(struct vsc85xx_ptp *ptp)
+{
+       struct skb_shared_hwtstamps shhwtstamps;
+       struct vsc85xx_ts_fifo fifo;
+       struct sk_buff *skb;
+       u8 skb_sig[16], *p;
+       int i, len;
+       u32 reg;
+
+       memset(&fifo, 0, sizeof(fifo));
+       p = (u8 *)&fifo;
+
+       reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_EGR_TS_FIFO(0));
+       if (reg & PTP_EGR_TS_FIFO_EMPTY)
+               return;
+
+       *p++ = reg & 0xff;
+       *p++ = (reg >> 8) & 0xff;
+
+       /* Read the current FIFO item. Reading FIFO6 pops the next one. */
+       for (i = 1; i < 7; i++) {
+               reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
+                                         MSCC_PHY_PTP_EGR_TS_FIFO(i));
+               *p++ = reg & 0xff;
+               *p++ = (reg >> 8) & 0xff;
+               *p++ = (reg >> 16) & 0xff;
+               *p++ = (reg >> 24) & 0xff;
+       }
+
+       len = skb_queue_len(&ptp->tx_queue);
+       if (len < 1)
+               return;
+
+       while (len--) {
+               skb = __skb_dequeue(&ptp->tx_queue);
+               if (!skb)
+                       return;
+
+               /* Can't get the signature of the packet, won't ever
+                * be able to have one so let's dequeue the packet.
+                */
+               if (get_sig(skb, skb_sig) < 0) {
+                       kfree_skb(skb);
+                       continue;
+               }
+
+               /* Check if we found the signature we were looking for. */
+               if (!memcmp(skb_sig, fifo.sig, sizeof(fifo.sig))) {
+                       memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+                       shhwtstamps.hwtstamp = ktime_set(fifo.secs, fifo.ns);
+                       skb_complete_tx_timestamp(skb, &shhwtstamps);
+
+                       return;
+               }
+
+               /* Valid signature but does not match the one of the
+                * packet in the FIFO right now, reschedule it for later
+                * packets.
+                */
+               __skb_queue_tail(&ptp->tx_queue, skb);
+       }
+}
+
+static void vsc85xx_get_tx_ts(struct vsc85xx_ptp *ptp)
+{
+       u32 reg;
+
+       do {
+               vsc85xx_dequeue_skb(ptp);
+
+               /* If other timestamps are available in the FIFO, process them. */
+               reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
+                                         MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
+       } while (PTP_EGR_FIFO_LEVEL_LAST_READ(reg) > 1);
+}
+
+static int vsc85xx_ptp_cmp_init(struct phy_device *phydev, enum ts_blk blk)
+{
+       struct vsc8531_private *vsc8531 = phydev->priv;
+       bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
+       enum vsc85xx_ptp_msg_type msgs[] = {
+               PTP_MSG_TYPE_SYNC,
+               PTP_MSG_TYPE_DELAY_REQ
+       };
+       u32 val;
+       u8 i;
+
+       for (i = 0; i < ARRAY_SIZE(msgs); i++) {
+               vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i),
+                                    base ? PTP_FLOW_VALID_CH0 :
+                                    PTP_FLOW_VALID_CH1);
+
+               val = vsc85xx_ts_read_csr(phydev, blk,
+                                         MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i));
+               val &= ~PTP_FLOW_DOMAIN_RANGE_ENA;
+               vsc85xx_ts_write_csr(phydev, blk,
+                                    MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i), val);
+
+               vsc85xx_ts_write_csr(phydev, blk,
+                                    MSCC_ANA_PTP_FLOW_MATCH_UPPER(i),
+                                    msgs[i] << 24);
+
+               vsc85xx_ts_write_csr(phydev, blk,
+                                    MSCC_ANA_PTP_FLOW_MASK_UPPER(i),
+                                    PTP_FLOW_MSG_TYPE_MASK);
+       }
+
+       return 0;
+}
+
+static int vsc85xx_eth_cmp1_init(struct phy_device *phydev, enum ts_blk blk)
+{
+       struct vsc8531_private *vsc8531 = phydev->priv;
+       bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
+       u32 val;
+
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NXT_PROT_TAG, 0);
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT_VLAN_TPID,
+                            ANA_ETH1_NTX_PROT_VLAN_TPID(ETH_P_8021AD));
+
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0),
+                            base ? ETH1_FLOW_VALID_CH0 : ETH1_FLOW_VALID_CH1);
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_MATCH_MODE(0),
+                            ANA_ETH1_FLOW_MATCH_VLAN_TAG2);
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0), 0);
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), 0);
+       vsc85xx_ts_write_csr(phydev, blk,
+                            MSCC_ANA_ETH1_FLOW_VLAN_RANGE_I_TAG(0), 0);
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_VLAN_TAG1(0), 0);
+       vsc85xx_ts_write_csr(phydev, blk,
+                            MSCC_ANA_ETH1_FLOW_VLAN_TAG2_I_TAG(0), 0);
+
+       val = vsc85xx_ts_read_csr(phydev, blk,
+                                 MSCC_ANA_ETH1_FLOW_MATCH_MODE(0));
+       val &= ~ANA_ETH1_FLOW_MATCH_VLAN_TAG_MASK;
+       val |= ANA_ETH1_FLOW_MATCH_VLAN_VERIFY;
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_MATCH_MODE(0),
+                            val);
+
+       return 0;
+}
+
+static int vsc85xx_ip_cmp1_init(struct phy_device *phydev, enum ts_blk blk)
+{
+       struct vsc8531_private *vsc8531 = phydev->priv;
+       bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
+       u32 val;
+
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MATCH2_UPPER,
+                            PTP_EV_PORT);
+       /* Match on dest port only, ignore src */
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MASK2_UPPER,
+                            0xffff);
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MATCH2_LOWER,
+                            0);
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MASK2_LOWER, 0);
+
+       val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0));
+       val &= ~IP1_FLOW_ENA_CHANNEL_MASK_MASK;
+       val |= base ? IP1_FLOW_VALID_CH0 : IP1_FLOW_VALID_CH1;
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0), val);
+
+       /* Match all IPs */
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_UPPER(0), 0);
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_UPPER(0), 0);
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_UPPER_MID(0),
+                            0);
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_UPPER_MID(0),
+                            0);
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_LOWER_MID(0),
+                            0);
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_LOWER_MID(0),
+                            0);
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_LOWER(0), 0);
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_LOWER(0), 0);
+
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_IP_CHKSUM_SEL, 0);
+
+       return 0;
+}
+
+static int vsc85xx_adjfine(struct ptp_clock_info *info, long scaled_ppm)
+{
+       struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
+       struct phy_device *phydev = ptp->phydev;
+       struct vsc8531_private *priv = phydev->priv;
+       u64 adj = 0;
+       u32 val;
+
+       if (abs(scaled_ppm) < 66 || abs(scaled_ppm) > 65536UL * 1000000UL)
+               return 0;
+
+       adj = div64_u64(1000000ULL * 65536ULL, abs(scaled_ppm));
+       if (adj > 1000000000L)
+               adj = 1000000000L;
+
+       val = PTP_AUTO_ADJ_NS_ROLLOVER(adj);
+       val |= scaled_ppm > 0 ? PTP_AUTO_ADJ_ADD_1NS : PTP_AUTO_ADJ_SUB_1NS;
+
+       mutex_lock(&priv->phc_lock);
+
+       /* Update the ppb val in nano seconds to the auto adjust reg. */
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_AUTO_ADJ,
+                            val);
+
+       /* The auto adjust update val is set to 0 after write operation. */
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
+       val |= PTP_LTC_CTRL_AUTO_ADJ_UPDATE;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
+
+       mutex_unlock(&priv->phc_lock);
+
+       return 0;
+}
+
+static int __vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+{
+       struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
+       struct phy_device *phydev = ptp->phydev;
+       struct vsc85xx_shared_private *shared =
+               (struct vsc85xx_shared_private *)phydev->shared->priv;
+       struct vsc8531_private *priv = phydev->priv;
+       u32 val;
+
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
+       val |= PTP_LTC_CTRL_SAVE_ENA;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
+
+       /* Local Time Counter (LTC) is put in SAVE* regs on rising edge of
+        * LOAD_SAVE pin.
+        */
+       mutex_lock(&shared->gpio_lock);
+       gpiod_set_value(priv->load_save, 1);
+
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_LTC_SAVED_SEC_MSB);
+
+       ts->tv_sec = ((time64_t)val) << 32;
+
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_LTC_SAVED_SEC_LSB);
+       ts->tv_sec += val;
+
+       ts->tv_nsec = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                         MSCC_PHY_PTP_LTC_SAVED_NS);
+
+       gpiod_set_value(priv->load_save, 0);
+       mutex_unlock(&shared->gpio_lock);
+
+       return 0;
+}
+
+static int vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+{
+       struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
+       struct phy_device *phydev = ptp->phydev;
+       struct vsc8531_private *priv = phydev->priv;
+
+       mutex_lock(&priv->phc_lock);
+       __vsc85xx_gettime(info, ts);
+       mutex_unlock(&priv->phc_lock);
+
+       return 0;
+}
+
+static int __vsc85xx_settime(struct ptp_clock_info *info,
+                            const struct timespec64 *ts)
+{
+       struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
+       struct phy_device *phydev = ptp->phydev;
+       struct vsc85xx_shared_private *shared =
+               (struct vsc85xx_shared_private *)phydev->shared->priv;
+       struct vsc8531_private *priv = phydev->priv;
+       u32 val;
+
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_MSB,
+                            PTP_LTC_LOAD_SEC_MSB(ts->tv_sec));
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_LSB,
+                            PTP_LTC_LOAD_SEC_LSB(ts->tv_sec));
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_NS,
+                            PTP_LTC_LOAD_NS(ts->tv_nsec));
+
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
+       val |= PTP_LTC_CTRL_LOAD_ENA;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
+
+       /* Local Time Counter (LTC) is set from LOAD* regs on rising edge of
+        * LOAD_SAVE pin.
+        */
+       mutex_lock(&shared->gpio_lock);
+       gpiod_set_value(priv->load_save, 1);
+
+       val &= ~PTP_LTC_CTRL_LOAD_ENA;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
+
+       gpiod_set_value(priv->load_save, 0);
+       mutex_unlock(&shared->gpio_lock);
+
+       return 0;
+}
+
+static int vsc85xx_settime(struct ptp_clock_info *info,
+                          const struct timespec64 *ts)
+{
+       struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
+       struct phy_device *phydev = ptp->phydev;
+       struct vsc8531_private *priv = phydev->priv;
+
+       mutex_lock(&priv->phc_lock);
+       __vsc85xx_settime(info, ts);
+       mutex_unlock(&priv->phc_lock);
+
+       return 0;
+}
+
+static int vsc85xx_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+       struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
+       struct phy_device *phydev = ptp->phydev;
+       struct vsc8531_private *priv = phydev->priv;
+       u32 val;
+
+       /* Can't recover that big of an offset. Let's set the time directly. */
+       if (abs(delta) >= NSEC_PER_SEC) {
+               struct timespec64 ts;
+               u64 now;
+
+               mutex_lock(&priv->phc_lock);
+
+               __vsc85xx_gettime(info, &ts);
+               now = ktime_to_ns(timespec64_to_ktime(ts));
+               ts = ns_to_timespec64(now + delta);
+               __vsc85xx_settime(info, &ts);
+
+               mutex_unlock(&priv->phc_lock);
+
+               return 0;
+       }
+
+       mutex_lock(&priv->phc_lock);
+
+       val = PTP_LTC_OFFSET_VAL(abs(delta)) | PTP_LTC_OFFSET_ADJ;
+       if (delta > 0)
+               val |= PTP_LTC_OFFSET_ADD;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_OFFSET, val);
+
+       mutex_unlock(&priv->phc_lock);
+
+       return 0;
+}
+
+static int vsc85xx_eth1_next_comp(struct phy_device *phydev, enum ts_blk blk,
+                                 u32 next_comp, u32 etype)
+{
+       u32 val;
+
+       val = vsc85xx_ts_read_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT);
+       val &= ~ANA_ETH1_NTX_PROT_COMPARATOR_MASK;
+       val |= next_comp;
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT, val);
+
+       val = ANA_ETH1_NXT_PROT_ETYPE_MATCH(etype) |
+               ANA_ETH1_NXT_PROT_ETYPE_MATCH_ENA;
+       vsc85xx_ts_write_csr(phydev, blk,
+                            MSCC_PHY_ANA_ETH1_NXT_PROT_ETYPE_MATCH, val);
+
+       return 0;
+}
+
+static int vsc85xx_ip1_next_comp(struct phy_device *phydev, enum ts_blk blk,
+                                u32 next_comp, u32 header)
+{
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_NXT_COMP,
+                            ANA_IP1_NXT_PROT_NXT_COMP_BYTES_HDR(header) |
+                            next_comp);
+
+       return 0;
+}
+
+/* Program the PTP action registers of flow @flow in @blk for command @cmd:
+ * - PTP_WRITE_1588: rewrite the timestamp directly in the frame (10 bytes at
+ *   offset 34).
+ * - PTP_SAVE_IN_TS_FIFO: no frame rewrite, save local time in the TS FIFO
+ *   (the PTP command itself is programmed as PTP_NOP).
+ * - otherwise (e.g. PTP_WRITE_NS): write 4 bytes into the PTP reserved field
+ *   at offset 16 and update the modified-frame status bit.
+ * Always returns 0.
+ */
+static int vsc85xx_ts_ptp_action_flow(struct phy_device *phydev, enum ts_blk blk, u8 flow, enum ptp_cmd cmd)
+{
+       u32 val;
+
+       /* Check non-zero reserved field */
+       val = PTP_FLOW_PTP_0_FIELD_PTP_FRAME | PTP_FLOW_PTP_0_FIELD_RSVRD_CHECK;
+       vsc85xx_ts_write_csr(phydev, blk,
+                            MSCC_ANA_PTP_FLOW_PTP_0_FIELD(flow), val);
+
+       val = PTP_FLOW_PTP_ACTION_CORR_OFFSET(8) |
+             PTP_FLOW_PTP_ACTION_TIME_OFFSET(8) |
+             PTP_FLOW_PTP_ACTION_PTP_CMD(cmd == PTP_SAVE_IN_TS_FIFO ?
+                                         PTP_NOP : cmd);
+       if (cmd == PTP_SAVE_IN_TS_FIFO)
+               val |= PTP_FLOW_PTP_ACTION_SAVE_LOCAL_TIME;
+       else if (cmd == PTP_WRITE_NS)
+               val |= PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_UPDATE |
+                      PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_BYTE_OFFSET(6);
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_PTP_ACTION(flow),
+                            val);
+
+       if (cmd == PTP_WRITE_1588)
+               /* Rewrite timestamp directly in frame */
+               val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(34) |
+                     PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(10);
+       else if (cmd == PTP_SAVE_IN_TS_FIFO)
+               /* no rewrite */
+               val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(0) |
+                     PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(0);
+       else
+               /* Write in reserved field */
+               val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(16) |
+                     PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(4);
+       vsc85xx_ts_write_csr(phydev, blk,
+                            MSCC_ANA_PTP_FLOW_PTP_ACTION2(flow), val);
+
+       return 0;
+}
+
+/* Configure the PTP comparator flows of @blk for Sync and Delay_Req events:
+ * ingress writes nanoseconds into the frame (PTP_WRITE_NS); egress either
+ * rewrites the timestamp in-frame for one-step Sync (PTP_WRITE_1588) or
+ * saves it in the TS FIFO (PTP_SAVE_IN_TS_FIFO). Each flow is then enabled
+ * or disabled according to @enable. Always returns 0.
+ *
+ * NOTE(review): the action is programmed on flow msgs[i] but the enable bit
+ * on flow i — this assumes PTP_MSG_TYPE_SYNC/DELAY_REQ equal 0/1 so the two
+ * indices coincide; confirm against the enum definitions.
+ */
+static int vsc85xx_ptp_conf(struct phy_device *phydev, enum ts_blk blk,
+                           bool one_step, bool enable)
+{
+       enum vsc85xx_ptp_msg_type msgs[] = {
+               PTP_MSG_TYPE_SYNC,
+               PTP_MSG_TYPE_DELAY_REQ
+       };
+       u32 val;
+       u8 i;
+
+       for (i = 0; i < ARRAY_SIZE(msgs); i++) {
+               if (blk == INGRESS)
+                       vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
+                                                  PTP_WRITE_NS);
+               else if (msgs[i] == PTP_MSG_TYPE_SYNC && one_step)
+                       /* no need to know Sync t when sending in one_step */
+                       vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
+                                                  PTP_WRITE_1588);
+               else
+                       vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
+                                                  PTP_SAVE_IN_TS_FIFO);
+
+               val = vsc85xx_ts_read_csr(phydev, blk,
+                                         MSCC_ANA_PTP_FLOW_ENA(i));
+               val &= ~PTP_FLOW_ENA;
+               if (enable)
+                       val |= PTP_FLOW_ENA;
+               vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i),
+                                    val);
+       }
+
+       return 0;
+}
+
+/* Configure destination MAC matching for Ethernet flow 0 of @blk: when the
+ * rx_filter is PTP-over-L2, match the full PTP Ethernet multicast address
+ * 01-1b-19-00-00-00; otherwise match any multicast address. The flow is then
+ * enabled/disabled per @enable. Always returns 0.
+ */
+static int vsc85xx_eth1_conf(struct phy_device *phydev, enum ts_blk blk,
+                            bool enable)
+{
+       struct vsc8531_private *vsc8531 = phydev->priv;
+       u32 val = ANA_ETH1_FLOW_ADDR_MATCH2_DEST;
+
+       if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT) {
+               /* PTP over Ethernet multicast address for SYNC and DELAY msg */
+               u8 ptp_multicast[6] = {0x01, 0x1b, 0x19, 0x00, 0x00, 0x00};
+
+               /* MATCH2 holds the top 2 address bytes, MATCH1 the lower 4 */
+               val |= ANA_ETH1_FLOW_ADDR_MATCH2_FULL_ADDR |
+                      get_unaligned_be16(&ptp_multicast[4]);
+               vsc85xx_ts_write_csr(phydev, blk,
+                                    MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
+               vsc85xx_ts_write_csr(phydev, blk,
+                                    MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0),
+                                    get_unaligned_be32(ptp_multicast));
+       } else {
+               val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST;
+               vsc85xx_ts_write_csr(phydev, blk,
+                                    MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
+               vsc85xx_ts_write_csr(phydev, blk,
+                                    MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0), 0);
+       }
+
+       val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0));
+       val &= ~ETH1_FLOW_ENA;
+       if (enable)
+               val |= ETH1_FLOW_ENA;
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0), val);
+
+       return 0;
+}
+
+/* Configure the IP-1 comparator of @blk to match IPv4/UDP PTP event frames:
+ * match protocol number UDP at IP header offset 9, set the next-protocol
+ * offset past the 20-byte IPv4 header, and program the UDP checksum field
+ * (offset 26 from the IP header, per RFC 768) to be cleared rather than
+ * updated. Flow 0 is enabled/disabled per @enable. Always returns 0.
+ */
+static int vsc85xx_ip1_conf(struct phy_device *phydev, enum ts_blk blk,
+                           bool enable)
+{
+       u32 val;
+
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_IP1_MODE,
+                            ANA_IP1_NXT_PROT_IPV4 |
+                            ANA_IP1_NXT_PROT_FLOW_OFFSET_IPV4);
+
+       /* Matching UDP protocol number */
+       val = ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MASK(0xff) |
+             ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MATCH(IPPROTO_UDP) |
+             ANA_IP1_NXT_PROT_IP_MATCH1_PROT_OFF(9);
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_IP_MATCH1,
+                            val);
+
+       /* End of IP protocol, start of next protocol (UDP) */
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_OFFSET2,
+                            ANA_IP1_NXT_PROT_OFFSET2(20));
+
+       val = vsc85xx_ts_read_csr(phydev, blk,
+                                 MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM);
+       val &= ~(IP1_NXT_PROT_UDP_CHKSUM_OFF_MASK |
+                IP1_NXT_PROT_UDP_CHKSUM_WIDTH_MASK);
+       val |= IP1_NXT_PROT_UDP_CHKSUM_WIDTH(2);
+
+       val &= ~(IP1_NXT_PROT_UDP_CHKSUM_UPDATE |
+                IP1_NXT_PROT_UDP_CHKSUM_CLEAR);
+       /* UDP checksum offset in IPv4 packet
+        * according to: https://tools.ietf.org/html/rfc768
+        */
+       val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26) | IP1_NXT_PROT_UDP_CHKSUM_CLEAR;
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
+                            val);
+
+       /* Match on both destination and source address, then enable flow 0 */
+       val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0));
+       val &= ~(IP1_FLOW_MATCH_ADDR_MASK | IP1_FLOW_ENA);
+       val |= IP1_FLOW_MATCH_DEST_SRC_ADDR;
+       if (enable)
+               val |= IP1_FLOW_ENA;
+       vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0), val);
+
+       return 0;
+}
+
+/* (Re)configure the analyzer engine for this PHY: engine 0 for the timestamp
+ * base PHY of the package, engine 1 otherwise. The engine is first disabled
+ * in both directions, then the comparator chain is set up for PTP over L2
+ * (ETH -> PTP) or PTP over UDP/IPv4 (ETH -> IP -> UDP -> PTP, 28 bytes of
+ * IP+UDP headers), the per-direction flows are configured, and finally the
+ * engine is re-enabled per direction only when rx filtering / tx
+ * timestamping is requested. Always returns 0.
+ */
+static int vsc85xx_ts_engine_init(struct phy_device *phydev, bool one_step)
+{
+       struct vsc8531_private *vsc8531 = phydev->priv;
+       bool ptp_l4, base = phydev->mdio.addr == vsc8531->ts_base_addr;
+       u8 eng_id = base ? 0 : 1;
+       u32 val;
+
+       ptp_l4 = vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_ANALYZER_MODE);
+       /* Disable INGRESS and EGRESS so engine eng_id can be reconfigured */
+       val &= ~(PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id)) |
+                PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id)));
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
+                            val);
+
+       if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT) {
+               vsc85xx_eth1_next_comp(phydev, INGRESS,
+                                      ANA_ETH1_NTX_PROT_PTP_OAM, ETH_P_1588);
+               vsc85xx_eth1_next_comp(phydev, EGRESS,
+                                      ANA_ETH1_NTX_PROT_PTP_OAM, ETH_P_1588);
+       } else {
+               vsc85xx_eth1_next_comp(phydev, INGRESS,
+                                      ANA_ETH1_NTX_PROT_IP_UDP_ACH_1,
+                                      ETH_P_IP);
+               vsc85xx_eth1_next_comp(phydev, EGRESS,
+                                      ANA_ETH1_NTX_PROT_IP_UDP_ACH_1,
+                                      ETH_P_IP);
+               /* Header length of IPv[4/6] + UDP */
+               vsc85xx_ip1_next_comp(phydev, INGRESS,
+                                     ANA_ETH1_NTX_PROT_PTP_OAM, 28);
+               vsc85xx_ip1_next_comp(phydev, EGRESS,
+                                     ANA_ETH1_NTX_PROT_PTP_OAM, 28);
+       }
+
+       vsc85xx_eth1_conf(phydev, INGRESS,
+                         vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);
+       vsc85xx_ip1_conf(phydev, INGRESS,
+                        ptp_l4 && vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);
+       vsc85xx_ptp_conf(phydev, INGRESS, one_step,
+                        vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);
+
+       vsc85xx_eth1_conf(phydev, EGRESS,
+                         vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);
+       vsc85xx_ip1_conf(phydev, EGRESS,
+                        ptp_l4 && vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);
+       vsc85xx_ptp_conf(phydev, EGRESS, one_step,
+                        vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);
+
+       /* Re-enable the engine per direction, based on the cached val read
+        * before the flows were reconfigured
+        */
+       val &= ~PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id));
+       if (vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF)
+               val |= PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id));
+
+       val &= ~PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id));
+       if (vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE)
+               val |= PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id));
+
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
+                            val);
+
+       return 0;
+}
+
+/* Link state changed: recompute the timestamping latencies under ts_lock
+ * (latencies presumably depend on link speed — see vsc85xx_ts_set_latencies).
+ */
+void vsc85xx_link_change_notify(struct phy_device *phydev)
+{
+       struct vsc8531_private *priv = phydev->priv;
+
+       mutex_lock(&priv->ts_lock);
+       vsc85xx_ts_set_latencies(phydev);
+       mutex_unlock(&priv->ts_lock);
+}
+
+/* Empty the egress timestamp FIFO by pulsing its reset bit: set
+ * PTP_EGR_TS_FIFO_RESET, then clear it again.
+ */
+static void vsc85xx_ts_reset_fifo(struct phy_device *phydev)
+{
+       u32 val;
+
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
+       val |= PTP_EGR_TS_FIFO_RESET;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
+                            val);
+
+       val &= ~PTP_EGR_TS_FIFO_RESET;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
+                            val);
+}
+
+/* SIOCSHWTSTAMP handler: validate the user's hwtstamp_config, store tx_type
+ * and rx_filter, then reconfigure the 1588 block under ts_lock (predictors
+ * disabled during reconfiguration, TS FIFO reset, unused directions set to
+ * bypass). Supported tx types: OFF, ON, ONESTEP_SYNC; supported rx filters:
+ * NONE, PTP_V2_L4_EVENT, PTP_V2_L2_EVENT. The (unmodified) config is copied
+ * back to userspace on success.
+ *
+ * Returns 0, -EFAULT on copy to/from user failure, -EINVAL for non-zero
+ * flags, or -ERANGE for an unsupported tx type / rx filter.
+ */
+static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
+{
+       struct vsc8531_private *vsc8531 =
+               container_of(mii_ts, struct vsc8531_private, mii_ts);
+       struct phy_device *phydev = vsc8531->ptp->phydev;
+       struct hwtstamp_config cfg;
+       bool one_step = false;
+       u32 val;
+
+       if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+               return -EFAULT;
+
+       if (cfg.flags)
+               return -EINVAL;
+
+       switch (cfg.tx_type) {
+       case HWTSTAMP_TX_ONESTEP_SYNC:
+               one_step = true;
+               break;
+       case HWTSTAMP_TX_ON:
+               break;
+       case HWTSTAMP_TX_OFF:
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       vsc8531->ptp->tx_type = cfg.tx_type;
+
+       switch (cfg.rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+               /* ETH->IP->UDP->PTP */
+               break;
+       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+               /* ETH->PTP */
+               break;
+       default:
+               return -ERANGE;
+       }
+
+       vsc8531->ptp->rx_filter = cfg.rx_filter;
+
+       mutex_lock(&vsc8531->ts_lock);
+
+       /* Drop any skbs still waiting for a timestamp under the old config */
+       __skb_queue_purge(&vsc8531->ptp->tx_queue);
+       __skb_queue_head_init(&vsc8531->ptp->tx_queue);
+
+       /* Disable predictor while configuring the 1588 block */
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_INGR_PREDICTOR);
+       val &= ~PTP_INGR_PREDICTOR_EN;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
+                            val);
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_EGR_PREDICTOR);
+       val &= ~PTP_EGR_PREDICTOR_EN;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
+                            val);
+
+       /* Bypass egress or ingress blocks if timestamping isn't used */
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL);
+       val &= ~(PTP_IFACE_CTRL_EGR_BYPASS | PTP_IFACE_CTRL_INGR_BYPASS);
+       if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF)
+               val |= PTP_IFACE_CTRL_EGR_BYPASS;
+       if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_NONE)
+               val |= PTP_IFACE_CTRL_INGR_BYPASS;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);
+
+       /* Resetting FIFO so that it's empty after reconfiguration */
+       vsc85xx_ts_reset_fifo(phydev);
+
+       vsc85xx_ts_engine_init(phydev, one_step);
+
+       /* Re-enable predictors now */
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_INGR_PREDICTOR);
+       val |= PTP_INGR_PREDICTOR_EN;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
+                            val);
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_EGR_PREDICTOR);
+       val |= PTP_EGR_PREDICTOR_EN;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
+                            val);
+
+       vsc8531->ptp->configured = 1;
+       mutex_unlock(&vsc8531->ts_lock);
+
+       return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+/* ethtool get_ts_info: report the PHC index and the timestamping
+ * capabilities this driver supports (raw HW TX/RX timestamping; tx types
+ * OFF/ON/ONESTEP_SYNC; rx filters NONE / PTP v2 L2 event / PTP v2 L4 event).
+ * Always returns 0.
+ */
+static int vsc85xx_ts_info(struct mii_timestamper *mii_ts,
+                          struct ethtool_ts_info *info)
+{
+       struct vsc8531_private *vsc8531 =
+               container_of(mii_ts, struct vsc8531_private, mii_ts);
+
+       info->phc_index = ptp_clock_index(vsc8531->ptp->ptp_clock);
+       info->so_timestamping =
+               SOF_TIMESTAMPING_TX_HARDWARE |
+               SOF_TIMESTAMPING_RX_HARDWARE |
+               SOF_TIMESTAMPING_RAW_HARDWARE;
+       info->tx_types =
+               (1 << HWTSTAMP_TX_OFF) |
+               (1 << HWTSTAMP_TX_ON) |
+               (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+       info->rx_filters =
+               (1 << HWTSTAMP_FILTER_NONE) |
+               (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+               (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+
+       return 0;
+}
+
+/* TX timestamping hook: if the block is configured and TX timestamping is
+ * on, mark the skb as awaiting a HW timestamp and queue it; the matching
+ * timestamp arrives later via the TS FIFO interrupt (vsc85xx_get_tx_ts).
+ * With TX timestamping off the skb is freed; before configuration the skb
+ * is left untouched for the caller.
+ */
+static void vsc85xx_txtstamp(struct mii_timestamper *mii_ts,
+                            struct sk_buff *skb, int type)
+{
+       struct vsc8531_private *vsc8531 =
+               container_of(mii_ts, struct vsc8531_private, mii_ts);
+
+       if (!vsc8531->ptp->configured)
+               return;
+
+       if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF) {
+               kfree_skb(skb);
+               return;
+       }
+
+       skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+       mutex_lock(&vsc8531->ts_lock);
+       __skb_queue_tail(&vsc8531->ptp->tx_queue, skb);
+       mutex_unlock(&vsc8531->ts_lock);
+}
+
+/* RX timestamping hook: the PHY stored only the nanoseconds part of the RX
+ * timestamp in the PTP header's reserved field (30-bit mode, programmed in
+ * __vsc8584_init_ptp). Rebuild a full timestamp by combining those
+ * nanoseconds with the current PHC seconds, attach it to the skb and deliver
+ * it with netif_rx_ni(). Returns true when the skb was consumed, false to
+ * let the stack continue normal processing.
+ */
+static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
+                            struct sk_buff *skb, int type)
+{
+       struct vsc8531_private *vsc8531 =
+               container_of(mii_ts, struct vsc8531_private, mii_ts);
+       struct skb_shared_hwtstamps *shhwtstamps = NULL;
+       struct vsc85xx_ptphdr *ptphdr;
+       struct timespec64 ts;
+       unsigned long ns;
+
+       if (!vsc8531->ptp->configured)
+               return false;
+
+       if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_NONE ||
+           type == PTP_CLASS_NONE)
+               return false;
+
+       vsc85xx_gettime(&vsc8531->ptp->caps, &ts);
+
+       ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter);
+       if (!ptphdr)
+               return false;
+
+       shhwtstamps = skb_hwtstamps(skb);
+       memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+
+       ns = ntohl(ptphdr->rsrvd2);
+
+       /* nsec is in reserved field */
+       if (ts.tv_nsec < ns)
+               /* seconds rolled over between the HW stamp and gettime */
+               ts.tv_sec--;
+
+       shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
+       netif_rx_ni(skb);
+
+       return true;
+}
+
+/* PHC capabilities template, copied into each PHY's ptp->caps before
+ * ptp_clock_register(): no alarms, pins, external timestamps, periodic
+ * outputs or PPS — only the basic clock ops.
+ */
+static const struct ptp_clock_info vsc85xx_clk_caps = {
+       .owner          = THIS_MODULE,
+       .name           = "VSC85xx timer",
+       .max_adj        = S32_MAX,
+       .n_alarm        = 0,
+       .n_pins         = 0,
+       .n_ext_ts       = 0,
+       .n_per_out      = 0,
+       .pps            = 0,
+       .adjtime        = &vsc85xx_adjtime,
+       .adjfine        = &vsc85xx_adjfine,
+       .gettime64      = &vsc85xx_gettime,
+       .settime64      = &vsc85xx_settime,
+};
+
+/* Return the private data of the package's timestamp base PHY: if @phydev is
+ * not itself the base PHY, look the base device up on the MDIO bus at
+ * ts_base_addr and return that PHY's priv instead.
+ */
+static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev)
+{
+       struct vsc8531_private *vsc8531 = phydev->priv;
+
+       if (vsc8531->ts_base_addr != phydev->mdio.addr) {
+               struct mdio_device *dev;
+
+               dev = phydev->mdio.bus->mdio_map[vsc8531->ts_base_addr];
+               phydev = container_of(dev, struct phy_device, mdio);
+
+               return phydev->priv;
+       }
+
+       return vsc8531;
+}
+
+/* True once the 1588 input clock has been configured for this package
+ * (flag lives in the base PHY's private data, shared by all package PHYs).
+ */
+static bool vsc8584_is_1588_input_clk_configured(struct phy_device *phydev)
+{
+       struct vsc8531_private *vsc8531 = vsc8584_base_priv(phydev);
+
+       return vsc8531->input_clk_init;
+}
+
+/* Record (in the base PHY's private data) that the 1588 input clock has been
+ * configured, so other PHYs of the package skip the one-time setup.
+ */
+static void vsc8584_set_input_clk_configured(struct phy_device *phydev)
+{
+       struct vsc8531_private *vsc8531 = vsc8584_base_priv(phydev);
+
+       vsc8531->input_clk_init = true;
+}
+
+/* One-time 1588 block initialization for a VSC8575/8582/8584 PHY:
+ * - once per package, configure the external 1588 differential input clock
+ *   (datasheet section 3.13.29);
+ * - select the 250 MHz internal LTC clock and program the LTC sequence,
+ *   1PPS width and delay FIFO depths (deeper when MACsec is enabled);
+ * - run the n-phase sampler calibration sequence twice;
+ * - configure the ingress/egress rewriters, 30-bit RX timestamp mode (only
+ *   nanoseconds kept in the PTP reserved field) and the egress TS FIFO
+ *   (16-byte signature, threshold 7);
+ * - disable all flows and set strict flow matching, then initialize the
+ *   Ethernet, IP and PTP comparators for both directions;
+ * - finally hook up the mii_timestamper ops and register the PTP clock.
+ *
+ * Returns 0 on success or the ptp_clock_register() error.
+ */
+static int __vsc8584_init_ptp(struct phy_device *phydev)
+{
+       struct vsc8531_private *vsc8531 = phydev->priv;
+       u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 };
+       u8  ltc_seq_a[] = { 8, 6, 5, 4, 2 };
+       u32 val;
+
+       if (!vsc8584_is_1588_input_clk_configured(phydev)) {
+               phy_lock_mdio_bus(phydev);
+
+               /* 1588_DIFF_INPUT_CLK configuration: Use an external clock for
+                * the LTC, as per 3.13.29 in the VSC8584 datasheet.
+                */
+               phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+                                 MSCC_PHY_PAGE_1588);
+               phy_ts_base_write(phydev, 29, 0x7ae0);
+               phy_ts_base_write(phydev, 30, 0xb71c);
+               phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+                                 MSCC_PHY_PAGE_STANDARD);
+
+               phy_unlock_mdio_bus(phydev);
+
+               vsc8584_set_input_clk_configured(phydev);
+       }
+
+       /* Disable predictor before configuring the 1588 block */
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_INGR_PREDICTOR);
+       val &= ~PTP_INGR_PREDICTOR_EN;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
+                            val);
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_EGR_PREDICTOR);
+       val &= ~PTP_EGR_PREDICTOR_EN;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
+                            val);
+
+       /* By default, the internal clock of fixed rate 250MHz is used */
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
+       val &= ~PTP_LTC_CTRL_CLK_SEL_MASK;
+       val |= PTP_LTC_CTRL_CLK_SEL_INTERNAL_250;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
+
+       /* ltc_seq_a/ltc_seq_e are indexed by PHC clock rate; only the
+        * PHC_CLK_250MHZ entries are used here
+        */
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQUENCE);
+       val &= ~PTP_LTC_SEQUENCE_A_MASK;
+       val |= PTP_LTC_SEQUENCE_A(ltc_seq_a[PHC_CLK_250MHZ]);
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQUENCE, val);
+
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQ);
+       val &= ~(PTP_LTC_SEQ_ERR_MASK | PTP_LTC_SEQ_ADD_SUB);
+       if (ltc_seq_e[PHC_CLK_250MHZ])
+               val |= PTP_LTC_SEQ_ADD_SUB;
+       val |= PTP_LTC_SEQ_ERR(ltc_seq_e[PHC_CLK_250MHZ]);
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQ, val);
+
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_1PPS_WIDTH_ADJ,
+                            PPS_WIDTH_ADJ);
+
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_DELAY_FIFO,
+                            IS_ENABLED(CONFIG_MACSEC) ?
+                            PTP_INGR_DELAY_FIFO_DEPTH_MACSEC :
+                            PTP_INGR_DELAY_FIFO_DEPTH_DEFAULT);
+
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_DELAY_FIFO,
+                            IS_ENABLED(CONFIG_MACSEC) ?
+                            PTP_EGR_DELAY_FIFO_DEPTH_MACSEC :
+                            PTP_EGR_DELAY_FIFO_DEPTH_DEFAULT);
+
+       /* Enable n-phase sampler for Viper Rev-B */
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_ACCUR_CFG_STATUS);
+       val &= ~(PTP_ACCUR_PPS_OUT_BYPASS | PTP_ACCUR_PPS_IN_BYPASS |
+                PTP_ACCUR_EGR_SOF_BYPASS | PTP_ACCUR_INGR_SOF_BYPASS |
+                PTP_ACCUR_LOAD_SAVE_BYPASS);
+       /* Writing the ERR/DONE bits clears the calibration status flags */
+       val |= PTP_ACCUR_PPS_OUT_CALIB_ERR | PTP_ACCUR_PPS_OUT_CALIB_DONE |
+              PTP_ACCUR_PPS_IN_CALIB_ERR | PTP_ACCUR_PPS_IN_CALIB_DONE |
+              PTP_ACCUR_EGR_SOF_CALIB_ERR | PTP_ACCUR_EGR_SOF_CALIB_DONE |
+              PTP_ACCUR_INGR_SOF_CALIB_ERR | PTP_ACCUR_INGR_SOF_CALIB_DONE |
+              PTP_ACCUR_LOAD_SAVE_CALIB_ERR | PTP_ACCUR_LOAD_SAVE_CALIB_DONE;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
+                            val);
+
+       /* First calibration pass: trigger, then clear trigger and status */
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_ACCUR_CFG_STATUS);
+       val |= PTP_ACCUR_CALIB_TRIGG;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
+                            val);
+
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_ACCUR_CFG_STATUS);
+       val &= ~PTP_ACCUR_CALIB_TRIGG;
+       val |= PTP_ACCUR_PPS_OUT_CALIB_ERR | PTP_ACCUR_PPS_OUT_CALIB_DONE |
+              PTP_ACCUR_PPS_IN_CALIB_ERR | PTP_ACCUR_PPS_IN_CALIB_DONE |
+              PTP_ACCUR_EGR_SOF_CALIB_ERR | PTP_ACCUR_EGR_SOF_CALIB_DONE |
+              PTP_ACCUR_INGR_SOF_CALIB_ERR | PTP_ACCUR_INGR_SOF_CALIB_DONE |
+              PTP_ACCUR_LOAD_SAVE_CALIB_ERR | PTP_ACCUR_LOAD_SAVE_CALIB_DONE;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
+                            val);
+
+       /* Second calibration pass */
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_ACCUR_CFG_STATUS);
+       val |= PTP_ACCUR_CALIB_TRIGG;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
+                            val);
+
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_ACCUR_CFG_STATUS);
+       val &= ~PTP_ACCUR_CALIB_TRIGG;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
+                            val);
+
+       /* Do not access FIFO via SI */
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_TSTAMP_FIFO_SI);
+       val &= ~PTP_TSTAMP_FIFO_SI_EN;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_TSTAMP_FIFO_SI,
+                            val);
+
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_INGR_REWRITER_CTRL);
+       val &= ~PTP_INGR_REWRITER_REDUCE_PREAMBLE;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_REWRITER_CTRL,
+                            val);
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_EGR_REWRITER_CTRL);
+       val &= ~PTP_EGR_REWRITER_REDUCE_PREAMBLE;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_REWRITER_CTRL,
+                            val);
+
+       /* Put the flag that indicates the frame has been modified to bit 7 */
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_INGR_REWRITER_CTRL);
+       val |= PTP_INGR_REWRITER_FLAG_BIT_OFF(7) | PTP_INGR_REWRITER_FLAG_VAL;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_REWRITER_CTRL,
+                            val);
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_EGR_REWRITER_CTRL);
+       val |= PTP_EGR_REWRITER_FLAG_BIT_OFF(7);
+       val &= ~PTP_EGR_REWRITER_FLAG_VAL;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_REWRITER_CTRL,
+                            val);
+
+       /* 30bit mode for RX timestamp, only the nanoseconds are kept in
+        * reserved field.
+        */
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_INGR_TSP_CTRL);
+       val |= PHY_PTP_INGR_TSP_CTRL_FRACT_NS;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_TSP_CTRL,
+                            val);
+
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL);
+       val |= PHY_PTP_EGR_TSP_CTRL_FRACT_NS;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL, val);
+
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_SERIAL_TOD_IFACE);
+       val |= PTP_SERIAL_TOD_IFACE_LS_AUTO_CLR;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_SERIAL_TOD_IFACE,
+                            val);
+
+       vsc85xx_ts_fsb_init(phydev);
+
+       /* Set the Egress timestamp FIFO configuration and status register */
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
+       val &= ~(PTP_EGR_TS_FIFO_SIG_BYTES_MASK | PTP_EGR_TS_FIFO_THRESH_MASK);
+       /* 16 bytes for the signature, 10 for the timestamp in the TS FIFO */
+       val |= PTP_EGR_TS_FIFO_SIG_BYTES(16) | PTP_EGR_TS_FIFO_THRESH(7);
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
+                            val);
+
+       vsc85xx_ts_reset_fifo(phydev);
+
+       val = PTP_IFACE_CTRL_CLK_ENA;
+       if (!IS_ENABLED(CONFIG_MACSEC))
+               val |= PTP_IFACE_CTRL_GMII_PROT;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);
+
+       vsc85xx_ts_set_latencies(phydev);
+
+       /* NOTE(review): the VERSION_CODE read result is discarded (val is
+        * overwritten on the next line) — presumably a deliberate dummy read;
+        * confirm against the datasheet.
+        */
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_VERSION_CODE);
+
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL);
+       val |= PTP_IFACE_CTRL_EGR_BYPASS;
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);
+
+       vsc85xx_ts_disable_flows(phydev, EGRESS);
+       vsc85xx_ts_disable_flows(phydev, INGRESS);
+
+       val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                 MSCC_PHY_PTP_ANALYZER_MODE);
+       /* Disable INGRESS and EGRESS so engine eng_id can be reconfigured */
+       val &= ~(PTP_ANALYZER_MODE_EGR_ENA_MASK |
+                PTP_ANALYZER_MODE_INGR_ENA_MASK |
+                PTP_ANA_INGR_ENCAP_FLOW_MODE_MASK |
+                PTP_ANA_EGR_ENCAP_FLOW_MODE_MASK);
+       /* Strict matching in flow (packets should match flows from the same
+        * index in all enabled comparators (except PTP)).
+        */
+       val |= PTP_ANA_SPLIT_ENCAP_FLOW | PTP_ANA_INGR_ENCAP_FLOW_MODE(0x7) |
+              PTP_ANA_EGR_ENCAP_FLOW_MODE(0x7);
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
+                            val);
+
+       /* Initialized for ingress and egress flows:
+        * - The Ethernet comparator.
+        * - The IP comparator.
+        * - The PTP comparator.
+        */
+       vsc85xx_eth_cmp1_init(phydev, INGRESS);
+       vsc85xx_ip_cmp1_init(phydev, INGRESS);
+       vsc85xx_ptp_cmp_init(phydev, INGRESS);
+       vsc85xx_eth_cmp1_init(phydev, EGRESS);
+       vsc85xx_ip_cmp1_init(phydev, EGRESS);
+       vsc85xx_ptp_cmp_init(phydev, EGRESS);
+
+       vsc85xx_ts_eth_cmp1_sig(phydev);
+
+       vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp;
+       vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp;
+       vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp;
+       vsc8531->mii_ts.ts_info  = vsc85xx_ts_info;
+       phydev->mii_ts = &vsc8531->mii_ts;
+
+       memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps));
+
+       vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps,
+                                                    &phydev->mdio.dev);
+       return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock);
+}
+
+/* Unmask the 1588 timestamping interrupts (TS FIFO add and FIFO overflow)
+ * for this PHY, under ts_lock.
+ */
+void vsc8584_config_ts_intr(struct phy_device *phydev)
+{
+       struct vsc8531_private *priv = phydev->priv;
+
+       mutex_lock(&priv->ts_lock);
+       vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_1588_VSC85XX_INT_MASK,
+                            VSC85XX_1588_INT_MASK_MASK);
+       mutex_unlock(&priv->ts_lock);
+}
+
+/* Run the 1588 initialization only on PHY models that support it
+ * (VSC8575/8582/8584); a no-op returning 0 on any other model.
+ */
+int vsc8584_ptp_init(struct phy_device *phydev)
+{
+       switch (phydev->phy_id & phydev->drv->phy_id_mask) {
+       case PHY_ID_VSC8575:
+       case PHY_ID_VSC8582:
+       case PHY_ID_VSC8584:
+               return __vsc8584_init_ptp(phydev);
+       }
+
+       return 0;
+}
+
+/* 1588 interrupt handler: read and ack the interrupt status, then either
+ * drain new egress timestamps from the TS FIFO (FIFO_ADD) or, on overflow,
+ * drop all queued TX skbs and reset the FIFO. Returns IRQ_NONE when none of
+ * the masked interrupt bits are set.
+ */
+irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev)
+{
+       struct vsc8531_private *priv = phydev->priv;
+       int rc;
+
+       mutex_lock(&priv->ts_lock);
+       rc = vsc85xx_ts_read_csr(phydev, PROCESSOR,
+                                MSCC_PHY_1588_VSC85XX_INT_STATUS);
+       /* Ack the PTP interrupt */
+       vsc85xx_ts_write_csr(phydev, PROCESSOR,
+                            MSCC_PHY_1588_VSC85XX_INT_STATUS, rc);
+
+       if (!(rc & VSC85XX_1588_INT_MASK_MASK)) {
+               mutex_unlock(&priv->ts_lock);
+               return IRQ_NONE;
+       }
+
+       if (rc & VSC85XX_1588_INT_FIFO_ADD) {
+               vsc85xx_get_tx_ts(priv->ptp);
+       } else if (rc & VSC85XX_1588_INT_FIFO_OVERFLOW) {
+               /* Timestamps were lost: pending skbs can never be matched */
+               __skb_queue_purge(&priv->ptp->tx_queue);
+               vsc85xx_ts_reset_fifo(phydev);
+       }
+
+       mutex_unlock(&priv->ts_lock);
+       return IRQ_HANDLED;
+}
+
+/* Per-PHY probe: allocate the ptp private structure, initialize the PHC and
+ * timestamping mutexes, and request the shared load/save GPIO.
+ *
+ * Returns 0, -ENOMEM on allocation failure, or the gpiod error.
+ */
+int vsc8584_ptp_probe(struct phy_device *phydev)
+{
+       struct vsc8531_private *vsc8531 = phydev->priv;
+
+       vsc8531->ptp = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531->ptp),
+                                   GFP_KERNEL);
+       if (!vsc8531->ptp)
+               return -ENOMEM;
+
+       mutex_init(&vsc8531->phc_lock);
+       mutex_init(&vsc8531->ts_lock);
+
+       /* Retrieve the shared load/save GPIO. Request it as non exclusive as
+        * the same GPIO can be requested by all the PHYs of the same package.
+        * This GPIO must be used with the gpio_lock taken (the lock is shared
+        * between all PHYs).
+        */
+       vsc8531->load_save = devm_gpiod_get_optional(&phydev->mdio.dev, "load-save",
+                                                    GPIOD_FLAGS_BIT_NONEXCLUSIVE |
+                                                    GPIOD_OUT_LOW);
+       if (IS_ERR(vsc8531->load_save)) {
+               phydev_err(phydev, "Can't get load-save GPIO (%ld)\n",
+                          PTR_ERR(vsc8531->load_save));
+               return PTR_ERR(vsc8531->load_save);
+       }
+
+       vsc8531->ptp->phydev = phydev;
+
+       return 0;
+}
+
+/* Package-wide probe, run once per PHY package: initialize the GPIO lock
+ * shared by all PHYs of the package. Always returns 0.
+ */
+int vsc8584_ptp_probe_once(struct phy_device *phydev)
+{
+       struct vsc85xx_shared_private *shared =
+               (struct vsc85xx_shared_private *)phydev->shared->priv;
+
+       /* Initialize shared GPIO lock */
+       mutex_init(&shared->gpio_lock);
+
+       return 0;
+}
diff --git a/drivers/net/phy/mscc/mscc_ptp.h b/drivers/net/phy/mscc/mscc_ptp.h
new file mode 100644 (file)
index 0000000..3ea163a
--- /dev/null
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Driver for Microsemi VSC85xx PHYs
+ *
+ * Copyright (c) 2020 Microsemi Corporation
+ */
+
+#ifndef _MSCC_PHY_PTP_H_
+#define _MSCC_PHY_PTP_H_
+
+/* 1588 page Registers */
+#define MSCC_PHY_TS_BIU_ADDR_CNTL        16
+#define BIU_ADDR_EXE                     0x8000
+#define BIU_ADDR_READ                    0x4000
+#define BIU_ADDR_WRITE                   0x0000
+#define BIU_BLK_ID(x)                    ((x) << 11)
+#define BIU_CSR_ADDR(x)                          (x)
+#define BIU_ADDR_CNT_MAX                 8
+
+#define MSCC_PHY_TS_CSR_DATA_LSB         17
+#define MSCC_PHY_TS_CSR_DATA_MSB         18
+
+#define MSCC_PHY_1588_INGR_VSC85XX_INT_STATUS  0x002d
+#define MSCC_PHY_1588_VSC85XX_INT_STATUS  0x004d
+#define VSC85XX_1588_INT_FIFO_ADD        0x0004
+#define VSC85XX_1588_INT_FIFO_OVERFLOW   0x0001
+
+#define MSCC_PHY_1588_INGR_VSC85XX_INT_MASK      0x002e
+#define MSCC_PHY_1588_VSC85XX_INT_MASK   0x004e
+#define VSC85XX_1588_INT_MASK_MASK       (VSC85XX_1588_INT_FIFO_ADD | \
+                                          VSC85XX_1588_INT_FIFO_OVERFLOW)
+
+/* TS CSR addresses */
+#define MSCC_PHY_ANA_ETH1_NTX_PROT       0x0000
+#define ANA_ETH1_NTX_PROT_SIG_OFF_MASK   GENMASK(20, 16)
+#define ANA_ETH1_NTX_PROT_SIG_OFF(x)     (((x) << 16) & ANA_ETH1_NTX_PROT_SIG_OFF_MASK)
+#define ANA_ETH1_NTX_PROT_COMPARATOR_MASK GENMASK(2, 0)
+#define ANA_ETH1_NTX_PROT_PTP_OAM        0x0005
+#define ANA_ETH1_NTX_PROT_MPLS           0x0004
+#define ANA_ETH1_NTX_PROT_IP_UDP_ACH_2   0x0003
+#define ANA_ETH1_NTX_PROT_IP_UDP_ACH_1   0x0002
+#define ANA_ETH1_NTX_PROT_ETH2           0x0001
+
+#define MSCC_PHY_PTP_IFACE_CTRL                  0x0000
+#define PTP_IFACE_CTRL_CLK_ENA           0x0040
+#define PTP_IFACE_CTRL_INGR_BYPASS       0x0008
+#define PTP_IFACE_CTRL_EGR_BYPASS        0x0004
+#define PTP_IFACE_CTRL_MII_PROT                  0x0003
+#define PTP_IFACE_CTRL_GMII_PROT         0x0002
+#define PTP_IFACE_CTRL_XGMII_64_PROT     0x0000
+
+#define MSCC_PHY_ANA_ETH1_NTX_PROT_VLAN_TPID   0x0001
+#define ANA_ETH1_NTX_PROT_VLAN_TPID_MASK  GENMASK(31, 16)
+#define ANA_ETH1_NTX_PROT_VLAN_TPID(x)   (((x) << 16) & ANA_ETH1_NTX_PROT_VLAN_TPID_MASK)
+
+#define MSCC_PHY_PTP_ANALYZER_MODE       0x0001
+#define PTP_ANA_SPLIT_ENCAP_FLOW         0x1000000
+#define PTP_ANA_EGR_ENCAP_FLOW_MODE_MASK  GENMASK(22, 20)
+#define PTP_ANA_EGR_ENCAP_FLOW_MODE(x)   (((x) << 20) & PTP_ANA_EGR_ENCAP_FLOW_MODE_MASK)
+#define PTP_ANA_INGR_ENCAP_FLOW_MODE_MASK GENMASK(18, 16)
+#define PTP_ANA_INGR_ENCAP_FLOW_MODE(x)          (((x) << 16) & PTP_ANA_INGR_ENCAP_FLOW_MODE_MASK)
+#define PTP_ANALYZER_MODE_EGR_ENA_MASK   GENMASK(6, 4)
+#define PTP_ANALYZER_MODE_EGR_ENA(x)     (((x) << 4) & PTP_ANALYZER_MODE_EGR_ENA_MASK)
+#define PTP_ANALYZER_MODE_INGR_ENA_MASK          GENMASK(2, 0)
+#define PTP_ANALYZER_MODE_INGR_ENA(x)    ((x) & PTP_ANALYZER_MODE_INGR_ENA_MASK)
+
+#define MSCC_PHY_ANA_ETH1_NXT_PROT_TAG   0x0002
+#define ANA_ETH1_NXT_PROT_TAG_ENA        0x0001
+
+#define MSCC_PHY_PTP_MODE_CTRL           0x0002
+#define PTP_MODE_CTRL_MODE_MASK                  GENMASK(2, 0)
+#define PTP_MODE_CTRL_PKT_MODE           0x0004
+
+#define MSCC_PHY_ANA_ETH1_NXT_PROT_ETYPE_MATCH 0x0003
+#define ANA_ETH1_NXT_PROT_ETYPE_MATCH_ENA 0x10000
+#define ANA_ETH1_NXT_PROT_ETYPE_MATCH_MASK     GENMASK(15, 0)
+#define ANA_ETH1_NXT_PROT_ETYPE_MATCH(x)  ((x) & ANA_ETH1_NXT_PROT_ETYPE_MATCH_MASK)
+
+#define MSCC_PHY_PTP_VERSION_CODE        0x0003
+#define PTP_IP_VERSION_MASK              GENMASK(7, 0)
+#define PTP_IP_VERSION_2_1               0x0021
+
+#define MSCC_ANA_ETH1_FLOW_ENA(x)        (0x0010 + ((x) << 4))
+#define ETH1_FLOW_ENA_CHANNEL_MASK_MASK          GENMASK(9, 8)
+#define ETH1_FLOW_ENA_CHANNEL_MASK(x)    (((x) << 8) & ETH1_FLOW_ENA_CHANNEL_MASK_MASK)
+#define ETH1_FLOW_VALID_CH1      ETH1_FLOW_ENA_CHANNEL_MASK(2)
+#define ETH1_FLOW_VALID_CH0      ETH1_FLOW_ENA_CHANNEL_MASK(1)
+#define ETH1_FLOW_ENA                    0x0001
+
+#define MSCC_ANA_ETH1_FLOW_MATCH_MODE(x)  (MSCC_ANA_ETH1_FLOW_ENA(x) + 1)
+#define ANA_ETH1_FLOW_MATCH_VLAN_TAG_MASK GENMASK(7, 6)
+#define ANA_ETH1_FLOW_MATCH_VLAN_TAG(x)          (((x) << 6) & ANA_ETH1_FLOW_MATCH_VLAN_TAG_MASK)
+#define ANA_ETH1_FLOW_MATCH_VLAN_TAG2    0x0200
+#define ANA_ETH1_FLOW_MATCH_VLAN_VERIFY          0x0010
+
+#define MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(x) (MSCC_ANA_ETH1_FLOW_ENA(x) + 2)
+
+#define MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(x) (MSCC_ANA_ETH1_FLOW_ENA(x) + 3)
+#define ANA_ETH1_FLOW_ADDR_MATCH2_MASK_MASK    GENMASK(22, 20)
+#define ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST        0x400000
+#define ANA_ETH1_FLOW_ADDR_MATCH2_FULL_ADDR    0x100000
+#define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST_MASK        GENMASK(17, 16)
+#define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST     0x020000
+#define ANA_ETH1_FLOW_ADDR_MATCH2_SRC    0x010000
+#define ANA_ETH1_FLOW_ADDR_MATCH2_DEST   0x000000
+
+#define MSCC_ANA_ETH1_FLOW_VLAN_RANGE_I_TAG(x) (MSCC_ANA_ETH1_FLOW_ENA(x) + 4)
+#define MSCC_ANA_ETH1_FLOW_VLAN_TAG1(x)          (MSCC_ANA_ETH1_FLOW_ENA(x) + 5)
+#define MSCC_ANA_ETH1_FLOW_VLAN_TAG2_I_TAG(x)  (MSCC_ANA_ETH1_FLOW_ENA(x) + 6)
+
+#define MSCC_PHY_PTP_LTC_CTRL            0x0010
+#define PTP_LTC_CTRL_CLK_SEL_MASK        GENMASK(14, 12)
+#define PTP_LTC_CTRL_CLK_SEL(x)                  (((x) << 12) & PTP_LTC_CTRL_CLK_SEL_MASK)
+#define PTP_LTC_CTRL_CLK_SEL_INTERNAL_250 PTP_LTC_CTRL_CLK_SEL(5)
+#define PTP_LTC_CTRL_AUTO_ADJ_UPDATE     0x0010
+#define PTP_LTC_CTRL_ADD_SUB_1NS_REQ     0x0008
+#define PTP_LTC_CTRL_ADD_1NS             0x0004
+#define PTP_LTC_CTRL_SAVE_ENA            0x0002
+#define PTP_LTC_CTRL_LOAD_ENA            0x0001
+
+#define MSCC_PHY_PTP_LTC_LOAD_SEC_MSB    0x0011
+#define PTP_LTC_LOAD_SEC_MSB(x)                  (((x) & GENMASK_ULL(47, 32)) >> 32)
+
+#define MSCC_PHY_PTP_LTC_LOAD_SEC_LSB    0x0012
+#define PTP_LTC_LOAD_SEC_LSB(x)                  ((x) & GENMASK(31, 0))
+
+#define MSCC_PHY_PTP_LTC_LOAD_NS         0x0013
+#define PTP_LTC_LOAD_NS(x)               ((x) & GENMASK(31, 0))
+
+#define MSCC_PHY_PTP_LTC_SAVED_SEC_MSB   0x0014
+#define MSCC_PHY_PTP_LTC_SAVED_SEC_LSB   0x0015
+#define MSCC_PHY_PTP_LTC_SAVED_NS        0x0016
+
+#define MSCC_PHY_PTP_LTC_SEQUENCE        0x0017
+#define PTP_LTC_SEQUENCE_A_MASK                  GENMASK(3, 0)
+#define PTP_LTC_SEQUENCE_A(x)            ((x) & PTP_LTC_SEQUENCE_A_MASK)
+
+#define MSCC_PHY_PTP_LTC_SEQ             0x0018
+#define PTP_LTC_SEQ_ADD_SUB              0x80000
+#define PTP_LTC_SEQ_ERR_MASK             GENMASK(18, 0)
+#define PTP_LTC_SEQ_ERR(x)               ((x) & PTP_LTC_SEQ_ERR_MASK)
+
+#define MSCC_PHY_PTP_LTC_AUTO_ADJ        0x001a
+#define PTP_AUTO_ADJ_NS_ROLLOVER(x)      ((x) & GENMASK(29, 0))
+#define PTP_AUTO_ADJ_ADD_SUB_1NS_MASK    GENMASK(31, 30)
+#define PTP_AUTO_ADJ_SUB_1NS             0x80000000
+#define PTP_AUTO_ADJ_ADD_1NS             0x40000000
+
+#define MSCC_PHY_PTP_LTC_1PPS_WIDTH_ADJ          0x001b
+#define PTP_LTC_1PPS_WIDTH_ADJ_MASK      GENMASK(29, 0)
+
+#define MSCC_PHY_PTP_TSTAMP_FIFO_SI      0x0020
+#define PTP_TSTAMP_FIFO_SI_EN            0x0001
+
+#define MSCC_PHY_PTP_INGR_PREDICTOR      0x0022
+#define PTP_INGR_PREDICTOR_EN            0x0001
+
+#define MSCC_PHY_PTP_EGR_PREDICTOR       0x0026
+#define PTP_EGR_PREDICTOR_EN             0x0001
+
+#define MSCC_PHY_PTP_INGR_TSP_CTRL       0x0035
+#define PHY_PTP_INGR_TSP_CTRL_FRACT_NS   0x0004
+#define PHY_PTP_INGR_TSP_CTRL_LOAD_DELAYS 0x0001
+
+#define MSCC_PHY_PTP_INGR_LOCAL_LATENCY          0x0037
+#define PTP_INGR_LOCAL_LATENCY_MASK      GENMASK(22, 0)
+#define PTP_INGR_LOCAL_LATENCY(x)        ((x) & PTP_INGR_LOCAL_LATENCY_MASK)
+
+#define MSCC_PHY_PTP_INGR_DELAY_FIFO     0x003a
+#define PTP_INGR_DELAY_FIFO_DEPTH_MACSEC  0x0013
+#define PTP_INGR_DELAY_FIFO_DEPTH_DEFAULT 0x000f
+
+#define MSCC_PHY_PTP_INGR_TS_FIFO(x)     (0x005c + (x))
+#define PTP_INGR_TS_FIFO_EMPTY           0x80000000
+
+#define MSCC_PHY_PTP_INGR_REWRITER_CTRL          0x0044
+#define PTP_INGR_REWRITER_REDUCE_PREAMBLE 0x0010
+#define PTP_INGR_REWRITER_FLAG_VAL       0x0008
+#define PTP_INGR_REWRITER_FLAG_BIT_OFF_M  GENMASK(2, 0)
+#define PTP_INGR_REWRITER_FLAG_BIT_OFF(x) ((x) & PTP_INGR_REWRITER_FLAG_BIT_OFF_M)
+
+#define MSCC_PHY_PTP_EGR_STALL_LATENCY   0x004f
+
+#define MSCC_PHY_PTP_EGR_TSP_CTRL        0x0055
+#define PHY_PTP_EGR_TSP_CTRL_FRACT_NS    0x0004
+#define PHY_PTP_EGR_TSP_CTRL_LOAD_DELAYS  0x0001
+
+#define MSCC_PHY_PTP_EGR_LOCAL_LATENCY   0x0057
+#define PTP_EGR_LOCAL_LATENCY_MASK       GENMASK(22, 0)
+#define PTP_EGR_LOCAL_LATENCY(x)         ((x) & PTP_EGR_LOCAL_LATENCY_MASK)
+
+#define MSCC_PHY_PTP_EGR_DELAY_FIFO      0x005a
+#define PTP_EGR_DELAY_FIFO_DEPTH_MACSEC          0x0013
+#define PTP_EGR_DELAY_FIFO_DEPTH_DEFAULT  0x000f
+
+#define MSCC_PHY_PTP_EGR_TS_FIFO_CTRL    0x005b
+#define PTP_EGR_TS_FIFO_RESET            0x10000
+#define PTP_EGR_FIFO_LEVEL_LAST_READ_MASK GENMASK(15, 12)
+#define PTP_EGR_FIFO_LEVEL_LAST_READ(x)          (((x) & PTP_EGR_FIFO_LEVEL_LAST_READ_MASK) >> 12)
+#define PTP_EGR_TS_FIFO_THRESH_MASK      GENMASK(11, 8)
+#define PTP_EGR_TS_FIFO_THRESH(x)        (((x) << 8) & PTP_EGR_TS_FIFO_THRESH_MASK)
+#define PTP_EGR_TS_FIFO_SIG_BYTES_MASK   GENMASK(4, 0)
+#define PTP_EGR_TS_FIFO_SIG_BYTES(x)     ((x) & PTP_EGR_TS_FIFO_SIG_BYTES_MASK)
+
+#define MSCC_PHY_PTP_EGR_TS_FIFO(x)      (0x005c + (x))
+#define PTP_EGR_TS_FIFO_EMPTY            0x80000000
+#define PTP_EGR_TS_FIFO_0_MASK           GENMASK(15, 0)
+
+#define MSCC_PHY_PTP_EGR_REWRITER_CTRL   0x0064
+#define PTP_EGR_REWRITER_REDUCE_PREAMBLE  0x0010
+#define PTP_EGR_REWRITER_FLAG_VAL        0x0008
+#define PTP_EGR_REWRITER_FLAG_BIT_OFF_M   GENMASK(2, 0)
+#define PTP_EGR_REWRITER_FLAG_BIT_OFF(x)  ((x) & PTP_EGR_REWRITER_FLAG_BIT_OFF_M)
+
+#define MSCC_PHY_PTP_SERIAL_TOD_IFACE    0x006e
+#define PTP_SERIAL_TOD_IFACE_LS_AUTO_CLR  0x0004
+
+#define MSCC_PHY_PTP_LTC_OFFSET                  0x0070
+#define PTP_LTC_OFFSET_ADJ               BIT(31)
+#define PTP_LTC_OFFSET_ADD               BIT(30)
+#define PTP_LTC_OFFSET_VAL(x)            (x)
+
+#define MSCC_PHY_PTP_ACCUR_CFG_STATUS    0x0074
+#define PTP_ACCUR_PPS_OUT_CALIB_ERR      0x20000
+#define PTP_ACCUR_PPS_OUT_CALIB_DONE     0x10000
+#define PTP_ACCUR_PPS_IN_CALIB_ERR       0x4000
+#define PTP_ACCUR_PPS_IN_CALIB_DONE      0x2000
+#define PTP_ACCUR_EGR_SOF_CALIB_ERR      0x1000
+#define PTP_ACCUR_EGR_SOF_CALIB_DONE     0x0800
+#define PTP_ACCUR_INGR_SOF_CALIB_ERR     0x0400
+#define PTP_ACCUR_INGR_SOF_CALIB_DONE    0x0200
+#define PTP_ACCUR_LOAD_SAVE_CALIB_ERR    0x0100
+#define PTP_ACCUR_LOAD_SAVE_CALIB_DONE   0x0080
+#define PTP_ACCUR_CALIB_TRIGG            0x0040
+#define PTP_ACCUR_PPS_OUT_BYPASS         0x0010
+#define PTP_ACCUR_PPS_IN_BYPASS                  0x0008
+#define PTP_ACCUR_EGR_SOF_BYPASS         0x0004
+#define PTP_ACCUR_INGR_SOF_BYPASS        0x0002
+#define PTP_ACCUR_LOAD_SAVE_BYPASS       0x0001
+
+#define MSCC_PHY_ANA_ETH2_NTX_PROT       0x0090
+#define ANA_ETH2_NTX_PROT_COMPARATOR_MASK GENMASK(2, 0)
+#define ANA_ETH2_NTX_PROT_PTP_OAM        0x0005
+#define ANA_ETH2_NTX_PROT_MPLS           0x0004
+#define ANA_ETH2_NTX_PROT_IP_UDP_ACH_2   0x0003
+#define ANA_ETH2_NTX_PROT_IP_UDP_ACH_1   0x0002
+#define ANA_ETH2_NTX_PROT_ETH2           0x0001
+
+#define MSCC_PHY_ANA_ETH2_NXT_PROT_ETYPE_MATCH 0x0003
+#define ANA_ETH2_NXT_PROT_ETYPE_MATCH_ENA 0x10000
+#define ANA_ETH2_NXT_PROT_ETYPE_MATCH_MASK     GENMASK(15, 0)
+#define ANA_ETH2_NXT_PROT_ETYPE_MATCH(x)  ((x) & ANA_ETH2_NXT_PROT_ETYPE_MATCH_MASK)
+
+#define MSCC_ANA_ETH2_FLOW_ENA(x)        (0x00a0 + ((x) << 4))
+#define ETH2_FLOW_ENA_CHANNEL_MASK_MASK          GENMASK(9, 8)
+#define ETH2_FLOW_ENA_CHANNEL_MASK(x)    (((x) << 8) & ETH2_FLOW_ENA_CHANNEL_MASK_MASK)
+#define ETH2_FLOW_VALID_CH1      ETH2_FLOW_ENA_CHANNEL_MASK(2)
+#define ETH2_FLOW_VALID_CH0      ETH2_FLOW_ENA_CHANNEL_MASK(1)
+
+#define MSCC_PHY_ANA_MPLS_COMP_NXT_COMP          0x0120
+#define ANA_MPLS_NTX_PROT_COMPARATOR_MASK GENMASK(2, 0)
+#define ANA_MPLS_NTX_PROT_PTP_OAM        0x0005
+#define ANA_MPLS_NTX_PROT_MPLS           0x0004
+#define ANA_MPLS_NTX_PROT_IP_UDP_ACH_2   0x0003
+#define ANA_MPLS_NTX_PROT_IP_UDP_ACH_1   0x0002
+#define ANA_MPLS_NTX_PROT_ETH2           0x0001
+
+#define MSCC_ANA_MPLS_FLOW_CTRL(x)       (0x0130 + ((x) << 4))
+#define MPLS_FLOW_CTRL_CHANNEL_MASK_MASK  GENMASK(25, 24)
+#define MPLS_FLOW_CTRL_CHANNEL_MASK(x)   (((x) << 24) & MPLS_FLOW_CTRL_CHANNEL_MASK_MASK)
+#define MPLS_FLOW_VALID_CH1              MPLS_FLOW_CTRL_CHANNEL_MASK(2)
+#define MPLS_FLOW_VALID_CH0              MPLS_FLOW_CTRL_CHANNEL_MASK(1)
+
+#define MSCC_ANA_IP1_NXT_PROT_NXT_COMP   0x01b0
+#define ANA_IP1_NXT_PROT_NXT_COMP_BYTES_HDR_MASK       GENMASK(15, 8)
+#define ANA_IP1_NXT_PROT_NXT_COMP_BYTES_HDR(x) (((x) << 8) & ANA_IP1_NXT_PROT_NXT_COMP_BYTES_HDR_MASK)
+#define ANA_IP1_NXT_PROT_NXT_COMP_PTP_OAM      0x0005
+#define ANA_IP1_NXT_PROT_NXT_COMP_IP_UDP_ACH2  0x0003
+
+#define MSCC_ANA_IP1_NXT_PROT_IP1_MODE   0x01b1
+#define ANA_IP1_NXT_PROT_FLOW_OFFSET_IPV4 0x0c00
+#define ANA_IP1_NXT_PROT_FLOW_OFFSET_IPV6 0x0800
+#define ANA_IP1_NXT_PROT_IPV6            0x0001
+#define ANA_IP1_NXT_PROT_IPV4            0x0000
+
+#define MSCC_ANA_IP1_NXT_PROT_IP_MATCH1          0x01b2
+#define ANA_IP1_NXT_PROT_IP_MATCH1_PROT_OFF_MASK       GENMASK(20, 16)
+#define ANA_IP1_NXT_PROT_IP_MATCH1_PROT_OFF(x) (((x) << 16) & ANA_IP1_NXT_PROT_IP_MATCH1_PROT_OFF_MASK)
+#define ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MASK_MASK      GENMASK(15, 8)
+#define ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MASK(x)        (((x) << 15) & ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MASK_MASK)
+#define ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MATCH_MASK     GENMASK(7, 0)
+#define ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MATCH(x)       ((x) & ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MATCH_MASK)
+
+#define MSCC_ANA_IP1_NXT_PROT_MATCH2_UPPER     0x01b3
+#define MSCC_ANA_IP1_NXT_PROT_MATCH2_LOWER     0x01b4
+#define MSCC_ANA_IP1_NXT_PROT_MASK2_UPPER      0x01b5
+#define MSCC_ANA_IP1_NXT_PROT_MASK2_LOWER      0x01b6
+
+#define MSCC_ANA_IP1_NXT_PROT_OFFSET2    0x01b7
+#define ANA_IP1_NXT_PROT_OFFSET2_MASK    GENMASK(6, 0)
+#define ANA_IP1_NXT_PROT_OFFSET2(x)      ((x) & ANA_IP1_NXT_PROT_OFFSET2_MASK)
+
+#define MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM  0x01b8
+#define IP1_NXT_PROT_UDP_CHKSUM_OFF_MASK  GENMASK(15, 8)
+#define IP1_NXT_PROT_UDP_CHKSUM_OFF(x)   (((x) << 8) & IP1_NXT_PROT_UDP_CHKSUM_OFF_MASK)
+#define IP1_NXT_PROT_UDP_CHKSUM_WIDTH_MASK     GENMASK(5, 4)
+#define IP1_NXT_PROT_UDP_CHKSUM_WIDTH(x)  (((x) << 4) & IP1_NXT_PROT_UDP_CHKSUM_WIDTH_MASK)
+#define IP1_NXT_PROT_UDP_CHKSUM_UPDATE   0x0002
+#define IP1_NXT_PROT_UDP_CHKSUM_CLEAR    0x0001
+
+#define MSCC_ANA_IP1_FLOW_ENA(x)         (0x01c0 + ((x) << 4))
+#define IP1_FLOW_MATCH_ADDR_MASK         GENMASK(9, 8)
+#define IP1_FLOW_MATCH_DEST_SRC_ADDR     0x0200
+#define IP1_FLOW_MATCH_DEST_ADDR         0x0100
+#define IP1_FLOW_MATCH_SRC_ADDR                  0x0000
+#define IP1_FLOW_ENA_CHANNEL_MASK_MASK   GENMASK(5, 4)
+#define IP1_FLOW_ENA_CHANNEL_MASK(x)     (((x) << 4) & IP1_FLOW_ENA_CHANNEL_MASK_MASK)
+#define IP1_FLOW_VALID_CH1               IP1_FLOW_ENA_CHANNEL_MASK(2)
+#define IP1_FLOW_VALID_CH0               IP1_FLOW_ENA_CHANNEL_MASK(1)
+#define IP1_FLOW_ENA                     0x0001
+
+#define MSCC_ANA_OAM_PTP_FLOW_ENA(x)     (0x1e0 + ((x) << 4))
+#define MSCC_ANA_OAM_PTP_FLOW_MATCH_LOWER(x)   (MSCC_ANA_OAM_PTP_FLOW_ENA(x) + 2)
+#define MSCC_ANA_OAM_PTP_FLOW_MASK_LOWER(x)    (MSCC_ANA_OAM_PTP_FLOW_ENA(x) + 4)
+
+#define MSCC_ANA_OAM_PTP_FLOW_PTP_0_FIELD(x)   (MSCC_ANA_OAM_PTP_FLOW_ENA(x) + 8)
+
+#define MSCC_ANA_IP1_FLOW_MATCH_UPPER(x)  (MSCC_ANA_IP1_FLOW_ENA(x) + 1)
+#define MSCC_ANA_IP1_FLOW_MATCH_UPPER_MID(x)  (MSCC_ANA_IP1_FLOW_ENA(x) + 2)
+#define MSCC_ANA_IP1_FLOW_MATCH_LOWER_MID(x)  (MSCC_ANA_IP1_FLOW_ENA(x) + 3)
+#define MSCC_ANA_IP1_FLOW_MATCH_LOWER(x)  (MSCC_ANA_IP1_FLOW_ENA(x) + 4)
+#define MSCC_ANA_IP1_FLOW_MASK_UPPER(x)          (MSCC_ANA_IP1_FLOW_ENA(x) + 5)
+#define MSCC_ANA_IP1_FLOW_MASK_UPPER_MID(x)      (MSCC_ANA_IP1_FLOW_ENA(x) + 6)
+#define MSCC_ANA_IP1_FLOW_MASK_LOWER_MID(x)      (MSCC_ANA_IP1_FLOW_ENA(x) + 7)
+#define MSCC_ANA_IP1_FLOW_MASK_LOWER(x)          (MSCC_ANA_IP1_FLOW_ENA(x) + 8)
+
+#define MSCC_ANA_IP2_NXT_PROT_NXT_COMP   0x0240
+#define ANA_IP2_NXT_PROT_NXT_COMP_BYTES_HDR_MASK       GENMASK(15, 8)
+#define ANA_IP2_NXT_PROT_NXT_COMP_BYTES_HDR(x) (((x) << 8) & ANA_IP2_NXT_PROT_NXT_COMP_BYTES_HDR_MASK)
+#define ANA_IP2_NXT_PROT_NXT_COMP_PTP_OAM      0x0005
+#define ANA_IP2_NXT_PROT_NXT_COMP_IP_UDP_ACH2  0x0003
+
+#define MSCC_ANA_IP2_NXT_PROT_UDP_CHKSUM  0x0248
+#define IP2_NXT_PROT_UDP_CHKSUM_OFF_MASK  GENMASK(15, 8)
+#define IP2_NXT_PROT_UDP_CHKSUM_OFF(x)   (((x) << 8) & IP2_NXT_PROT_UDP_CHKSUM_OFF_MASK)
+#define IP2_NXT_PROT_UDP_CHKSUM_WIDTH_MASK  GENMASK(5, 4)
+#define IP2_NXT_PROT_UDP_CHKSUM_WIDTH(x)  (((x) << 4) & IP2_NXT_PROT_UDP_CHKSUM_WIDTH_MASK)
+
+#define MSCC_ANA_IP2_FLOW_ENA(x)         (0x0250 + ((x) << 4))
+#define IP2_FLOW_ENA_CHANNEL_MASK_MASK   GENMASK(5, 4)
+#define IP2_FLOW_ENA_CHANNEL_MASK(x)     (((x) << 4) & IP2_FLOW_ENA_CHANNEL_MASK_MASK)
+#define IP2_FLOW_VALID_CH1       IP2_FLOW_ENA_CHANNEL_MASK(2)
+#define IP2_FLOW_VALID_CH0       IP2_FLOW_ENA_CHANNEL_MASK(1)
+
+#define MSCC_ANA_PTP_FLOW_ENA(x)         (0x02d0 + ((x) << 4))
+#define PTP_FLOW_ENA_CHANNEL_MASK_MASK   GENMASK(5, 4)
+#define PTP_FLOW_ENA_CHANNEL_MASK(x)     (((x) << 4) & PTP_FLOW_ENA_CHANNEL_MASK_MASK)
+#define PTP_FLOW_VALID_CH1       PTP_FLOW_ENA_CHANNEL_MASK(2)
+#define PTP_FLOW_VALID_CH0       PTP_FLOW_ENA_CHANNEL_MASK(1)
+#define PTP_FLOW_ENA                     0x0001
+
+#define MSCC_ANA_PTP_FLOW_MATCH_UPPER(x)  (MSCC_ANA_PTP_FLOW_ENA(x) + 1)
+#define PTP_FLOW_MSG_TYPE_MASK           0x0F000000
+#define PTP_FLOW_MSG_PDELAY_RESP         0x04000000
+#define PTP_FLOW_MSG_PDELAY_REQ                  0x02000000
+#define PTP_FLOW_MSG_DELAY_REQ           0x01000000
+#define PTP_FLOW_MSG_SYNC                0x00000000
+
+#define MSCC_ANA_PTP_FLOW_MATCH_LOWER(x)  (MSCC_ANA_PTP_FLOW_ENA(x) + 2)
+#define MSCC_ANA_PTP_FLOW_MASK_UPPER(x)          (MSCC_ANA_PTP_FLOW_ENA(x) + 3)
+#define MSCC_ANA_PTP_FLOW_MASK_LOWER(x)          (MSCC_ANA_PTP_FLOW_ENA(x) + 4)
+
+#define MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(x) (MSCC_ANA_PTP_FLOW_ENA(x) + 5)
+#define PTP_FLOW_DOMAIN_RANGE_ENA         0x0001
+
+#define MSCC_ANA_PTP_FLOW_PTP_ACTION(x)          (MSCC_ANA_PTP_FLOW_ENA(x) + 6)
+#define PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_UPDATE    0x10000000
+#define PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_BYTE_OFFSET_MASK  GENMASK(26, 24)
+#define PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_BYTE_OFFSET(x)    (((x) << 24) & PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_BYTE_OFFSET_MASK)
+#define PTP_FLOW_PTP_ACTION_PTP_CMD_MASK  GENMASK(3, 0)
+#define PTP_FLOW_PTP_ACTION_PTP_CMD(x)   ((x) & PTP_FLOW_PTP_ACTION_PTP_CMD_MASK)
+#define PTP_FLOW_PTP_ACTION_SUB_DELAY_ASYM     0x00200000
+#define PTP_FLOW_PTP_ACTION_ADD_DELAY_ASYM     0x00100000
+#define PTP_FLOW_PTP_ACTION_TIME_OFFSET_MASK   GENMASK(15, 10)
+#define PTP_FLOW_PTP_ACTION_TIME_OFFSET(x)     (((x) << 10) & PTP_FLOW_PTP_ACTION_TIME_OFFSET_MASK)
+#define PTP_FLOW_PTP_ACTION_CORR_OFFSET_MASK   GENMASK(9, 5)
+#define PTP_FLOW_PTP_ACTION_CORR_OFFSET(x)     (((x) << 5) & PTP_FLOW_PTP_ACTION_CORR_OFFSET_MASK)
+#define PTP_FLOW_PTP_ACTION_SAVE_LOCAL_TIME 0x00000010
+
+#define MSCC_ANA_PTP_FLOW_PTP_ACTION2(x)  (MSCC_ANA_PTP_FLOW_ENA(x) + 7)
+#define PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET_MASK       GENMASK(15, 8)
+#define PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(x) (((x) << 8) & PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET_MASK)
+#define PTP_FLOW_PTP_ACTION2_REWRITE_BYTES_MASK        GENMASK(3, 0)
+#define PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(x)  ((x) & PTP_FLOW_PTP_ACTION2_REWRITE_BYTES_MASK)
+
+#define MSCC_ANA_PTP_FLOW_PTP_0_FIELD(x)  (MSCC_ANA_PTP_FLOW_ENA(x) + 8)
+#define PTP_FLOW_PTP_0_FIELD_PTP_FRAME   0x8000
+#define PTP_FLOW_PTP_0_FIELD_RSVRD_CHECK  0x4000
+#define PTP_FLOW_PTP_0_FIELD_OFFSET_MASK  GENMASK(13, 8)
+#define PTP_FLOW_PTP_0_FIELD_OFFSET(x)   (((x) << 8) & PTP_FLOW_PTP_0_FIELD_OFFSET_MASK)
+#define PTP_FLOW_PTP_0_FIELD_BYTES_MASK          GENMASK(3, 0)
+#define PTP_FLOW_PTP_0_FIELD_BYTES(x)    ((x) & PTP_FLOW_PTP_0_FIELD_BYTES_MASK)
+
+#define MSCC_ANA_PTP_IP_CHKSUM_SEL       0x0330
+#define ANA_PTP_IP_CHKSUM_SEL_IP_COMP_2   0x0001
+#define ANA_PTP_IP_CHKSUM_SEL_IP_COMP_1          0x0000
+
+#define MSCC_PHY_ANA_FSB_CFG             0x331
+#define ANA_FSB_ADDR_FROM_BLOCK_SEL_MASK  GENMASK(1, 0)
+#define ANA_FSB_ADDR_FROM_IP2            0x0003
+#define ANA_FSB_ADDR_FROM_IP1            0x0002
+#define ANA_FSB_ADDR_FROM_ETH2           0x0001
+#define ANA_FSB_ADDR_FROM_ETH1           0x0000
+
+#define MSCC_PHY_ANA_FSB_REG(x)                  (0x332 + (x))
+
+#define COMP_MAX_FLOWS                   8
+#define PTP_COMP_MAX_FLOWS               6
+
+#define PPS_WIDTH_ADJ                    0x1dcd6500
+#define STALL_EGR_LATENCY(x)             (1536000 / (x))
+
+/* PHC clock available frequencies. */
+enum {
+       PHC_CLK_125MHZ,
+       PHC_CLK_156_25MHZ,
+       PHC_CLK_200MHZ,
+       PHC_CLK_250MHZ,
+       PHC_CLK_500MHZ,
+};
+
+enum ptp_cmd {
+       PTP_NOP = 0,
+       PTP_WRITE_1588 = 5,
+       PTP_WRITE_NS = 7,
+       PTP_SAVE_IN_TS_FIFO = 11, /* invalid when writing in reg */
+};
+
+enum vsc85xx_ptp_msg_type {
+       PTP_MSG_TYPE_SYNC,
+       PTP_MSG_TYPE_DELAY_REQ,
+};
+
+struct vsc85xx_ptphdr {
+       u8 tsmt; /* transportSpecific | messageType */
+       u8 ver;  /* reserved0 | versionPTP */
+       __be16 msglen;
+       u8 domain;
+       u8 rsrvd1;
+       __be16 flags;
+       __be64 correction;
+       __be32 rsrvd2;
+       __be64 clk_identity;
+       __be16 src_port_id;
+       __be16 seq_id;
+       u8 ctrl;
+       u8 log_interval;
+} __attribute__((__packed__));
+
+/* Represents an entry in the timestamping FIFO */
+struct vsc85xx_ts_fifo {
+       u32 ns;
+       u64 secs:48;
+       u8 sig[16];
+} __attribute__((__packed__));
+
+struct vsc85xx_ptp {
+       struct phy_device *phydev;
+       struct ptp_clock *ptp_clock;
+       struct ptp_clock_info caps;
+       struct sk_buff_head tx_queue;
+       enum hwtstamp_tx_types tx_type;
+       enum hwtstamp_rx_filters rx_filter;
+       u8 configured:1;
+};
+
+#endif /* _MSCC_PHY_PTP_H_ */
index defe09d..bd11e62 100644 (file)
@@ -219,7 +219,7 @@ int genphy_c45_read_link(struct phy_device *phydev)
        int val, devad;
        bool link = true;
 
-       if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) {
+       if (phydev->c45_ids.mmds_present & MDIO_DEVS_AN) {
                val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1);
                if (val < 0)
                        return val;
@@ -409,7 +409,7 @@ int genphy_c45_pma_read_abilities(struct phy_device *phydev)
        int val;
 
        linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
-       if (phydev->c45_ids.devices_in_package & MDIO_DEVS_AN) {
+       if (phydev->c45_ids.mmds_present & MDIO_DEVS_AN) {
                val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
                if (val < 0)
                        return val;
index 46bd68e..ff8e14b 100644 (file)
@@ -8,7 +8,7 @@
 
 const char *phy_speed_to_str(int speed)
 {
-       BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 75,
+       BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 90,
                "Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
                "If a speed or mode has been added please update phy_speed_to_str "
                "and the PHY settings array.\n");
@@ -78,12 +78,22 @@ static const struct phy_setting settings[] = {
        PHY_SETTING( 400000, FULL, 400000baseLR8_ER8_FR8_Full   ),
        PHY_SETTING( 400000, FULL, 400000baseDR8_Full           ),
        PHY_SETTING( 400000, FULL, 400000baseSR8_Full           ),
+       PHY_SETTING( 400000, FULL, 400000baseCR4_Full           ),
+       PHY_SETTING( 400000, FULL, 400000baseKR4_Full           ),
+       PHY_SETTING( 400000, FULL, 400000baseLR4_ER4_FR4_Full   ),
+       PHY_SETTING( 400000, FULL, 400000baseDR4_Full           ),
+       PHY_SETTING( 400000, FULL, 400000baseSR4_Full           ),
        /* 200G */
        PHY_SETTING( 200000, FULL, 200000baseCR4_Full           ),
        PHY_SETTING( 200000, FULL, 200000baseKR4_Full           ),
        PHY_SETTING( 200000, FULL, 200000baseLR4_ER4_FR4_Full   ),
        PHY_SETTING( 200000, FULL, 200000baseDR4_Full           ),
        PHY_SETTING( 200000, FULL, 200000baseSR4_Full           ),
+       PHY_SETTING( 200000, FULL, 200000baseCR2_Full           ),
+       PHY_SETTING( 200000, FULL, 200000baseKR2_Full           ),
+       PHY_SETTING( 200000, FULL, 200000baseLR2_ER2_FR2_Full   ),
+       PHY_SETTING( 200000, FULL, 200000baseDR2_Full           ),
+       PHY_SETTING( 200000, FULL, 200000baseSR2_Full           ),
        /* 100G */
        PHY_SETTING( 100000, FULL, 100000baseCR4_Full           ),
        PHY_SETTING( 100000, FULL, 100000baseKR4_Full           ),
@@ -94,6 +104,11 @@ static const struct phy_setting settings[] = {
        PHY_SETTING( 100000, FULL, 100000baseLR2_ER2_FR2_Full   ),
        PHY_SETTING( 100000, FULL, 100000baseDR2_Full           ),
        PHY_SETTING( 100000, FULL, 100000baseSR2_Full           ),
+       PHY_SETTING( 100000, FULL, 100000baseCR_Full            ),
+       PHY_SETTING( 100000, FULL, 100000baseKR_Full            ),
+       PHY_SETTING( 100000, FULL, 100000baseLR_ER_FR_Full      ),
+       PHY_SETTING( 100000, FULL, 100000baseDR_Full            ),
+       PHY_SETTING( 100000, FULL, 100000baseSR_Full            ),
        /* 56G */
        PHY_SETTING(  56000, FULL,  56000baseCR4_Full           ),
        PHY_SETTING(  56000, FULL,  56000baseKR4_Full           ),
index 1de3938..79b4f35 100644 (file)
@@ -489,6 +489,54 @@ static void phy_abort_cable_test(struct phy_device *phydev)
                phydev_err(phydev, "Error while aborting cable test");
 }
 
+int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data)
+{
+       if (!phydev->drv)
+               return -EIO;
+
+       mutex_lock(&phydev->lock);
+       phydev->drv->get_strings(phydev, data);
+       mutex_unlock(&phydev->lock);
+
+       return 0;
+}
+EXPORT_SYMBOL(phy_ethtool_get_strings);
+
+int phy_ethtool_get_sset_count(struct phy_device *phydev)
+{
+       int ret;
+
+       if (!phydev->drv)
+               return -EIO;
+
+       if (phydev->drv->get_sset_count &&
+           phydev->drv->get_strings &&
+           phydev->drv->get_stats) {
+               mutex_lock(&phydev->lock);
+               ret = phydev->drv->get_sset_count(phydev);
+               mutex_unlock(&phydev->lock);
+
+               return ret;
+       }
+
+       return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(phy_ethtool_get_sset_count);
+
+int phy_ethtool_get_stats(struct phy_device *phydev,
+                         struct ethtool_stats *stats, u64 *data)
+{
+       if (!phydev->drv)
+               return -EIO;
+
+       mutex_lock(&phydev->lock);
+       phydev->drv->get_stats(phydev, stats, data);
+       mutex_unlock(&phydev->lock);
+
+       return 0;
+}
+EXPORT_SYMBOL(phy_ethtool_get_stats);
+
 int phy_start_cable_test(struct phy_device *phydev,
                         struct netlink_ext_ack *extack)
 {
@@ -840,7 +888,7 @@ static void phy_error(struct phy_device *phydev)
  * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
  * @phydev: target phy_device struct
  */
-static int phy_disable_interrupts(struct phy_device *phydev)
+int phy_disable_interrupts(struct phy_device *phydev)
 {
        int err;
 
index 04946de..1b95235 100644 (file)
@@ -9,28 +9,29 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
+#include <linux/bitmap.h>
 #include <linux/delay.h>
-#include <linux/netdevice.h>
+#include <linux/errno.h>
 #include <linux/etherdevice.h>
-#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mdio.h>
+#include <linux/mii.h>
 #include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/mii.h>
-#include <linux/ethtool.h>
-#include <linux/bitmap.h>
+#include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <linux/phy_led_triggers.h>
+#include <linux/property.h>
 #include <linux/sfp.h>
-#include <linux/mdio.h>
-#include <linux/io.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/string.h>
 #include <linux/uaccess.h>
+#include <linux/unistd.h>
 
 MODULE_DESCRIPTION("PHY library");
 MODULE_AUTHOR("Andy Fleming");
@@ -105,10 +106,9 @@ const int phy_10gbit_features_array[1] = {
 };
 EXPORT_SYMBOL_GPL(phy_10gbit_features_array);
 
-const int phy_10gbit_fec_features_array[1] = {
+static const int phy_10gbit_fec_features_array[1] = {
        ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
 };
-EXPORT_SYMBOL_GPL(phy_10gbit_fec_features_array);
 
 __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
 EXPORT_SYMBOL_GPL(phy_10gbit_full_features);
@@ -226,7 +226,6 @@ static void phy_mdio_device_remove(struct mdio_device *mdiodev)
 }
 
 static struct phy_driver genphy_driver;
-extern struct phy_driver genphy_c45_driver;
 
 static LIST_HEAD(phy_fixup_list);
 static DEFINE_MUTEX(phy_fixup_lock);
@@ -661,6 +660,28 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
 }
 EXPORT_SYMBOL(phy_device_create);
 
+/* phy_c45_probe_present - checks to see if a MMD is present in the package
+ * @bus: the target MII bus
+ * @prtad: PHY package address on the MII bus
+ * @devad: PHY device (MMD) address
+ *
+ * Read the MDIO_STAT2 register, and check whether a device is responding
+ * at this address.
+ *
+ * Returns: negative error number on bus access error, zero if no device
+ * is responding, or positive if a device is present.
+ */
+static int phy_c45_probe_present(struct mii_bus *bus, int prtad, int devad)
+{
+       int stat2;
+
+       stat2 = mdiobus_c45_read(bus, prtad, devad, MDIO_STAT2);
+       if (stat2 < 0)
+               return stat2;
+
+       return (stat2 & MDIO_STAT2_DEVPRST) == MDIO_STAT2_DEVPRST_VAL;
+}
+
 /* get_phy_c45_devs_in_pkg - reads a MMD's devices in package registers.
  * @bus: the target MII bus
  * @addr: PHY address on the MII bus
@@ -687,9 +708,6 @@ static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr,
                return -EIO;
        *devices_in_package |= phy_reg;
 
-       /* Bit 0 doesn't represent a device, it indicates c22 regs presence */
-       *devices_in_package &= ~BIT(0);
-
        return 0;
 }
 
@@ -697,54 +715,78 @@ static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr,
  * get_phy_c45_ids - reads the specified addr for its 802.3-c45 IDs.
  * @bus: the target MII bus
  * @addr: PHY address on the MII bus
- * @phy_id: where to store the ID retrieved.
  * @c45_ids: where to store the c45 ID information.
  *
- *   If the PHY devices-in-package appears to be valid, it and the
- *   corresponding identifiers are stored in @c45_ids, zero is stored
- *   in @phy_id.  Otherwise 0xffffffff is stored in @phy_id.  Returns
- *   zero on success.
+ * Read the PHY "devices in package". If this appears to be valid, read
+ * the PHY identifiers for each device. Return the "devices in package"
+ * and identifiers in @c45_ids.
  *
+ * Returns zero on success, %-EIO on bus access error, or %-ENODEV if
+ * the "devices in package" is invalid.
  */
-static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
+static int get_phy_c45_ids(struct mii_bus *bus, int addr,
                           struct phy_c45_device_ids *c45_ids)
 {
        const int num_ids = ARRAY_SIZE(c45_ids->device_ids);
-       u32 *devs = &c45_ids->devices_in_package;
-       int i, phy_reg;
+       u32 devs_in_pkg = 0;
+       int i, ret, phy_reg;
 
        /* Find first non-zero Devices In package. Device zero is reserved
         * for 802.3 c45 complied PHYs, so don't probe it at first.
         */
-       for (i = 1; i < num_ids && *devs == 0; i++) {
-               phy_reg = get_phy_c45_devs_in_pkg(bus, addr, i, devs);
-               if (phy_reg < 0)
-                       return -EIO;
-
-               if ((*devs & 0x1fffffff) == 0x1fffffff) {
-                       /*  If mostly Fs, there is no device there,
-                        *  then let's continue to probe more, as some
-                        *  10G PHYs have zero Devices In package,
-                        *  e.g. Cortina CS4315/CS4340 PHY.
+       for (i = 1; i < MDIO_MMD_NUM && (devs_in_pkg == 0 ||
+            (devs_in_pkg & 0x1fffffff) == 0x1fffffff); i++) {
+               if (i == MDIO_MMD_VEND1 || i == MDIO_MMD_VEND2) {
+                       /* Check that there is a device present at this
+                        * address before reading the devices-in-package
+                        * register to avoid reading garbage from the PHY.
+                        * Some PHYs (88x3310) vendor space is not IEEE802.3
+                        * compliant.
                         */
-                       phy_reg = get_phy_c45_devs_in_pkg(bus, addr, 0, devs);
-                       if (phy_reg < 0)
+                       ret = phy_c45_probe_present(bus, addr, i);
+                       if (ret < 0)
                                return -EIO;
-                       /* no device there, let's get out of here */
-                       if ((*devs & 0x1fffffff) == 0x1fffffff) {
-                               *phy_id = 0xffffffff;
-                               return 0;
-                       } else {
-                               break;
-                       }
+
+                       if (!ret)
+                               continue;
                }
+               phy_reg = get_phy_c45_devs_in_pkg(bus, addr, i, &devs_in_pkg);
+               if (phy_reg < 0)
+                       return -EIO;
+       }
+
+       if ((devs_in_pkg & 0x1fffffff) == 0x1fffffff) {
+               /* If mostly Fs, there is no device there, then let's probe
+                * MMD 0, as some 10G PHYs have zero Devices In package,
+                * e.g. Cortina CS4315/CS4340 PHY.
+                */
+               phy_reg = get_phy_c45_devs_in_pkg(bus, addr, 0, &devs_in_pkg);
+               if (phy_reg < 0)
+                       return -EIO;
+
+               /* no device there, let's get out of here */
+               if ((devs_in_pkg & 0x1fffffff) == 0x1fffffff)
+                       return -ENODEV;
        }
 
        /* Now probe Device Identifiers for each device present. */
        for (i = 1; i < num_ids; i++) {
-               if (!(c45_ids->devices_in_package & (1 << i)))
+               if (!(devs_in_pkg & (1 << i)))
                        continue;
 
+               if (i == MDIO_MMD_VEND1 || i == MDIO_MMD_VEND2) {
+                       /* Probe the "Device Present" bits for the vendor MMDs
+                        * to ignore these if they do not contain IEEE 802.3
+                        * registers.
+                        */
+                       ret = phy_c45_probe_present(bus, addr, i);
+                       if (ret < 0)
+                               return ret;
+
+                       if (!ret)
+                               continue;
+               }
+
                phy_reg = mdiobus_c45_read(bus, addr, i, MII_PHYSID1);
                if (phy_reg < 0)
                        return -EIO;
@@ -755,34 +797,29 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
                        return -EIO;
                c45_ids->device_ids[i] |= phy_reg;
        }
-       *phy_id = 0;
+
+       c45_ids->devices_in_package = devs_in_pkg;
+       /* Bit 0 doesn't represent a device, it indicates c22 regs presence */
+       c45_ids->mmds_present = devs_in_pkg & ~BIT(0);
+
        return 0;
 }
 
 /**
- * get_phy_id - reads the specified addr for its ID.
+ * get_phy_c22_id - reads the specified addr for its clause 22 ID.
  * @bus: the target MII bus
  * @addr: PHY address on the MII bus
  * @phy_id: where to store the ID retrieved.
- * @is_c45: If true the PHY uses the 802.3 clause 45 protocol
- * @c45_ids: where to store the c45 ID information.
- *
- * Description: In the case of a 802.3-c22 PHY, reads the ID registers
- *   of the PHY at @addr on the @bus, stores it in @phy_id and returns
- *   zero on success.
- *
- *   In the case of a 802.3-c45 PHY, get_phy_c45_ids() is invoked, and
- *   its return value is in turn returned.
  *
+ * Read the 802.3 clause 22 PHY ID from the PHY at @addr on the @bus,
+ * placing it in @phy_id. Return zero on successful read and the ID is
+ * valid, %-EIO on bus access error, or %-ENODEV if no device responds
+ * or invalid ID.
  */
-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
-                     bool is_c45, struct phy_c45_device_ids *c45_ids)
+static int get_phy_c22_id(struct mii_bus *bus, int addr, u32 *phy_id)
 {
        int phy_reg;
 
-       if (is_c45)
-               return get_phy_c45_ids(bus, addr, phy_id, c45_ids);
-
        /* Grab the bits from PHYIR1, and put them in the upper half */
        phy_reg = mdiobus_read(bus, addr, MII_PHYSID1);
        if (phy_reg < 0) {
@@ -794,11 +831,17 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
 
        /* Grab the bits from PHYIR2, and put them in the lower half */
        phy_reg = mdiobus_read(bus, addr, MII_PHYSID2);
-       if (phy_reg < 0)
-               return -EIO;
+       if (phy_reg < 0) {
+               /* returning -ENODEV doesn't stop bus scanning */
+               return (phy_reg == -EIO || phy_reg == -ENODEV) ? -ENODEV : -EIO;
+       }
 
        *phy_id |= phy_reg;
 
+       /* If the phy_id is mostly Fs, there is no device there */
+       if ((*phy_id & 0x1fffffff) == 0x1fffffff)
+               return -ENODEV;
+
        return 0;
 }
 
@@ -809,8 +852,17 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
  * @addr: PHY address on the MII bus
  * @is_c45: If true the PHY uses the 802.3 clause 45 protocol
  *
- * Description: Reads the ID registers of the PHY at @addr on the
- *   @bus, then allocates and returns the phy_device to represent it.
+ * Probe for a PHY at @addr on @bus.
+ *
+ * When probing for a clause 22 PHY, then read the ID registers. If we find
+ * a valid ID, allocate and return a &struct phy_device.
+ *
+ * When probing for a clause 45 PHY, read the "devices in package" registers.
+ * If the "devices in package" appears valid, read the ID registers for each
+ * MMD, allocate and return a &struct phy_device.
+ *
+ * Returns an allocated &struct phy_device on success, %-ENODEV if there is
+ * no PHY present, or %-EIO on bus access error.
  */
 struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
 {
@@ -819,16 +871,17 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
        int r;
 
        c45_ids.devices_in_package = 0;
+       c45_ids.mmds_present = 0;
        memset(c45_ids.device_ids, 0xff, sizeof(c45_ids.device_ids));
 
-       r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
+       if (is_c45)
+               r = get_phy_c45_ids(bus, addr, &c45_ids);
+       else
+               r = get_phy_c22_id(bus, addr, &phy_id);
+
        if (r)
                return ERR_PTR(r);
 
-       /* If the phy_id is mostly Fs, there is no device there */
-       if ((phy_id & 0x1fffffff) == 0x1fffffff)
-               return ERR_PTR(-ENODEV);
-
        return phy_device_create(bus, addr, phy_id, is_c45, &c45_ids);
 }
 EXPORT_SYMBOL(get_phy_device);
@@ -1090,6 +1143,10 @@ int phy_init_hw(struct phy_device *phydev)
        if (ret < 0)
                return ret;
 
+       ret = phy_disable_interrupts(phydev);
+       if (ret)
+               return ret;
+
        if (phydev->drv->config_init)
                ret = phydev->drv->config_init(phydev);
 
@@ -2657,6 +2714,104 @@ void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause)
 }
 EXPORT_SYMBOL(phy_get_pause);
 
+#if IS_ENABLED(CONFIG_OF_MDIO)
+static int phy_get_int_delay_property(struct device *dev, const char *name)
+{
+       s32 int_delay;
+       int ret;
+
+       ret = device_property_read_u32(dev, name, &int_delay);
+       if (ret)
+               return ret;
+
+       return int_delay;
+}
+#else
+static int phy_get_int_delay_property(struct device *dev, const char *name)
+{
+       return -EINVAL;
+}
+#endif
+
+/**
+ * phy_get_delay_index - returns the index of the internal delay
+ * @phydev: phy_device struct
+ * @dev: pointer to the devices device struct
+ * @delay_values: array of delays the PHY supports
+ * @size: the size of the delay array
+ * @is_rx: boolean to indicate to get the rx internal delay
+ *
+ * Returns the index within the array of internal delay passed in.
+ * If the device property is not present then the interface type is checked
+ * if the interface defines use of internal delay then a 1 is returned otherwise
+ * a 0 is returned.
+ * The array must be in ascending order. If PHY does not have an ascending order
+ * array then size = 0 and the value of the delay property is returned.
+ * Return -EINVAL if the delay is invalid or cannot be found.
+ */
+s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
+                          const int *delay_values, int size, bool is_rx)
+{
+       s32 delay;
+       int i;
+
+       if (is_rx) {
+               delay = phy_get_int_delay_property(dev, "rx-internal-delay-ps");
+               if (delay < 0 && size == 0) {
+                       if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+                           phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+                               return 1;
+                       else
+                               return 0;
+               }
+
+       } else {
+               delay = phy_get_int_delay_property(dev, "tx-internal-delay-ps");
+               if (delay < 0 && size == 0) {
+                       if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+                           phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+                               return 1;
+                       else
+                               return 0;
+               }
+       }
+
+       if (delay < 0)
+               return delay;
+
+       if (delay && size == 0)
+               return delay;
+
+       if (delay < delay_values[0] || delay > delay_values[size - 1]) {
+               phydev_err(phydev, "Delay %d is out of range\n", delay);
+               return -EINVAL;
+       }
+
+       if (delay == delay_values[0])
+               return 0;
+
+       for (i = 1; i < size; i++) {
+               if (delay == delay_values[i])
+                       return i;
+
+               /* Find an approximate index by looking up the table */
+               if (delay > delay_values[i - 1] &&
+                   delay < delay_values[i]) {
+                       if (delay - delay_values[i - 1] <
+                           delay_values[i] - delay)
+                               return i - 1;
+                       else
+                               return i;
+               }
+       }
+
+       phydev_err(phydev, "error finding internal delay index for %d\n",
+                  delay);
+
+       return -EINVAL;
+}
+EXPORT_SYMBOL(phy_get_internal_delay);
+
 static bool phy_drv_supports_irq(struct phy_driver *phydrv)
 {
        return phydrv->config_intr && phydrv->ack_interrupt;
@@ -2690,16 +2845,13 @@ static int phy_probe(struct device *dev)
 
        mutex_lock(&phydev->lock);
 
-       if (phydev->drv->probe) {
-               /* Deassert the reset signal */
-               phy_device_reset(phydev, 0);
+       /* Deassert the reset signal */
+       phy_device_reset(phydev, 0);
 
+       if (phydev->drv->probe) {
                err = phydev->drv->probe(phydev);
-               if (err) {
-                       /* Assert the reset signal */
-                       phy_device_reset(phydev, 1);
+               if (err)
                        goto out;
-               }
        }
 
        /* Start out supporting everything. Eventually,
@@ -2761,6 +2913,10 @@ static int phy_probe(struct device *dev)
        phydev->state = PHY_READY;
 
 out:
+       /* Assert the reset signal */
+       if (err)
+               phy_device_reset(phydev, 1);
+
        mutex_unlock(&phydev->lock);
 
        return err;
@@ -2779,12 +2935,12 @@ static int phy_remove(struct device *dev)
        sfp_bus_del_upstream(phydev->sfp_bus);
        phydev->sfp_bus = NULL;
 
-       if (phydev->drv && phydev->drv->remove) {
+       if (phydev->drv && phydev->drv->remove)
                phydev->drv->remove(phydev);
 
-               /* Assert the reset signal */
-               phy_device_reset(phydev, 1);
-       }
+       /* Assert the reset signal */
+       phy_device_reset(phydev, 1);
+
        phydev->drv = NULL;
 
        return 0;
@@ -2872,6 +3028,14 @@ static struct phy_driver genphy_driver = {
        .set_loopback   = genphy_loopback,
 };
 
+static const struct ethtool_phy_ops phy_ethtool_phy_ops = {
+       .get_sset_count         = phy_ethtool_get_sset_count,
+       .get_strings            = phy_ethtool_get_strings,
+       .get_stats              = phy_ethtool_get_stats,
+       .start_cable_test       = phy_start_cable_test,
+       .start_cable_test_tdr   = phy_start_cable_test_tdr,
+};
+
 static int __init phy_init(void)
 {
        int rc;
@@ -2880,6 +3044,7 @@ static int __init phy_init(void)
        if (rc)
                return rc;
 
+       ethtool_set_ethtool_phy_ops(&phy_ethtool_phy_ops);
        features_init();
 
        rc = phy_driver_register(&genphy_c45_driver, THIS_MODULE);
@@ -2901,6 +3066,7 @@ static void __exit phy_exit(void)
        phy_driver_unregister(&genphy_c45_driver);
        phy_driver_unregister(&genphy_driver);
        mdio_bus_exit();
+       ethtool_set_ethtool_phy_ops(NULL);
 }
 
 subsys_initcall(phy_init);
index 0ab65fb..32b4bd6 100644 (file)
@@ -43,6 +43,7 @@ struct phylink {
        const struct phylink_mac_ops *mac_ops;
        const struct phylink_pcs_ops *pcs_ops;
        struct phylink_config *config;
+       struct phylink_pcs *pcs;
        struct device *dev;
        unsigned int old_link_state:1;
 
@@ -241,8 +242,10 @@ static int phylink_parse_fixedlink(struct phylink *pl,
        phylink_set(pl->supported, MII);
        phylink_set(pl->supported, Pause);
        phylink_set(pl->supported, Asym_Pause);
+       phylink_set(pl->supported, Autoneg);
        if (s) {
                __set_bit(s->bit, pl->supported);
+               __set_bit(s->bit, pl->link_config.lp_advertising);
        } else {
                phylink_warn(pl, "fixed link %s duplex %dMbps not recognised\n",
                             pl->link_config.duplex == DUPLEX_FULL ? "full" : "half",
@@ -419,39 +422,102 @@ static void phylink_mac_config(struct phylink *pl,
        pl->mac_ops->mac_config(pl->config, pl->cur_link_an_mode, state);
 }
 
-static void phylink_mac_config_up(struct phylink *pl,
-                                 const struct phylink_link_state *state)
-{
-       if (state->link)
-               phylink_mac_config(pl, state);
-}
-
 static void phylink_mac_pcs_an_restart(struct phylink *pl)
 {
        if (pl->link_config.an_enabled &&
-           phy_interface_mode_is_8023z(pl->link_config.interface)) {
+           phy_interface_mode_is_8023z(pl->link_config.interface) &&
+           phylink_autoneg_inband(pl->cur_link_an_mode)) {
                if (pl->pcs_ops)
-                       pl->pcs_ops->pcs_an_restart(pl->config);
+                       pl->pcs_ops->pcs_an_restart(pl->pcs);
                else
                        pl->mac_ops->mac_an_restart(pl->config);
        }
 }
 
-static void phylink_pcs_config(struct phylink *pl, bool force_restart,
-                              const struct phylink_link_state *state)
+static void phylink_major_config(struct phylink *pl, bool restart,
+                                 const struct phylink_link_state *state)
 {
-       bool restart = force_restart;
+       int err;
 
-       if (pl->pcs_ops && pl->pcs_ops->pcs_config(pl->config,
-                                                  pl->cur_link_an_mode,
-                                                  state->interface,
-                                                  state->advertising))
-               restart = true;
+       phylink_dbg(pl, "major config %s\n", phy_modes(state->interface));
+
+       if (pl->mac_ops->mac_prepare) {
+               err = pl->mac_ops->mac_prepare(pl->config, pl->cur_link_an_mode,
+                                              state->interface);
+               if (err < 0) {
+                       phylink_err(pl, "mac_prepare failed: %pe\n",
+                                   ERR_PTR(err));
+                       return;
+               }
+       }
 
        phylink_mac_config(pl, state);
 
+       if (pl->pcs_ops) {
+               err = pl->pcs_ops->pcs_config(pl->pcs, pl->cur_link_an_mode,
+                                             state->interface,
+                                             state->advertising,
+                                             !!(pl->link_config.pause &
+                                                MLO_PAUSE_AN));
+               if (err < 0)
+                       phylink_err(pl, "pcs_config failed: %pe\n",
+                                   ERR_PTR(err));
+               if (err > 0)
+                       restart = true;
+       }
        if (restart)
                phylink_mac_pcs_an_restart(pl);
+
+       if (pl->mac_ops->mac_finish) {
+               err = pl->mac_ops->mac_finish(pl->config, pl->cur_link_an_mode,
+                                             state->interface);
+               if (err < 0)
+                       phylink_err(pl, "mac_finish failed: %pe\n",
+                                   ERR_PTR(err));
+       }
+}
+
+/*
+ * Reconfigure for a change of inband advertisement.
+ * If we have a separate PCS, we only need to call its pcs_config() method,
+ * and then restart AN if it indicates something changed. Otherwise, we do
+ * the full MAC reconfiguration.
+ */
+static int phylink_change_inband_advert(struct phylink *pl)
+{
+       int ret;
+
+       if (test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state))
+               return 0;
+
+       if (!pl->pcs_ops) {
+               /* Legacy method */
+               phylink_mac_config(pl, &pl->link_config);
+               phylink_mac_pcs_an_restart(pl);
+               return 0;
+       }
+
+       phylink_dbg(pl, "%s: mode=%s/%s adv=%*pb pause=%02x\n", __func__,
+                   phylink_an_mode_str(pl->cur_link_an_mode),
+                   phy_modes(pl->link_config.interface),
+                   __ETHTOOL_LINK_MODE_MASK_NBITS, pl->link_config.advertising,
+                   pl->link_config.pause);
+
+       /* Modern PCS-based method; update the advert at the PCS, and
+        * restart negotiation if the pcs_config() helper indicates that
+        * the programmed advertisement has changed.
+        */
+       ret = pl->pcs_ops->pcs_config(pl->pcs, pl->cur_link_an_mode,
+                                     pl->link_config.interface,
+                                     pl->link_config.advertising,
+                                     !!(pl->link_config.pause & MLO_PAUSE_AN));
+       if (ret < 0)
+               return ret;
+
+       if (ret > 0)
+               phylink_mac_pcs_an_restart(pl);
+
+       return 0;
 }
 
 static void phylink_mac_pcs_get_state(struct phylink *pl,
@@ -468,7 +534,7 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
        state->link = 1;
 
        if (pl->pcs_ops)
-               pl->pcs_ops->pcs_get_state(pl->config, state);
+               pl->pcs_ops->pcs_get_state(pl->pcs, state);
        else
                pl->mac_ops->mac_pcs_get_state(pl->config, state);
 }
@@ -514,7 +580,7 @@ static void phylink_mac_initial_config(struct phylink *pl, bool force_restart)
        link_state.link = false;
 
        phylink_apply_manual_flow(pl, &link_state);
-       phylink_pcs_config(pl, force_restart, &link_state);
+       phylink_major_config(pl, force_restart, &link_state);
 }
 
 static const char *phylink_pause_to_str(int pause)
@@ -539,7 +605,7 @@ static void phylink_link_up(struct phylink *pl,
        pl->cur_interface = link_state.interface;
 
        if (pl->pcs_ops && pl->pcs_ops->pcs_link_up)
-               pl->pcs_ops->pcs_link_up(pl->config, pl->cur_link_an_mode,
+               pl->pcs_ops->pcs_link_up(pl->pcs, pl->cur_link_an_mode,
                                         pl->cur_interface,
                                         link_state.speed, link_state.duplex);
 
@@ -575,9 +641,15 @@ static void phylink_resolve(struct work_struct *w)
        struct phylink *pl = container_of(w, struct phylink, resolve);
        struct phylink_link_state link_state;
        struct net_device *ndev = pl->netdev;
-       int link_changed;
+       bool mac_config = false;
+       bool cur_link_state;
 
        mutex_lock(&pl->state_mutex);
+       if (pl->netdev)
+               cur_link_state = netif_carrier_ok(ndev);
+       else
+               cur_link_state = pl->old_link_state;
+
        if (pl->phylink_disable_state) {
                pl->mac_link_dropped = false;
                link_state.link = false;
@@ -588,12 +660,12 @@ static void phylink_resolve(struct work_struct *w)
                case MLO_AN_PHY:
                        link_state = pl->phy_state;
                        phylink_apply_manual_flow(pl, &link_state);
-                       phylink_mac_config_up(pl, &link_state);
+                       mac_config = link_state.link;
                        break;
 
                case MLO_AN_FIXED:
                        phylink_get_fixed_state(pl, &link_state);
-                       phylink_mac_config_up(pl, &link_state);
+                       mac_config = link_state.link;
                        break;
 
                case MLO_AN_INBAND:
@@ -611,21 +683,36 @@ static void phylink_resolve(struct work_struct *w)
                                /* If we have a PHY, we need to update with
                                 * the PHY flow control bits. */
                                link_state.pause = pl->phy_state.pause;
-                               phylink_apply_manual_flow(pl, &link_state);
-                               phylink_mac_config(pl, &link_state);
-                       } else {
-                               phylink_apply_manual_flow(pl, &link_state);
+                               mac_config = true;
                        }
+                       phylink_apply_manual_flow(pl, &link_state);
                        break;
                }
        }
 
-       if (pl->netdev)
-               link_changed = (link_state.link != netif_carrier_ok(ndev));
-       else
-               link_changed = (link_state.link != pl->old_link_state);
+       if (mac_config) {
+               if (link_state.interface != pl->link_config.interface) {
+                       /* The interface has changed, force the link down and
+                        * then reconfigure.
+                        */
+                       if (cur_link_state) {
+                               phylink_link_down(pl);
+                               cur_link_state = false;
+                       }
+                       phylink_major_config(pl, false, &link_state);
+                       pl->link_config.interface = link_state.interface;
+               } else if (!pl->pcs_ops) {
+                       /* The interface remains unchanged, only the speed,
+                        * duplex or pause settings have changed. Call the
+                        * old mac_config() method to configure the MAC/PCS
+                        * only if we do not have a PCS installed (an
+                        * unconverted user.)
+                        */
+                       phylink_mac_config(pl, &link_state);
+               }
+       }
 
-       if (link_changed) {
+       if (link_state.link != cur_link_state) {
                pl->old_link_state = link_state.link;
                if (!link_state.link)
                        phylink_link_down(pl);
@@ -777,11 +864,26 @@ struct phylink *phylink_create(struct phylink_config *config,
 }
 EXPORT_SYMBOL_GPL(phylink_create);
 
-void phylink_add_pcs(struct phylink *pl, const struct phylink_pcs_ops *ops)
+/**
+ * phylink_set_pcs() - set the current PCS for phylink to use
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @pcs: a pointer to the &struct phylink_pcs
+ *
+ * Bind the MAC PCS to phylink.  This may be called after phylink_create(),
+ * in mac_prepare() or mac_config() methods if it is desired to dynamically
+ * change the PCS.
+ *
+ * Please note that there are behavioural changes with the mac_config()
+ * callback if a PCS is present (denoting a newer setup) so removing a PCS
+ * is not supported, and if a PCS is going to be used, it must be registered
+ * by calling phylink_set_pcs() at the latest in the first mac_config() call.
+ */
+void phylink_set_pcs(struct phylink *pl, struct phylink_pcs *pcs)
 {
-       pl->pcs_ops = ops;
+       pl->pcs = pcs;
+       pl->pcs_ops = pcs->ops;
 }
-EXPORT_SYMBOL_GPL(phylink_add_pcs);
+EXPORT_SYMBOL_GPL(phylink_set_pcs);
 
 /**
  * phylink_destroy() - cleanup and destroy the phylink instance
@@ -1126,6 +1228,8 @@ void phylink_start(struct phylink *pl)
                break;
        case MLO_AN_INBAND:
                poll |= pl->config->pcs_poll;
+               if (pl->pcs)
+                       poll |= pl->pcs->poll;
                break;
        }
        if (poll)
@@ -1295,27 +1399,46 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
                                  const struct ethtool_link_ksettings *kset)
 {
        __ETHTOOL_DECLARE_LINK_MODE_MASK(support);
-       struct ethtool_link_ksettings our_kset;
        struct phylink_link_state config;
-       int ret;
+       const struct phy_setting *s;
 
        ASSERT_RTNL();
 
-       if (kset->base.autoneg != AUTONEG_DISABLE &&
-           kset->base.autoneg != AUTONEG_ENABLE)
-               return -EINVAL;
+       if (pl->phydev) {
+               /* We can rely on phylib for this update; we also do not need
+                * to update the pl->link_config settings:
+                * - the configuration returned via ksettings_get() will come
+                *   from phylib whenever a PHY is present.
+                * - link_config.interface will be updated by the PHY calling
+                *   back via phylink_phy_change() and a subsequent resolve.
+                * - initial link configuration for PHY mode comes from the
+                *   last phy state updated via phylink_phy_change().
+                * - other configuration changes (e.g. pause modes) are
+                *   performed directly via phylib.
+                * - if in in-band mode with a PHY, the link configuration
+                *   is passed on the link from the PHY, and all of
+                *   link_config.{speed,duplex,an_enabled,pause} are not used.
+                * - the only possible use would be link_config.advertising
+                *   pause modes when in 1000base-X mode with a PHY, but in
+                *   the presence of a PHY, this should not be changed as that
+                *   should be determined from the media side advertisement.
+                */
+               return phy_ethtool_ksettings_set(pl->phydev, kset);
+       }
 
        linkmode_copy(support, pl->supported);
        config = pl->link_config;
+       config.an_enabled = kset->base.autoneg == AUTONEG_ENABLE;
 
-       /* Mask out unsupported advertisements */
+       /* Mask out unsupported advertisements, and force the autoneg bit */
        linkmode_and(config.advertising, kset->link_modes.advertising,
                     support);
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising,
+                        config.an_enabled);
 
        /* FIXME: should we reject autoneg if phy/mac does not support it? */
-       if (kset->base.autoneg == AUTONEG_DISABLE) {
-               const struct phy_setting *s;
-
+       switch (kset->base.autoneg) {
+       case AUTONEG_DISABLE:
                /* Autonegotiation disabled, select a suitable speed and
                 * duplex.
                 */
@@ -1324,90 +1447,73 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
                if (!s)
                        return -EINVAL;
 
-               /* If we have a fixed link (as specified by firmware), refuse
-                * to change link parameters.
+               /* If we have a fixed link, refuse to change link parameters.
+                * If the link parameters match, accept them but do nothing.
                 */
-               if (pl->cur_link_an_mode == MLO_AN_FIXED &&
-                   (s->speed != pl->link_config.speed ||
-                    s->duplex != pl->link_config.duplex))
-                       return -EINVAL;
+               if (pl->cur_link_an_mode == MLO_AN_FIXED) {
+                       if (s->speed != pl->link_config.speed ||
+                           s->duplex != pl->link_config.duplex)
+                               return -EINVAL;
+                       return 0;
+               }
 
                config.speed = s->speed;
                config.duplex = s->duplex;
-               config.an_enabled = false;
+               break;
 
-               __clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising);
-       } else {
-               /* If we have a fixed link, refuse to enable autonegotiation */
-               if (pl->cur_link_an_mode == MLO_AN_FIXED)
-                       return -EINVAL;
+       case AUTONEG_ENABLE:
+               /* If we have a fixed link, allow autonegotiation (since that
+                * is our default case) but do not allow the advertisement to
+                * be changed. If the advertisement matches, simply return.
+                */
+               if (pl->cur_link_an_mode == MLO_AN_FIXED) {
+                       if (!linkmode_equal(config.advertising,
+                                           pl->link_config.advertising))
+                               return -EINVAL;
+                       return 0;
+               }
 
                config.speed = SPEED_UNKNOWN;
                config.duplex = DUPLEX_UNKNOWN;
-               config.an_enabled = true;
+               break;
 
-               __set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising);
+       default:
+               return -EINVAL;
        }
 
-       if (pl->phydev) {
-               /* If we have a PHY, we process the kset change via phylib.
-                * phylib will call our link state function if the PHY
-                * parameters have changed, which will trigger a resolve
-                * and update the MAC configuration.
-                */
-               our_kset = *kset;
-               linkmode_copy(our_kset.link_modes.advertising,
-                             config.advertising);
-               our_kset.base.speed = config.speed;
-               our_kset.base.duplex = config.duplex;
+       /* We have ruled out the case with a PHY attached, and the
+        * fixed-link cases.  All that is left are in-band links.
+        */
+       if (phylink_validate(pl, support, &config))
+               return -EINVAL;
 
-               ret = phy_ethtool_ksettings_set(pl->phydev, &our_kset);
-               if (ret)
-                       return ret;
+       /* If autonegotiation is enabled, we must have an advertisement */
+       if (config.an_enabled && phylink_is_empty_linkmode(config.advertising))
+               return -EINVAL;
 
-               mutex_lock(&pl->state_mutex);
-               /* Save the new configuration */
-               linkmode_copy(pl->link_config.advertising,
-                             our_kset.link_modes.advertising);
+       mutex_lock(&pl->state_mutex);
+       pl->link_config.speed = config.speed;
+       pl->link_config.duplex = config.duplex;
+       pl->link_config.an_enabled = config.an_enabled;
+
+       if (pl->link_config.interface != config.interface) {
+               /* The interface changed, e.g. 1000base-X <-> 2500base-X */
+               /* We need to force the link down, then change the interface */
+               if (pl->old_link_state) {
+                       phylink_link_down(pl);
+                       pl->old_link_state = false;
+               }
+               if (!test_bit(PHYLINK_DISABLE_STOPPED,
+                             &pl->phylink_disable_state))
+                       phylink_major_config(pl, false, &config);
                pl->link_config.interface = config.interface;
-               pl->link_config.speed = our_kset.base.speed;
-               pl->link_config.duplex = our_kset.base.duplex;
-               pl->link_config.an_enabled = our_kset.base.autoneg !=
-                                            AUTONEG_DISABLE;
-               mutex_unlock(&pl->state_mutex);
-       } else {
-               /* For a fixed link, this isn't able to change any parameters,
-                * which just leaves inband mode.
-                */
-               if (phylink_validate(pl, support, &config))
-                       return -EINVAL;
-
-               /* If autonegotiation is enabled, we must have an advertisement */
-               if (config.an_enabled &&
-                   phylink_is_empty_linkmode(config.advertising))
-                       return -EINVAL;
-
-               mutex_lock(&pl->state_mutex);
                linkmode_copy(pl->link_config.advertising, config.advertising);
-               pl->link_config.interface = config.interface;
-               pl->link_config.speed = config.speed;
-               pl->link_config.duplex = config.duplex;
-               pl->link_config.an_enabled = kset->base.autoneg !=
-                                            AUTONEG_DISABLE;
-
-               if (pl->cur_link_an_mode == MLO_AN_INBAND &&
-                   !test_bit(PHYLINK_DISABLE_STOPPED,
-                             &pl->phylink_disable_state)) {
-                       /* If in 802.3z mode, this updates the advertisement.
-                        *
-                        * If we are in SGMII mode without a PHY, there is no
-                        * advertisement; the only thing we have is the pause
-                        * modes which can only come from a PHY.
-                        */
-                       phylink_pcs_config(pl, true, &pl->link_config);
-               }
-               mutex_unlock(&pl->state_mutex);
+       } else if (!linkmode_equal(pl->link_config.advertising,
+                                  config.advertising)) {
+               linkmode_copy(pl->link_config.advertising, config.advertising);
+               phylink_change_inband_advert(pl);
        }
+       mutex_unlock(&pl->state_mutex);
 
        return 0;
 }
@@ -1463,6 +1569,8 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
                                   struct ethtool_pauseparam *pause)
 {
        struct phylink_link_state *config = &pl->link_config;
+       bool manual_changed;
+       int pause_state;
 
        ASSERT_RTNL();
 
@@ -1477,15 +1585,15 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
            !pause->autoneg && pause->rx_pause != pause->tx_pause)
                return -EINVAL;
 
-       mutex_lock(&pl->state_mutex);
-       config->pause = 0;
+       pause_state = 0;
        if (pause->autoneg)
-               config->pause |= MLO_PAUSE_AN;
+               pause_state |= MLO_PAUSE_AN;
        if (pause->rx_pause)
-               config->pause |= MLO_PAUSE_RX;
+               pause_state |= MLO_PAUSE_RX;
        if (pause->tx_pause)
-               config->pause |= MLO_PAUSE_TX;
+               pause_state |= MLO_PAUSE_TX;
 
+       mutex_lock(&pl->state_mutex);
        /*
         * See the comments for linkmode_set_pause(), wrt the deficiencies
         * with the current implementation.  A solution to this issue would
@@ -1502,18 +1610,37 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
        linkmode_set_pause(config->advertising, pause->tx_pause,
                           pause->rx_pause);
 
-       /* If we have a PHY, phylib will call our link state function if the
-        * mode has changed, which will trigger a resolve and update the MAC
-        * configuration.
+       manual_changed = (config->pause ^ pause_state) & MLO_PAUSE_AN ||
+                        (!(pause_state & MLO_PAUSE_AN) &&
+                          (config->pause ^ pause_state) & MLO_PAUSE_TXRX_MASK);
+
+       config->pause = pause_state;
+
+       /* Update our in-band advertisement, triggering a renegotiation if
+        * the advertisement changed.
         */
-       if (pl->phydev) {
+       if (!pl->phydev)
+               phylink_change_inband_advert(pl);
+
+       mutex_unlock(&pl->state_mutex);
+
+       /* If we have a PHY, a change of the pause frame advertisement will
+        * cause phylib to renegotiate (if AN is enabled) which will in turn
+        * call our phylink_phy_change() and trigger a resolve.  Note that
+        * we can't hold our state mutex while calling phy_set_asym_pause().
+        */
+       if (pl->phydev)
                phy_set_asym_pause(pl->phydev, pause->rx_pause,
                                   pause->tx_pause);
-       } else if (!test_bit(PHYLINK_DISABLE_STOPPED,
-                            &pl->phylink_disable_state)) {
-               phylink_pcs_config(pl, true, &pl->link_config);
+
+       /* If the manual pause settings changed, make sure we trigger a
+        * resolve to update their state; we can not guarantee that the
+        * link will cycle.
+        */
+       if (manual_changed) {
+               pl->mac_link_dropped = true;
+               phylink_run_resolve(pl);
        }
-       mutex_unlock(&pl->state_mutex);
 
        return 0;
 }
@@ -1638,11 +1765,11 @@ static int phylink_phy_read(struct phylink *pl, unsigned int phy_id,
                case MII_BMSR:
                case MII_PHYSID1:
                case MII_PHYSID2:
-                       devad = __ffs(phydev->c45_ids.devices_in_package);
+                       devad = __ffs(phydev->c45_ids.mmds_present);
                        break;
                case MII_ADVERTISE:
                case MII_LPA:
-                       if (!(phydev->c45_ids.devices_in_package & MDIO_DEVS_AN))
+                       if (!(phydev->c45_ids.mmds_present & MDIO_DEVS_AN))
                                return -EINVAL;
                        devad = MDIO_MMD_AN;
                        if (reg == MII_ADVERTISE)
@@ -1678,11 +1805,11 @@ static int phylink_phy_write(struct phylink *pl, unsigned int phy_id,
                case MII_BMSR:
                case MII_PHYSID1:
                case MII_PHYSID2:
-                       devad = __ffs(phydev->c45_ids.devices_in_package);
+                       devad = __ffs(phydev->c45_ids.mmds_present);
                        break;
                case MII_ADVERTISE:
                case MII_LPA:
-                       if (!(phydev->c45_ids.devices_in_package & MDIO_DEVS_AN))
+                       if (!(phydev->c45_ids.mmds_present & MDIO_DEVS_AN))
                                return -EINVAL;
                        devad = MDIO_MMD_AN;
                        if (reg == MII_ADVERTISE)
@@ -1826,6 +1953,54 @@ int phylink_mii_ioctl(struct phylink *pl, struct ifreq *ifr, int cmd)
 }
 EXPORT_SYMBOL_GPL(phylink_mii_ioctl);
 
+/**
+ * phylink_speed_down() - set the non-SFP PHY to lowest speed supported by both
+ *   link partners
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @sync: perform action synchronously
+ *
+ * If we have a PHY that is not part of a SFP module, then set the speed
+ * as described in the phy_speed_down() function. Please see this function
+ * for a description of the @sync parameter.
+ *
+ * Returns zero if there is no PHY, otherwise as per phy_speed_down().
+ */
+int phylink_speed_down(struct phylink *pl, bool sync)
+{
+       int ret = 0;
+
+       ASSERT_RTNL();
+
+       if (!pl->sfp_bus && pl->phydev)
+               ret = phy_speed_down(pl->phydev, sync);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(phylink_speed_down);
+
+/**
+ * phylink_speed_up() - restore the advertised speeds prior to the call to
+ *   phylink_speed_down()
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ *
+ * If we have a PHY that is not part of a SFP module, then restore the
+ * PHY speeds as per phy_speed_up().
+ *
+ * Returns zero if there is no PHY, otherwise as per phy_speed_up().
+ */
+int phylink_speed_up(struct phylink *pl)
+{
+       int ret = 0;
+
+       ASSERT_RTNL();
+
+       if (!pl->sfp_bus && pl->phydev)
+               ret = phy_speed_up(pl->phydev);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(phylink_speed_up);
+
 static void phylink_sfp_attach(void *upstream, struct sfp_bus *bus)
 {
        struct phylink *pl = upstream;
@@ -2268,6 +2443,43 @@ int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs,
 EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_set_advertisement);
 
 /**
+ * phylink_mii_c22_pcs_config() - configure clause 22 PCS
+ * @pcs: a pointer to a &struct mdio_device.
+ * @mode: link autonegotiation mode
+ * @interface: the PHY interface mode being configured
+ * @advertising: the ethtool advertisement mask
+ *
+ * Configure a Clause 22 PCS PHY with the appropriate negotiation
+ * parameters for the @mode, @interface and @advertising parameters.
+ * Returns negative error number on failure, zero if the advertisement
+ * has not changed, or positive if there is a change.
+ */
+int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
+                              phy_interface_t interface,
+                              const unsigned long *advertising)
+{
+       bool changed;
+       u16 bmcr;
+       int ret;
+
+       ret = phylink_mii_c22_pcs_set_advertisement(pcs, interface,
+                                                   advertising);
+       if (ret < 0)
+               return ret;
+
+       changed = ret > 0;
+
+       bmcr = mode == MLO_AN_INBAND ? BMCR_ANENABLE : 0;
+       ret = mdiobus_modify(pcs->bus, pcs->addr, MII_BMCR,
+                            BMCR_ANENABLE, bmcr);
+       if (ret < 0)
+               return ret;
+
+       return changed ? 1 : 0;
+}
+EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_config);
+
+/**
  * phylink_mii_c22_pcs_an_restart() - restart 802.3z autonegotiation
  * @pcs: a pointer to a &struct mdio_device.
  *
index c7229d0..95dbe5e 100644 (file)
@@ -638,6 +638,18 @@ static struct phy_driver realtek_drvs[] = {
                .read_mmd       = rtl8125_read_mmd,
                .write_mmd      = rtl8125_write_mmd,
        }, {
+               PHY_ID_MATCH_EXACT(0x001cc840),
+               .name           = "RTL8125B 2.5Gbps internal",
+               .get_features   = rtl8125_get_features,
+               .config_aneg    = rtl8125_config_aneg,
+               .read_status    = rtl8125_read_status,
+               .suspend        = genphy_suspend,
+               .resume         = rtlgen_resume,
+               .read_page      = rtl821x_read_page,
+               .write_page     = rtl821x_write_page,
+               .read_mmd       = rtl8125_read_mmd,
+               .write_mmd      = rtl8125_write_mmd,
+       }, {
                PHY_ID_MATCH_EXACT(0x001cc961),
                .name           = "RTL8366RB Gigabit Ethernet",
                .config_init    = &rtl8366rb_config_init,
index 73c2969..c24b0e8 100644 (file)
@@ -1632,10 +1632,43 @@ static int sfp_sm_mod_hpower(struct sfp *sfp, bool enable)
        return 0;
 }
 
+static int sfp_cotsworks_fixup_check(struct sfp *sfp, struct sfp_eeprom_id *id)
+{
+       u8 check;
+       int err;
+
+       if (id->base.phys_id != SFF8024_ID_SFF_8472 ||
+           id->base.phys_ext_id != SFP_PHYS_EXT_ID_SFP ||
+           id->base.connector != SFF8024_CONNECTOR_LC) {
+               dev_warn(sfp->dev, "Rewriting fiber module EEPROM with corrected values\n");
+               id->base.phys_id = SFF8024_ID_SFF_8472;
+               id->base.phys_ext_id = SFP_PHYS_EXT_ID_SFP;
+               id->base.connector = SFF8024_CONNECTOR_LC;
+               err = sfp_write(sfp, false, SFP_PHYS_ID, &id->base, 3);
+               if (err != 3) {
+                       dev_err(sfp->dev, "Failed to rewrite module EEPROM: %d\n", err);
+                       return err;
+               }
+
+               /* Cotsworks modules have been found to require a delay between write operations. */
+               mdelay(50);
+
+               /* Update base structure checksum */
+               check = sfp_check(&id->base, sizeof(id->base) - 1);
+               err = sfp_write(sfp, false, SFP_CC_BASE, &check, 1);
+               if (err != 1) {
+                       dev_err(sfp->dev, "Failed to update base structure checksum in fiber module EEPROM: %d\n", err);
+                       return err;
+               }
+       }
+       return 0;
+}
+
 static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
 {
        /* SFP module inserted - read I2C data */
        struct sfp_eeprom_id id;
+       bool cotsworks_sfbg;
        bool cotsworks;
        u8 check;
        int ret;
@@ -1657,6 +1690,17 @@ static int sfp_sm_mod_probe(struct sfp *sfp, bool report)
         * serial number and date code.
         */
        cotsworks = !memcmp(id.base.vendor_name, "COTSWORKS       ", 16);
+       cotsworks_sfbg = !memcmp(id.base.vendor_pn, "SFBG", 4);
+
+       /* Cotsworks SFF module EEPROM do not always have valid phys_id,
+        * phys_ext_id, and connector bytes.  Rewrite SFF EEPROM bytes if
+        * Cotsworks PN matches and bytes are not correct.
+        */
+       if (cotsworks && cotsworks_sfbg) {
+               ret = sfp_cotsworks_fixup_check(sfp, &id);
+               if (ret < 0)
+                       return ret;
+       }
 
        /* Validate the checksum over the base structure */
        check = sfp_check(&id.base, sizeof(id.base) - 1);
@@ -2238,6 +2282,7 @@ static int sfp_probe(struct platform_device *pdev)
 {
        const struct sff_data *sff;
        struct i2c_adapter *i2c;
+       char *sfp_irq_name;
        struct sfp *sfp;
        int err, i;
 
@@ -2349,12 +2394,19 @@ static int sfp_probe(struct platform_device *pdev)
                        continue;
                }
 
+               sfp_irq_name = devm_kasprintf(sfp->dev, GFP_KERNEL,
+                                             "%s-%s", dev_name(sfp->dev),
+                                             gpio_of_names[i]);
+
+               if (!sfp_irq_name)
+                       return -ENOMEM;
+
                err = devm_request_threaded_irq(sfp->dev, sfp->gpio_irq[i],
                                                NULL, sfp_irq,
                                                IRQF_ONESHOT |
                                                IRQF_TRIGGER_RISING |
                                                IRQF_TRIGGER_FALLING,
-                                               dev_name(sfp->dev), sfp);
+                                               sfp_irq_name, sfp);
                if (err) {
                        sfp->gpio_irq[i] = 0;
                        sfp->need_poll = true;
index 93da7d3..74568ae 100644 (file)
@@ -122,10 +122,13 @@ static int lan87xx_read_status(struct phy_device *phydev)
                if (rc < 0)
                        return rc;
 
-               /* Wait max 640 ms to detect energy */
-               phy_read_poll_timeout(phydev, MII_LAN83C185_CTRL_STATUS, rc,
-                                     rc & MII_LAN83C185_ENERGYON, 10000,
-                                     640000, true);
+               /* Wait max 640 ms to detect energy and the timeout is not
+                * an actual error.
+                */
+               read_poll_timeout(phy_read, rc,
+                                 rc & MII_LAN83C185_ENERGYON || rc < 0,
+                                 10000, 640000, true, phydev,
+                                 MII_LAN83C185_CTRL_STATUS);
                if (rc < 0)
                        return rc;
 
index e89cdeb..d82016d 100644 (file)
@@ -142,7 +142,7 @@ static void plip_timer_bh(struct work_struct *work);
 static void plip_interrupt(void *dev_id);
 
 /* Functions for DEV methods */
-static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
 static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
                             unsigned short type, const void *daddr,
                            const void *saddr, unsigned len);
@@ -958,7 +958,7 @@ plip_interrupt(void *dev_id)
        spin_unlock_irqrestore(&nl->lock, flags);
 }
 
-static int
+static netdev_tx_t
 plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
 {
        struct net_local *nl = netdev_priv(dev);
index beedaad..d7f50b8 100644 (file)
@@ -1110,8 +1110,6 @@ static const struct proto_ops pppoe_ops = {
        .poll           = datagram_poll,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
-       .setsockopt     = sock_no_setsockopt,
-       .getsockopt     = sock_no_getsockopt,
        .sendmsg        = pppoe_sendmsg,
        .recvmsg        = pppoe_recvmsg,
        .mmap           = sock_no_mmap,
index acccb74..ee50584 100644 (file)
@@ -618,8 +618,6 @@ static const struct proto_ops pptp_ops = {
        .getname    = pptp_getname,
        .listen     = sock_no_listen,
        .shutdown   = sock_no_shutdown,
-       .setsockopt = sock_no_setsockopt,
-       .getsockopt = sock_no_getsockopt,
        .sendmsg    = sock_no_sendmsg,
        .recvmsg    = sock_no_recvmsg,
        .mmap       = sock_no_mmap,
index dacb4f6..c2e4408 100644 (file)
@@ -1335,6 +1335,10 @@ static int __init tbnet_init(void)
        tb_property_add_immediate(tbnet_dir, "prtcid", 1);
        tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
        tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
+       /* Currently only announce support for match frags ID (bit 1). Bit 0
+        * is reserved for full E2E flow control which we do not support at
+        * the moment.
+        */
        tb_property_add_immediate(tbnet_dir, "prtcstns",
                                  TBNET_MATCH_FRAGS_ID);
 
index 858b012..7adeb91 100644 (file)
@@ -62,6 +62,7 @@
 #include <net/rtnetlink.h>
 #include <net/sock.h>
 #include <net/xdp.h>
+#include <net/ip_tunnels.h>
 #include <linux/seq_file.h>
 #include <linux/uio.h>
 #include <linux/skb_array.h>
@@ -1351,6 +1352,7 @@ static void tun_net_init(struct net_device *dev)
        switch (tun->flags & TUN_TYPE_MASK) {
        case IFF_TUN:
                dev->netdev_ops = &tun_netdev_ops;
+               dev->header_ops = &ip_tunnel_header_ops;
 
                /* Point-to-Point TUN Device */
                dev->hard_header_len = 0;
index 9507114..a38e868 100644 (file)
@@ -1491,10 +1491,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                }
 
                if (pkt_cnt == 0) {
-                       /* Skip IP alignment psudo header */
-                       skb_pull(skb, 2);
                        skb->len = pkt_len;
-                       skb_set_tail_pointer(skb, pkt_len);
+                       /* Skip IP alignment pseudo header */
+                       skb_pull(skb, 2);
+                       skb_set_tail_pointer(skb, skb->len);
                        skb->truesize = pkt_len + sizeof(struct sk_buff);
                        ax88179_rx_checksum(skb, pkt_hdr);
                        return 1;
@@ -1503,8 +1503,9 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                ax_skb = skb_clone(skb, GFP_ATOMIC);
                if (ax_skb) {
                        ax_skb->len = pkt_len;
-                       ax_skb->data = skb->data + 2;
-                       skb_set_tail_pointer(ax_skb, pkt_len);
+                       /* Skip IP alignment pseudo header */
+                       skb_pull(ax_skb, 2);
+                       skb_set_tail_pointer(ax_skb, ax_skb->len);
                        ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
                        ax88179_rx_checksum(ax_skb, pkt_hdr);
                        usbnet_skb_return(dev, ax_skb);
index a657943..8c1d61c 100644 (file)
@@ -63,10 +63,8 @@ static const u8 mbm_guid[16] = {
        0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a,
 };
 
-static void usbnet_cdc_update_filter(struct usbnet *dev)
+void usbnet_cdc_update_filter(struct usbnet *dev)
 {
-       struct cdc_state        *info = (void *) &dev->data;
-       struct usb_interface    *intf = info->control;
        struct net_device       *net = dev->net;
 
        u16 cdc_filter = USB_CDC_PACKET_TYPE_DIRECTED
@@ -86,12 +84,13 @@ static void usbnet_cdc_update_filter(struct usbnet *dev)
                        USB_CDC_SET_ETHERNET_PACKET_FILTER,
                        USB_TYPE_CLASS | USB_RECIP_INTERFACE,
                        cdc_filter,
-                       intf->cur_altsetting->desc.bInterfaceNumber,
+                       dev->intf->cur_altsetting->desc.bInterfaceNumber,
                        NULL,
                        0,
                        USB_CTRL_SET_TIMEOUT
                );
 }
+EXPORT_SYMBOL_GPL(usbnet_cdc_update_filter);
 
 /* probes control interface, claims data interface, collects the bulk
  * endpoints, activates data interface (if needed), maybe sets MTU.
index 8929669..e04f588 100644 (file)
@@ -792,6 +792,7 @@ static const struct net_device_ops cdc_ncm_netdev_ops = {
        .ndo_stop            = usbnet_stop,
        .ndo_start_xmit      = usbnet_start_xmit,
        .ndo_tx_timeout      = usbnet_tx_timeout,
+       .ndo_set_rx_mode     = usbnet_set_rx_mode,
        .ndo_get_stats64     = usbnet_get_stats64,
        .ndo_change_mtu      = cdc_ncm_change_mtu,
        .ndo_set_mac_address = eth_mac_addr,
@@ -1895,6 +1896,7 @@ static const struct driver_info cdc_ncm_info = {
        .status = cdc_ncm_status,
        .rx_fixup = cdc_ncm_rx_fixup,
        .tx_fixup = cdc_ncm_tx_fixup,
+       .set_rx_mode = usbnet_cdc_update_filter,
 };
 
 /* Same as cdc_ncm_info, but with FLAG_WWAN */
@@ -1908,6 +1910,7 @@ static const struct driver_info wwan_info = {
        .status = cdc_ncm_status,
        .rx_fixup = cdc_ncm_rx_fixup,
        .tx_fixup = cdc_ncm_tx_fixup,
+       .set_rx_mode = usbnet_cdc_update_filter,
 };
 
 /* Same as wwan_info, but with FLAG_NOARP  */
@@ -1921,6 +1924,7 @@ static const struct driver_info wwan_noarp_info = {
        .status = cdc_ncm_status,
        .rx_fixup = cdc_ncm_rx_fixup,
        .tx_fixup = cdc_ncm_tx_fixup,
+       .set_rx_mode = usbnet_cdc_update_filter,
 };
 
 static const struct usb_device_id cdc_devs[] = {
index c792d65..b09b453 100644 (file)
@@ -358,7 +358,7 @@ static int ipheth_close(struct net_device *net)
        return 0;
 }
 
-static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
+static netdev_tx_t ipheth_tx(struct sk_buff *skb, struct net_device *net)
 {
        struct ipheth_device *dev = netdev_priv(net);
        struct usb_device *udev = dev->udev;
index 31b1d4b..07c42c0 100644 (file)
@@ -1370,6 +1370,7 @@ static const struct usb_device_id products[] = {
        {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
        {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
+       {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */
        {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)},    /* Quectel BG96 */
        {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
        {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)},    /* Foxconn T77W968 LTE */
index 355be77..bb4ccbd 100644 (file)
@@ -1287,11 +1287,14 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
 
        /* Init all registers */
        ret = smsc95xx_reset(dev);
+       if (ret)
+               goto free_pdata;
 
        /* detect device revision as different features may be available */
        ret = smsc95xx_read_reg(dev, ID_REV, &val);
        if (ret < 0)
-               return ret;
+               goto free_pdata;
+
        val >>= 16;
        pdata->chip_id = val;
        pdata->mdix_ctrl = get_mdix_status(dev->net);
@@ -1317,6 +1320,10 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
        schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
 
        return 0;
+
+free_pdata:
+       kfree(pdata);
+       return ret;
 }
 
 static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
@@ -1324,7 +1331,7 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
        struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
 
        if (pdata) {
-               cancel_delayed_work(&pdata->carrier_check);
+               cancel_delayed_work_sync(&pdata->carrier_check);
                netif_dbg(dev, ifdown, dev->net, "free pdata\n");
                kfree(pdata);
                pdata = NULL;
index 5ec97de..e45935a 100644 (file)
@@ -1108,12 +1108,13 @@ static void __handle_link_change(struct usbnet *dev)
        clear_bit(EVENT_LINK_CHANGE, &dev->flags);
 }
 
-static void usbnet_set_rx_mode(struct net_device *net)
+void usbnet_set_rx_mode(struct net_device *net)
 {
        struct usbnet           *dev = netdev_priv(net);
 
        usbnet_defer_kevent(dev, EVENT_SET_RX_MODE);
 }
+EXPORT_SYMBOL_GPL(usbnet_set_rx_mode);
 
 static void __handle_set_rx_mode(struct usbnet *dev)
 {
index 43928a1..60c1aad 100644 (file)
@@ -21,6 +21,7 @@
 #include <net/rtnetlink.h>
 #include <linux/u64_stats_sync.h>
 #include <linux/hashtable.h>
+#include <linux/spinlock_types.h>
 
 #include <linux/inetdevice.h>
 #include <net/arp.h>
 #include <net/netns/generic.h>
 
 #define DRV_NAME       "vrf"
-#define DRV_VERSION    "1.0"
+#define DRV_VERSION    "1.1"
 
 #define FIB_RULE_PREF  1000       /* default preference for FIB rules */
 
+#define HT_MAP_BITS    4
+#define HASH_INITVAL   ((u32)0xcafef00d)
+
+struct  vrf_map {
+       DECLARE_HASHTABLE(ht, HT_MAP_BITS);
+       spinlock_t vmap_lock;
+
+       /* shared_tables:
+        * count how many distinct tables do not comply with the strict mode
+        * requirement.
+        * shared_tables value must be 0 in order to enable the strict mode.
+        *
+        * example of the evolution of shared_tables:
+        *                                                        | time
+        * add  vrf0 --> table 100        shared_tables = 0       | t0
+        * add  vrf1 --> table 101        shared_tables = 0       | t1
+        * add  vrf2 --> table 100        shared_tables = 1       | t2
+        * add  vrf3 --> table 100        shared_tables = 1       | t3
+        * add  vrf4 --> table 101        shared_tables = 2       v t4
+        *
+        * shared_tables is a "step function" (or "staircase function")
+        * and it is increased by one when the second vrf is associated to a
+        * table.
+        *
+        * at t2, vrf0 and vrf2 are bound to table 100: shared_tables = 1.
+        *
+        * at t3, another dev (vrf3) is bound to the same table 100 but the
+        * value of shared_tables is still 1.
+        * This means that no matter how many new vrfs will register on the
+        * table 100, the shared_tables will not increase (considering only
+        * table 100).
+        *
+        * at t4, vrf4 is bound to table 101, and shared_tables = 2.
+        *
+        * Looking at the value of shared_tables we can immediately know if
+        * the strict_mode can or cannot be enforced. Indeed, strict_mode
+        * can be enforced iff shared_tables = 0.
+        *
+        * Conversely, shared_tables is decreased when a vrf is de-associated
+        * from a table with exactly two associated vrfs.
+        */
+       u32 shared_tables;
+
+       bool strict_mode;
+};
+
+struct vrf_map_elem {
+       struct hlist_node hnode;
+       struct list_head vrf_list;  /* VRFs registered to this table */
+
+       u32 table_id;
+       int users;
+       int ifindex;
+};
+
 static unsigned int vrf_net_id;
 
+/* per netns vrf data */
+struct netns_vrf {
+       /* protected by rtnl lock */
+       bool add_fib_rules;
+
+       struct vrf_map vmap;
+       struct ctl_table_header *ctl_hdr;
+};
+
 struct net_vrf {
        struct rtable __rcu     *rth;
        struct rt6_info __rcu   *rt6;
@@ -48,6 +113,9 @@ struct net_vrf {
        struct fib6_table       *fib6_table;
 #endif
        u32                     tb_id;
+
+       struct list_head        me_list;   /* entry in vrf_map_elem */
+       int                     ifindex;
 };
 
 struct pcpu_dstats {
@@ -103,6 +171,214 @@ static void vrf_get_stats64(struct net_device *dev,
        }
 }
 
+static struct vrf_map *netns_vrf_map(struct net *net)
+{
+       struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);
+
+       return &nn_vrf->vmap;
+}
+
+static struct vrf_map *netns_vrf_map_by_dev(struct net_device *dev)
+{
+       return netns_vrf_map(dev_net(dev));
+}
+
+static int vrf_map_elem_get_vrf_ifindex(struct vrf_map_elem *me)
+{
+       struct list_head *me_head = &me->vrf_list;
+       struct net_vrf *vrf;
+
+       if (list_empty(me_head))
+               return -ENODEV;
+
+       vrf = list_first_entry(me_head, struct net_vrf, me_list);
+
+       return vrf->ifindex;
+}
+
+static struct vrf_map_elem *vrf_map_elem_alloc(gfp_t flags)
+{
+       struct vrf_map_elem *me;
+
+       me = kmalloc(sizeof(*me), flags);
+       if (!me)
+               return NULL;
+
+       return me;
+}
+
+static void vrf_map_elem_free(struct vrf_map_elem *me)
+{
+       kfree(me);
+}
+
+static void vrf_map_elem_init(struct vrf_map_elem *me, int table_id,
+                             int ifindex, int users)
+{
+       me->table_id = table_id;
+       me->ifindex = ifindex;
+       me->users = users;
+       INIT_LIST_HEAD(&me->vrf_list);
+}
+
+static struct vrf_map_elem *vrf_map_lookup_elem(struct vrf_map *vmap,
+                                               u32 table_id)
+{
+       struct vrf_map_elem *me;
+       u32 key;
+
+       key = jhash_1word(table_id, HASH_INITVAL);
+       hash_for_each_possible(vmap->ht, me, hnode, key) {
+               if (me->table_id == table_id)
+                       return me;
+       }
+
+       return NULL;
+}
+
+static void vrf_map_add_elem(struct vrf_map *vmap, struct vrf_map_elem *me)
+{
+       u32 table_id = me->table_id;
+       u32 key;
+
+       key = jhash_1word(table_id, HASH_INITVAL);
+       hash_add(vmap->ht, &me->hnode, key);
+}
+
+static void vrf_map_del_elem(struct vrf_map_elem *me)
+{
+       hash_del(&me->hnode);
+}
+
+static void vrf_map_lock(struct vrf_map *vmap) __acquires(&vmap->vmap_lock)
+{
+       spin_lock(&vmap->vmap_lock);
+}
+
+static void vrf_map_unlock(struct vrf_map *vmap) __releases(&vmap->vmap_lock)
+{
+       spin_unlock(&vmap->vmap_lock);
+}
+
+/* called with rtnl lock held */
+static int
+vrf_map_register_dev(struct net_device *dev, struct netlink_ext_ack *extack)
+{
+       struct vrf_map *vmap = netns_vrf_map_by_dev(dev);
+       struct net_vrf *vrf = netdev_priv(dev);
+       struct vrf_map_elem *new_me, *me;
+       u32 table_id = vrf->tb_id;
+       bool free_new_me = false;
+       int users;
+       int res;
+
+       /* we pre-allocate elements used in the spin-locked section (so that we
+        * keep the spinlock as short as possible).
+        */
+       new_me = vrf_map_elem_alloc(GFP_KERNEL);
+       if (!new_me)
+               return -ENOMEM;
+
+       vrf_map_elem_init(new_me, table_id, dev->ifindex, 0);
+
+       vrf_map_lock(vmap);
+
+       me = vrf_map_lookup_elem(vmap, table_id);
+       if (!me) {
+               me = new_me;
+               vrf_map_add_elem(vmap, me);
+               goto link_vrf;
+       }
+
+       /* we already have an entry in the vrf_map, so it means there is (at
+        * least) a vrf registered on the specific table.
+        */
+       free_new_me = true;
+       if (vmap->strict_mode) {
+               /* vrfs cannot share the same table */
+               NL_SET_ERR_MSG(extack, "Table is used by another VRF");
+               res = -EBUSY;
+               goto unlock;
+       }
+
+link_vrf:
+       users = ++me->users;
+       if (users == 2)
+               ++vmap->shared_tables;
+
+       list_add(&vrf->me_list, &me->vrf_list);
+
+       res = 0;
+
+unlock:
+       vrf_map_unlock(vmap);
+
+       /* clean-up, if needed */
+       if (free_new_me)
+               vrf_map_elem_free(new_me);
+
+       return res;
+}
+
+/* called with rtnl lock held */
+static void vrf_map_unregister_dev(struct net_device *dev)
+{
+       struct vrf_map *vmap = netns_vrf_map_by_dev(dev);
+       struct net_vrf *vrf = netdev_priv(dev);
+       u32 table_id = vrf->tb_id;
+       struct vrf_map_elem *me;
+       int users;
+
+       vrf_map_lock(vmap);
+
+       me = vrf_map_lookup_elem(vmap, table_id);
+       if (!me)
+               goto unlock;
+
+       list_del(&vrf->me_list);
+
+       users = --me->users;
+       if (users == 1) {
+               --vmap->shared_tables;
+       } else if (users == 0) {
+               vrf_map_del_elem(me);
+
+               /* no one will refer to this element anymore */
+               vrf_map_elem_free(me);
+       }
+
+unlock:
+       vrf_map_unlock(vmap);
+}
+
+/* return the vrf device index associated with the table_id */
+static int vrf_ifindex_lookup_by_table_id(struct net *net, u32 table_id)
+{
+       struct vrf_map *vmap = netns_vrf_map(net);
+       struct vrf_map_elem *me;
+       int ifindex;
+
+       vrf_map_lock(vmap);
+
+       if (!vmap->strict_mode) {
+               ifindex = -EPERM;
+               goto unlock;
+       }
+
+       me = vrf_map_lookup_elem(vmap, table_id);
+       if (!me) {
+               ifindex = -ENODEV;
+               goto unlock;
+       }
+
+       ifindex = vrf_map_elem_get_vrf_ifindex(me);
+
+unlock:
+       vrf_map_unlock(vmap);
+
+       return ifindex;
+}
+
 /* by default VRF devices do not have a qdisc and are expected
  * to be created with only a single queue.
  */
@@ -1319,6 +1595,8 @@ static void vrf_dellink(struct net_device *dev, struct list_head *head)
        netdev_for_each_lower_dev(dev, port_dev, iter)
                vrf_del_slave(dev, port_dev);
 
+       vrf_map_unregister_dev(dev);
+
        unregister_netdevice_queue(dev, head);
 }
 
@@ -1327,6 +1605,7 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
                       struct netlink_ext_ack *extack)
 {
        struct net_vrf *vrf = netdev_priv(dev);
+       struct netns_vrf *nn_vrf;
        bool *add_fib_rules;
        struct net *net;
        int err;
@@ -1349,11 +1628,26 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
        if (err)
                goto out;
 
+       /* mapping between table_id and vrf;
+        * note: such binding could not be done in the dev init function
+        * because dev->ifindex id is not available yet.
+        */
+       vrf->ifindex = dev->ifindex;
+
+       err = vrf_map_register_dev(dev, extack);
+       if (err) {
+               unregister_netdevice(dev);
+               goto out;
+       }
+
        net = dev_net(dev);
-       add_fib_rules = net_generic(net, vrf_net_id);
+       nn_vrf = net_generic(net, vrf_net_id);
+
+       add_fib_rules = &nn_vrf->add_fib_rules;
        if (*add_fib_rules) {
                err = vrf_add_fib_rules(dev);
                if (err) {
+                       vrf_map_unregister_dev(dev);
                        unregister_netdevice(dev);
                        goto out;
                }
@@ -1440,20 +1734,164 @@ static struct notifier_block vrf_notifier_block __read_mostly = {
        .notifier_call = vrf_device_event,
 };
 
+static int vrf_map_init(struct vrf_map *vmap)
+{
+       spin_lock_init(&vmap->vmap_lock);
+       hash_init(vmap->ht);
+
+       vmap->strict_mode = false;
+
+       return 0;
+}
+
+#ifdef CONFIG_SYSCTL
+static bool vrf_strict_mode(struct vrf_map *vmap)
+{
+       bool strict_mode;
+
+       vrf_map_lock(vmap);
+       strict_mode = vmap->strict_mode;
+       vrf_map_unlock(vmap);
+
+       return strict_mode;
+}
+
+static int vrf_strict_mode_change(struct vrf_map *vmap, bool new_mode)
+{
+       bool *cur_mode;
+       int res = 0;
+
+       vrf_map_lock(vmap);
+
+       cur_mode = &vmap->strict_mode;
+       if (*cur_mode == new_mode)
+               goto unlock;
+
+       if (*cur_mode) {
+               /* disable strict mode */
+               *cur_mode = false;
+       } else {
+               if (vmap->shared_tables) {
+                       /* we cannot allow strict_mode because there are some
+                        * vrfs that share one or more tables.
+                        */
+                       res = -EBUSY;
+                       goto unlock;
+               }
+
+               /* no tables are shared among vrfs, so we can go back
+                * to 1:1 association between a vrf and its table.
+                */
+               *cur_mode = true;
+       }
+
+unlock:
+       vrf_map_unlock(vmap);
+
+       return res;
+}
+
+static int vrf_shared_table_handler(struct ctl_table *table, int write,
+                                   void *buffer, size_t *lenp, loff_t *ppos)
+{
+       struct net *net = (struct net *)table->extra1;
+       struct vrf_map *vmap = netns_vrf_map(net);
+       int proc_strict_mode = 0;
+       struct ctl_table tmp = {
+               .procname       = table->procname,
+               .data           = &proc_strict_mode,
+               .maxlen         = sizeof(int),
+               .mode           = table->mode,
+               .extra1         = SYSCTL_ZERO,
+               .extra2         = SYSCTL_ONE,
+       };
+       int ret;
+
+       if (!write)
+               proc_strict_mode = vrf_strict_mode(vmap);
+
+       ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+
+       if (write && ret == 0)
+               ret = vrf_strict_mode_change(vmap, (bool)proc_strict_mode);
+
+       return ret;
+}
+
+static const struct ctl_table vrf_table[] = {
+       {
+               .procname       = "strict_mode",
+               .data           = NULL,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = vrf_shared_table_handler,
+               /* set by the vrf_netns_init */
+               .extra1         = NULL,
+       },
+       { },
+};
+
+static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf)
+{
+       struct ctl_table *table;
+
+       table = kmemdup(vrf_table, sizeof(vrf_table), GFP_KERNEL);
+       if (!table)
+               return -ENOMEM;
+
+       /* init the extra1 parameter with the reference to current netns */
+       table[0].extra1 = net;
+
+       nn_vrf->ctl_hdr = register_net_sysctl(net, "net/vrf", table);
+       if (!nn_vrf->ctl_hdr) {
+               kfree(table);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void vrf_netns_exit_sysctl(struct net *net)
+{
+       struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);
+       struct ctl_table *table;
+
+       table = nn_vrf->ctl_hdr->ctl_table_arg;
+       unregister_net_sysctl_table(nn_vrf->ctl_hdr);
+       kfree(table);
+}
+#else
+static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf)
+{
+       return 0;
+}
+
+static void vrf_netns_exit_sysctl(struct net *net)
+{
+}
+#endif
+
 /* Initialize per network namespace state */
 static int __net_init vrf_netns_init(struct net *net)
 {
-       bool *add_fib_rules = net_generic(net, vrf_net_id);
+       struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);
 
-       *add_fib_rules = true;
+       nn_vrf->add_fib_rules = true;
+       vrf_map_init(&nn_vrf->vmap);
 
-       return 0;
+       return vrf_netns_init_sysctl(net, nn_vrf);
+}
+
+static void __net_exit vrf_netns_exit(struct net *net)
+{
+       vrf_netns_exit_sysctl(net);
 }
 
 static struct pernet_operations vrf_net_ops __net_initdata = {
        .init = vrf_netns_init,
+       .exit = vrf_netns_exit,
        .id   = &vrf_net_id,
-       .size = sizeof(bool),
+       .size = sizeof(struct netns_vrf),
 };
 
 static int __init vrf_init_module(void)
@@ -1466,14 +1904,24 @@ static int __init vrf_init_module(void)
        if (rc < 0)
                goto error;
 
+       rc = l3mdev_table_lookup_register(L3MDEV_TYPE_VRF,
+                                         vrf_ifindex_lookup_by_table_id);
+       if (rc < 0)
+               goto unreg_pernet;
+
        rc = rtnl_link_register(&vrf_link_ops);
-       if (rc < 0) {
-               unregister_pernet_subsys(&vrf_net_ops);
-               goto error;
-       }
+       if (rc < 0)
+               goto table_lookup_unreg;
 
        return 0;
 
+table_lookup_unreg:
+       l3mdev_table_lookup_unregister(L3MDEV_TYPE_VRF,
+                                      vrf_ifindex_lookup_by_table_id);
+
+unreg_pernet:
+       unregister_pernet_subsys(&vrf_net_ops);
+
 error:
        unregister_netdevice_notifier(&vrf_notifier_block);
        return rc;
index e8085ab..a43c97b 100644 (file)
@@ -1380,6 +1380,8 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                        struct vxlan_rdst *rd;
 
                        if (rcu_access_pointer(f->nh)) {
+                               if (*idx < cb->args[2])
+                                       goto skip_nh;
                                err = vxlan_fdb_info(skb, vxlan, f,
                                                     NETLINK_CB(cb->skb).portid,
                                                     cb->nlh->nlmsg_seq,
@@ -1387,6 +1389,8 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                                                     NLM_F_MULTI, NULL);
                                if (err < 0)
                                        goto out;
+skip_nh:
+                               *idx += 1;
                                continue;
                        }
 
@@ -4473,10 +4477,12 @@ static int vxlan_netdevice_event(struct notifier_block *unused,
        struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
 
        if (event == NETDEV_UNREGISTER) {
-               vxlan_offload_rx_ports(dev, false);
+               if (!dev->udp_tunnel_nic_info)
+                       vxlan_offload_rx_ports(dev, false);
                vxlan_handle_lowerdev_unregister(vn, dev);
        } else if (event == NETDEV_REGISTER) {
-               vxlan_offload_rx_ports(dev, true);
+               if (!dev->udp_tunnel_nic_info)
+                       vxlan_offload_rx_ports(dev, true);
        } else if (event == NETDEV_UDP_TUNNEL_PUSH_INFO ||
                   event == NETDEV_UDP_TUNNEL_DROP_INFO) {
                vxlan_offload_rx_ports(dev, event == NETDEV_UDP_TUNNEL_PUSH_INFO);
index cb57f91..c354a51 100644 (file)
@@ -4,7 +4,7 @@
  *
  * Copyright (C) 2000-2003 Krzysztof Halasa <khc@pm.waw.pl>
  *
- * For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/>
+ * For information see <https://www.kernel.org/pub/linux/utils/net/hdlc/>
  *
  * Sources of information:
  *    Hitachi HD64570 SCA User's Manual
index 5d6532a..f8aed06 100644 (file)
@@ -12,7 +12,7 @@
  * HARDWARE INFO
  *
  * Both cards are developed at the Institute of Computer Science,
- * Masaryk University (http://www.ics.muni.cz/). The hardware is
+ * Masaryk University (https://www.ics.muni.cz/). The hardware is
  * developed by Jiri Novotny <novotny@ics.muni.cz>. More information
  * and the photo of both cards is available at
  * http://www.pavoucek.cz/cosa.html. The card documentation, firmwares
@@ -35,7 +35,7 @@
  *
  * SOFTWARE INFO
  *
- * The homepage of the Linux driver is at http://www.fi.muni.cz/~kas/cosa/.
+ * The homepage of the Linux driver is at https://www.fi.muni.cz/~kas/cosa/.
  * The CVS tree of Linux driver can be viewed there, as well as the
  * firmware binaries and user-space utilities for downloading the firmware
  * into the card and setting up the card.
index 7916efc..0196b7f 100644 (file)
@@ -124,7 +124,7 @@ module_param_array(fst_excluded_list, int, NULL, 0);
 /*      The Am186CH/CC processors support a SmartDMA mode using circular pools
  *      of buffer descriptors. The structure is almost identical to that used
  *      in the LANCE Ethernet controllers. Details available as PDF from the
- *      AMD web site: http://www.amd.com/products/epd/processors/\
+ *      AMD web site: https://www.amd.com/products/epd/processors/\
  *                    2.16bitcont/3.am186cxfa/a21914/21914.pdf
  */
 struct txdesc {                        /* Transmit descriptor */
index e30d91a..2848323 100644 (file)
@@ -303,7 +303,6 @@ static void lapbeth_setup(struct net_device *dev)
        dev->netdev_ops      = &lapbeth_netdev_ops;
        dev->needs_free_netdev = true;
        dev->type            = ARPHRD_X25;
-       dev->hard_header_len = 3;
        dev->mtu             = 1000;
        dev->addr_len        = 0;
 }
@@ -324,6 +323,14 @@ static int lapbeth_new_device(struct net_device *dev)
        if (!ndev)
                goto out;
 
+       /* When transmitting data:
+        * first this driver removes a pseudo header of 1 byte,
+        * then the lapb module prepends an LAPB header of at most 3 bytes,
+        * then this driver prepends a length field of 2 bytes,
+        * then the underlying Ethernet device prepends its own header.
+        */
+       ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len;
+
        lapbeth = netdev_priv(ndev);
        lapbeth->axdev = ndev;
 
index a20f467..842def2 100644 (file)
@@ -2063,7 +2063,7 @@ static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
     /*
      * Chip seems to have locked up
      * Reset it
-     * This whips out all our decriptor
+     * This whips out all our descriptor
      * table and starts from scartch
      */
 
index f9e7fc7..5bf4463 100644 (file)
@@ -4,7 +4,7 @@
  *
  * Copyright (C) 1998-2003 Krzysztof Halasa <khc@pm.waw.pl>
  *
- * For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/>
+ * For information see <https://www.kernel.org/pub/linux/utils/net/hdlc/>
  *
  * Note: integrated CSU/DSU/DDS are not supported by this driver
  *
index 1907356..001fd37 100644 (file)
@@ -4,7 +4,7 @@
  *
  * Copyright (C) 2000-2008 Krzysztof Halasa <khc@pm.waw.pl>
  *
- * For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/>.
+ * For information see <https://www.kernel.org/pub/linux/utils/net/hdlc/>.
  *
  * Sources of information:
  *    Hitachi HD64572 SCA-II User's Manual
index b5f8aac..d006222 100644 (file)
@@ -4,7 +4,7 @@
  *
  * Copyright (C) 2002-2008 Krzysztof Halasa <khc@pm.waw.pl>
  *
- * For information see <http://www.kernel.org/pub/linux/utils/net/hdlc/>
+ * For information see <https://www.kernel.org/pub/linux/utils/net/hdlc/>
  *
  * Sources of information:
  *    Hitachi HD64572 SCA-II User's Manual
index 3ac3f85..c9f65e9 100644 (file)
@@ -45,17 +45,18 @@ static int wg_open(struct net_device *dev)
        if (dev_v6)
                dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE;
 
+       mutex_lock(&wg->device_update_lock);
        ret = wg_socket_init(wg, wg->incoming_port);
        if (ret < 0)
-               return ret;
-       mutex_lock(&wg->device_update_lock);
+               goto out;
        list_for_each_entry(peer, &wg->peer_list, peer_list) {
                wg_packet_send_staged_packets(peer);
                if (peer->persistent_keepalive_interval)
                        wg_packet_send_keepalive(peer);
        }
+out:
        mutex_unlock(&wg->device_update_lock);
-       return 0;
+       return ret;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -225,6 +226,7 @@ static void wg_destruct(struct net_device *dev)
        list_del(&wg->device_list);
        rtnl_unlock();
        mutex_lock(&wg->device_update_lock);
+       rcu_assign_pointer(wg->creating_net, NULL);
        wg->incoming_port = 0;
        wg_socket_reinit(wg, NULL, NULL);
        /* The final references are cleared in the below calls to destroy_workqueue. */
@@ -240,13 +242,11 @@ static void wg_destruct(struct net_device *dev)
        skb_queue_purge(&wg->incoming_handshakes);
        free_percpu(dev->tstats);
        free_percpu(wg->incoming_handshakes_worker);
-       if (wg->have_creating_net_ref)
-               put_net(wg->creating_net);
        kvfree(wg->index_hashtable);
        kvfree(wg->peer_hashtable);
        mutex_unlock(&wg->device_update_lock);
 
-       pr_debug("%s: Interface deleted\n", dev->name);
+       pr_debug("%s: Interface destroyed\n", dev->name);
        free_netdev(dev);
 }
 
@@ -262,6 +262,7 @@ static void wg_setup(struct net_device *dev)
                             max(sizeof(struct ipv6hdr), sizeof(struct iphdr));
 
        dev->netdev_ops = &netdev_ops;
+       dev->header_ops = &ip_tunnel_header_ops;
        dev->hard_header_len = 0;
        dev->addr_len = 0;
        dev->needed_headroom = DATA_PACKET_HEAD_ROOM;
@@ -292,7 +293,7 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
        struct wg_device *wg = netdev_priv(dev);
        int ret = -ENOMEM;
 
-       wg->creating_net = src_net;
+       rcu_assign_pointer(wg->creating_net, src_net);
        init_rwsem(&wg->static_identity.lock);
        mutex_init(&wg->socket_update_lock);
        mutex_init(&wg->device_update_lock);
@@ -393,30 +394,26 @@ static struct rtnl_link_ops link_ops __read_mostly = {
        .newlink                = wg_newlink,
 };
 
-static int wg_netdevice_notification(struct notifier_block *nb,
-                                    unsigned long action, void *data)
+static void wg_netns_pre_exit(struct net *net)
 {
-       struct net_device *dev = ((struct netdev_notifier_info *)data)->dev;
-       struct wg_device *wg = netdev_priv(dev);
-
-       ASSERT_RTNL();
-
-       if (action != NETDEV_REGISTER || dev->netdev_ops != &netdev_ops)
-               return 0;
+       struct wg_device *wg;
 
-       if (dev_net(dev) == wg->creating_net && wg->have_creating_net_ref) {
-               put_net(wg->creating_net);
-               wg->have_creating_net_ref = false;
-       } else if (dev_net(dev) != wg->creating_net &&
-                  !wg->have_creating_net_ref) {
-               wg->have_creating_net_ref = true;
-               get_net(wg->creating_net);
+       rtnl_lock();
+       list_for_each_entry(wg, &device_list, device_list) {
+               if (rcu_access_pointer(wg->creating_net) == net) {
+                       pr_debug("%s: Creating namespace exiting\n", wg->dev->name);
+                       netif_carrier_off(wg->dev);
+                       mutex_lock(&wg->device_update_lock);
+                       rcu_assign_pointer(wg->creating_net, NULL);
+                       wg_socket_reinit(wg, NULL, NULL);
+                       mutex_unlock(&wg->device_update_lock);
+               }
        }
-       return 0;
+       rtnl_unlock();
 }
 
-static struct notifier_block netdevice_notifier = {
-       .notifier_call = wg_netdevice_notification
+static struct pernet_operations pernet_ops = {
+       .pre_exit = wg_netns_pre_exit
 };
 
 int __init wg_device_init(void)
@@ -429,18 +426,18 @@ int __init wg_device_init(void)
                return ret;
 #endif
 
-       ret = register_netdevice_notifier(&netdevice_notifier);
+       ret = register_pernet_device(&pernet_ops);
        if (ret)
                goto error_pm;
 
        ret = rtnl_link_register(&link_ops);
        if (ret)
-               goto error_netdevice;
+               goto error_pernet;
 
        return 0;
 
-error_netdevice:
-       unregister_netdevice_notifier(&netdevice_notifier);
+error_pernet:
+       unregister_pernet_device(&pernet_ops);
 error_pm:
 #ifdef CONFIG_PM_SLEEP
        unregister_pm_notifier(&pm_notifier);
@@ -451,7 +448,7 @@ error_pm:
 void wg_device_uninit(void)
 {
        rtnl_link_unregister(&link_ops);
-       unregister_netdevice_notifier(&netdevice_notifier);
+       unregister_pernet_device(&pernet_ops);
 #ifdef CONFIG_PM_SLEEP
        unregister_pm_notifier(&pm_notifier);
 #endif
index b15a8be..4d0144e 100644 (file)
@@ -40,7 +40,7 @@ struct wg_device {
        struct net_device *dev;
        struct crypt_queue encrypt_queue, decrypt_queue;
        struct sock __rcu *sock4, *sock6;
-       struct net *creating_net;
+       struct net __rcu *creating_net;
        struct noise_static_identity static_identity;
        struct workqueue_struct *handshake_receive_wq, *handshake_send_wq;
        struct workqueue_struct *packet_crypt_wq;
@@ -56,7 +56,6 @@ struct wg_device {
        unsigned int num_peers, device_update_gen;
        u32 fwmark;
        u16 incoming_port;
-       bool have_creating_net_ref;
 };
 
 int wg_device_init(void);
index 802099c..20a4f3c 100644 (file)
@@ -511,11 +511,15 @@ static int wg_set_device(struct sk_buff *skb, struct genl_info *info)
        if (flags & ~__WGDEVICE_F_ALL)
                goto out;
 
-       ret = -EPERM;
-       if ((info->attrs[WGDEVICE_A_LISTEN_PORT] ||
-            info->attrs[WGDEVICE_A_FWMARK]) &&
-           !ns_capable(wg->creating_net->user_ns, CAP_NET_ADMIN))
-               goto out;
+       if (info->attrs[WGDEVICE_A_LISTEN_PORT] || info->attrs[WGDEVICE_A_FWMARK]) {
+               struct net *net;
+               rcu_read_lock();
+               net = rcu_dereference(wg->creating_net);
+               ret = !net || !ns_capable(net->user_ns, CAP_NET_ADMIN) ? -EPERM : 0;
+               rcu_read_unlock();
+               if (ret)
+                       goto out;
+       }
 
        ++wg->device_update_gen;
 
index 6264336..201a226 100644 (file)
@@ -617,8 +617,8 @@ wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src,
        memcpy(handshake->hash, hash, NOISE_HASH_LEN);
        memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN);
        handshake->remote_index = src->sender_index;
-       if ((s64)(handshake->last_initiation_consumption -
-           (initiation_consumption = ktime_get_coarse_boottime_ns())) < 0)
+       initiation_consumption = ktime_get_coarse_boottime_ns();
+       if ((s64)(handshake->last_initiation_consumption - initiation_consumption) < 0)
                handshake->last_initiation_consumption = initiation_consumption;
        handshake->state = HANDSHAKE_CONSUMED_INITIATION;
        up_write(&handshake->lock);
index c58df43..dfb674e 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/skbuff.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
+#include <net/ip_tunnels.h>
 
 struct wg_device;
 struct wg_peer;
@@ -65,25 +66,9 @@ struct packet_cb {
 #define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
 #define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
 
-/* Returns either the correct skb->protocol value, or 0 if invalid. */
-static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb)
-{
-       if (skb_network_header(skb) >= skb->head &&
-           (skb_network_header(skb) + sizeof(struct iphdr)) <=
-                   skb_tail_pointer(skb) &&
-           ip_hdr(skb)->version == 4)
-               return htons(ETH_P_IP);
-       if (skb_network_header(skb) >= skb->head &&
-           (skb_network_header(skb) + sizeof(struct ipv6hdr)) <=
-                   skb_tail_pointer(skb) &&
-           ipv6_hdr(skb)->version == 6)
-               return htons(ETH_P_IPV6);
-       return 0;
-}
-
 static inline bool wg_check_packet_protocol(struct sk_buff *skb)
 {
-       __be16 real_protocol = wg_examine_packet_protocol(skb);
+       __be16 real_protocol = ip_tunnel_parse_protocol(skb);
        return real_protocol && skb->protocol == real_protocol;
 }
 
index 9143814..2c9551e 100644 (file)
@@ -387,7 +387,7 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
         */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb->csum_level = ~0; /* All levels */
-       skb->protocol = wg_examine_packet_protocol(skb);
+       skb->protocol = ip_tunnel_parse_protocol(skb);
        if (skb->protocol == htons(ETH_P_IP)) {
                len = ntohs(ip_hdr(skb)->tot_len);
                if (unlikely(len < sizeof(struct iphdr)))
@@ -414,14 +414,8 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
        if (unlikely(routed_peer != peer))
                goto dishonest_packet_peer;
 
-       if (unlikely(napi_gro_receive(&peer->napi, skb) == GRO_DROP)) {
-               ++dev->stats.rx_dropped;
-               net_dbg_ratelimited("%s: Failed to give packet to userspace from peer %llu (%pISpfsc)\n",
-                                   dev->name, peer->internal_id,
-                                   &peer->endpoint.addr);
-       } else {
-               update_rx_stats(peer, message_data_len(len_before_trim));
-       }
+       napi_gro_receive(&peer->napi, skb);
+       update_rx_stats(peer, message_data_len(len_before_trim));
        return;
 
 dishonest_packet_peer:
index f901802..c33e2c8 100644 (file)
@@ -347,6 +347,7 @@ static void set_sock_opts(struct socket *sock)
 
 int wg_socket_init(struct wg_device *wg, u16 port)
 {
+       struct net *net;
        int ret;
        struct udp_tunnel_sock_cfg cfg = {
                .sk_user_data = wg,
@@ -371,37 +372,47 @@ int wg_socket_init(struct wg_device *wg, u16 port)
        };
 #endif
 
+       rcu_read_lock();
+       net = rcu_dereference(wg->creating_net);
+       net = net ? maybe_get_net(net) : NULL;
+       rcu_read_unlock();
+       if (unlikely(!net))
+               return -ENONET;
+
 #if IS_ENABLED(CONFIG_IPV6)
 retry:
 #endif
 
-       ret = udp_sock_create(wg->creating_net, &port4, &new4);
+       ret = udp_sock_create(net, &port4, &new4);
        if (ret < 0) {
                pr_err("%s: Could not create IPv4 socket\n", wg->dev->name);
-               return ret;
+               goto out;
        }
        set_sock_opts(new4);
-       setup_udp_tunnel_sock(wg->creating_net, new4, &cfg);
+       setup_udp_tunnel_sock(net, new4, &cfg);
 
 #if IS_ENABLED(CONFIG_IPV6)
        if (ipv6_mod_enabled()) {
                port6.local_udp_port = inet_sk(new4->sk)->inet_sport;
-               ret = udp_sock_create(wg->creating_net, &port6, &new6);
+               ret = udp_sock_create(net, &port6, &new6);
                if (ret < 0) {
                        udp_tunnel_sock_release(new4);
                        if (ret == -EADDRINUSE && !port && retries++ < 100)
                                goto retry;
                        pr_err("%s: Could not create IPv6 socket\n",
                               wg->dev->name);
-                       return ret;
+                       goto out;
                }
                set_sock_opts(new6);
-               setup_udp_tunnel_sock(wg->creating_net, new6, &cfg);
+               setup_udp_tunnel_sock(net, new6, &cfg);
        }
 #endif
 
        wg_socket_reinit(wg, new4->sk, new6 ? new6->sk : NULL);
-       return 0;
+       ret = 0;
+out:
+       put_net(net);
+       return ret;
 }
 
 void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
index 8ab62bb..170a64e 100644 (file)
@@ -14,7 +14,7 @@ menuconfig WLAN
          device drivers. For a complete list of drivers and documentation
          on them refer to the wireless wiki:
 
-         http://wireless.kernel.org/en/users/Drivers
+         https://wireless.wiki.kernel.org/en/users/Drivers
 
 if WLAN
 
@@ -40,6 +40,7 @@ source "drivers/net/wireless/intel/Kconfig"
 source "drivers/net/wireless/intersil/Kconfig"
 source "drivers/net/wireless/marvell/Kconfig"
 source "drivers/net/wireless/mediatek/Kconfig"
+source "drivers/net/wireless/microchip/Kconfig"
 source "drivers/net/wireless/ralink/Kconfig"
 source "drivers/net/wireless/realtek/Kconfig"
 source "drivers/net/wireless/rsi/Kconfig"
@@ -57,7 +58,8 @@ config PCMCIA_RAYCS
        help
          Say Y here if you intend to attach an Aviator/Raytheon PCMCIA
          (PC-card) wireless Ethernet networking card to your computer.
-         Please read the file <file:Documentation/networking/ray_cs.rst> for
+         Please read the file
+         <file:Documentation/networking/device_drivers/wifi/ray_cs.rst> for
          details.
 
          To compile this driver as a module, choose M here: the module will be
index 6cfe745..80b3244 100644 (file)
@@ -12,6 +12,7 @@ obj-$(CONFIG_WLAN_VENDOR_INTEL) += intel/
 obj-$(CONFIG_WLAN_VENDOR_INTERSIL) += intersil/
 obj-$(CONFIG_WLAN_VENDOR_MARVELL) += marvell/
 obj-$(CONFIG_WLAN_VENDOR_MEDIATEK) += mediatek/
+obj-$(CONFIG_WLAN_VENDOR_MICROCHIP) += microchip/
 obj-$(CONFIG_WLAN_VENDOR_RALINK) += ralink/
 obj-$(CONFIG_WLAN_VENDOR_REALTEK) += realtek/
 obj-$(CONFIG_WLAN_VENDOR_RSI) += rsi/
index ba326f6..22f9f2f 100644 (file)
@@ -1976,35 +1976,20 @@ static void adm8211_remove(struct pci_dev *pdev)
 }
 
 
-#ifdef CONFIG_PM
-static int adm8211_suspend(struct pci_dev *pdev, pm_message_t state)
-{
-       pci_save_state(pdev);
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
-       return 0;
-}
-
-static int adm8211_resume(struct pci_dev *pdev)
-{
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-       return 0;
-}
-#endif /* CONFIG_PM */
-
+#define adm8211_suspend NULL
+#define adm8211_resume NULL
 
 MODULE_DEVICE_TABLE(pci, adm8211_pci_id_table);
 
+static SIMPLE_DEV_PM_OPS(adm8211_pm_ops, adm8211_suspend, adm8211_resume);
+
 /* TODO: implement enable_wake */
 static struct pci_driver adm8211_driver = {
        .name           = "adm8211",
        .id_table       = adm8211_pci_id_table,
        .probe          = adm8211_probe,
        .remove         = adm8211_remove,
-#ifdef CONFIG_PM
-       .suspend        = adm8211_suspend,
-       .resume         = adm8211_resume,
-#endif /* CONFIG_PM */
+       .driver.pm      = &adm8211_pm_ops,
 };
 
 module_pci_driver(adm8211_driver);
index 6e9d46b..d88edbf 100644 (file)
@@ -15,11 +15,11 @@ config WLAN_VENDOR_ATH
 
          For more information and documentation on this module you can visit:
 
-         http://wireless.kernel.org/en/users/Drivers/ath
+         https://wireless.wiki.kernel.org/en/users/Drivers/ath
 
          For information on all Atheros wireless drivers visit:
 
-         http://wireless.kernel.org/en/users/Drivers/Atheros
+         https://wireless.wiki.kernel.org/en/users/Drivers/Atheros
 
 if WLAN_VENDOR_ATH
 
index 4fd10ac..bbe8695 100644 (file)
@@ -1591,7 +1591,9 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt,
 err_unmap_msdu:
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 err_free_msdu_id:
+       spin_lock_bh(&htt->tx_lock);
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+       spin_unlock_bh(&htt->tx_lock);
 err:
        return res;
 }
@@ -1798,7 +1800,9 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
 err_unmap_msdu:
        dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 err_free_msdu_id:
+       spin_lock_bh(&htt->tx_lock);
        ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+       spin_unlock_bh(&htt->tx_lock);
 err:
        return res;
 }
index b7daf34..05a620f 100644 (file)
@@ -824,7 +824,7 @@ static int ath10k_usb_setup_pipe_resources(struct ath10k *ar,
 
        ath10k_dbg(ar, ATH10K_DBG_USB, "usb setting up pipes using interface\n");
 
-       /* walk decriptors and setup pipes */
+       /* walk descriptors and setup pipes */
        for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
                endpoint = &iface_desc->endpoint[i].desc;
 
index 7acb42e..88a9735 100644 (file)
@@ -34,3 +34,12 @@ config ATH11K_TRACING
        depends on ATH11K && EVENT_TRACING
        help
          Select this to use ath11k tracing infrastructure.
+
+config ATH11K_SPECTRAL
+       bool "QCA ath11k spectral scan support"
+       depends on ATH11K_DEBUGFS
+       depends on RELAY
+       help
+         Enable ath11k spectral scan support
+
+         Say Y to enable access to the FFT/spectral data via debugfs.
index fe7736e..1041863 100644 (file)
@@ -15,12 +15,14 @@ ath11k-y += core.o \
            dp_rx.o \
            debug.o \
            ce.o \
-           peer.o
+           peer.o \
+           dbring.o
 
 ath11k-$(CONFIG_ATH11K_DEBUGFS) += debug_htt_stats.o debugfs_sta.o
 ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o
 ath11k-$(CONFIG_ATH11K_TRACING) += trace.o
 ath11k-$(CONFIG_THERMAL) += thermal.o
+ath11k-$(CONFIG_ATH11K_SPECTRAL) += spectral.o
 
 # for tracing framework to find trace.h
 CFLAGS_trace.o := -I$(src)
index 02501cc..905cd8b 100644 (file)
@@ -400,8 +400,16 @@ static int ath11k_core_pdev_create(struct ath11k_base *ab)
                goto err_dp_pdev_free;
        }
 
+       ret = ath11k_spectral_init(ab);
+       if (ret) {
+               ath11k_err(ab, "failed to init spectral %d\n", ret);
+               goto err_thermal_unregister;
+       }
+
        return 0;
 
+err_thermal_unregister:
+       ath11k_thermal_unregister(ab);
 err_dp_pdev_free:
        ath11k_dp_pdev_free(ab);
 err_mac_unregister:
@@ -414,6 +422,7 @@ err_pdev_debug:
 
 static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
 {
+       ath11k_spectral_deinit(ab);
        ath11k_thermal_unregister(ab);
        ath11k_mac_unregister(ab);
        ath11k_hif_irq_disable(ab);
@@ -582,6 +591,7 @@ static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
        ath11k_thermal_unregister(ab);
        ath11k_hif_irq_disable(ab);
        ath11k_dp_pdev_free(ab);
+       ath11k_spectral_deinit(ab);
        ath11k_hif_stop(ab);
        ath11k_wmi_detach(ab);
        ath11k_dp_pdev_reo_cleanup(ab);
index e04f0e7..e5c4e19 100644 (file)
@@ -21,6 +21,8 @@
 #include "hal_rx.h"
 #include "reg.h"
 #include "thermal.h"
+#include "dbring.h"
+#include "spectral.h"
 
 #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
 
@@ -215,12 +217,15 @@ struct ath11k_vif {
 
        bool is_started;
        bool is_up;
+       bool spectral_enabled;
        u32 aid;
        u8 bssid[ETH_ALEN];
        struct cfg80211_bitrate_mask bitrate_mask;
        int num_legacy_stations;
        int rtscts_prot_mode;
        int txpower;
+       bool rsnie_present;
+       bool wpaie_present;
 };
 
 struct ath11k_vif_iter {
@@ -353,7 +358,10 @@ struct ath11k_sta {
 #endif
 };
 
-#define ATH11K_NUM_CHANS 41
+#define ATH11K_MIN_5G_FREQ 4150
+#define ATH11K_MIN_6G_FREQ 5945
+#define ATH11K_MAX_6G_FREQ 7115
+#define ATH11K_NUM_CHANS 100
 #define ATH11K_MAX_5G_CHAN 173
 
 enum ath11k_state {
@@ -431,6 +439,7 @@ struct ath11k {
        u32 vht_cap_info;
        struct ath11k_he ar_he;
        enum ath11k_state state;
+       bool supports_6ghz;
        struct {
                struct completion started;
                struct completion completed;
@@ -537,6 +546,9 @@ struct ath11k {
 #ifdef CONFIG_ATH11K_DEBUGFS
        struct ath11k_debug debug;
 #endif
+#ifdef CONFIG_ATH11K_SPECTRAL
+       struct ath11k_spectral spectral;
+#endif
        bool dfs_block_radar_events;
        struct ath11k_thermal thermal;
 };
@@ -548,6 +560,7 @@ struct ath11k_band_cap {
        u32 he_mcs;
        u32 he_cap_phy_info[PSOC_HOST_MAX_PHY_SIZE];
        struct ath11k_ppe_threshold he_ppet;
+       u16 he_6ghz_capa;
 };
 
 struct ath11k_pdev_cap {
@@ -579,12 +592,42 @@ struct ath11k_board_data {
 /* IPQ8074 HW channel counters frequency value in hertz */
 #define IPQ8074_CC_FREQ_HERTZ 320000
 
-struct ath11k_soc_dp_rx_stats {
+struct ath11k_bp_stats {
+       /* Head Pointer reported by the last HTT Backpressure event for the ring */
+       u16 hp;
+
+       /* Tail Pointer reported by the last HTT Backpressure event for the ring */
+       u16 tp;
+
+       /* Number of Backpressure events received for the ring */
+       u32 count;
+
+       /* Last recorded event timestamp */
+       unsigned long jiffies;
+};
+
+struct ath11k_dp_ring_bp_stats {
+       struct ath11k_bp_stats umac_ring_bp_stats[HTT_SW_UMAC_RING_IDX_MAX];
+       struct ath11k_bp_stats lmac_ring_bp_stats[HTT_SW_LMAC_RING_IDX_MAX][MAX_RADIOS];
+};
+
+struct ath11k_soc_dp_tx_err_stats {
+       /* TCL Ring Descriptor unavailable */
+       u32 desc_na[DP_TCL_NUM_RING_MAX];
+       /* Other failures during dp_tx due to mem allocation failure
+        * idr unavailable etc.
+        */
+       atomic_t misc_fail;
+};
+
+struct ath11k_soc_dp_stats {
        u32 err_ring_pkts;
        u32 invalid_rbm;
        u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
        u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
        u32 hal_reo_error[DP_REO_DST_RING_MAX];
+       struct ath11k_soc_dp_tx_err_stats tx_err;
+       struct ath11k_dp_ring_bp_stats bp_stats;
 };
 
 /* Master structure to hold the hw data which may be used in core module */
@@ -653,7 +696,7 @@ struct ath11k_base {
        struct dentry *debugfs_soc;
        struct dentry *debugfs_ath11k;
 #endif
-       struct ath11k_soc_dp_rx_stats soc_stats;
+       struct ath11k_soc_dp_stats soc_stats;
 
        unsigned long dev_flags;
        struct completion driver_recovery;
@@ -668,6 +711,9 @@ struct ath11k_base {
        /* Round robbin based TCL ring selector */
        atomic_t tcl_ring_selector;
 
+       struct ath11k_dbring_cap *db_caps;
+       u32 num_db_cap;
+
        /* must be last */
        u8 drv_priv[0] __aligned(sizeof(void *));
 };
diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c
new file mode 100644 (file)
index 0000000..cf20db3
--- /dev/null
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ */
+
+#include "core.h"
+#include "debug.h"
+
+static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
+                                       struct ath11k_dbring *ring,
+                                       struct ath11k_dbring_element *buff,
+                                       gfp_t gfp)
+{
+       struct ath11k_base *ab = ar->ab;
+       struct hal_srng *srng;
+       dma_addr_t paddr;
+       void *ptr_aligned, *ptr_unaligned, *desc;
+       int ret;
+       int buf_id;
+       u32 cookie;
+
+       srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
+
+       lockdep_assert_held(&srng->lock);
+
+       ath11k_hal_srng_access_begin(ab, srng);
+
+       ptr_unaligned = buff->payload;
+       ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
+       paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
+                              DMA_FROM_DEVICE);
+
+       ret = dma_mapping_error(ab->dev, paddr);
+       if (ret)
+               goto err;
+
+       spin_lock_bh(&ring->idr_lock);
+       buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, gfp);
+       spin_unlock_bh(&ring->idr_lock);
+       if (buf_id < 0) {
+               ret = -ENOBUFS;
+               goto err_dma_unmap;
+       }
+
+       desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+       if (!desc) {
+               ret = -ENOENT;
+               goto err_idr_remove;
+       }
+
+       buff->paddr = paddr;
+
+       cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, ar->pdev_idx) |
+                FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+       ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);
+
+       ath11k_hal_srng_access_end(ab, srng);
+
+       return 0;
+
+err_idr_remove:
+       spin_lock_bh(&ring->idr_lock);
+       idr_remove(&ring->bufs_idr, buf_id);
+       spin_unlock_bh(&ring->idr_lock);
+err_dma_unmap:
+       dma_unmap_single(ab->dev, paddr, ring->buf_sz,
+                        DMA_FROM_DEVICE);
+err:
+       ath11k_hal_srng_access_end(ab, srng);
+       return ret;
+}
+
+static int ath11k_dbring_fill_bufs(struct ath11k *ar,
+                                  struct ath11k_dbring *ring,
+                                  gfp_t gfp)
+{
+       struct ath11k_dbring_element *buff;
+       struct hal_srng *srng;
+       int num_remain, req_entries, num_free;
+       u32 align;
+       int size, ret;
+
+       srng = &ar->ab->hal.srng_list[ring->refill_srng.ring_id];
+
+       spin_lock_bh(&srng->lock);
+
+       num_free = ath11k_hal_srng_src_num_free(ar->ab, srng, true);
+       req_entries = min(num_free, ring->bufs_max);
+       num_remain = req_entries;
+       align = ring->buf_align;
+       size = sizeof(*buff) + ring->buf_sz + align - 1;
+
+       while (num_remain > 0) {
+               buff = kzalloc(size, gfp);
+               if (!buff)
+                       break;
+
+               ret = ath11k_dbring_bufs_replenish(ar, ring, buff, gfp);
+               if (ret) {
+                       ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
+                                   num_remain, req_entries);
+                       kfree(buff);
+                       break;
+               }
+               num_remain--;
+       }
+
+       spin_unlock_bh(&srng->lock);
+
+       return num_remain;
+}
+
+int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
+                               struct ath11k_dbring *ring,
+                               enum wmi_direct_buffer_module id)
+{
+       struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {0};
+       int ret;
+
+       if (id >= WMI_DIRECT_BUF_MAX)
+               return -EINVAL;
+
+       param.pdev_id           = DP_SW2HW_MACID(ring->pdev_id);
+       param.module_id         = id;
+       param.base_paddr_lo     = lower_32_bits(ring->refill_srng.paddr);
+       param.base_paddr_hi     = upper_32_bits(ring->refill_srng.paddr);
+       param.head_idx_paddr_lo = lower_32_bits(ring->hp_addr);
+       param.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
+       param.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
+       param.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
+       param.num_elems         = ring->bufs_max;
+       param.buf_size          = ring->buf_sz;
+       param.num_resp_per_event = ring->num_resp_per_event;
+       param.event_timeout_ms  = ring->event_timeout_ms;
+
+       ret = ath11k_wmi_pdev_dma_ring_cfg(ar, &param);
+       if (ret) {
+               ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+int ath11k_dbring_set_cfg(struct ath11k *ar, struct ath11k_dbring *ring,
+                         u32 num_resp_per_event, u32 event_timeout_ms,
+                         int (*handler)(struct ath11k *,
+                                        struct ath11k_dbring_data *))
+{
+       if (WARN_ON(!ring))
+               return -EINVAL;
+
+       ring->num_resp_per_event = num_resp_per_event;
+       ring->event_timeout_ms = event_timeout_ms;
+       ring->handler = handler;
+
+       return 0;
+}
+
+int ath11k_dbring_buf_setup(struct ath11k *ar,
+                           struct ath11k_dbring *ring,
+                           struct ath11k_dbring_cap *db_cap)
+{
+       struct ath11k_base *ab = ar->ab;
+       struct hal_srng *srng;
+       int ret;
+
+       srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
+       ring->bufs_max = ring->refill_srng.size /
+                        ath11k_hal_srng_get_entrysize(HAL_RXDMA_DIR_BUF);
+
+       ring->buf_sz = db_cap->min_buf_sz;
+       ring->buf_align = db_cap->min_buf_align;
+       ring->pdev_id = db_cap->pdev_id;
+       ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
+       ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);
+
+       ret = ath11k_dbring_fill_bufs(ar, ring, GFP_KERNEL);
+
+       return ret;
+}
+
+int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring,
+                            int ring_num, int num_entries)
+{
+       int ret;
+
+       ret = ath11k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
+                                  ring_num, ar->pdev_idx, num_entries);
+       if (ret < 0) {
+               ath11k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
+                           ret, ring_num);
+               goto err;
+       }
+
+       return 0;
+err:
+       ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
+       return ret;
+}
+
+int ath11k_dbring_get_cap(struct ath11k_base *ab,
+                         u8 pdev_idx,
+                         enum wmi_direct_buffer_module id,
+                         struct ath11k_dbring_cap *db_cap)
+{
+       int i;
+
+       if (!ab->num_db_cap || !ab->db_caps)
+               return -ENOENT;
+
+       if (id >= WMI_DIRECT_BUF_MAX)
+               return -EINVAL;
+
+       for (i = 0; i < ab->num_db_cap; i++) {
+               if (pdev_idx == ab->db_caps[i].pdev_id &&
+                   id == ab->db_caps[i].id) {
+                       *db_cap = ab->db_caps[i];
+
+                       return 0;
+               }
+       }
+
+       return -ENOENT;
+}
+
+int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
+                                      struct ath11k_dbring_buf_release_event *ev)
+{
+       struct ath11k_dbring *ring;
+       struct hal_srng *srng;
+       struct ath11k *ar;
+       struct ath11k_dbring_element *buff;
+       struct ath11k_dbring_data handler_data;
+       struct ath11k_buffer_addr desc;
+       u8 *vaddr_unalign;
+       u32 num_entry, num_buff_reaped;
+       u8 pdev_idx, rbm;
+       u32 cookie;
+       int buf_id;
+       int size;
+       dma_addr_t paddr;
+       int ret = 0;
+
+       pdev_idx = ev->fixed.pdev_id;
+
+       if (pdev_idx >= ab->num_radios) {
+               ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
+               return -EINVAL;
+       }
+
+       if (ev->fixed.num_buf_release_entry !=
+           ev->fixed.num_meta_data_entry) {
+               ath11k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
+                           ev->fixed.num_buf_release_entry,
+                           ev->fixed.num_meta_data_entry);
+               return -EINVAL;
+       }
+
+       ar = ab->pdevs[pdev_idx].ar;
+
+       rcu_read_lock();
+       if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
+               ret = -EINVAL;
+               goto rcu_unlock;
+       }
+
+       switch (ev->fixed.module_id) {
+       case WMI_DIRECT_BUF_SPECTRAL:
+               ring = ath11k_spectral_get_dbring(ar);
+               break;
+       default:
+               ring = NULL;
+               ath11k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
+                           ev->fixed.module_id);
+               break;
+       }
+
+       if (!ring) {
+               ret = -EINVAL;
+               goto rcu_unlock;
+       }
+
+       srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
+       num_entry = ev->fixed.num_buf_release_entry;
+       size = sizeof(*buff) + ring->buf_sz + ring->buf_align - 1;
+       num_buff_reaped = 0;
+
+       spin_lock_bh(&srng->lock);
+
+       while (num_buff_reaped < num_entry) {
+               desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
+               desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
+               handler_data.meta = ev->meta_data[num_buff_reaped];
+
+               num_buff_reaped++;
+
+               ath11k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);
+
+               buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
+
+               spin_lock_bh(&ring->idr_lock);
+               buff = idr_find(&ring->bufs_idr, buf_id);
+               if (!buff) {
+                       spin_unlock_bh(&ring->idr_lock);
+                       continue;
+               }
+               idr_remove(&ring->bufs_idr, buf_id);
+               spin_unlock_bh(&ring->idr_lock);
+
+               dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
+                                DMA_FROM_DEVICE);
+
+               if (ring->handler) {
+                       vaddr_unalign = buff->payload;
+                       handler_data.data = PTR_ALIGN(vaddr_unalign,
+                                                     ring->buf_align);
+                       handler_data.data_sz = ring->buf_sz;
+
+                       ring->handler(ar, &handler_data);
+               }
+
+               memset(buff, 0, size);
+               ath11k_dbring_bufs_replenish(ar, ring, buff, GFP_ATOMIC);
+       }
+
+       spin_unlock_bh(&srng->lock);
+
+rcu_unlock:
+       rcu_read_unlock();
+
+       return ret;
+}
+
+void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
+{
+       ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
+}
+
+void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
+{
+       struct ath11k_dbring_element *buff;
+       int buf_id;
+
+       spin_lock_bh(&ring->idr_lock);
+       idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
+               idr_remove(&ring->bufs_idr, buf_id);
+               dma_unmap_single(ar->ab->dev, buff->paddr,
+                                ring->buf_sz, DMA_FROM_DEVICE);
+               kfree(buff);
+       }
+
+       idr_destroy(&ring->bufs_idr);
+       spin_unlock_bh(&ring->idr_lock);
+}
diff --git a/drivers/net/wireless/ath/ath11k/dbring.h b/drivers/net/wireless/ath/ath11k/dbring.h
new file mode 100644 (file)
index 0000000..f7fce9e
--- /dev/null
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_DBRING_H
+#define ATH11K_DBRING_H
+
+#include <linux/types.h>
+#include <linux/idr.h>
+#include <linux/spinlock.h>
+#include "dp.h"
+
+struct ath11k_dbring_element {
+       dma_addr_t paddr;
+       u8 payload[0];
+};
+
+struct ath11k_dbring_data {
+       void *data;
+       u32 data_sz;
+       struct wmi_dma_buf_release_meta_data meta;
+};
+
+struct ath11k_dbring_buf_release_event {
+       struct ath11k_wmi_dma_buf_release_fixed_param fixed;
+       struct wmi_dma_buf_release_entry *buf_entry;
+       struct wmi_dma_buf_release_meta_data *meta_data;
+       u32 num_buf_entry;
+       u32 num_meta;
+};
+
+struct ath11k_dbring_cap {
+       u32 pdev_id;
+       enum wmi_direct_buffer_module id;
+       u32 min_elem;
+       u32 min_buf_sz;
+       u32 min_buf_align;
+};
+
+struct ath11k_dbring {
+       struct dp_srng refill_srng;
+       struct idr bufs_idr;
+       /* Protects bufs_idr */
+       spinlock_t idr_lock;
+       dma_addr_t tp_addr;
+       dma_addr_t hp_addr;
+       int bufs_max;
+       u32 pdev_id;
+       u32 buf_sz;
+       u32 buf_align;
+       u32 num_resp_per_event;
+       u32 event_timeout_ms;
+       int (*handler)(struct ath11k *, struct ath11k_dbring_data *);
+};
+
+int ath11k_dbring_set_cfg(struct ath11k *ar,
+                         struct ath11k_dbring *ring,
+                         u32 num_resp_per_event,
+                         u32 event_timeout_ms,
+                         int (*handler)(struct ath11k *,
+                                        struct ath11k_dbring_data *));
+int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
+                               struct ath11k_dbring *ring,
+                               enum wmi_direct_buffer_module id);
+int ath11k_dbring_buf_setup(struct ath11k *ar,
+                           struct ath11k_dbring *ring,
+                           struct ath11k_dbring_cap *db_cap);
+int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring,
+                            int ring_num, int num_entries);
+int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
+                                      struct ath11k_dbring_buf_release_event *ev);
+int ath11k_dbring_get_cap(struct ath11k_base *ab,
+                         u8 pdev_idx,
+                         enum wmi_direct_buffer_module id,
+                         struct ath11k_dbring_cap *db_cap);
+void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring);
+void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring);
+#endif /* ATH11K_DBRING_H */
index 3fd6b5a..62a1aa0 100644 (file)
 #include "debug_htt_stats.h"
 #include "peer.h"
 
+static const char *htt_bp_umac_ring[HTT_SW_UMAC_RING_IDX_MAX] = {
+       "REO2SW1_RING",
+       "REO2SW2_RING",
+       "REO2SW3_RING",
+       "REO2SW4_RING",
+       "WBM2REO_LINK_RING",
+       "REO2TCL_RING",
+       "REO2FW_RING",
+       "RELEASE_RING",
+       "PPE_RELEASE_RING",
+       "TCL2TQM_RING",
+       "TQM_RELEASE_RING",
+       "REO_RELEASE_RING",
+       "WBM2SW0_RELEASE_RING",
+       "WBM2SW1_RELEASE_RING",
+       "WBM2SW2_RELEASE_RING",
+       "WBM2SW3_RELEASE_RING",
+       "REO_CMD_RING",
+       "REO_STATUS_RING",
+};
+
+static const char *htt_bp_lmac_ring[HTT_SW_LMAC_RING_IDX_MAX] = {
+       "FW2RXDMA_BUF_RING",
+       "FW2RXDMA_STATUS_RING",
+       "FW2RXDMA_LINK_RING",
+       "SW2RXDMA_BUF_RING",
+       "WBM2RXDMA_LINK_RING",
+       "RXDMA2FW_RING",
+       "RXDMA2SW_RING",
+       "RXDMA2RELEASE_RING",
+       "RXDMA2REO_RING",
+       "MONITOR_STATUS_RING",
+       "MONITOR_BUF_RING",
+       "MONITOR_DESC_RING",
+       "MONITOR_DEST_RING",
+};
+
 void ath11k_info(struct ath11k_base *ab, const char *fmt, ...)
 {
        struct va_format vaf = {
@@ -739,12 +776,78 @@ static const struct file_operations fops_extd_rx_stats = {
        .open = simple_open,
 };
 
-static ssize_t ath11k_debug_dump_soc_rx_stats(struct file *file,
+static int ath11k_fill_bp_stats(struct ath11k_base *ab,
+                               struct ath11k_bp_stats *bp_stats,
+                               char *buf, int len, int size)
+{
+       lockdep_assert_held(&ab->base_lock);
+
+       len += scnprintf(buf + len, size - len, "count: %u\n",
+                        bp_stats->count);
+       len += scnprintf(buf + len, size - len, "hp: %u\n",
+                        bp_stats->hp);
+       len += scnprintf(buf + len, size - len, "tp: %u\n",
+                        bp_stats->tp);
+       len += scnprintf(buf + len, size - len, "seen before: %ums\n\n",
+                        jiffies_to_msecs(jiffies - bp_stats->jiffies));
+       return len;
+}
+
+static ssize_t ath11k_debug_dump_soc_ring_bp_stats(struct ath11k_base *ab,
+                                                  char *buf, int size)
+{
+       struct ath11k_bp_stats *bp_stats;
+       bool stats_rxd = false;
+       u8 i, pdev_idx;
+       int len = 0;
+
+       len += scnprintf(buf + len, size - len, "\nBackpressure Stats\n");
+       len += scnprintf(buf + len, size - len, "==================\n");
+
+       spin_lock_bh(&ab->base_lock);
+       for (i = 0; i < HTT_SW_UMAC_RING_IDX_MAX; i++) {
+               bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[i];
+
+               if (!bp_stats->count)
+                       continue;
+
+               len += scnprintf(buf + len, size - len, "Ring: %s\n",
+                                htt_bp_umac_ring[i]);
+               len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size);
+               stats_rxd = true;
+       }
+
+       for (i = 0; i < HTT_SW_LMAC_RING_IDX_MAX; i++) {
+               for (pdev_idx = 0; pdev_idx < MAX_RADIOS; pdev_idx++) {
+                       bp_stats =
+                               &ab->soc_stats.bp_stats.lmac_ring_bp_stats[i][pdev_idx];
+
+                       if (!bp_stats->count)
+                               continue;
+
+                       len += scnprintf(buf + len, size - len, "Ring: %s\n",
+                                        htt_bp_lmac_ring[i]);
+                       len += scnprintf(buf + len, size - len, "pdev: %d\n",
+                                        pdev_idx);
+                       len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size);
+                       stats_rxd = true;
+               }
+       }
+       spin_unlock_bh(&ab->base_lock);
+
+       if (!stats_rxd)
+               len += scnprintf(buf + len, size - len,
+                                "No Ring Backpressure stats received\n\n");
+
+       return len;
+}
+
+static ssize_t ath11k_debug_dump_soc_dp_stats(struct file *file,
                                              char __user *user_buf,
                                              size_t count, loff_t *ppos)
 {
        struct ath11k_base *ab = file->private_data;
-       struct ath11k_soc_dp_rx_stats *soc_stats = &ab->soc_stats;
+       struct ath11k_soc_dp_stats *soc_stats = &ab->soc_stats;
        int len = 0, i, retval;
        const int size = 4096;
        static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = {
@@ -788,6 +891,19 @@ static ssize_t ath11k_debug_dump_soc_rx_stats(struct file *file,
                         soc_stats->hal_reo_error[2],
                         soc_stats->hal_reo_error[3]);
 
+       len += scnprintf(buf + len, size - len, "\nSOC TX STATS:\n");
+       len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n");
+
+       for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
+               len += scnprintf(buf + len, size - len, "ring%d: %u\n",
+                                i, soc_stats->tx_err.desc_na[i]);
+
+       len += scnprintf(buf + len, size - len,
+                        "\nMisc Transmit Failures: %d\n",
+                        atomic_read(&soc_stats->tx_err.misc_fail));
+
+       len += ath11k_debug_dump_soc_ring_bp_stats(ab, buf + len, size - len);
+
        if (len > size)
                len = size;
        retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
@@ -796,8 +912,8 @@ static ssize_t ath11k_debug_dump_soc_rx_stats(struct file *file,
        return retval;
 }
 
-static const struct file_operations fops_soc_rx_stats = {
-       .read = ath11k_debug_dump_soc_rx_stats,
+static const struct file_operations fops_soc_dp_stats = {
+       .read = ath11k_debug_dump_soc_dp_stats,
        .open = simple_open,
        .owner = THIS_MODULE,
        .llseek = default_llseek,
@@ -819,8 +935,8 @@ int ath11k_debug_pdev_create(struct ath11k_base *ab)
        debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
                            &fops_simulate_fw_crash);
 
-       debugfs_create_file("soc_rx_stats", 0600, ab->debugfs_soc, ab,
-                           &fops_soc_rx_stats);
+       debugfs_create_file("soc_dp_stats", 0600, ab->debugfs_soc, ab,
+                           &fops_soc_dp_stats);
 
        return 0;
 }
index 9ae743e..1d64c3c 100644 (file)
@@ -172,11 +172,12 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
        case HAL_RXDMA_DST:
        case HAL_RXDMA_MONITOR_DST:
        case HAL_RXDMA_MONITOR_DESC:
-       case HAL_RXDMA_DIR_BUF:
                params.intr_batch_cntr_thres_entries =
                                        HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
                params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
                break;
+       case HAL_RXDMA_DIR_BUF:
+               break;
        default:
                ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
                return -EINVAL;
index 058a5c1..7587862 100644 (file)
@@ -999,6 +999,48 @@ struct htt_resp_msg {
 #define HTT_BACKPRESSURE_EVENT_HP_M GENMASK(15, 0)
 #define HTT_BACKPRESSURE_EVENT_TP_M GENMASK(31, 16)
 
+#define HTT_BACKPRESSURE_UMAC_RING_TYPE        0
+#define HTT_BACKPRESSURE_LMAC_RING_TYPE        1
+
+enum htt_backpressure_umac_ringid {
+       HTT_SW_RING_IDX_REO_REO2SW1_RING,
+       HTT_SW_RING_IDX_REO_REO2SW2_RING,
+       HTT_SW_RING_IDX_REO_REO2SW3_RING,
+       HTT_SW_RING_IDX_REO_REO2SW4_RING,
+       HTT_SW_RING_IDX_REO_WBM2REO_LINK_RING,
+       HTT_SW_RING_IDX_REO_REO2TCL_RING,
+       HTT_SW_RING_IDX_REO_REO2FW_RING,
+       HTT_SW_RING_IDX_REO_REO_RELEASE_RING,
+       HTT_SW_RING_IDX_WBM_PPE_RELEASE_RING,
+       HTT_SW_RING_IDX_TCL_TCL2TQM_RING,
+       HTT_SW_RING_IDX_WBM_TQM_RELEASE_RING,
+       HTT_SW_RING_IDX_WBM_REO_RELEASE_RING,
+       HTT_SW_RING_IDX_WBM_WBM2SW0_RELEASE_RING,
+       HTT_SW_RING_IDX_WBM_WBM2SW1_RELEASE_RING,
+       HTT_SW_RING_IDX_WBM_WBM2SW2_RELEASE_RING,
+       HTT_SW_RING_IDX_WBM_WBM2SW3_RELEASE_RING,
+       HTT_SW_RING_IDX_REO_REO_CMD_RING,
+       HTT_SW_RING_IDX_REO_REO_STATUS_RING,
+       HTT_SW_UMAC_RING_IDX_MAX,
+};
+
+enum htt_backpressure_lmac_ringid {
+       HTT_SW_RING_IDX_FW2RXDMA_BUF_RING,
+       HTT_SW_RING_IDX_FW2RXDMA_STATUS_RING,
+       HTT_SW_RING_IDX_FW2RXDMA_LINK_RING,
+       HTT_SW_RING_IDX_SW2RXDMA_BUF_RING,
+       HTT_SW_RING_IDX_WBM2RXDMA_LINK_RING,
+       HTT_SW_RING_IDX_RXDMA2FW_RING,
+       HTT_SW_RING_IDX_RXDMA2SW_RING,
+       HTT_SW_RING_IDX_RXDMA2RELEASE_RING,
+       HTT_SW_RING_IDX_RXDMA2REO_RING,
+       HTT_SW_RING_IDX_MONITOR_STATUS_RING,
+       HTT_SW_RING_IDX_MONITOR_BUF_RING,
+       HTT_SW_RING_IDX_MONITOR_DESC_RING,
+       HTT_SW_RING_IDX_MONITOR_DEST_RING,
+       HTT_SW_LMAC_RING_IDX_MAX,
+};
+
 /* ppdu stats
  *
  * @details
index a54610d..791d971 100644 (file)
@@ -653,10 +653,8 @@ static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
        spin_lock_bh(&dp->reo_cmd_lock);
        list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
        dp->reo_cmd_cache_flush_count++;
-       spin_unlock_bh(&dp->reo_cmd_lock);
 
        /* Flush and invalidate aged REO desc from HW cache */
-       spin_lock_bh(&dp->reo_cmd_lock);
        list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
                                 list) {
                if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
@@ -1503,9 +1501,10 @@ static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
                                                  struct sk_buff *skb)
 {
        u32 *data = (u32 *)skb->data;
-       u8 pdev_id, ring_type, ring_id;
+       u8 pdev_id, ring_type, ring_id, pdev_idx;
        u16 hp, tp;
        u32 backpressure_time;
+       struct ath11k_bp_stats *bp_stats;
 
        pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
        ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
@@ -1520,6 +1519,31 @@ static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
 
        ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
                   pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
+
+       if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
+               if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
+                       return;
+
+               bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
+       } else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
+               pdev_idx = DP_HW2SW_MACID(pdev_id);
+
+               if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
+                       return;
+
+               bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
+       } else {
+               ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
+                           ring_type);
+               return;
+       }
+
+       spin_lock_bh(&ab->base_lock);
+       bp_stats->hp = hp;
+       bp_stats->tp = tp;
+       bp_stats->count++;
+       bp_stats->jiffies = jiffies;
+       spin_unlock_bh(&ab->base_lock);
 }
 
 void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
@@ -2162,6 +2186,7 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
                                struct ieee80211_rx_status *rx_status)
 {
        u8 channel_num;
+       u32 center_freq;
 
        rx_status->freq = 0;
        rx_status->rate_idx = 0;
@@ -2172,8 +2197,11 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
        rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
 
        channel_num = ath11k_dp_rx_h_msdu_start_freq(rx_desc);
+       center_freq = ath11k_dp_rx_h_msdu_start_freq(rx_desc) >> 16;
 
-       if (channel_num >= 1 && channel_num <= 14) {
+       if (center_freq >= 5935 && center_freq <= 7105) {
+               rx_status->band = NL80211_BAND_6GHZ;
+       } else if (channel_num >= 1 && channel_num <= 14) {
                rx_status->band = NL80211_BAND_2GHZ;
        } else if (channel_num >= 36 && channel_num <= 173) {
                rx_status->band = NL80211_BAND_5GHZ;
index 41c990a..1af7677 100644 (file)
@@ -121,8 +121,10 @@ tcl_ring_sel:
        spin_unlock_bh(&tx_ring->tx_idr_lock);
 
        if (ret < 0) {
-               if (ring_map == (BIT(DP_TCL_NUM_RING_MAX) - 1))
+               if (ring_map == (BIT(DP_TCL_NUM_RING_MAX) - 1)) {
+                       atomic_inc(&ab->soc_stats.tx_err.misc_fail);
                        return -ENOSPC;
+               }
 
                /* Check if the next ring is available */
                ring_selector++;
@@ -180,11 +182,13 @@ tcl_ring_sel:
        default:
                /* TODO: Take care of other encap modes as well */
                ret = -EINVAL;
+               atomic_inc(&ab->soc_stats.tx_err.misc_fail);
                goto fail_remove_idr;
        }
 
        ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(ab->dev, ti.paddr)) {
+               atomic_inc(&ab->soc_stats.tx_err.misc_fail);
                ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
                ret = -ENOMEM;
                goto fail_remove_idr;
@@ -208,6 +212,7 @@ tcl_ring_sel:
                 * desc because the desc is directly enqueued onto hw queue.
                 */
                ath11k_hal_srng_access_end(ab, tcl_ring);
+               ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
                spin_unlock_bh(&tcl_ring->lock);
                ret = -ENOMEM;
 
index 2836a0f..07d3e03 100644 (file)
        .max_power              = 30, \
 }
 
+#define CHAN6G(_channel, _freq, _flags) { \
+       .band                   = NL80211_BAND_6GHZ, \
+       .hw_value               = (_channel), \
+       .center_freq            = (_freq), \
+       .flags                  = (_flags), \
+       .max_antenna_gain       = 0, \
+       .max_power              = 30, \
+}
+
 /* frame mode values are mapped as per enum ath11k_hw_txrx_mode */
 static unsigned int ath11k_frame_mode = ATH11K_HW_TXRX_NATIVE_WIFI;
 module_param_named(frame_mode, ath11k_frame_mode, uint, 0644);
@@ -86,6 +95,68 @@ static const struct ieee80211_channel ath11k_5ghz_channels[] = {
        CHAN5G(173, 5865, 0),
 };
 
+static const struct ieee80211_channel ath11k_6ghz_channels[] = {
+       CHAN6G(1, 5955, 0),
+       CHAN6G(5, 5975, 0),
+       CHAN6G(9, 5995, 0),
+       CHAN6G(13, 6015, 0),
+       CHAN6G(17, 6035, 0),
+       CHAN6G(21, 6055, 0),
+       CHAN6G(25, 6075, 0),
+       CHAN6G(29, 6095, 0),
+       CHAN6G(33, 6115, 0),
+       CHAN6G(37, 6135, 0),
+       CHAN6G(41, 6155, 0),
+       CHAN6G(45, 6175, 0),
+       CHAN6G(49, 6195, 0),
+       CHAN6G(53, 6215, 0),
+       CHAN6G(57, 6235, 0),
+       CHAN6G(61, 6255, 0),
+       CHAN6G(65, 6275, 0),
+       CHAN6G(69, 6295, 0),
+       CHAN6G(73, 6315, 0),
+       CHAN6G(77, 6335, 0),
+       CHAN6G(81, 6355, 0),
+       CHAN6G(85, 6375, 0),
+       CHAN6G(89, 6395, 0),
+       CHAN6G(93, 6415, 0),
+       CHAN6G(97, 6435, 0),
+       CHAN6G(101, 6455, 0),
+       CHAN6G(105, 6475, 0),
+       CHAN6G(109, 6495, 0),
+       CHAN6G(113, 6515, 0),
+       CHAN6G(117, 6535, 0),
+       CHAN6G(121, 6555, 0),
+       CHAN6G(125, 6575, 0),
+       CHAN6G(129, 6595, 0),
+       CHAN6G(133, 6615, 0),
+       CHAN6G(137, 6635, 0),
+       CHAN6G(141, 6655, 0),
+       CHAN6G(145, 6675, 0),
+       CHAN6G(149, 6695, 0),
+       CHAN6G(153, 6715, 0),
+       CHAN6G(157, 6735, 0),
+       CHAN6G(161, 6755, 0),
+       CHAN6G(165, 6775, 0),
+       CHAN6G(169, 6795, 0),
+       CHAN6G(173, 6815, 0),
+       CHAN6G(177, 6835, 0),
+       CHAN6G(181, 6855, 0),
+       CHAN6G(185, 6875, 0),
+       CHAN6G(189, 6895, 0),
+       CHAN6G(193, 6915, 0),
+       CHAN6G(197, 6935, 0),
+       CHAN6G(201, 6955, 0),
+       CHAN6G(205, 6975, 0),
+       CHAN6G(209, 6995, 0),
+       CHAN6G(213, 7015, 0),
+       CHAN6G(217, 7035, 0),
+       CHAN6G(221, 7055, 0),
+       CHAN6G(225, 7075, 0),
+       CHAN6G(229, 7095, 0),
+       CHAN6G(233, 7115, 0),
+};
+
 static struct ieee80211_rate ath11k_legacy_rates[] = {
        { .bitrate = 10,
          .hw_value = ATH11K_HW_RATE_CCK_LP_1M },
@@ -134,6 +205,17 @@ ath11k_phymodes[NUM_NL80211_BANDS][ATH11K_CHAN_WIDTH_NUM] = {
                        [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160,
                        [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80,
        },
+       [NL80211_BAND_6GHZ] = {
+                       [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
+                       [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
+                       [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20,
+                       [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20,
+                       [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40,
+                       [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80,
+                       [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160,
+                       [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80,
+       },
+
 };
 
 const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default = {
@@ -698,6 +780,8 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
        struct ieee80211_vif *vif = arvif->vif;
        struct ieee80211_mutable_offsets offs = {};
        struct sk_buff *bcn;
+       struct ieee80211_mgmt *mgmt;
+       u8 *ies;
        int ret;
 
        if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
@@ -709,6 +793,17 @@ static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
                return -EPERM;
        }
 
+       ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
+       ies += sizeof(mgmt->u.beacon);
+
+       if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies)))
+               arvif->rsnie_present = true;
+
+       if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+                                   WLAN_OUI_TYPE_MICROSOFT_WPA,
+                                   ies, (skb_tail_pointer(bcn) - ies)))
+               arvif->wpaie_present = true;
+
        ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
 
        kfree_skb(bcn);
@@ -798,6 +893,7 @@ static void ath11k_peer_assoc_h_crypto(struct ath11k *ar,
        struct ieee80211_bss_conf *info = &vif->bss_conf;
        struct cfg80211_chan_def def;
        struct cfg80211_bss *bss;
+       struct ath11k_vif *arvif = (struct ath11k_vif *)vif->drv_priv;
        const u8 *rsnie = NULL;
        const u8 *wpaie = NULL;
 
@@ -808,7 +904,12 @@ static void ath11k_peer_assoc_h_crypto(struct ath11k *ar,
 
        bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
                               IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
-       if (bss) {
+
+       if (arvif->rsnie_present || arvif->wpaie_present) {
+               arg->need_ptk_4_way = true;
+               if (arvif->wpaie_present)
+                       arg->need_gtk_2_way = true;
+       } else if (bss) {
                const struct cfg80211_bss_ies *ies;
 
                rcu_read_lock();
@@ -1489,6 +1590,7 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
                }
                break;
        case NL80211_BAND_5GHZ:
+       case NL80211_BAND_6GHZ:
                /* Check HE first */
                if (sta->he_cap.has_he) {
                        phymode = ath11k_mac_get_phymode_he(ar, sta);
@@ -2125,6 +2227,9 @@ static int ath11k_start_scan(struct ath11k *ar,
 
        lockdep_assert_held(&ar->conf_mutex);
 
+       if (ath11k_spectral_get_mode(ar) == ATH11K_SPECTRAL_BACKGROUND)
+               ath11k_spectral_reset_buffer(ar);
+
        ret = ath11k_wmi_send_scan_start_cmd(ar, arg);
        if (ret)
                return ret;
@@ -3411,7 +3516,7 @@ static void ath11k_mac_setup_ht_vht_cap(struct ath11k *ar,
                                                    rate_cap_rx_chainmask);
        }
 
-       if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+       if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && !ar->supports_6ghz) {
                band = &ar->mac.sbands[NL80211_BAND_5GHZ];
                ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info;
                if (ht_cap_info)
@@ -3532,6 +3637,35 @@ ath11k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem)
        he_cap_elem->phy_cap_info[9] &= ~m;
 }
 
+static __le16 ath11k_mac_setup_he_6ghz_cap(struct ath11k_pdev_cap *pcap,
+                                          struct ath11k_band_cap *bcap)
+{
+       u8 val;
+
+       bcap->he_6ghz_capa = IEEE80211_HT_MPDU_DENSITY_NONE;
+       if (bcap->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
+               bcap->he_6ghz_capa |=
+                       FIELD_PREP(IEEE80211_HE_6GHZ_CAP_SM_PS,
+                                  WLAN_HT_CAP_SM_PS_DYNAMIC);
+       else
+               bcap->he_6ghz_capa |=
+                       FIELD_PREP(IEEE80211_HE_6GHZ_CAP_SM_PS,
+                                  WLAN_HT_CAP_SM_PS_DISABLED);
+       val = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
+                       pcap->vht_cap);
+       bcap->he_6ghz_capa |=
+               FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP, val);
+       val = FIELD_GET(IEEE80211_VHT_CAP_MAX_MPDU_MASK, pcap->vht_cap);
+       bcap->he_6ghz_capa |=
+               FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN, val);
+       if (pcap->vht_cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN)
+               bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
+       if (pcap->vht_cap & IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN)
+               bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS;
+
+       return cpu_to_le16(bcap->he_6ghz_capa);
+}
+
 static int ath11k_mac_copy_he_cap(struct ath11k *ar,
                                  struct ath11k_pdev_cap *cap,
                                  struct ieee80211_sband_iftype_data *data,
@@ -3614,6 +3748,11 @@ static int ath11k_mac_copy_he_cap(struct ath11k *ar,
                    IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)
                        ath11k_gen_ppe_thresh(&band_cap->he_ppet,
                                              he_cap->ppe_thres);
+
+               if (band == NL80211_BAND_6GHZ) {
+                       data[idx].he_6ghz_capa.capa =
+                               ath11k_mac_setup_he_6ghz_cap(cap, band_cap);
+               }
                idx++;
        }
 
@@ -3643,6 +3782,16 @@ static void ath11k_mac_setup_he_cap(struct ath11k *ar,
                band->iftype_data = ar->mac.iftype[NL80211_BAND_5GHZ];
                band->n_iftype_data = count;
        }
+
+       if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
+           ar->supports_6ghz) {
+               count = ath11k_mac_copy_he_cap(ar, cap,
+                                              ar->mac.iftype[NL80211_BAND_6GHZ],
+                                              NL80211_BAND_6GHZ);
+               band = &ar->mac.sbands[NL80211_BAND_6GHZ];
+               band->iftype_data = ar->mac.iftype[NL80211_BAND_6GHZ];
+               band->n_iftype_data = count;
+       }
 }
 
 static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant)
@@ -4085,6 +4234,11 @@ ath11k_mac_setup_vdev_create_params(struct ath11k_vif *arvif,
                params->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
                params->chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
        }
+       if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP &&
+           ar->supports_6ghz) {
+               params->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains;
+               params->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains;
+       }
 }
 
 static u32
@@ -5217,7 +5371,7 @@ ath11k_mac_get_single_legacy_rate(struct ath11k *ar,
 
        rate_idx = ffs(mask->control[band].legacy) - 1;
 
-       if (band == NL80211_BAND_5GHZ)
+       if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ)
                rate_idx += ATH11K_MAC_FIRST_OFDM_RATE_IDX;
 
        hw_rate = ath11k_legacy_rates[rate_idx].hw_value;
@@ -5683,7 +5837,8 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
        void *channels;
 
        BUILD_BUG_ON((ARRAY_SIZE(ath11k_2ghz_channels) +
-                     ARRAY_SIZE(ath11k_5ghz_channels)) !=
+                     ARRAY_SIZE(ath11k_5ghz_channels) +
+                     ARRAY_SIZE(ath11k_6ghz_channels)) !=
                     ATH11K_NUM_CHANS);
 
        reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx];
@@ -5696,6 +5851,7 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
                        return -ENOMEM;
 
                band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+               band->band = NL80211_BAND_2GHZ;
                band->n_channels = ARRAY_SIZE(ath11k_2ghz_channels);
                band->channels = channels;
                band->n_bitrates = ath11k_g_rates_size;
@@ -5707,23 +5863,48 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
        }
 
        if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
-               channels = kmemdup(ath11k_5ghz_channels,
-                                  sizeof(ath11k_5ghz_channels),
-                                  GFP_KERNEL);
-               if (!channels) {
-                       kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
-                       return -ENOMEM;
+               if (reg_cap->high_5ghz_chan >= ATH11K_MAX_6G_FREQ) {
+                       channels = kmemdup(ath11k_6ghz_channels,
+                                          sizeof(ath11k_6ghz_channels), GFP_KERNEL);
+                       if (!channels) {
+                               kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+                               return -ENOMEM;
+                       }
+
+                       ar->supports_6ghz = true;
+                       band = &ar->mac.sbands[NL80211_BAND_6GHZ];
+                       band->band = NL80211_BAND_6GHZ;
+                       band->n_channels = ARRAY_SIZE(ath11k_6ghz_channels);
+                       band->channels = channels;
+                       band->n_bitrates = ath11k_a_rates_size;
+                       band->bitrates = ath11k_a_rates;
+                       ar->hw->wiphy->bands[NL80211_BAND_6GHZ] = band;
+                       ath11k_mac_update_ch_list(ar, band,
+                                                 reg_cap->low_5ghz_chan,
+                                                 reg_cap->high_5ghz_chan);
                }
 
-               band = &ar->mac.sbands[NL80211_BAND_5GHZ];
-               band->n_channels = ARRAY_SIZE(ath11k_5ghz_channels);
-               band->channels = channels;
-               band->n_bitrates = ath11k_a_rates_size;
-               band->bitrates = ath11k_a_rates;
-               ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
-               ath11k_mac_update_ch_list(ar, band,
-                                         reg_cap->low_5ghz_chan,
-                                         reg_cap->high_5ghz_chan);
+               if (reg_cap->low_5ghz_chan < ATH11K_MIN_6G_FREQ) {
+                       channels = kmemdup(ath11k_5ghz_channels,
+                                          sizeof(ath11k_5ghz_channels),
+                                          GFP_KERNEL);
+                       if (!channels) {
+                               kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+                               kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+                               return -ENOMEM;
+                       }
+
+                       band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+                       band->band = NL80211_BAND_5GHZ;
+                       band->n_channels = ARRAY_SIZE(ath11k_5ghz_channels);
+                       band->channels = channels;
+                       band->n_bitrates = ath11k_a_rates_size;
+                       band->bitrates = ath11k_a_rates;
+                       ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
+                       ath11k_mac_update_ch_list(ar, band,
+                                                 reg_cap->low_5ghz_chan,
+                                                 reg_cap->high_5ghz_chan);
+               }
        }
 
        return 0;
@@ -5777,6 +5958,7 @@ static void __ath11k_mac_unregister(struct ath11k *ar)
 
        kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
        kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+       kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
 
        SET_IEEE80211_DEV(ar->hw, NULL);
 }
index 453aa9c..7c9dc91 100644 (file)
@@ -161,6 +161,10 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
                        else
                                ch->phy_mode = MODE_11A;
 
+                       if (channel->band == NL80211_BAND_6GHZ &&
+                           cfg80211_channel_is_psc(channel))
+                               ch->psc_channel = true;
+
                        ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
                                   "mac channel [%d/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
                                   i, params->nallchans,
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
new file mode 100644 (file)
index 0000000..1c5d65b
--- /dev/null
@@ -0,0 +1,1023 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/relay.h>
+#include "core.h"
+#include "debug.h"
+
+#define ATH11K_SPECTRAL_NUM_RESP_PER_EVENT     2
+#define ATH11K_SPECTRAL_EVENT_TIMEOUT_MS       1
+
+#define ATH11K_SPECTRAL_DWORD_SIZE             4
+/* HW bug, expected BIN size is 2 bytes but HW report as 4 bytes */
+#define ATH11K_SPECTRAL_BIN_SIZE               4
+#define ATH11K_SPECTRAL_ATH11K_MIN_BINS                64
+#define ATH11K_SPECTRAL_ATH11K_MIN_IB_BINS     32
+#define ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS     256
+
+#define ATH11K_SPECTRAL_SAMPLE_FFT_BIN_MASK    0xFF
+
+#define ATH11K_SPECTRAL_SCAN_COUNT_MAX         4095
+
+/* Max channel computed by sum of 2g and 5g band channels */
+#define ATH11K_SPECTRAL_TOTAL_CHANNEL          41
+#define ATH11K_SPECTRAL_SAMPLES_PER_CHANNEL    70
+#define ATH11K_SPECTRAL_PER_SAMPLE_SIZE                (sizeof(struct fft_sample_ath11k) + \
+                                                ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS)
+#define ATH11K_SPECTRAL_TOTAL_SAMPLE           (ATH11K_SPECTRAL_TOTAL_CHANNEL * \
+                                                ATH11K_SPECTRAL_SAMPLES_PER_CHANNEL)
+#define ATH11K_SPECTRAL_SUB_BUFF_SIZE          ATH11K_SPECTRAL_PER_SAMPLE_SIZE
+#define ATH11K_SPECTRAL_NUM_SUB_BUF            ATH11K_SPECTRAL_TOTAL_SAMPLE
+
+#define ATH11K_SPECTRAL_20MHZ                  20
+#define ATH11K_SPECTRAL_40MHZ                  40
+#define ATH11K_SPECTRAL_80MHZ                  80
+
+#define ATH11K_SPECTRAL_SIGNATURE              0xFA
+
+#define ATH11K_SPECTRAL_TAG_RADAR_SUMMARY      0x0
+#define ATH11K_SPECTRAL_TAG_RADAR_FFT          0x1
+#define ATH11K_SPECTRAL_TAG_SCAN_SUMMARY       0x2
+#define ATH11K_SPECTRAL_TAG_SCAN_SEARCH                0x3
+
+#define SPECTRAL_TLV_HDR_LEN                           GENMASK(15, 0)
+#define SPECTRAL_TLV_HDR_TAG                           GENMASK(23, 16)
+#define SPECTRAL_TLV_HDR_SIGN                          GENMASK(31, 24)
+
+#define SPECTRAL_SUMMARY_INFO0_AGC_TOTAL_GAIN          GENMASK(7, 0)
+#define SPECTRAL_SUMMARY_INFO0_OB_FLAG                 BIT(8)
+#define SPECTRAL_SUMMARY_INFO0_GRP_IDX                 GENMASK(16, 9)
+#define SPECTRAL_SUMMARY_INFO0_RECENT_RFSAT            BIT(17)
+#define SPECTRAL_SUMMARY_INFO0_INBAND_PWR_DB           GENMASK(27, 18)
+#define SPECTRAL_SUMMARY_INFO0_FALSE_SCAN              BIT(28)
+#define SPECTRAL_SUMMARY_INFO0_DETECTOR_ID             GENMASK(30, 29)
+#define SPECTRAL_SUMMARY_INFO0_PRI80                   BIT(31)
+
+#define SPECTRAL_SUMMARY_INFO2_PEAK_SIGNED_IDX         GENMASK(11, 0)
+#define SPECTRAL_SUMMARY_INFO2_PEAK_MAGNITUDE          GENMASK(21, 12)
+#define SPECTRAL_SUMMARY_INFO2_NARROWBAND_MASK         GENMASK(29, 22)
+#define SPECTRAL_SUMMARY_INFO2_GAIN_CHANGE             BIT(30)
+
+struct spectral_tlv {
+       __le32 timestamp;
+       __le32 header;
+} __packed;
+
+struct spectral_summary_fft_report {
+       __le32 timestamp;
+       __le32 tlv_header;
+       __le32 info0;
+       __le32 reserve0;
+       __le32 info2;
+       __le32 reserve1;
+} __packed;
+
+struct ath11k_spectral_summary_report {
+       struct wmi_dma_buf_release_meta_data meta;
+       u32 timestamp;
+       u8 agc_total_gain;
+       u8 grp_idx;
+       u16 inb_pwr_db;
+       s16 peak_idx;
+       u16 peak_mag;
+       u8 detector_id;
+       bool out_of_band_flag;
+       bool rf_saturation;
+       bool primary80;
+       bool gain_change;
+       bool false_scan;
+};
+
+#define SPECTRAL_FFT_REPORT_INFO0_DETECTOR_ID          GENMASK(1, 0)
+#define SPECTRAL_FFT_REPORT_INFO0_FFT_NUM              GENMASK(4, 2)
+#define SPECTRAL_FFT_REPORT_INFO0_RADAR_CHECK          GENMASK(16, 5)
+#define SPECTRAL_FFT_REPORT_INFO0_PEAK_SIGNED_IDX      GENMASK(27, 17)
+#define SPECTRAL_FFT_REPORT_INFO0_CHAIN_IDX            GENMASK(30, 28)
+
+#define SPECTRAL_FFT_REPORT_INFO1_BASE_PWR_DB          GENMASK(8, 0)
+#define SPECTRAL_FFT_REPORT_INFO1_TOTAL_GAIN_DB                GENMASK(16, 9)
+
+#define SPECTRAL_FFT_REPORT_INFO2_NUM_STRONG_BINS      GENMASK(7, 0)
+#define SPECTRAL_FFT_REPORT_INFO2_PEAK_MAGNITUDE       GENMASK(17, 8)
+#define SPECTRAL_FFT_REPORT_INFO2_AVG_PWR_DB           GENMASK(24, 18)
+#define SPECTRAL_FFT_REPORT_INFO2_REL_PWR_DB           GENMASK(31, 25)
+
+struct spectral_search_fft_report {
+       __le32 timestamp;
+       __le32 tlv_header;
+       __le32 info0;
+       __le32 info1;
+       __le32 info2;
+       __le32 reserve0;
+       u8 bins[0];
+} __packed;
+
+struct ath11k_spectral_search_report {
+       u32 timestamp;
+       u8 detector_id;
+       u8 fft_count;
+       u16 radar_check;
+       s16 peak_idx;
+       u8 chain_idx;
+       u16 base_pwr_db;
+       u8 total_gain_db;
+       u8 strong_bin_count;
+       u16 peak_mag;
+       u8 avg_pwr_db;
+       u8 rel_pwr_db;
+};
+
+static struct dentry *create_buf_file_handler(const char *filename,
+                                             struct dentry *parent,
+                                             umode_t mode,
+                                             struct rchan_buf *buf,
+                                             int *is_global)
+{
+       struct dentry *buf_file;
+
+       buf_file = debugfs_create_file(filename, mode, parent, buf,
+                                      &relay_file_operations);
+       *is_global = 1;
+       return buf_file;
+}
+
+static int remove_buf_file_handler(struct dentry *dentry)
+{
+       debugfs_remove(dentry);
+
+       return 0;
+}
+
+static struct rchan_callbacks rfs_scan_cb = {
+       .create_buf_file = create_buf_file_handler,
+       .remove_buf_file = remove_buf_file_handler,
+};
+
+static struct ath11k_vif *ath11k_spectral_get_vdev(struct ath11k *ar)
+{
+       struct ath11k_vif *arvif;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       if (list_empty(&ar->arvifs))
+               return NULL;
+
+       /* if there already is a vif doing spectral, return that. */
+       list_for_each_entry(arvif, &ar->arvifs, list)
+               if (arvif->spectral_enabled)
+                       return arvif;
+
+       /* otherwise, return the first vif. */
+       return list_first_entry(&ar->arvifs, typeof(*arvif), list);
+}
+
+static int ath11k_spectral_scan_trigger(struct ath11k *ar)
+{
+       struct ath11k_vif *arvif;
+       int ret;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       arvif = ath11k_spectral_get_vdev(ar);
+       if (!arvif)
+               return -ENODEV;
+
+       if (ar->spectral.mode == ATH11K_SPECTRAL_DISABLED)
+               return 0;
+
+       ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
+                                             ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
+                                             ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE);
+       if (ret)
+               return ret;
+
+       ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
+                                             ATH11K_WMI_SPECTRAL_TRIGGER_CMD_TRIGGER,
+                                             ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int ath11k_spectral_scan_config(struct ath11k *ar,
+                                      enum ath11k_spectral_mode mode)
+{
+       struct ath11k_wmi_vdev_spectral_conf_param param = { 0 };
+       struct ath11k_vif *arvif;
+       int ret, count;
+
+       lockdep_assert_held(&ar->conf_mutex);
+
+       arvif = ath11k_spectral_get_vdev(ar);
+       if (!arvif)
+               return -ENODEV;
+
+       arvif->spectral_enabled = (mode != ATH11K_SPECTRAL_DISABLED);
+       ar->spectral.mode = mode;
+
+       ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
+                                             ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
+                                             ATH11K_WMI_SPECTRAL_ENABLE_CMD_DISABLE);
+       if (ret) {
+               ath11k_warn(ar->ab, "failed to enable spectral scan: %d\n", ret);
+               return ret;
+       }
+
+       if (mode == ATH11K_SPECTRAL_DISABLED)
+               return 0;
+
+       if (mode == ATH11K_SPECTRAL_BACKGROUND)
+               count = ATH11K_WMI_SPECTRAL_COUNT_DEFAULT;
+       else
+               count = max_t(u16, 1, ar->spectral.count);
+
+       param.vdev_id = arvif->vdev_id;
+       param.scan_count = count;
+       param.scan_fft_size = ar->spectral.fft_size;
+       param.scan_period = ATH11K_WMI_SPECTRAL_PERIOD_DEFAULT;
+       param.scan_priority = ATH11K_WMI_SPECTRAL_PRIORITY_DEFAULT;
+       param.scan_gc_ena = ATH11K_WMI_SPECTRAL_GC_ENA_DEFAULT;
+       param.scan_restart_ena = ATH11K_WMI_SPECTRAL_RESTART_ENA_DEFAULT;
+       param.scan_noise_floor_ref = ATH11K_WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
+       param.scan_init_delay = ATH11K_WMI_SPECTRAL_INIT_DELAY_DEFAULT;
+       param.scan_nb_tone_thr = ATH11K_WMI_SPECTRAL_NB_TONE_THR_DEFAULT;
+       param.scan_str_bin_thr = ATH11K_WMI_SPECTRAL_STR_BIN_THR_DEFAULT;
+       param.scan_wb_rpt_mode = ATH11K_WMI_SPECTRAL_WB_RPT_MODE_DEFAULT;
+       param.scan_rssi_rpt_mode = ATH11K_WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT;
+       param.scan_rssi_thr = ATH11K_WMI_SPECTRAL_RSSI_THR_DEFAULT;
+       param.scan_pwr_format = ATH11K_WMI_SPECTRAL_PWR_FORMAT_DEFAULT;
+       param.scan_rpt_mode = ATH11K_WMI_SPECTRAL_RPT_MODE_DEFAULT;
+       param.scan_bin_scale = ATH11K_WMI_SPECTRAL_BIN_SCALE_DEFAULT;
+       param.scan_dbm_adj = ATH11K_WMI_SPECTRAL_DBM_ADJ_DEFAULT;
+       param.scan_chn_mask = ATH11K_WMI_SPECTRAL_CHN_MASK_DEFAULT;
+
+       ret = ath11k_wmi_vdev_spectral_conf(ar, &param);
+       if (ret) {
+               ath11k_warn(ar->ab, "failed to configure spectral scan: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static ssize_t ath11k_read_file_spec_scan_ctl(struct file *file,
+                                             char __user *user_buf,
+                                             size_t count, loff_t *ppos)
+{
+       struct ath11k *ar = file->private_data;
+       char *mode = "";
+       size_t len;
+       enum ath11k_spectral_mode spectral_mode;
+
+       mutex_lock(&ar->conf_mutex);
+       spectral_mode = ar->spectral.mode;
+       mutex_unlock(&ar->conf_mutex);
+
+       switch (spectral_mode) {
+       case ATH11K_SPECTRAL_DISABLED:
+               mode = "disable";
+               break;
+       case ATH11K_SPECTRAL_BACKGROUND:
+               mode = "background";
+               break;
+       case ATH11K_SPECTRAL_MANUAL:
+               mode = "manual";
+               break;
+       }
+
+       len = strlen(mode);
+       return simple_read_from_buffer(user_buf, count, ppos, mode, len);
+}
+
+/* debugfs "spectral_scan_ctl" write handler.
+ *
+ * Accepted keywords: "trigger", "background", "manual", "disable".
+ * Note strncmp() compares only the keyword prefix, so trailing
+ * characters (e.g. the newline from "echo") are tolerated.
+ */
+static ssize_t ath11k_write_file_spec_scan_ctl(struct file *file,
+                                              const char __user *user_buf,
+                                              size_t count, loff_t *ppos)
+{
+       struct ath11k *ar = file->private_data;
+       char buf[32];
+       ssize_t len;
+       int ret;
+
+       len = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, len))
+               return -EFAULT;
+
+       /* NUL-terminate the copied user input before parsing */
+       buf[len] = '\0';
+
+       mutex_lock(&ar->conf_mutex);
+
+       if (strncmp("trigger", buf, 7) == 0) {
+               /* "trigger" is only valid once a scan mode is active */
+               if (ar->spectral.mode == ATH11K_SPECTRAL_MANUAL ||
+                   ar->spectral.mode == ATH11K_SPECTRAL_BACKGROUND) {
+                       /* reset the configuration to adopt possibly changed
+                        * debugfs parameters
+                        */
+                       ret = ath11k_spectral_scan_config(ar, ar->spectral.mode);
+                       if (ret) {
+                               ath11k_warn(ar->ab, "failed to reconfigure spectral scan: %d\n",
+                                           ret);
+                               goto unlock;
+                       }
+
+                       ret = ath11k_spectral_scan_trigger(ar);
+                       if (ret) {
+                               ath11k_warn(ar->ab, "failed to trigger spectral scan: %d\n",
+                                           ret);
+                       }
+               } else {
+                       ret = -EINVAL;
+               }
+       } else if (strncmp("background", buf, 10) == 0) {
+               ret = ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_BACKGROUND);
+       } else if (strncmp("manual", buf, 6) == 0) {
+               ret = ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_MANUAL);
+       } else if (strncmp("disable", buf, 7) == 0) {
+               ret = ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_DISABLED);
+       } else {
+               ret = -EINVAL;
+       }
+
+unlock:
+       mutex_unlock(&ar->conf_mutex);
+
+       if (ret)
+               return ret;
+
+       return count;
+}
+
+/* File ops backing the "spectral_scan_ctl" debugfs entry. */
+static const struct file_operations fops_scan_ctl = {
+       .read = ath11k_read_file_spec_scan_ctl,
+       .write = ath11k_write_file_spec_scan_ctl,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+/* debugfs "spectral_count" read handler: report the configured scan
+ * count as a decimal line.
+ */
+static ssize_t ath11k_read_file_spectral_count(struct file *file,
+                                              char __user *user_buf,
+                                              size_t count, loff_t *ppos)
+{
+       struct ath11k *ar = file->private_data;
+       u16 scan_count;
+       char out[32];
+       size_t out_len;
+
+       /* Read the configured count under conf_mutex. */
+       mutex_lock(&ar->conf_mutex);
+       scan_count = ar->spectral.count;
+       mutex_unlock(&ar->conf_mutex);
+
+       out_len = sprintf(out, "%d\n", scan_count);
+       return simple_read_from_buffer(user_buf, count, ppos, out, out_len);
+}
+
+/* debugfs "spectral_count" write handler: parse an unsigned decimal
+ * value and store it as the scan count, bounded by
+ * ATH11K_SPECTRAL_SCAN_COUNT_MAX.
+ */
+static ssize_t ath11k_write_file_spectral_count(struct file *file,
+                                               const char __user *user_buf,
+                                               size_t count, loff_t *ppos)
+{
+       struct ath11k *ar = file->private_data;
+       char kbuf[32];
+       ssize_t copy_len = min(count, sizeof(kbuf) - 1);
+       unsigned long scan_count;
+
+       if (copy_from_user(kbuf, user_buf, copy_len))
+               return -EFAULT;
+       kbuf[copy_len] = '\0';
+
+       /* Reject non-numeric or out-of-range input. */
+       if (kstrtoul(kbuf, 0, &scan_count) ||
+           scan_count > ATH11K_SPECTRAL_SCAN_COUNT_MAX)
+               return -EINVAL;
+
+       mutex_lock(&ar->conf_mutex);
+       ar->spectral.count = scan_count;
+       mutex_unlock(&ar->conf_mutex);
+
+       return count;
+}
+
+/* File ops backing the "spectral_count" debugfs entry. */
+static const struct file_operations fops_scan_count = {
+       .read = ath11k_read_file_spectral_count,
+       .write = ath11k_write_file_spectral_count,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+/* debugfs "spectral_bins" read handler.
+ *
+ * Reports the currently-configured number of FFT bins (2^fft_size)
+ * as a decimal line.
+ */
+static ssize_t ath11k_read_file_spectral_bins(struct file *file,
+                                             char __user *user_buf,
+                                             size_t count, loff_t *ppos)
+{
+       struct ath11k *ar = file->private_data;
+       char buf[32];
+       unsigned int bins, fft_size;
+       size_t len;
+
+       mutex_lock(&ar->conf_mutex);
+
+       fft_size = ar->spectral.fft_size;
+       bins = 1 << fft_size;
+
+       mutex_unlock(&ar->conf_mutex);
+
+       /* bins is unsigned int, so use %u rather than %d */
+       len = sprintf(buf, "%u\n", bins);
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+/* debugfs "spectral_bins" write handler.
+ *
+ * Accepts a power-of-two bin count within
+ * [ATH11K_SPECTRAL_ATH11K_MIN_BINS, SPECTRAL_ATH11K_MAX_NUM_BINS]
+ * and stores its base-2 logarithm as fft_size.
+ */
+static ssize_t ath11k_write_file_spectral_bins(struct file *file,
+                                              const char __user *user_buf,
+                                              size_t count, loff_t *ppos)
+{
+       struct ath11k *ar = file->private_data;
+       unsigned long val;
+       char buf[32];
+       ssize_t len;
+
+       len = min(count, sizeof(buf) - 1);
+       if (copy_from_user(buf, user_buf, len))
+               return -EFAULT;
+
+       buf[len] = '\0';
+       if (kstrtoul(buf, 0, &val))
+               return -EINVAL;
+
+       if (val < ATH11K_SPECTRAL_ATH11K_MIN_BINS ||
+           val > SPECTRAL_ATH11K_MAX_NUM_BINS)
+               return -EINVAL;
+
+       if (!is_power_of_2(val))
+               return -EINVAL;
+
+       mutex_lock(&ar->conf_mutex);
+       ar->spectral.fft_size = ilog2(val);
+       mutex_unlock(&ar->conf_mutex);
+
+       return count;
+}
+
+/* File ops backing the "spectral_bins" debugfs entry. */
+static const struct file_operations fops_scan_bins = {
+       .read = ath11k_read_file_spectral_bins,
+       .write = ath11k_write_file_spectral_bins,
+       .open = simple_open,
+       .owner = THIS_MODULE,
+       .llseek = default_llseek,
+};
+
+/* Decode a little-endian spectral summary FFT report into the
+ * host-order fields of @report and copy in the DMA-ring metadata.
+ * @ar is currently unused.  Always returns 0.
+ */
+static int ath11k_spectral_pull_summary(struct ath11k *ar,
+                                       struct wmi_dma_buf_release_meta_data *meta,
+                                       struct spectral_summary_fft_report *summary,
+                                       struct ath11k_spectral_summary_report *report)
+{
+       report->timestamp = __le32_to_cpu(summary->timestamp);
+       report->agc_total_gain = FIELD_GET(SPECTRAL_SUMMARY_INFO0_AGC_TOTAL_GAIN,
+                                          __le32_to_cpu(summary->info0));
+       report->out_of_band_flag = FIELD_GET(SPECTRAL_SUMMARY_INFO0_OB_FLAG,
+                                            __le32_to_cpu(summary->info0));
+       report->grp_idx = FIELD_GET(SPECTRAL_SUMMARY_INFO0_GRP_IDX,
+                                   __le32_to_cpu(summary->info0));
+       report->rf_saturation = FIELD_GET(SPECTRAL_SUMMARY_INFO0_RECENT_RFSAT,
+                                         __le32_to_cpu(summary->info0));
+       report->inb_pwr_db = FIELD_GET(SPECTRAL_SUMMARY_INFO0_INBAND_PWR_DB,
+                                      __le32_to_cpu(summary->info0));
+       report->false_scan = FIELD_GET(SPECTRAL_SUMMARY_INFO0_FALSE_SCAN,
+                                      __le32_to_cpu(summary->info0));
+       report->detector_id = FIELD_GET(SPECTRAL_SUMMARY_INFO0_DETECTOR_ID,
+                                       __le32_to_cpu(summary->info0));
+       report->primary80 = FIELD_GET(SPECTRAL_SUMMARY_INFO0_PRI80,
+                                     __le32_to_cpu(summary->info0));
+       report->peak_idx = FIELD_GET(SPECTRAL_SUMMARY_INFO2_PEAK_SIGNED_IDX,
+                                    __le32_to_cpu(summary->info2));
+       report->peak_mag = FIELD_GET(SPECTRAL_SUMMARY_INFO2_PEAK_MAGNITUDE,
+                                    __le32_to_cpu(summary->info2));
+       report->gain_change = FIELD_GET(SPECTRAL_SUMMARY_INFO2_GAIN_CHANGE,
+                                       __le32_to_cpu(summary->info2));
+
+       /* carry the DMA buffer release metadata along with the report */
+       memcpy(&report->meta, meta, sizeof(*meta));
+
+       return 0;
+}
+
+/* Decode a little-endian spectral search FFT report into the
+ * host-order fields of @report.  @ar is currently unused.
+ * Always returns 0.
+ */
+static int ath11k_spectral_pull_search(struct ath11k *ar,
+                                      struct spectral_search_fft_report *search,
+                                      struct ath11k_spectral_search_report *report)
+{
+       report->timestamp = __le32_to_cpu(search->timestamp);
+       report->detector_id = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_DETECTOR_ID,
+                                       __le32_to_cpu(search->info0));
+       report->fft_count = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_FFT_NUM,
+                                     __le32_to_cpu(search->info0));
+       report->radar_check = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_RADAR_CHECK,
+                                       __le32_to_cpu(search->info0));
+       report->peak_idx = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_PEAK_SIGNED_IDX,
+                                    __le32_to_cpu(search->info0));
+       report->chain_idx = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_CHAIN_IDX,
+                                     __le32_to_cpu(search->info0));
+       report->base_pwr_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO1_BASE_PWR_DB,
+                                       __le32_to_cpu(search->info1));
+       report->total_gain_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO1_TOTAL_GAIN_DB,
+                                         __le32_to_cpu(search->info1));
+       report->strong_bin_count = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_NUM_STRONG_BINS,
+                                            __le32_to_cpu(search->info2));
+       report->peak_mag = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_PEAK_MAGNITUDE,
+                                    __le32_to_cpu(search->info2));
+       report->avg_pwr_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_AVG_PWR_DB,
+                                      __le32_to_cpu(search->info2));
+       report->rel_pwr_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_REL_PWR_DB,
+                                      __le32_to_cpu(search->info2));
+
+       return 0;
+}
+
+/* Determine the right-shift ("max_exp") that maps the stored peak bin
+ * value back to the reported peak magnitude.  Returns 0 when the peak
+ * index falls outside the bin array or no shift reproduces it.
+ */
+static u8 ath11k_spectral_get_max_exp(s8 max_index, u8 max_magnitude,
+                                     int bin_len, u8 *bins)
+{
+       int center = bin_len / 2;
+       u8 exp;
+
+       /* peak index must lie strictly inside the reported bins */
+       if (max_index >= center || max_index <= -center)
+               return 0;
+
+       for (exp = 0; exp < 8; exp++)
+               if (bins[center + max_index] == (max_magnitude >> exp))
+                       break;
+
+       /* no exponent in [0, 7] reproduced the magnitude */
+       if (bins[center + max_index] != (max_magnitude >> exp))
+               return 0;
+
+       return exp;
+}
+
+/* Unpack 16-bit little-endian FFT words into 8-bit output bins by
+ * masking each word with ATH11K_SPECTRAL_SAMPLE_FFT_BIN_MASK.
+ */
+static void ath11k_spectral_parse_16bit_fft(u8 *outbins, u8 *inbins, int num_bins)
+{
+       __le16 *fft_words = (__le16 *)inbins;
+       int idx;
+
+       for (idx = 0; idx < num_bins; idx++)
+               outbins[idx] = __le16_to_cpu(fft_words[idx]) &
+                              ATH11K_SPECTRAL_SAMPLE_FFT_BIN_MASK;
+}
+
+/* Convert one spectral search FFT report TLV into a fft_sample_ath11k
+ * record and push it to the relay channel.
+ *
+ * @summary: previously decoded scan summary (supplies channel width,
+ *           in-band power, noise floor and center frequencies)
+ * @data: points at the TLV header of the search report
+ * @fft_sample: caller-provided output buffer
+ * @data_len: bytes remaining in the buffer from @data onwards
+ *
+ * Must be called with ar->spectral.lock held.
+ */
+static
+int ath11k_spectral_process_fft(struct ath11k *ar,
+                               struct ath11k_spectral_summary_report *summary,
+                               void *data,
+                               struct fft_sample_ath11k *fft_sample,
+                               u32 data_len)
+{
+       struct ath11k_base *ab = ar->ab;
+       struct spectral_search_fft_report *fft_report = data;
+       struct ath11k_spectral_search_report search;
+       struct spectral_tlv *tlv;
+       int tlv_len, bin_len, num_bins;
+       u16 length, freq;
+       u8 chan_width_mhz;
+       int ret;
+
+       lockdep_assert_held(&ar->spectral.lock);
+
+       tlv = (struct spectral_tlv *)data;
+       tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN, __le32_to_cpu(tlv->header));
+       /* convert Dword into bytes */
+       tlv_len *= ATH11K_SPECTRAL_DWORD_SIZE;
+       /* whatever of the payload is not the fixed report fields is bin data */
+       bin_len = tlv_len - (sizeof(*fft_report) - sizeof(*tlv));
+
+       if (data_len < (bin_len + sizeof(*fft_report))) {
+               ath11k_warn(ab, "mismatch in expected bin len %d and data len %d\n",
+                           bin_len, data_len);
+               return -EINVAL;
+       }
+
+       num_bins = bin_len / ATH11K_SPECTRAL_BIN_SIZE;
+       /* Only In-band bins are useful to user for visualize */
+       num_bins >>= 1;
+
+       if (num_bins < ATH11K_SPECTRAL_ATH11K_MIN_IB_BINS ||
+           num_bins > ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS ||
+           !is_power_of_2(num_bins)) {
+               ath11k_warn(ab, "Invalid num of bins %d\n", num_bins);
+               return -EINVAL;
+       }
+
+       ret = ath11k_spectral_pull_search(ar, data, &search);
+       if (ret) {
+               ath11k_warn(ab, "failed to pull search report %d\n", ret);
+               return ret;
+       }
+
+       chan_width_mhz = summary->meta.ch_width;
+
+       switch (chan_width_mhz) {
+       case ATH11K_SPECTRAL_20MHZ:
+       case ATH11K_SPECTRAL_40MHZ:
+       case ATH11K_SPECTRAL_80MHZ:
+               fft_sample->chan_width_mhz = chan_width_mhz;
+               break;
+       default:
+               ath11k_warn(ab, "invalid channel width %d\n", chan_width_mhz);
+               return -EINVAL;
+       }
+
+       /* TLV payload length: everything after the fixed TLV header */
+       length = sizeof(*fft_sample) - sizeof(struct fft_sample_tlv) + num_bins;
+       fft_sample->tlv.type = ATH_FFT_SAMPLE_ATH11K;
+       fft_sample->tlv.length = __cpu_to_be16(length);
+
+       fft_sample->tsf = __cpu_to_be32(search.timestamp);
+       fft_sample->max_magnitude = __cpu_to_be16(search.peak_mag);
+       fft_sample->max_index = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_PEAK_SIGNED_IDX,
+                                         __le32_to_cpu(fft_report->info0));
+
+       /* NOTE(review): halving suggests 0.5 dB units — TODO confirm.
+        * This also mutates the caller's summary report in place.
+        */
+       summary->inb_pwr_db >>= 1;
+       fft_sample->rssi = __cpu_to_be16(summary->inb_pwr_db);
+       fft_sample->noise = __cpu_to_be32(summary->meta.noise_floor[search.chain_idx]);
+
+       freq = summary->meta.freq1;
+       fft_sample->freq1 = __cpu_to_be16(freq);
+
+       freq = summary->meta.freq2;
+       fft_sample->freq2 = __cpu_to_be16(freq);
+
+       ath11k_spectral_parse_16bit_fft(fft_sample->data,
+                                       fft_report->bins,
+                                       num_bins);
+
+       fft_sample->max_exp = ath11k_spectral_get_max_exp(fft_sample->max_index,
+                                                         search.peak_mag,
+                                                         num_bins,
+                                                         fft_sample->data);
+
+       /* hand the completed sample to the relay channel, if open */
+       if (ar->spectral.rfs_scan)
+               relay_write(ar->spectral.rfs_scan, fft_sample,
+                           length + sizeof(struct fft_sample_tlv));
+
+       return 0;
+}
+
+/* DMA-ring data callback for spectral events.
+ *
+ * Walks the TLV stream in @param->data: a SCAN_SUMMARY TLV is decoded
+ * into a local summary report, and a following SCAN_SEARCH TLV is
+ * converted into a relay sample via ath11k_spectral_process_fft().
+ * Unknown tags are skipped.  Uses GFP_ATOMIC and a bh-safe spinlock,
+ * so it is callable from non-sleeping context.
+ *
+ * Returns 0 on success (including a buffer containing no search
+ * report) or a negative error code.
+ */
+static int ath11k_spectral_process_data(struct ath11k *ar,
+                                       struct ath11k_dbring_data *param)
+{
+       struct ath11k_base *ab = ar->ab;
+       struct spectral_tlv *tlv;
+       struct spectral_summary_fft_report *summary = NULL;
+       struct ath11k_spectral_summary_report summ_rpt;
+       struct fft_sample_ath11k *fft_sample = NULL;
+       u8 *data;
+       u32 data_len, i;
+       u8 sign, tag;
+       int tlv_len, sample_sz;
+       int ret;
+       bool quit = false;
+
+       spin_lock_bh(&ar->spectral.lock);
+
+       if (!ar->spectral.enabled) {
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       sample_sz = sizeof(*fft_sample) + ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS;
+       fft_sample = kmalloc(sample_sz, GFP_ATOMIC);
+       if (!fft_sample) {
+               ret = -ENOBUFS;
+               goto unlock;
+       }
+
+       data = param->data;
+       data_len = param->data_sz;
+       i = 0;
+       while (!quit && (i < data_len)) {
+               if ((i + sizeof(*tlv)) > data_len) {
+                       ath11k_warn(ab, "failed to parse spectral tlv hdr at bytes %d\n",
+                                   i);
+                       ret = -EINVAL;
+                       goto err;
+               }
+
+               tlv = (struct spectral_tlv *)&data[i];
+               sign = FIELD_GET(SPECTRAL_TLV_HDR_SIGN,
+                                __le32_to_cpu(tlv->header));
+               if (sign != ATH11K_SPECTRAL_SIGNATURE) {
+                       ath11k_warn(ab, "Invalid sign 0x%x at bytes %d\n",
+                                   sign, i);
+                       ret = -EINVAL;
+                       goto err;
+               }
+
+               tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN,
+                                   __le32_to_cpu(tlv->header));
+               /* convert Dword into bytes */
+               tlv_len *= ATH11K_SPECTRAL_DWORD_SIZE;
+               if ((i + sizeof(*tlv) + tlv_len) > data_len) {
+                       ath11k_warn(ab, "failed to parse spectral tlv payload at bytes %d tlv_len:%d data_len:%d\n",
+                                   i, tlv_len, data_len);
+                       ret = -EINVAL;
+                       goto err;
+               }
+
+               tag = FIELD_GET(SPECTRAL_TLV_HDR_TAG,
+                               __le32_to_cpu(tlv->header));
+               switch (tag) {
+               case ATH11K_SPECTRAL_TAG_SCAN_SUMMARY:
+                       /* HW bug in tlv length of summary report,
+                        * HW report 3 DWORD size but the data payload
+                        * is 4 DWORD size (16 bytes).
+                        * Need to remove this workaround once HW bug fixed
+                        */
+                       tlv_len = sizeof(*summary) - sizeof(*tlv);
+
+                       if (tlv_len < (sizeof(*summary) - sizeof(*tlv))) {
+                               ath11k_warn(ab, "failed to parse spectral summary at bytes %d tlv_len:%d\n",
+                                           i, tlv_len);
+                               ret = -EINVAL;
+                               goto err;
+                       }
+
+                       summary = (struct spectral_summary_fft_report *)tlv;
+                       ath11k_spectral_pull_summary(ar, &param->meta,
+                                                    summary, &summ_rpt);
+                       break;
+               case ATH11K_SPECTRAL_TAG_SCAN_SEARCH:
+                       if (tlv_len < (sizeof(struct spectral_search_fft_report) -
+                                      sizeof(*tlv))) {
+                               ath11k_warn(ab, "failed to parse spectral search fft at bytes %d\n",
+                                           i);
+                               ret = -EINVAL;
+                               goto err;
+                       }
+
+                       memset(fft_sample, 0, sample_sz);
+                       /* NOTE(review): summ_rpt is only initialized if a
+                        * SCAN_SUMMARY TLV preceded this one — confirm the
+                        * firmware guarantees that ordering.
+                        */
+                       ret = ath11k_spectral_process_fft(ar, &summ_rpt, tlv,
+                                                         fft_sample,
+                                                         data_len - i);
+                       if (ret) {
+                               ath11k_warn(ab, "failed to process spectral fft at bytes %d\n",
+                                           i);
+                               goto err;
+                       }
+                       quit = true;
+                       break;
+               }
+
+               i += sizeof(*tlv) + tlv_len;
+       }
+
+       /* Fix: without this, completing the loop without ever hitting a
+        * SCAN_SEARCH TLV (or data_len == 0) returned an uninitialized
+        * ret to the caller.
+        */
+       ret = 0;
+
+err:
+       kfree(fft_sample);
+unlock:
+       spin_unlock_bh(&ar->spectral.lock);
+       return ret;
+}
+
+/* Allocate and configure the spectral rx DMA ring for @ar: srng setup,
+ * ring configuration (responses per event, timeout, data callback),
+ * buffer attach and WMI ring registration.  On failure, everything set
+ * up so far is unwound in reverse order.
+ */
+static int ath11k_spectral_ring_alloc(struct ath11k *ar,
+                                     struct ath11k_dbring_cap *db_cap)
+{
+       struct ath11k_spectral *sp = &ar->spectral;
+       int ret;
+
+       ret = ath11k_dbring_srng_setup(ar, &sp->rx_ring,
+                                      0, db_cap->min_elem);
+       if (ret) {
+               ath11k_warn(ar->ab, "failed to setup db ring\n");
+               return ret;
+       }
+
+       ath11k_dbring_set_cfg(ar, &sp->rx_ring,
+                             ATH11K_SPECTRAL_NUM_RESP_PER_EVENT,
+                             ATH11K_SPECTRAL_EVENT_TIMEOUT_MS,
+                             ath11k_spectral_process_data);
+
+       ret = ath11k_dbring_buf_setup(ar, &sp->rx_ring, db_cap);
+       if (ret) {
+               ath11k_warn(ar->ab, "failed to setup db ring buffer\n");
+               goto srng_cleanup;
+       }
+
+       ret = ath11k_dbring_wmi_cfg_setup(ar, &sp->rx_ring,
+                                         WMI_DIRECT_BUF_SPECTRAL);
+       if (ret) {
+               ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
+               goto buffer_cleanup;
+       }
+
+       return 0;
+
+buffer_cleanup:
+       ath11k_dbring_buf_cleanup(ar, &sp->rx_ring);
+srng_cleanup:
+       ath11k_dbring_srng_cleanup(ar, &sp->rx_ring);
+       return ret;
+}
+
+/* Release the spectral rx ring; no-op when spectral was never enabled. */
+static inline void ath11k_spectral_ring_free(struct ath11k *ar)
+{
+       struct ath11k_spectral *sp = &ar->spectral;
+
+       if (!sp->enabled)
+               return;
+
+       ath11k_dbring_srng_cleanup(ar, &sp->rx_ring);
+       ath11k_dbring_buf_cleanup(ar, &sp->rx_ring);
+}
+
+/* Remove the spectral debugfs entries and close the relay channel.
+ * Safe on partially-registered state: debugfs_remove(NULL) is a no-op
+ * and the pointers are cleared, so repeated calls are harmless.
+ */
+static inline void ath11k_spectral_debug_unregister(struct ath11k *ar)
+{
+       debugfs_remove(ar->spectral.scan_bins);
+       ar->spectral.scan_bins = NULL;
+
+       debugfs_remove(ar->spectral.scan_count);
+       ar->spectral.scan_count = NULL;
+
+       debugfs_remove(ar->spectral.scan_ctl);
+       ar->spectral.scan_ctl = NULL;
+
+       if (ar->spectral.rfs_scan) {
+               relay_close(ar->spectral.rfs_scan);
+               ar->spectral.rfs_scan = NULL;
+       }
+}
+
+/* Disable spectral scan on the radio behind @arvif; no-op when the vif
+ * did not have spectral enabled.
+ */
+int ath11k_spectral_vif_stop(struct ath11k_vif *arvif)
+{
+       if (!arvif->spectral_enabled)
+               return 0;
+
+       return ath11k_spectral_scan_config(arvif->ar, ATH11K_SPECTRAL_DISABLED);
+}
+
+/* Discard any data buffered in the spectral relay channel. */
+void ath11k_spectral_reset_buffer(struct ath11k *ar)
+{
+       if (!ar->spectral.enabled)
+               return;
+
+       if (ar->spectral.rfs_scan)
+               relay_reset(ar->spectral.rfs_scan);
+}
+
+/* Tear down spectral state (debugfs, relay channel, rx ring) on every
+ * pdev that had it enabled, then mark the pdev disabled under the data
+ * lock so the rx path stops processing samples.
+ */
+void ath11k_spectral_deinit(struct ath11k_base *ab)
+{
+       struct ath11k_spectral *sp;
+       struct ath11k *ar;
+       int i;
+
+       for (i = 0; i < ab->num_radios; i++) {
+               ar = ab->pdevs[i].ar;
+               sp = &ar->spectral;
+
+               if (!sp->enabled)
+                       continue;
+
+               ath11k_spectral_debug_unregister(ar);
+               ath11k_spectral_ring_free(ar);
+
+               spin_lock_bh(&sp->lock);
+               sp->mode = ATH11K_SPECTRAL_DISABLED;
+               sp->enabled = false;
+               spin_unlock_bh(&sp->lock);
+       }
+}
+
+/* Create the per-pdev relay channel ("spectral_scan") and the three
+ * debugfs control files.  On any failure, everything registered so far
+ * is removed via ath11k_spectral_debug_unregister().
+ *
+ * NOTE(review): recent kernels' debugfs_create_file() returns an
+ * ERR_PTR on failure rather than NULL, so these NULL checks may never
+ * trigger — confirm against the target kernel version.
+ */
+static inline int ath11k_spectral_debug_register(struct ath11k *ar)
+{
+       int ret;
+
+       ar->spectral.rfs_scan = relay_open("spectral_scan",
+                                          ar->debug.debugfs_pdev,
+                                          ATH11K_SPECTRAL_SUB_BUFF_SIZE,
+                                          ATH11K_SPECTRAL_NUM_SUB_BUF,
+                                          &rfs_scan_cb, NULL);
+       if (!ar->spectral.rfs_scan) {
+               ath11k_warn(ar->ab, "failed to open relay in pdev %d\n",
+                           ar->pdev_idx);
+               return -EINVAL;
+       }
+
+       ar->spectral.scan_ctl = debugfs_create_file("spectral_scan_ctl",
+                                                   0600,
+                                                   ar->debug.debugfs_pdev, ar,
+                                                   &fops_scan_ctl);
+       if (!ar->spectral.scan_ctl) {
+               ath11k_warn(ar->ab, "failed to open debugfs in pdev %d\n",
+                           ar->pdev_idx);
+               ret = -EINVAL;
+               goto debug_unregister;
+       }
+
+       ar->spectral.scan_count = debugfs_create_file("spectral_count",
+                                                     0600,
+                                                     ar->debug.debugfs_pdev, ar,
+                                                     &fops_scan_count);
+       if (!ar->spectral.scan_count) {
+               ath11k_warn(ar->ab, "failed to open debugfs in pdev %d\n",
+                           ar->pdev_idx);
+               ret = -EINVAL;
+               goto debug_unregister;
+       }
+
+       ar->spectral.scan_bins = debugfs_create_file("spectral_bins",
+                                                    0600,
+                                                    ar->debug.debugfs_pdev, ar,
+                                                    &fops_scan_bins);
+       if (!ar->spectral.scan_bins) {
+               ath11k_warn(ar->ab, "failed to open debugfs in pdev %d\n",
+                           ar->pdev_idx);
+               ret = -EINVAL;
+               goto debug_unregister;
+       }
+
+       return 0;
+
+debug_unregister:
+       ath11k_spectral_debug_unregister(ar);
+       return ret;
+}
+
+/* Initialize spectral scan support for every pdev: rx ring, locks,
+ * default scan parameters and debugfs entries.
+ *
+ * Returns 0 (without doing anything) when the firmware does not
+ * advertise WMI_TLV_SERVICE_FREQINFO_IN_METADATA; pdevs lacking a
+ * direct-buffer capability are skipped rather than treated as fatal.
+ * On error, already-initialized pdevs are torn down.
+ */
+int ath11k_spectral_init(struct ath11k_base *ab)
+{
+       struct ath11k *ar;
+       struct ath11k_spectral *sp;
+       struct ath11k_dbring_cap db_cap;
+       int ret;
+       int i;
+
+       if (!test_bit(WMI_TLV_SERVICE_FREQINFO_IN_METADATA,
+                     ab->wmi_ab.svc_map)) {
+               ath11k_info(ab, "spectral not supported\n");
+               return 0;
+       }
+
+       for (i = 0; i < ab->num_radios; i++) {
+               ar = ab->pdevs[i].ar;
+               sp = &ar->spectral;
+
+               ret = ath11k_dbring_get_cap(ar->ab, ar->pdev_idx,
+                                           WMI_DIRECT_BUF_SPECTRAL,
+                                           &db_cap);
+               if (ret) {
+                       /* not fatal: this pdev simply has no spectral ring */
+                       ath11k_info(ab, "spectral not enabled for pdev %d\n", i);
+                       continue;
+               }
+
+               idr_init(&sp->rx_ring.bufs_idr);
+               spin_lock_init(&sp->rx_ring.idr_lock);
+               spin_lock_init(&sp->lock);
+
+               ret = ath11k_spectral_ring_alloc(ar, &db_cap);
+               if (ret) {
+                       ath11k_warn(ab, "failed to init spectral ring for pdev %d\n",
+                                   i);
+                       goto deinit;
+               }
+
+               spin_lock_bh(&sp->lock);
+
+               sp->mode = ATH11K_SPECTRAL_DISABLED;
+               sp->count = ATH11K_WMI_SPECTRAL_COUNT_DEFAULT;
+               sp->fft_size = ATH11K_WMI_SPECTRAL_FFT_SIZE_DEFAULT;
+               sp->enabled = true;
+
+               spin_unlock_bh(&sp->lock);
+
+               ret = ath11k_spectral_debug_register(ar);
+               if (ret) {
+                       ath11k_warn(ab, "failed to register spectral for pdev %d\n",
+                                   i);
+                       goto deinit;
+               }
+       }
+
+       return 0;
+
+deinit:
+       ath11k_spectral_deinit(ab);
+       return ret;
+}
+
+/* Return the active spectral mode, or DISABLED when spectral support
+ * is not enabled on this pdev.
+ */
+enum ath11k_spectral_mode ath11k_spectral_get_mode(struct ath11k *ar)
+{
+       return ar->spectral.enabled ? ar->spectral.mode :
+              ATH11K_SPECTRAL_DISABLED;
+}
+
+/* Return the spectral rx ring, or NULL when spectral is not enabled. */
+struct ath11k_dbring *ath11k_spectral_get_dbring(struct ath11k *ar)
+{
+       return ar->spectral.enabled ? &ar->spectral.rx_ring : NULL;
+}
diff --git a/drivers/net/wireless/ath/ath11k/spectral.h b/drivers/net/wireless/ath/ath11k/spectral.h
new file mode 100644 (file)
index 0000000..0817442
--- /dev/null
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_SPECTRAL_H
+#define ATH11K_SPECTRAL_H
+
+#include "../spectral_common.h"
+#include "dbring.h"
+
+/* enum ath11k_spectral_mode:
+ *
+ * @ATH11K_SPECTRAL_DISABLED: spectral mode is disabled
+ * @ATH11K_SPECTRAL_BACKGROUND: hardware sends samples when it is not
+ *     busy with something else.
+ * @ATH11K_SPECTRAL_MANUAL: spectral scan is enabled, triggering for
+ *     samples is performed manually.
+ */
+enum ath11k_spectral_mode {
+       ATH11K_SPECTRAL_DISABLED = 0,
+       ATH11K_SPECTRAL_BACKGROUND,
+       ATH11K_SPECTRAL_MANUAL,
+};
+
+/* Per-pdev spectral scan state. */
+struct ath11k_spectral {
+       struct ath11k_dbring rx_ring;   /* DMA ring delivering samples */
+       /* Protects enabled and mode; also held around the init-time
+        * writes of count and fft_size.
+        */
+       spinlock_t lock;
+       struct rchan *rfs_scan; /* relay(fs) channel for spectral scan */
+       struct dentry *scan_ctl;        /* debugfs "spectral_scan_ctl" */
+       struct dentry *scan_count;      /* debugfs "spectral_count" */
+       struct dentry *scan_bins;       /* debugfs "spectral_bins" */
+       enum ath11k_spectral_mode mode;
+       u16 count;      /* scan count, <= ATH11K_SPECTRAL_SCAN_COUNT_MAX */
+       u8 fft_size;    /* log2 of the number of FFT bins */
+       bool enabled;   /* set once the rx ring and debugfs are up */
+};
+
+#ifdef CONFIG_ATH11K_SPECTRAL
+
+/* Implemented in spectral.c; see that file for details. */
+int ath11k_spectral_init(struct ath11k_base *ab);
+void ath11k_spectral_deinit(struct ath11k_base *ab);
+int ath11k_spectral_vif_stop(struct ath11k_vif *arvif);
+void ath11k_spectral_reset_buffer(struct ath11k *ar);
+enum ath11k_spectral_mode ath11k_spectral_get_mode(struct ath11k *ar);
+struct ath11k_dbring *ath11k_spectral_get_dbring(struct ath11k *ar);
+
+#else
+
+/* No-op stubs used when spectral support is compiled out. */
+
+static inline int ath11k_spectral_init(struct ath11k_base *ab)
+{
+       return 0;
+}
+
+static inline void ath11k_spectral_deinit(struct ath11k_base *ab)
+{
+}
+
+static inline int ath11k_spectral_vif_stop(struct ath11k_vif *arvif)
+{
+       return 0;
+}
+
+static inline void ath11k_spectral_reset_buffer(struct ath11k *ar)
+{
+}
+
+static inline
+enum ath11k_spectral_mode ath11k_spectral_get_mode(struct ath11k *ar)
+{
+       return ATH11K_SPECTRAL_DISABLED;
+}
+
+static inline
+struct ath11k_dbring *ath11k_spectral_get_dbring(struct ath11k *ar)
+{
+       return NULL;
+}
+
+#endif /* CONFIG_ATH11K_SPECTRAL */
+#endif /* ATH11K_SPECTRAL_H */
index c2a9723..8e3437a 100644 (file)
@@ -27,6 +27,11 @@ struct wmi_tlv_svc_ready_parse {
        bool wmi_svc_bitmap_done;
 };
 
+/* Accumulated DMA ring capability entries (array pointer plus element
+ * count) gathered while parsing a TLV stream.
+ */
+struct wmi_tlv_dma_ring_caps_parse {
+       struct wmi_dma_ring_capabilities *dma_ring_caps;
+       u32 n_dma_ring_caps;
+};
+
 struct wmi_tlv_svc_rdy_ext_parse {
        struct ath11k_service_ext_param param;
        struct wmi_soc_mac_phy_hw_mode_caps *hw_caps;
@@ -39,15 +44,35 @@ struct wmi_tlv_svc_rdy_ext_parse {
        struct wmi_soc_hal_reg_capabilities *soc_hal_reg_caps;
        struct wmi_hal_reg_capabilities_ext *ext_hal_reg_caps;
        u32 n_ext_hal_reg_caps;
+       struct wmi_tlv_dma_ring_caps_parse dma_caps_parse;
        bool hw_mode_done;
        bool mac_phy_done;
        bool ext_hal_reg_done;
+       bool mac_phy_chainmask_combo_done;
+       bool mac_phy_chainmask_cap_done;
+       bool oem_dma_ring_cap_done;
+       bool dma_ring_cap_done;
+};
+
+/* Parse state for the extended service-ready (ext2) event. */
+struct wmi_tlv_svc_rdy_ext2_parse {
+       struct wmi_tlv_dma_ring_caps_parse dma_caps_parse;
+       bool dma_ring_cap_done; /* set once the ring-cap array is consumed */
+};
 
 struct wmi_tlv_rdy_parse {
        u32 num_extra_mac_addr;
 };
 
+/* Parse state for a DMA buffer release event: the fixed parameters
+ * plus the buffer-entry and metadata arrays, with flags marking which
+ * TLV arrays have already been consumed.
+ */
+struct wmi_tlv_dma_buf_release_parse {
+       struct ath11k_wmi_dma_buf_release_fixed_param fixed;
+       struct wmi_dma_buf_release_entry *buf_entry;
+       struct wmi_dma_buf_release_meta_data *meta_data;
+       u32 num_buf_entry;
+       u32 num_meta;
+       bool buf_entry_done;
+       bool meta_data_done;
+};
+
 static const struct wmi_tlv_policy wmi_tlv_policies[] = {
        [WMI_TAG_ARRAY_BYTE]
                = { .min_len = 0 },
@@ -368,6 +393,17 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
        memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
               sizeof(struct ath11k_ppe_threshold));
 
+       cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
+       cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
+       cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
+       cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
+       cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
+       cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+       memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
+              sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+       memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
+              sizeof(struct ath11k_ppe_threshold));
+
        return 0;
 }
 
@@ -1692,10 +1728,10 @@ ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
         */
        if (param->auth_flag)
                cmd->peer_flags |= WMI_PEER_AUTH;
-       if (param->need_ptk_4_way)
+       if (param->need_ptk_4_way) {
                cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
-       else
-               cmd->peer_flags &= ~WMI_PEER_NEED_PTK_4_WAY;
+               cmd->peer_flags &= ~WMI_PEER_AUTH;
+       }
        if (param->need_gtk_2_way)
                cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
        /* safe mode bypass the 4-way handshake */
@@ -1778,6 +1814,7 @@ int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
        cmd->peer_he_cap_info = param->peer_he_cap_macinfo[0];
        cmd->peer_he_cap_info_ext = param->peer_he_cap_macinfo[1];
        cmd->peer_he_cap_info_internal = param->peer_he_cap_macinfo_internal;
+       cmd->peer_he_caps_6ghz = param->peer_he_caps_6ghz;
        cmd->peer_he_ops = param->peer_he_ops;
        memcpy(&cmd->peer_he_cap_phy, &param->peer_he_cap_phyinfo,
               sizeof(param->peer_he_cap_phyinfo));
@@ -1831,6 +1868,7 @@ int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
 
        /* HE Rates */
        cmd->peer_he_mcs = param->peer_he_mcs_count;
+       cmd->min_data_rate = param->min_data_rate;
 
        ptr += sizeof(*mcs);
 
@@ -1886,6 +1924,8 @@ void ath11k_wmi_start_scan_init(struct ath11k *ar,
        arg->dwell_time_active = 50;
        arg->dwell_time_active_2g = 0;
        arg->dwell_time_passive = 150;
+       arg->dwell_time_active_6g = 40;
+       arg->dwell_time_passive_6g = 30;
        arg->min_rest_time = 50;
        arg->max_rest_time = 500;
        arg->repeat_probe_time = 0;
@@ -1990,6 +2030,8 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
        int i, ret, len;
        u32 *tmp_ptr;
        u8 extraie_len_with_pad = 0;
+       struct hint_short_ssid *s_ssid = NULL;
+       struct hint_bssid *hint_bssid = NULL;
 
        len = sizeof(*cmd);
 
@@ -2011,6 +2053,14 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
                        roundup(params->extraie.len, sizeof(u32));
        len += extraie_len_with_pad;
 
+       if (params->num_hint_bssid)
+               len += TLV_HDR_SIZE +
+                      params->num_hint_bssid * sizeof(struct hint_bssid);
+
+       if (params->num_hint_s_ssid)
+               len += TLV_HDR_SIZE +
+                      params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
+
        skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
        if (!skb)
                return -ENOMEM;
@@ -2032,6 +2082,8 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
        cmd->dwell_time_active = params->dwell_time_active;
        cmd->dwell_time_active_2g = params->dwell_time_active_2g;
        cmd->dwell_time_passive = params->dwell_time_passive;
+       cmd->dwell_time_active_6g = params->dwell_time_active_6g;
+       cmd->dwell_time_passive_6g = params->dwell_time_passive_6g;
        cmd->min_rest_time = params->min_rest_time;
        cmd->max_rest_time = params->max_rest_time;
        cmd->repeat_probe_time = params->repeat_probe_time;
@@ -2109,6 +2161,68 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
 
        ptr += extraie_len_with_pad;
 
+       /* Optional 6 GHz scan hints: short-SSID and BSSID hint TLVs.
+        *
+        * NOTE(review): these hint TLVs are emitted twice -- once in the
+        * conditional blocks here and once more in the unconditional
+        * blocks below.  The second pass advances ptr beyond the space
+        * accounted for in the skb length computed earlier, which looks
+        * like a merge duplication; confirm against upstream, which
+        * keeps only the unconditional form (TLV header always written,
+        * entries filled only when present).
+        */
+       if (params->num_hint_s_ssid) {
+               len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
+               tlv = ptr;
+               tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+                             FIELD_PREP(WMI_TLV_LEN, len);
+               ptr += TLV_HDR_SIZE;
+               s_ssid = ptr;
+               for (i = 0; i < params->num_hint_s_ssid; ++i) {
+                       s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags;
+                       s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid;
+                       s_ssid++;
+               }
+               ptr += len;
+       }
+
+       if (params->num_hint_bssid) {
+               len = params->num_hint_bssid * sizeof(struct hint_bssid);
+               tlv = ptr;
+               tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+                             FIELD_PREP(WMI_TLV_LEN, len);
+               ptr += TLV_HDR_SIZE;
+               hint_bssid = ptr;
+               for (i = 0; i < params->num_hint_bssid; ++i) {
+                       hint_bssid->freq_flags =
+                               params->hint_bssid[i].freq_flags;
+                       /* NOTE(review): ether_addr_copy() takes (dst, src);
+                        * the caller-supplied hint is passed as the
+                        * destination here, so the command buffer is read
+                        * rather than filled -- verify argument order.
+                        */
+                       ether_addr_copy(&params->hint_bssid[i].bssid.addr[0],
+                                       &hint_bssid->bssid.addr[0]);
+                       hint_bssid++;
+               }
+       }
+
+       /* Second, unconditional emission of the same hint TLVs (see the
+        * duplication NOTE above).
+        */
+       len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
+       tlv = ptr;
+       tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+                     FIELD_PREP(WMI_TLV_LEN, len);
+       ptr += TLV_HDR_SIZE;
+       if (params->num_hint_s_ssid) {
+               s_ssid = ptr;
+               for (i = 0; i < params->num_hint_s_ssid; ++i) {
+                       s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags;
+                       s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid;
+                       s_ssid++;
+               }
+       }
+       ptr += len;
+
+       len = params->num_hint_bssid * sizeof(struct hint_bssid);
+       tlv = ptr;
+       tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+                     FIELD_PREP(WMI_TLV_LEN, len);
+       ptr += TLV_HDR_SIZE;
+       if (params->num_hint_bssid) {
+               hint_bssid = ptr;
+               for (i = 0; i < params->num_hint_bssid; ++i) {
+                       hint_bssid->freq_flags =
+                               params->hint_bssid[i].freq_flags;
+                       /* NOTE(review): same (dst, src) order concern as in
+                        * the first hint_bssid loop above -- verify.
+                        */
+                       ether_addr_copy(&params->hint_bssid[i].bssid.addr[0],
+                                       &hint_bssid->bssid.addr[0]);
+                       hint_bssid++;
+               }
+       }
+
        ret = ath11k_wmi_cmd_send(wmi, skb,
                                  WMI_START_SCAN_CMDID);
        if (ret) {
@@ -2178,91 +2292,110 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
        struct wmi_tlv *tlv;
        void *ptr;
        int i, ret, len;
+       u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
        u32 *reg1, *reg2;
 
-       len = sizeof(*cmd) + TLV_HDR_SIZE +
-                sizeof(*chan_info) * chan_list->nallchans;
+       tchan_info = &chan_list->ch_param[0];
+       while (chan_list->nallchans) {
+               len = sizeof(*cmd) + TLV_HDR_SIZE;
+               max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
+                       sizeof(*chan_info);
 
-       skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
-       if (!skb)
-               return -ENOMEM;
+               if (chan_list->nallchans > max_chan_limit)
+                       num_send_chans = max_chan_limit;
+               else
+                       num_send_chans = chan_list->nallchans;
 
-       cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
-       cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_CHAN_LIST_CMD) |
-                         FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+               chan_list->nallchans -= num_send_chans;
+               len += sizeof(*chan_info) * num_send_chans;
 
-       ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
-                  "WMI no.of chan = %d len = %d\n", chan_list->nallchans, len);
-       cmd->pdev_id = chan_list->pdev_id;
-       cmd->num_scan_chans = chan_list->nallchans;
-
-       ptr = skb->data + sizeof(*cmd);
+               skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+               if (!skb)
+                       return -ENOMEM;
 
-       len = sizeof(*chan_info) * chan_list->nallchans;
-       tlv = ptr;
-       tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
-                     FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
-       ptr += TLV_HDR_SIZE;
+               cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
+               cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_CHAN_LIST_CMD) |
+                       FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+               cmd->pdev_id = chan_list->pdev_id;
+               cmd->num_scan_chans = num_send_chans;
+               if (num_sends)
+                       cmd->flags |= WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG;
 
-       tchan_info = &chan_list->ch_param[0];
+               ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+                          "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
+                          num_send_chans, len, cmd->pdev_id, num_sends);
 
-       for (i = 0; i < chan_list->nallchans; ++i) {
-               chan_info = ptr;
-               memset(chan_info, 0, sizeof(*chan_info));
-               len = sizeof(*chan_info);
-               chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
-                                                  WMI_TAG_CHANNEL) |
-                                       FIELD_PREP(WMI_TLV_LEN,
-                                                  len - TLV_HDR_SIZE);
-
-               reg1 = &chan_info->reg_info_1;
-               reg2 = &chan_info->reg_info_2;
-               chan_info->mhz = tchan_info->mhz;
-               chan_info->band_center_freq1 = tchan_info->cfreq1;
-               chan_info->band_center_freq2 = tchan_info->cfreq2;
-
-               if (tchan_info->is_chan_passive)
-                       chan_info->info |= WMI_CHAN_INFO_PASSIVE;
-               if (tchan_info->allow_he)
-                       chan_info->info |= WMI_CHAN_INFO_ALLOW_HE;
-               else if (tchan_info->allow_vht)
-                       chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT;
-               else if (tchan_info->allow_ht)
-                       chan_info->info |= WMI_CHAN_INFO_ALLOW_HT;
-               if (tchan_info->half_rate)
-                       chan_info->info |= WMI_CHAN_INFO_HALF_RATE;
-               if (tchan_info->quarter_rate)
-                       chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;
-
-               chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
-                                             tchan_info->phy_mode);
-               *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR,
-                                   tchan_info->minpower);
-               *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
-                                   tchan_info->maxpower);
-               *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
-                                   tchan_info->maxregpower);
-               *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS,
-                                   tchan_info->reg_class_id);
-               *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
-                                   tchan_info->antennamax);
+               ptr = skb->data + sizeof(*cmd);
 
-               ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
-                          "WMI chan scan list chan[%d] = %u\n",
-                          i, chan_info->mhz);
+               len = sizeof(*chan_info) * num_send_chans;
+               tlv = ptr;
+               tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+                             FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+               ptr += TLV_HDR_SIZE;
 
-               ptr += sizeof(*chan_info);
+               for (i = 0; i < num_send_chans; ++i) {
+                       chan_info = ptr;
+                       memset(chan_info, 0, sizeof(*chan_info));
+                       len = sizeof(*chan_info);
+                       chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+                                                          WMI_TAG_CHANNEL) |
+                                               FIELD_PREP(WMI_TLV_LEN,
+                                                          len - TLV_HDR_SIZE);
+
+                       reg1 = &chan_info->reg_info_1;
+                       reg2 = &chan_info->reg_info_2;
+                       chan_info->mhz = tchan_info->mhz;
+                       chan_info->band_center_freq1 = tchan_info->cfreq1;
+                       chan_info->band_center_freq2 = tchan_info->cfreq2;
+
+                       if (tchan_info->is_chan_passive)
+                               chan_info->info |= WMI_CHAN_INFO_PASSIVE;
+                       if (tchan_info->allow_he)
+                               chan_info->info |= WMI_CHAN_INFO_ALLOW_HE;
+                       else if (tchan_info->allow_vht)
+                               chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT;
+                       else if (tchan_info->allow_ht)
+                               chan_info->info |= WMI_CHAN_INFO_ALLOW_HT;
+                       if (tchan_info->half_rate)
+                               chan_info->info |= WMI_CHAN_INFO_HALF_RATE;
+                       if (tchan_info->quarter_rate)
+                               chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;
+                       if (tchan_info->psc_channel)
+                               chan_info->info |= WMI_CHAN_INFO_PSC;
+
+                       chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
+                                                     tchan_info->phy_mode);
+                       *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR,
+                                           tchan_info->minpower);
+                       *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
+                                           tchan_info->maxpower);
+                       *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
+                                           tchan_info->maxregpower);
+                       *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS,
+                                           tchan_info->reg_class_id);
+                       *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
+                                           tchan_info->antennamax);
+
+                       ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+                                  "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
+                                  i, chan_info->mhz, chan_info->info);
+
+                       ptr += sizeof(*chan_info);
+
+                       tchan_info++;
+               }
 
-               tchan_info++;
-       }
+               ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
+               if (ret) {
+                       ath11k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
+                       dev_kfree_skb(skb);
+                       return ret;
+               }
 
-       ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
-       if (ret) {
-               ath11k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
-               dev_kfree_skb(skb);
+               num_sends++;
        }
 
-       return ret;
+       return 0;
 }
 
 int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
@@ -3265,6 +3398,236 @@ int ath11k_wmi_cmd_init(struct ath11k_base *ab)
        return ath11k_init_cmd_send(&wmi_sc->wmi[0], &init_param);
 }
 
+/* Configure spectral scan for a vdev via
+ * WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID; the parameter struct is
+ * copied verbatim into the command body.
+ *
+ * Returns 0 on success, negative errno otherwise.  The skb is freed
+ * here only on failure; on success it is presumably consumed by
+ * ath11k_wmi_cmd_send() -- hence no free on that path.
+ */
+int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar,
+                                 struct ath11k_wmi_vdev_spectral_conf_param *param)
+{
+       struct ath11k_wmi_vdev_spectral_conf_cmd *cmd;
+       struct sk_buff *skb;
+       int ret;
+
+       skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct ath11k_wmi_vdev_spectral_conf_cmd *)skb->data;
+       cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+                                    WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD) |
+                         FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+       memcpy(&cmd->param, param, sizeof(*param));
+
+       ret = ath11k_wmi_cmd_send(ar->wmi, skb,
+                                 WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
+       if (ret) {
+               ath11k_warn(ar->ab,
+                           "failed to send spectral scan config wmi cmd\n");
+               goto err;
+       }
+
+       ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+                  "WMI spectral scan config cmd vdev_id 0x%x\n",
+                  param->vdev_id);
+
+       return 0;
+err:
+       dev_kfree_skb(skb);
+       return ret;
+}
+
+/* Enable/trigger spectral scan on a vdev via
+ * WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID.  @trigger and @enable are passed
+ * through as the firmware's trigger_cmd / enable_cmd fields.
+ *
+ * Returns 0 on success, negative errno otherwise; the skb is freed only
+ * on the failure path (send presumably consumes it on success).
+ */
+int ath11k_wmi_vdev_spectral_enable(struct ath11k *ar, u32 vdev_id,
+                                   u32 trigger, u32 enable)
+{
+       struct ath11k_wmi_vdev_spectral_enable_cmd *cmd;
+       struct sk_buff *skb;
+       int ret;
+
+       skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct ath11k_wmi_vdev_spectral_enable_cmd *)skb->data;
+       cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+                                    WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD) |
+                         FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+       cmd->vdev_id = vdev_id;
+       cmd->trigger_cmd = trigger;
+       cmd->enable_cmd = enable;
+
+       ret = ath11k_wmi_cmd_send(ar->wmi, skb,
+                                 WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
+       if (ret) {
+               ath11k_warn(ar->ab,
+                           "failed to send spectral enable wmi cmd\n");
+               goto err;
+       }
+
+       ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+                  "WMI spectral enable cmd vdev id 0x%x\n",
+                  vdev_id);
+
+       return 0;
+err:
+       dev_kfree_skb(skb);
+       return ret;
+}
+
+/* Program a direct-buffer DMA ring in firmware via
+ * WMI_PDEV_DMA_RING_CFG_REQ_CMDID: ring base / head-index / tail-index
+ * DMA addresses plus element count, buffer size and event pacing are
+ * copied field-by-field from @param into the command.
+ *
+ * Returns 0 on success, negative errno otherwise; the skb is freed only
+ * on the failure path.
+ */
+int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar,
+                                struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *param)
+{
+       struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
+       struct sk_buff *skb;
+       int ret;
+
+       skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+       if (!skb)
+               return -ENOMEM;
+
+       cmd = (struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
+       cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DMA_RING_CFG_REQ) |
+                         FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+       cmd->pdev_id            = param->pdev_id;
+       cmd->module_id          = param->module_id;
+       cmd->base_paddr_lo      = param->base_paddr_lo;
+       cmd->base_paddr_hi      = param->base_paddr_hi;
+       cmd->head_idx_paddr_lo  = param->head_idx_paddr_lo;
+       cmd->head_idx_paddr_hi  = param->head_idx_paddr_hi;
+       cmd->tail_idx_paddr_lo  = param->tail_idx_paddr_lo;
+       cmd->tail_idx_paddr_hi  = param->tail_idx_paddr_hi;
+       cmd->num_elems          = param->num_elems;
+       cmd->buf_size           = param->buf_size;
+       cmd->num_resp_per_event = param->num_resp_per_event;
+       cmd->event_timeout_ms   = param->event_timeout_ms;
+
+       ret = ath11k_wmi_cmd_send(ar->wmi, skb,
+                                 WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
+       if (ret) {
+               ath11k_warn(ar->ab,
+                           "failed to send dma ring cfg req wmi cmd\n");
+               goto err;
+       }
+
+       ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+                  "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
+                  param->pdev_id);
+
+       return 0;
+err:
+       dev_kfree_skb(skb);
+       return ret;
+}
+
+/* ath11k_wmi_tlv_iter() callback: count WMI_TAG_DMA_BUF_RELEASE_ENTRY
+ * TLVs, bounded by the entry count announced in the event's fixed
+ * header.  -EPROTO on an unexpected tag, -ENOBUFS past the bound.
+ */
+static int ath11k_wmi_tlv_dma_buf_entry_parse(struct ath11k_base *soc,
+                                             u16 tag, u16 len,
+                                             const void *ptr, void *data)
+{
+       struct wmi_tlv_dma_buf_release_parse *parse = data;
+
+       if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
+               return -EPROTO;
+
+       if (parse->num_buf_entry >= parse->fixed.num_buf_release_entry)
+               return -ENOBUFS;
+
+       parse->num_buf_entry++;
+       return 0;
+}
+
+/* ath11k_wmi_tlv_iter() callback: count spectral meta-data TLVs
+ * (WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA), bounded by the count in
+ * the fixed header.  -EPROTO on an unexpected tag, -ENOBUFS past the
+ * bound.
+ */
+static int ath11k_wmi_tlv_dma_buf_meta_parse(struct ath11k_base *soc,
+                                            u16 tag, u16 len,
+                                            const void *ptr, void *data)
+{
+       struct wmi_tlv_dma_buf_release_parse *parse = data;
+
+       if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
+               return -EPROTO;
+
+       if (parse->num_meta >= parse->fixed.num_meta_data_entry)
+               return -ENOBUFS;
+
+       parse->num_meta++;
+       return 0;
+}
+
+/* Top-level TLV dispatcher for the DMA buffer release event: copies the
+ * fixed header (translating the firmware pdev id to the host id via
+ * DP_HW2SW_MACID) and records the two WMI_TAG_ARRAY_STRUCT payloads in
+ * the order they appear -- buffer entries first, meta data second --
+ * validating each with its dedicated sub-parser.
+ */
+static int ath11k_wmi_tlv_dma_buf_parse(struct ath11k_base *ab,
+                                       u16 tag, u16 len,
+                                       const void *ptr, void *data)
+{
+       struct wmi_tlv_dma_buf_release_parse *parse = data;
+       int ret;
+
+       switch (tag) {
+       case WMI_TAG_DMA_BUF_RELEASE:
+               memcpy(&parse->fixed, ptr,
+                      sizeof(struct ath11k_wmi_dma_buf_release_fixed_param));
+               parse->fixed.pdev_id = DP_HW2SW_MACID(parse->fixed.pdev_id);
+               break;
+       case WMI_TAG_ARRAY_STRUCT:
+               /* First array TLV: the buffer-entry list. */
+               if (!parse->buf_entry_done) {
+                       parse->num_buf_entry = 0;
+                       parse->buf_entry = (struct wmi_dma_buf_release_entry *)ptr;
+
+                       ret = ath11k_wmi_tlv_iter(ab, ptr, len,
+                                                 ath11k_wmi_tlv_dma_buf_entry_parse,
+                                                 parse);
+                       if (ret) {
+                               ath11k_warn(ab, "failed to parse dma buf entry tlv %d\n",
+                                           ret);
+                               return ret;
+                       }
+
+                       parse->buf_entry_done = true;
+               } else if (!parse->meta_data_done) {
+                       /* Second array TLV: the (spectral) meta data. */
+                       parse->num_meta = 0;
+                       parse->meta_data = (struct wmi_dma_buf_release_meta_data *)ptr;
+
+                       ret = ath11k_wmi_tlv_iter(ab, ptr, len,
+                                                 ath11k_wmi_tlv_dma_buf_meta_parse,
+                                                 parse);
+                       if (ret) {
+                               ath11k_warn(ab, "failed to parse dma buf meta tlv %d\n",
+                                           ret);
+                               return ret;
+                       }
+
+                       parse->meta_data_done = true;
+               }
+               break;
+       default:
+               /* Unknown tags are tolerated for forward compatibility. */
+               break;
+       }
+       return 0;
+}
+
+/* Handle WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID: parse the event TLVs
+ * into a release descriptor and hand it to the direct-buffer ring
+ * layer.  Errors are logged and otherwise dropped (event handlers
+ * return void).
+ */
+static void ath11k_wmi_pdev_dma_ring_buf_release_event(struct ath11k_base *ab,
+                                                      struct sk_buff *skb)
+{
+       struct wmi_tlv_dma_buf_release_parse parse = { };
+       struct ath11k_dbring_buf_release_event param;
+       int ret;
+
+       ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+                                 ath11k_wmi_tlv_dma_buf_parse,
+                                 &parse);
+       if (ret) {
+               ath11k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
+               return;
+       }
+
+       param.fixed             = parse.fixed;
+       param.buf_entry         = parse.buf_entry;
+       param.num_buf_entry     = parse.num_buf_entry;
+       param.meta_data         = parse.meta_data;
+       param.num_meta          = parse.num_meta;
+
+       ret = ath11k_dbring_buffer_release_event(ab, &param);
+       if (ret) {
+               ath11k_warn(ab, "failed to handle dma buf release event %d\n", ret);
+               return;
+       }
+}
+
 static int ath11k_wmi_tlv_hw_mode_caps_parse(struct ath11k_base *soc,
                                             u16 tag, u16 len,
                                             const void *ptr, void *data)
@@ -3445,6 +3808,95 @@ static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
        return 0;
 }
 
+/* ath11k_wmi_tlv_iter() callback: count WMI_TAG_DMA_RING_CAPABILITIES
+ * TLVs so the caller can size the db_caps allocation; -EPROTO on any
+ * other tag.
+ */
+static int ath11k_wmi_tlv_dma_ring_caps_parse(struct ath11k_base *soc,
+                                             u16 tag, u16 len,
+                                             const void *ptr, void *data)
+{
+       struct wmi_tlv_dma_ring_caps_parse *parse = data;
+
+       if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
+               return -EPROTO;
+
+       parse->n_dma_ring_caps++;
+       return 0;
+}
+
+/* Allocate ab->db_caps for @num_cap direct-buffer ring capability
+ * entries and record the count in ab->num_db_cap.
+ *
+ * NOTE(review): GFP_ATOMIC is used, presumably because this is reached
+ * from the WMI event receive path -- confirm whether a sleeping
+ * allocation would be permissible here.
+ */
+static int ath11k_wmi_alloc_dbring_caps(struct ath11k_base *ab,
+                                       u32 num_cap)
+{
+       size_t sz;
+       void *ptr;
+
+       sz = num_cap * sizeof(struct ath11k_dbring_cap);
+       ptr = kzalloc(sz, GFP_ATOMIC);
+       if (!ptr)
+               return -ENOMEM;
+
+       ab->db_caps = ptr;
+       ab->num_db_cap = num_cap;
+
+       return 0;
+}
+
+/* Release ab->db_caps; clearing the pointer makes repeated calls safe
+ * (kfree(NULL) is a no-op).
+ */
+static void ath11k_wmi_free_dbring_caps(struct ath11k_base *ab)
+{
+       kfree(ab->db_caps);
+       ab->db_caps = NULL;
+}
+
+/* Parse a WMI_TAG_ARRAY_STRUCT of DMA ring capabilities into
+ * ab->db_caps.  Reached from both the EXT and EXT2 service-ready
+ * paths; a second invocation is ignored once ab->num_db_cap is set.
+ * An out-of-range module id aborts the parse and frees the partially
+ * filled table.
+ */
+static int ath11k_wmi_tlv_dma_ring_caps(struct ath11k_base *ab,
+                                       u16 len, const void *ptr, void *data)
+{
+       struct wmi_tlv_dma_ring_caps_parse *dma_caps_parse = data;
+       struct wmi_dma_ring_capabilities *dma_caps;
+       struct ath11k_dbring_cap *dir_buff_caps;
+       int ret;
+       u32 i;
+
+       dma_caps_parse->n_dma_ring_caps = 0;
+       dma_caps = (struct wmi_dma_ring_capabilities *)ptr;
+       ret = ath11k_wmi_tlv_iter(ab, ptr, len,
+                                 ath11k_wmi_tlv_dma_ring_caps_parse,
+                                 dma_caps_parse);
+       if (ret) {
+               ath11k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
+               return ret;
+       }
+
+       if (!dma_caps_parse->n_dma_ring_caps)
+               return 0;
+
+       /* Already populated by an earlier service-ready event. */
+       if (ab->num_db_cap) {
+               ath11k_warn(ab, "Already processed, so ignoring dma ring caps\n");
+               return 0;
+       }
+
+       ret = ath11k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
+       if (ret)
+               return ret;
+
+       dir_buff_caps = ab->db_caps;
+       for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
+               if (dma_caps[i].module_id >= WMI_DIRECT_BUF_MAX) {
+                       ath11k_warn(ab, "Invalid module id %d\n", dma_caps[i].module_id);
+                       ret = -EINVAL;
+                       goto free_dir_buff;
+               }
+
+               dir_buff_caps[i].id = dma_caps[i].module_id;
+               dir_buff_caps[i].pdev_id = DP_HW2SW_MACID(dma_caps[i].pdev_id);
+               dir_buff_caps[i].min_elem = dma_caps[i].min_elem;
+               dir_buff_caps[i].min_buf_sz = dma_caps[i].min_buf_sz;
+               dir_buff_caps[i].min_buf_align = dma_caps[i].min_buf_align;
+       }
+
+       return 0;
+
+free_dir_buff:
+       ath11k_wmi_free_dbring_caps(ab);
+       return ret;
+}
+
 static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base *ab,
                                            u16 tag, u16 len,
                                            const void *ptr, void *data)
@@ -3501,7 +3953,19 @@ static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base *ab,
                                return ret;
 
                        svc_rdy_ext->ext_hal_reg_done = true;
-                       complete(&ab->wmi_ab.service_ready);
+               } else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
+                       svc_rdy_ext->mac_phy_chainmask_combo_done = true;
+               } else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
+                       svc_rdy_ext->mac_phy_chainmask_cap_done = true;
+               } else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
+                       svc_rdy_ext->oem_dma_ring_cap_done = true;
+               } else if (!svc_rdy_ext->dma_ring_cap_done) {
+                       ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr,
+                                                          &svc_rdy_ext->dma_caps_parse);
+                       if (ret)
+                               return ret;
+
+                       svc_rdy_ext->dma_ring_cap_done = true;
                }
                break;
 
@@ -3522,11 +3986,66 @@ static int ath11k_service_ready_ext_event(struct ath11k_base *ab,
                                  &svc_rdy_ext);
        if (ret) {
                ath11k_warn(ab, "failed to parse tlv %d\n", ret);
-               return ret;
+               goto err;
        }
 
+       if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
+               complete(&ab->wmi_ab.service_ready);
+
        kfree(svc_rdy_ext.mac_phy_caps);
        return 0;
+
+err:
+       ath11k_wmi_free_dbring_caps(ab);
+       return ret;
+}
+
+/* TLV dispatcher for WMI_SERVICE_READY_EXT2_EVENTID: only the first
+ * WMI_TAG_ARRAY_STRUCT (the DMA ring capability array) is consumed;
+ * everything else is ignored.
+ */
+static int ath11k_wmi_tlv_svc_rdy_ext2_parse(struct ath11k_base *ab,
+                                            u16 tag, u16 len,
+                                            const void *ptr, void *data)
+{
+       struct wmi_tlv_svc_rdy_ext2_parse *parse = data;
+       int ret;
+
+       switch (tag) {
+       case WMI_TAG_ARRAY_STRUCT:
+               if (!parse->dma_ring_cap_done) {
+                       ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr,
+                                                          &parse->dma_caps_parse);
+                       if (ret)
+                               return ret;
+
+                       parse->dma_ring_cap_done = true;
+               }
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+/* Handle WMI_SERVICE_READY_EXT2_EVENTID: parse the TLVs and then signal
+ * wmi_ab.service_ready completion (firmware sends EXT2 after EXT when
+ * the EXT2 service bit is set).  On parse failure any db-ring
+ * capabilities allocated so far are released.
+ */
+static int ath11k_service_ready_ext2_event(struct ath11k_base *ab,
+                                          struct sk_buff *skb)
+{
+       struct wmi_tlv_svc_rdy_ext2_parse svc_rdy_ext2 = { };
+       int ret;
+
+       ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+                                 ath11k_wmi_tlv_svc_rdy_ext2_parse,
+                                 &svc_rdy_ext2);
+       if (ret) {
+               ath11k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
+               goto err;
+       }
+
+       complete(&ab->wmi_ab.service_ready);
+
+       return 0;
+
+err:
+       ath11k_wmi_free_dbring_caps(ab);
+       return ret;
 }
 
 static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buff *skb,
@@ -3822,6 +4341,7 @@ static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base *ab,
        }
 
        hdr->pdev_id =  ev->pdev_id;
+       hdr->chan_freq = ev->chan_freq;
        hdr->channel =  ev->channel;
        hdr->snr =  ev->snr;
        hdr->rate =  ev->rate;
@@ -5193,7 +5713,9 @@ static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
        if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
                status->flag |= RX_FLAG_MMIC_ERROR;
 
-       if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
+       if (rx_ev.chan_freq >= ATH11K_MIN_6G_FREQ) {
+               status->band = NL80211_BAND_6GHZ;
+       } else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
                status->band = NL80211_BAND_2GHZ;
        } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH11K_MAX_5G_CHAN) {
                status->band = NL80211_BAND_5GHZ;
@@ -5206,9 +5728,10 @@ static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
                goto exit;
        }
 
-       if (rx_ev.phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
+       if (rx_ev.phy_mode == MODE_11B &&
+           (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
                ath11k_dbg(ab, ATH11K_DBG_WMI,
-                          "wmi mgmt rx 11b (CCK) on 5GHz\n");
+                          "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
 
        sband = &ar->mac.sbands[status->band];
 
@@ -5933,6 +6456,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
        case WMI_SERVICE_READY_EXT_EVENTID:
                ath11k_service_ready_ext_event(ab, skb);
                break;
+       case WMI_SERVICE_READY_EXT2_EVENTID:
+               ath11k_service_ready_ext2_event(ab, skb);
+               break;
        case WMI_REG_CHAN_LIST_CC_EVENTID:
                ath11k_reg_chan_list_event(ab, skb);
                break;
@@ -5994,12 +6520,16 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
        case WMI_PDEV_TEMPERATURE_EVENTID:
                ath11k_wmi_pdev_temperature_event(ab, skb);
                break;
+       case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
+               ath11k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
+               break;
        /* add Unsupported events here */
        case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
        case WMI_VDEV_DELETE_RESP_EVENTID:
        case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
        case WMI_TWT_ENABLE_EVENTID:
        case WMI_TWT_DISABLE_EVENTID:
+       case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
                ath11k_dbg(ab, ATH11K_DBG_WMI,
                           "ignoring unsupported event 0x%x\n", id);
                break;
@@ -6213,4 +6743,6 @@ void ath11k_wmi_detach(struct ath11k_base *ab)
 
        for (i = 0; i < ab->htc.wmi_ep_count; i++)
                ath11k_wmi_pdev_detach(ab, i);
+
+       ath11k_wmi_free_dbring_caps(ab);
 }
index b9f3e55..5a32ba0 100644 (file)
@@ -24,6 +24,8 @@ struct ath11k_fw_stats;
 #define HE_PET_8_USEC            1
 #define HE_PET_16_USEC           2
 
+#define WMI_MAX_CHAINS          8
+
 #define WMI_MAX_NUM_SS                    MAX_HE_NSS
 #define WMI_MAX_NUM_RU                    MAX_HE_RU
 
@@ -50,10 +52,20 @@ struct wmi_tlv {
 #define WMI_MAX_MEM_REQS        32
 #define ATH11K_MAX_HW_LISTEN_INTERVAL 5
 
+#define WLAN_SCAN_MAX_HINT_S_SSID        10
+#define WLAN_SCAN_MAX_HINT_BSSID         10
+#define MAX_RNR_BSS                    5
+
+#define WLAN_SCAN_MAX_HINT_S_SSID        10
+#define WLAN_SCAN_MAX_HINT_BSSID         10
+#define MAX_RNR_BSS                    5
+
 #define WLAN_SCAN_PARAMS_MAX_SSID    16
 #define WLAN_SCAN_PARAMS_MAX_BSSID   4
 #define WLAN_SCAN_PARAMS_MAX_IE_LEN  256
 
+#define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1
+
 #define WMI_BA_MODE_BUFFER_SIZE_256  3
 /*
  * HW mode config type replicated from FW header
@@ -586,6 +598,11 @@ enum wmi_tlv_event_id {
        WMI_PDEV_DMA_RING_CFG_RSP_EVENTID,
        WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID,
        WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID,
+       WMI_PDEV_CSC_SWITCH_COUNT_STATUS_EVENTID,
+       WMI_PDEV_COLD_BOOT_CAL_DATA_EVENTID,
+       WMI_PDEV_RAP_INFO_EVENTID,
+       WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID,
+       WMI_SERVICE_READY_EXT2_EVENTID,
        WMI_VDEV_START_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_VDEV),
        WMI_VDEV_STOPPED_EVENTID,
        WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
@@ -1011,6 +1028,7 @@ enum wmi_tlv_vdev_param {
        WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME,
        WMI_VDEV_PARAM_BA_MODE = 0x7e,
        WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87,
+       WMI_VDEV_PARAM_6GHZ_PARAMS = 0x99,
        WMI_VDEV_PARAM_PROTOTYPE = 0x8000,
        WMI_VDEV_PARAM_BSS_COLOR,
        WMI_VDEV_PARAM_SET_HEMU_MODE,
@@ -2013,9 +2031,10 @@ enum wmi_tlv_service {
        WMI_TLV_SERVICE_DSM_ROAM_FILTER = 211,
        WMI_TLV_SERVICE_PACKET_CAPTURE_SUPPORT = 212,
        WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
+       WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
+       WMI_TLV_SERVICE_EXT2_MSG = 220,
 
        WMI_MAX_EXT_SERVICE
-
 };
 
 enum {
@@ -2076,6 +2095,14 @@ enum wmi_beacon_gen_mode {
        WMI_BEACON_BURST_MODE = 1
 };
 
+enum wmi_direct_buffer_module {
+       WMI_DIRECT_BUF_SPECTRAL = 0,
+       WMI_DIRECT_BUF_CFR = 1,
+
+       /* keep it last */
+       WMI_DIRECT_BUF_MAX
+};
+
 struct wmi_host_pdev_band_to_mac {
        u32 pdev_id;
        u32 start_freq;
@@ -2382,6 +2409,15 @@ struct wmi_mac_addr {
        } __packed;
 } __packed;
 
+struct wmi_dma_ring_capabilities {
+       u32 tlv_header;
+       u32 pdev_id;
+       u32 module_id;
+       u32 min_elem;
+       u32 min_buf_sz;
+       u32 min_buf_align;
+} __packed;
+
 struct wmi_ready_event_min {
        struct wmi_abi_version fw_abi_vers;
        struct wmi_mac_addr mac_addr;
@@ -2519,7 +2555,8 @@ struct channel_param {
            allow_ht:1,
            allow_vht:1,
            allow_he:1,
-           set_agile:1;
+           set_agile:1,
+           psc_channel:1;
        u32 phy_mode;
        u32 cfreq1;
        u32 cfreq2;
@@ -3059,6 +3096,9 @@ struct  wmi_start_scan_cmd {
        u32 num_vendor_oui;
        u32 scan_ctrl_flags_ext;
        u32 dwell_time_active_2g;
+       u32 dwell_time_active_6g;
+       u32 dwell_time_passive_6g;
+       u32 scan_start_offset;
 } __packed;
 
 #define WMI_SCAN_FLAG_PASSIVE        0x1
@@ -3098,6 +3138,16 @@ enum {
        ((flag) |= (((mode) << WMI_SCAN_DWELL_MODE_SHIFT) & \
                    WMI_SCAN_DWELL_MODE_MASK))
 
+struct hint_short_ssid {
+       u32 freq_flags;
+       u32 short_ssid;
+};
+
+struct hint_bssid {
+       u32 freq_flags;
+       struct wmi_mac_addr bssid;
+};
+
 struct scan_req_params {
        u32 scan_id;
        u32 scan_req_id;
@@ -3125,6 +3175,8 @@ struct scan_req_params {
        u32 dwell_time_active;
        u32 dwell_time_active_2g;
        u32 dwell_time_passive;
+       u32 dwell_time_active_6g;
+       u32 dwell_time_passive_6g;
        u32 min_rest_time;
        u32 max_rest_time;
        u32 repeat_probe_time;
@@ -3175,6 +3227,10 @@ struct scan_req_params {
        struct element_info extraie;
        struct element_info htcap;
        struct element_info vhtcap;
+       u32 num_hint_s_ssid;
+       u32 num_hint_bssid;
+       struct hint_short_ssid hint_s_ssid[WLAN_SCAN_MAX_HINT_S_SSID];
+       struct hint_bssid hint_bssid[WLAN_SCAN_MAX_HINT_BSSID];
 };
 
 struct wmi_ssid_arg {
@@ -3264,6 +3320,7 @@ struct  wmi_bcn_send_from_host_cmd {
 #define WMI_CHAN_INFO_QUARTER_RATE     BIT(15)
 #define WMI_CHAN_INFO_DFS_FREQ2                BIT(16)
 #define WMI_CHAN_INFO_ALLOW_HE         BIT(17)
+#define WMI_CHAN_INFO_PSC              BIT(18)
 
 #define WMI_CHAN_REG_INFO1_MIN_PWR     GENMASK(7, 0)
 #define WMI_CHAN_REG_INFO1_MAX_PWR     GENMASK(15, 8)
@@ -3444,6 +3501,7 @@ struct peer_assoc_params {
        u32 tx_max_rate;
        u32 tx_mcs_set;
        u8 vht_capable;
+       u8 min_data_rate;
        u32 tx_max_mcs_nss;
        u32 peer_bw_rxnss_override;
        bool is_pmf_enabled;
@@ -3472,6 +3530,7 @@ struct peer_assoc_params {
        bool he_flag;
        u32 peer_he_cap_macinfo[2];
        u32 peer_he_cap_macinfo_internal;
+       u32 peer_he_caps_6ghz;
        u32 peer_he_ops;
        u32 peer_he_cap_phyinfo[WMI_HOST_MAX_HECAP_PHY_SIZE];
        u32 peer_he_mcs_count;
@@ -3509,6 +3568,8 @@ struct  wmi_peer_assoc_complete_cmd {
        u32 peer_he_mcs;
        u32 peer_he_cap_info_ext;
        u32 peer_he_cap_info_internal;
+       u32 min_data_rate;
+       u32 peer_he_caps_6ghz;
 } __packed;
 
 struct wmi_stop_scan_cmd {
@@ -4228,6 +4289,7 @@ struct wmi_pdev_temperature_event {
 #define WLAN_MGMT_TXRX_HOST_MAX_ANTENNA 4
 
 struct mgmt_rx_event_params {
+       u32 chan_freq;
        u32 channel;
        u32 snr;
        u8 rssi_ctl[WLAN_MGMT_TXRX_HOST_MAX_ANTENNA];
@@ -4257,6 +4319,7 @@ struct wmi_mgmt_rx_hdr {
        u32 rx_tsf_l32;
        u32 rx_tsf_u32;
        u32 pdev_id;
+       u32 chan_freq;
 } __packed;
 
 #define MAX_ANTENNA_EIGHT 8
@@ -4734,6 +4797,117 @@ struct ath11k_wmi_pdev_lro_config_cmd {
        u32 pdev_id;
 } __packed;
 
+#define ATH11K_WMI_SPECTRAL_COUNT_DEFAULT                 0
+#define ATH11K_WMI_SPECTRAL_PERIOD_DEFAULT              224
+#define ATH11K_WMI_SPECTRAL_PRIORITY_DEFAULT              1
+#define ATH11K_WMI_SPECTRAL_FFT_SIZE_DEFAULT              7
+#define ATH11K_WMI_SPECTRAL_GC_ENA_DEFAULT                1
+#define ATH11K_WMI_SPECTRAL_RESTART_ENA_DEFAULT           0
+#define ATH11K_WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT     -96
+#define ATH11K_WMI_SPECTRAL_INIT_DELAY_DEFAULT           80
+#define ATH11K_WMI_SPECTRAL_NB_TONE_THR_DEFAULT          12
+#define ATH11K_WMI_SPECTRAL_STR_BIN_THR_DEFAULT           8
+#define ATH11K_WMI_SPECTRAL_WB_RPT_MODE_DEFAULT           0
+#define ATH11K_WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT         0
+#define ATH11K_WMI_SPECTRAL_RSSI_THR_DEFAULT           0xf0
+#define ATH11K_WMI_SPECTRAL_PWR_FORMAT_DEFAULT            0
+#define ATH11K_WMI_SPECTRAL_RPT_MODE_DEFAULT              2
+#define ATH11K_WMI_SPECTRAL_BIN_SCALE_DEFAULT             1
+#define ATH11K_WMI_SPECTRAL_DBM_ADJ_DEFAULT               1
+#define ATH11K_WMI_SPECTRAL_CHN_MASK_DEFAULT              1
+
+struct ath11k_wmi_vdev_spectral_conf_param {
+       u32 vdev_id;
+       u32 scan_count;
+       u32 scan_period;
+       u32 scan_priority;
+       u32 scan_fft_size;
+       u32 scan_gc_ena;
+       u32 scan_restart_ena;
+       u32 scan_noise_floor_ref;
+       u32 scan_init_delay;
+       u32 scan_nb_tone_thr;
+       u32 scan_str_bin_thr;
+       u32 scan_wb_rpt_mode;
+       u32 scan_rssi_rpt_mode;
+       u32 scan_rssi_thr;
+       u32 scan_pwr_format;
+       u32 scan_rpt_mode;
+       u32 scan_bin_scale;
+       u32 scan_dbm_adj;
+       u32 scan_chn_mask;
+} __packed;
+
+struct ath11k_wmi_vdev_spectral_conf_cmd {
+       u32 tlv_header;
+       struct ath11k_wmi_vdev_spectral_conf_param param;
+} __packed;
+
+#define ATH11K_WMI_SPECTRAL_TRIGGER_CMD_TRIGGER  1
+#define ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR    2
+#define ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE    1
+#define ATH11K_WMI_SPECTRAL_ENABLE_CMD_DISABLE   2
+
+struct ath11k_wmi_vdev_spectral_enable_cmd {
+       u32 tlv_header;
+       u32 vdev_id;
+       u32 trigger_cmd;
+       u32 enable_cmd;
+} __packed;
+
+struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd {
+       u32 tlv_header;
+       u32 pdev_id;
+       u32 module_id;          /* see enum wmi_direct_buffer_module */
+       u32 base_paddr_lo;
+       u32 base_paddr_hi;
+       u32 head_idx_paddr_lo;
+       u32 head_idx_paddr_hi;
+       u32 tail_idx_paddr_lo;
+       u32 tail_idx_paddr_hi;
+       u32 num_elems;          /* Number of elems in the ring */
+       u32 buf_size;           /* size of allocated buffer in bytes */
+
+       /* Number of wmi_dma_buf_release_entry packed together */
+       u32 num_resp_per_event;
+
+       /* Target should timeout and send whatever resp
+        * it has if this time expires, units in milliseconds
+        */
+       u32 event_timeout_ms;
+} __packed;
+
+struct ath11k_wmi_dma_buf_release_fixed_param {
+       u32 pdev_id;
+       u32 module_id;
+       u32 num_buf_release_entry;
+       u32 num_meta_data_entry;
+} __packed;
+
+struct wmi_dma_buf_release_entry {
+       u32 tlv_header;
+       u32 paddr_lo;
+
+       /* Bits 11:0:   address of data
+        * Bits 31:12:  host context data
+        */
+       u32 paddr_hi;
+} __packed;
+
+#define WMI_SPECTRAL_META_INFO1_FREQ1          GENMASK(15, 0)
+#define WMI_SPECTRAL_META_INFO1_FREQ2          GENMASK(31, 16)
+
+#define WMI_SPECTRAL_META_INFO2_CHN_WIDTH      GENMASK(7, 0)
+
+struct wmi_dma_buf_release_meta_data {
+       u32 tlv_header;
+       s32 noise_floor[WMI_MAX_CHAINS];
+       u32 reset_delay;
+       u32 freq1;
+       u32 freq2;
+       u32 ch_width;
+} __packed;
+
 struct target_resource_config {
        u32 num_vdevs;
        u32 num_peers;
@@ -4941,4 +5115,10 @@ int ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id,
 int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id,
                                                bool enable);
 int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar, int pdev_id);
+int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar,
+                                struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *param);
+int ath11k_wmi_vdev_spectral_enable(struct ath11k *ar, u32 vdev_id,
+                                   u32 trigger, u32 enable);
+int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar,
+                                 struct ath11k_wmi_vdev_spectral_conf_param *param);
 #endif
index 53b66e9..5372e94 100644 (file)
@@ -311,7 +311,7 @@ static int ath6kl_usb_setup_pipe_resources(struct ath6kl_usb *ar_usb)
 
        ath6kl_dbg(ATH6KL_DBG_USB, "setting up USB Pipes using interface\n");
 
-       /* walk decriptors and setup pipes */
+       /* walk descriptors and setup pipes */
        for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
                endpoint = &iface_desc->endpoint[i].desc;
 
index 42bfdb4..d5e9af2 100644 (file)
@@ -34,7 +34,7 @@ config ATH9K
          APs that come with these cards refer to ath9k wiki
          products page:
 
-         http://wireless.kernel.org/en/users/Drivers/ath9k/products
+         https://wireless.wiki.kernel.org/en/users/Drivers/ath9k/products
 
          If you choose to build a module, it'll be called ath9k.
 
@@ -185,7 +185,8 @@ config ATH9K_HTC
          Support for Atheros HTC based cards.
          Chipsets supported: AR9271
 
-         For more information: http://wireless.kernel.org/en/users/Drivers/ath9k_htc
+         For more information:
+         https://wireless.wiki.kernel.org/en/users/Drivers/ath9k_htc
 
          The built module will be ath9k_htc.
 
index 052deff..8c97db7 100644 (file)
@@ -2410,7 +2410,7 @@ static u8 fixup_chainmask(u8 chip_chainmask, u8 eeprom_chainmask)
  * of tests. The testing requirements are going to be documented. Desired
  * test requirements are documented at:
  *
- * http://wireless.kernel.org/en/users/Drivers/ath9k/dfs
+ * https://wireless.wiki.kernel.org/en/users/Drivers/ath9k/dfs
  *
  * Once a new chipset gets properly tested an individual commit can be used
  * to document the testing for DFS for that chipset.
index b1bce7a..b2d7608 100644 (file)
@@ -10,7 +10,7 @@ config CARL9170
 
          It needs a special firmware (carl9170-1.fw), which can be downloaded
          from our wiki here:
-         <http://wireless.kernel.org/en/users/Drivers/carl9170>
+         <https://wireless.wiki.kernel.org/en/users/Drivers/carl9170>
 
          If you choose to build a module, it'll be called carl9170.
 
index 486957a..ead7933 100644 (file)
@@ -61,7 +61,7 @@ MODULE_ALIAS("arusb_lnx");
  * Note:
  *
  * Always update our wiki's device list (located at:
- * http://wireless.kernel.org/en/users/Drivers/ar9170/devices ),
+ * https://wireless.wiki.kernel.org/en/users/Drivers/ar9170/devices ),
  * whenever you add a new device.
  */
 static const struct usb_device_id carl9170_usb_ids[] = {
index 0d742ac..9c2e545 100644 (file)
@@ -24,6 +24,7 @@
  * could be acquired so far.
  */
 #define SPECTRAL_ATH10K_MAX_NUM_BINS           256
+#define SPECTRAL_ATH11K_MAX_NUM_BINS           512
 
 /* FFT sample format given to userspace via debugfs.
  *
@@ -37,6 +38,7 @@ enum ath_fft_sample_type {
        ATH_FFT_SAMPLE_HT20 = 1,
        ATH_FFT_SAMPLE_HT20_40,
        ATH_FFT_SAMPLE_ATH10K,
+       ATH_FFT_SAMPLE_ATH11K
 };
 
 struct fft_sample_tlv {
@@ -110,4 +112,19 @@ struct fft_sample_ath10k {
        u8 data[0];
 } __packed;
 
+struct fft_sample_ath11k {
+       struct fft_sample_tlv tlv;
+       u8 chan_width_mhz;
+       s8 max_index;
+       u8 max_exp;
+       __be16 freq1;
+       __be16 freq2;
+       __be16 max_magnitude;
+       __be16 rssi;
+       __be32 tsf;
+       __be32 noise;
+
+       u8 data[0];
+} __packed;
+
 #endif /* SPECTRAL_COMMON_H */
index dadba2d..6a95b19 100644 (file)
@@ -10,7 +10,7 @@ config WIL6210
          wil6210 chip by Wilocity. It supports operation on the
          60 GHz band, covered by the IEEE802.11ad standard.
 
-         http://wireless.kernel.org/en/users/Drivers/wil6210
+         https://wireless.wiki.kernel.org/en/users/Drivers/wil6210
 
          If you choose to build it as a module, it will be called
          wil6210
index bc8c15f..080e5aa 100644 (file)
@@ -897,7 +897,6 @@ static void wil_rx_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb)
 void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
                  struct wil_net_stats *stats, bool gro)
 {
-       gro_result_t rc = GRO_NORMAL;
        struct wil6210_vif *vif = ndev_to_vif(ndev);
        struct wil6210_priv *wil = ndev_to_wil(ndev);
        struct wireless_dev *wdev = vif_to_wdev(vif);
@@ -908,22 +907,16 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
         */
        int mcast = is_multicast_ether_addr(da);
        struct sk_buff *xmit_skb = NULL;
-       static const char * const gro_res_str[] = {
-               [GRO_MERGED]            = "GRO_MERGED",
-               [GRO_MERGED_FREE]       = "GRO_MERGED_FREE",
-               [GRO_HELD]              = "GRO_HELD",
-               [GRO_NORMAL]            = "GRO_NORMAL",
-               [GRO_DROP]              = "GRO_DROP",
-               [GRO_CONSUMED]          = "GRO_CONSUMED",
-       };
 
        if (wdev->iftype == NL80211_IFTYPE_STATION) {
                sa = wil_skb_get_sa(skb);
                if (mcast && ether_addr_equal(sa, ndev->dev_addr)) {
                        /* mcast packet looped back to us */
-                       rc = GRO_DROP;
                        dev_kfree_skb(skb);
-                       goto stats;
+                       ndev->stats.rx_dropped++;
+                       stats->rx_dropped++;
+                       wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
+                       return;
                }
        } else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
                if (mcast) {
@@ -967,26 +960,16 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
                        wil_rx_handle_eapol(vif, skb);
 
                if (gro)
-                       rc = napi_gro_receive(&wil->napi_rx, skb);
+                       napi_gro_receive(&wil->napi_rx, skb);
                else
                        netif_rx_ni(skb);
-               wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
-                            len, gro_res_str[rc]);
-       }
-stats:
-       /* statistics. rc set to GRO_NORMAL for AP bridging */
-       if (unlikely(rc == GRO_DROP)) {
-               ndev->stats.rx_dropped++;
-               stats->rx_dropped++;
-               wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
-       } else {
-               ndev->stats.rx_packets++;
-               stats->rx_packets++;
-               ndev->stats.rx_bytes += len;
-               stats->rx_bytes += len;
-               if (mcast)
-                       ndev->stats.multicast++;
        }
+       ndev->stats.rx_packets++;
+       stats->rx_packets++;
+       ndev->stats.rx_bytes += len;
+       stats->rx_bytes += len;
+       if (mcast)
+               ndev->stats.multicast++;
 }
 
 void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
index 3b26807..a63b5c2 100644 (file)
@@ -17,7 +17,7 @@
  *
  * TODO list is at the wiki:
  *
- * http://wireless.kernel.org/en/users/Drivers/at76c50x-usb#TODO
+ * https://wireless.wiki.kernel.org/en/users/Drivers/at76c50x-usb#TODO
  */
 
 #include <linux/init.h>
index 3ad94da..a54dd4f 100644 (file)
@@ -734,7 +734,7 @@ static void b43_short_slot_timing_disable(struct b43_wldev *dev)
 }
 
 /* DummyTransmission function, as documented on
- * http://bcm-v4.sipsolutions.net/802.11/DummyTransmission
+ * https://bcm-v4.sipsolutions.net/802.11/DummyTransmission
  */
 void b43_dummy_transmission(struct b43_wldev *dev, bool ofdm, bool pa_on)
 {
@@ -1198,7 +1198,7 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/BmacCorePllReset */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/BmacCorePllReset */
 void b43_wireless_core_phy_pll_reset(struct b43_wldev *dev)
 {
        struct bcma_drv_cc *bcma_cc __maybe_unused;
@@ -2164,7 +2164,7 @@ static void b43_print_fw_helptext(struct b43_wl *wl, bool error)
 {
        const char text[] =
                "You must go to " \
-               "http://wireless.kernel.org/en/users/Drivers/b43#devicefirmware " \
+               "https://wireless.wiki.kernel.org/en/users/Drivers/b43#devicefirmware " \
                "and download the correct firmware for this driver version. " \
                "Please carefully read all instructions on this website.\n";
 
@@ -2290,7 +2290,7 @@ err_format:
        return -EPROTO;
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/Init/Firmware */
+/* https://bcm-v4.sipsolutions.net/802.11/Init/Firmware */
 static int b43_try_request_fw(struct b43_request_fw_context *ctx)
 {
        struct b43_wldev *dev = ctx->dev;
@@ -2843,7 +2843,7 @@ static int b43_upload_initvals_band(struct b43_wldev *dev)
 }
 
 /* Initialize the GPIOs
- * http://bcm-specs.sipsolutions.net/GPIO
+ * https://bcm-specs.sipsolutions.net/GPIO
  */
 
 #ifdef CONFIG_B43_SSB
@@ -2971,7 +2971,7 @@ void b43_mac_enable(struct b43_wldev *dev)
        }
 }
 
-/* http://bcm-specs.sipsolutions.net/SuspendMAC */
+/* https://bcm-specs.sipsolutions.net/SuspendMAC */
 void b43_mac_suspend(struct b43_wldev *dev)
 {
        int i;
@@ -3004,7 +3004,7 @@ out:
        dev->mac_suspended++;
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MacPhyClkSet */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/MacPhyClkSet */
 void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on)
 {
        u32 tmp;
@@ -3231,7 +3231,7 @@ static void b43_chip_exit(struct b43_wldev *dev)
 }
 
 /* Initialize the chip
- * http://bcm-specs.sipsolutions.net/ChipInit
+ * https://bcm-specs.sipsolutions.net/ChipInit
  */
 static int b43_chip_init(struct b43_wldev *dev)
 {
index 923d4cb..1de4de0 100644 (file)
@@ -559,7 +559,7 @@ bool b43_is_40mhz(struct b43_wldev *dev)
        return dev->phy.chandef->width == NL80211_CHAN_WIDTH_40;
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BmacPhyClkFgc */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/BmacPhyClkFgc */
 void b43_phy_force_clock(struct b43_wldev *dev, bool force)
 {
        u32 tmp;
index 1e022ec..d5a1a5c 100644 (file)
@@ -357,14 +357,14 @@ static void b43_set_original_gains(struct b43_wldev *dev)
        b43_dummy_transmission(dev, false, true);
 }
 
-/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
+/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */
 static void b43_nrssi_hw_write(struct b43_wldev *dev, u16 offset, s16 val)
 {
        b43_phy_write(dev, B43_PHY_NRSSILT_CTRL, offset);
        b43_phy_write(dev, B43_PHY_NRSSILT_DATA, (u16) val);
 }
 
-/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
+/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */
 static s16 b43_nrssi_hw_read(struct b43_wldev *dev, u16 offset)
 {
        u16 val;
@@ -375,7 +375,7 @@ static s16 b43_nrssi_hw_read(struct b43_wldev *dev, u16 offset)
        return (s16) val;
 }
 
-/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
+/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */
 static void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val)
 {
        u16 i;
@@ -389,7 +389,7 @@ static void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val)
        }
 }
 
-/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
+/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */
 static void b43_nrssi_mem_update(struct b43_wldev *dev)
 {
        struct b43_phy_g *gphy = dev->phy.g;
@@ -1575,7 +1575,7 @@ static void b43_phy_initb5(struct b43_wldev *dev)
        b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/B6 */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/Init/B6 */
 static void b43_phy_initb6(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
@@ -2746,7 +2746,7 @@ static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev,
        return 0;
 }
 
-/* http://bcm-specs.sipsolutions.net/EstimatePowerOut
+/* https://bcm-specs.sipsolutions.net/EstimatePowerOut
  * This function converts a TSSI value to dBm in Q5.2
  */
 static s8 b43_gphy_estimate_power_out(struct b43_wldev *dev, s8 tssi)
index 6033df1..c685b4b 100644 (file)
@@ -1018,7 +1018,7 @@ static void b43_phy_ht_op_free(struct b43_wldev *dev)
        phy->ht = NULL;
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */
+/* https://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */
 static void b43_phy_ht_op_software_rfkill(struct b43_wldev *dev,
                                        bool blocked)
 {
index cfb953d..0e5c076 100644 (file)
@@ -70,7 +70,7 @@ static void b43_lpphy_op_free(struct b43_wldev *dev)
        dev->phy.lp = NULL;
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */
 static void lpphy_read_band_sprom(struct b43_wldev *dev)
 {
        struct ssb_sprom *sprom = dev->dev->bus_sprom;
index c33b423..f75f0ff 100644 (file)
@@ -98,7 +98,7 @@ static inline bool b43_nphy_ipa(struct b43_wldev *dev)
                (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ));
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreGetState */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreGetState */
 static u8 b43_nphy_get_rx_core_state(struct b43_wldev *dev)
 {
        return (b43_phy_read(dev, B43_NPHY_RFSEQCA) & B43_NPHY_RFSEQCA_RXEN) >>
@@ -109,7 +109,7 @@ static u8 b43_nphy_get_rx_core_state(struct b43_wldev *dev)
  * RF (just without b43_nphy_rf_ctl_intc_override)
  **************************************************/
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */
 static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
                                       enum b43_nphy_rf_sequence seq)
 {
@@ -146,7 +146,7 @@ static void b43_nphy_rf_ctl_override_rev19(struct b43_wldev *dev, u16 field,
        /* TODO */
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverrideRev7 */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverrideRev7 */
 static void b43_nphy_rf_ctl_override_rev7(struct b43_wldev *dev, u16 field,
                                          u16 value, u8 core, bool off,
                                          u8 override)
@@ -193,7 +193,7 @@ static void b43_nphy_rf_ctl_override_rev7(struct b43_wldev *dev, u16 field,
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverideOneToMany */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverideOneToMany */
 static void b43_nphy_rf_ctl_override_one_to_many(struct b43_wldev *dev,
                                                 enum n_rf_ctl_over_cmd cmd,
                                                 u16 value, u8 core, bool off)
@@ -237,7 +237,7 @@ static void b43_nphy_rf_ctl_override_one_to_many(struct b43_wldev *dev,
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
 static void b43_nphy_rf_ctl_override(struct b43_wldev *dev, u16 field,
                                     u16 value, u8 core, bool off)
 {
@@ -382,7 +382,7 @@ static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
 static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
                                          enum n_intc_override intc_override,
                                          u16 value, u8 core)
@@ -490,7 +490,7 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
  * Various PHY ops
  **************************************************/
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
 static void b43_nphy_write_clip_detection(struct b43_wldev *dev,
                                          const u16 *clip_st)
 {
@@ -498,14 +498,14 @@ static void b43_nphy_write_clip_detection(struct b43_wldev *dev,
        b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
 static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
 {
        clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES);
        clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
 static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
 {
        u16 tmp;
@@ -526,7 +526,7 @@ static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
        return tmp;
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
 static void b43_nphy_reset_cca(struct b43_wldev *dev)
 {
        u16 bbcfg;
@@ -540,7 +540,7 @@ static void b43_nphy_reset_cca(struct b43_wldev *dev)
        b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */
 static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
 {
        struct b43_phy *phy = &dev->phy;
@@ -564,7 +564,7 @@ static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/PHY/N/Read_Lpf_Bw_Ctl */
+/* https://bcm-v4.sipsolutions.net/PHY/N/Read_Lpf_Bw_Ctl */
 static u16 b43_nphy_read_lpf_ctl(struct b43_wldev *dev, u16 offset)
 {
        if (!offset)
@@ -572,7 +572,7 @@ static u16 b43_nphy_read_lpf_ctl(struct b43_wldev *dev, u16 offset)
        return b43_ntab_read(dev, B43_NTAB16(7, offset)) & 0x7;
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */
 static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
 {
        struct b43_phy_n *nphy = dev->phy.n;
@@ -628,7 +628,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
                b43_nphy_stay_in_carrier_search(dev, 0);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */
 static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
                                        u8 *events, u8 *delays, u8 length)
 {
@@ -805,7 +805,7 @@ static void b43_radio_2057_setup(struct b43_wldev *dev,
 }
 
 /* Calibrate resistors in LPF of PLL?
- * http://bcm-v4.sipsolutions.net/PHY/radio205x_rcal
+ * https://bcm-v4.sipsolutions.net/PHY/radio205x_rcal
  */
 static u8 b43_radio_2057_rcal(struct b43_wldev *dev)
 {
@@ -919,7 +919,7 @@ static u8 b43_radio_2057_rcal(struct b43_wldev *dev)
 }
 
 /* Calibrate the internal RC oscillator?
- * http://bcm-v4.sipsolutions.net/PHY/radio2057_rccal
+ * https://bcm-v4.sipsolutions.net/PHY/radio2057_rccal
  */
 static u16 b43_radio_2057_rccal(struct b43_wldev *dev)
 {
@@ -1030,7 +1030,7 @@ static void b43_radio_2057_init_post(struct b43_wldev *dev)
        b43_radio_mask(dev, R2057_RFPLL_MASTER, ~0x8);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */
+/* https://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */
 static void b43_radio_2057_init(struct b43_wldev *dev)
 {
        b43_radio_2057_init_pre(dev);
@@ -1117,7 +1117,7 @@ static void b43_chantab_radio_2056_upload(struct b43_wldev *dev,
                                        e->radio_tx1_mixg_boost_tune);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2056Setup */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2056Setup */
 static void b43_radio_2056_setup(struct b43_wldev *dev,
                                const struct b43_nphy_channeltab_entry_rev3 *e)
 {
@@ -1356,7 +1356,7 @@ static void b43_radio_init2056_post(struct b43_wldev *dev)
 
 /*
  * Initialize a Broadcom 2056 N-radio
- * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
+ * https://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
  */
 static void b43_radio_init2056(struct b43_wldev *dev)
 {
@@ -1406,7 +1406,7 @@ static void b43_chantab_radio_upload(struct b43_wldev *dev,
        b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */
 static void b43_radio_2055_setup(struct b43_wldev *dev,
                                const struct b43_nphy_channeltab_entry_rev2 *e)
 {
@@ -1480,7 +1480,7 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
 
 /*
  * Initialize a Broadcom 2055 N-radio
- * http://bcm-v4.sipsolutions.net/802.11/Radio/2055/Init
+ * https://bcm-v4.sipsolutions.net/802.11/Radio/2055/Init
  */
 static void b43_radio_init2055(struct b43_wldev *dev)
 {
@@ -1499,7 +1499,7 @@ static void b43_radio_init2055(struct b43_wldev *dev)
  * Samples
  **************************************************/
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */
 static int b43_nphy_load_samples(struct b43_wldev *dev,
                                        struct cordic_iq *samples, u16 len) {
        struct b43_phy_n *nphy = dev->phy.n;
@@ -1526,7 +1526,7 @@ static int b43_nphy_load_samples(struct b43_wldev *dev,
        return 0;
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GenLoadSamples */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/GenLoadSamples */
 static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
                                        bool test)
 {
@@ -1569,7 +1569,7 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
        return (i < 0) ? 0 : len;
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RunSamples */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RunSamples */
 static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
                                 u16 wait, bool iqmode, bool dac_test,
                                 bool modify_bbmult)
@@ -1650,7 +1650,7 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
  * RSSI
  **************************************************/
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */
 static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
                                        s8 offset, u8 core,
                                        enum n_rail_type rail,
@@ -1895,7 +1895,7 @@ static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code,
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */
 static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code,
                                 enum n_rssi_type type)
 {
@@ -1907,7 +1907,7 @@ static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code,
                b43_nphy_rev2_rssi_select(dev, code, type);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */
 static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev,
                                       enum n_rssi_type rssi_type, u8 *buf)
 {
@@ -1936,7 +1936,7 @@ static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev,
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */
 static int b43_nphy_poll_rssi(struct b43_wldev *dev, enum n_rssi_type rssi_type,
                              s32 *buf, u8 nsamp)
 {
@@ -2025,7 +2025,7 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, enum n_rssi_type rssi_type,
        return out;
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */
 static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
@@ -2287,7 +2287,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
        b43_nphy_write_clip_detection(dev, clip_state);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */
 static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, enum n_rssi_type type)
 {
        int i, j, vcm;
@@ -2453,7 +2453,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, enum n_rssi_type type)
 
 /*
  * RSSI Calibration
- * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal
+ * https://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal
  */
 static void b43_nphy_rssi_cal(struct b43_wldev *dev)
 {
@@ -2680,7 +2680,7 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
                b43_phy_maskset(dev, B43_PHY_N(0xC5D), 0xFF80, 4);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
 static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev)
 {
        if (dev->phy.rev >= 19)
@@ -3433,7 +3433,7 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
                                B43_NPHY_FINERX2_CGC_DECGC);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
 static void b43_nphy_workarounds(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
@@ -3468,7 +3468,7 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
 
 /*
  * Transmits a known value for LO calibration
- * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone
+ * https://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone
  */
 static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val,
                            bool iqmode, bool dac_test, bool modify_bbmult)
@@ -3481,7 +3481,7 @@ static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val,
        return 0;
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */
 static void b43_nphy_update_txrx_chain(struct b43_wldev *dev)
 {
        struct b43_phy_n *nphy = dev->phy.n;
@@ -3509,7 +3509,7 @@ static void b43_nphy_update_txrx_chain(struct b43_wldev *dev)
                                ~B43_NPHY_RFSEQMODE_CAOVER);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */
 static void b43_nphy_stop_playback(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
@@ -3546,7 +3546,7 @@ static void b43_nphy_stop_playback(struct b43_wldev *dev)
                b43_nphy_stay_in_carrier_search(dev, 0);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */
 static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
                                        struct nphy_txgains target,
                                        struct nphy_iqcal_params *params)
@@ -3595,7 +3595,7 @@ static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
  * Tx and Rx
  **************************************************/
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */
 static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
 {
        struct b43_phy *phy = &dev->phy;
@@ -3732,7 +3732,7 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
                b43_nphy_stay_in_carrier_search(dev, 0);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */
 static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
@@ -3926,7 +3926,7 @@ static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev)
 /*
  * Stop radio and transmit known signal. Then check received signal strength to
  * get TSSI (Transmit Signal Strength Indicator).
- * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlIdleTssi
+ * https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlIdleTssi
  */
 static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
 {
@@ -3978,7 +3978,7 @@ static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
        nphy->pwr_ctl_info[1].idle_tssi_2g = (tmp >> 8) & 0xFF;
 }
 
-/* http://bcm-v4.sipsolutions.net/PHY/N/TxPwrLimitToTbl */
+/* https://bcm-v4.sipsolutions.net/PHY/N/TxPwrLimitToTbl */
 static void b43_nphy_tx_prepare_adjusted_power_table(struct b43_wldev *dev)
 {
        struct b43_phy_n *nphy = dev->phy.n;
@@ -4039,7 +4039,7 @@ static void b43_nphy_tx_prepare_adjusted_power_table(struct b43_wldev *dev)
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlSetup */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlSetup */
 static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
@@ -4222,7 +4222,7 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
        u32 rfpwr_offset;
        u8 pga_gain, pad_gain;
        int i;
-       const s16 *uninitialized_var(rf_pwr_offset_table);
+       const s16 *rf_pwr_offset_table = NULL;
 
        table = b43_nphy_get_tx_gain_table(dev);
        if (!table)
@@ -4272,7 +4272,7 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
 static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
 {
        struct b43_phy_n *nphy = dev->phy.n;
@@ -4310,7 +4310,7 @@ static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
 
 /*
  * TX low-pass filter bandwidth setup
- * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw
+ * https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw
  */
 static void b43_nphy_tx_lpf_bw(struct b43_wldev *dev)
 {
@@ -4333,7 +4333,7 @@ static void b43_nphy_tx_lpf_bw(struct b43_wldev *dev)
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
 static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
                                u16 samps, u8 time, bool wait)
 {
@@ -4372,7 +4372,7 @@ static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
        memset(est, 0, sizeof(*est));
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
 static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
                                        struct b43_phy_n_iq_comp *pcomp)
 {
@@ -4391,7 +4391,7 @@ static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
 
 #if 0
 /* Ready but not used anywhere */
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
 static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
 {
        u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
@@ -4414,7 +4414,7 @@ static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
        b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */
 static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
 {
        u8 rxval, txval;
@@ -4476,7 +4476,7 @@ static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
 }
 #endif
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
 static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
 {
        int i;
@@ -4574,7 +4574,7 @@ static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
        b43_nphy_rx_iq_coeffs(dev, true, &new);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
 static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
 {
        u16 array[4];
@@ -4586,7 +4586,7 @@ static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
        b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */
 static void b43_nphy_spur_workaround(struct b43_wldev *dev)
 {
        struct b43_phy_n *nphy = dev->phy.n;
@@ -4645,7 +4645,7 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
                b43_nphy_stay_in_carrier_search(dev, 0);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
 static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
 {
        struct b43_phy_n *nphy = dev->phy.n;
@@ -4713,7 +4713,7 @@ static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
 
 /*
  * Restore RSSI Calibration
- * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal
+ * https://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal
  */
 static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
 {
@@ -4822,7 +4822,7 @@ static void b43_nphy_tx_cal_radio_setup_rev7(struct b43_wldev *dev)
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */
 static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
@@ -4921,7 +4921,7 @@ static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */
 static void b43_nphy_update_tx_cal_ladder(struct b43_wldev *dev, u16 core)
 {
        struct b43_phy_n *nphy = dev->phy.n;
@@ -4955,14 +4955,14 @@ static void b43_nphy_pa_set_tx_dig_filter(struct b43_wldev *dev, u16 offset,
                b43_phy_write(dev, offset, filter[i]);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ExtPaSetTxDigiFilts */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/ExtPaSetTxDigiFilts */
 static void b43_nphy_ext_pa_set_tx_dig_filters(struct b43_wldev *dev)
 {
        b43_nphy_pa_set_tx_dig_filter(dev, 0x2C5,
                                      tbl_tx_filter_coef_rev4[2]);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IpaSetTxDigiFilts */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/IpaSetTxDigiFilts */
 static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
 {
        /* B43_NPHY_TXF_20CO_S0A1, B43_NPHY_TXF_40CO_S0A1, unknown */
@@ -5002,7 +5002,7 @@ static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetTxGain */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/GetTxGain */
 static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev)
 {
        struct b43_phy_n *nphy = dev->phy.n;
@@ -5077,7 +5077,7 @@ static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev)
        return target;
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhyCleanup */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhyCleanup */
 static void b43_nphy_tx_cal_phy_cleanup(struct b43_wldev *dev)
 {
        u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
@@ -5106,7 +5106,7 @@ static void b43_nphy_tx_cal_phy_cleanup(struct b43_wldev *dev)
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhySetup */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhySetup */
 static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
@@ -5207,7 +5207,7 @@ static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SaveCal */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SaveCal */
 static void b43_nphy_save_cal(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
@@ -5278,7 +5278,7 @@ static void b43_nphy_save_cal(struct b43_wldev *dev)
                b43_nphy_stay_in_carrier_search(dev, 0);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreCal */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreCal */
 static void b43_nphy_restore_cal(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
@@ -5366,7 +5366,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
        b43_nphy_rx_iq_coeffs(dev, true, rxcal_coeffs);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalTxIqlo */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/CalTxIqlo */
 static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
                                struct nphy_txgains target,
                                bool full, bool mphase)
@@ -5599,7 +5599,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
        return error;
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ReapplyTxCalCoeffs */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/ReapplyTxCalCoeffs */
 static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev)
 {
        struct b43_phy_n *nphy = dev->phy.n;
@@ -5634,7 +5634,7 @@ static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev)
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIqRev2 */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIqRev2 */
 static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
                        struct nphy_txgains target, u8 type, bool debug)
 {
@@ -5821,7 +5821,7 @@ static int b43_nphy_rev3_cal_rx_iq(struct b43_wldev *dev,
        return -1;
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIq */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIq */
 static int b43_nphy_cal_rx_iq(struct b43_wldev *dev,
                        struct nphy_txgains target, u8 type, bool debug)
 {
@@ -5834,7 +5834,7 @@ static int b43_nphy_cal_rx_iq(struct b43_wldev *dev,
                return b43_nphy_rev2_cal_rx_iq(dev, target, type, debug);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreSetState */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreSetState */
 static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask)
 {
        struct b43_phy *phy = &dev->phy;
@@ -5939,7 +5939,7 @@ static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
  * N-PHY init
  **************************************************/
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */
 static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble)
 {
        u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG);
@@ -5953,7 +5953,7 @@ static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble)
        b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */
 static void b43_nphy_bphy_init(struct b43_wldev *dev)
 {
        unsigned int i;
@@ -5972,7 +5972,7 @@ static void b43_nphy_bphy_init(struct b43_wldev *dev)
        b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */
 static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
 {
        if (dev->phy.rev >= 7)
@@ -6246,7 +6246,7 @@ static void b43_chantab_phy_upload(struct b43_wldev *dev,
        b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */
+/* https://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */
 static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)
 {
        switch (dev->dev->bus_type) {
@@ -6265,7 +6265,7 @@ static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */
 static void b43_nphy_channel_setup(struct b43_wldev *dev,
                                const struct b43_phy_n_sfo_cfg *e,
                                struct ieee80211_channel *new_channel)
@@ -6372,7 +6372,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
                b43_nphy_spur_workaround(dev);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetChanspec */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SetChanspec */
 static int b43_nphy_set_channel(struct b43_wldev *dev,
                                struct ieee80211_channel *channel,
                                enum nl80211_channel_type channel_type)
@@ -6589,7 +6589,7 @@ static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
        b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */
+/* https://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */
 static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
                                        bool blocked)
 {
@@ -6643,7 +6643,7 @@ static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/Anacore */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/Anacore */
 static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on)
 {
        struct b43_phy *phy = &dev->phy;
index 575c696..94f5e62 100644 (file)
@@ -3072,7 +3072,7 @@ INITTABSPTS(b2056_inittab_radio_rev11);
        .phy_regs.phy_bw5       = r4,   \
        .phy_regs.phy_bw6       = r5
 
-/* http://bcm-v4.sipsolutions.net/802.11/Radio/2056/ChannelTable */
+/* https://bcm-v4.sipsolutions.net/802.11/Radio/2056/ChannelTable */
 static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_phy_rev3[] = {
   {    .freq                   = 4920,
        RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
index dad405a..7957db9 100644 (file)
@@ -3620,7 +3620,7 @@ static void b43_nphy_tables_init_rev0(struct b43_wldev *dev)
        ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables */
 void b43_nphy_tables_init(struct b43_wldev *dev)
 {
        if (dev->phy.rev >= 16)
@@ -3633,7 +3633,7 @@ void b43_nphy_tables_init(struct b43_wldev *dev)
                b43_nphy_tables_init_rev0(dev);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
+/* https://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
 static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
 {
        struct b43_phy *phy = &dev->phy;
index 5208a39..da0d9e6 100644 (file)
@@ -591,7 +591,7 @@ static void b43legacy_synchronize_irq(struct b43legacy_wldev *dev)
 }
 
 /* DummyTransmission function, as documented on
- * http://bcm-specs.sipsolutions.net/DummyTransmission
+ * https://bcm-specs.sipsolutions.net/DummyTransmission
  */
 void b43legacy_dummy_transmission(struct b43legacy_wldev *dev)
 {
@@ -1477,8 +1477,8 @@ static void b43legacy_release_firmware(struct b43legacy_wldev *dev)
 
 static void b43legacy_print_fw_helptext(struct b43legacy_wl *wl)
 {
-       b43legacyerr(wl, "You must go to http://wireless.kernel.org/en/users/"
-                    "Drivers/b43#devicefirmware "
+       b43legacyerr(wl, "You must go to https://wireless.wiki.kernel.org/en/"
+                    "users/Drivers/b43#devicefirmware "
                     "and download the correct firmware (version 3).\n");
 }
 
@@ -1870,7 +1870,7 @@ out:
 }
 
 /* Initialize the GPIOs
- * http://bcm-specs.sipsolutions.net/GPIO
+ * https://bcm-specs.sipsolutions.net/GPIO
  */
 static int b43legacy_gpio_init(struct b43legacy_wldev *dev)
 {
@@ -1960,7 +1960,7 @@ void b43legacy_mac_enable(struct b43legacy_wldev *dev)
        }
 }
 
-/* http://bcm-specs.sipsolutions.net/SuspendMAC */
+/* https://bcm-specs.sipsolutions.net/SuspendMAC */
 void b43legacy_mac_suspend(struct b43legacy_wldev *dev)
 {
        int i;
@@ -2141,7 +2141,7 @@ static void b43legacy_chip_exit(struct b43legacy_wldev *dev)
 }
 
 /* Initialize the chip
- * http://bcm-specs.sipsolutions.net/ChipInit
+ * https://bcm-specs.sipsolutions.net/ChipInit
  */
 static int b43legacy_chip_init(struct b43legacy_wldev *dev)
 {
index a659259..05404fb 100644 (file)
@@ -129,7 +129,7 @@ void b43legacy_phy_calibrate(struct b43legacy_wldev *dev)
 }
 
 /* initialize B PHY power control
- * as described in http://bcm-specs.sipsolutions.net/InitPowerControl
+ * as described in https://bcm-specs.sipsolutions.net/InitPowerControl
  */
 static void b43legacy_phy_init_pctl(struct b43legacy_wldev *dev)
 {
@@ -1461,7 +1461,7 @@ void b43legacy_phy_set_baseband_attenuation(struct b43legacy_wldev *dev,
        b43legacy_phy_write(dev, 0x0060, value);
 }
 
-/* http://bcm-specs.sipsolutions.net/LocalOscillator/Measure */
+/* https://bcm-specs.sipsolutions.net/LocalOscillator/Measure */
 void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev)
 {
        static const u8 pairorder[10] = { 3, 1, 5, 7, 9, 2, 0, 4, 6, 8 };
@@ -1721,7 +1721,7 @@ void b43legacy_phy_lo_mark_all_unused(struct b43legacy_wldev *dev)
        }
 }
 
-/* http://bcm-specs.sipsolutions.net/EstimatePowerOut
+/* https://bcm-specs.sipsolutions.net/EstimatePowerOut
  * This function converts a TSSI value to dBm in Q5.2
  */
 static s8 b43legacy_phy_estimate_power_out(struct b43legacy_wldev *dev, s8 tssi)
@@ -1747,7 +1747,7 @@ static s8 b43legacy_phy_estimate_power_out(struct b43legacy_wldev *dev, s8 tssi)
        return dbm;
 }
 
-/* http://bcm-specs.sipsolutions.net/RecalculateTransmissionPower */
+/* https://bcm-specs.sipsolutions.net/RecalculateTransmissionPower */
 void b43legacy_phy_xmitpower(struct b43legacy_wldev *dev)
 {
        struct b43legacy_phy *phy = &dev->phy;
index da40d1c..06891b4 100644 (file)
@@ -313,14 +313,14 @@ u8 b43legacy_radio_aci_scan(struct b43legacy_wldev *dev)
        return ret[channel - 1];
 }
 
-/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
+/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */
 void b43legacy_nrssi_hw_write(struct b43legacy_wldev *dev, u16 offset, s16 val)
 {
        b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_CTRL, offset);
        b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_DATA, (u16)val);
 }
 
-/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
+/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */
 s16 b43legacy_nrssi_hw_read(struct b43legacy_wldev *dev, u16 offset)
 {
        u16 val;
@@ -331,7 +331,7 @@ s16 b43legacy_nrssi_hw_read(struct b43legacy_wldev *dev, u16 offset)
        return (s16)val;
 }
 
-/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
+/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */
 void b43legacy_nrssi_hw_update(struct b43legacy_wldev *dev, u16 val)
 {
        u16 i;
@@ -345,7 +345,7 @@ void b43legacy_nrssi_hw_update(struct b43legacy_wldev *dev, u16 val)
        }
 }
 
-/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
+/* https://bcm-specs.sipsolutions.net/NRSSILookupTable */
 void b43legacy_nrssi_mem_update(struct b43legacy_wldev *dev)
 {
        struct b43legacy_phy *phy = &dev->phy;
index 46346cb..1a7ab49 100644 (file)
@@ -863,7 +863,7 @@ static void brcmf_sdiod_freezer_detach(struct brcmf_sdio_dev *sdiodev)
 }
 #endif /* CONFIG_PM_SLEEP */
 
-static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
+int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
 {
        sdiodev->state = BRCMF_SDIOD_DOWN;
        if (sdiodev->bus) {
@@ -898,7 +898,7 @@ static void brcmf_sdiod_host_fixup(struct mmc_host *host)
        host->caps |= MMC_CAP_NONREMOVABLE;
 }
 
-static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
+int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
 {
        int ret = 0;
        unsigned int f2_blksz = SDIO_FUNC2_BLOCKSIZE;
index a757abd..ab0da2f 100644 (file)
@@ -84,6 +84,8 @@
 
 #define BRCMF_ND_INFO_TIMEOUT          msecs_to_jiffies(2000)
 
+#define BRCMF_PS_MAX_TIMEOUT_MS                2000
+
 #define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
        (sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
 
@@ -1387,7 +1389,8 @@ static int brcmf_set_sae_password(struct brcmf_if *ifp, const u8 *pwd_data,
        return err;
 }
 
-static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason)
+static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason,
+                           bool locally_generated)
 {
        struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(vif->wdev.wiphy);
        struct brcmf_pub *drvr = cfg->pub;
@@ -1409,7 +1412,7 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason)
                if ((vif->wdev.iftype == NL80211_IFTYPE_STATION) ||
                    (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT))
                        cfg80211_disconnected(vif->wdev.netdev, reason, NULL, 0,
-                                             true, GFP_KERNEL);
+                                             locally_generated, GFP_KERNEL);
        }
        clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
        clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status);
@@ -1588,7 +1591,7 @@ brcmf_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *ndev)
                return 0;
        }
 
-       brcmf_link_down(ifp->vif, WLAN_REASON_DEAUTH_LEAVING);
+       brcmf_link_down(ifp->vif, WLAN_REASON_DEAUTH_LEAVING, true);
        brcmf_net_setcarrier(ifp, false);
 
        brcmf_dbg(TRACE, "Exit\n");
@@ -2941,6 +2944,12 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
                else
                        bphy_err(drvr, "error (%d)\n", err);
        }
+
+       err = brcmf_fil_iovar_int_set(ifp, "pm2_sleep_ret",
+                               min_t(u32, timeout, BRCMF_PS_MAX_TIMEOUT_MS));
+       if (err)
+               bphy_err(drvr, "Unable to set pm timeout, (%d)\n", err);
+
 done:
        brcmf_dbg(TRACE, "Exit\n");
        return err;
@@ -3907,7 +3916,7 @@ static s32 brcmf_cfg80211_suspend(struct wiphy *wiphy,
                         * disassociate from AP to save power while system is
                         * in suspended state
                         */
-                       brcmf_link_down(vif, WLAN_REASON_UNSPECIFIED);
+                       brcmf_link_down(vif, WLAN_REASON_UNSPECIFIED, true);
                        /* Make sure WPA_Supplicant receives all the event
                         * generated due to DISASSOC call to the fw to keep
                         * the state fw and WPA_Supplicant state consistent
@@ -4835,12 +4844,14 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
                        goto exit;
                }
 
-               if (settings->hidden_ssid) {
-                       err = brcmf_fil_iovar_int_set(ifp, "closednet", 1);
-                       if (err) {
-                               bphy_err(drvr, "closednet error (%d)\n", err);
-                               goto exit;
-                       }
+               err = brcmf_fil_iovar_int_set(ifp, "closednet",
+                                             settings->hidden_ssid);
+               if (err) {
+                       bphy_err(drvr, "%s closednet error (%d)\n",
+                                settings->hidden_ssid ?
+                                "enabled" : "disabled",
+                                err);
+                       goto exit;
                }
 
                brcmf_dbg(TRACE, "AP mode configuration complete\n");
@@ -5129,7 +5140,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
                                              &freq);
                chan_nr = ieee80211_frequency_to_channel(freq);
                af_params->channel = cpu_to_le32(chan_nr);
-
+               af_params->dwell_time = cpu_to_le32(params->wait);
                memcpy(action_frame->data, &buf[DOT11_MGMT_HDR_LEN],
                       le16_to_cpu(action_frame->len));
 
@@ -6024,10 +6035,19 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
                brcmf_net_setcarrier(ifp, true);
        } else if (brcmf_is_linkdown(e)) {
                brcmf_dbg(CONN, "Linkdown\n");
-               if (!brcmf_is_ibssmode(ifp->vif)) {
+               if (!brcmf_is_ibssmode(ifp->vif) &&
+                   test_bit(BRCMF_VIF_STATUS_CONNECTED,
+                            &ifp->vif->sme_state)) {
+                       if (memcmp(profile->bssid, e->addr, ETH_ALEN))
+                               return err;
+
                        brcmf_bss_connect_done(cfg, ndev, e, false);
                        brcmf_link_down(ifp->vif,
-                                       brcmf_map_fw_linkdown_reason(e));
+                                       brcmf_map_fw_linkdown_reason(e),
+                                       e->event_code &
+                                       (BRCMF_E_DEAUTH_IND |
+                                       BRCMF_E_DISASSOC_IND)
+                                       ? false : true);
                        brcmf_init_prof(ndev_to_prof(ndev));
                        if (ndev != cfg_to_ndev(cfg))
                                complete(&cfg->vif_disabled);
@@ -6801,7 +6821,7 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
  *     #AP <= 4, matching BI, channels = 1, 4 total
  *
  * no p2p and rsdb:
- *     #STA <= 2, #AP <= 2, channels = 2, 4 total
+ *     #STA <= 1, #AP <= 2, channels = 2, 4 total
  *
  * p2p, no mchan, and mbss:
  *
@@ -6816,7 +6836,7 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = {
  *     #AP <= 4, matching BI, channels = 1, 4 total
  *
  * p2p, rsdb, and no mbss:
- *     #STA <= 2, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 2, AP <= 2,
+ *     #STA <= 1, #P2P-DEV <= 1, #{P2P-CL, P2P-GO} <= 2, AP <= 2,
  *      channels = 2, 4 total
  */
 static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
@@ -6857,7 +6877,7 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
                goto err;
 
        combo[c].num_different_channels = 1 + (rsdb || (p2p && mchan));
-       c0_limits[i].max = 1 + rsdb;
+       c0_limits[i].max = 1;
        c0_limits[i++].types = BIT(NL80211_IFTYPE_STATION);
        if (mon_flag) {
                c0_limits[i].max = 1;
@@ -6873,7 +6893,7 @@ static int brcmf_setup_ifmodes(struct wiphy *wiphy, struct brcmf_if *ifp)
        if (p2p && rsdb) {
                c0_limits[i].max = 2;
                c0_limits[i++].types = BIT(NL80211_IFTYPE_AP);
-               combo[c].max_interfaces = 5;
+               combo[c].max_interfaces = 4;
        } else if (p2p) {
                combo[c].max_interfaces = i;
        } else if (rsdb) {
@@ -7180,7 +7200,7 @@ static s32 __brcmf_cfg80211_down(struct brcmf_if *ifp)
         * from AP to save power
         */
        if (check_vif_up(ifp->vif)) {
-               brcmf_link_down(ifp->vif, WLAN_REASON_UNSPECIFIED);
+               brcmf_link_down(ifp->vif, WLAN_REASON_UNSPECIFIED, true);
 
                /* Make sure WPA_Supplicant receives all the event
                   generated due to DISASSOC call to the fw to keep
index dec25e4..e3758bd 100644 (file)
@@ -209,8 +209,8 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
                bphy_err(drvr, "Retrieving cur_etheraddr failed, %d\n", err);
                goto done;
        }
-       memcpy(ifp->drvr->wiphy->perm_addr, ifp->drvr->mac, ETH_ALEN);
        memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac));
+       memcpy(ifp->drvr->wiphy->perm_addr, ifp->drvr->mac, ETH_ALEN);
 
        bus = ifp->drvr->bus_if;
        ri = &ifp->drvr->revinfo;
index c88655a..f89010a 100644 (file)
@@ -352,6 +352,9 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
        if ((skb->priority == 0) || (skb->priority > 7))
                skb->priority = cfg80211_classify8021d(skb, NULL);
 
+       /* set pacing shift for packet aggregation */
+       sk_pacing_shift_update(skb->sk, 8);
+
        ret = brcmf_proto_tx_queue_data(drvr, ifp->ifidx, skb);
        if (ret < 0)
                brcmf_txfinalize(ifp, skb, false);
index de0ef1b..2e31cc1 100644 (file)
@@ -19,7 +19,7 @@
 #define BRCMF_ARP_OL_PEER_AUTO_REPLY   0x00000008
 
 #define        BRCMF_BSS_INFO_VERSION  109 /* curr ver of brcmf_bss_info_le struct */
-#define BRCMF_BSS_RSSI_ON_CHANNEL      0x0002
+#define BRCMF_BSS_RSSI_ON_CHANNEL      0x0004
 
 #define BRCMF_STA_BRCM                 0x00000001      /* Running a Broadcom driver */
 #define BRCMF_STA_WME                  0x00000002      /* WMM association */
index 0970126..2df6811 100644 (file)
@@ -323,6 +323,10 @@ struct brcmf_skbuff_cb {
  *     firmware suppress the packet as device is already in PS mode.
  * @BRCMF_FWS_TXSTATUS_FW_TOSSED:
  *     firmware tossed the packet.
+ * @BRCMF_FWS_TXSTATUS_FW_DISCARD_NOACK:
+ *     firmware tossed the packet after retries.
+ * @BRCMF_FWS_TXSTATUS_FW_SUPPRESS_ACKED:
+ *     firmware wrongly reported suppressed previously, now fixing to acked.
  * @BRCMF_FWS_TXSTATUS_HOST_TOSSED:
  *     host tossed the packet.
  */
@@ -331,6 +335,8 @@ enum brcmf_fws_txstatus {
        BRCMF_FWS_TXSTATUS_CORE_SUPPRESS,
        BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS,
        BRCMF_FWS_TXSTATUS_FW_TOSSED,
+       BRCMF_FWS_TXSTATUS_FW_DISCARD_NOACK,
+       BRCMF_FWS_TXSTATUS_FW_SUPPRESS_ACKED,
        BRCMF_FWS_TXSTATUS_HOST_TOSSED
 };
 
@@ -383,6 +389,7 @@ struct brcmf_fws_mac_descriptor {
 };
 
 #define BRCMF_FWS_HANGER_MAXITEMS      3072
+#define BRCMF_BORROW_RATIO                     3
 
 /**
  * enum brcmf_fws_hanger_item_state - state of hanger item.
@@ -479,7 +486,8 @@ struct brcmf_fws_info {
        u32 fifo_enqpkt[BRCMF_FWS_FIFO_COUNT];
        int fifo_credit[BRCMF_FWS_FIFO_COUNT];
        int init_fifo_credit[BRCMF_FWS_FIFO_COUNT];
-       int credits_borrowed[BRCMF_FWS_FIFO_AC_VO + 1];
+       int credits_borrowed[BRCMF_FWS_FIFO_AC_VO + 1]
+               [BRCMF_FWS_FIFO_AC_VO + 1];
        int deq_node_pos[BRCMF_FWS_FIFO_COUNT];
        u32 fifo_credit_map;
        u32 fifo_delay_map;
@@ -621,6 +629,7 @@ static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
 static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
                                int ifidx)
 {
+       struct brcmf_fws_hanger_item *hi;
        bool (*matchfn)(struct sk_buff *, void *) = NULL;
        struct sk_buff *skb;
        int prec;
@@ -632,6 +641,9 @@ static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
                skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
                while (skb) {
                        hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
+                       hi = &fws->hanger.items[hslot];
+                       WARN_ON(skb != hi->pkt);
+                       hi->state = BRCMF_FWS_HANGER_ITEM_STATE_FREE;
                        brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
                                                true);
                        brcmu_pkt_buf_free_skb(skb);
@@ -1187,11 +1199,11 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
 
        fws->fifo_credit_map |= 1 << fifo;
 
-       if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
-           (fws->credits_borrowed[0])) {
+       if (fifo > BRCMF_FWS_FIFO_AC_BK &&
+           fifo <= BRCMF_FWS_FIFO_AC_VO) {
                for (lender_ac = BRCMF_FWS_FIFO_AC_VO; lender_ac >= 0;
                     lender_ac--) {
-                       borrowed = &fws->credits_borrowed[lender_ac];
+                       borrowed = &fws->credits_borrowed[fifo][lender_ac];
                        if (*borrowed) {
                                fws->fifo_credit_map |= (1 << lender_ac);
                                fifo_credit = &fws->fifo_credit[lender_ac];
@@ -1208,7 +1220,10 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
                }
        }
 
-       fws->fifo_credit[fifo] += credits;
+       if (credits) {
+               fws->fifo_credit[fifo] += credits;
+       }
+
        if (fws->fifo_credit[fifo] > fws->init_fifo_credit[fifo])
                fws->fifo_credit[fifo] = fws->init_fifo_credit[fifo];
 
@@ -1451,6 +1466,10 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
                remove_from_hanger = false;
        } else if (flags == BRCMF_FWS_TXSTATUS_FW_TOSSED)
                fws->stats.txs_tossed += compcnt;
+       else if (flags == BRCMF_FWS_TXSTATUS_FW_DISCARD_NOACK)
+               fws->stats.txs_discard += compcnt;
+       else if (flags == BRCMF_FWS_TXSTATUS_FW_SUPPRESS_ACKED)
+               fws->stats.txs_discard += compcnt;
        else if (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED)
                fws->stats.txs_host_tossed += compcnt;
        else
@@ -1843,6 +1862,9 @@ void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb)
 
        WARN_ON(siglen > skb->len);
 
+       if (siglen > skb->len)
+               siglen = skb->len;
+
        if (!siglen)
                return;
        /* if flow control disabled, skip to packet data and leave */
@@ -2005,27 +2027,31 @@ static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws,
        }
 }
 
-static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws)
+static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws,
+                                  int highest_lender_ac, int borrower_ac,
+                                  bool borrow_all)
 {
-       int lender_ac;
+       int lender_ac, borrow_limit = 0;
 
-       if (time_after(fws->borrow_defer_timestamp, jiffies)) {
-               fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE);
-               return -ENAVAIL;
-       }
+       for (lender_ac = 0; lender_ac <= highest_lender_ac; lender_ac++) {
 
-       for (lender_ac = 0; lender_ac <= BRCMF_FWS_FIFO_AC_VO; lender_ac++) {
-               if (fws->fifo_credit[lender_ac] > 0) {
-                       fws->credits_borrowed[lender_ac]++;
+               if (!borrow_all)
+                       borrow_limit =
+                         fws->init_fifo_credit[lender_ac] / BRCMF_BORROW_RATIO;
+               else
+                       borrow_limit = 0;
+
+               if (fws->fifo_credit[lender_ac] > borrow_limit) {
+                       fws->credits_borrowed[borrower_ac][lender_ac]++;
                        fws->fifo_credit[lender_ac]--;
                        if (fws->fifo_credit[lender_ac] == 0)
                                fws->fifo_credit_map &= ~(1 << lender_ac);
-                       fws->fifo_credit_map |= (1 << BRCMF_FWS_FIFO_AC_BE);
+                       fws->fifo_credit_map |= (1 << borrower_ac);
                        brcmf_dbg(DATA, "borrow credit from: %d\n", lender_ac);
                        return 0;
                }
        }
-       fws->fifo_credit_map &= ~(1 << BRCMF_FWS_FIFO_AC_BE);
+       fws->fifo_credit_map &= ~(1 << borrower_ac);
        return -ENAVAIL;
 }
 
@@ -2216,9 +2242,10 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
                        }
                        continue;
                }
-               while ((fws->fifo_credit[fifo] > 0) ||
+
+               while ((fws->fifo_credit[fifo]) ||
                       ((!fws->bcmc_credit_check) &&
-                       (fifo == BRCMF_FWS_FIFO_BCMC))) {
+                               (fifo == BRCMF_FWS_FIFO_BCMC))) {
                        skb = brcmf_fws_deq(fws, fifo);
                        if (!skb)
                                break;
@@ -2228,10 +2255,14 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
                        if (fws->bus_flow_blocked)
                                break;
                }
-               if ((fifo == BRCMF_FWS_FIFO_AC_BE) &&
-                   (fws->fifo_credit[fifo] <= 0) &&
-                   (!fws->bus_flow_blocked)) {
-                       while (brcmf_fws_borrow_credit(fws) == 0) {
+
+               if (fifo >= BRCMF_FWS_FIFO_AC_BE &&
+                   fifo <= BRCMF_FWS_FIFO_AC_VO &&
+                   fws->fifo_credit[fifo] == 0 &&
+                   !fws->bus_flow_blocked) {
+                       while (brcmf_fws_borrow_credit(fws,
+                                                      fifo - 1, fifo,
+                                                      true) == 0) {
                                skb = brcmf_fws_deq(fws, fifo);
                                if (!skb) {
                                        brcmf_fws_return_credits(fws, fifo, 1);
index 8bb4f1f..f1a20db 100644 (file)
@@ -54,6 +54,7 @@
 #define BRCMF_IOCTL_REQ_PKTID                  0xFFFE
 
 #define BRCMF_MSGBUF_MAX_PKT_SIZE              2048
+#define BRCMF_MSGBUF_MAX_CTL_PKT_SIZE           8192
 #define BRCMF_MSGBUF_RXBUFPOST_THRESHOLD       32
 #define BRCMF_MSGBUF_MAX_IOCTLRESPBUF_POST     8
 #define BRCMF_MSGBUF_MAX_EVENTBUF_POST         8
@@ -1028,7 +1029,7 @@ brcmf_msgbuf_rxbuf_ctrl_post(struct brcmf_msgbuf *msgbuf, bool event_buf,
                rx_bufpost = (struct msgbuf_rx_ioctl_resp_or_event *)ret_ptr;
                memset(rx_bufpost, 0, sizeof(*rx_bufpost));
 
-               skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_PKT_SIZE);
+               skb = brcmu_pkt_buf_get_skb(BRCMF_MSGBUF_MAX_CTL_PKT_SIZE);
 
                if (skb == NULL) {
                        bphy_err(drvr, "Failed to alloc SKB\n");
index b886b56..a755426 100644 (file)
@@ -17,7 +17,6 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
 {
        struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio;
        struct device_node *root, *np = dev->of_node;
-       struct property *prop;
        int irq;
        u32 irqf;
        u32 val;
@@ -25,8 +24,22 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
        /* Set board-type to the first string of the machine compatible prop */
        root = of_find_node_by_path("/");
        if (root) {
-               prop = of_find_property(root, "compatible", NULL);
-               settings->board_type = of_prop_next_string(prop, NULL);
+               int i, len;
+               char *board_type;
+               const char *tmp;
+
+               of_property_read_string_index(root, "compatible", 0, &tmp);
+
+               /* get rid of '/' in the compatible string to be able to find the FW */
+               len = strlen(tmp) + 1;
+               board_type = devm_kzalloc(dev, len, GFP_KERNEL);
+               strscpy(board_type, tmp, len);
+               for (i = 0; i < board_type[i]; i++) {
+                       if (board_type[i] == '/')
+                               board_type[i] = '-';
+               }
+               settings->board_type = board_type;
+
                of_node_put(root);
        }
 
index d2795dc..debd887 100644 (file)
@@ -1700,7 +1700,7 @@ static s32 brcmf_p2p_pub_af_tx(struct brcmf_cfg80211_info *cfg,
        return err;
 }
 
-static bool brcmf_p2p_check_dwell_overflow(s32 requested_dwell,
+static bool brcmf_p2p_check_dwell_overflow(u32 requested_dwell,
                                           unsigned long dwell_jiffies)
 {
        if ((requested_dwell & CUSTOM_RETRY_MASK) &&
@@ -1738,8 +1738,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
        unsigned long dwell_jiffies = 0;
        bool dwell_overflow = false;
 
-       s32 requested_dwell = af_params->dwell_time;
-
+       u32 requested_dwell = le32_to_cpu(af_params->dwell_time);
        action_frame = &af_params->action_frame;
        action_frame_len = le16_to_cpu(action_frame->len);
 
index 310d807..e8712ad 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/mmc/sdio_ids.h>
 #include <linux/mmc/sdio_func.h>
 #include <linux/mmc/card.h>
+#include <linux/mmc/core.h>
 #include <linux/semaphore.h>
 #include <linux/firmware.h>
 #include <linux/module.h>
@@ -648,6 +649,8 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
        BRCMF_FW_ENTRY(CY_CC_43012_CHIP_ID, 0xFFFFFFFF, 43012)
 };
 
+#define TXCTL_CREDITS  2
+
 static void pkt_align(struct sk_buff *p, int len, int align)
 {
        uint datalign;
@@ -661,8 +664,16 @@ static void pkt_align(struct sk_buff *p, int len, int align)
 /* To check if there's window offered */
 static bool data_ok(struct brcmf_sdio *bus)
 {
-       return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
-              ((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
+       /* Reserve TXCTL_CREDITS credits for txctl */
+       return (bus->tx_max - bus->tx_seq) > TXCTL_CREDITS &&
+              ((bus->tx_max - bus->tx_seq) & 0x80) == 0;
+}
+
+/* To check if there's window offered */
+static bool txctl_ok(struct brcmf_sdio *bus)
+{
+       return (bus->tx_max - bus->tx_seq) != 0 &&
+              ((bus->tx_max - bus->tx_seq) & 0x80) == 0;
 }
 
 static int
@@ -2668,7 +2679,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
        brcmf_sdio_clrintr(bus);
 
        if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
-           data_ok(bus)) {
+           txctl_ok(bus)) {
                sdio_claim_host(bus->sdiodev->func1);
                if (bus->ctrl_frame_stat) {
                        err = brcmf_sdio_tx_ctrlframe(bus,  bus->ctrl_frame_buf,
@@ -2676,6 +2687,9 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
                        bus->ctrl_frame_err = err;
                        wmb();
                        bus->ctrl_frame_stat = false;
+                       if (err)
+                               brcmf_err("sdio ctrlframe tx failed err=%d\n",
+                                         err);
                }
                sdio_release_host(bus->sdiodev->func1);
                brcmf_sdio_wait_event_wakeup(bus);
@@ -3699,7 +3713,11 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
                        if (bus->idlecount > bus->idletime) {
                                brcmf_dbg(SDIO, "idle\n");
                                sdio_claim_host(bus->sdiodev->func1);
-                               brcmf_sdio_wd_timer(bus, false);
+#ifdef DEBUG
+                               if (!BRCMF_FWCON_ON() ||
+                                   bus->console_interval == 0)
+#endif
+                                       brcmf_sdio_wd_timer(bus, false);
                                bus->idlecount = 0;
                                brcmf_sdio_bus_sleep(bus, true, false);
                                sdio_release_host(bus->sdiodev->func1);
@@ -4109,6 +4127,36 @@ int brcmf_sdio_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
        return 0;
 }
 
+static int brcmf_sdio_bus_reset(struct device *dev)
+{
+       int ret = 0;
+       struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+       struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
+
+       brcmf_dbg(SDIO, "Enter\n");
+
+       /* start by unregistering irqs */
+       brcmf_sdiod_intr_unregister(sdiodev);
+
+       brcmf_sdiod_remove(sdiodev);
+
+       /* reset the adapter */
+       sdio_claim_host(sdiodev->func1);
+       mmc_hw_reset(sdiodev->func1->card->host);
+       sdio_release_host(sdiodev->func1);
+
+       brcmf_bus_change_state(sdiodev->bus_if, BRCMF_BUS_DOWN);
+
+       ret = brcmf_sdiod_probe(sdiodev);
+       if (ret) {
+               brcmf_err("Failed to probe after sdio device reset: ret %d\n",
+                         ret);
+               brcmf_sdiod_remove(sdiodev);
+       }
+
+       return ret;
+}
+
 static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
        .stop = brcmf_sdio_bus_stop,
        .preinit = brcmf_sdio_bus_preinit,
@@ -4120,7 +4168,8 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
        .get_ramsize = brcmf_sdio_bus_get_ramsize,
        .get_memdump = brcmf_sdio_bus_get_memdump,
        .get_fwname = brcmf_sdio_get_fwname,
-       .debugfs_create = brcmf_sdio_debugfs_create
+       .debugfs_create = brcmf_sdio_debugfs_create,
+       .reset = brcmf_sdio_bus_reset
 };
 
 #define BRCMF_SDIO_FW_CODE     0
index 163fd66..1210892 100644 (file)
@@ -367,6 +367,9 @@ static inline void brcmf_sdiod_freezer_uncount(struct brcmf_sdio_dev *sdiodev)
 }
 #endif /* CONFIG_PM_SLEEP */
 
+int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev);
+int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev);
+
 struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
 void brcmf_sdio_remove(struct brcmf_sdio *bus);
 void brcmf_sdio_isr(struct brcmf_sdio *bus);
index c0a5449..c1b9ac6 100644 (file)
@@ -1,7 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <net/mac80211.h>
 #include <linux/bcma/bcma_driver_chipcommon.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
 
 #include "mac80211_if.h"
 #include "pub.h"
 
 static void brcms_radio_led_ctrl(struct brcms_info *wl, bool state)
 {
-       if (wl->radio_led.gpio == -1)
+       if (!wl->radio_led.gpiod)
                return;
 
-       if (wl->radio_led.active_low)
-               state = !state;
-
        if (state)
-               gpio_set_value(wl->radio_led.gpio, 1);
+               gpiod_set_value(wl->radio_led.gpiod, 1);
        else
-               gpio_set_value(wl->radio_led.gpio, 0);
+               gpiod_set_value(wl->radio_led.gpiod, 0);
 }
 
 
@@ -45,8 +44,8 @@ void brcms_led_unregister(struct brcms_info *wl)
 {
        if (wl->led_dev.dev)
                led_classdev_unregister(&wl->led_dev);
-       if (wl->radio_led.gpio != -1)
-               gpio_free(wl->radio_led.gpio);
+       if (wl->radio_led.gpiod)
+               gpiochip_free_own_desc(wl->radio_led.gpiod);
 }
 
 int brcms_led_register(struct brcms_info *wl)
@@ -61,12 +60,8 @@ int brcms_led_register(struct brcms_info *wl)
                &sprom->gpio1,
                &sprom->gpio2,
                &sprom->gpio3 };
-       unsigned gpio = -1;
-       bool active_low = false;
-
-       /* none by default */
-       radio_led->gpio = -1;
-       radio_led->active_low = false;
+       int hwnum = -1;
+       enum gpio_lookup_flags lflags = GPIO_ACTIVE_HIGH;
 
        if (!bcma_gpio || !gpio_is_valid(bcma_gpio->base))
                return -ENODEV;
@@ -75,30 +70,26 @@ int brcms_led_register(struct brcms_info *wl)
        for (i = 0; i < BRCMS_LED_NO; i++) {
                u8 led = *leds[i];
                if ((led & BRCMS_LED_BEH_MASK) == BRCMS_LED_RADIO) {
-                       gpio = bcma_gpio->base + i;
+                       hwnum = i;
                        if (led & BRCMS_LED_AL_MASK)
-                               active_low = true;
+                               lflags = GPIO_ACTIVE_LOW;
                        break;
                }
        }
 
-       if (gpio == -1 || !gpio_is_valid(gpio))
+       /* No LED, bail out */
+       if (hwnum == -1)
                return -ENODEV;
 
-       /* request and configure LED gpio */
-       err = gpio_request_one(gpio,
-                               active_low ? GPIOF_OUT_INIT_HIGH
-                                       : GPIOF_OUT_INIT_LOW,
-                               "radio on");
-       if (err) {
-               wiphy_err(wl->wiphy, "requesting led gpio %d failed (err: %d)\n",
-                         gpio, err);
-               return err;
-       }
-       err = gpio_direction_output(gpio, 1);
-       if (err) {
-               wiphy_err(wl->wiphy, "cannot set led gpio %d to output (err: %d)\n",
-                         gpio, err);
+       /* Try to obtain this LED GPIO line */
+       radio_led->gpiod = gpiochip_request_own_desc(bcma_gpio, hwnum,
+                                                    "radio on", lflags,
+                                                    GPIOD_OUT_LOW);
+
+       if (IS_ERR(radio_led->gpiod)) {
+               err = PTR_ERR(radio_led->gpiod);
+               wiphy_err(wl->wiphy, "requesting led GPIO failed (err: %d)\n",
+                         err);
                return err;
        }
 
@@ -117,11 +108,8 @@ int brcms_led_register(struct brcms_info *wl)
                return err;
        }
 
-       wiphy_info(wl->wiphy, "registered radio enabled led device: %s gpio: %d\n",
-                  wl->radio_led.name,
-                  gpio);
-       radio_led->gpio = gpio;
-       radio_led->active_low = active_low;
+       wiphy_info(wl->wiphy, "registered radio enabled led device: %s\n",
+                  wl->radio_led.name);
 
        return 0;
 }
index 17a0b1f..d65f5c2 100644 (file)
 
 #ifndef _BRCM_LED_H_
 #define _BRCM_LED_H_
+
+struct gpio_desc;
+
 struct brcms_led {
        char name[32];
-       unsigned gpio;
-       bool active_low;
+       struct gpio_desc *gpiod;
 };
 
 #ifdef CONFIG_BCMA_DRIVER_GPIO
index 827bb6d..3166724 100644 (file)
@@ -74,16 +74,19 @@ MODULE_DEVICE_TABLE(pci, card_ids);
 
 static int airo_pci_probe(struct pci_dev *, const struct pci_device_id *);
 static void airo_pci_remove(struct pci_dev *);
-static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state);
-static int airo_pci_resume(struct pci_dev *pdev);
+static int __maybe_unused airo_pci_suspend(struct device *dev);
+static int __maybe_unused airo_pci_resume(struct device *dev);
+
+static SIMPLE_DEV_PM_OPS(airo_pci_pm_ops,
+                        airo_pci_suspend,
+                        airo_pci_resume);
 
 static struct pci_driver airo_driver = {
-       .name     = DRV_NAME,
-       .id_table = card_ids,
-       .probe    = airo_pci_probe,
-       .remove   = airo_pci_remove,
-       .suspend  = airo_pci_suspend,
-       .resume   = airo_pci_resume,
+       .name      = DRV_NAME,
+       .id_table  = card_ids,
+       .probe     = airo_pci_probe,
+       .remove    = airo_pci_remove,
+       .driver.pm = &airo_pci_pm_ops,
 };
 #endif /* CONFIG_PCI */
 
@@ -2450,7 +2453,7 @@ static void mpi_unmap_card(struct pci_dev *pci)
 
 /*************************************************************
  *  This routine assumes that descriptors have been setup .
- *  Run at insmod time or after reset  when the decriptors
+ *  Run at insmod time or after reset when the descriptors
  *  have been initialized . Returns 0 if all is well nz
  *  otherwise . Does not allocate memory but sets up card
  *  using previously allocated descriptors.
@@ -3113,7 +3116,7 @@ static int airo_thread(void *data) {
                                }
                                break;
                        }
-                       current->state = TASK_RUNNING;
+                       __set_current_state(TASK_RUNNING);
                        remove_wait_queue(&ai->thr_wait, &wait);
                        locked = 1;
                }
@@ -5573,9 +5576,9 @@ static void airo_pci_remove(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
-static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused airo_pci_suspend(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct airo_info *ai = dev->ml_priv;
        Cmd cmd;
        Resp rsp;
@@ -5591,25 +5594,21 @@ static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
                return -EAGAIN;
        disable_MAC(ai, 0);
        netif_device_detach(dev);
-       ai->power = state;
+       ai->power = PMSG_SUSPEND;
        cmd.cmd = HOSTSLEEP;
        issuecommand(ai, &cmd, &rsp);
 
-       pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
-       pci_save_state(pdev);
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
+       device_wakeup_enable(dev_d);
        return 0;
 }
 
-static int airo_pci_resume(struct pci_dev *pdev)
+static int __maybe_unused airo_pci_resume(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
        struct airo_info *ai = dev->ml_priv;
-       pci_power_t prev_state = pdev->current_state;
+       pci_power_t prev_state = to_pci_dev(dev_d)->current_state;
 
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-       pci_enable_wake(pdev, PCI_D0, 0);
+       device_wakeup_disable(dev_d);
 
        if (prev_state != PCI_D1) {
                reset_card(dev, 0);
index d003869..b1e7b44 100644 (file)
@@ -16,7 +16,7 @@ config IPW2100
          A driver for the Intel PRO/Wireless 2100 Network
          Connection 802.11b wireless network adapter.
 
-         See <file:Documentation/networking/device_drivers/intel/ipw2100.rst>
+         See <file:Documentation/networking/device_drivers/wifi/intel/ipw2100.rst>
          for information on the capabilities currently enabled in this driver
          and for tips for debugging issues and problems.
 
@@ -28,7 +28,7 @@ config IPW2100
          You will also very likely need the Wireless Tools in order to
          configure your card:
 
-         <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
+         <https://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
 
          It is recommended that you compile this driver as a module (M)
          rather than built-in (Y). This driver requires firmware at device
@@ -78,7 +78,7 @@ config IPW2200
          A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network
          Connection adapters.
 
-         See <file:Documentation/networking/device_drivers/intel/ipw2200.rst>
+         See <file:Documentation/networking/device_drivers/wifi/intel/ipw2200.rst>
          for information on the capabilities currently enabled in this
          driver and for tips for debugging issues and problems.
 
@@ -90,7 +90,7 @@ config IPW2200
          You will also very likely need the Wireless Tools in order to
          configure your card:
 
-         <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
+         <https://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
 
          It is recommended that you compile this driver as a module (M)
          rather than built-in (Y). This driver requires firmware at device
index 624fe72..461e955 100644 (file)
@@ -2295,10 +2295,11 @@ static int ipw2100_alloc_skb(struct ipw2100_priv *priv,
                return -ENOMEM;
 
        packet->rxp = (struct ipw2100_rx *)packet->skb->data;
-       packet->dma_addr = pci_map_single(priv->pci_dev, packet->skb->data,
+       packet->dma_addr = dma_map_single(&priv->pci_dev->dev,
+                                         packet->skb->data,
                                          sizeof(struct ipw2100_rx),
-                                         PCI_DMA_FROMDEVICE);
-       if (pci_dma_mapping_error(priv->pci_dev, packet->dma_addr)) {
+                                         DMA_FROM_DEVICE);
+       if (dma_mapping_error(&priv->pci_dev->dev, packet->dma_addr)) {
                dev_kfree_skb(packet->skb);
                return -ENOMEM;
        }
@@ -2479,9 +2480,8 @@ static void isr_rx(struct ipw2100_priv *priv, int i,
                return;
        }
 
-       pci_unmap_single(priv->pci_dev,
-                        packet->dma_addr,
-                        sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE);
+       dma_unmap_single(&priv->pci_dev->dev, packet->dma_addr,
+                        sizeof(struct ipw2100_rx), DMA_FROM_DEVICE);
 
        skb_put(packet->skb, status->frame_size);
 
@@ -2563,8 +2563,8 @@ static void isr_rx_monitor(struct ipw2100_priv *priv, int i,
                return;
        }
 
-       pci_unmap_single(priv->pci_dev, packet->dma_addr,
-                        sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE);
+       dma_unmap_single(&priv->pci_dev->dev, packet->dma_addr,
+                        sizeof(struct ipw2100_rx), DMA_FROM_DEVICE);
        memmove(packet->skb->data + sizeof(struct ipw_rt_hdr),
                packet->skb->data, status->frame_size);
 
@@ -2689,9 +2689,9 @@ static void __ipw2100_rx_process(struct ipw2100_priv *priv)
 
                /* Sync the DMA for the RX buffer so CPU is sure to get
                 * the correct values */
-               pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr,
-                                           sizeof(struct ipw2100_rx),
-                                           PCI_DMA_FROMDEVICE);
+               dma_sync_single_for_cpu(&priv->pci_dev->dev, packet->dma_addr,
+                                       sizeof(struct ipw2100_rx),
+                                       DMA_FROM_DEVICE);
 
                if (unlikely(ipw2100_corruption_check(priv, i))) {
                        ipw2100_corruption_detected(priv, i);
@@ -2923,9 +2923,8 @@ static int __ipw2100_tx_process(struct ipw2100_priv *priv)
                                     (packet->index + 1 + i) % txq->entries,
                                     tbd->host_addr, tbd->buf_length);
 
-                       pci_unmap_single(priv->pci_dev,
-                                        tbd->host_addr,
-                                        tbd->buf_length, PCI_DMA_TODEVICE);
+                       dma_unmap_single(&priv->pci_dev->dev, tbd->host_addr,
+                                        tbd->buf_length, DMA_TO_DEVICE);
                }
 
                libipw_txb_free(packet->info.d_struct.txb);
@@ -3165,15 +3164,13 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
                        tbd->buf_length = packet->info.d_struct.txb->
                            fragments[i]->len - LIBIPW_3ADDR_LEN;
 
-                       tbd->host_addr = pci_map_single(priv->pci_dev,
+                       tbd->host_addr = dma_map_single(&priv->pci_dev->dev,
                                                        packet->info.d_struct.
-                                                       txb->fragments[i]->
-                                                       data +
+                                                       txb->fragments[i]->data +
                                                        LIBIPW_3ADDR_LEN,
                                                        tbd->buf_length,
-                                                       PCI_DMA_TODEVICE);
-                       if (pci_dma_mapping_error(priv->pci_dev,
-                                                 tbd->host_addr)) {
+                                                       DMA_TO_DEVICE);
+                       if (dma_mapping_error(&priv->pci_dev->dev, tbd->host_addr)) {
                                IPW_DEBUG_TX("dma mapping error\n");
                                break;
                        }
@@ -3182,10 +3179,10 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
                                     txq->next, tbd->host_addr,
                                     tbd->buf_length);
 
-                       pci_dma_sync_single_for_device(priv->pci_dev,
-                                                      tbd->host_addr,
-                                                      tbd->buf_length,
-                                                      PCI_DMA_TODEVICE);
+                       dma_sync_single_for_device(&priv->pci_dev->dev,
+                                                  tbd->host_addr,
+                                                  tbd->buf_length,
+                                                  DMA_TO_DEVICE);
 
                        txq->next++;
                        txq->next %= txq->entries;
@@ -3440,9 +3437,9 @@ static int ipw2100_msg_allocate(struct ipw2100_priv *priv)
                return -ENOMEM;
 
        for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) {
-               v = pci_zalloc_consistent(priv->pci_dev,
-                                         sizeof(struct ipw2100_cmd_header),
-                                         &p);
+               v = dma_alloc_coherent(&priv->pci_dev->dev,
+                                      sizeof(struct ipw2100_cmd_header), &p,
+                                      GFP_KERNEL);
                if (!v) {
                        printk(KERN_ERR DRV_NAME ": "
                               "%s: PCI alloc failed for msg "
@@ -3461,11 +3458,10 @@ static int ipw2100_msg_allocate(struct ipw2100_priv *priv)
                return 0;
 
        for (j = 0; j < i; j++) {
-               pci_free_consistent(priv->pci_dev,
-                                   sizeof(struct ipw2100_cmd_header),
-                                   priv->msg_buffers[j].info.c_struct.cmd,
-                                   priv->msg_buffers[j].info.c_struct.
-                                   cmd_phys);
+               dma_free_coherent(&priv->pci_dev->dev,
+                                 sizeof(struct ipw2100_cmd_header),
+                                 priv->msg_buffers[j].info.c_struct.cmd,
+                                 priv->msg_buffers[j].info.c_struct.cmd_phys);
        }
 
        kfree(priv->msg_buffers);
@@ -3496,11 +3492,10 @@ static void ipw2100_msg_free(struct ipw2100_priv *priv)
                return;
 
        for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) {
-               pci_free_consistent(priv->pci_dev,
-                                   sizeof(struct ipw2100_cmd_header),
-                                   priv->msg_buffers[i].info.c_struct.cmd,
-                                   priv->msg_buffers[i].info.c_struct.
-                                   cmd_phys);
+               dma_free_coherent(&priv->pci_dev->dev,
+                                 sizeof(struct ipw2100_cmd_header),
+                                 priv->msg_buffers[i].info.c_struct.cmd,
+                                 priv->msg_buffers[i].info.c_struct.cmd_phys);
        }
 
        kfree(priv->msg_buffers);
@@ -4323,7 +4318,8 @@ static int status_queue_allocate(struct ipw2100_priv *priv, int entries)
        IPW_DEBUG_INFO("enter\n");
 
        q->size = entries * sizeof(struct ipw2100_status);
-       q->drv = pci_zalloc_consistent(priv->pci_dev, q->size, &q->nic);
+       q->drv = dma_alloc_coherent(&priv->pci_dev->dev, q->size, &q->nic,
+                                   GFP_KERNEL);
        if (!q->drv) {
                IPW_DEBUG_WARNING("Can not allocate status queue.\n");
                return -ENOMEM;
@@ -4339,9 +4335,10 @@ static void status_queue_free(struct ipw2100_priv *priv)
        IPW_DEBUG_INFO("enter\n");
 
        if (priv->status_queue.drv) {
-               pci_free_consistent(priv->pci_dev, priv->status_queue.size,
-                                   priv->status_queue.drv,
-                                   priv->status_queue.nic);
+               dma_free_coherent(&priv->pci_dev->dev,
+                                 priv->status_queue.size,
+                                 priv->status_queue.drv,
+                                 priv->status_queue.nic);
                priv->status_queue.drv = NULL;
        }
 
@@ -4357,7 +4354,8 @@ static int bd_queue_allocate(struct ipw2100_priv *priv,
 
        q->entries = entries;
        q->size = entries * sizeof(struct ipw2100_bd);
-       q->drv = pci_zalloc_consistent(priv->pci_dev, q->size, &q->nic);
+       q->drv = dma_alloc_coherent(&priv->pci_dev->dev, q->size, &q->nic,
+                                   GFP_KERNEL);
        if (!q->drv) {
                IPW_DEBUG_INFO
                    ("can't allocate shared memory for buffer descriptors\n");
@@ -4377,7 +4375,8 @@ static void bd_queue_free(struct ipw2100_priv *priv, struct ipw2100_bd_queue *q)
                return;
 
        if (q->drv) {
-               pci_free_consistent(priv->pci_dev, q->size, q->drv, q->nic);
+               dma_free_coherent(&priv->pci_dev->dev, q->size, q->drv,
+                                 q->nic);
                q->drv = NULL;
        }
 
@@ -4430,16 +4429,16 @@ static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
 
        priv->tx_buffers = kmalloc_array(TX_PENDED_QUEUE_LENGTH,
                                         sizeof(struct ipw2100_tx_packet),
-                                        GFP_ATOMIC);
+                                        GFP_KERNEL);
        if (!priv->tx_buffers) {
                bd_queue_free(priv, &priv->tx_queue);
                return -ENOMEM;
        }
 
        for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) {
-               v = pci_alloc_consistent(priv->pci_dev,
-                                        sizeof(struct ipw2100_data_header),
-                                        &p);
+               v = dma_alloc_coherent(&priv->pci_dev->dev,
+                                      sizeof(struct ipw2100_data_header), &p,
+                                      GFP_KERNEL);
                if (!v) {
                        printk(KERN_ERR DRV_NAME
                               ": %s: PCI alloc failed for tx " "buffers.\n",
@@ -4459,11 +4458,10 @@ static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
                return 0;
 
        for (j = 0; j < i; j++) {
-               pci_free_consistent(priv->pci_dev,
-                                   sizeof(struct ipw2100_data_header),
-                                   priv->tx_buffers[j].info.d_struct.data,
-                                   priv->tx_buffers[j].info.d_struct.
-                                   data_phys);
+               dma_free_coherent(&priv->pci_dev->dev,
+                                 sizeof(struct ipw2100_data_header),
+                                 priv->tx_buffers[j].info.d_struct.data,
+                                 priv->tx_buffers[j].info.d_struct.data_phys);
        }
 
        kfree(priv->tx_buffers);
@@ -4540,12 +4538,10 @@ static void ipw2100_tx_free(struct ipw2100_priv *priv)
                        priv->tx_buffers[i].info.d_struct.txb = NULL;
                }
                if (priv->tx_buffers[i].info.d_struct.data)
-                       pci_free_consistent(priv->pci_dev,
-                                           sizeof(struct ipw2100_data_header),
-                                           priv->tx_buffers[i].info.d_struct.
-                                           data,
-                                           priv->tx_buffers[i].info.d_struct.
-                                           data_phys);
+                       dma_free_coherent(&priv->pci_dev->dev,
+                                         sizeof(struct ipw2100_data_header),
+                                         priv->tx_buffers[i].info.d_struct.data,
+                                         priv->tx_buffers[i].info.d_struct.data_phys);
        }
 
        kfree(priv->tx_buffers);
@@ -4608,9 +4604,10 @@ static int ipw2100_rx_allocate(struct ipw2100_priv *priv)
                return 0;
 
        for (j = 0; j < i; j++) {
-               pci_unmap_single(priv->pci_dev, priv->rx_buffers[j].dma_addr,
+               dma_unmap_single(&priv->pci_dev->dev,
+                                priv->rx_buffers[j].dma_addr,
                                 sizeof(struct ipw2100_rx_packet),
-                                PCI_DMA_FROMDEVICE);
+                                DMA_FROM_DEVICE);
                dev_kfree_skb(priv->rx_buffers[j].skb);
        }
 
@@ -4662,10 +4659,10 @@ static void ipw2100_rx_free(struct ipw2100_priv *priv)
 
        for (i = 0; i < RX_QUEUE_LENGTH; i++) {
                if (priv->rx_buffers[i].rxp) {
-                       pci_unmap_single(priv->pci_dev,
+                       dma_unmap_single(&priv->pci_dev->dev,
                                         priv->rx_buffers[i].dma_addr,
                                         sizeof(struct ipw2100_rx),
-                                        PCI_DMA_FROMDEVICE);
+                                        DMA_FROM_DEVICE);
                        dev_kfree_skb(priv->rx_buffers[i].skb);
                }
        }
@@ -6196,7 +6193,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
        pci_set_master(pci_dev);
        pci_set_drvdata(pci_dev, priv);
 
-       err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+       err = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
        if (err) {
                printk(KERN_WARNING DRV_NAME
                       "Error calling pci_set_dma_mask.\n");
@@ -6397,10 +6394,9 @@ static void ipw2100_pci_remove_one(struct pci_dev *pci_dev)
        IPW_DEBUG_INFO("exit\n");
 }
 
-#ifdef CONFIG_PM
-static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state)
+static int __maybe_unused ipw2100_suspend(struct device *dev_d)
 {
-       struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
+       struct ipw2100_priv *priv = dev_get_drvdata(dev_d);
        struct net_device *dev = priv->net_dev;
 
        IPW_DEBUG_INFO("%s: Going into suspend...\n", dev->name);
@@ -6414,10 +6410,6 @@ static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state)
        /* Remove the PRESENT state of the device */
        netif_device_detach(dev);
 
-       pci_save_state(pci_dev);
-       pci_disable_device(pci_dev);
-       pci_set_power_state(pci_dev, PCI_D3hot);
-
        priv->suspend_at = ktime_get_boottime_seconds();
 
        mutex_unlock(&priv->action_mutex);
@@ -6425,11 +6417,11 @@ static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state)
        return 0;
 }
 
-static int ipw2100_resume(struct pci_dev *pci_dev)
+static int __maybe_unused ipw2100_resume(struct device *dev_d)
 {
+       struct pci_dev *pci_dev = to_pci_dev(dev_d);
        struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
        struct net_device *dev = priv->net_dev;
-       int err;
        u32 val;
 
        if (IPW2100_PM_DISABLED)
@@ -6439,16 +6431,6 @@ static int ipw2100_resume(struct pci_dev *pci_dev)
 
        IPW_DEBUG_INFO("%s: Coming out of suspend...\n", dev->name);
 
-       pci_set_power_state(pci_dev, PCI_D0);
-       err = pci_enable_device(pci_dev);
-       if (err) {
-               printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
-                      dev->name);
-               mutex_unlock(&priv->action_mutex);
-               return err;
-       }
-       pci_restore_state(pci_dev);
-
        /*
         * Suspend/Resume resets the PCI configuration space, so we have to
         * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
@@ -6473,7 +6455,6 @@ static int ipw2100_resume(struct pci_dev *pci_dev)
 
        return 0;
 }
-#endif
 
 static void ipw2100_shutdown(struct pci_dev *pci_dev)
 {
@@ -6539,15 +6520,14 @@ static const struct pci_device_id ipw2100_pci_id_table[] = {
 
 MODULE_DEVICE_TABLE(pci, ipw2100_pci_id_table);
 
+static SIMPLE_DEV_PM_OPS(ipw2100_pm_ops, ipw2100_suspend, ipw2100_resume);
+
 static struct pci_driver ipw2100_pci_driver = {
        .name = DRV_NAME,
        .id_table = ipw2100_pci_id_table,
        .probe = ipw2100_pci_init_one,
        .remove = ipw2100_pci_remove_one,
-#ifdef CONFIG_PM
-       .suspend = ipw2100_suspend,
-       .resume = ipw2100_resume,
-#endif
+       .driver.pm = &ipw2100_pm_ops,
        .shutdown = ipw2100_shutdown,
 };
 
@@ -8352,7 +8332,7 @@ static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw)
        if (IPW2100_FW_MAJOR(h->version) != IPW2100_FW_MAJOR_VERSION) {
                printk(KERN_WARNING DRV_NAME ": Firmware image not compatible "
                       "(detected version id of %u). "
-                      "See Documentation/networking/device_drivers/intel/ipw2100.rst\n",
+                      "See Documentation/networking/device_drivers/wifi/intel/ipw2100.rst\n",
                       h->version);
                return 1;
        }
index 661e63b..129ef2f 100644 (file)
@@ -3442,8 +3442,9 @@ static void ipw_rx_queue_reset(struct ipw_priv *priv,
                /* In the reset function, these buffers may have been allocated
                 * to an SKB, so we need to unmap and free potential storage */
                if (rxq->pool[i].skb != NULL) {
-                       pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
-                                        IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+                       dma_unmap_single(&priv->pci_dev->dev,
+                                        rxq->pool[i].dma_addr,
+                                        IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
                        dev_kfree_skb(rxq->pool[i].skb);
                        rxq->pool[i].skb = NULL;
                }
@@ -3774,7 +3775,8 @@ static int ipw_queue_tx_init(struct ipw_priv *priv,
                return -ENOMEM;
 
        q->bd =
-           pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
+           dma_alloc_coherent(&dev->dev, sizeof(q->bd[0]) * count,
+                              &q->q.dma_addr, GFP_KERNEL);
        if (!q->bd) {
                IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
                          sizeof(q->bd[0]) * count);
@@ -3816,9 +3818,10 @@ static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
 
        /* unmap chunks if any */
        for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
-               pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
+               dma_unmap_single(&dev->dev,
+                                le32_to_cpu(bd->u.data.chunk_ptr[i]),
                                 le16_to_cpu(bd->u.data.chunk_len[i]),
-                                PCI_DMA_TODEVICE);
+                                DMA_TO_DEVICE);
                if (txq->txb[txq->q.last_used]) {
                        libipw_txb_free(txq->txb[txq->q.last_used]);
                        txq->txb[txq->q.last_used] = NULL;
@@ -3850,8 +3853,8 @@ static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
        }
 
        /* free buffers belonging to queue itself */
-       pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
-                           q->dma_addr);
+       dma_free_coherent(&dev->dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
+                         q->dma_addr);
        kfree(txq->txb);
 
        /* 0 fill whole structure */
@@ -5196,8 +5199,8 @@ static void ipw_rx_queue_replenish(void *data)
                list_del(element);
 
                rxb->dma_addr =
-                   pci_map_single(priv->pci_dev, rxb->skb->data,
-                                  IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+                   dma_map_single(&priv->pci_dev->dev, rxb->skb->data,
+                                  IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
 
                list_add_tail(&rxb->list, &rxq->rx_free);
                rxq->free_count++;
@@ -5230,8 +5233,9 @@ static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
 
        for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
                if (rxq->pool[i].skb != NULL) {
-                       pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
-                                        IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+                       dma_unmap_single(&priv->pci_dev->dev,
+                                        rxq->pool[i].dma_addr,
+                                        IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
                        dev_kfree_skb(rxq->pool[i].skb);
                }
        }
@@ -8263,9 +8267,8 @@ static void ipw_rx(struct ipw_priv *priv)
                }
                priv->rxq->queue[i] = NULL;
 
-               pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
-                                           IPW_RX_BUF_SIZE,
-                                           PCI_DMA_FROMDEVICE);
+               dma_sync_single_for_cpu(&priv->pci_dev->dev, rxb->dma_addr,
+                                       IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
 
                pkt = (struct ipw_rx_packet *)rxb->skb->data;
                IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
@@ -8417,8 +8420,8 @@ static void ipw_rx(struct ipw_priv *priv)
                        rxb->skb = NULL;
                }
 
-               pci_unmap_single(priv->pci_dev, rxb->dma_addr,
-                                IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+               dma_unmap_single(&priv->pci_dev->dev, rxb->dma_addr,
+                                IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
                list_add_tail(&rxb->list, &priv->rxq->rx_used);
 
                i = (i + 1) % RX_QUEUE_SIZE;
@@ -10217,11 +10220,10 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
                           txb->fragments[i]->len - hdr_len);
 
                tfd->u.data.chunk_ptr[i] =
-                   cpu_to_le32(pci_map_single
-                               (priv->pci_dev,
-                                txb->fragments[i]->data + hdr_len,
-                                txb->fragments[i]->len - hdr_len,
-                                PCI_DMA_TODEVICE));
+                   cpu_to_le32(dma_map_single(&priv->pci_dev->dev,
+                                              txb->fragments[i]->data + hdr_len,
+                                              txb->fragments[i]->len - hdr_len,
+                                              DMA_TO_DEVICE));
                tfd->u.data.chunk_len[i] =
                    cpu_to_le16(txb->fragments[i]->len - hdr_len);
        }
@@ -10251,10 +10253,10 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
                        dev_kfree_skb_any(txb->fragments[i]);
                        txb->fragments[i] = skb;
                        tfd->u.data.chunk_ptr[i] =
-                           cpu_to_le32(pci_map_single
-                                       (priv->pci_dev, skb->data,
-                                        remaining_bytes,
-                                        PCI_DMA_TODEVICE));
+                           cpu_to_le32(dma_map_single(&priv->pci_dev->dev,
+                                                      skb->data,
+                                                      remaining_bytes,
+                                                      DMA_TO_DEVICE));
 
                        le32_add_cpu(&tfd->u.data.num_chunks, 1);
                }
@@ -11620,9 +11622,9 @@ static int ipw_pci_probe(struct pci_dev *pdev,
 
        pci_set_master(pdev);
 
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (!err)
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+               err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (err) {
                printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
                goto out_pci_disable_device;
@@ -11838,10 +11840,9 @@ static void ipw_pci_remove(struct pci_dev *pdev)
        free_firmware();
 }
 
-#ifdef CONFIG_PM
-static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused ipw_pci_suspend(struct device *dev_d)
 {
-       struct ipw_priv *priv = pci_get_drvdata(pdev);
+       struct ipw_priv *priv = dev_get_drvdata(dev_d);
        struct net_device *dev = priv->net_dev;
 
        printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
@@ -11852,33 +11853,20 @@ static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
        /* Remove the PRESENT state of the device */
        netif_device_detach(dev);
 
-       pci_save_state(pdev);
-       pci_disable_device(pdev);
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
        priv->suspend_at = ktime_get_boottime_seconds();
 
        return 0;
 }
 
-static int ipw_pci_resume(struct pci_dev *pdev)
+static int __maybe_unused ipw_pci_resume(struct device *dev_d)
 {
+       struct pci_dev *pdev = to_pci_dev(dev_d);
        struct ipw_priv *priv = pci_get_drvdata(pdev);
        struct net_device *dev = priv->net_dev;
-       int err;
        u32 val;
 
        printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
 
-       pci_set_power_state(pdev, PCI_D0);
-       err = pci_enable_device(pdev);
-       if (err) {
-               printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
-                      dev->name);
-               return err;
-       }
-       pci_restore_state(pdev);
-
        /*
         * Suspend/Resume resets the PCI configuration space, so we have to
         * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
@@ -11900,7 +11888,6 @@ static int ipw_pci_resume(struct pci_dev *pdev)
 
        return 0;
 }
-#endif
 
 static void ipw_pci_shutdown(struct pci_dev *pdev)
 {
@@ -11912,16 +11899,15 @@ static void ipw_pci_shutdown(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
+static SIMPLE_DEV_PM_OPS(ipw_pci_pm_ops, ipw_pci_suspend, ipw_pci_resume);
+
 /* driver initialization stuff */
 static struct pci_driver ipw_driver = {
        .name = DRV_NAME,
        .id_table = card_ids,
        .probe = ipw_pci_probe,
        .remove = ipw_pci_remove,
-#ifdef CONFIG_PM
-       .suspend = ipw_pci_suspend,
-       .resume = ipw_pci_resume,
-#endif
+       .driver.pm = &ipw_pci_pm_ops,
        .shutdown = ipw_pci_shutdown,
 };
 
index da6d420..ad9d7e7 100644 (file)
@@ -1415,7 +1415,7 @@ il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
 /*
  * mac80211 queues, ACs, hardware queues, FIFOs.
  *
- * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
+ * Cf. https://wireless.wiki.kernel.org/en/developers/Documentation/mac80211/queues
  *
  * Mac80211 uses the following numbers, which we get as from it
  * by way of skb_get_queue_mapping(skb):
index 0a02d8a..1f19666 100644 (file)
@@ -1749,7 +1749,7 @@ il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
        u8 done_search = 0;
        u16 high_low;
        s32 sr;
-       u8 tid = MAX_TID_COUNT;
+       u8 tid;
        struct il_tid_data *tid_data;
 
        D_RATE("rate scale calculate new rate for skb\n");
index 348c17c..f78e062 100644 (file)
@@ -4286,8 +4286,8 @@ il_apm_init(struct il_priv *il)
         *    power savings, even without L1.
         */
        if (il->cfg->set_l0s) {
-               pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
-               if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
+               ret = pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
+               if (!ret && (lctl & PCI_EXP_LNKCTL_ASPM_L1)) {
                        /* L1-ASPM enabled; disable(!) L0S  */
                        il_set_bit(il, CSR_GIO_REG,
                                   CSR_GIO_REG_VAL_L0S_ENABLED);
index 36153fa..1085afb 100644 (file)
@@ -31,7 +31,7 @@ config IWLWIFI
          In order to use this driver, you will need a firmware
          image for it. You can obtain the microcode from:
 
-                 <http://wireless.kernel.org/en/users/Drivers/iwlwifi>.
+                 <https://wireless.wiki.kernel.org/en/users/Drivers/iwlwifi>.
 
          The firmware is typically installed in /lib/firmware. You can
          look in the hotplug script /etc/hotplug/firmware.agent to
index 0f4be4b..fdcc129 100644 (file)
@@ -1023,7 +1023,7 @@ struct iwl_wep_cmd {
        u8 global_key_type;
        u8 flags;
        u8 reserved;
-       struct iwl_wep_key key[0];
+       struct iwl_wep_key key[];
 } __packed;
 
 #define WEP_KEY_WEP_TYPE 1
@@ -1305,7 +1305,7 @@ struct iwl_tx_cmd {
         * length is 26 or 30 bytes, followed by payload data
         */
        u8 payload[0];
-       struct ieee80211_hdr hdr[0];
+       struct ieee80211_hdr hdr[];
 } __packed;
 
 /*
@@ -2380,7 +2380,7 @@ struct iwl_scan_cmd {
         * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
         * before requesting another scan.
         */
-       u8 data[0];
+       u8 data[];
 } __packed;
 
 /* Can abort will notify by complete notification with abort status. */
@@ -2475,7 +2475,7 @@ struct iwl_tx_beacon_cmd {
        __le16 tim_idx;
        u8 tim_size;
        u8 reserved1;
-       struct ieee80211_hdr frame[0];  /* beacon frame */
+       struct ieee80211_hdr frame[];   /* beacon frame */
 } __packed;
 
 /******************************************************************************
@@ -3188,7 +3188,7 @@ struct iwl_calib_hdr {
 
 struct iwl_calib_cmd {
        struct iwl_calib_hdr hdr;
-       u8 data[0];
+       u8 data[];
 } __packed;
 
 struct iwl_calib_xtal_freq_cmd {
@@ -3216,7 +3216,7 @@ struct iwl_calib_temperature_offset_v2_cmd {
 /* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */
 struct iwl_calib_chain_noise_reset_cmd {
        struct iwl_calib_hdr hdr;
-       u8 data[0];
+       u8 data[];
 };
 
 /* IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD */
index 6512d25..423d3c3 100644 (file)
@@ -200,6 +200,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
        iwl_leds_init(priv);
 
        wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+       wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_EXT_KEY_ID);
 
        ret = ieee80211_register_hw(priv->hw);
        if (ret) {
index fd719c3..b6c31f0 100644 (file)
@@ -361,7 +361,7 @@ struct iwl_mcc_update_resp_v3 {
        __le16 time;
        __le16 geo_info;
        __le32 n_channels;
-       __le32 channels[0];
+       __le32 channels[];
 } __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */
 
 /**
@@ -390,7 +390,7 @@ struct iwl_mcc_update_resp {
        u8 source_id;
        u8 reserved[3];
        __le32 n_channels;
-       __le32 channels[0];
+       __le32 channels[];
 } __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_4 */
 
 /**
index f1d1fe9..82d59b5 100644 (file)
@@ -293,7 +293,7 @@ struct iwl_tx_cmd {
        __le16 pm_frame_timeout;
        __le16 reserved4;
        u8 payload[0];
-       struct ieee80211_hdr hdr[0];
+       struct ieee80211_hdr hdr[];
 } __packed; /* TX_CMD_API_S_VER_6 */
 
 struct iwl_dram_sec_info {
@@ -319,7 +319,7 @@ struct iwl_tx_cmd_gen2 {
        __le32 flags;
        struct iwl_dram_sec_info dram_info;
        __le32 rate_n_flags;
-       struct ieee80211_hdr hdr[0];
+       struct ieee80211_hdr hdr[];
 } __packed; /* TX_CMD_API_S_VER_7 */
 
 /**
@@ -342,7 +342,7 @@ struct iwl_tx_cmd_gen3 {
        struct iwl_dram_sec_info dram_info;
        __le32 rate_n_flags;
        __le64 ttl;
-       struct ieee80211_hdr hdr[0];
+       struct ieee80211_hdr hdr[];
 } __packed; /* TX_CMD_API_S_VER_8 */
 
 /*
@@ -766,8 +766,8 @@ struct iwl_mvm_compressed_ba_notif {
        __le32 tx_rate;
        __le16 tfd_cnt;
        __le16 ra_tid_cnt;
-       struct iwl_mvm_compressed_ba_tfd tfd[0];
        struct iwl_mvm_compressed_ba_ratid ra_tid[0];
+       struct iwl_mvm_compressed_ba_tfd tfd[];
 } __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */
 
 /**
@@ -784,7 +784,7 @@ struct iwl_mac_beacon_cmd_v6 {
        __le32 template_id;
        __le32 tim_idx;
        __le32 tim_size;
-       struct ieee80211_hdr frame[0];
+       struct ieee80211_hdr frame[];
 } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */
 
 /**
@@ -805,7 +805,7 @@ struct iwl_mac_beacon_cmd_v7 {
        __le32 tim_size;
        __le32 ecsa_offset;
        __le32 csa_offset;
-       struct ieee80211_hdr frame[0];
+       struct ieee80211_hdr frame[];
 } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */
 
 enum iwl_mac_beacon_flags {
@@ -840,7 +840,7 @@ struct iwl_mac_beacon_cmd {
        __le32 tim_size;
        __le32 ecsa_offset;
        __le32 csa_offset;
-       struct ieee80211_hdr frame[0];
+       struct ieee80211_hdr frame[];
 } __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_10 */
 
 struct iwl_beacon_notif {
index 4d3687c..7ea55cf 100644 (file)
@@ -2554,7 +2554,7 @@ int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
                return -EINVAL;
 
        if (fwrt->dump.conf != FW_DBG_INVALID)
-               IWL_WARN(fwrt, "FW already configured (%d) - re-configuring\n",
+               IWL_INFO(fwrt, "FW already configured (%d) - re-configuring\n",
                         fwrt->dump.conf);
 
        /* Send all HCMDs for configuring the FW debug */
index 6e72c27..267ad4e 100644 (file)
@@ -260,7 +260,7 @@ struct hcmd_write_data {
        __be32 cmd_id;
        __be32 flags;
        __be16 length;
-       u8 data[0];
+       u8 data[];
 } __packed;
 
 static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf,
index 244899f..e27c132 100644 (file)
@@ -641,6 +641,6 @@ extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0;
 extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long;
 extern const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0;
 extern const struct iwl_cfg iwlax211_cfg_snj_gf_a0;
-#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
+#endif /* CONFIG_IWLMVM */
 
 #endif /* __IWL_CONFIG_H__ */
index 27116c7..9ce7207 100644 (file)
@@ -480,7 +480,7 @@ void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
        if (!iwlwifi_mod_params.enable_ini)
                return;
 
-       res = request_firmware(&fw, "iwl-debug-yoyo.bin", dev);
+       res = firmware_request_nowarn(&fw, "iwl-debug-yoyo.bin", dev);
        if (res)
                return;
 
index 3008a52..b35b892 100644 (file)
@@ -175,7 +175,7 @@ void iwl_opmode_deregister(const char *name);
 struct iwl_op_mode {
        const struct iwl_op_mode_ops *ops;
 
-       char op_mode_specific[0] __aligned(sizeof(void *));
+       char op_mode_specific[] __aligned(sizeof(void *));
 };
 
 static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
index a301e24..34788e7 100644 (file)
@@ -1006,7 +1006,7 @@ struct iwl_trans {
 
        /* pointer to trans specific struct */
        /*Ensure that this pointer will always be aligned to sizeof pointer */
-       char trans_specific[0] __aligned(sizeof(void *));
+       char trans_specific[] __aligned(sizeof(void *));
 };
 
 const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
index 7791623..9374c85 100644 (file)
@@ -543,6 +543,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 
        hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
        wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+
+       /* The new Tx API does not allow to pass the key or keyid of a MPDU to
+        * the hw, preventing us to control which key(id) to use per MPDU.
+        * Till that's fixed we can't use Extended Key ID for the newer cards.
+        */
+       if (!iwl_mvm_has_new_tx_api(mvm))
+               wiphy_ext_feature_set(hw->wiphy,
+                                     NL80211_EXT_FEATURE_EXT_KEY_ID);
        hw->wiphy->features |= NL80211_FEATURE_HT_IBSS;
 
        hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
@@ -4903,7 +4911,7 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
        if (mvmsta->avg_energy) {
-               sinfo->signal_avg = mvmsta->avg_energy;
+               sinfo->signal_avg = -(s8)mvmsta->avg_energy;
                sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
        }
 
index a7264b2..86b2ebb 100644 (file)
@@ -603,7 +603,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
                                     struct iwl_lq_sta *lq_data, u8 tid,
                                     struct ieee80211_sta *sta)
 {
-       int ret = -EAGAIN;
+       int ret;
 
        IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
                     sta->addr, tid);
index 2797799..9e12475 100644 (file)
@@ -1367,14 +1367,6 @@ out_err:
        return ret;
 }
 
-static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
-{
-       if (tid == IWL_MAX_TID_COUNT)
-               return IEEE80211_AC_VO; /* MGMT */
-
-       return tid_to_mac80211_ac[tid];
-}
-
 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
 {
        struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
index 6a6ce9d..c52d9b5 100644 (file)
@@ -30,7 +30,7 @@ config PRISM54
 
          For more information refer to the p54 wiki:
 
-         http://wireless.kernel.org/en/users/Drivers/p54
+         http://wireless.wiki.kernel.org/en/users/Drivers/p54
 
          Note: You need a motherboard with DMA support to use any of these cards
 
index 2ab34cf..b6c497c 100644 (file)
@@ -3366,8 +3366,8 @@ static void prism2_free_local_data(struct net_device *dev)
 }
 
 
-#if (defined(PRISM2_PCI) && defined(CONFIG_PM)) || defined(PRISM2_PCCARD)
-static void prism2_suspend(struct net_device *dev)
+#if defined(PRISM2_PCI) || defined(PRISM2_PCCARD)
+static void __maybe_unused prism2_suspend(struct net_device *dev)
 {
        struct hostap_interface *iface;
        struct local_info *local;
@@ -3385,7 +3385,7 @@ static void prism2_suspend(struct net_device *dev)
        /* Disable hardware and firmware */
        prism2_hw_shutdown(dev, 0);
 }
-#endif /* (PRISM2_PCI && CONFIG_PM) || PRISM2_PCCARD */
+#endif /* PRISM2_PCI || PRISM2_PCCARD */
 
 
 /* These might at some point be compiled separately and used as separate
index 0c2aa88..101887e 100644 (file)
@@ -403,36 +403,23 @@ static void prism2_pci_remove(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
-
-#ifdef CONFIG_PM
-static int prism2_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused prism2_pci_suspend(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
 
        if (netif_running(dev)) {
                netif_stop_queue(dev);
                netif_device_detach(dev);
        }
        prism2_suspend(dev);
-       pci_save_state(pdev);
-       pci_disable_device(pdev);
-       pci_set_power_state(pdev, PCI_D3hot);
 
        return 0;
 }
 
-static int prism2_pci_resume(struct pci_dev *pdev)
+static int __maybe_unused prism2_pci_resume(struct device *dev_d)
 {
-       struct net_device *dev = pci_get_drvdata(pdev);
-       int err;
-
-       err = pci_enable_device(pdev);
-       if (err) {
-               printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
-                      dev->name);
-               return err;
-       }
-       pci_restore_state(pdev);
+       struct net_device *dev = dev_get_drvdata(dev_d);
+
        prism2_hw_config(dev, 0);
        if (netif_running(dev)) {
                netif_device_attach(dev);
@@ -441,20 +428,19 @@ static int prism2_pci_resume(struct pci_dev *pdev)
 
        return 0;
 }
-#endif /* CONFIG_PM */
-
 
 MODULE_DEVICE_TABLE(pci, prism2_pci_id_table);
 
+static SIMPLE_DEV_PM_OPS(prism2_pci_pm_ops,
+                        prism2_pci_suspend,
+                        prism2_pci_resume);
+
 static struct pci_driver prism2_pci_driver = {
        .name           = "hostap_pci",
        .id_table       = prism2_pci_id_table,
        .probe          = prism2_pci_probe,
        .remove         = prism2_pci_remove,
-#ifdef CONFIG_PM
-       .suspend        = prism2_pci_suspend,
-       .resume         = prism2_pci_resume,
-#endif /* CONFIG_PM */
+       .driver.pm      = &prism2_pci_pm_ops,
 };
 
 module_pci_driver(prism2_pci_driver);
index c470ee2..f62730a 100644 (file)
@@ -27,7 +27,7 @@ config HERMES
 
          You will also very likely also need the Wireless Tools in order to
          configure your card and that /etc/pcmcia/wireless.opts works :
-         <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>
+         <https://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>
 
 config HERMES_PRISM
        bool "Support Prism 2/2.5 chipset"
@@ -120,7 +120,7 @@ config PCMCIA_HERMES
 
          You will very likely need the Wireless Tools in order to
          configure your card and that /etc/pcmcia/wireless.opts works:
-         <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
+         <https://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
 
 config PCMCIA_SPECTRUM
        tristate "Symbol Spectrum24 Trilogy PCMCIA card support"
index 048693b..96a03d1 100644 (file)
@@ -290,8 +290,7 @@ static struct pci_driver orinoco_nortel_driver = {
        .id_table       = orinoco_nortel_id_table,
        .probe          = orinoco_nortel_init_one,
        .remove         = orinoco_nortel_remove_one,
-       .suspend        = orinoco_pci_suspend,
-       .resume         = orinoco_pci_resume,
+       .driver.pm      = &orinoco_pci_pm_ops,
 };
 
 static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
index 4938a22..f3c86b0 100644 (file)
@@ -230,8 +230,7 @@ static struct pci_driver orinoco_pci_driver = {
        .id_table       = orinoco_pci_id_table,
        .probe          = orinoco_pci_init_one,
        .remove         = orinoco_pci_remove_one,
-       .suspend        = orinoco_pci_suspend,
-       .resume         = orinoco_pci_resume,
+       .driver.pm      = &orinoco_pci_pm_ops,
 };
 
 static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
index 43f5b9f..d49d940 100644 (file)
@@ -18,51 +18,37 @@ struct orinoco_pci_card {
        void __iomem *attr_io;
 };
 
-#ifdef CONFIG_PM
-static int orinoco_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused orinoco_pci_suspend(struct device *dev_d)
 {
+       struct pci_dev *pdev = to_pci_dev(dev_d);
        struct orinoco_private *priv = pci_get_drvdata(pdev);
 
        orinoco_down(priv);
        free_irq(pdev->irq, priv);
-       pci_save_state(pdev);
-       pci_disable_device(pdev);
-       pci_set_power_state(pdev, PCI_D3hot);
 
        return 0;
 }
 
-static int orinoco_pci_resume(struct pci_dev *pdev)
+static int __maybe_unused orinoco_pci_resume(struct device *dev_d)
 {
+       struct pci_dev *pdev = to_pci_dev(dev_d);
        struct orinoco_private *priv = pci_get_drvdata(pdev);
        struct net_device *dev = priv->ndev;
        int err;
 
-       pci_set_power_state(pdev, PCI_D0);
-       err = pci_enable_device(pdev);
-       if (err) {
-               printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
-                      dev->name);
-               return err;
-       }
-       pci_restore_state(pdev);
-
        err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED,
                          dev->name, priv);
        if (err) {
                printk(KERN_ERR "%s: cannot re-allocate IRQ on resume\n",
                       dev->name);
-               pci_disable_device(pdev);
                return -EBUSY;
        }
 
-       err = orinoco_up(priv);
-
-       return err;
+       return orinoco_up(priv);
 }
-#else
-#define orinoco_pci_suspend NULL
-#define orinoco_pci_resume NULL
-#endif
+
+static SIMPLE_DEV_PM_OPS(orinoco_pci_pm_ops,
+                        orinoco_pci_suspend,
+                        orinoco_pci_resume);
 
 #endif /* _ORINOCO_PCI_H */
index 2213520..16dada9 100644 (file)
@@ -336,8 +336,7 @@ static struct pci_driver orinoco_plx_driver = {
        .id_table       = orinoco_plx_id_table,
        .probe          = orinoco_plx_init_one,
        .remove         = orinoco_plx_remove_one,
-       .suspend        = orinoco_pci_suspend,
-       .resume         = orinoco_pci_resume,
+       .driver.pm      = &orinoco_pci_pm_ops,
 };
 
 static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
index 20ce569..9a9d335 100644 (file)
@@ -213,8 +213,7 @@ static struct pci_driver orinoco_tmd_driver = {
        .id_table       = orinoco_tmd_id_table,
        .probe          = orinoco_tmd_init_one,
        .remove         = orinoco_tmd_remove_one,
-       .suspend        = orinoco_pci_suspend,
-       .resume         = orinoco_pci_resume,
+       .driver.pm      = &orinoco_pci_pm_ops,
 };
 
 static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
index 651c676..11fa38f 100644 (file)
@@ -158,7 +158,7 @@ MODULE_FIRMWARE("orinoco_ezusb_fw");
 
 
 #define EZUSB_REQUEST_FW_TRANS         0xA0
-#define EZUSB_REQUEST_TRIGER           0xAA
+#define EZUSB_REQUEST_TRIGGER          0xAA
 #define EZUSB_REQUEST_TRIG_AC          0xAC
 #define EZUSB_CPUCS_REG                        0x7F92
 
@@ -1318,12 +1318,12 @@ static int ezusb_hard_reset(struct orinoco_private *priv)
        netdev_dbg(upriv->dev, "sending control message\n");
        retval = usb_control_msg(upriv->udev,
                                 usb_sndctrlpipe(upriv->udev, 0),
-                                EZUSB_REQUEST_TRIGER,
+                                EZUSB_REQUEST_TRIGGER,
                                 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
                                 USB_DIR_OUT, 0x0, 0x0, NULL, 0,
                                 DEF_TIMEOUT);
        if (retval < 0) {
-               err("EZUSB_REQUEST_TRIGER failed retval %d", retval);
+               err("EZUSB_REQUEST_TRIGGER failed retval %d", retval);
                return retval;
        }
 #if 0
index 024be55..003c378 100644 (file)
@@ -10,7 +10,7 @@ config P54_COMMON
          also need to be enabled in order to support any devices.
 
          These devices require softmac firmware which can be found at
-         <http://wireless.kernel.org/en/users/Drivers/p54>
+         <http://wireless.wiki.kernel.org/en/users/Drivers/p54>
 
          If you choose to build a module, it'll be called p54common.
 
@@ -22,7 +22,7 @@ config P54_USB
          This driver is for USB isl38xx based wireless cards.
 
          These devices require softmac firmware which can be found at
-         <http://wireless.kernel.org/en/users/Drivers/p54>
+         <http://wireless.wiki.kernel.org/en/users/Drivers/p54>
 
          If you choose to build a module, it'll be called p54usb.
 
@@ -36,7 +36,7 @@ config P54_PCI
          supported by the fullmac driver/firmware.
 
          This driver requires softmac firmware which can be found at
-         <http://wireless.kernel.org/en/users/Drivers/p54>
+         <http://wireless.wiki.kernel.org/en/users/Drivers/p54>
 
          If you choose to build a module, it'll be called p54pci.
 
index a5afcc8..bece14e 100644 (file)
@@ -132,7 +132,7 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
        if (priv->fw_var < 0x500)
                wiphy_info(priv->hw->wiphy,
                           "you are using an obsolete firmware. "
-                          "visit http://wireless.kernel.org/en/users/Drivers/p54 "
+                          "visit http://wireless.wiki.kernel.org/en/users/Drivers/p54 "
                           "and grab one for \"kernel >= 2.6.28\"!\n");
 
        if (priv->fw_var >= 0x300) {
index 80ad0b7..9d96c8b 100644 (file)
@@ -153,12 +153,12 @@ static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
                        if (!skb)
                                break;
 
-                       mapping = pci_map_single(priv->pdev,
+                       mapping = dma_map_single(&priv->pdev->dev,
                                                 skb_tail_pointer(skb),
                                                 priv->common.rx_mtu + 32,
-                                                PCI_DMA_FROMDEVICE);
+                                                DMA_FROM_DEVICE);
 
-                       if (pci_dma_mapping_error(priv->pdev, mapping)) {
+                       if (dma_mapping_error(&priv->pdev->dev, mapping)) {
                                dev_kfree_skb_any(skb);
                                dev_err(&priv->pdev->dev,
                                        "RX DMA Mapping error\n");
@@ -215,19 +215,22 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
                        len = priv->common.rx_mtu;
                }
                dma_addr = le32_to_cpu(desc->host_addr);
-               pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
-                       priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
+               dma_sync_single_for_cpu(&priv->pdev->dev, dma_addr,
+                                       priv->common.rx_mtu + 32,
+                                       DMA_FROM_DEVICE);
                skb_put(skb, len);
 
                if (p54_rx(dev, skb)) {
-                       pci_unmap_single(priv->pdev, dma_addr,
-                               priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
+                       dma_unmap_single(&priv->pdev->dev, dma_addr,
+                                        priv->common.rx_mtu + 32,
+                                        DMA_FROM_DEVICE);
                        rx_buf[i] = NULL;
                        desc->host_addr = cpu_to_le32(0);
                } else {
                        skb_trim(skb, 0);
-                       pci_dma_sync_single_for_device(priv->pdev, dma_addr,
-                               priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
+                       dma_sync_single_for_device(&priv->pdev->dev, dma_addr,
+                                                  priv->common.rx_mtu + 32,
+                                                  DMA_FROM_DEVICE);
                        desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
                }
 
@@ -258,8 +261,9 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
                skb = tx_buf[i];
                tx_buf[i] = NULL;
 
-               pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr),
-                                le16_to_cpu(desc->len), PCI_DMA_TODEVICE);
+               dma_unmap_single(&priv->pdev->dev,
+                                le32_to_cpu(desc->host_addr),
+                                le16_to_cpu(desc->len), DMA_TO_DEVICE);
 
                desc->host_addr = 0;
                desc->device_addr = 0;
@@ -334,9 +338,9 @@ static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
        idx = le32_to_cpu(ring_control->host_idx[1]);
        i = idx % ARRAY_SIZE(ring_control->tx_data);
 
-       mapping = pci_map_single(priv->pdev, skb->data, skb->len,
-                                PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(priv->pdev, mapping)) {
+       mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
+                                DMA_TO_DEVICE);
+       if (dma_mapping_error(&priv->pdev->dev, mapping)) {
                spin_unlock_irqrestore(&priv->lock, flags);
                p54_free_skb(dev, skb);
                dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
@@ -378,10 +382,10 @@ static void p54p_stop(struct ieee80211_hw *dev)
        for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) {
                desc = &ring_control->rx_data[i];
                if (desc->host_addr)
-                       pci_unmap_single(priv->pdev,
+                       dma_unmap_single(&priv->pdev->dev,
                                         le32_to_cpu(desc->host_addr),
                                         priv->common.rx_mtu + 32,
-                                        PCI_DMA_FROMDEVICE);
+                                        DMA_FROM_DEVICE);
                kfree_skb(priv->rx_buf_data[i]);
                priv->rx_buf_data[i] = NULL;
        }
@@ -389,10 +393,10 @@ static void p54p_stop(struct ieee80211_hw *dev)
        for (i = 0; i < ARRAY_SIZE(priv->rx_buf_mgmt); i++) {
                desc = &ring_control->rx_mgmt[i];
                if (desc->host_addr)
-                       pci_unmap_single(priv->pdev,
+                       dma_unmap_single(&priv->pdev->dev,
                                         le32_to_cpu(desc->host_addr),
                                         priv->common.rx_mtu + 32,
-                                        PCI_DMA_FROMDEVICE);
+                                        DMA_FROM_DEVICE);
                kfree_skb(priv->rx_buf_mgmt[i]);
                priv->rx_buf_mgmt[i] = NULL;
        }
@@ -400,10 +404,10 @@ static void p54p_stop(struct ieee80211_hw *dev)
        for (i = 0; i < ARRAY_SIZE(priv->tx_buf_data); i++) {
                desc = &ring_control->tx_data[i];
                if (desc->host_addr)
-                       pci_unmap_single(priv->pdev,
+                       dma_unmap_single(&priv->pdev->dev,
                                         le32_to_cpu(desc->host_addr),
                                         le16_to_cpu(desc->len),
-                                        PCI_DMA_TODEVICE);
+                                        DMA_TO_DEVICE);
 
                p54_free_skb(dev, priv->tx_buf_data[i]);
                priv->tx_buf_data[i] = NULL;
@@ -412,10 +416,10 @@ static void p54p_stop(struct ieee80211_hw *dev)
        for (i = 0; i < ARRAY_SIZE(priv->tx_buf_mgmt); i++) {
                desc = &ring_control->tx_mgmt[i];
                if (desc->host_addr)
-                       pci_unmap_single(priv->pdev,
+                       dma_unmap_single(&priv->pdev->dev,
                                         le32_to_cpu(desc->host_addr),
                                         le16_to_cpu(desc->len),
-                                        PCI_DMA_TODEVICE);
+                                        DMA_TO_DEVICE);
 
                p54_free_skb(dev, priv->tx_buf_mgmt[i]);
                priv->tx_buf_mgmt[i] = NULL;
@@ -568,9 +572,9 @@ static int p54p_probe(struct pci_dev *pdev,
                goto err_disable_dev;
        }
 
-       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+       err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (!err)
-               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+               err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
        if (err) {
                dev_err(&pdev->dev, "No suitable DMA available\n");
                goto err_free_reg;
@@ -603,8 +607,9 @@ static int p54p_probe(struct pci_dev *pdev,
                goto err_free_dev;
        }
 
-       priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control),
-                                                 &priv->ring_control_dma);
+       priv->ring_control = dma_alloc_coherent(&pdev->dev,
+                                               sizeof(*priv->ring_control),
+                                               &priv->ring_control_dma, GFP_KERNEL);
        if (!priv->ring_control) {
                dev_err(&pdev->dev, "Cannot allocate rings\n");
                err = -ENOMEM;
@@ -623,8 +628,8 @@ static int p54p_probe(struct pci_dev *pdev,
        if (!err)
                return 0;
 
-       pci_free_consistent(pdev, sizeof(*priv->ring_control),
-                           priv->ring_control, priv->ring_control_dma);
+       dma_free_coherent(&pdev->dev, sizeof(*priv->ring_control),
+                         priv->ring_control, priv->ring_control_dma);
 
  err_iounmap:
        iounmap(priv->map);
@@ -653,8 +658,8 @@ static void p54p_remove(struct pci_dev *pdev)
        wait_for_completion(&priv->fw_loaded);
        p54_unregister_common(dev);
        release_firmware(priv->firmware);
-       pci_free_consistent(pdev, sizeof(*priv->ring_control),
-                           priv->ring_control, priv->ring_control_dma);
+       dma_free_coherent(&pdev->dev, sizeof(*priv->ring_control),
+                         priv->ring_control, priv->ring_control_dma);
        iounmap(priv->map);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
index ff0e30c..cae4766 100644 (file)
@@ -36,7 +36,7 @@ static struct usb_driver p54u_driver;
  * Note:
  *
  * Always update our wiki's device list (located at:
- * http://wireless.kernel.org/en/users/Drivers/p54/devices ),
+ * http://wireless.wiki.kernel.org/en/users/Drivers/p54/devices ),
  * whenever you add a new device.
  */
 
index 1afc2cc..b889bb7 100644 (file)
@@ -143,7 +143,7 @@ enum dot11_priv_t {
  * together with a CSMA contention. Without this all frames are
  * sent with a CSMA contention.
  * Bibliography:
- * http://www.hpl.hp.com/personal/Jean_Tourrilhes/Papers/Packet.Frame.Grouping.html
+ * https://www.hpl.hp.com/personal/Jean_Tourrilhes/Papers/Packet.Frame.Grouping.html
  */
 enum dot11_maxframeburst_t {
        /* Values for DOT11_OID_MAXFRAMEBURST */
index a9bae69..efd64e5 100644 (file)
@@ -636,10 +636,10 @@ islpci_alloc_memory(islpci_private *priv)
         */
 
        /* perform the allocation */
-       priv->driver_mem_address = pci_alloc_consistent(priv->pdev,
-                                                       HOST_MEM_BLOCK,
-                                                       &priv->
-                                                       device_host_address);
+       priv->driver_mem_address = dma_alloc_coherent(&priv->pdev->dev,
+                                                     HOST_MEM_BLOCK,
+                                                     &priv->device_host_address,
+                                                     GFP_KERNEL);
 
        if (!priv->driver_mem_address) {
                /* error allocating the block of PCI memory */
@@ -692,11 +692,9 @@ islpci_alloc_memory(islpci_private *priv)
 
                /* map the allocated skb data area to pci */
                priv->pci_map_rx_address[counter] =
-                   pci_map_single(priv->pdev, (void *) skb->data,
-                                  MAX_FRAGMENT_SIZE_RX + 2,
-                                  PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(priv->pdev,
-                                         priv->pci_map_rx_address[counter])) {
+                   dma_map_single(&priv->pdev->dev, (void *)skb->data,
+                                  MAX_FRAGMENT_SIZE_RX + 2, DMA_FROM_DEVICE);
+               if (dma_mapping_error(&priv->pdev->dev, priv->pci_map_rx_address[counter])) {
                        priv->pci_map_rx_address[counter] = 0;
                        /* error mapping the buffer to device
                           accessible memory address */
@@ -727,9 +725,9 @@ islpci_free_memory(islpci_private *priv)
 
        /* free consistent DMA area... */
        if (priv->driver_mem_address)
-               pci_free_consistent(priv->pdev, HOST_MEM_BLOCK,
-                                   priv->driver_mem_address,
-                                   priv->device_host_address);
+               dma_free_coherent(&priv->pdev->dev, HOST_MEM_BLOCK,
+                                 priv->driver_mem_address,
+                                 priv->device_host_address);
 
        /* clear some dangling pointers */
        priv->driver_mem_address = NULL;
@@ -741,8 +739,8 @@ islpci_free_memory(islpci_private *priv)
         for (counter = 0; counter < ISL38XX_CB_MGMT_QSIZE; counter++) {
                struct islpci_membuf *buf = &priv->mgmt_rx[counter];
                if (buf->pci_addr)
-                       pci_unmap_single(priv->pdev, buf->pci_addr,
-                                        buf->size, PCI_DMA_FROMDEVICE);
+                       dma_unmap_single(&priv->pdev->dev, buf->pci_addr,
+                                        buf->size, DMA_FROM_DEVICE);
                buf->pci_addr = 0;
                kfree(buf->mem);
                buf->size = 0;
@@ -752,10 +750,10 @@ islpci_free_memory(islpci_private *priv)
        /* clean up data rx buffers */
        for (counter = 0; counter < ISL38XX_CB_RX_QSIZE; counter++) {
                if (priv->pci_map_rx_address[counter])
-                       pci_unmap_single(priv->pdev,
+                       dma_unmap_single(&priv->pdev->dev,
                                         priv->pci_map_rx_address[counter],
                                         MAX_FRAGMENT_SIZE_RX + 2,
-                                        PCI_DMA_FROMDEVICE);
+                                        DMA_FROM_DEVICE);
                priv->pci_map_rx_address[counter] = 0;
 
                if (priv->data_low_rx[counter])
index 8d68025..74dd657 100644 (file)
@@ -50,9 +50,9 @@ islpci_eth_cleanup_transmit(islpci_private *priv,
                              skb, skb->data, skb->len, skb->truesize);
 #endif
 
-                       pci_unmap_single(priv->pdev,
+                       dma_unmap_single(&priv->pdev->dev,
                                         priv->pci_map_tx_address[index],
-                                        skb->len, PCI_DMA_TODEVICE);
+                                        skb->len, DMA_TO_DEVICE);
                        dev_kfree_skb_irq(skb);
                        skb = NULL;
                }
@@ -176,10 +176,9 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
 #endif
 
        /* map the skb buffer to pci memory for DMA operation */
-       pci_map_address = pci_map_single(priv->pdev,
-                                        (void *) skb->data, skb->len,
-                                        PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(priv->pdev, pci_map_address)) {
+       pci_map_address = dma_map_single(&priv->pdev->dev, (void *)skb->data,
+                                        skb->len, DMA_TO_DEVICE);
+       if (dma_mapping_error(&priv->pdev->dev, pci_map_address)) {
                printk(KERN_WARNING "%s: cannot map buffer to PCI\n",
                       ndev->name);
                goto drop_free;
@@ -323,9 +322,8 @@ islpci_eth_receive(islpci_private *priv)
 #endif
 
        /* delete the streaming DMA mapping before processing the skb */
-       pci_unmap_single(priv->pdev,
-                        priv->pci_map_rx_address[index],
-                        MAX_FRAGMENT_SIZE_RX + 2, PCI_DMA_FROMDEVICE);
+       dma_unmap_single(&priv->pdev->dev, priv->pci_map_rx_address[index],
+                        MAX_FRAGMENT_SIZE_RX + 2, DMA_FROM_DEVICE);
 
        /* update the skb structure and align the buffer */
        skb_put(skb, size);
@@ -431,11 +429,9 @@ islpci_eth_receive(islpci_private *priv)
 
                /* set the streaming DMA mapping for proper PCI bus operation */
                priv->pci_map_rx_address[index] =
-                   pci_map_single(priv->pdev, (void *) skb->data,
-                                  MAX_FRAGMENT_SIZE_RX + 2,
-                                  PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(priv->pdev,
-                                         priv->pci_map_rx_address[index])) {
+                   dma_map_single(&priv->pdev->dev, (void *)skb->data,
+                                  MAX_FRAGMENT_SIZE_RX + 2, DMA_FROM_DEVICE);
+               if (dma_mapping_error(&priv->pdev->dev, priv->pci_map_rx_address[index])) {
                        /* error mapping the buffer to device accessible memory address */
                        DEBUG(SHOW_ERROR_MESSAGES,
                              "Error mapping DMA address\n");
index 20291c0..31a1e61 100644 (file)
@@ -26,7 +26,8 @@ module_param(init_pcitm, int, 0);
 /* In this order: vendor, device, subvendor, subdevice, class, class_mask,
  * driver_data
  * If you have an update for this please contact prism54-devel@prism54.org
- * The latest list can be found at http://wireless.kernel.org/en/users/Drivers/p54 */
+ * The latest list can be found at http://wireless.wiki.kernel.org/en/users/Drivers/p54
+ */
 static const struct pci_device_id prism54_id_tbl[] = {
        /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
        {
@@ -63,16 +64,17 @@ MODULE_DEVICE_TABLE(pci, prism54_id_tbl);
 
 static int prism54_probe(struct pci_dev *, const struct pci_device_id *);
 static void prism54_remove(struct pci_dev *);
-static int prism54_suspend(struct pci_dev *, pm_message_t state);
-static int prism54_resume(struct pci_dev *);
+static int __maybe_unused prism54_suspend(struct device *);
+static int __maybe_unused prism54_resume(struct device *);
+
+static SIMPLE_DEV_PM_OPS(prism54_pm_ops, prism54_suspend, prism54_resume);
 
 static struct pci_driver prism54_driver = {
        .name = DRV_NAME,
        .id_table = prism54_id_tbl,
        .probe = prism54_probe,
        .remove = prism54_remove,
-       .suspend = prism54_suspend,
-       .resume = prism54_resume,
+       .driver.pm = &prism54_pm_ops,
 };
 
 /******************************************************************************
@@ -106,7 +108,7 @@ prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
 
        /* enable PCI DMA */
-       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+       if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
                printk(KERN_ERR "%s: 32-bit PCI DMA not supported", DRV_NAME);
                goto do_pci_disable_device;
         }
@@ -243,16 +245,13 @@ prism54_remove(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
-static int
-prism54_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused
+prism54_suspend(struct device *dev)
 {
-       struct net_device *ndev = pci_get_drvdata(pdev);
+       struct net_device *ndev = dev_get_drvdata(dev);
        islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
        BUG_ON(!priv);
 
-
-       pci_save_state(pdev);
-
        /* tell the device not to trigger interrupts for now... */
        isl38xx_disable_interrupts(priv->device_base);
 
@@ -266,26 +265,16 @@ prism54_suspend(struct pci_dev *pdev, pm_message_t state)
        return 0;
 }
 
-static int
-prism54_resume(struct pci_dev *pdev)
+static int __maybe_unused
+prism54_resume(struct device *dev)
 {
-       struct net_device *ndev = pci_get_drvdata(pdev);
+       struct net_device *ndev = dev_get_drvdata(dev);
        islpci_private *priv = ndev ? netdev_priv(ndev) : NULL;
-       int err;
 
        BUG_ON(!priv);
 
        printk(KERN_NOTICE "%s: got resume request\n", ndev->name);
 
-       err = pci_enable_device(pdev);
-       if (err) {
-               printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
-                      ndev->name);
-               return err;
-       }
-
-       pci_restore_state(pdev);
-
        /* alright let's go into the PREBOOT state */
        islpci_reset(priv, 1);
 
index e336eb1..0c7fb76 100644 (file)
@@ -115,10 +115,11 @@ islpci_mgmt_rx_fill(struct net_device *ndev)
                        buf->size = MGMT_FRAME_SIZE;
                }
                if (buf->pci_addr == 0) {
-                       buf->pci_addr = pci_map_single(priv->pdev, buf->mem,
+                       buf->pci_addr = dma_map_single(&priv->pdev->dev,
+                                                      buf->mem,
                                                       MGMT_FRAME_SIZE,
-                                                      PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(priv->pdev, buf->pci_addr)) {
+                                                      DMA_FROM_DEVICE);
+                       if (dma_mapping_error(&priv->pdev->dev, buf->pci_addr)) {
                                printk(KERN_WARNING
                                       "Failed to make memory DMA'able.\n");
                                return -ENOMEM;
@@ -203,9 +204,9 @@ islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid,
 #endif
 
        err = -ENOMEM;
-       buf.pci_addr = pci_map_single(priv->pdev, buf.mem, frag_len,
-                                     PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(priv->pdev, buf.pci_addr)) {
+       buf.pci_addr = dma_map_single(&priv->pdev->dev, buf.mem, frag_len,
+                                     DMA_TO_DEVICE);
+       if (dma_mapping_error(&priv->pdev->dev, buf.pci_addr)) {
                printk(KERN_WARNING "%s: cannot map PCI memory for mgmt\n",
                       ndev->name);
                goto error_free;
@@ -302,8 +303,8 @@ islpci_mgt_receive(struct net_device *ndev)
                }
 
                /* Ensure the results of device DMA are visible to the CPU. */
-               pci_dma_sync_single_for_cpu(priv->pdev, buf->pci_addr,
-                                           buf->size, PCI_DMA_FROMDEVICE);
+               dma_sync_single_for_cpu(&priv->pdev->dev, buf->pci_addr,
+                                       buf->size, DMA_FROM_DEVICE);
 
                /* Perform endianess conversion for PIMFOR header in-place. */
                header = pimfor_decode_header(buf->mem, frag_len);
@@ -414,8 +415,8 @@ islpci_mgt_cleanup_transmit(struct net_device *ndev)
        for (; priv->index_mgmt_tx < curr_frag; priv->index_mgmt_tx++) {
                int index = priv->index_mgmt_tx % ISL38XX_CB_MGMT_QSIZE;
                struct islpci_membuf *buf = &priv->mgmt_tx[index];
-               pci_unmap_single(priv->pdev, buf->pci_addr, buf->size,
-                                PCI_DMA_TODEVICE);
+               dma_unmap_single(&priv->pdev->dev, buf->pci_addr, buf->size,
+                                DMA_TO_DEVICE);
                buf->pci_addr = 0;
                kfree(buf->mem);
                buf->mem = NULL;
index 0bdafe9..1046b59 100644 (file)
@@ -398,7 +398,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
        new_node->rx_reorder_ptr = kcalloc(win_size, sizeof(void *),
                                           GFP_KERNEL);
        if (!new_node->rx_reorder_ptr) {
-               kfree((u8 *) new_node);
+               kfree(new_node);
                mwifiex_dbg(priv->adapter, ERROR,
                            "%s: failed to alloc reorder_ptr\n", __func__);
                return;
index 4e4f59c..96848fa 100644 (file)
@@ -27,7 +27,8 @@ module_param(reg_alpha2, charp, 0);
 
 static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
        {
-               .max = 3, .types = BIT(NL80211_IFTYPE_STATION) |
+               .max = MWIFIEX_MAX_BSS_NUM,
+               .types = BIT(NL80211_IFTYPE_STATION) |
                                   BIT(NL80211_IFTYPE_P2P_GO) |
                                   BIT(NL80211_IFTYPE_P2P_CLIENT) |
                                   BIT(NL80211_IFTYPE_AP),
@@ -3726,11 +3727,11 @@ mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
        int ret;
 
        if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        /* make sure we are in station mode and connected */
        if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        switch (action_code) {
        case WLAN_TDLS_SETUP_REQUEST:
@@ -3798,11 +3799,11 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
 
        if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) ||
            !(wiphy->flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        /* make sure we are in station mode and connected */
        if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        mwifiex_dbg(priv->adapter, MSG,
                    "TDLS peer=%pM, oper=%d\n", peer, action);
@@ -3832,7 +3833,7 @@ mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
        default:
                mwifiex_dbg(priv->adapter, ERROR,
                            "tdls_oper: operation not supported\n");
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        }
 
        return mwifiex_tdls_oper(priv, peer, action);
@@ -3913,11 +3914,11 @@ mwifiex_cfg80211_add_station(struct wiphy *wiphy, struct net_device *dev,
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 
        if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        /* make sure we are in station mode and connected */
        if ((priv->bss_type != MWIFIEX_BSS_TYPE_STA) || !priv->media_connected)
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        return mwifiex_tdls_oper(priv, mac, MWIFIEX_TDLS_CREATE_LINK);
 }
@@ -4150,11 +4151,11 @@ mwifiex_cfg80211_change_station(struct wiphy *wiphy, struct net_device *dev,
 
        /* we support change_station handler only for TDLS peers*/
        if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        /* make sure we are in station mode and connected */
        if ((priv->bss_type != MWIFIEX_BSS_TYPE_STA) || !priv->media_connected)
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        priv->sta_params = params;
 
index 5290991..9ee5600 100644 (file)
@@ -953,7 +953,7 @@ int mwifiex_set_mac_address(struct mwifiex_private *priv,
        } else {
                /* Internal mac address change */
                if (priv->bss_type == MWIFIEX_BSS_TYPE_ANY)
-                       return -ENOTSUPP;
+                       return -EOPNOTSUPP;
 
                mac_addr = old_mac_addr;
 
index 8bd355d..d3a968e 100644 (file)
@@ -1723,7 +1723,7 @@ mwifiex_cmd_tdls_config(struct mwifiex_private *priv,
        default:
                mwifiex_dbg(priv->adapter, ERROR,
                            "Unknown TDLS configuration\n");
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        }
 
        le16_unaligned_add_cpu(&cmd->size, len);
@@ -1849,7 +1849,7 @@ mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
                break;
        default:
                mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS operation\n");
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        }
 
        le16_unaligned_add_cpu(&cmd->size, config_len);
index f216601..962d8bf 100644 (file)
@@ -580,6 +580,11 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
 {
        struct host_cmd_ds_802_11_key_material *key =
                                                &resp->params.key_material;
+       int len;
+
+       len = le16_to_cpu(key->key_param_set.key_len);
+       if (len > sizeof(key->key_param_set.key))
+               return -EINVAL;
 
        if (le16_to_cpu(key->action) == HostCmd_ACT_GEN_SET) {
                if ((le16_to_cpu(key->key_param_set.key_info) & KEY_MCAST)) {
@@ -593,9 +598,8 @@ static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
 
        memset(priv->aes_key.key_param_set.key, 0,
               sizeof(key->key_param_set.key));
-       priv->aes_key.key_param_set.key_len = key->key_param_set.key_len;
-       memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key,
-              le16_to_cpu(priv->aes_key.key_param_set.key_len));
+       priv->aes_key.key_param_set.key_len = cpu_to_le16(len);
+       memcpy(priv->aes_key.key_param_set.key, key->key_param_set.key, len);
 
        return 0;
 }
@@ -610,9 +614,14 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
                                              struct host_cmd_ds_command *resp)
 {
        struct host_cmd_ds_802_11_key_material_v2 *key_v2;
-       __le16 len;
+       int len;
 
        key_v2 = &resp->params.key_material_v2;
+
+       len = le16_to_cpu(key_v2->key_param_set.key_params.aes.key_len);
+       if (len > WLAN_KEY_LEN_CCMP)
+               return -EINVAL;
+
        if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) {
                if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) {
                        mwifiex_dbg(priv->adapter, INFO, "info: key: GTK is set\n");
@@ -628,10 +637,9 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
        memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0,
               WLAN_KEY_LEN_CCMP);
        priv->aes_key_v2.key_param_set.key_params.aes.key_len =
-                               key_v2->key_param_set.key_params.aes.key_len;
-       len = priv->aes_key_v2.key_param_set.key_params.aes.key_len;
+                               cpu_to_le16(len);
        memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key,
-              key_v2->key_param_set.key_params.aes.key, le16_to_cpu(len));
+              key_v2->key_param_set.key_params.aes.key, len);
 
        return 0;
 }
index 41533a0..31015d2 100644 (file)
@@ -12,6 +12,10 @@ config MT76_USB
        tristate
        depends on MT76_CORE
 
+config MT76_SDIO
+       tristate
+       depends on MT76_CORE
+
 config MT76x02_LIB
        tristate
        select MT76_CORE
index ef663b8..e53584d 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_MT76_CORE) += mt76.o
 obj-$(CONFIG_MT76_USB) += mt76-usb.o
+obj-$(CONFIG_MT76_SDIO) += mt76-sdio.o
 obj-$(CONFIG_MT76x02_LIB) += mt76x02-lib.o
 obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o
 
@@ -9,8 +10,10 @@ mt76-y := \
        tx.o agg-rx.o mcu.o
 
 mt76-$(CONFIG_PCI) += pci.o
+mt76-$(CONFIG_NL80211_TESTMODE) += testmode.o
 
 mt76-usb-y := usb.o usb_trace.o
+mt76-sdio-y := sdio.o
 
 CFLAGS_trace.o := -I$(src)
 CFLAGS_usb_trace.o := -I$(src)
index 3a5de1d..5d58b16 100644 (file)
@@ -9,7 +9,7 @@ mt76_reg_set(void *data, u64 val)
 {
        struct mt76_dev *dev = data;
 
-       dev->bus->wr(dev, dev->debugfs_reg, val);
+       __mt76_wr(dev, dev->debugfs_reg, val);
        return 0;
 }
 
@@ -18,7 +18,7 @@ mt76_reg_get(void *data, u64 *val)
 {
        struct mt76_dev *dev = data;
 
-       *val = dev->bus->rr(dev, dev->debugfs_reg);
+       *val = __mt76_rr(dev, dev->debugfs_reg);
        return 0;
 }
 
@@ -54,9 +54,6 @@ static int mt76_rx_queues_read(struct seq_file *s, void *data)
        mt76_for_each_q_rx(dev, i) {
                struct mt76_queue *q = &dev->q_rx[i];
 
-               if (!q->ndesc)
-                       continue;
-
                queued = mt76_is_usb(dev) ? q->ndesc - q->queued : q->queued;
                seq_printf(s, "%d:      queued=%d head=%d tail=%d\n",
                           i, queued, q->head, q->tail);
index f4d6074..6c25859 100644 (file)
@@ -370,6 +370,12 @@ unmap:
                                 tx_info.buf[n].len, DMA_TO_DEVICE);
 
 free:
+#ifdef CONFIG_NL80211_TESTMODE
+       /* fix tx_done accounting on queue overflow */
+       if (tx_info.skb == dev->test.tx_skb)
+               dev->test.tx_done--;
+#endif
+
        e.skb = tx_info.skb;
        e.txwi = t;
        dev->drv->tx_complete_skb(dev, qid, &e);
index c236e30..3044e00 100644 (file)
@@ -74,6 +74,11 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len)
                                           &data[i]);
        }
 
+#ifdef CONFIG_NL80211_TESTMODE
+       dev->test.mtd_name = devm_kstrdup(dev->dev, part, GFP_KERNEL);
+       dev->test.mtd_offset = offset;
+#endif
+
 out_put_node:
        of_node_put(np);
        return ret;
index 9070981..3d4bf72 100644 (file)
@@ -58,12 +58,15 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = {
        CHAN5G(132, 5660),
        CHAN5G(136, 5680),
        CHAN5G(140, 5700),
+       CHAN5G(144, 5720),
 
        CHAN5G(149, 5745),
        CHAN5G(153, 5765),
        CHAN5G(157, 5785),
        CHAN5G(161, 5805),
        CHAN5G(165, 5825),
+       CHAN5G(169, 5845),
+       CHAN5G(173, 5865),
 };
 
 static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
@@ -279,7 +282,8 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
 
        wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
        wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH |
-                       WIPHY_FLAG_SUPPORTS_TDLS;
+                       WIPHY_FLAG_SUPPORTS_TDLS |
+                       WIPHY_FLAG_AP_UAPSD;
 
        wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
        wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
@@ -289,6 +293,7 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
        wiphy->available_antennas_rx = dev->phy.antenna_mask;
 
        hw->txq_data_size = sizeof(struct mt76_txq);
+       hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
 
        if (!hw->max_tx_fragments)
                hw->max_tx_fragments = 16;
@@ -300,7 +305,11 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
        ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
        ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
        ieee80211_hw_set(hw, TX_AMSDU);
-       ieee80211_hw_set(hw, TX_FRAG_LIST);
+
+       /* TODO: avoid linearization for SDIO */
+       if (!mt76_is_sdio(dev))
+               ieee80211_hw_set(hw, TX_FRAG_LIST);
+
        ieee80211_hw_set(hw, MFP_CAPABLE);
        ieee80211_hw_set(hw, AP_LINK_PS);
        ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
@@ -432,6 +441,12 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
 
        tasklet_init(&dev->tx_tasklet, mt76_tx_tasklet, (unsigned long)dev);
 
+       dev->wq = alloc_ordered_workqueue("mt76", 0);
+       if (!dev->wq) {
+               ieee80211_free_hw(hw);
+               return NULL;
+       }
+
        return dev;
 }
 EXPORT_SYMBOL_GPL(mt76_alloc_device);
@@ -485,7 +500,12 @@ EXPORT_SYMBOL_GPL(mt76_unregister_device);
 
 void mt76_free_device(struct mt76_dev *dev)
 {
-       mt76_tx_free(dev);
+       if (dev->wq) {
+               destroy_workqueue(dev->wq);
+               dev->wq = NULL;
+       }
+       if (mt76_is_mmio(dev))
+               mt76_tx_free(dev);
        ieee80211_free_hw(dev->hw);
 }
 EXPORT_SYMBOL_GPL(mt76_free_device);
@@ -500,6 +520,13 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
                return;
        }
 
+#ifdef CONFIG_NL80211_TESTMODE
+       if (dev->test.state == MT76_TM_STATE_RX_FRAMES) {
+               dev->test.rx_stats.packets[q]++;
+               if (status->flag & RX_FLAG_FAILED_FCS_CRC)
+                       dev->test.rx_stats.fcs_error[q]++;
+       }
+#endif
        __skb_queue_tail(&dev->rx_skb[q], skb);
 }
 EXPORT_SYMBOL_GPL(mt76_rx);
@@ -537,8 +564,7 @@ mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
        return &msband->chan[idx];
 }
 
-static void
-mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
+void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
 {
        struct mt76_channel_state *state = phy->chan_state;
 
@@ -546,6 +572,7 @@ mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time)
                                                  phy->survey_time));
        phy->survey_time = time;
 }
+EXPORT_SYMBOL_GPL(mt76_update_survey_active_time);
 
 void mt76_update_survey(struct mt76_dev *dev)
 {
index 3d7db6f..af35bc3 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/average.h>
 #include <net/mac80211.h>
 #include "util.h"
+#include "testmode.h"
 
 #define MT_TX_RING_SIZE     256
 #define MT_MCU_RING_SIZE    32
@@ -33,6 +34,7 @@ struct mt76_reg_pair {
 enum mt76_bus_type {
        MT76_BUS_MMIO,
        MT76_BUS_USB,
+       MT76_BUS_SDIO,
 };
 
 struct mt76_bus_ops {
@@ -52,6 +54,7 @@ struct mt76_bus_ops {
 
 #define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB)
 #define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO)
+#define mt76_is_sdio(dev) ((dev)->bus->type == MT76_BUS_SDIO)
 
 enum mt76_txq_id {
        MT_TXQ_VO = IEEE80211_AC_VO,
@@ -94,6 +97,7 @@ struct mt76_queue_entry {
        union {
                struct mt76_txwi_cache *txwi;
                struct urb *urb;
+               int buf_sz;
        };
        enum mt76_txq_id qid;
        bool skip_buf0:1;
@@ -146,6 +150,8 @@ struct mt76_mcu_ops {
                            int len, bool wait_resp);
        int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb,
                                int cmd, bool wait_resp);
+       u32 (*mcu_rr)(struct mt76_dev *dev, u32 offset);
+       void (*mcu_wr)(struct mt76_dev *dev, u32 offset, u32 val);
        int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
                         const struct mt76_reg_pair *rp, int len);
        int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
@@ -290,6 +296,7 @@ enum {
        MT76_STATE_POWER_OFF,
        MT76_STATE_SUSPEND,
        MT76_STATE_ROC,
+       MT76_STATE_PM,
 };
 
 struct mt76_hw_cap {
@@ -422,7 +429,6 @@ struct mt76_usb {
        u16 data_len;
 
        struct tasklet_struct rx_tasklet;
-       struct workqueue_struct *wq;
        struct work_struct stat_work;
 
        u8 out_ep[__MT_EP_OUT_MAX];
@@ -439,6 +445,24 @@ struct mt76_usb {
        } mcu;
 };
 
+struct mt76_sdio {
+       struct task_struct *tx_kthread;
+       struct task_struct *kthread;
+       struct work_struct stat_work;
+
+       unsigned long state;
+
+       struct sdio_func *func;
+
+       struct {
+               struct mutex lock;
+               int pse_data_quota;
+               int ple_data_quota;
+               int pse_mcu_quota;
+               int deficit;
+       } sched;
+};
+
 struct mt76_mmio {
        void __iomem *regs;
        spinlock_t irq_lock;
@@ -475,6 +499,47 @@ struct mt76_rx_status {
        s8 chain_signal[IEEE80211_MAX_CHAINS];
 };
 
+struct mt76_testmode_ops {
+       int (*set_state)(struct mt76_dev *dev, enum mt76_testmode_state state);
+       int (*set_params)(struct mt76_dev *dev, struct nlattr **tb,
+                         enum mt76_testmode_state new_state);
+       int (*dump_stats)(struct mt76_dev *dev, struct sk_buff *msg);
+};
+
+struct mt76_testmode_data {
+       enum mt76_testmode_state state;
+
+       u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)];
+       struct sk_buff *tx_skb;
+
+       u32 tx_count;
+       u16 tx_msdu_len;
+
+       u8 tx_rate_mode;
+       u8 tx_rate_idx;
+       u8 tx_rate_nss;
+       u8 tx_rate_sgi;
+       u8 tx_rate_ldpc;
+
+       u8 tx_antenna_mask;
+
+       u32 freq_offset;
+
+       u8 tx_power[4];
+       u8 tx_power_control;
+
+       const char *mtd_name;
+       u32 mtd_offset;
+
+       u32 tx_pending;
+       u32 tx_queued;
+       u32 tx_done;
+       struct {
+               u64 packets[__MT_RXQ_MAX];
+               u64 fcs_error[__MT_RXQ_MAX];
+       } rx_stats;
+};
+
 struct mt76_phy {
        struct ieee80211_hw *hw;
        struct mt76_dev *dev;
@@ -491,6 +556,8 @@ struct mt76_phy {
        struct mt76_sband sband_2g;
        struct mt76_sband sband_5g;
 
+       u32 vif_mask;
+
        int txpower_cur;
        u8 antenna_mask;
 };
@@ -572,9 +639,17 @@ struct mt76_dev {
 
        u32 rxfilter;
 
+#ifdef CONFIG_NL80211_TESTMODE
+       const struct mt76_testmode_ops *test_ops;
+       struct mt76_testmode_data test;
+#endif
+
+       struct workqueue_struct *wq;
+
        union {
                struct mt76_mmio mmio;
                struct mt76_usb usb;
+               struct mt76_sdio sdio;
        };
 };
 
@@ -805,6 +880,15 @@ static inline u8 mt76_tx_power_nss_delta(u8 nss)
        return nss_delta[nss - 1];
 }
 
+static inline bool mt76_testmode_enabled(struct mt76_dev *dev)
+{
+#ifdef CONFIG_NL80211_TESTMODE
+       return dev->test.state != MT76_TM_STATE_OFF;
+#else
+       return false;
+#endif
+}
+
 void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
 void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta,
             struct mt76_wcid *wcid, struct sk_buff *skb);
@@ -824,6 +908,7 @@ void mt76_release_buffered_frames(struct ieee80211_hw *hw,
 bool mt76_has_tx_pending(struct mt76_phy *phy);
 void mt76_set_channel(struct mt76_phy *phy);
 void mt76_update_survey(struct mt76_dev *dev);
+void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time);
 int mt76_get_survey(struct ieee80211_hw *hw, int idx,
                    struct survey_info *survey);
 void mt76_set_stream_caps(struct mt76_phy *phy, bool vht);
@@ -877,6 +962,24 @@ void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                  const u8 *mac);
 void mt76_sw_scan_complete(struct ieee80211_hw *hw,
                           struct ieee80211_vif *vif);
+int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                     void *data, int len);
+int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
+                      struct netlink_callback *cb, void *data, int len);
+int mt76_testmode_set_state(struct mt76_dev *dev, enum mt76_testmode_state state);
+
+static inline void mt76_testmode_reset(struct mt76_dev *dev, bool disable)
+{
+#ifdef CONFIG_NL80211_TESTMODE
+       enum mt76_testmode_state state = MT76_TM_STATE_IDLE;
+
+       if (disable || dev->test.state == MT76_TM_STATE_OFF)
+               state = MT76_TM_STATE_OFF;
+
+       mt76_testmode_set_state(dev, state);
+#endif
+}
+
 
 /* internal */
 static inline struct ieee80211_hw *
@@ -901,6 +1004,7 @@ void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
                           struct napi_struct *napi);
 void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
+void mt76_testmode_tx_pending(struct mt76_dev *dev);
 
 /* usb */
 static inline bool mt76u_urb_error(struct urb *urb)
@@ -935,13 +1039,12 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
        return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
 }
 
-int mt76u_skb_dma_info(struct sk_buff *skb, u32 info);
+int mt76_skb_adjust_pad(struct sk_buff *skb);
 int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
                         u8 req_type, u16 val, u16 offset,
                         void *buf, size_t len);
 void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
                     const u16 offset, const u32 val);
-void mt76u_deinit(struct mt76_dev *dev);
 int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf,
               bool ext);
 int mt76u_alloc_mcu_queue(struct mt76_dev *dev);
@@ -951,6 +1054,12 @@ void mt76u_stop_rx(struct mt76_dev *dev);
 int mt76u_resume_rx(struct mt76_dev *dev);
 void mt76u_queues_deinit(struct mt76_dev *dev);
 
+int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
+              const struct mt76_bus_ops *bus_ops);
+int mt76s_alloc_queues(struct mt76_dev *dev);
+void mt76s_stop_txrx(struct mt76_dev *dev);
+void mt76s_deinit(struct mt76_dev *dev);
+
 struct sk_buff *
 mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
                   int data_len);
index 83dfa6d..447f2c6 100644 (file)
@@ -44,7 +44,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 
        mutex_lock(&dev->mt76.mutex);
 
-       mvif->idx = ffs(~dev->vif_mask) - 1;
+       mvif->idx = ffs(~dev->mphy.vif_mask) - 1;
        if (mvif->idx >= MT7603_MAX_INTERFACES) {
                ret = -ENOSPC;
                goto out;
@@ -65,7 +65,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        }
 
        idx = MT7603_WTBL_RESERVED - 1 - mvif->idx;
-       dev->vif_mask |= BIT(mvif->idx);
+       dev->mphy.vif_mask |= BIT(mvif->idx);
        INIT_LIST_HEAD(&mvif->sta.poll_list);
        mvif->sta.wcid.idx = idx;
        mvif->sta.wcid.hw_key_idx = -1;
@@ -107,7 +107,7 @@ mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        spin_unlock_bh(&dev->sta_poll_lock);
 
        mutex_lock(&dev->mt76.mutex);
-       dev->vif_mask &= ~BIT(mvif->idx);
+       dev->mphy.vif_mask &= ~BIT(mvif->idx);
        mutex_unlock(&dev->mt76.mutex);
 }
 
index 7fadf09..c863052 100644 (file)
@@ -108,8 +108,6 @@ struct mt7603_dev {
 
        u32 rxfilter;
 
-       u8 vif_mask;
-
        struct list_head sta_poll_list;
        spinlock_t sta_poll_lock;
 
index e25db11..f372fb6 100644 (file)
@@ -28,13 +28,28 @@ config MT7622_WMAC
          which has the same feature set as a MT7615, but limited to
          2.4 GHz only.
 
+config MT7663_USB_SDIO_COMMON
+       tristate
+       select MT7615_COMMON
+
 config MT7663U
        tristate "MediaTek MT7663U (USB) support"
        select MT76_USB
-       select MT7615_COMMON
+       select MT7663_USB_SDIO_COMMON
        depends on MAC80211
        depends on USB
        help
-         This adds support for MT7663U 802.11ax 2x2:2 wireless devices.
+         This adds support for MT7663U 802.11ac 2x2:2 wireless devices.
+
+         To compile this driver as a module, choose M here.
+
+config MT7663S
+       tristate "MediaTek MT7663S (SDIO) support"
+       select MT76_SDIO
+       select MT7663_USB_SDIO_COMMON
+       depends on MAC80211
+       depends on MMC
+       help
+         This adds support for MT7663S 802.11ac 2x2:2 wireless devices.
 
          To compile this driver as a module, choose M here.
index 99f353b..e8fc4a7 100644 (file)
@@ -2,14 +2,19 @@
 
 obj-$(CONFIG_MT7615_COMMON) += mt7615-common.o
 obj-$(CONFIG_MT7615E) += mt7615e.o
+obj-$(CONFIG_MT7663_USB_SDIO_COMMON) += mt7663-usb-sdio-common.o
 obj-$(CONFIG_MT7663U) += mt7663u.o
+obj-$(CONFIG_MT7663S) += mt7663s.o
 
 CFLAGS_trace.o := -I$(src)
 
 mt7615-common-y := main.o init.o mcu.o eeprom.o mac.o \
                   debugfs.o trace.o
+mt7615-common-$(CONFIG_NL80211_TESTMODE) += testmode.o
 
 mt7615e-y := pci.o pci_init.o dma.o pci_mac.o mmio.o
 mt7615e-$(CONFIG_MT7622_WMAC) += soc.o
 
-mt7663u-y := usb.o usb_mcu.o usb_init.o
+mt7663-usb-sdio-common-y := usb_sdio.o
+mt7663u-y := usb.o usb_mcu.o
+mt7663s-y := sdio.o sdio_mcu.o sdio_txrx.o
index d06afcf..8893165 100644 (file)
@@ -6,11 +6,16 @@ static int
 mt7615_radar_pattern_set(void *data, u64 val)
 {
        struct mt7615_dev *dev = data;
+       int err;
 
        if (!mt7615_wait_for_mcu_init(dev))
                return 0;
 
-       return mt7615_mcu_rdd_send_pattern(dev);
+       mt7615_mutex_acquire(dev);
+       err = mt7615_mcu_rdd_send_pattern(dev);
+       mt7615_mutex_release(dev);
+
+       return err;
 }
 
 DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_pattern, NULL,
@@ -47,6 +52,52 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_scs, mt7615_scs_get,
                         mt7615_scs_set, "%lld\n");
 
 static int
+mt7615_pm_set(void *data, u64 val)
+{
+       struct mt7615_dev *dev = data;
+
+       if (!mt7615_wait_for_mcu_init(dev))
+               return 0;
+
+       return mt7615_pm_set_enable(dev, val);
+}
+
+static int
+mt7615_pm_get(void *data, u64 *val)
+{
+       struct mt7615_dev *dev = data;
+
+       *val = dev->pm.enable;
+
+       return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7615_pm_get, mt7615_pm_set, "%lld\n");
+
+static int
+mt7615_pm_idle_timeout_set(void *data, u64 val)
+{
+       struct mt7615_dev *dev = data;
+
+       dev->pm.idle_timeout = msecs_to_jiffies(val);
+
+       return 0;
+}
+
+static int
+mt7615_pm_idle_timeout_get(void *data, u64 *val)
+{
+       struct mt7615_dev *dev = data;
+
+       *val = jiffies_to_msecs(dev->pm.idle_timeout);
+
+       return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_pm_idle_timeout, mt7615_pm_idle_timeout_get,
+                        mt7615_pm_idle_timeout_set, "%lld\n");
+
+static int
 mt7615_dbdc_set(void *data, u64 val)
 {
        struct mt7615_dev *dev = data;
@@ -84,7 +135,10 @@ mt7615_fw_debug_set(void *data, u64 val)
                return 0;
 
        dev->fw_debug = val;
+
+       mt7615_mutex_acquire(dev);
        mt7615_mcu_fw_log_2_host(dev, dev->fw_debug ? 2 : 0);
+       mt7615_mutex_release(dev);
 
        return 0;
 }
@@ -111,6 +165,8 @@ mt7615_reset_test_set(void *data, u64 val)
        if (!mt7615_wait_for_mcu_init(dev))
                return 0;
 
+       mt7615_mutex_acquire(dev);
+
        skb = alloc_skb(1, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
@@ -118,6 +174,8 @@ mt7615_reset_test_set(void *data, u64 val)
        skb_put(skb, 1);
        mt76_tx_queue_skb_raw(dev, 0, skb, 0);
 
+       mt7615_mutex_release(dev);
+
        return 0;
 }
 
@@ -167,9 +225,13 @@ mt7615_ampdu_stat_read(struct seq_file *file, void *data)
 {
        struct mt7615_dev *dev = file->private;
 
+       mt7615_mutex_acquire(dev);
+
        mt7615_ampdu_stat_read_phy(&dev->phy, file);
        mt7615_ampdu_stat_read_phy(mt7615_ext_phy(dev), file);
 
+       mt7615_mutex_release(dev);
+
        return 0;
 }
 
@@ -221,7 +283,10 @@ static int mt7615_read_temperature(struct seq_file *s, void *data)
                return 0;
 
        /* cpu */
+       mt7615_mutex_acquire(dev);
        temp = mt7615_mcu_get_temperature(dev, 0);
+       mt7615_mutex_release(dev);
+
        seq_printf(s, "Temperature: %d\n", temp);
 
        return 0;
@@ -233,6 +298,8 @@ mt7615_queues_acq(struct seq_file *s, void *data)
        struct mt7615_dev *dev = dev_get_drvdata(s->private);
        int i;
 
+       mt7615_mutex_acquire(dev);
+
        for (i = 0; i < 16; i++) {
                int j, wmm_idx = i % MT7615_MAX_WMM_SETS;
                int acs = i / MT7615_MAX_WMM_SETS;
@@ -253,6 +320,8 @@ mt7615_queues_acq(struct seq_file *s, void *data)
                seq_printf(s, "AC%d%d: queued=%d\n", wmm_idx, acs, qlen);
        }
 
+       mt7615_mutex_release(dev);
+
        return 0;
 }
 
@@ -285,6 +354,29 @@ mt7615_queues_read(struct seq_file *s, void *data)
        return 0;
 }
 
+static int
+mt7615_rf_reg_set(void *data, u64 val)
+{
+       struct mt7615_dev *dev = data;
+
+       mt7615_rf_wr(dev, dev->debugfs_rf_wf, dev->debugfs_rf_reg, val);
+
+       return 0;
+}
+
+static int
+mt7615_rf_reg_get(void *data, u64 *val)
+{
+       struct mt7615_dev *dev = data;
+
+       *val = mt7615_rf_rr(dev, dev->debugfs_rf_wf, dev->debugfs_rf_reg);
+
+       return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_rf_reg, mt7615_rf_reg_get, mt7615_rf_reg_set,
+                        "0x%08llx\n");
+
 int mt7615_init_debugfs(struct mt7615_dev *dev)
 {
        struct dentry *dir;
@@ -305,6 +397,9 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
        debugfs_create_file("scs", 0600, dir, dev, &fops_scs);
        debugfs_create_file("dbdc", 0600, dir, dev, &fops_dbdc);
        debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug);
+       debugfs_create_file("runtime-pm", 0600, dir, dev, &fops_pm);
+       debugfs_create_file("idle-timeout", 0600, dir, dev,
+                           &fops_pm_idle_timeout);
        debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir,
                                    mt7615_radio_read);
        debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern);
@@ -324,6 +419,11 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
        debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir,
                                    mt7615_read_temperature);
 
+       debugfs_create_u32("rf_wfidx", 0600, dir, &dev->debugfs_rf_wf);
+       debugfs_create_u32("rf_regidx", 0600, dir, &dev->debugfs_rf_reg);
+       debugfs_create_file_unsafe("rf_regval", 0600, dir, dev,
+                                  &fops_rf_reg);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(mt7615_init_debugfs);
index e5a965d..1231a5d 100644 (file)
@@ -122,10 +122,6 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget)
 
        mt7615_tx_cleanup(dev);
 
-       rcu_read_lock();
-       mt7615_mac_sta_poll(dev);
-       rcu_read_unlock();
-
        tasklet_schedule(&dev->mt76.tx_tasklet);
 
        return 0;
index e2d8051..fc1ebab 100644 (file)
@@ -285,7 +285,9 @@ mt7615_regd_notifier(struct wiphy *wiphy,
        if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR))
                return;
 
+       mt7615_mutex_acquire(dev);
        mt7615_dfs_init_radar_detector(phy);
+       mt7615_mutex_release(dev);
 }
 
 static void
@@ -321,6 +323,7 @@ mt7615_init_wiphy(struct ieee80211_hw *hw)
 
        ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
        ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
+       ieee80211_hw_set(hw, WANT_MONITOR_VIF);
 
        if (is_mt7615(&phy->dev->mt76))
                hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM;
@@ -405,9 +408,6 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
        mphy->sband_2g.sband.n_channels = 0;
        mphy->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
 
-       /* The second interface does not get any packets unless it has a vif */
-       ieee80211_hw_set(mphy->hw, WANT_MONITOR_VIF);
-
        ret = mt76_register_phy(mphy);
        if (ret)
                ieee80211_free_hw(mphy->hw);
@@ -437,6 +437,12 @@ void mt7615_init_device(struct mt7615_dev *dev)
        dev->phy.dev = dev;
        dev->phy.mt76 = &dev->mt76.phy;
        dev->mt76.phy.priv = &dev->phy;
+
+       INIT_DELAYED_WORK(&dev->pm.ps_work, mt7615_pm_power_save_work);
+       INIT_WORK(&dev->pm.wake_work, mt7615_pm_wake_work);
+       init_completion(&dev->pm.wake_cmpl);
+       spin_lock_init(&dev->pm.txq_lock);
+       set_bit(MT76_STATE_PM, &dev->mphy.state);
        INIT_DELAYED_WORK(&dev->phy.mac_work, mt7615_mac_work);
        INIT_DELAYED_WORK(&dev->phy.scan_work, mt7615_scan_work);
        skb_queue_head_init(&dev->phy.scan_event_list);
@@ -450,6 +456,7 @@ void mt7615_init_device(struct mt7615_dev *dev)
        timer_setup(&dev->phy.roc_timer, mt7615_roc_timer, 0);
 
        mt7615_init_wiphy(hw);
+       dev->pm.idle_timeout = MT7615_PM_TIMEOUT;
        dev->mphy.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
        dev->mphy.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
        dev->mphy.sband_5g.sband.vht_cap.cap |=
@@ -457,5 +464,9 @@ void mt7615_init_device(struct mt7615_dev *dev)
                        IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
        mt7615_cap_dbdc_disable(dev);
        dev->phy.dfs_state = -1;
+
+#ifdef CONFIG_NL80211_TESTMODE
+       dev->mt76.test_ops = &mt7615_testmode_ops;
+#endif
 }
 EXPORT_SYMBOL_GPL(mt7615_init_device);
index d97315e..3dd8dd2 100644 (file)
@@ -186,6 +186,40 @@ mt7615_get_status_freq_info(struct mt7615_dev *dev, struct mt76_phy *mphy,
        status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
 }
 
+static void mt7615_mac_fill_tm_rx(struct mt7615_dev *dev, __le32 *rxv)
+{
+#ifdef CONFIG_NL80211_TESTMODE
+       u32 rxv1 = le32_to_cpu(rxv[0]);
+       u32 rxv3 = le32_to_cpu(rxv[2]);
+       u32 rxv4 = le32_to_cpu(rxv[3]);
+       u32 rxv5 = le32_to_cpu(rxv[4]);
+       u8 cbw = FIELD_GET(MT_RXV1_FRAME_MODE, rxv1);
+       u8 mode = FIELD_GET(MT_RXV1_TX_MODE, rxv1);
+       s16 foe = FIELD_GET(MT_RXV5_FOE, rxv5);
+       u32 foe_const = (BIT(cbw + 1) & 0xf) * 10000;
+
+       if (!mode) {
+               /* CCK */
+               foe &= ~BIT(11);
+               foe *= 1000;
+               foe >>= 11;
+       } else {
+               if (foe > 2048)
+                       foe -= 4096;
+
+               foe = (foe * foe_const) >> 15;
+       }
+
+       dev->test.last_freq_offset = foe;
+       dev->test.last_rcpi[0] = FIELD_GET(MT_RXV4_RCPI0, rxv4);
+       dev->test.last_rcpi[1] = FIELD_GET(MT_RXV4_RCPI1, rxv4);
+       dev->test.last_rcpi[2] = FIELD_GET(MT_RXV4_RCPI2, rxv4);
+       dev->test.last_rcpi[3] = FIELD_GET(MT_RXV4_RCPI3, rxv4);
+       dev->test.last_ib_rssi = FIELD_GET(MT_RXV3_IB_RSSI, rxv3);
+       dev->test.last_wb_rssi = FIELD_GET(MT_RXV3_WB_RSSI, rxv3);
+#endif
+}
+
 static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
 {
        struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
@@ -401,6 +435,8 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
                                             status->chain_signal[i]);
                }
 
+               mt7615_mac_fill_tm_rx(dev, rxd);
+
                rxd += 6;
                if ((u8 *)rxd - skb->data >= skb->len)
                        return -EINVAL;
@@ -493,18 +529,18 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
                          struct ieee80211_sta *sta, int pid,
                          struct ieee80211_key_conf *key, bool beacon)
 {
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_tx_rate *rate = &info->control.rates[0];
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+       bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
        bool multicast = is_multicast_ether_addr(hdr->addr1);
        struct ieee80211_vif *vif = info->control.vif;
+       bool is_mmio = mt76_is_mmio(&dev->mt76);
+       u32 val, sz_txd = is_mmio ? MT_TXD_SIZE : MT_USB_TXD_SIZE;
        struct mt76_phy *mphy = &dev->mphy;
-       bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
-       bool is_usb = mt76_is_usb(&dev->mt76);
-       int tx_count = 8;
-       u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
        __le16 fc = hdr->frame_control;
-       u32 val, sz_txd = is_usb ? MT_USB_TXD_SIZE : MT_TXD_SIZE;
+       int tx_count = 8;
        u16 seqno = 0;
 
        if (vif) {
@@ -530,10 +566,10 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
                p_fmt = MT_TX_TYPE_FW;
                q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0;
        } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
-               p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT;
+               p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
                q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0;
        } else {
-               p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT;
+               p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
                q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
                        mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb));
        }
@@ -617,16 +653,19 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
        }
 
        val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
-       if (ieee80211_is_data_qos(hdr->frame_control)) {
-               seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
-               val |= MT_TXD3_SN_VALID;
-       } else if (ieee80211_is_back_req(hdr->frame_control)) {
-               struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
+       if (info->flags & IEEE80211_TX_CTL_INJECTED) {
+               seqno = le16_to_cpu(hdr->seq_ctrl);
+
+               if (ieee80211_is_back_req(hdr->frame_control)) {
+                       struct ieee80211_bar *bar;
 
-               seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
-               val |= MT_TXD3_SN_VALID;
+                       bar = (struct ieee80211_bar *)skb->data;
+                       seqno = le16_to_cpu(bar->start_seq_num);
+               }
+
+               val |= MT_TXD3_SN_VALID |
+                      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
        }
-       val |= FIELD_PREP(MT_TXD3_SEQ, seqno);
 
        txwi[3] |= cpu_to_le32(val);
 
@@ -636,7 +675,7 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
        txwi[7] = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
                  FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) |
                  FIELD_PREP(MT_TXD7_SPE_IDX, 0x18);
-       if (is_usb)
+       if (!is_mmio)
                txwi[8] = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) |
                          FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype);
 
@@ -878,6 +917,9 @@ mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta,
        struct mt7615_dev *dev = phy->dev;
        struct mt7615_wtbl_desc *wd;
 
+       if (work_pending(&dev->wtbl_work))
+               return -EBUSY;
+
        wd = kzalloc(sizeof(*wd), GFP_ATOMIC);
        if (!wd)
                return -ENOMEM;
@@ -888,11 +930,34 @@ mt7615_mac_queue_rate_update(struct mt7615_phy *phy, struct mt7615_sta *sta,
        mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates,
                                    &wd->rate);
        list_add_tail(&wd->node, &dev->wd_head);
-       queue_work(dev->mt76.usb.wq, &dev->wtbl_work);
+       queue_work(dev->mt76.wq, &dev->wtbl_work);
 
        return 0;
 }
 
+u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid)
+{
+       u32 addr, val, val2;
+       u8 offset;
+
+       addr = mt7615_mac_wtbl_addr(dev, wcid) + 11 * 4;
+
+       offset = tid * 12;
+       addr += 4 * (offset / 32);
+       offset %= 32;
+
+       val = mt76_rr(dev, addr);
+       val >>= (tid % 32);
+
+       if (offset > 20) {
+               addr += 4;
+               val2 = mt76_rr(dev, addr);
+               val |= val2 << (32 - offset);
+       }
+
+       return val & GENMASK(11, 0);
+}
+
 void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
                          struct ieee80211_tx_rate *probe_rate,
                          struct ieee80211_tx_rate *rates)
@@ -902,7 +967,7 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
        struct mt7615_rate_desc rd;
        u32 w5, w27, addr;
 
-       if (mt76_is_usb(&dev->mt76)) {
+       if (!mt76_is_mmio(&dev->mt76)) {
                mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates);
                return;
        }
@@ -961,6 +1026,7 @@ void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
 
        sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates;
        sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+       sta->rate_probe = !!probe_rate;
 }
 EXPORT_SYMBOL_GPL(mt7615_mac_set_rates);
 
@@ -1169,7 +1235,6 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
                                phy = dev->mt76.phy2->priv;
 
                        mt7615_mac_set_rates(phy, sta, NULL, sta->rates);
-                       sta->rate_probe = false;
                }
                spin_unlock_bh(&dev->mt76.lock);
        } else {
@@ -1373,6 +1438,12 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
        }
 
        dev_kfree_skb(skb);
+
+       rcu_read_lock();
+       mt7615_mac_sta_poll(dev);
+       rcu_read_unlock();
+
+       tasklet_schedule(&dev->mt76.tx_tasklet);
 }
 
 void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
@@ -1462,7 +1533,7 @@ void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable)
        bool ext_phy = phy != &dev->phy;
        u32 reg, mask;
 
-       mutex_lock(&dev->mt76.mutex);
+       mt7615_mutex_acquire(dev);
 
        if (phy->scs_en == enable)
                goto out;
@@ -1489,7 +1560,7 @@ void mt7615_mac_set_scs(struct mt7615_phy *phy, bool enable)
        phy->scs_en = enable;
 
 out:
-       mutex_unlock(&dev->mt76.mutex);
+       mt7615_mutex_release(dev);
 }
 
 void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy)
@@ -1679,9 +1750,9 @@ mt7615_phy_update_channel(struct mt76_phy *mphy, int idx)
        state->noise = -(phy->noise >> 4);
 }
 
-void mt7615_update_channel(struct mt76_dev *mdev)
+static void __mt7615_update_channel(struct mt7615_dev *dev)
 {
-       struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+       struct mt76_dev *mdev = &dev->mt76;
 
        mt7615_phy_update_channel(&mdev->phy, 0);
        if (mdev->phy2)
@@ -1690,8 +1761,32 @@ void mt7615_update_channel(struct mt76_dev *mdev)
        /* reset obss airtime */
        mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR);
 }
+
+void mt7615_update_channel(struct mt76_dev *mdev)
+{
+       struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+       if (mt7615_pm_wake(dev))
+               return;
+
+       __mt7615_update_channel(dev);
+       mt7615_pm_power_save_sched(dev);
+}
 EXPORT_SYMBOL_GPL(mt7615_update_channel);
 
+static void mt7615_update_survey(struct mt7615_dev *dev)
+{
+       struct mt76_dev *mdev = &dev->mt76;
+       ktime_t cur_time;
+
+       __mt7615_update_channel(dev);
+       cur_time = ktime_get_boottime();
+
+       mt76_update_survey_active_time(&mdev->phy, cur_time);
+       if (mdev->phy2)
+               mt76_update_survey_active_time(mdev->phy2, cur_time);
+}
+
 static void
 mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
 {
@@ -1740,6 +1835,163 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
        }
 }
 
+void mt7615_pm_wake_work(struct work_struct *work)
+{
+       struct mt7615_dev *dev;
+       struct mt76_phy *mphy;
+       int i;
+
+       dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
+                                               pm.wake_work);
+       mphy = dev->phy.mt76;
+
+       if (mt7615_driver_own(dev)) {
+               dev_err(mphy->dev->dev, "failed to wake device\n");
+               goto out;
+       }
+
+       spin_lock_bh(&dev->pm.txq_lock);
+       for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+               struct mt7615_sta *msta = dev->pm.tx_q[i].msta;
+               struct mt76_wcid *wcid = msta ? &msta->wcid : NULL;
+               struct ieee80211_sta *sta = NULL;
+
+               if (!dev->pm.tx_q[i].skb)
+                       continue;
+
+               if (msta && wcid->sta)
+                       sta = container_of((void *)msta, struct ieee80211_sta,
+                                          drv_priv);
+
+               mt76_tx(mphy, sta, wcid, dev->pm.tx_q[i].skb);
+               dev->pm.tx_q[i].skb = NULL;
+       }
+       spin_unlock_bh(&dev->pm.txq_lock);
+
+       tasklet_schedule(&dev->mt76.tx_tasklet);
+
+out:
+       ieee80211_wake_queues(mphy->hw);
+       complete_all(&dev->pm.wake_cmpl);
+}
+
+int mt7615_pm_wake(struct mt7615_dev *dev)
+{
+       struct mt76_phy *mphy = dev->phy.mt76;
+
+       if (!mt7615_firmware_offload(dev))
+               return 0;
+
+       if (!mt76_is_mmio(mphy->dev))
+               return 0;
+
+       if (!test_bit(MT76_STATE_PM, &mphy->state))
+               return 0;
+
+       if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
+           test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
+               return 0;
+
+       if (queue_work(dev->mt76.wq, &dev->pm.wake_work))
+               reinit_completion(&dev->pm.wake_cmpl);
+
+       if (!wait_for_completion_timeout(&dev->pm.wake_cmpl, 3 * HZ)) {
+               ieee80211_wake_queues(mphy->hw);
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt7615_pm_wake);
+
+void mt7615_pm_power_save_sched(struct mt7615_dev *dev)
+{
+       struct mt76_phy *mphy = dev->phy.mt76;
+
+       if (!mt7615_firmware_offload(dev))
+               return;
+
+       if (!mt76_is_mmio(mphy->dev))
+               return;
+
+       if (!dev->pm.enable || !test_bit(MT76_STATE_RUNNING, &mphy->state))
+               return;
+
+       dev->pm.last_activity = jiffies;
+
+       if (test_bit(MT76_HW_SCANNING, &mphy->state) ||
+           test_bit(MT76_HW_SCHED_SCANNING, &mphy->state))
+               return;
+
+       if (!test_bit(MT76_STATE_PM, &mphy->state))
+               queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work,
+                                  dev->pm.idle_timeout);
+}
+EXPORT_SYMBOL_GPL(mt7615_pm_power_save_sched);
+
+void mt7615_pm_power_save_work(struct work_struct *work)
+{
+       struct mt7615_dev *dev;
+       unsigned long delta;
+
+       dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
+                                               pm.ps_work.work);
+
+       delta = dev->pm.idle_timeout;
+       if (time_is_after_jiffies(dev->pm.last_activity + delta)) {
+               delta = dev->pm.last_activity + delta - jiffies;
+               goto out;
+       }
+
+       if (!mt7615_firmware_own(dev))
+               return;
+out:
+       queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
+}
+
+static void
+mt7615_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+       struct mt7615_phy *phy = priv;
+       struct mt7615_dev *dev = phy->dev;
+       bool ext_phy = phy != &dev->phy;
+
+       if (mt7615_mcu_set_bss_pm(dev, vif, dev->pm.enable))
+               return;
+
+       if (dev->pm.enable) {
+               vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+               mt76_set(dev, MT_WF_RFCR(ext_phy),
+                        MT_WF_RFCR_DROP_OTHER_BEACON);
+       } else {
+               vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
+               mt76_clear(dev, MT_WF_RFCR(ext_phy),
+                          MT_WF_RFCR_DROP_OTHER_BEACON);
+       }
+}
+
+int mt7615_pm_set_enable(struct mt7615_dev *dev, bool enable)
+{
+       struct mt76_phy *mphy = dev->phy.mt76;
+
+       if (!mt7615_firmware_offload(dev) || !mt76_is_mmio(&dev->mt76))
+               return -EOPNOTSUPP;
+
+       mt7615_mutex_acquire(dev);
+
+       if (dev->pm.enable == enable)
+               goto out;
+
+       dev->pm.enable = enable;
+       ieee80211_iterate_active_interfaces(mphy->hw,
+                                           IEEE80211_IFACE_ITER_RESUME_ALL,
+                                           mt7615_pm_interface_iter, mphy->priv);
+out:
+       mt7615_mutex_release(dev);
+
+       return 0;
+}
+
 void mt7615_mac_work(struct work_struct *work)
 {
        struct mt7615_phy *phy;
@@ -1749,9 +2001,9 @@ void mt7615_mac_work(struct work_struct *work)
                                                mac_work.work);
        mdev = &phy->dev->mt76;
 
-       mutex_lock(&mdev->mutex);
+       mt7615_mutex_acquire(phy->dev);
 
-       mt76_update_survey(mdev);
+       mt7615_update_survey(phy->dev);
        if (++phy->mac_work_count == 5) {
                phy->mac_work_count = 0;
 
@@ -1759,7 +2011,7 @@ void mt7615_mac_work(struct work_struct *work)
                mt7615_mac_scs_check(phy);
        }
 
-       mutex_unlock(&mdev->mutex);
+       mt7615_mutex_release(phy->dev);
 
        mt76_tx_status_check(mdev, NULL, false);
        ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
@@ -1863,7 +2115,7 @@ void mt7615_mac_reset_work(struct work_struct *work)
        napi_disable(&dev->mt76.napi[1]);
        napi_disable(&dev->mt76.tx_napi);
 
-       mutex_lock(&dev->mt76.mutex);
+       mt7615_mutex_acquire(dev);
 
        mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_STOPPED);
 
@@ -1896,10 +2148,10 @@ void mt7615_mac_reset_work(struct work_struct *work)
        mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
        mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
 
-       mutex_unlock(&dev->mt76.mutex);
-
        mt7615_update_beacons(dev);
 
+       mt7615_mutex_release(dev);
+
        ieee80211_queue_delayed_work(mt76_hw(dev), &dev->phy.mac_work,
                                     MT7615_WATCHDOG_TIME);
        if (phy2)
index 81608ab..169f4e1 100644 (file)
@@ -100,11 +100,16 @@ enum rx_pkt_type {
 #define MT_RXV2_GROUP_ID               GENMASK(26, 21)
 #define MT_RXV2_LENGTH                 GENMASK(20, 0)
 
+#define MT_RXV3_WB_RSSI                        GENMASK(31, 24)
+#define MT_RXV3_IB_RSSI                        GENMASK(23, 16)
+
 #define MT_RXV4_RCPI3                  GENMASK(31, 24)
 #define MT_RXV4_RCPI2                  GENMASK(23, 16)
 #define MT_RXV4_RCPI1                  GENMASK(15, 8)
 #define MT_RXV4_RCPI0                  GENMASK(7, 0)
 
+#define MT_RXV5_FOE                    GENMASK(11, 0)
+
 #define MT_RXV6_NF3                    GENMASK(31, 24)
 #define MT_RXV6_NF2                    GENMASK(23, 16)
 #define MT_RXV6_NF1                    GENMASK(15, 8)
index beaca81..2d0b1f4 100644 (file)
@@ -24,6 +24,22 @@ static bool mt7615_dev_running(struct mt7615_dev *dev)
        return phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state);
 }
 
+static void mt7615_free_pending_tx_skbs(struct mt7615_dev *dev,
+                                       struct mt7615_sta *msta)
+{
+       int i;
+
+       spin_lock_bh(&dev->pm.txq_lock);
+       for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+               if (msta && dev->pm.tx_q[i].msta != msta)
+                       continue;
+
+               dev_kfree_skb(dev->pm.tx_q[i].skb);
+               dev->pm.tx_q[i].skb = NULL;
+       }
+       spin_unlock_bh(&dev->pm.txq_lock);
+}
+
 static int mt7615_start(struct ieee80211_hw *hw)
 {
        struct mt7615_dev *dev = mt7615_hw_dev(hw);
@@ -33,7 +49,7 @@ static int mt7615_start(struct ieee80211_hw *hw)
        if (!mt7615_wait_for_mcu_init(dev))
                return -EIO;
 
-       mutex_lock(&dev->mt76.mutex);
+       mt7615_mutex_acquire(dev);
 
        running = mt7615_dev_running(dev);
 
@@ -60,7 +76,7 @@ static int mt7615_start(struct ieee80211_hw *hw)
        if (!running)
                mt7615_mac_reset_counters(dev);
 
-       mutex_unlock(&dev->mt76.mutex);
+       mt7615_mutex_release(dev);
 
        return 0;
 }
@@ -74,7 +90,14 @@ static void mt7615_stop(struct ieee80211_hw *hw)
        del_timer_sync(&phy->roc_timer);
        cancel_work_sync(&phy->roc_work);
 
-       mutex_lock(&dev->mt76.mutex);
+       cancel_delayed_work_sync(&dev->pm.ps_work);
+       cancel_work_sync(&dev->pm.wake_work);
+
+       mt7615_free_pending_tx_skbs(dev, NULL);
+
+       mt7615_mutex_acquire(dev);
+
+       mt76_testmode_reset(&dev->mt76, true);
 
        clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
        cancel_delayed_work_sync(&phy->scan_work);
@@ -89,7 +112,7 @@ static void mt7615_stop(struct ieee80211_hw *hw)
                mt7615_mcu_set_mac_enable(dev, 0, false);
        }
 
-       mutex_unlock(&dev->mt76.mutex);
+       mt7615_mutex_release(dev);
 }
 
 static int get_omac_idx(enum nl80211_iftype type, u32 mask)
@@ -135,9 +158,15 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
        bool ext_phy = phy != &dev->phy;
        int idx, ret = 0;
 
-       mutex_lock(&dev->mt76.mutex);
+       mt7615_mutex_acquire(dev);
+
+       mt76_testmode_reset(&dev->mt76, true);
 
-       mvif->idx = ffs(~dev->vif_mask) - 1;
+       if (vif->type == NL80211_IFTYPE_MONITOR &&
+           is_zero_ether_addr(vif->addr))
+               phy->monitor_vif = vif;
+
+       mvif->idx = ffs(~dev->mphy.vif_mask) - 1;
        if (mvif->idx >= MT7615_MAX_INTERFACES) {
                ret = -ENOSPC;
                goto out;
@@ -157,7 +186,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
        else
                mvif->wmm_idx = mvif->idx % MT7615_MAX_WMM_SETS;
 
-       dev->vif_mask |= BIT(mvif->idx);
+       dev->mphy.vif_mask |= BIT(mvif->idx);
        dev->omac_mask |= BIT(mvif->omac_idx);
        phy->omac_mask |= BIT(mvif->omac_idx);
 
@@ -180,8 +209,20 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
        }
 
        ret = mt7615_mcu_add_dev_info(dev, vif, true);
+       if (ret)
+               goto out;
+
+       if (dev->pm.enable) {
+               ret = mt7615_mcu_set_bss_pm(dev, vif, true);
+               if (ret)
+                       goto out;
+
+               vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+               mt76_set(dev, MT_WF_RFCR(ext_phy),
+                        MT_WF_RFCR_DROP_OTHER_BEACON);
+       }
 out:
-       mutex_unlock(&dev->mt76.mutex);
+       mt7615_mutex_release(dev);
 
        return ret;
 }
@@ -197,17 +238,32 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
 
        /* TODO: disable beacon for the bss */
 
+       mt7615_mutex_acquire(dev);
+
+       mt76_testmode_reset(&dev->mt76, true);
+       if (vif == phy->monitor_vif)
+           phy->monitor_vif = NULL;
+
+       mt7615_free_pending_tx_skbs(dev, msta);
+
+       if (dev->pm.enable) {
+               bool ext_phy = phy != &dev->phy;
+
+               mt7615_mcu_set_bss_pm(dev, vif, false);
+               mt76_clear(dev, MT_WF_RFCR(ext_phy),
+                          MT_WF_RFCR_DROP_OTHER_BEACON);
+       }
        mt7615_mcu_add_dev_info(dev, vif, false);
 
        rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
        if (vif->txq)
                mt76_txq_remove(&dev->mt76, vif->txq);
 
-       mutex_lock(&dev->mt76.mutex);
-       dev->vif_mask &= ~BIT(mvif->idx);
+       dev->mphy.vif_mask &= ~BIT(mvif->idx);
        dev->omac_mask &= ~BIT(mvif->omac_idx);
        phy->omac_mask &= ~BIT(mvif->omac_idx);
-       mutex_unlock(&dev->mt76.mutex);
+
+       mt7615_mutex_release(dev);
 
        spin_lock_bh(&dev->sta_poll_lock);
        if (!list_empty(&msta->poll_list))
@@ -234,7 +290,7 @@ static void mt7615_init_dfs_state(struct mt7615_phy *phy)
        phy->dfs_state = -1;
 }
 
-static int mt7615_set_channel(struct mt7615_phy *phy)
+int mt7615_set_channel(struct mt7615_phy *phy)
 {
        struct mt7615_dev *dev = phy->dev;
        bool ext_phy = phy != &dev->phy;
@@ -242,7 +298,8 @@ static int mt7615_set_channel(struct mt7615_phy *phy)
 
        cancel_delayed_work_sync(&phy->mac_work);
 
-       mutex_lock(&dev->mt76.mutex);
+       mt7615_mutex_acquire(dev);
+
        set_bit(MT76_RESET, &phy->mt76->state);
 
        mt7615_init_dfs_state(phy);
@@ -260,7 +317,7 @@ static int mt7615_set_channel(struct mt7615_phy *phy)
        mt7615_mac_set_timing(phy);
        ret = mt7615_dfs_init_radar_detector(phy);
        mt7615_mac_cca_stats_reset(phy);
-       mt7615_mcu_set_sku_en(phy, true);
+       mt7615_mcu_set_sku_en(phy, !mt76_testmode_enabled(&dev->mt76));
 
        mt7615_mac_reset_counters(dev);
        phy->noise = 0;
@@ -268,11 +325,15 @@ static int mt7615_set_channel(struct mt7615_phy *phy)
 
 out:
        clear_bit(MT76_RESET, &phy->mt76->state);
-       mutex_unlock(&dev->mt76.mutex);
+
+       mt7615_mutex_release(dev);
 
        mt76_txq_schedule_all(phy->mt76);
-       ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
-                                    MT7615_WATCHDOG_TIME);
+
+       if (!mt76_testmode_enabled(&dev->mt76))
+               ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
+                                            MT7615_WATCHDOG_TIME);
+
        return ret;
 }
 
@@ -301,7 +362,7 @@ mt7615_queue_key_update(struct mt7615_dev *dev, enum set_key_cmd cmd,
        wd->key.cmd = cmd;
 
        list_add_tail(&wd->node, &dev->wd_head);
-       queue_work(dev->mt76.usb.wq, &dev->wtbl_work);
+       queue_work(dev->mt76.wq, &dev->wtbl_work);
 
        return 0;
 }
@@ -315,7 +376,7 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
        struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
                                  &mvif->sta;
        struct mt76_wcid *wcid = &msta->wcid;
-       int idx = key->keyidx;
+       int idx = key->keyidx, err;
 
        /* The hardware does not support per-STA RX GTK, fallback
         * to software mode for these.
@@ -345,6 +406,8 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                return -EOPNOTSUPP;
        }
 
+       mt7615_mutex_acquire(dev);
+
        if (cmd == SET_KEY) {
                key->hw_key_idx = wcid->idx;
                wcid->hw_key_idx = idx;
@@ -354,10 +417,14 @@ static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
        mt76_wcid_key_setup(&dev->mt76, wcid,
                            cmd == SET_KEY ? key : NULL);
 
-       if (mt76_is_usb(&dev->mt76))
-               return mt7615_queue_key_update(dev, cmd, msta, key);
+       if (mt76_is_mmio(&dev->mt76))
+               err = mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
+       else
+               err = mt7615_queue_key_update(dev, cmd, msta, key);
+
+       mt7615_mutex_release(dev);
 
-       return mt7615_mac_wtbl_set_key(dev, wcid, key, cmd);
+       return err;
 }
 
 static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
@@ -369,14 +436,23 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
 
        if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
                       IEEE80211_CONF_CHANGE_POWER)) {
+#ifdef CONFIG_NL80211_TESTMODE
+               if (dev->mt76.test.state != MT76_TM_STATE_OFF) {
+                       mt7615_mutex_acquire(dev);
+                       mt76_testmode_reset(&dev->mt76, false);
+                       mt7615_mutex_release(dev);
+               }
+#endif
                ieee80211_stop_queues(hw);
                ret = mt7615_set_channel(phy);
                ieee80211_wake_queues(hw);
        }
 
-       mutex_lock(&dev->mt76.mutex);
+       mt7615_mutex_acquire(dev);
 
        if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+               mt76_testmode_reset(&dev->mt76, true);
+
                if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
                        phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
                else
@@ -385,7 +461,7 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
                mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
        }
 
-       mutex_unlock(&dev->mt76.mutex);
+       mt7615_mutex_release(dev);
 
        return ret;
 }
@@ -396,11 +472,17 @@ mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
 {
        struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
        struct mt7615_dev *dev = mt7615_hw_dev(hw);
+       int err;
+
+       mt7615_mutex_acquire(dev);
 
        queue = mt7615_lmac_mapping(dev, queue);
        queue += mvif->wmm_idx * MT7615_MAX_WMM_SETS;
+       err = mt7615_mcu_set_wmm(dev, queue, params);
 
-       return mt7615_mcu_set_wmm(dev, queue, params);
+       mt7615_mutex_release(dev);
+
+       return err;
 }
 
 static void mt7615_configure_filter(struct ieee80211_hw *hw,
@@ -419,10 +501,13 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
                        MT_WF_RFCR1_DROP_CFACK;
        u32 flags = 0;
 
+       mt7615_mutex_acquire(dev);
+
 #define MT76_FILTER(_flag, _hw) do { \
                flags |= *total_flags & FIF_##_flag;                    \
                phy->rxfilter &= ~(_hw);                                \
-               phy->rxfilter |= !(flags & FIF_##_flag) * (_hw);        \
+               if (!mt76_testmode_enabled(&dev->mt76))                 \
+                       phy->rxfilter |= !(flags & FIF_##_flag) * (_hw);\
        } while (0)
 
        phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
@@ -455,6 +540,8 @@ static void mt7615_configure_filter(struct ieee80211_hw *hw,
                mt76_clear(dev, MT_WF_RFCR1(band), ctl_flags);
        else
                mt76_set(dev, MT_WF_RFCR1(band), ctl_flags);
+
+       mt7615_mutex_release(dev);
 }
 
 static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
@@ -465,7 +552,7 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
        struct mt7615_dev *dev = mt7615_hw_dev(hw);
        struct mt7615_phy *phy = mt7615_hw_phy(hw);
 
-       mutex_lock(&dev->mt76.mutex);
+       mt7615_mutex_acquire(dev);
 
        if (changed & BSS_CHANGED_ERP_SLOT) {
                int slottime = info->use_short_slot ? 9 : 20;
@@ -491,7 +578,10 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
        if (changed & BSS_CHANGED_PS)
                mt7615_mcu_set_vif_ps(dev, vif);
 
-       mutex_unlock(&dev->mt76.mutex);
+       if (changed & BSS_CHANGED_ARP_FILTER)
+               mt7615_mcu_update_arp_filter(hw, vif, info);
+
+       mt7615_mutex_release(dev);
 }
 
 static void
@@ -501,9 +591,9 @@ mt7615_channel_switch_beacon(struct ieee80211_hw *hw,
 {
        struct mt7615_dev *dev = mt7615_hw_dev(hw);
 
-       mutex_lock(&dev->mt76.mutex);
+       mt7615_mutex_acquire(dev);
        mt7615_mcu_add_beacon(dev, hw, vif, true);
-       mutex_unlock(&dev->mt76.mutex);
+       mt7615_mutex_release(dev);
 }
 
 int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
@@ -512,7 +602,7 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
        struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
        struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
        struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
-       int idx;
+       int idx, err;
 
        idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
        if (idx < 0)
@@ -524,6 +614,10 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
        msta->wcid.idx = idx;
        msta->wcid.ext_phy = mvif->band_idx;
 
+       err = mt7615_pm_wake(dev);
+       if (err)
+               return err;
+
        if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
                struct mt7615_phy *phy;
 
@@ -534,6 +628,8 @@ int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
                               MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
        mt7615_mcu_sta_add(dev, vif, sta, true);
 
+       mt7615_pm_power_save_sched(dev);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(mt7615_mac_sta_add);
@@ -544,6 +640,9 @@ void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
        struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
        struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
 
+       mt7615_free_pending_tx_skbs(dev, msta);
+       mt7615_pm_wake(dev);
+
        mt7615_mcu_sta_add(dev, vif, sta, false);
        mt7615_mac_wtbl_update(dev, msta->wcid.idx,
                               MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -559,6 +658,8 @@ void mt7615_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
        if (!list_empty(&msta->poll_list))
                list_del_init(&msta->poll_list);
        spin_unlock_bh(&dev->sta_poll_lock);
+
+       mt7615_pm_power_save_sched(dev);
 }
 EXPORT_SYMBOL_GPL(mt7615_mac_sta_remove);
 
@@ -582,11 +683,29 @@ static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
                        break;
        }
        msta->n_rates = i;
-       mt7615_mac_set_rates(phy, msta, NULL, msta->rates);
-       msta->rate_probe = false;
+       if (!test_bit(MT76_STATE_PM, &phy->mt76->state))
+               mt7615_mac_set_rates(phy, msta, NULL, msta->rates);
        spin_unlock_bh(&dev->mt76.lock);
 }
 
+static void
+mt7615_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+       struct mt7615_dev *dev = mt7615_hw_dev(hw);
+       struct mt7615_phy *phy = mt7615_hw_phy(hw);
+       struct mt76_phy *mphy = phy->mt76;
+
+       if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
+               return;
+
+       if (test_bit(MT76_STATE_PM, &mphy->state)) {
+               queue_work(dev->mt76.wq, &dev->pm.wake_work);
+               return;
+       }
+
+       tasklet_schedule(&dev->mt76.tx_tasklet);
+}
+
 static void mt7615_tx(struct ieee80211_hw *hw,
                      struct ieee80211_tx_control *control,
                      struct sk_buff *skb)
@@ -596,22 +715,43 @@ static void mt7615_tx(struct ieee80211_hw *hw,
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_vif *vif = info->control.vif;
        struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+       struct mt7615_sta *msta = NULL;
+       int qid;
 
        if (control->sta) {
-               struct mt7615_sta *sta;
-
-               sta = (struct mt7615_sta *)control->sta->drv_priv;
-               wcid = &sta->wcid;
+               msta = (struct mt7615_sta *)control->sta->drv_priv;
+               wcid = &msta->wcid;
        }
 
        if (vif && !control->sta) {
                struct mt7615_vif *mvif;
 
                mvif = (struct mt7615_vif *)vif->drv_priv;
-               wcid = &mvif->sta.wcid;
+               msta = &mvif->sta;
+               wcid = &msta->wcid;
+       }
+
+       if (!test_bit(MT76_STATE_PM, &mphy->state)) {
+               mt76_tx(mphy, control->sta, wcid, skb);
+               return;
+       }
+
+       qid = skb_get_queue_mapping(skb);
+       if (qid >= MT_TXQ_PSD) {
+               qid = IEEE80211_AC_BE;
+               skb_set_queue_mapping(skb, qid);
        }
 
-       mt76_tx(mphy, control->sta, wcid, skb);
+       spin_lock_bh(&dev->pm.txq_lock);
+       if (!dev->pm.tx_q[qid].skb) {
+               ieee80211_stop_queues(hw);
+               dev->pm.tx_q[qid].msta = msta;
+               dev->pm.tx_q[qid].skb = skb;
+               queue_work(dev->mt76.wq, &dev->pm.wake_work);
+       } else {
+               dev_kfree_skb(skb);
+       }
+       spin_unlock_bh(&dev->pm.txq_lock);
 }
 
 static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
@@ -619,9 +759,9 @@ static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
        struct mt7615_dev *dev = mt7615_hw_dev(hw);
        struct mt7615_phy *phy = mt7615_hw_phy(hw);
 
-       mutex_lock(&dev->mt76.mutex);
+       mt7615_mutex_acquire(dev);
        mt7615_mcu_set_rts_thresh(phy, val);
-       mutex_unlock(&dev->mt76.mutex);
+       mt7615_mutex_release(dev);
 
        return 0;
 }
@@ -645,7 +785,8 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 
        mtxq = (struct mt76_txq *)txq->drv_priv;
 
-       mutex_lock(&dev->mt76.mutex);
+       mt7615_mutex_acquire(dev);
+
        switch (action) {
        case IEEE80211_AMPDU_RX_START:
                mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
@@ -660,6 +801,9 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                mtxq->aggr = true;
                mtxq->send_bar = false;
                mt7615_mcu_add_tx_ba(dev, params, true);
+               ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid);
+               ieee80211_send_bar(vif, sta->addr, tid,
+                                  IEEE80211_SN_TO_SEQ(ssn));
                break;
        case IEEE80211_AMPDU_TX_STOP_FLUSH:
        case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
@@ -667,6 +811,8 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                mt7615_mcu_add_tx_ba(dev, params, false);
                break;
        case IEEE80211_AMPDU_TX_START:
+               ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid);
+               params->ssn = ssn;
                mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
                ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
                break;
@@ -676,7 +822,7 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                break;
        }
-       mutex_unlock(&dev->mt76.mutex);
+       mt7615_mutex_release(dev);
 
        return ret;
 }
@@ -721,27 +867,47 @@ mt7615_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
                u32 t32[2];
        } tsf;
 
-       mutex_lock(&dev->mt76.mutex);
+       mt7615_mutex_acquire(dev);
 
        mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
        tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0);
        tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1);
 
-       mutex_unlock(&dev->mt76.mutex);
+       mt7615_mutex_release(dev);
 
        return tsf.t64;
 }
 
 static void
+mt7615_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+              u64 timestamp)
+{
+       struct mt7615_dev *dev = mt7615_hw_dev(hw);
+       union {
+               u64 t64;
+               u32 t32[2];
+       } tsf = { .t64 = timestamp, };
+
+       mt7615_mutex_acquire(dev);
+
+       mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]);
+       mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]);
+       /* TSF software overwrite */
+       mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_WRITE);
+
+       mt7615_mutex_release(dev);
+}
+
+static void
 mt7615_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
 {
        struct mt7615_phy *phy = mt7615_hw_phy(hw);
        struct mt7615_dev *dev = phy->dev;
 
-       mutex_lock(&dev->mt76.mutex);
+       mt7615_mutex_acquire(dev);
        phy->coverage_class = max_t(s16, coverage_class, 0);
        mt7615_mac_set_timing(phy);
-       mutex_unlock(&dev->mt76.mutex);
+       mt7615_mutex_release(dev);
 }
 
 static int
@@ -758,7 +924,7 @@ mt7615_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
        if ((BIT(hweight8(tx_ant)) - 1) != tx_ant)
                tx_ant = BIT(ffs(tx_ant) - 1) - 1;
 
-       mutex_lock(&dev->mt76.mutex);
+       mt7615_mutex_acquire(dev);
 
        phy->mt76->antenna_mask = tx_ant;
        if (ext_phy) {
@@ -771,7 +937,7 @@ mt7615_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
 
        mt76_set_stream_caps(phy->mt76, true);
 
-       mutex_unlock(&dev->mt76.mutex);
+       mt7615_mutex_release(dev);
 
        return 0;
 }
@@ -794,9 +960,11 @@ void mt7615_roc_work(struct work_struct *work)
        if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
                return;
 
+       mt7615_mutex_acquire(phy->dev);
        ieee80211_iterate_active_interfaces(phy->mt76->hw,
                                            IEEE80211_IFACE_ITER_RESUME_ALL,
                                            mt7615_roc_iter, phy);
+       mt7615_mutex_release(phy->dev);
        ieee80211_remain_on_channel_expired(phy->mt76->hw);
 }
 
@@ -844,17 +1012,26 @@ static int
 mt7615_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
               struct ieee80211_scan_request *req)
 {
+       struct mt7615_dev *dev = mt7615_hw_dev(hw);
        struct mt76_phy *mphy = hw->priv;
+       int err;
+
+       mt7615_mutex_acquire(dev);
+       err = mt7615_mcu_hw_scan(mphy->priv, vif, req);
+       mt7615_mutex_release(dev);
 
-       return mt7615_mcu_hw_scan(mphy->priv, vif, req);
+       return err;
 }
 
 static void
 mt7615_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 {
+       struct mt7615_dev *dev = mt7615_hw_dev(hw);
        struct mt76_phy *mphy = hw->priv;
 
+       mt7615_mutex_acquire(dev);
        mt7615_mcu_cancel_hw_scan(mphy->priv, vif);
+       mt7615_mutex_release(dev);
 }
 
 static int
@@ -862,22 +1039,35 @@ mt7615_start_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                        struct cfg80211_sched_scan_request *req,
                        struct ieee80211_scan_ies *ies)
 {
+       struct mt7615_dev *dev = mt7615_hw_dev(hw);
        struct mt76_phy *mphy = hw->priv;
        int err;
 
+       mt7615_mutex_acquire(dev);
+
        err = mt7615_mcu_sched_scan_req(mphy->priv, vif, req);
        if (err < 0)
-               return err;
+               goto out;
+
+       err = mt7615_mcu_sched_scan_enable(mphy->priv, vif, true);
+out:
+       mt7615_mutex_release(dev);
 
-       return mt7615_mcu_sched_scan_enable(mphy->priv, vif, true);
+       return err;
 }
 
 static int
 mt7615_stop_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 {
+       struct mt7615_dev *dev = mt7615_hw_dev(hw);
        struct mt76_phy *mphy = hw->priv;
+       int err;
 
-       return mt7615_mcu_sched_scan_enable(mphy->priv, vif, false);
+       mt7615_mutex_acquire(dev);
+       err = mt7615_mcu_sched_scan_enable(mphy->priv, vif, false);
+       mt7615_mutex_release(dev);
+
+       return err;
 }
 
 static int mt7615_remain_on_channel(struct ieee80211_hw *hw,
@@ -892,20 +1082,24 @@ static int mt7615_remain_on_channel(struct ieee80211_hw *hw,
        if (test_and_set_bit(MT76_STATE_ROC, &phy->mt76->state))
                return 0;
 
+       mt7615_mutex_acquire(phy->dev);
+
        err = mt7615_mcu_set_roc(phy, vif, chan, duration);
        if (err < 0) {
                clear_bit(MT76_STATE_ROC, &phy->mt76->state);
-               return err;
+               goto out;
        }
 
        if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, HZ)) {
                mt7615_mcu_set_roc(phy, vif, NULL, 0);
                clear_bit(MT76_STATE_ROC, &phy->mt76->state);
-
-               return -ETIMEDOUT;
+               err = -ETIMEDOUT;
        }
 
-       return 0;
+out:
+       mt7615_mutex_release(phy->dev);
+
+       return err;
 }
 
 static int mt7615_cancel_remain_on_channel(struct ieee80211_hw *hw,
@@ -919,7 +1113,9 @@ static int mt7615_cancel_remain_on_channel(struct ieee80211_hw *hw,
        del_timer_sync(&phy->roc_timer);
        cancel_work_sync(&phy->roc_work);
 
+       mt7615_mutex_acquire(phy->dev);
        mt7615_mcu_set_roc(phy, vif, NULL, 0);
+       mt7615_mutex_release(phy->dev);
 
        return 0;
 }
@@ -933,7 +1129,10 @@ static int mt7615_suspend(struct ieee80211_hw *hw,
        bool ext_phy = phy != &dev->phy;
        int err = 0;
 
-       mutex_lock(&dev->mt76.mutex);
+       cancel_delayed_work_sync(&dev->pm.ps_work);
+       mt7615_free_pending_tx_skbs(dev, NULL);
+
+       mt7615_mutex_acquire(dev);
 
        clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
        cancel_delayed_work_sync(&phy->scan_work);
@@ -949,7 +1148,7 @@ static int mt7615_suspend(struct ieee80211_hw *hw,
        if (!mt7615_dev_running(dev))
                err = mt7615_mcu_set_hif_suspend(dev, true);
 
-       mutex_unlock(&dev->mt76.mutex);
+       mt7615_mutex_release(dev);
 
        return err;
 }
@@ -960,7 +1159,7 @@ static int mt7615_resume(struct ieee80211_hw *hw)
        struct mt7615_phy *phy = mt7615_hw_phy(hw);
        bool running, ext_phy = phy != &dev->phy;
 
-       mutex_lock(&dev->mt76.mutex);
+       mt7615_mutex_acquire(dev);
 
        running = mt7615_dev_running(dev);
        set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
@@ -970,7 +1169,7 @@ static int mt7615_resume(struct ieee80211_hw *hw)
 
                err = mt7615_mcu_set_hif_suspend(dev, false);
                if (err < 0) {
-                       mutex_unlock(&dev->mt76.mutex);
+                       mt7615_mutex_release(dev);
                        return err;
                }
        }
@@ -984,7 +1183,7 @@ static int mt7615_resume(struct ieee80211_hw *hw)
                                     MT7615_WATCHDOG_TIME);
        mt76_clear(dev, MT_WF_RFCR(ext_phy), MT_WF_RFCR_DROP_OTHER_BEACON);
 
-       mutex_unlock(&dev->mt76.mutex);
+       mt7615_mutex_release(dev);
 
        return 0;
 }
@@ -1001,7 +1200,11 @@ static void mt7615_set_rekey_data(struct ieee80211_hw *hw,
                                  struct ieee80211_vif *vif,
                                  struct cfg80211_gtk_rekey_data *data)
 {
+       struct mt7615_dev *dev = mt7615_hw_dev(hw);
+
+       mt7615_mutex_acquire(dev);
        mt7615_mcu_update_gtk_rekey(hw, vif, data);
+       mt7615_mutex_release(dev);
 }
 #endif /* CONFIG_PM */
 
@@ -1021,7 +1224,7 @@ const struct ieee80211_ops mt7615_ops = {
        .set_key = mt7615_set_key,
        .ampdu_action = mt7615_ampdu_action,
        .set_rts_threshold = mt7615_set_rts_threshold,
-       .wake_tx_queue = mt76_wake_tx_queue,
+       .wake_tx_queue = mt7615_wake_tx_queue,
        .sta_rate_tbl_update = mt7615_sta_rate_tbl_update,
        .sw_scan_start = mt76_sw_scan,
        .sw_scan_complete = mt76_sw_scan_complete,
@@ -1030,6 +1233,7 @@ const struct ieee80211_ops mt7615_ops = {
        .channel_switch_beacon = mt7615_channel_switch_beacon,
        .get_stats = mt7615_get_stats,
        .get_tsf = mt7615_get_tsf,
+       .set_tsf = mt7615_set_tsf,
        .get_survey = mt76_get_survey,
        .get_antenna = mt76_get_antenna,
        .set_antenna = mt7615_set_antenna,
@@ -1040,6 +1244,8 @@ const struct ieee80211_ops mt7615_ops = {
        .sched_scan_stop = mt7615_stop_sched_scan,
        .remain_on_channel = mt7615_remain_on_channel,
        .cancel_remain_on_channel = mt7615_cancel_remain_on_channel,
+       CFG80211_TESTMODE_CMD(mt76_testmode_cmd)
+       CFG80211_TESTMODE_DUMP(mt76_testmode_dump)
 #ifdef CONFIG_PM
        .suspend = mt7615_suspend,
        .resume = mt7615_resume,
index 6e869b8..d0cbb28 100644 (file)
@@ -146,13 +146,19 @@ void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
                mcu_txd->cid = mcu_cmd;
                break;
        case MCU_CE_PREFIX:
-               mcu_txd->set_query = MCU_Q_SET;
+               if (cmd & MCU_QUERY_MASK)
+                       mcu_txd->set_query = MCU_Q_QUERY;
+               else
+                       mcu_txd->set_query = MCU_Q_SET;
                mcu_txd->cid = mcu_cmd;
                break;
        default:
                mcu_txd->cid = MCU_CMD_EXT_CID;
-               mcu_txd->set_query = MCU_Q_SET;
-               mcu_txd->ext_cid = cmd;
+               if (cmd & MCU_QUERY_PREFIX)
+                       mcu_txd->set_query = MCU_Q_QUERY;
+               else
+                       mcu_txd->set_query = MCU_Q_SET;
+               mcu_txd->ext_cid = mcu_cmd;
                mcu_txd->ext_cid_ack = 1;
                break;
        }
@@ -180,8 +186,10 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd,
        struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data;
        int ret = 0;
 
-       if (seq != rxd->seq)
-               return -EAGAIN;
+       if (seq != rxd->seq) {
+               ret = -EAGAIN;
+               goto out;
+       }
 
        switch (cmd) {
        case MCU_CMD_PATCH_SEM_CONTROL:
@@ -192,6 +200,10 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd,
                skb_pull(skb, sizeof(*rxd));
                ret = le32_to_cpu(*(__le32 *)skb->data);
                break;
+       case MCU_EXT_CMD_RF_REG_ACCESS | MCU_QUERY_PREFIX:
+               skb_pull(skb, sizeof(*rxd));
+               ret = le32_to_cpu(*(__le32 *)&skb->data[8]);
+               break;
        case MCU_UNI_CMD_DEV_INFO_UPDATE:
        case MCU_UNI_CMD_BSS_INFO_UPDATE:
        case MCU_UNI_CMD_STA_REC_UPDATE:
@@ -205,9 +217,18 @@ mt7615_mcu_parse_response(struct mt7615_dev *dev, int cmd,
                ret = le32_to_cpu(event->status);
                break;
        }
+       case MCU_CMD_REG_READ: {
+               struct mt7615_mcu_reg_event *event;
+
+               skb_pull(skb, sizeof(*rxd));
+               event = (struct mt7615_mcu_reg_event *)skb->data;
+               ret = (int)le32_to_cpu(event->val);
+               break;
+       }
        default:
                break;
        }
+out:
        dev_kfree_skb(skb);
 
        return ret;
@@ -271,6 +292,38 @@ int mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
 }
 EXPORT_SYMBOL_GPL(mt7615_mcu_msg_send);
 
+u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg)
+{
+       struct {
+               __le32 wifi_stream;
+               __le32 address;
+               __le32 data;
+       } req = {
+               .wifi_stream = cpu_to_le32(wf),
+               .address = cpu_to_le32(reg),
+       };
+
+       return __mt76_mcu_send_msg(&dev->mt76,
+                                  MCU_EXT_CMD_RF_REG_ACCESS | MCU_QUERY_PREFIX,
+                                  &req, sizeof(req), true);
+}
+
+int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val)
+{
+       struct {
+               __le32 wifi_stream;
+               __le32 address;
+               __le32 data;
+       } req = {
+               .wifi_stream = cpu_to_le32(wf),
+               .address = cpu_to_le32(reg),
+               .data = cpu_to_le32(val),
+       };
+
+       return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RF_REG_ACCESS, &req,
+                                  sizeof(req), false);
+}
+
 static void
 mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
 {
@@ -927,6 +980,38 @@ mt7615_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
 }
 
 static void
+mt7615_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif,
+                    struct ieee80211_sta *sta)
+{
+       struct sta_rec_uapsd *uapsd;
+       struct tlv *tlv;
+
+       if (vif->type != NL80211_IFTYPE_AP || !sta->wme)
+               return;
+
+       tlv = mt7615_mcu_add_tlv(skb, STA_REC_APPS, sizeof(*uapsd));
+       uapsd = (struct sta_rec_uapsd *)tlv;
+
+       if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) {
+               uapsd->dac_map |= BIT(3);
+               uapsd->tac_map |= BIT(3);
+       }
+       if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) {
+               uapsd->dac_map |= BIT(2);
+               uapsd->tac_map |= BIT(2);
+       }
+       if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) {
+               uapsd->dac_map |= BIT(1);
+               uapsd->tac_map |= BIT(1);
+       }
+       if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) {
+               uapsd->dac_map |= BIT(0);
+               uapsd->tac_map |= BIT(0);
+       }
+       uapsd->max_sp = sta->max_sp;
+}
+
+static void
 mt7615_mcu_wtbl_ba_tlv(struct sk_buff *skb,
                       struct ieee80211_ampdu_params *params,
                       bool enable, bool tx, void *sta_wtbl,
@@ -1188,8 +1273,10 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_dev *dev, struct ieee80211_vif *vif,
                return PTR_ERR(sskb);
 
        mt7615_mcu_sta_basic_tlv(sskb, vif, sta, enable);
-       if (enable && sta)
+       if (enable && sta) {
                mt7615_mcu_sta_ht_tlv(sskb, sta);
+               mt7615_mcu_sta_uapsd(sskb, vif, sta);
+       }
 
        wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
                                             NULL, &wskb);
@@ -1206,8 +1293,12 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_dev *dev, struct ieee80211_vif *vif,
        skb = enable ? wskb : sskb;
 
        err = __mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true);
-       if (err < 0)
+       if (err < 0) {
+               skb = enable ? sskb : wskb;
+               dev_kfree_skb(skb);
+
                return err;
+       }
 
        cmd = enable ? MCU_EXT_CMD_STA_REC_UPDATE : MCU_EXT_CMD_WTBL_UPDATE;
        skb = enable ? sskb : wskb;
@@ -1285,8 +1376,10 @@ mt7615_mcu_add_sta_cmd(struct mt7615_dev *dev, struct ieee80211_vif *vif,
                return PTR_ERR(skb);
 
        mt7615_mcu_sta_basic_tlv(skb, vif, sta, enable);
-       if (enable && sta)
+       if (enable && sta) {
                mt7615_mcu_sta_ht_tlv(skb, sta);
+               mt7615_mcu_sta_uapsd(skb, vif, sta);
+       }
 
        sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
 
@@ -1429,6 +1522,7 @@ mt7615_mcu_uni_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
                        u8 pad[3];
                } __packed hdr;
                struct mt7615_bss_basic_tlv basic;
+               struct mt7615_bss_qos_tlv qos;
        } basic_req = {
                .hdr = {
                        .bss_idx = mvif->idx,
@@ -1444,6 +1538,11 @@ mt7615_mcu_uni_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
                        .active = true, /* keep bss deactivated */
                        .phymode = 0x38,
                },
+               .qos = {
+                       .tag = cpu_to_le16(UNI_BSS_INFO_QBSS),
+                       .len = cpu_to_le16(sizeof(struct mt7615_bss_qos_tlv)),
+                       .qos = vif->bss_conf.qos,
+               },
        };
        struct {
                struct {
@@ -1808,44 +1907,66 @@ static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
 
 int mt7615_driver_own(struct mt7615_dev *dev)
 {
+       struct mt76_phy *mphy = &dev->mt76.phy;
        struct mt76_dev *mdev = &dev->mt76;
-       u32 addr;
+       int i;
 
-       addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
-       mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
+       if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
+               goto out;
 
        mt7622_trigger_hif_int(dev, true);
 
-       addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
-       if (!mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000)) {
-               dev_err(dev->mt76.dev, "Timeout for driver own\n");
-               return -EIO;
+       for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) {
+               u32 addr;
+
+               addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
+               mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
+
+               addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
+               if (mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 50))
+                       break;
        }
 
        mt7622_trigger_hif_int(dev, false);
 
+       if (i == MT7615_DRV_OWN_RETRY_COUNT) {
+               dev_err(mdev->dev, "driver own failed\n");
+               set_bit(MT76_STATE_PM, &mphy->state);
+               return -EIO;
+       }
+
+out:
+       dev->pm.last_activity = jiffies;
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(mt7615_driver_own);
 
 int mt7615_firmware_own(struct mt7615_dev *dev)
 {
+       struct mt76_phy *mphy = &dev->mt76.phy;
+       int err = 0;
        u32 addr;
 
-       addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
+       if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
+               return 0;
+
        mt7622_trigger_hif_int(dev, true);
 
+       addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
        mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN);
 
-       if (!is_mt7615(&dev->mt76) &&
+       if (is_mt7622(&dev->mt76) &&
            !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN,
-                           MT_CFG_LPCR_HOST_FW_OWN, 3000)) {
+                           MT_CFG_LPCR_HOST_FW_OWN, 300)) {
                dev_err(dev->mt76.dev, "Timeout for firmware own\n");
-               return -EIO;
+               clear_bit(MT76_STATE_PM, &mphy->state);
+               err = -EIO;
        }
+
        mt7622_trigger_hif_int(dev, false);
 
-       return 0;
+       return err;
 }
 EXPORT_SYMBOL_GPL(mt7615_firmware_own);
 
@@ -2725,6 +2846,14 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
                .center_chan2 = ieee80211_frequency_to_channel(freq2),
        };
 
+#ifdef CONFIG_NL80211_TESTMODE
+       if (dev->mt76.test.state == MT76_TM_STATE_TX_FRAMES &&
+           dev->mt76.test.tx_antenna_mask) {
+               req.tx_streams = hweight8(dev->mt76.test.tx_antenna_mask);
+               req.rx_streams_mask = dev->mt76.test.tx_antenna_mask;
+       }
+#endif
+
        if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
                req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
        else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
@@ -2736,7 +2865,10 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
        req.band_idx = phy != &dev->phy;
        req.bw = mt7615_mcu_chan_bw(chandef);
 
-       mt7615_mcu_set_txpower_sku(phy, req.txpower_sku);
+       if (mt76_testmode_enabled(&dev->mt76))
+               memset(req.txpower_sku, 0x3f, 49);
+       else
+               mt7615_mcu_set_txpower_sku(phy, req.txpower_sku);
 
        return __mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
 }
@@ -2754,6 +2886,27 @@ int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index)
                                   sizeof(req), true);
 }
 
+int mt7615_mcu_set_test_param(struct mt7615_dev *dev, u8 param, bool test_mode,
+                             u32 val)
+{
+       struct {
+               u8 test_mode_en;
+               u8 param_idx;
+               u8 _rsv[2];
+
+               __le32 value;
+
+               u8 pad[8];
+       } req = {
+               .test_mode_en = test_mode,
+               .param_idx = param,
+               .value = cpu_to_le32(val),
+       };
+
+       return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_ATE_CTRL, &req,
+                                  sizeof(req), false);
+}
+
 int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable)
 {
        struct mt7615_dev *dev = phy->dev;
@@ -3332,43 +3485,8 @@ out:
        return ret;
 }
 
-#ifdef CONFIG_PM
-int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend)
-{
-       struct {
-               struct {
-                       u8 hif_type; /* 0x0: HIF_SDIO
-                                     * 0x1: HIF_USB
-                                     * 0x2: HIF_PCIE
-                                     */
-                       u8 pad[3];
-               } __packed hdr;
-               struct hif_suspend_tlv {
-                       __le16 tag;
-                       __le16 len;
-                       u8 suspend;
-               } __packed hif_suspend;
-       } req = {
-               .hif_suspend = {
-                       .tag = cpu_to_le16(0), /* 0: UNI_HIF_CTRL_BASIC */
-                       .len = cpu_to_le16(sizeof(struct hif_suspend_tlv)),
-                       .suspend = suspend,
-               },
-       };
-
-       if (mt76_is_mmio(&dev->mt76))
-               req.hdr.hif_type = 2;
-       else if (mt76_is_usb(&dev->mt76))
-               req.hdr.hif_type = 1;
-
-       return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_HIF_CTRL,
-                                  &req, sizeof(req), true);
-}
-EXPORT_SYMBOL_GPL(mt7615_mcu_set_hif_suspend);
-
-static int
-mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
-                     bool enable)
+int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+                         bool enable)
 {
        struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
        struct {
@@ -3408,6 +3526,40 @@ mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
                                   &req, sizeof(req), false);
 }
 
+#ifdef CONFIG_PM
+int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend)
+{
+       struct {
+               struct {
+                       u8 hif_type; /* 0x0: HIF_SDIO
+                                     * 0x1: HIF_USB
+                                     * 0x2: HIF_PCIE
+                                     */
+                       u8 pad[3];
+               } __packed hdr;
+               struct hif_suspend_tlv {
+                       __le16 tag;
+                       __le16 len;
+                       u8 suspend;
+               } __packed hif_suspend;
+       } req = {
+               .hif_suspend = {
+                       .tag = cpu_to_le16(0), /* 0: UNI_HIF_CTRL_BASIC */
+                       .len = cpu_to_le16(sizeof(struct hif_suspend_tlv)),
+                       .suspend = suspend,
+               },
+       };
+
+       if (mt76_is_mmio(&dev->mt76))
+               req.hdr.hif_type = 2;
+       else if (mt76_is_usb(&dev->mt76))
+               req.hdr.hif_type = 1;
+
+       return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_HIF_CTRL,
+                                  &req, sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt7615_mcu_set_hif_suspend);
+
 static int
 mt7615_mcu_set_wow_ctrl(struct mt7615_phy *phy, struct ieee80211_vif *vif,
                        bool suspend, struct cfg80211_wowlan *wowlan)
@@ -3542,6 +3694,32 @@ mt7615_mcu_set_gtk_rekey(struct mt7615_dev *dev,
                                   &req, sizeof(req), true);
 }
 
+static int
+mt7615_mcu_set_arp_filter(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+                         bool suspend)
+{
+       struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+       struct {
+               struct {
+                       u8 bss_idx;
+                       u8 pad[3];
+               } __packed hdr;
+               struct mt7615_arpns_tlv arpns;
+       } req = {
+               .hdr = {
+                       .bss_idx = mvif->idx,
+               },
+               .arpns = {
+                       .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
+                       .len = cpu_to_le16(sizeof(struct mt7615_arpns_tlv)),
+                       .mode = suspend,
+               },
+       };
+
+       return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_OFFLOAD,
+                                  &req, sizeof(req), true);
+}
+
 void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac,
                                 struct ieee80211_vif *vif)
 {
@@ -3554,6 +3732,7 @@ void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac,
        mt7615_mcu_set_bss_pm(phy->dev, vif, suspend);
 
        mt7615_mcu_set_gtk_rekey(phy->dev, vif, suspend);
+       mt7615_mcu_set_arp_filter(phy->dev, vif, suspend);
 
        mt7615_mcu_set_suspend_mode(phy->dev, vif, suspend, 1, true);
 
@@ -3653,6 +3832,53 @@ int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
                                   sizeof(req), false);
 }
 
+int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw,
+                                struct ieee80211_vif *vif,
+                                struct ieee80211_bss_conf *info)
+{
+       struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+       struct mt7615_dev *dev = mt7615_hw_dev(hw);
+       struct sk_buff *skb;
+       int i, len = min_t(int, info->arp_addr_cnt,
+                          IEEE80211_BSS_ARP_ADDR_LIST_LEN);
+       struct {
+               struct {
+                       u8 bss_idx;
+                       u8 pad[3];
+               } __packed hdr;
+               struct mt7615_arpns_tlv arp;
+       } req_hdr = {
+               .hdr = {
+                       .bss_idx = mvif->idx,
+               },
+               .arp = {
+                       .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
+                       .len = cpu_to_le16(sizeof(struct mt7615_arpns_tlv)),
+                       .ips_num = len,
+                       .mode = 2,  /* update */
+                       .option = 1,
+               },
+       };
+
+       if (!mt7615_firmware_offload(dev))
+               return 0;
+
+       skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+                                sizeof(req_hdr) + len * sizeof(__be32));
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put_data(skb, &req_hdr, sizeof(req_hdr));
+       for (i = 0; i < len; i++) {
+               u8 *addr = (u8 *)skb_put(skb, sizeof(__be32));
+
+               memcpy(addr, &info->arp_addr_list[i], sizeof(__be32));
+       }
+
+       return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+                                      MCU_UNI_CMD_OFFLOAD, true);
+}
+
 int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif)
 {
@@ -3674,3 +3900,32 @@ int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
        return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_P2P_OPPPS,
                                   &req, sizeof(req), false);
 }
+
+u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset)
+{
+       struct {
+               __le32 addr;
+               __le32 val;
+       } __packed req = {
+               .addr = cpu_to_le32(offset),
+       };
+
+       return __mt76_mcu_send_msg(dev, MCU_CMD_REG_READ,
+                                  &req, sizeof(req), true);
+}
+EXPORT_SYMBOL_GPL(mt7615_mcu_reg_rr);
+
+void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val)
+{
+       struct {
+               __le32 addr;
+               __le32 val;
+       } __packed req = {
+               .addr = cpu_to_le32(offset),
+               .val = cpu_to_le32(val),
+       };
+
+       __mt76_mcu_send_msg(dev, MCU_CMD_REG_WRITE,
+                           &req, sizeof(req), false);
+}
+EXPORT_SYMBOL_GPL(mt7615_mcu_reg_wr);
index 2314d0b..7b856e9 100644 (file)
@@ -81,6 +81,7 @@ enum {
        MCU_EVENT_GENERIC = 0x01,
        MCU_EVENT_ACCESS_REG = 0x02,
        MCU_EVENT_MT_PATCH_SEM = 0x04,
+       MCU_EVENT_REG_ACCESS = 0x05,
        MCU_EVENT_SCAN_DONE = 0x0d,
        MCU_EVENT_ROC = 0x10,
        MCU_EVENT_BSS_ABSENCE  = 0x11,
@@ -238,8 +239,11 @@ enum {
 #define MCU_FW_PREFIX          BIT(31)
 #define MCU_UNI_PREFIX         BIT(30)
 #define MCU_CE_PREFIX          BIT(29)
+#define MCU_QUERY_PREFIX       BIT(28)
 #define MCU_CMD_MASK           ~(MCU_FW_PREFIX | MCU_UNI_PREFIX |      \
-                                 MCU_CE_PREFIX)
+                                 MCU_CE_PREFIX | MCU_QUERY_PREFIX)
+
+#define MCU_QUERY_MASK         BIT(16)
 
 enum {
        MCU_CMD_TARGET_ADDRESS_LEN_REQ = MCU_FW_PREFIX | 0x01,
@@ -254,6 +258,7 @@ enum {
 };
 
 enum {
+       MCU_EXT_CMD_RF_REG_ACCESS = 0x02,
        MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
        MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
        MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11,
@@ -266,6 +271,7 @@ enum {
        MCU_EXT_CMD_GET_TEMP = 0x2c,
        MCU_EXT_CMD_WTBL_UPDATE = 0x32,
        MCU_EXT_CMD_SET_RDD_CTRL = 0x3a,
+       MCU_EXT_CMD_ATE_CTRL = 0x3d,
        MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
        MCU_EXT_CMD_DBDC_CTRL = 0x45,
        MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
@@ -287,6 +293,11 @@ enum {
        MCU_UNI_CMD_HIF_CTRL = MCU_UNI_PREFIX | 0x07,
 };
 
+enum {
+       MCU_ATE_SET_FREQ_OFFSET = 0xa,
+       MCU_ATE_SET_TX_POWER_CONTROL = 0x15,
+};
+
 struct mt7615_mcu_uni_event {
        u8 cid;
        u8 pad[3];
@@ -421,6 +432,11 @@ struct nt7615_sched_scan_done {
        __le16 pad;
 } __packed;
 
+struct mt7615_mcu_reg_event {
+       __le32 reg;
+       __le32 val;
+} __packed;
+
 struct mt7615_mcu_bss_event {
        u8 bss_idx;
        u8 is_absent;
@@ -454,6 +470,13 @@ struct mt7615_bss_basic_tlv {
        u8 pad[3];
 } __packed;
 
+struct mt7615_bss_qos_tlv {
+       __le16 tag;
+       __le16 len;
+       u8 qos;
+       u8 pad[3];
+} __packed;
+
 struct mt7615_wow_ctrl_tlv {
        __le16 tag;
        __le16 len;
@@ -545,6 +568,15 @@ struct mt7615_roc_tlv {
        u8 rsv1[8];
 } __packed;
 
+struct mt7615_arpns_tlv {
+       __le16 tag;
+       __le16 len;
+       u8 mode;
+       u8 ips_num;
+       u8 option;
+       u8 pad[1];
+} __packed;
+
 /* offload mcu commands */
 enum {
        MCU_CMD_START_HW_SCAN = MCU_CE_PREFIX | 0x03,
@@ -557,6 +589,8 @@ enum {
        MCU_CMD_SET_P2P_OPPPS = MCU_CE_PREFIX | 0x33,
        MCU_CMD_SCHED_SCAN_ENABLE = MCU_CE_PREFIX | 0x61,
        MCU_CMD_SCHED_SCAN_REQ = MCU_CE_PREFIX | 0x62,
+       MCU_CMD_REG_WRITE = MCU_CE_PREFIX | 0xc0,
+       MCU_CMD_REG_READ = MCU_CE_PREFIX | MCU_QUERY_MASK | 0xc0,
 };
 
 #define MCU_CMD_ACK            BIT(0)
@@ -569,6 +603,8 @@ enum {
        UNI_BSS_INFO_BASIC = 0,
        UNI_BSS_INFO_RLM = 2,
        UNI_BSS_INFO_BCN_CONTENT = 7,
+       UNI_BSS_INFO_QBSS = 15,
+       UNI_BSS_INFO_UAPSD = 19,
 };
 
 enum {
@@ -580,8 +616,8 @@ enum {
 };
 
 enum {
-       UNI_OFFLOAD_OFFLOAD_ARPNS_IPV4,
-       UNI_OFFLOAD_OFFLOAD_ARPNS_IPV6,
+       UNI_OFFLOAD_OFFLOAD_ARP,
+       UNI_OFFLOAD_OFFLOAD_ND,
        UNI_OFFLOAD_OFFLOAD_GTK_REKEY,
        UNI_OFFLOAD_OFFLOAD_BMC_RPY_DETECT,
 };
@@ -882,6 +918,7 @@ struct wtbl_raw {
                                         sizeof(struct sta_rec_basic) + \
                                         sizeof(struct sta_rec_ht) +    \
                                         sizeof(struct sta_rec_vht) +   \
+                                        sizeof(struct sta_rec_uapsd) + \
                                         sizeof(struct tlv) +   \
                                         MT7615_WTBL_UPDATE_MAX_SIZE)
 
@@ -971,6 +1008,17 @@ struct sta_rec_ba {
        __le16 winsize;
 } __packed;
 
+struct sta_rec_uapsd {
+       __le16 tag;
+       __le16 len;
+       u8 dac_map;
+       u8 tac_map;
+       u8 max_sp;
+       u8 rsv0;
+       __le16 listen_interval;
+       u8 rsv1[2];
+} __packed;
+
 enum {
        STA_REC_BASIC,
        STA_REC_RA,
index 2e99845..133f93a 100644 (file)
@@ -17,7 +17,6 @@ const u32 mt7615e_reg_map[] = {
        [MT_CSR_BASE]           = 0x07000,
        [MT_PLE_BASE]           = 0x08000,
        [MT_PSE_BASE]           = 0x0c000,
-       [MT_PHY_BASE]           = 0x10000,
        [MT_CFG_BASE]           = 0x20200,
        [MT_AGG_BASE]           = 0x20a00,
        [MT_TMAC_BASE]          = 0x21000,
@@ -44,7 +43,7 @@ const u32 mt7663e_reg_map[] = {
        [MT_CSR_BASE]           = 0x07000,
        [MT_PLE_BASE]           = 0x08000,
        [MT_PSE_BASE]           = 0x0c000,
-       [MT_PHY_BASE]           = 0x10000,
+       [MT_PP_BASE]            = 0x0e000,
        [MT_CFG_BASE]           = 0x20000,
        [MT_AGG_BASE]           = 0x22000,
        [MT_TMAC_BASE]          = 0x24000,
@@ -140,6 +139,38 @@ static void mt7615_irq_tasklet(unsigned long data)
        mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
 }
 
+static u32 __mt7615_reg_addr(struct mt7615_dev *dev, u32 addr)
+{
+       if (addr < 0x100000)
+               return addr;
+
+       return mt7615_reg_map(dev, addr);
+}
+
+static u32 mt7615_rr(struct mt76_dev *mdev, u32 offset)
+{
+       struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+       u32 addr = __mt7615_reg_addr(dev, offset);
+
+       return dev->bus_ops->rr(mdev, addr);
+}
+
+static void mt7615_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+{
+       struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+       u32 addr = __mt7615_reg_addr(dev, offset);
+
+       dev->bus_ops->wr(mdev, addr, val);
+}
+
+static u32 mt7615_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+{
+       struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+       u32 addr = __mt7615_reg_addr(dev, offset);
+
+       return dev->bus_ops->rmw(mdev, addr, mask, val);
+}
+
 int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
                      int irq, const u32 *map)
 {
@@ -159,6 +190,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
                .sta_remove = mt7615_mac_sta_remove,
                .update_survey = mt7615_update_channel,
        };
+       struct mt76_bus_ops *bus_ops;
        struct ieee80211_ops *ops;
        struct mt7615_dev *dev;
        struct mt76_dev *mdev;
@@ -182,6 +214,19 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
                    (mt76_rr(dev, MT_HW_REV) & 0xff);
        dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
 
+       dev->bus_ops = dev->mt76.bus;
+       bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
+                              GFP_KERNEL);
+       if (!bus_ops) {
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       bus_ops->rr = mt7615_rr;
+       bus_ops->wr = mt7615_wr;
+       bus_ops->rmw = mt7615_rmw;
+       dev->mt76.bus = bus_ops;
+
        ret = devm_request_irq(mdev->dev, irq, mt7615_irq_handler,
                               IRQF_SHARED, KBUILD_MODNAME, dev);
        if (ret)
index 3e7d51b..571eadc 100644 (file)
@@ -4,6 +4,7 @@
 #ifndef __MT7615_H
 #define __MT7615_H
 
+#include <linux/completion.h>
 #include <linux/interrupt.h>
 #include <linux/ktime.h>
 #include <linux/regmap.h>
@@ -18,6 +19,7 @@
 #define MT7615_WTBL_STA                        (MT7615_WTBL_RESERVED - \
                                         MT7615_MAX_INTERFACES)
 
+#define MT7615_PM_TIMEOUT              (HZ / 12)
 #define MT7615_WATCHDOG_TIME           (HZ / 10)
 #define MT7615_HW_SCAN_TIMEOUT         (HZ / 10)
 #define MT7615_RESET_TIMEOUT           (30 * HZ)
@@ -31,6 +33,8 @@
 #define MT7615_RX_RING_SIZE            1024
 #define MT7615_RX_MCU_RING_SIZE                512
 
+#define MT7615_DRV_OWN_RETRY_COUNT     10
+
 #define MT7615_FIRMWARE_CR4            "mediatek/mt7615_cr4.bin"
 #define MT7615_FIRMWARE_N9             "mediatek/mt7615_n9.bin"
 #define MT7615_ROM_PATCH               "mediatek/mt7615_rom_patch.bin"
@@ -169,6 +173,8 @@ struct mt7615_phy {
        struct mt76_phy *mt76;
        struct mt7615_dev *dev;
 
+       struct ieee80211_vif *monitor_vif;
+
        u32 rxfilter;
        u32 omac_mask;
 
@@ -240,10 +246,10 @@ struct mt7615_dev {
                struct mt76_phy mphy;
        };
 
+       const struct mt76_bus_ops *bus_ops;
        struct tasklet_struct irq_tasklet;
 
        struct mt7615_phy phy;
-       u32 vif_mask;
        u32 omac_mask;
 
        u16 chainmask;
@@ -280,6 +286,37 @@ struct mt7615_dev {
 
        struct work_struct wtbl_work;
        struct list_head wd_head;
+
+       u32 debugfs_rf_wf;
+       u32 debugfs_rf_reg;
+
+#ifdef CONFIG_NL80211_TESTMODE
+       struct {
+               u32 *reg_backup;
+
+               s16 last_freq_offset;
+               u8 last_rcpi[4];
+               s8 last_ib_rssi;
+               s8 last_wb_rssi;
+       } test;
+#endif
+
+       struct {
+               bool enable;
+
+               spinlock_t txq_lock;
+               struct {
+                       struct mt7615_sta *msta;
+                       struct sk_buff *skb;
+               } tx_q[IEEE80211_NUM_ACS];
+
+               struct work_struct wake_work;
+               struct completion wake_cmpl;
+
+               struct delayed_work ps_work;
+               unsigned long last_activity;
+               unsigned long idle_timeout;
+       } pm;
 };
 
 enum tx_pkt_queue_idx {
@@ -372,8 +409,10 @@ extern struct ieee80211_rate mt7615_rates[12];
 extern const struct ieee80211_ops mt7615_ops;
 extern const u32 mt7615e_reg_map[__MT_BASE_MAX];
 extern const u32 mt7663e_reg_map[__MT_BASE_MAX];
+extern const u32 mt7663_usb_sdio_reg_map[__MT_BASE_MAX];
 extern struct pci_driver mt7615_pci_driver;
 extern struct platform_driver mt7622_wmac_driver;
+extern const struct mt76_testmode_ops mt7615_testmode_ops;
 
 #ifdef CONFIG_MT7622_WMAC
 int mt7622_wmac_init(struct mt7615_dev *dev);
@@ -408,6 +447,11 @@ bool mt7615_wait_for_mcu_init(struct mt7615_dev *dev);
 void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta,
                          struct ieee80211_tx_rate *probe_rate,
                          struct ieee80211_tx_rate *rates);
+int mt7615_pm_set_enable(struct mt7615_dev *dev, bool enable);
+void mt7615_pm_wake_work(struct work_struct *work);
+int mt7615_pm_wake(struct mt7615_dev *dev);
+void mt7615_pm_power_save_sched(struct mt7615_dev *dev);
+void mt7615_pm_power_save_work(struct work_struct *work);
 int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev);
 int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd);
 int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
@@ -462,6 +506,20 @@ static inline u16 mt7615_wtbl_size(struct mt7615_dev *dev)
                return MT7615_WTBL_SIZE;
 }
 
+static inline void mt7615_mutex_acquire(struct mt7615_dev *dev)
+        __acquires(&dev->mt76.mutex)
+{
+       mutex_lock(&dev->mt76.mutex);
+       mt7615_pm_wake(dev);
+}
+
+static inline void mt7615_mutex_release(struct mt7615_dev *dev)
+       __releases(&dev->mt76.mutex)
+{
+       mt7615_pm_power_save_sched(dev);
+       mutex_unlock(&dev->mt76.mutex);
+}
+
 static inline u8 mt7615_lmac_mapping(struct mt7615_dev *dev, u8 ac)
 {
        static const u8 lmac_queue_map[] = {
@@ -485,6 +543,7 @@ void mt7615_init_txpower(struct mt7615_dev *dev,
                         struct ieee80211_supported_band *sband);
 void mt7615_phy_init(struct mt7615_dev *dev);
 void mt7615_mac_init(struct mt7615_dev *dev);
+int mt7615_set_channel(struct mt7615_phy *phy);
 
 int mt7615_mcu_restart(struct mt76_dev *dev);
 void mt7615_update_channel(struct mt76_dev *mdev);
@@ -516,15 +575,19 @@ int mt7615_mac_wtbl_update_key(struct mt7615_dev *dev,
                               enum mt7615_cipher_type cipher,
                               enum set_key_cmd cmd);
 void mt7615_mac_reset_work(struct work_struct *work);
+u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid);
 
 int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq);
 int mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
                        int len, bool wait_resp);
+u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg);
+int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val);
 int mt7615_mcu_set_dbdc(struct mt7615_dev *dev);
 int mt7615_mcu_set_eeprom(struct mt7615_dev *dev);
 int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable);
 int mt7615_mcu_set_rts_thresh(struct mt7615_phy *phy, u32 val);
 int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index);
+int mt7615_mcu_set_tx_power(struct mt7615_phy *phy);
 void mt7615_mcu_exit(struct mt7615_dev *dev);
 void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb,
                         int cmd, int *wait_seq);
@@ -563,6 +626,8 @@ int mt7615_mcu_set_pulse_th(struct mt7615_dev *dev,
                            const struct mt7615_dfs_pulse *pulse);
 int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index,
                            const struct mt7615_dfs_pattern *pattern);
+int mt7615_mcu_set_test_param(struct mt7615_dev *dev, u8 param, bool test_mode,
+                             u32 val);
 int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable);
 int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy);
 int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy);
@@ -579,18 +644,40 @@ int mt7615_driver_own(struct mt7615_dev *dev);
 int mt7615_init_debugfs(struct mt7615_dev *dev);
 int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq);
 
+int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+                         bool enable);
 int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend);
 void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac,
                                 struct ieee80211_vif *vif);
 int mt7615_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
                                struct ieee80211_vif *vif,
                                struct cfg80211_gtk_rekey_data *key);
-
+int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw,
+                                struct ieee80211_vif *vif,
+                                struct ieee80211_bss_conf *info);
 int __mt7663_load_firmware(struct mt7615_dev *dev);
+u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset);
+void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);
 
 /* usb */
-void mt7663u_wtbl_work(struct work_struct *work);
+int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+                                  enum mt76_txq_id qid, struct mt76_wcid *wcid,
+                                  struct ieee80211_sta *sta,
+                                  struct mt76_tx_info *tx_info);
+bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update);
+void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
+                                    enum mt76_txq_id qid,
+                                    struct mt76_queue_entry *e);
+void mt7663_usb_sdio_wtbl_work(struct work_struct *work);
+int mt7663_usb_sdio_register_device(struct mt7615_dev *dev);
 int mt7663u_mcu_init(struct mt7615_dev *dev);
-int mt7663u_register_device(struct mt7615_dev *dev);
+
+/* sdio */
+u32 mt7663s_read_pcr(struct mt7615_dev *dev);
+int mt7663s_mcu_init(struct mt7615_dev *dev);
+int mt7663s_driver_own(struct mt7615_dev *dev);
+int mt7663s_firmware_own(struct mt7615_dev *dev);
+int mt7663s_kthread_run(void *data);
+void mt7663s_sdio_irq(struct sdio_func *func);
 
 #endif
index ba12f19..2328d78 100644 (file)
@@ -75,6 +75,10 @@ static int mt7615_pci_suspend(struct pci_dev *pdev, pm_message_t state)
        bool hif_suspend;
        int i, err;
 
+       err = mt7615_pm_wake(dev);
+       if (err < 0)
+               return err;
+
        hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
                      mt7615_firmware_offload(dev);
        if (hif_suspend) {
index 69cba86..7224a00 100644 (file)
@@ -70,6 +70,10 @@ mt7615_led_set_config(struct led_classdev *led_cdev,
 
        mt76 = container_of(led_cdev, struct mt76_dev, led_cdev);
        dev = container_of(mt76, struct mt7615_dev, mt76);
+
+       if (test_bit(MT76_STATE_PM, &mt76->phy.state))
+               return;
+
        val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
              FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
              FIELD_PREP(MT_LED_STATUS_ON, delay_on);
index 7ec91c0..2d67f9a 100644 (file)
@@ -155,7 +155,6 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
                spin_lock_bh(&dev->mt76.lock);
                mt7615_mac_set_rates(phy, msta, &info->control.rates[0],
                                     msta->rates);
-               msta->rate_probe = true;
                spin_unlock_bh(&dev->mt76.lock);
        }
 
index aee433a..9137d9e 100644 (file)
@@ -14,7 +14,6 @@ enum mt7615_reg_base {
        MT_CSR_BASE,
        MT_PLE_BASE,
        MT_PSE_BASE,
-       MT_PHY_BASE,
        MT_CFG_BASE,
        MT_AGG_BASE,
        MT_TMAC_BASE,
@@ -29,6 +28,7 @@ enum mt7615_reg_base {
        MT_PCIE_REMAP_BASE2,
        MT_TOP_MISC_BASE,
        MT_EFUSE_ADDR_BASE,
+       MT_PP_BASE,
        __MT_BASE_MAX,
 };
 
@@ -153,6 +153,8 @@ enum mt7615_reg_base {
 
 #define MT_PLE(ofs)                    ((dev)->reg_map[MT_PLE_BASE] + (ofs))
 
+#define MT_PLE_PG_HIF0_GROUP           MT_PLE(0x110)
+#define MT_HIF0_MIN_QUOTA              GENMASK(11, 0)
 #define MT_PLE_FL_Q0_CTRL              MT_PLE(0x1b0)
 #define MT_PLE_FL_Q1_CTRL              MT_PLE(0x1b4)
 #define MT_PLE_FL_Q2_CTRL              MT_PLE(0x1b8)
@@ -162,6 +164,10 @@ enum mt7615_reg_base {
                                               ((n) << 2))
 
 #define MT_PSE(ofs)                    ((dev)->reg_map[MT_PSE_BASE] + (ofs))
+#define MT_PSE_PG_HIF0_GROUP           MT_PSE(0x110)
+#define MT_HIF0_MIN_QUOTA              GENMASK(11, 0)
+#define MT_PSE_PG_HIF1_GROUP           MT_PSE(0x118)
+#define MT_HIF1_MIN_QUOTA              GENMASK(11, 0)
 #define MT_PSE_QUEUE_EMPTY             MT_PSE(0x0b4)
 #define MT_HIF_0_EMPTY_MASK            BIT(16)
 #define MT_HIF_1_EMPTY_MASK            BIT(17)
@@ -169,7 +175,12 @@ enum mt7615_reg_base {
 #define MT_PSE_PG_INFO                 MT_PSE(0x194)
 #define MT_PSE_SRC_CNT                 GENMASK(27, 16)
 
-#define MT_WF_PHY_BASE                 ((dev)->reg_map[MT_PHY_BASE])
+#define MT_PP(ofs)                     ((dev)->reg_map[MT_PP_BASE] + (ofs))
+#define MT_PP_TXDWCNT                  MT_PP(0x0)
+#define MT_PP_TXDWCNT_TX0_ADD_DW_CNT   GENMASK(7, 0)
+#define MT_PP_TXDWCNT_TX1_ADD_DW_CNT   GENMASK(15, 8)
+
+#define MT_WF_PHY_BASE                 0x82070000
 #define MT_WF_PHY(ofs)                 (MT_WF_PHY_BASE + (ofs))
 
 #define MT_WF_PHY_WF2_RFCTRL0(n)       MT_WF_PHY(0x1900 + (n) * 0x400)
@@ -213,6 +224,9 @@ enum mt7615_reg_base {
 #define MT_WF_PHY_RXTD2_BASE           MT_WF_PHY(0x2a00)
 #define MT_WF_PHY_RXTD2(_n)            (MT_WF_PHY_RXTD2_BASE + ((_n) << 2))
 
+#define MT_WF_PHY_RFINTF3_0(_n)                MT_WF_PHY(0x1100 + (_n) * 0x400)
+#define MT_WF_PHY_RFINTF3_0_ANT                GENMASK(7, 4)
+
 #define MT_WF_CFG_BASE                 ((dev)->reg_map[MT_CFG_BASE])
 #define MT_WF_CFG(ofs)                 (MT_WF_CFG_BASE + (ofs))
 
@@ -256,6 +270,13 @@ enum mt7615_reg_base {
 #define MT_WF_ARB_BASE                 ((dev)->reg_map[MT_ARB_BASE])
 #define MT_WF_ARB(ofs)                 (MT_WF_ARB_BASE + (ofs))
 
+#define MT_ARB_RQCR                    MT_WF_ARB(0x070)
+#define MT_ARB_RQCR_RX_START           BIT(0)
+#define MT_ARB_RQCR_RXV_START          BIT(4)
+#define MT_ARB_RQCR_RXV_R_EN           BIT(7)
+#define MT_ARB_RQCR_RXV_T_EN           BIT(8)
+#define MT_ARB_RQCR_BAND_SHIFT         16
+
 #define MT_ARB_SCR                     MT_WF_ARB(0x080)
 #define MT_ARB_SCR_TX0_DISABLE         BIT(8)
 #define MT_ARB_SCR_RX0_DISABLE         BIT(9)
@@ -417,6 +438,7 @@ enum mt7615_reg_base {
 
 #define MT_LPON_T0CR                   MT_LPON(0x010)
 #define MT_LPON_T0CR_MODE              GENMASK(1, 0)
+#define MT_LPON_T0CR_WRITE             BIT(0)
 
 #define MT_LPON_UTTR0                  MT_LPON(0x018)
 #define MT_LPON_UTTR1                  MT_LPON(0x01c)
@@ -550,4 +572,11 @@ enum mt7615_reg_base {
 #define MT_WL_RX_BUSY                  BIT(30)
 #define MT_WL_TX_BUSY                  BIT(31)
 
+#define MT_MCU_PTA_BASE                        0x81060000
+#define MT_MCU_PTA(_n)                 (MT_MCU_PTA_BASE + (_n))
+
+#define MT_ANT_SWITCH_CON(n)           MT_MCU_PTA(0x0c8)
+#define MT_ANT_SWITCH_CON_MODE(_n)     (GENMASK(4, 0) << (_n * 8))
+#define MT_ANT_SWITCH_CON_MODE1(_n)    (GENMASK(3, 0) << (_n * 8))
+
 #endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
new file mode 100644 (file)
index 0000000..dabce51
--- /dev/null
@@ -0,0 +1,478 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: Felix Fietkau <nbd@nbd.name>
+ *        Lorenzo Bianconi <lorenzo@kernel.org>
+ *        Sean Wang <sean.wang@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio_func.h>
+
+#include "mt7615.h"
+#include "sdio.h"
+#include "mac.h"
+
+static const struct sdio_device_id mt7663s_table[] = {
+       { SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7603) },
+       { }     /* Terminating entry */
+};
+
+static u32 mt7663s_read_whisr(struct mt76_dev *dev)
+{
+       return sdio_readl(dev->sdio.func, MCR_WHISR, NULL);
+}
+
+u32 mt7663s_read_pcr(struct mt7615_dev *dev)
+{
+       struct mt76_sdio *sdio = &dev->mt76.sdio;
+
+       return sdio_readl(sdio->func, MCR_WHLPCR, NULL);
+}
+
+static u32 mt7663s_read_mailbox(struct mt76_dev *dev, u32 offset)
+{
+       struct sdio_func *func = dev->sdio.func;
+       u32 val = ~0, status;
+       int err;
+
+       sdio_claim_host(func);
+
+       sdio_writel(func, offset, MCR_H2DSM0R, &err);
+       if (err < 0) {
+               dev_err(dev->dev, "failed setting address [err=%d]\n", err);
+               goto out;
+       }
+
+       sdio_writel(func, H2D_SW_INT_READ, MCR_WSICR, &err);
+       if (err < 0) {
+               dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
+               goto out;
+       }
+
+       err = readx_poll_timeout(mt7663s_read_whisr, dev, status,
+                                status & H2D_SW_INT_READ, 0, 1000000);
+       if (err < 0) {
+               dev_err(dev->dev, "query whisr timeout\n");
+               goto out;
+       }
+
+       sdio_writel(func, H2D_SW_INT_READ, MCR_WHISR, &err);
+       if (err < 0) {
+               dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
+               goto out;
+       }
+
+       val = sdio_readl(func, MCR_H2DSM0R, &err);
+       if (err < 0) {
+               dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
+               goto out;
+       }
+
+       if (val != offset) {
+               dev_err(dev->dev, "register mismatch\n");
+               val = ~0;
+               goto out;
+       }
+
+       val = sdio_readl(func, MCR_D2HRM1R, &err);
+       if (err < 0)
+               dev_err(dev->dev, "failed reading d2hrm1r [err=%d]\n", err);
+
+out:
+       sdio_release_host(func);
+
+       return val;
+}
+
+static void mt7663s_write_mailbox(struct mt76_dev *dev, u32 offset, u32 val)
+{
+       struct sdio_func *func = dev->sdio.func;
+       u32 status;
+       int err;
+
+       sdio_claim_host(func);
+
+       sdio_writel(func, offset, MCR_H2DSM0R, &err);
+       if (err < 0) {
+               dev_err(dev->dev, "failed setting address [err=%d]\n", err);
+               goto out;
+       }
+
+       sdio_writel(func, val, MCR_H2DSM1R, &err);
+       if (err < 0) {
+               dev_err(dev->dev,
+                       "failed setting write value [err=%d]\n", err);
+               goto out;
+       }
+
+       sdio_writel(func, H2D_SW_INT_WRITE, MCR_WSICR, &err);
+       if (err < 0) {
+               dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
+               goto out;
+       }
+
+       err = readx_poll_timeout(mt7663s_read_whisr, dev, status,
+                                status & H2D_SW_INT_WRITE, 0, 1000000);
+       if (err < 0) {
+               dev_err(dev->dev, "query whisr timeout\n");
+               goto out;
+       }
+
+       sdio_writel(func, H2D_SW_INT_WRITE, MCR_WHISR, &err);
+       if (err < 0) {
+               dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
+               goto out;
+       }
+
+       val = sdio_readl(func, MCR_H2DSM0R, &err);
+       if (err < 0) {
+               dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
+               goto out;
+       }
+
+       if (val != offset)
+               dev_err(dev->dev, "register mismatch\n");
+
+out:
+       sdio_release_host(func);
+}
+
+static u32 mt7663s_rr(struct mt76_dev *dev, u32 offset)
+{
+       if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
+               return dev->mcu_ops->mcu_rr(dev, offset);
+       else
+               return mt7663s_read_mailbox(dev, offset);
+}
+
+static void mt7663s_wr(struct mt76_dev *dev, u32 offset, u32 val)
+{
+       if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
+               dev->mcu_ops->mcu_wr(dev, offset, val);
+       else
+               mt7663s_write_mailbox(dev, offset, val);
+}
+
+static u32 mt7663s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
+{
+       val |= mt7663s_rr(dev, offset) & ~mask;
+       mt7663s_wr(dev, offset, val);
+
+       return val;
+}
+
+/* Copy a buffer to consecutive registers, one u32 at a time.
+ * @len is in bytes; a trailing partial word (len % 4) is not written.
+ * Assumes @data is u32-aligned — TODO confirm at call sites.
+ */
+static void mt7663s_write_copy(struct mt76_dev *dev, u32 offset,
+                              const void *data, int len)
+{
+       const u32 *val = data;
+       int i;
+
+       for (i = 0; i < len / sizeof(u32); i++) {
+               mt7663s_wr(dev, offset, val[i]);
+               offset += sizeof(u32);
+       }
+}
+
+/* Copy consecutive registers into a buffer, one u32 at a time.
+ * @len is in bytes; a trailing partial word (len % 4) is not read.
+ */
+static void mt7663s_read_copy(struct mt76_dev *dev, u32 offset,
+                             void *data, int len)
+{
+       u32 *val = data;
+       int i;
+
+       for (i = 0; i < len / sizeof(u32); i++) {
+               val[i] = mt7663s_rr(dev, offset);
+               offset += sizeof(u32);
+       }
+}
+
+/* Write an array of register/value pairs. @base is unused on this bus;
+ * each pair carries an absolute register offset. Always returns 0.
+ */
+static int mt7663s_wr_rp(struct mt76_dev *dev, u32 base,
+                        const struct mt76_reg_pair *data,
+                        int len)
+{
+       int i;
+
+       for (i = 0; i < len; i++) {
+               mt7663s_wr(dev, data->reg, data->value);
+               data++;
+       }
+
+       return 0;
+}
+
+/* Read an array of register/value pairs, filling in each pair's value.
+ * @base is unused on this bus. Always returns 0.
+ */
+static int mt7663s_rd_rp(struct mt76_dev *dev, u32 base,
+                        struct mt76_reg_pair *data,
+                        int len)
+{
+       int i;
+
+       for (i = 0; i < len; i++) {
+               data->value = mt7663s_rr(dev, data->reg);
+               data++;
+       }
+
+       return 0;
+}
+
+/* Deferred init work: bring up the MCU, then run the remaining one-time
+ * hardware/PHY init steps. If MCU init fails the rest is skipped — the
+ * device is left unconfigured (no error is propagated from a workqueue).
+ */
+static void mt7663s_init_work(struct work_struct *work)
+{
+       struct mt7615_dev *dev;
+
+       dev = container_of(work, struct mt7615_dev, mcu_work);
+       if (mt7663s_mcu_init(dev))
+               return;
+
+       mt7615_mcu_set_eeprom(dev);
+       mt7615_mac_init(dev);
+       mt7615_phy_init(dev);
+       mt7615_mcu_del_wtbl_all(dev);
+       mt7615_check_offload_capability(dev);
+}
+
+/* One-time SDIO function bring-up: enable the function, take driver
+ * ownership from the firmware, set the block size, unmask/configure
+ * interrupts and finally claim the SDIO IRQ. On any failure the
+ * function is disabled again and the host released (goto cleanup).
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int mt7663s_hw_init(struct mt7615_dev *dev, struct sdio_func *func)
+{
+       u32 status, ctrl;
+       int ret;
+
+       sdio_claim_host(func);
+
+       ret = sdio_enable_func(func);
+       if (ret < 0)
+               goto release;
+
+       /* Get ownership from the device */
+       sdio_writel(func, WHLPCR_INT_EN_CLR | WHLPCR_FW_OWN_REQ_CLR,
+                   MCR_WHLPCR, &ret);
+       if (ret < 0)
+               goto disable_func;
+
+       /* Poll up to 1s (2ms interval) for the driver-own bit */
+       ret = readx_poll_timeout(mt7663s_read_pcr, dev, status,
+                                status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
+       if (ret < 0) {
+               dev_err(dev->mt76.dev, "Cannot get ownership from device");
+               goto disable_func;
+       }
+
+       ret = sdio_set_block_size(func, 512);
+       if (ret < 0)
+               goto disable_func;
+
+       /* Enable interrupt */
+       sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, &ret);
+       if (ret < 0)
+               goto disable_func;
+
+       ctrl = WHIER_RX0_DONE_INT_EN | WHIER_TX_DONE_INT_EN;
+       sdio_writel(func, ctrl, MCR_WHIER, &ret);
+       if (ret < 0)
+               goto disable_func;
+
+       /* set WHISR as read clear and Rx aggregation number as 16 */
+       ctrl = FIELD_PREP(MAX_HIF_RX_LEN_NUM, 16);
+       sdio_writel(func, ctrl, MCR_WHCR, &ret);
+       if (ret < 0)
+               goto disable_func;
+
+       ret = sdio_claim_irq(func, mt7663s_sdio_irq);
+       if (ret < 0)
+               goto disable_func;
+
+       sdio_release_host(func);
+
+       return 0;
+
+disable_func:
+       sdio_disable_func(func);
+release:
+       sdio_release_host(func);
+
+       return ret;
+}
+
+/* .sta_add hook: add the station via the common mt7615 path, then seed
+ * the SDIO tx scheduler quotas from the current PSE/PLE HIF0 minimum
+ * page counts. Returns 0 or the error from mt7615_mac_sta_add().
+ */
+static int mt7663s_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+                          struct ieee80211_sta *sta)
+{
+       struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+       struct mt76_sdio *sdio = &mdev->sdio;
+       u32 pse, ple;
+       int err;
+
+       err = mt7615_mac_sta_add(mdev, vif, sta);
+       if (err < 0)
+               return err;
+
+       /* init sched data quota */
+       pse = mt76_get_field(dev, MT_PSE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA);
+       ple = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA);
+
+       mutex_lock(&sdio->sched.lock);
+       sdio->sched.pse_data_quota = pse;
+       sdio->sched.ple_data_quota = ple;
+       mutex_unlock(&sdio->sched.lock);
+
+       return 0;
+}
+
+/* SDIO probe: allocate the mt76/mt7615 device, start the tx kthread,
+ * initialize the SDIO bus layer and the hardware, then register with
+ * mac80211. All failures after mt76_alloc_device() must free the
+ * device through the err_free label.
+ */
+static int mt7663s_probe(struct sdio_func *func,
+                        const struct sdio_device_id *id)
+{
+       static const struct mt76_driver_ops drv_ops = {
+               .txwi_size = MT_USB_TXD_SIZE,
+               .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ,
+               .tx_prepare_skb = mt7663_usb_sdio_tx_prepare_skb,
+               .tx_complete_skb = mt7663_usb_sdio_tx_complete_skb,
+               .tx_status_data = mt7663_usb_sdio_tx_status_data,
+               .rx_skb = mt7615_queue_rx_skb,
+               .sta_ps = mt7615_sta_ps,
+               .sta_add = mt7663s_sta_add,
+               .sta_remove = mt7615_mac_sta_remove,
+               .update_survey = mt7615_update_channel,
+       };
+       static const struct mt76_bus_ops mt7663s_ops = {
+               .rr = mt7663s_rr,
+               .rmw = mt7663s_rmw,
+               .wr = mt7663s_wr,
+               .write_copy = mt7663s_write_copy,
+               .read_copy = mt7663s_read_copy,
+               .wr_rp = mt7663s_wr_rp,
+               .rd_rp = mt7663s_rd_rp,
+               .type = MT76_BUS_SDIO,
+       };
+       struct ieee80211_ops *ops;
+       struct mt7615_dev *dev;
+       struct mt76_dev *mdev;
+       int ret;
+
+       ops = devm_kmemdup(&func->dev, &mt7615_ops, sizeof(mt7615_ops),
+                          GFP_KERNEL);
+       if (!ops)
+               return -ENOMEM;
+
+       mdev = mt76_alloc_device(&func->dev, sizeof(*dev), ops, &drv_ops);
+       if (!mdev)
+               return -ENOMEM;
+
+       dev = container_of(mdev, struct mt7615_dev, mt76);
+
+       INIT_WORK(&dev->mcu_work, mt7663s_init_work);
+       dev->reg_map = mt7663_usb_sdio_reg_map;
+       dev->ops = ops;
+       sdio_set_drvdata(func, dev);
+
+       mdev->sdio.tx_kthread = kthread_create(mt7663s_kthread_run, dev,
+                                              "mt7663s_tx");
+       if (IS_ERR(mdev->sdio.tx_kthread)) {
+               /* do not leak the device allocated above */
+               ret = PTR_ERR(mdev->sdio.tx_kthread);
+               goto err_free;
+       }
+
+       ret = mt76s_init(mdev, func, &mt7663s_ops);
+       if (ret < 0)
+               goto err_free;
+
+       ret = mt7663s_hw_init(dev, func);
+       if (ret)
+               goto err_free;
+
+       mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+                   (mt76_rr(dev, MT_HW_REV) & 0xff);
+       dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+       ret = mt76s_alloc_queues(&dev->mt76);
+       if (ret)
+               goto err_deinit;
+
+       ret = mt7663_usb_sdio_register_device(dev);
+       if (ret)
+               goto err_deinit;
+
+       return 0;
+
+err_deinit:
+       mt76s_deinit(&dev->mt76);
+err_free:
+       mt76_free_device(&dev->mt76);
+
+       return ret;
+}
+
+/* SDIO remove: tear down mac80211 registration and the bus layer.
+ * The INITIALIZED bit guards against a double teardown (e.g. remove
+ * racing with a failed/partial probe).
+ */
+static void mt7663s_remove(struct sdio_func *func)
+{
+       struct mt7615_dev *dev = sdio_get_drvdata(func);
+
+       if (!test_and_clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+               return;
+
+       ieee80211_unregister_hw(dev->mt76.hw);
+       mt76s_deinit(&dev->mt76);
+       mt76_free_device(&dev->mt76);
+}
+
+#ifdef CONFIG_PM
+/* System suspend: if firmware offload is active (and the device is not
+ * already suspended) ask the MCU to suspend the host interface, then
+ * stop tx/rx and hand ownership back to the firmware.
+ */
+static int mt7663s_suspend(struct device *dev)
+{
+       struct sdio_func *func = dev_to_sdio_func(dev);
+       struct mt7615_dev *mdev = sdio_get_drvdata(func);
+
+       if (!test_bit(MT76_STATE_SUSPEND, &mdev->mphy.state) &&
+           mt7615_firmware_offload(mdev)) {
+               int err;
+
+               err = mt7615_mcu_set_hif_suspend(mdev, true);
+               if (err < 0)
+                       return err;
+       }
+
+       mt76s_stop_txrx(&mdev->mt76);
+
+       return mt7663s_firmware_own(mdev);
+}
+
+/* System resume: reclaim driver ownership, then (mirroring suspend)
+ * tell the MCU to resume the host interface when offload is active.
+ */
+static int mt7663s_resume(struct device *dev)
+{
+       struct sdio_func *func = dev_to_sdio_func(dev);
+       struct mt7615_dev *mdev = sdio_get_drvdata(func);
+       int err;
+
+       err = mt7663s_driver_own(mdev);
+       if (err)
+               return err;
+
+       if (!test_bit(MT76_STATE_SUSPEND, &mdev->mphy.state) &&
+           mt7615_firmware_offload(mdev))
+               err = mt7615_mcu_set_hif_suspend(mdev, false);
+
+       return err;
+}
+
+static const struct dev_pm_ops mt7663s_pm_ops = {
+       .suspend = mt7663s_suspend,
+       .resume = mt7663s_resume,
+};
+#endif
+
+MODULE_DEVICE_TABLE(sdio, mt7663s_table);
+MODULE_FIRMWARE(MT7663_OFFLOAD_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7663_OFFLOAD_ROM_PATCH);
+MODULE_FIRMWARE(MT7663_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7663_ROM_PATCH);
+
+static struct sdio_driver mt7663s_driver = {
+       .name           = KBUILD_MODNAME,
+       .probe          = mt7663s_probe,
+       .remove         = mt7663s_remove,
+       .id_table       = mt7663s_table,
+#ifdef CONFIG_PM
+       .drv = {
+               .pm = &mt7663s_pm_ops,
+       }
+#endif
+};
+module_sdio_driver(mt7663s_driver);
+
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.h b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.h
new file mode 100644 (file)
index 0000000..0518097
--- /dev/null
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ */
+
+#ifndef __MT76S_H
+#define __MT76S_H
+
+#define MT_PSE_PAGE_SZ                 128
+
+#define MCR_WCIR                       0x0000
+#define MCR_WHLPCR                     0x0004
+#define WHLPCR_FW_OWN_REQ_CLR          BIT(9)
+#define WHLPCR_FW_OWN_REQ_SET          BIT(8)
+#define WHLPCR_IS_DRIVER_OWN           BIT(8)
+#define WHLPCR_INT_EN_CLR              BIT(1)
+#define WHLPCR_INT_EN_SET              BIT(0)
+
+#define MCR_WSDIOCSR                   0x0008
+#define MCR_WHCR                       0x000C
+#define W_INT_CLR_CTRL                 BIT(1)
+#define RECV_MAILBOX_RD_CLR_EN         BIT(2)
+#define MAX_HIF_RX_LEN_NUM             GENMASK(13, 8)
+#define RX_ENHANCE_MODE                        BIT(16)
+
+#define MCR_WHISR                      0x0010
+#define MCR_WHIER                      0x0014
+#define WHIER_D2H_SW_INT               GENMASK(31, 8)
+#define WHIER_FW_OWN_BACK_INT_EN       BIT(7)
+#define WHIER_ABNORMAL_INT_EN          BIT(6)
+#define WHIER_RX1_DONE_INT_EN          BIT(2)
+#define WHIER_RX0_DONE_INT_EN          BIT(1)
+#define WHIER_TX_DONE_INT_EN           BIT(0)
+#define WHIER_DEFAULT                  (WHIER_RX0_DONE_INT_EN  | \
+                                        WHIER_RX1_DONE_INT_EN  | \
+                                        WHIER_TX_DONE_INT_EN   | \
+                                        WHIER_ABNORMAL_INT_EN  | \
+                                        WHIER_D2H_SW_INT)
+
+#define MCR_WASR                       0x0020
+#define MCR_WSICR                      0x0024
+#define MCR_WTSR0                      0x0028
+#define TQ0_CNT                                GENMASK(7, 0)
+#define TQ1_CNT                                GENMASK(15, 8)
+#define TQ2_CNT                                GENMASK(23, 16)
+#define TQ3_CNT                                GENMASK(31, 24)
+
+#define MCR_WTSR1                      0x002c
+#define TQ4_CNT                                GENMASK(7, 0)
+#define TQ5_CNT                                GENMASK(15, 8)
+#define TQ6_CNT                                GENMASK(23, 16)
+#define TQ7_CNT                                GENMASK(31, 24)
+
+#define MCR_WTDR1                      0x0034
+#define MCR_WRDR0                      0x0050
+#define MCR_WRDR1                      0x0054
+#define MCR_WRDR(p)                    (0x0050 + 4 * (p))
+#define MCR_H2DSM0R                    0x0070
+#define H2D_SW_INT_READ                        BIT(16)
+#define H2D_SW_INT_WRITE               BIT(17)
+
+#define MCR_H2DSM1R                    0x0074
+#define MCR_D2HRM0R                    0x0078
+#define MCR_D2HRM1R                    0x007c
+#define MCR_D2HRM2R                    0x0080
+#define MCR_WRPLR                      0x0090
+#define RX0_PACKET_LENGTH              GENMASK(15, 0)
+#define RX1_PACKET_LENGTH              GENMASK(31, 16)
+
+#define MCR_WTMDR                      0x00b0
+#define MCR_WTMCR                      0x00b4
+#define MCR_WTMDPCR0                   0x00b8
+#define MCR_WTMDPCR1                   0x00bc
+#define MCR_WPLRCR                     0x00d4
+#define MCR_WSR                                0x00D8
+#define MCR_CLKIOCR                    0x0100
+#define MCR_CMDIOCR                    0x0104
+#define MCR_DAT0IOCR                   0x0108
+#define MCR_DAT1IOCR                   0x010C
+#define MCR_DAT2IOCR                   0x0110
+#define MCR_DAT3IOCR                   0x0114
+#define MCR_CLKDLYCR                   0x0118
+#define MCR_CMDDLYCR                   0x011C
+#define MCR_ODATDLYCR                  0x0120
+#define MCR_IDATDLYCR1                 0x0124
+#define MCR_IDATDLYCR2                 0x0128
+#define MCR_ILCHCR                     0x012C
+#define MCR_WTQCR0                     0x0130
+#define MCR_WTQCR1                     0x0134
+#define MCR_WTQCR2                     0x0138
+#define MCR_WTQCR3                     0x013C
+#define MCR_WTQCR4                     0x0140
+#define MCR_WTQCR5                     0x0144
+#define MCR_WTQCR6                     0x0148
+#define MCR_WTQCR7                     0x014C
+#define MCR_WTQCR(x)                   (0x130 + 4 * (x))
+#define TXQ_CNT_L                      GENMASK(15, 0)
+#define TXQ_CNT_H                      GENMASK(31, 16)
+
+#define MCR_SWPCDBGR                   0x0154
+
+/* Interrupt status block read in one burst from MCR_WHISR by the IRQ
+ * handler: interrupt status word, per-tx-queue released page counts,
+ * rx packet counts/lengths for both rx queues, and the received
+ * mailbox words. Layout must match the device, hence __packed.
+ */
+struct mt76s_intr {
+       u32 isr;
+       struct {
+               u32 wtqcr[8];
+       } tx;
+       struct {
+               u16 num[2];
+               u16 len[2][16];
+       } rx;
+       u32 rec_mb[2];
+} __packed;
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
new file mode 100644 (file)
index 0000000..28b86be
--- /dev/null
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: Felix Fietkau <nbd@nbd.name>
+ *        Lorenzo Bianconi <lorenzo@kernel.org>
+ *        Sean Wang <sean.wang@mediatek.com>
+ */
+#include <linux/kernel.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+
+#include "mt7615.h"
+#include "mac.h"
+#include "mcu.h"
+#include "regs.h"
+#include "sdio.h"
+
+/* Seed the SDIO tx scheduler quotas from the hardware's current PSE/PLE
+ * page-group minimums, and derive the per-frame page-count deficit from
+ * the tx1 added-DW count (in bytes, hence << 2). Always returns 0.
+ */
+static int mt7663s_mcu_init_sched(struct mt7615_dev *dev)
+{
+       struct mt76_sdio *sdio = &dev->mt76.sdio;
+       u32 pse0, ple, pse1, txdwcnt;
+
+       pse0 = mt76_get_field(dev, MT_PSE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA);
+       pse1 = mt76_get_field(dev, MT_PSE_PG_HIF1_GROUP, MT_HIF1_MIN_QUOTA);
+       ple = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA);
+       txdwcnt = mt76_get_field(dev, MT_PP_TXDWCNT,
+                                MT_PP_TXDWCNT_TX1_ADD_DW_CNT);
+
+       mutex_lock(&sdio->sched.lock);
+
+       sdio->sched.pse_data_quota = pse0;
+       sdio->sched.ple_data_quota = ple;
+       sdio->sched.pse_mcu_quota = pse1;
+       sdio->sched.deficit = txdwcnt << 2;
+
+       mutex_unlock(&sdio->sched.lock);
+
+       return 0;
+}
+
+/* MCU message transmit for the SDIO bus: fill the command header,
+ * queue the skb on the MCU tx queue, kick the queue and optionally
+ * wait for the response. Serialized by the mcu mutex so the sequence
+ * number and response matching stay consistent.
+ */
+static int
+mt7663s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+                        int cmd, bool wait_resp)
+{
+       struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+       int ret, seq;
+
+       mutex_lock(&mdev->mcu.mutex);
+
+       mt7615_mcu_fill_msg(dev, skb, cmd, &seq);
+       ret = mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, 0);
+       if (ret)
+               goto out;
+
+       mt76_queue_kick(dev, mdev->q_tx[MT_TXQ_MCU].q);
+       if (wait_resp)
+               ret = mt7615_mcu_wait_response(dev, cmd, seq);
+
+out:
+       mutex_unlock(&mdev->mcu.mutex);
+
+       return ret;
+}
+
+/* Take ownership of the device back from the firmware (leave low-power
+ * state). No-op if the PM bit was already clear. On timeout the PM bit
+ * is restored so the state flag stays consistent with the hardware.
+ * Updates pm.last_activity on success.
+ */
+int mt7663s_driver_own(struct mt7615_dev *dev)
+{
+       struct sdio_func *func = dev->mt76.sdio.func;
+       struct mt76_phy *mphy = &dev->mt76.phy;
+       u32 status;
+       int ret;
+
+       if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
+               goto out;
+
+       sdio_claim_host(func);
+
+       sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, 0);
+
+       /* poll up to 1s (2ms interval) for the driver-own bit */
+       ret = readx_poll_timeout(mt7663s_read_pcr, dev, status,
+                                status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
+       if (ret < 0) {
+               dev_err(dev->mt76.dev, "Cannot get ownership from device");
+               set_bit(MT76_STATE_PM, &mphy->state);
+               sdio_release_host(func);
+
+               return ret;
+       }
+
+       sdio_release_host(func);
+
+out:
+       dev->pm.last_activity = jiffies;
+
+       return 0;
+}
+
+/* Hand ownership of the device to the firmware (enter low-power state).
+ * No-op if the PM bit was already set. On timeout the PM bit is cleared
+ * again to reflect that the handover did not complete.
+ */
+int mt7663s_firmware_own(struct mt7615_dev *dev)
+{
+       struct sdio_func *func = dev->mt76.sdio.func;
+       struct mt76_phy *mphy = &dev->mt76.phy;
+       u32 status;
+       int ret;
+
+       if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
+               return 0;
+
+       sdio_claim_host(func);
+
+       sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, 0);
+
+       /* poll until the driver-own bit clears (up to 1s) */
+       ret = readx_poll_timeout(mt7663s_read_pcr, dev, status,
+                                !(status & WHLPCR_IS_DRIVER_OWN), 2000, 1000000);
+       if (ret < 0) {
+               dev_err(dev->mt76.dev, "Cannot set ownership to device");
+               clear_bit(MT76_STATE_PM, &mphy->state);
+       }
+
+       sdio_release_host(func);
+
+       return ret;
+}
+
+/* Bring up the MCU over SDIO: take driver ownership, install the SDIO
+ * mcu_ops, restart any already-running N9 firmware, load the firmware
+ * and configure the tx scheduler quotas. Sets MT76_STATE_MCU_RUNNING
+ * on success; returns 0 or a negative errno.
+ */
+int mt7663s_mcu_init(struct mt7615_dev *dev)
+{
+       static const struct mt76_mcu_ops mt7663s_mcu_ops = {
+               .headroom = sizeof(struct mt7615_mcu_txd),
+               .tailroom = MT_USB_TAIL_SIZE,
+               .mcu_skb_send_msg = mt7663s_mcu_send_message,
+               .mcu_send_msg = mt7615_mcu_msg_send,
+               .mcu_restart = mt7615_mcu_restart,
+               .mcu_rr = mt7615_mcu_reg_rr,
+               .mcu_wr = mt7615_mcu_reg_wr,
+       };
+       int ret;
+
+       ret = mt7663s_driver_own(dev);
+       if (ret)
+               return ret;
+
+       /* was terminated with a stray comma (comma operator), which
+        * silently fused this with the next statement; use a semicolon
+        */
+       dev->mt76.mcu_ops = &mt7663s_mcu_ops;
+
+       /* if the N9 firmware is already up, restart it before reload */
+       ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
+       if (ret) {
+               mt7615_mcu_restart(&dev->mt76);
+               if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
+                                   MT_TOP_MISC2_FW_N9_RDY, 0, 500))
+                       return -EIO;
+       }
+
+       ret = __mt7663_load_firmware(dev);
+       if (ret)
+               return ret;
+
+       ret = mt7663s_mcu_init_sched(dev);
+       if (ret)
+               return ret;
+
+       set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+
+       return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
new file mode 100644 (file)
index 0000000..443a4ec
--- /dev/null
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: Felix Fietkau <nbd@nbd.name>
+ *        Lorenzo Bianconi <lorenzo@kernel.org>
+ *        Sean Wang <sean.wang@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio_func.h>
+
+#include "../trace.h"
+#include "mt7615.h"
+#include "sdio.h"
+#include "mac.h"
+
+/* Return freed tx pages to the scheduler quotas using the WTQCR words
+ * reported in the interrupt status block (per-AC counts packed two per
+ * u32; the layout is per the AC comments below).
+ */
+static void mt7663s_refill_sched_quota(struct mt7615_dev *dev, u32 *data)
+{
+       struct mt76_sdio *sdio = &dev->mt76.sdio;
+
+       mutex_lock(&sdio->sched.lock);
+       sdio->sched.pse_data_quota += FIELD_GET(TXQ_CNT_L, data[0]) + /* BK */
+                                     FIELD_GET(TXQ_CNT_H, data[0]) + /* BE */
+                                     FIELD_GET(TXQ_CNT_L, data[1]) + /* VI */
+                                     FIELD_GET(TXQ_CNT_H, data[1]);  /* VO */
+       sdio->sched.ple_data_quota += FIELD_GET(TXQ_CNT_H, data[2]) + /* BK */
+                                     FIELD_GET(TXQ_CNT_L, data[3]) + /* BE */
+                                     FIELD_GET(TXQ_CNT_H, data[3]) + /* VI */
+                                     FIELD_GET(TXQ_CNT_L, data[4]);  /* VO */
+       sdio->sched.pse_mcu_quota += FIELD_GET(TXQ_CNT_L, data[2]);
+       mutex_unlock(&sdio->sched.lock);
+}
+
+/* Build an rx skb from a chunk of the bounce buffer: copy up to
+ * MT_SKB_HEAD_LEN into the skb head and attach the remainder as a page
+ * fragment (with a page reference) to avoid copying large payloads.
+ * Returns NULL on allocation failure.
+ */
+static struct sk_buff *mt7663s_build_rx_skb(void *data, int data_len,
+                                           int buf_len)
+{
+       int len = min_t(int, data_len, MT_SKB_HEAD_LEN);
+       struct sk_buff *skb;
+
+       skb = alloc_skb(len, GFP_KERNEL);
+       if (!skb)
+               return NULL;
+
+       skb_put_data(skb, data, len);
+       if (data_len > len) {
+               struct page *page;
+
+               data += len;
+               page = virt_to_head_page(data);
+               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+                               page, data - page_address(page),
+                               data_len - len, buf_len);
+               get_page(page);
+       }
+
+       return skb;
+}
+
+/* Drain one rx hardware queue: compute the aggregate transfer length
+ * from the per-packet lengths in @intr, read everything in a single
+ * sdio_readsb() burst into a temporary page allocation, then split the
+ * buffer into skbs and append them to the mt76 rx queue.
+ *
+ * Each packet is padded to a 4-byte boundary with 4 extra bytes
+ * (round_up(len + 4, 4)), matching the device's aggregation format.
+ * Returns the sdio_readsb() result (0 on success) or -ENOMEM.
+ */
+static int mt7663s_rx_run_queue(struct mt7615_dev *dev, enum mt76_rxq_id qid,
+                               struct mt76s_intr *intr)
+{
+       struct mt76_queue *q = &dev->mt76.q_rx[qid];
+       struct mt76_sdio *sdio = &dev->mt76.sdio;
+       int len = 0, err, i, order;
+       struct page *page;
+       u8 *buf;
+
+       for (i = 0; i < intr->rx.num[qid]; i++)
+               len += round_up(intr->rx.len[qid][i] + 4, 4);
+
+       if (!len)
+               return 0;
+
+       /* multi-block transfers must be a multiple of the block size */
+       if (len > sdio->func->cur_blksize)
+               len = roundup(len, sdio->func->cur_blksize);
+
+       order = get_order(len);
+       page = __dev_alloc_pages(GFP_KERNEL, order);
+       if (!page)
+               return -ENOMEM;
+
+       buf = page_address(page);
+
+       err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
+       if (err < 0) {
+               dev_err(dev->mt76.dev, "sdio read data failed:%d\n", err);
+               __free_pages(page, order);
+               return err;
+       }
+
+       for (i = 0; i < intr->rx.num[qid]; i++) {
+               int index = (q->tail + i) % q->ndesc;
+               struct mt76_queue_entry *e = &q->entry[index];
+
+               len = intr->rx.len[qid][i];
+               e->skb = mt7663s_build_rx_skb(buf, len, round_up(len + 4, 4));
+               if (!e->skb)
+                       break;
+
+               buf += round_up(len + 4, 4);
+               /* stop before overrunning the ring */
+               if (q->queued + i + 1 == q->ndesc)
+                       break;
+       }
+       /* skbs hold page references of their own; drop ours */
+       __free_pages(page, order);
+
+       spin_lock_bh(&q->lock);
+       q->tail = (q->tail + i) % q->ndesc;
+       q->queued += i;
+       spin_unlock_bh(&q->lock);
+
+       return err;
+}
+
+/* Reserve tx scheduler quota for one frame. @size is the frame's PSE
+ * page count (buffer size plus the scheduler deficit, in 128-byte
+ * pages). MCU frames consume pse_mcu_quota (unlimited before the MCU
+ * is running); data frames consume PSE pages plus one PLE entry, with
+ * control frames exempt. Returns 0 if the frame may be sent, -EBUSY if
+ * quota is exhausted.
+ */
+static int mt7663s_tx_update_sched(struct mt7615_dev *dev,
+                                  struct mt76_queue_entry *e,
+                                  bool mcu)
+{
+       struct mt76_sdio *sdio = &dev->mt76.sdio;
+       struct mt76_phy *mphy = &dev->mt76.phy;
+       struct ieee80211_hdr *hdr;
+       int size, ret = -EBUSY;
+
+       size = DIV_ROUND_UP(e->buf_sz + sdio->sched.deficit, MT_PSE_PAGE_SZ);
+
+       if (mcu) {
+               if (!test_bit(MT76_STATE_MCU_RUNNING, &mphy->state))
+                       return 0;
+
+               mutex_lock(&sdio->sched.lock);
+               if (sdio->sched.pse_mcu_quota > size) {
+                       sdio->sched.pse_mcu_quota -= size;
+                       ret = 0;
+               }
+               mutex_unlock(&sdio->sched.lock);
+
+               return ret;
+       }
+
+       /* the 802.11 header sits after the device tx descriptor */
+       hdr = (struct ieee80211_hdr *)(e->skb->data + MT_USB_TXD_SIZE);
+       if (ieee80211_is_ctl(hdr->frame_control))
+               return 0;
+
+       mutex_lock(&sdio->sched.lock);
+       if (sdio->sched.pse_data_quota > size &&
+           sdio->sched.ple_data_quota > 0) {
+               sdio->sched.pse_data_quota -= size;
+               sdio->sched.ple_data_quota--;
+               ret = 0;
+       }
+       mutex_unlock(&sdio->sched.lock);
+
+       return ret;
+}
+
+/* Push pending frames from one tx queue to the device. Stops early when
+ * the scheduler has no quota left. Returns the number of frames sent,
+ * or -EIO on an SDIO write error.
+ */
+static int mt7663s_tx_run_queue(struct mt7615_dev *dev, struct mt76_queue *q)
+{
+       bool mcu = q == dev->mt76.q_tx[MT_TXQ_MCU].q;
+       struct mt76_sdio *sdio = &dev->mt76.sdio;
+       int nframes = 0;
+
+       while (q->first != q->tail) {
+               struct mt76_queue_entry *e = &q->entry[q->first];
+               int err, len = e->skb->len;
+
+               if (mt7663s_tx_update_sched(dev, e, mcu))
+                       break;
+
+               /* multi-block transfers must be block-size aligned */
+               if (len > sdio->func->cur_blksize)
+                       len = roundup(len, sdio->func->cur_blksize);
+
+               /* TODO: skb_walk_frags and then write to SDIO port */
+               err = sdio_writesb(sdio->func, MCR_WTDR1, e->skb->data, len);
+               if (err) {
+                       dev_err(dev->mt76.dev, "sdio write failed: %d\n", err);
+                       return -EIO;
+               }
+
+               e->done = true;
+               q->first = (q->first + 1) % q->ndesc;
+               nframes++;
+       }
+
+       return nframes;
+}
+
+/* Run every tx queue up to (excluding) MT_TXQ_MCU_WA. Returns the total
+ * number of frames sent across all queues, or the first error.
+ */
+static int mt7663s_tx_run_queues(struct mt7615_dev *dev)
+{
+       int i, nframes = 0;
+
+       for (i = 0; i < MT_TXQ_MCU_WA; i++) {
+               int ret;
+
+               ret = mt7663s_tx_run_queue(dev, dev->mt76.q_tx[i].q);
+               if (ret < 0)
+                       return ret;
+
+               nframes += ret;
+       }
+
+       return nframes;
+}
+
+/* Dedicated tx kthread: repeatedly flush the tx queues while there is
+ * work and the interface is running; otherwise sleep until woken (by
+ * the IRQ handler or the tx path). When frames were sent, also wake the
+ * status kthread to reap completions.
+ */
+int mt7663s_kthread_run(void *data)
+{
+       struct mt7615_dev *dev = data;
+       struct mt76_phy *mphy = &dev->mt76.phy;
+
+       while (!kthread_should_stop()) {
+               int ret;
+
+               cond_resched();
+
+               sdio_claim_host(dev->mt76.sdio.func);
+               ret = mt7663s_tx_run_queues(dev);
+               sdio_release_host(dev->mt76.sdio.func);
+
+               if (ret <= 0 || !test_bit(MT76_STATE_RUNNING, &mphy->state)) {
+                       set_current_state(TASK_INTERRUPTIBLE);
+                       schedule();
+               } else {
+                       wake_up_process(dev->mt76.sdio.kthread);
+               }
+       }
+
+       return 0;
+}
+
+/* SDIO IRQ handler (runs with the host claimed by the SDIO core):
+ * mask interrupts, read the whole interrupt status block in one burst
+ * (the WHISR read also clears the status, per the read-clear setup in
+ * hw_init), service rx queues and tx completions, and loop until no
+ * status bits remain; finally re-enable interrupts.
+ *
+ * NOTE(review): the sdio_readsb() return value is not checked — on a
+ * bus error intr may be stale/garbage; consider bailing out on error.
+ */
+void mt7663s_sdio_irq(struct sdio_func *func)
+{
+       struct mt7615_dev *dev = sdio_get_drvdata(func);
+       struct mt76_sdio *sdio = &dev->mt76.sdio;
+       struct mt76s_intr intr;
+
+       /* disable interrupt */
+       sdio_writel(func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, 0);
+
+       do {
+               sdio_readsb(func, &intr, MCR_WHISR, sizeof(struct mt76s_intr));
+               trace_dev_irq(&dev->mt76, intr.isr, 0);
+
+               if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.phy.state))
+                       goto out;
+
+               if (intr.isr & WHIER_RX0_DONE_INT_EN) {
+                       mt7663s_rx_run_queue(dev, 0, &intr);
+                       wake_up_process(sdio->kthread);
+               }
+
+               if (intr.isr & WHIER_RX1_DONE_INT_EN) {
+                       mt7663s_rx_run_queue(dev, 1, &intr);
+                       wake_up_process(sdio->kthread);
+               }
+
+               if (intr.isr & WHIER_TX_DONE_INT_EN) {
+                       mt7663s_refill_sched_quota(dev, intr.tx.wtqcr);
+                       mt7663s_tx_run_queues(dev);
+                       wake_up_process(sdio->kthread);
+               }
+       } while (intr.isr);
+out:
+       /* enable interrupt */
+       sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, 0);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
new file mode 100644 (file)
index 0000000..1730751
--- /dev/null
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#include "mt7615.h"
+#include "eeprom.h"
+#include "mcu.h"
+
+enum {
+       TM_CHANGED_TXPOWER_CTRL,
+       TM_CHANGED_TXPOWER,
+       TM_CHANGED_FREQ_OFFSET,
+
+       /* must be last */
+       NUM_TM_CHANGED
+};
+
+
+static const u8 tm_change_map[] = {
+       [TM_CHANGED_TXPOWER_CTRL] = MT76_TM_ATTR_TX_POWER_CONTROL,
+       [TM_CHANGED_TXPOWER] = MT76_TM_ATTR_TX_POWER,
+       [TM_CHANGED_FREQ_OFFSET] = MT76_TM_ATTR_FREQ_OFFSET,
+};
+
+static const u32 reg_backup_list[] = {
+       MT_WF_PHY_RFINTF3_0(0),
+       MT_WF_PHY_RFINTF3_0(1),
+       MT_WF_PHY_RFINTF3_0(2),
+       MT_WF_PHY_RFINTF3_0(3),
+       MT_ANT_SWITCH_CON(2),
+       MT_ANT_SWITCH_CON(3),
+       MT_ANT_SWITCH_CON(4),
+       MT_ANT_SWITCH_CON(6),
+       MT_ANT_SWITCH_CON(7),
+       MT_ANT_SWITCH_CON(8),
+};
+
+static const struct {
+       u16 wf;
+       u16 reg;
+} rf_backup_list[] = {
+       { 0, 0x48 },
+       { 1, 0x48 },
+       { 2, 0x48 },
+       { 3, 0x48 },
+};
+
+static int
+mt7615_tm_set_tx_power(struct mt7615_phy *phy)
+{
+       struct mt7615_dev *dev = phy->dev;
+       struct mt76_phy *mphy = phy->mt76;
+       int i, ret, n_chains = hweight8(mphy->antenna_mask);
+       struct cfg80211_chan_def *chandef = &mphy->chandef;
+       int freq = chandef->center_freq1, len, target_chains;
+       u8 *data, *eep = (u8 *)dev->mt76.eeprom.data;
+       enum nl80211_band band = chandef->chan->band;
+       struct sk_buff *skb;
+       struct {
+               u8 center_chan;
+               u8 dbdc_idx;
+               u8 band;
+               u8 rsv;
+       } __packed req_hdr = {
+               .center_chan = ieee80211_frequency_to_channel(freq),
+               .band = band,
+               .dbdc_idx = phy != &dev->phy,
+       };
+       u8 *tx_power = NULL;
+
+       if (dev->mt76.test.state != MT76_TM_STATE_OFF)
+               tx_power = dev->mt76.test.tx_power;
+
+       len = sizeof(req_hdr) + MT7615_EE_MAX - MT_EE_NIC_CONF_0;
+       skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr) + len);
+       if (!skb)
+               return -ENOMEM;
+
+       skb_put_data(skb, &req_hdr, sizeof(req_hdr));
+       data = skb_put_data(skb, eep + MT_EE_NIC_CONF_0, len);
+
+       target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains;
+       for (i = 0; i < target_chains; i++) {
+               int index;
+
+               ret = mt7615_eeprom_get_target_power_index(dev, chandef->chan, i);
+               if (ret < 0)
+                       return -EINVAL;
+
+               index = ret - MT_EE_NIC_CONF_0;
+               if (tx_power && tx_power[i])
+                       data[ret - MT_EE_NIC_CONF_0] = tx_power[i];
+       }
+
+       return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+                                      MCU_EXT_CMD_SET_TX_POWER_CTRL, false);
+}
+
+static void
+mt7615_tm_reg_backup_restore(struct mt7615_dev *dev)
+{
+       u32 *b = dev->test.reg_backup;
+       int n_regs = ARRAY_SIZE(reg_backup_list);
+       int n_rf_regs = ARRAY_SIZE(rf_backup_list);
+       int i;
+
+       if (dev->mt76.test.state == MT76_TM_STATE_OFF) {
+               for (i = 0; i < n_regs; i++)
+                       mt76_wr(dev, reg_backup_list[i], b[i]);
+
+               for (i = 0; i < n_rf_regs; i++)
+                       mt7615_rf_wr(dev, rf_backup_list[i].wf,
+                                    rf_backup_list[i].reg, b[n_regs + i]);
+               return;
+       }
+
+       if (b)
+               return;
+
+       b = devm_kzalloc(dev->mt76.dev, 4 * (n_regs + n_rf_regs),
+                        GFP_KERNEL);
+       if (!b)
+               return;
+
+       dev->test.reg_backup = b;
+       for (i = 0; i < n_regs; i++)
+               b[i] = mt76_rr(dev, reg_backup_list[i]);
+       for (i = 0; i < n_rf_regs; i++)
+               b[n_regs + i] = mt7615_rf_rr(dev, rf_backup_list[i].wf,
+                                            rf_backup_list[i].reg);
+}
+
+
+static void
+mt7615_tm_init_phy(struct mt7615_dev *dev, struct mt7615_phy *phy)
+{
+       unsigned int total_flags = ~0;
+
+       if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+               return;
+
+       mutex_unlock(&dev->mt76.mutex);
+       mt7615_set_channel(phy);
+       mt7615_ops.configure_filter(phy->mt76->hw, 0, &total_flags, 0);
+       mutex_lock(&dev->mt76.mutex);
+
+       mt7615_tm_reg_backup_restore(dev);
+}
+
+static void
+mt7615_tm_init(struct mt7615_dev *dev)
+{
+       mt7615_tm_init_phy(dev, &dev->phy);
+
+       if (dev->mt76.phy2)
+               mt7615_tm_init_phy(dev, dev->mt76.phy2->priv);
+}
+
+static void
+mt7615_tm_set_rx_enable(struct mt7615_dev *dev, bool en)
+{
+       u32 rqcr_mask = (MT_ARB_RQCR_RX_START |
+                        MT_ARB_RQCR_RXV_START |
+                        MT_ARB_RQCR_RXV_R_EN |
+                        MT_ARB_RQCR_RXV_T_EN) *
+                       (BIT(0) | BIT(MT_ARB_RQCR_BAND_SHIFT));
+
+       if (en) {
+               mt76_clear(dev, MT_ARB_SCR,
+                          MT_ARB_SCR_RX0_DISABLE | MT_ARB_SCR_RX1_DISABLE);
+               mt76_set(dev, MT_ARB_RQCR, rqcr_mask);
+       } else {
+               mt76_set(dev, MT_ARB_SCR,
+                        MT_ARB_SCR_RX0_DISABLE | MT_ARB_SCR_RX1_DISABLE);
+               mt76_clear(dev, MT_ARB_RQCR, rqcr_mask);
+       }
+}
+
+static void
+mt7615_tm_set_tx_antenna(struct mt7615_dev *dev, bool en)
+{
+       struct mt76_testmode_data *td = &dev->mt76.test;
+       u8 mask = td->tx_antenna_mask;
+       int i;
+
+       if (!mask)
+               return;
+
+       if (!en)
+               mask = dev->phy.chainmask;
+
+       for (i = 0; i < 4; i++) {
+               mt76_rmw_field(dev, MT_WF_PHY_RFINTF3_0(i),
+                              MT_WF_PHY_RFINTF3_0_ANT,
+                              td->tx_antenna_mask & BIT(i) ? 0 : 0xa);
+
+       }
+
+       /* 2.4 GHz band */
+       mt76_rmw_field(dev, MT_ANT_SWITCH_CON(3), MT_ANT_SWITCH_CON_MODE(0),
+                      (td->tx_antenna_mask & BIT(0)) ? 0x8 : 0x1b);
+       mt76_rmw_field(dev, MT_ANT_SWITCH_CON(4), MT_ANT_SWITCH_CON_MODE(2),
+                      (td->tx_antenna_mask & BIT(1)) ? 0xe : 0x1b);
+       mt76_rmw_field(dev, MT_ANT_SWITCH_CON(6), MT_ANT_SWITCH_CON_MODE1(0),
+                      (td->tx_antenna_mask & BIT(2)) ? 0x0 : 0xf);
+       mt76_rmw_field(dev, MT_ANT_SWITCH_CON(7), MT_ANT_SWITCH_CON_MODE1(2),
+                      (td->tx_antenna_mask & BIT(3)) ? 0x6 : 0xf);
+
+       /* 5 GHz band */
+       mt76_rmw_field(dev, MT_ANT_SWITCH_CON(4), MT_ANT_SWITCH_CON_MODE(1),
+                      (td->tx_antenna_mask & BIT(0)) ? 0xd : 0x1b);
+       mt76_rmw_field(dev, MT_ANT_SWITCH_CON(2), MT_ANT_SWITCH_CON_MODE(3),
+                      (td->tx_antenna_mask & BIT(1)) ? 0x13 : 0x1b);
+       mt76_rmw_field(dev, MT_ANT_SWITCH_CON(7), MT_ANT_SWITCH_CON_MODE1(1),
+                      (td->tx_antenna_mask & BIT(2)) ? 0x5 : 0xf);
+       mt76_rmw_field(dev, MT_ANT_SWITCH_CON(8), MT_ANT_SWITCH_CON_MODE1(3),
+                      (td->tx_antenna_mask & BIT(3)) ? 0xb : 0xf);
+
+       for (i = 0; i < 4; i++) {
+               u32 val;
+
+               val = mt7615_rf_rr(dev, i, 0x48);
+               val &= ~(0x3ff << 20);
+               if (td->tx_antenna_mask & BIT(i))
+                       val |= 3 << 20;
+               else
+                       val |= (2 << 28) | (2 << 26) | (8 << 20);
+               mt7615_rf_wr(dev, i, 0x48, val);
+       }
+}
+
+static void
+mt7615_tm_set_tx_frames(struct mt7615_dev *dev, bool en)
+{
+       struct ieee80211_tx_info *info;
+       struct sk_buff *skb = dev->mt76.test.tx_skb;
+
+       mt7615_mcu_set_chan_info(&dev->phy, MCU_EXT_CMD_SET_RX_PATH);
+       mt7615_tm_set_tx_antenna(dev, en);
+       mt7615_tm_set_rx_enable(dev, !en);
+       if (!en || !skb)
+               return;
+
+       info = IEEE80211_SKB_CB(skb);
+       info->control.vif = dev->phy.monitor_vif;
+}
+
+static void
+mt7615_tm_update_params(struct mt7615_dev *dev, u32 changed)
+{
+       struct mt76_testmode_data *td = &dev->mt76.test;
+       bool en = dev->mt76.test.state != MT76_TM_STATE_OFF;
+
+       if (changed & BIT(TM_CHANGED_TXPOWER_CTRL))
+               mt7615_mcu_set_test_param(dev, MCU_ATE_SET_TX_POWER_CONTROL,
+                                         en, en && td->tx_power_control);
+       if (changed & BIT(TM_CHANGED_FREQ_OFFSET))
+               mt7615_mcu_set_test_param(dev, MCU_ATE_SET_FREQ_OFFSET,
+                                         en, en ? td->freq_offset : 0);
+       if (changed & BIT(TM_CHANGED_TXPOWER))
+               mt7615_tm_set_tx_power(&dev->phy);
+}
+
+static int
+mt7615_tm_set_state(struct mt76_dev *mdev, enum mt76_testmode_state state)
+{
+       struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+       struct mt76_testmode_data *td = &mdev->test;
+       enum mt76_testmode_state prev_state = td->state;
+
+       mdev->test.state = state;
+
+       if (prev_state == MT76_TM_STATE_TX_FRAMES)
+               mt7615_tm_set_tx_frames(dev, false);
+       else if (state == MT76_TM_STATE_TX_FRAMES)
+               mt7615_tm_set_tx_frames(dev, true);
+
+       if (state <= MT76_TM_STATE_IDLE)
+               mt7615_tm_init(dev);
+
+       if ((state == MT76_TM_STATE_IDLE &&
+            prev_state == MT76_TM_STATE_OFF) ||
+           (state == MT76_TM_STATE_OFF &&
+            prev_state == MT76_TM_STATE_IDLE)) {
+               u32 changed = 0;
+               int i;
+
+               for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) {
+                       u16 cur = tm_change_map[i];
+
+                       if (td->param_set[cur / 32] & BIT(cur % 32))
+                               changed |= BIT(i);
+               }
+
+               mt7615_tm_update_params(dev, changed);
+       }
+
+       return 0;
+}
+
+static int
+mt7615_tm_set_params(struct mt76_dev *mdev, struct nlattr **tb,
+                    enum mt76_testmode_state new_state)
+{
+       struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+       struct mt76_testmode_data *td = &dev->mt76.test;
+       u32 changed = 0;
+       int i;
+
+       BUILD_BUG_ON(NUM_TM_CHANGED >= 32);
+
+       if (new_state == MT76_TM_STATE_OFF ||
+           td->state == MT76_TM_STATE_OFF)
+               return 0;
+
+       if (td->tx_antenna_mask & ~dev->phy.chainmask)
+               return -EINVAL;
+
+       for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) {
+               if (tb[tm_change_map[i]])
+                       changed |= BIT(i);
+       }
+
+       mt7615_tm_update_params(dev, changed);
+
+       return 0;
+}
+
+static int
+mt7615_tm_dump_stats(struct mt76_dev *mdev, struct sk_buff *msg)
+{
+       struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+       void *rx, *rssi;
+       int i;
+
+       rx = nla_nest_start(msg, MT76_TM_STATS_ATTR_LAST_RX);
+       if (!rx)
+               return -ENOMEM;
+
+       if (nla_put_s32(msg, MT76_TM_RX_ATTR_FREQ_OFFSET, dev->test.last_freq_offset) ||
+           nla_put_s32(msg, MT76_TM_RX_ATTR_IB_RSSI, dev->test.last_ib_rssi) ||
+           nla_put_s32(msg, MT76_TM_RX_ATTR_WB_RSSI, dev->test.last_wb_rssi))
+               return -ENOMEM;
+
+       rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_RCPI);
+       if (!rssi)
+               return -ENOMEM;
+
+       for (i = 0; i < ARRAY_SIZE(dev->test.last_rcpi); i++)
+               if (nla_put_u8(msg, i, dev->test.last_rcpi[i]))
+                       return -ENOMEM;
+
+       nla_nest_end(msg, rssi);
+
+       nla_nest_end(msg, rx);
+
+       return 0;
+}
+
+const struct mt76_testmode_ops mt7615_testmode_ops = {
+       .set_state = mt7615_tm_set_state,
+       .set_params = mt7615_tm_set_params,
+       .dump_stats = mt7615_tm_dump_stats,
+};
index 5be6704..23a2133 100644 (file)
 #include "mcu.h"
 #include "regs.h"
 
-static const u32 mt7663u_reg_map[] = {
-       [MT_TOP_CFG_BASE]       = 0x80020000,
-       [MT_HW_BASE]            = 0x80000000,
-       [MT_DMA_SHDL_BASE]      = 0x5000a000,
-       [MT_HIF_BASE]           = 0x50000000,
-       [MT_CSR_BASE]           = 0x40000000,
-       [MT_EFUSE_ADDR_BASE]    = 0x78011000,
-       [MT_TOP_MISC_BASE]      = 0x81020000,
-       [MT_PLE_BASE]           = 0x82060000,
-       [MT_PSE_BASE]           = 0x82068000,
-       [MT_PHY_BASE]           = 0x82070000,
-       [MT_WTBL_BASE_ADDR]     = 0x820e0000,
-       [MT_CFG_BASE]           = 0x820f0000,
-       [MT_AGG_BASE]           = 0x820f2000,
-       [MT_ARB_BASE]           = 0x820f3000,
-       [MT_TMAC_BASE]          = 0x820f4000,
-       [MT_RMAC_BASE]          = 0x820f5000,
-       [MT_DMA_BASE]           = 0x820f7000,
-       [MT_PF_BASE]            = 0x820f8000,
-       [MT_WTBL_BASE_ON]       = 0x820f9000,
-       [MT_WTBL_BASE_OFF]      = 0x820f9800,
-       [MT_LPON_BASE]          = 0x820fb000,
-       [MT_MIB_BASE]           = 0x820fd000,
-};
-
 static const struct usb_device_id mt7615_device_table[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7663, 0xff, 0xff, 0xff) },
        { },
@@ -64,205 +39,19 @@ static void mt7663u_cleanup(struct mt7615_dev *dev)
        mt76u_queues_deinit(&dev->mt76);
 }
 
-static void
-mt7663u_mac_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid,
-                      enum mt76_txq_id qid, struct ieee80211_sta *sta,
-                      struct sk_buff *skb)
-{
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct ieee80211_key_conf *key = info->control.hw_key;
-       __le32 *txwi;
-       int pid;
-
-       if (!wcid)
-               wcid = &dev->mt76.global_wcid;
-
-       pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
-
-       txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE);
-       memset(txwi, 0, MT_USB_TXD_SIZE);
-       mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false);
-       skb_push(skb, MT_USB_TXD_SIZE);
-}
-
-static int
-__mt7663u_mac_set_rates(struct mt7615_dev *dev,
-                       struct mt7615_wtbl_desc *wd)
-{
-       struct mt7615_rate_desc *rate = &wd->rate;
-       struct mt7615_sta *sta = wd->sta;
-       u32 w5, w27, addr, val;
-
-       lockdep_assert_held(&dev->mt76.mutex);
-
-       if (!sta)
-               return -EINVAL;
-
-       if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
-               return -ETIMEDOUT;
-
-       addr = mt7615_mac_wtbl_addr(dev, sta->wcid.idx);
-
-       w27 = mt76_rr(dev, addr + 27 * 4);
-       w27 &= ~MT_WTBL_W27_CC_BW_SEL;
-       w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rate->bw);
-
-       w5 = mt76_rr(dev, addr + 5 * 4);
-       w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE |
-               MT_WTBL_W5_MPDU_OK_COUNT |
-               MT_WTBL_W5_MPDU_FAIL_COUNT |
-               MT_WTBL_W5_RATE_IDX);
-       w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rate->bw) |
-             FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE,
-                        rate->bw_idx ? rate->bw_idx - 1 : 7);
-
-       mt76_wr(dev, MT_WTBL_RIUCR0, w5);
-
-       mt76_wr(dev, MT_WTBL_RIUCR1,
-               FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rate->probe_val) |
-               FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rate->val[0]) |
-               FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rate->val[1]));
-
-       mt76_wr(dev, MT_WTBL_RIUCR2,
-               FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rate->val[1] >> 8) |
-               FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rate->val[1]) |
-               FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rate->val[2]) |
-               FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rate->val[2]));
-
-       mt76_wr(dev, MT_WTBL_RIUCR3,
-               FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rate->val[2] >> 4) |
-               FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rate->val[3]) |
-               FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rate->val[3]));
-
-       mt76_wr(dev, MT_WTBL_UPDATE,
-               FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, sta->wcid.idx) |
-               MT_WTBL_UPDATE_RATE_UPDATE |
-               MT_WTBL_UPDATE_TX_COUNT_CLEAR);
-
-       mt76_wr(dev, addr + 27 * 4, w27);
-
-       mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
-       val = mt76_rr(dev, MT_LPON_UTTR0);
-       sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset;
-
-       if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
-               mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
-
-       sta->rate_count = 2 * MT7615_RATE_RETRY * sta->n_rates;
-       sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
-
-       return 0;
-}
-
-static int
-__mt7663u_mac_set_key(struct mt7615_dev *dev,
-                     struct mt7615_wtbl_desc *wd)
-{
-       struct mt7615_key_desc *key = &wd->key;
-       struct mt7615_sta *sta = wd->sta;
-       enum mt7615_cipher_type cipher;
-       struct mt76_wcid *wcid;
-       int err;
-
-       lockdep_assert_held(&dev->mt76.mutex);
-
-       if (!sta)
-               return -EINVAL;
-
-       cipher = mt7615_mac_get_cipher(key->cipher);
-       if (cipher == MT_CIPHER_NONE)
-               return -EOPNOTSUPP;
-
-       wcid = &wd->sta->wcid;
-
-       mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, key->cmd);
-       err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen,
-                                        cipher, key->cmd);
-       if (err < 0)
-               return err;
-
-       err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx,
-                                       key->cmd);
-       if (err < 0)
-               return err;
-
-       if (key->cmd == SET_KEY)
-               wcid->cipher |= BIT(cipher);
-       else
-               wcid->cipher &= ~BIT(cipher);
-
-       return 0;
-}
-
-void mt7663u_wtbl_work(struct work_struct *work)
+static void mt7663u_init_work(struct work_struct *work)
 {
-       struct mt7615_wtbl_desc *wd, *wd_next;
        struct mt7615_dev *dev;
 
-       dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
-                                               wtbl_work);
-
-       list_for_each_entry_safe(wd, wd_next, &dev->wd_head, node) {
-               spin_lock_bh(&dev->mt76.lock);
-               list_del(&wd->node);
-               spin_unlock_bh(&dev->mt76.lock);
-
-               mutex_lock(&dev->mt76.mutex);
-               switch (wd->type) {
-               case MT7615_WTBL_RATE_DESC:
-                       __mt7663u_mac_set_rates(dev, wd);
-                       break;
-               case MT7615_WTBL_KEY_DESC:
-                       __mt7663u_mac_set_key(dev, wd);
-                       break;
-               }
-               mutex_unlock(&dev->mt76.mutex);
-
-               kfree(wd);
-       }
-}
-
-static void
-mt7663u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
-                       struct mt76_queue_entry *e)
-{
-       skb_pull(e->skb, MT_USB_HDR_SIZE + MT_USB_TXD_SIZE);
-       mt76_tx_complete_skb(mdev, e->skb);
-}
-
-static int
-mt7663u_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
-                      enum mt76_txq_id qid, struct mt76_wcid *wcid,
-                      struct ieee80211_sta *sta,
-                      struct mt76_tx_info *tx_info)
-{
-       struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
-
-       if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
-               struct mt7615_sta *msta;
-
-               msta = container_of(wcid, struct mt7615_sta, wcid);
-               spin_lock_bh(&dev->mt76.lock);
-               mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0],
-                                    msta->rates);
-               msta->rate_probe = true;
-               spin_unlock_bh(&dev->mt76.lock);
-       }
-       mt7663u_mac_write_txwi(dev, wcid, qid, sta, tx_info->skb);
-
-       return mt76u_skb_dma_info(tx_info->skb, tx_info->skb->len);
-}
-
-static bool mt7663u_tx_status_data(struct mt76_dev *mdev, u8 *update)
-{
-       struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
-
-       mutex_lock(&dev->mt76.mutex);
-       mt7615_mac_sta_poll(dev);
-       mutex_unlock(&dev->mt76.mutex);
+       dev = container_of(work, struct mt7615_dev, mcu_work);
+       if (mt7663u_mcu_init(dev))
+               return;
 
-       return 0;
+       mt7615_mcu_set_eeprom(dev);
+       mt7615_mac_init(dev);
+       mt7615_phy_init(dev);
+       mt7615_mcu_del_wtbl_all(dev);
+       mt7615_check_offload_capability(dev);
 }
 
 static int mt7663u_probe(struct usb_interface *usb_intf,
@@ -271,9 +60,9 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
        static const struct mt76_driver_ops drv_ops = {
                .txwi_size = MT_USB_TXD_SIZE,
                .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ,
-               .tx_prepare_skb = mt7663u_tx_prepare_skb,
-               .tx_complete_skb = mt7663u_tx_complete_skb,
-               .tx_status_data = mt7663u_tx_status_data,
+               .tx_prepare_skb = mt7663_usb_sdio_tx_prepare_skb,
+               .tx_complete_skb = mt7663_usb_sdio_tx_complete_skb,
+               .tx_status_data = mt7663_usb_sdio_tx_status_data,
                .rx_skb = mt7615_queue_rx_skb,
                .sta_ps = mt7615_sta_ps,
                .sta_add = mt7615_mac_sta_add,
@@ -303,7 +92,8 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
 
        usb_set_intfdata(usb_intf, dev);
 
-       dev->reg_map = mt7663u_reg_map;
+       INIT_WORK(&dev->mcu_work, mt7663u_init_work);
+       dev->reg_map = mt7663_usb_sdio_reg_map;
        dev->ops = ops;
        ret = mt76u_init(mdev, usb_intf, true);
        if (ret < 0)
@@ -342,7 +132,7 @@ alloc_queues:
        if (ret)
                goto error_free_q;
 
-       ret = mt7663u_register_device(dev);
+       ret = mt7663_usb_sdio_register_device(dev);
        if (ret)
                goto error_free_q;
 
@@ -351,11 +141,10 @@ alloc_queues:
 error_free_q:
        mt76u_queues_deinit(&dev->mt76);
 error:
-       mt76u_deinit(&dev->mt76);
        usb_set_intfdata(usb_intf, NULL);
        usb_put_dev(interface_to_usbdev(usb_intf));
 
-       ieee80211_free_hw(mdev->hw);
+       mt76_free_device(&dev->mt76);
 
        return ret;
 }
@@ -373,8 +162,7 @@ static void mt7663u_disconnect(struct usb_interface *usb_intf)
        usb_set_intfdata(usb_intf, NULL);
        usb_put_dev(interface_to_usbdev(usb_intf));
 
-       mt76u_deinit(&dev->mt76);
-       ieee80211_free_hw(dev->mt76.hw);
+       mt76_free_device(&dev->mt76);
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_init.c
deleted file mode 100644 (file)
index 1fbc960..0000000
+++ /dev/null
@@ -1,145 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2019 MediaTek Inc.
- *
- * Author: Felix Fietkau <nbd@nbd.name>
- *        Lorenzo Bianconi <lorenzo@kernel.org>
- *        Sean Wang <sean.wang@mediatek.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-#include "mt7615.h"
-#include "mac.h"
-#include "regs.h"
-
-static int mt7663u_dma_sched_init(struct mt7615_dev *dev)
-{
-       int i;
-
-       mt76_rmw(dev, MT_DMA_SHDL(MT_DMASHDL_PKT_MAX_SIZE),
-                MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
-                FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
-                FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8));
-
-       /* disable refill group 5 - group 15 and raise group 2
-        * and 3 as high priority.
-        */
-       mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_REFILL), 0xffe00006);
-       mt76_clear(dev, MT_DMA_SHDL(MT_DMASHDL_PAGE), BIT(16));
-
-       for (i = 0; i < 5; i++)
-               mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(i)),
-                       FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x3) |
-                       FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x1ff));
-
-       mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(0)), 0x42104210);
-       mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(1)), 0x42104210);
-
-       mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(2)), 0x4444);
-
-       /* group pririority from high to low:
-        * 15 (cmd groups) > 4 > 3 > 2 > 1 > 0.
-        */
-       mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET0), 0x6501234f);
-       mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987);
-       mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_OPTIONAL), 0x7004801c);
-
-       mt76_wr(dev, MT_UDMA_WLCFG_1,
-               FIELD_PREP(MT_WL_TX_TMOUT_LMT, 80000) |
-               FIELD_PREP(MT_WL_RX_AGG_PKT_LMT, 1));
-
-       /* setup UDMA Rx Flush */
-       mt76_clear(dev, MT_UDMA_WLCFG_0, MT_WL_RX_FLUSH);
-       /* hif reset */
-       mt76_set(dev, MT_HIF_RST, MT_HIF_LOGIC_RST_N);
-
-       mt76_set(dev, MT_UDMA_WLCFG_0,
-                MT_WL_RX_AGG_EN | MT_WL_RX_EN | MT_WL_TX_EN |
-                MT_WL_RX_MPSZ_PAD0 | MT_TICK_1US_EN |
-                MT_WL_TX_TMOUT_FUNC_EN);
-       mt76_rmw(dev, MT_UDMA_WLCFG_0, MT_WL_RX_AGG_LMT | MT_WL_RX_AGG_TO,
-                FIELD_PREP(MT_WL_RX_AGG_LMT, 32) |
-                FIELD_PREP(MT_WL_RX_AGG_TO, 100));
-
-       return 0;
-}
-
-static int mt7663u_init_hardware(struct mt7615_dev *dev)
-{
-       int ret, idx;
-
-       ret = mt7615_eeprom_init(dev, MT_EFUSE_BASE);
-       if (ret < 0)
-               return ret;
-
-       ret = mt7663u_dma_sched_init(dev);
-       if (ret)
-               return ret;
-
-       set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
-
-       /* Beacon and mgmt frames should occupy wcid 0 */
-       idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
-       if (idx)
-               return -ENOSPC;
-
-       dev->mt76.global_wcid.idx = idx;
-       dev->mt76.global_wcid.hw_key_idx = -1;
-       rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
-
-       return 0;
-}
-
-static void mt7663u_init_work(struct work_struct *work)
-{
-       struct mt7615_dev *dev;
-
-       dev = container_of(work, struct mt7615_dev, mcu_work);
-       if (mt7663u_mcu_init(dev))
-               return;
-
-       mt7615_mcu_set_eeprom(dev);
-       mt7615_mac_init(dev);
-       mt7615_phy_init(dev);
-       mt7615_mcu_del_wtbl_all(dev);
-       mt7615_check_offload_capability(dev);
-}
-
-int mt7663u_register_device(struct mt7615_dev *dev)
-{
-       struct ieee80211_hw *hw = mt76_hw(dev);
-       int err;
-
-       INIT_WORK(&dev->wtbl_work, mt7663u_wtbl_work);
-       INIT_WORK(&dev->mcu_work, mt7663u_init_work);
-       INIT_LIST_HEAD(&dev->wd_head);
-       mt7615_init_device(dev);
-
-       err = mt7663u_init_hardware(dev);
-       if (err)
-               return err;
-
-       hw->extra_tx_headroom += MT_USB_HDR_SIZE + MT_USB_TXD_SIZE;
-       /* check hw sg support in order to enable AMSDU */
-       hw->max_tx_fragments = dev->mt76.usb.sg_en ? MT_HW_TXP_MAX_BUF_NUM : 1;
-
-       err = mt76_register_device(&dev->mt76, true, mt7615_rates,
-                                  ARRAY_SIZE(mt7615_rates));
-       if (err < 0)
-               return err;
-
-       if (!dev->mt76.usb.sg_en) {
-               struct ieee80211_sta_vht_cap *vht_cap;
-
-               /* decrease max A-MSDU size if SG is not supported */
-               vht_cap = &dev->mphy.sband_5g.sband.vht_cap;
-               vht_cap->cap &= ~IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
-       }
-
-       ieee80211_queue_work(hw, &dev->mcu_work);
-       mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband);
-       mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband);
-
-       return mt7615_init_debugfs(dev);
-}
index cd709fd..0b33df3 100644 (file)
@@ -28,13 +28,13 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
        else
                ep = MT_EP_OUT_AC_BE;
 
-       ret = mt76u_skb_dma_info(skb, skb->len);
+       put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len)));
+       ret = mt76_skb_adjust_pad(skb);
        if (ret < 0)
                goto out;
 
        ret = mt76u_bulk_msg(&dev->mt76, skb->data, skb->len, NULL,
                             1000, ep);
-       dev_kfree_skb(skb);
        if (ret < 0)
                goto out;
 
@@ -43,6 +43,7 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
 
 out:
        mutex_unlock(&mdev->mcu.mutex);
+       dev_kfree_skb(skb);
 
        return ret;
 }
@@ -60,6 +61,8 @@ int mt7663u_mcu_init(struct mt7615_dev *dev)
 
        dev->mt76.mcu_ops = &mt7663u_mcu_ops,
 
+       /* usb does not support runtime-pm */
+       clear_bit(MT76_STATE_PM, &dev->mphy.state);
        mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
 
        if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
new file mode 100644 (file)
index 0000000..6dffdaa
--- /dev/null
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ *        Sean Wang <sean.wang@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "mt7615.h"
+#include "mac.h"
+#include "mcu.h"
+#include "regs.h"
+
+const u32 mt7663_usb_sdio_reg_map[] = {
+       [MT_TOP_CFG_BASE]       = 0x80020000,
+       [MT_HW_BASE]            = 0x80000000,
+       [MT_DMA_SHDL_BASE]      = 0x5000a000,
+       [MT_HIF_BASE]           = 0x50000000,
+       [MT_CSR_BASE]           = 0x40000000,
+       [MT_EFUSE_ADDR_BASE]    = 0x78011000,
+       [MT_TOP_MISC_BASE]      = 0x81020000,
+       [MT_PLE_BASE]           = 0x82060000,
+       [MT_PSE_BASE]           = 0x82068000,
+       [MT_PP_BASE]            = 0x8206c000,
+       [MT_WTBL_BASE_ADDR]     = 0x820e0000,
+       [MT_CFG_BASE]           = 0x820f0000,
+       [MT_AGG_BASE]           = 0x820f2000,
+       [MT_ARB_BASE]           = 0x820f3000,
+       [MT_TMAC_BASE]          = 0x820f4000,
+       [MT_RMAC_BASE]          = 0x820f5000,
+       [MT_DMA_BASE]           = 0x820f7000,
+       [MT_PF_BASE]            = 0x820f8000,
+       [MT_WTBL_BASE_ON]       = 0x820f9000,
+       [MT_WTBL_BASE_OFF]      = 0x820f9800,
+       [MT_LPON_BASE]          = 0x820fb000,
+       [MT_MIB_BASE]           = 0x820fd000,
+};
+EXPORT_SYMBOL_GPL(mt7663_usb_sdio_reg_map);
+
+static void
+mt7663_usb_sdio_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid,
+                          enum mt76_txq_id qid, struct ieee80211_sta *sta,
+                          struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_key_conf *key = info->control.hw_key;
+       __le32 *txwi;
+       int pid;
+
+       if (!wcid)
+               wcid = &dev->mt76.global_wcid;
+
+       pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+
+       txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE);
+       memset(txwi, 0, MT_USB_TXD_SIZE);
+       mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false);
+       skb_push(skb, MT_USB_TXD_SIZE);
+}
+
+static int
+mt7663_usb_sdio_set_rates(struct mt7615_dev *dev,
+                         struct mt7615_wtbl_desc *wd)
+{
+       struct mt7615_rate_desc *rate = &wd->rate;
+       struct mt7615_sta *sta = wd->sta;
+       u32 w5, w27, addr, val;
+
+       lockdep_assert_held(&dev->mt76.mutex);
+
+       if (!sta)
+               return -EINVAL;
+
+       if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
+               return -ETIMEDOUT;
+
+       addr = mt7615_mac_wtbl_addr(dev, sta->wcid.idx);
+
+       w27 = mt76_rr(dev, addr + 27 * 4);
+       w27 &= ~MT_WTBL_W27_CC_BW_SEL;
+       w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rate->bw);
+
+       w5 = mt76_rr(dev, addr + 5 * 4);
+       w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE |
+               MT_WTBL_W5_MPDU_OK_COUNT |
+               MT_WTBL_W5_MPDU_FAIL_COUNT |
+               MT_WTBL_W5_RATE_IDX);
+       w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rate->bw) |
+             FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE,
+                        rate->bw_idx ? rate->bw_idx - 1 : 7);
+
+       mt76_wr(dev, MT_WTBL_RIUCR0, w5);
+
+       mt76_wr(dev, MT_WTBL_RIUCR1,
+               FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rate->probe_val) |
+               FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rate->val[0]) |
+               FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rate->val[1]));
+
+       mt76_wr(dev, MT_WTBL_RIUCR2,
+               FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rate->val[1] >> 8) |
+               FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rate->val[1]) |
+               FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rate->val[2]) |
+               FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rate->val[2]));
+
+       mt76_wr(dev, MT_WTBL_RIUCR3,
+               FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rate->val[2] >> 4) |
+               FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rate->val[3]) |
+               FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rate->val[3]));
+
+       mt76_wr(dev, MT_WTBL_UPDATE,
+               FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, sta->wcid.idx) |
+               MT_WTBL_UPDATE_RATE_UPDATE |
+               MT_WTBL_UPDATE_TX_COUNT_CLEAR);
+
+       mt76_wr(dev, addr + 27 * 4, w27);
+
+       sta->rate_probe = sta->rateset[rate->rateset].probe_rate.idx != -1;
+
+       mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
+       val = mt76_rr(dev, MT_LPON_UTTR0);
+       sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset;
+
+       if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
+               mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
+
+       sta->rate_count = 2 * MT7615_RATE_RETRY * sta->n_rates;
+       sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+
+       return 0;
+}
+
+static int
+mt7663_usb_sdio_set_key(struct mt7615_dev *dev,
+                       struct mt7615_wtbl_desc *wd)
+{
+       struct mt7615_key_desc *key = &wd->key;
+       struct mt7615_sta *sta = wd->sta;
+       enum mt7615_cipher_type cipher;
+       struct mt76_wcid *wcid;
+       int err;
+
+       lockdep_assert_held(&dev->mt76.mutex);
+
+       if (!sta) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       cipher = mt7615_mac_get_cipher(key->cipher);
+       if (cipher == MT_CIPHER_NONE) {
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+
+       wcid = &wd->sta->wcid;
+
+       mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, key->cmd);
+       err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen,
+                                        cipher, key->cmd);
+       if (err < 0)
+               goto out;
+
+       err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx,
+                                       key->cmd);
+       if (err < 0)
+               goto out;
+
+       if (key->cmd == SET_KEY)
+               wcid->cipher |= BIT(cipher);
+       else
+               wcid->cipher &= ~BIT(cipher);
+out:
+       kfree(key->key);
+
+       return err;
+}
+
+void mt7663_usb_sdio_wtbl_work(struct work_struct *work)
+{
+       struct mt7615_wtbl_desc *wd, *wd_next;
+       struct list_head wd_list;
+       struct mt7615_dev *dev;
+
+       dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev,
+                                               wtbl_work);
+
+       INIT_LIST_HEAD(&wd_list);
+       spin_lock_bh(&dev->mt76.lock);
+       list_splice_init(&dev->wd_head, &wd_list);
+       spin_unlock_bh(&dev->mt76.lock);
+
+       list_for_each_entry_safe(wd, wd_next, &wd_list, node) {
+               list_del(&wd->node);
+
+               mt7615_mutex_acquire(dev);
+
+               switch (wd->type) {
+               case MT7615_WTBL_RATE_DESC:
+                       mt7663_usb_sdio_set_rates(dev, wd);
+                       break;
+               case MT7615_WTBL_KEY_DESC:
+                       mt7663_usb_sdio_set_key(dev, wd);
+                       break;
+               }
+
+               mt7615_mutex_release(dev);
+
+               kfree(wd);
+       }
+}
+EXPORT_SYMBOL_GPL(mt7663_usb_sdio_wtbl_work);
+
+bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update)
+{
+       struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+       mt7615_mutex_acquire(dev);
+       mt7615_mac_sta_poll(dev);
+       mt7615_mutex_release(dev);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_status_data);
+
+void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
+                                    enum mt76_txq_id qid,
+                                    struct mt76_queue_entry *e)
+{
+       unsigned int headroom = MT_USB_TXD_SIZE;
+
+       if (mt76_is_usb(mdev))
+               headroom += MT_USB_HDR_SIZE;
+       skb_pull(e->skb, headroom);
+
+       mt76_tx_complete_skb(mdev, e->skb);
+}
+EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_complete_skb);
+
+int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+                                  enum mt76_txq_id qid, struct mt76_wcid *wcid,
+                                  struct ieee80211_sta *sta,
+                                  struct mt76_tx_info *tx_info)
+{
+       struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
+       struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+       struct sk_buff *skb = tx_info->skb;
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+       if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) &&
+           !msta->rate_probe) {
+               /* request to configure sampling rate */
+               spin_lock_bh(&dev->mt76.lock);
+               mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0],
+                                    msta->rates);
+               spin_unlock_bh(&dev->mt76.lock);
+       }
+
+       mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, skb);
+       if (mt76_is_usb(mdev))
+               put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len)));
+
+       return mt76_skb_adjust_pad(skb);
+}
+EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_prepare_skb);
+
+static int mt7663u_dma_sched_init(struct mt7615_dev *dev)
+{
+       int i;
+
+       mt76_rmw(dev, MT_DMA_SHDL(MT_DMASHDL_PKT_MAX_SIZE),
+                MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE,
+                FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) |
+                FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8));
+
+       /* disable refill group 5 - group 15 and raise group 2
+        * and 3 as high priority.
+        */
+       mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_REFILL), 0xffe00006);
+       mt76_clear(dev, MT_DMA_SHDL(MT_DMASHDL_PAGE), BIT(16));
+
+       for (i = 0; i < 5; i++)
+               mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(i)),
+                       FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x3) |
+                       FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x1ff));
+
+       mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(0)), 0x42104210);
+       mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(1)), 0x42104210);
+
+       mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(2)), 0x4444);
+
+       /* group priority from high to low:
+        * 15 (cmd groups) > 4 > 3 > 2 > 1 > 0.
+        */
+       mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET0), 0x6501234f);
+       mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987);
+       mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_OPTIONAL), 0x7004801c);
+
+       mt76_wr(dev, MT_UDMA_WLCFG_1,
+               FIELD_PREP(MT_WL_TX_TMOUT_LMT, 80000) |
+               FIELD_PREP(MT_WL_RX_AGG_PKT_LMT, 1));
+
+       /* setup UDMA Rx Flush */
+       mt76_clear(dev, MT_UDMA_WLCFG_0, MT_WL_RX_FLUSH);
+       /* hif reset */
+       mt76_set(dev, MT_HIF_RST, MT_HIF_LOGIC_RST_N);
+
+       mt76_set(dev, MT_UDMA_WLCFG_0,
+                MT_WL_RX_AGG_EN | MT_WL_RX_EN | MT_WL_TX_EN |
+                MT_WL_RX_MPSZ_PAD0 | MT_TICK_1US_EN |
+                MT_WL_TX_TMOUT_FUNC_EN);
+       mt76_rmw(dev, MT_UDMA_WLCFG_0, MT_WL_RX_AGG_LMT | MT_WL_RX_AGG_TO,
+                FIELD_PREP(MT_WL_RX_AGG_LMT, 32) |
+                FIELD_PREP(MT_WL_RX_AGG_TO, 100));
+
+       return 0;
+}
+
+static int mt7663_usb_sdio_init_hardware(struct mt7615_dev *dev)
+{
+       int ret, idx;
+
+       ret = mt7615_eeprom_init(dev, MT_EFUSE_BASE);
+       if (ret < 0)
+               return ret;
+
+       if (mt76_is_usb(&dev->mt76)) {
+               ret = mt7663u_dma_sched_init(dev);
+               if (ret)
+                       return ret;
+       }
+
+       set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+
+       /* Beacon and mgmt frames should occupy wcid 0 */
+       idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
+       if (idx)
+               return -ENOSPC;
+
+       dev->mt76.global_wcid.idx = idx;
+       dev->mt76.global_wcid.hw_key_idx = -1;
+       rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
+
+       return 0;
+}
+
+int mt7663_usb_sdio_register_device(struct mt7615_dev *dev)
+{
+       struct ieee80211_hw *hw = mt76_hw(dev);
+       int err;
+
+       INIT_WORK(&dev->wtbl_work, mt7663_usb_sdio_wtbl_work);
+       INIT_LIST_HEAD(&dev->wd_head);
+       mt7615_init_device(dev);
+
+       err = mt7663_usb_sdio_init_hardware(dev);
+       if (err)
+               return err;
+
+       /* check hw sg support in order to enable AMSDU */
+       if (dev->mt76.usb.sg_en || mt76_is_sdio(&dev->mt76))
+               hw->max_tx_fragments = MT_HW_TXP_MAX_BUF_NUM;
+       else
+               hw->max_tx_fragments = 1;
+       hw->extra_tx_headroom += MT_USB_TXD_SIZE;
+       if (mt76_is_usb(&dev->mt76))
+               hw->extra_tx_headroom += MT_USB_HDR_SIZE;
+
+       err = mt76_register_device(&dev->mt76, true, mt7615_rates,
+                                  ARRAY_SIZE(mt7615_rates));
+       if (err < 0)
+               return err;
+
+       if (!dev->mt76.usb.sg_en) {
+               struct ieee80211_sta_vht_cap *vht_cap;
+
+               /* decrease max A-MSDU size if SG is not supported */
+               vht_cap = &dev->mphy.sband_5g.sband.vht_cap;
+               vht_cap->cap &= ~IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+       }
+
+       ieee80211_queue_work(hw, &dev->mcu_work);
+       mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband);
+       mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband);
+
+       return mt7615_init_debugfs(dev);
+}
+EXPORT_SYMBOL_GPL(mt7663_usb_sdio_register_device);
+
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_LICENSE("Dual BSD/GPL");
index 5535b9c..ce6b286 100644 (file)
@@ -277,9 +277,8 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
 err:
        usb_set_intfdata(usb_intf, NULL);
        usb_put_dev(interface_to_usbdev(usb_intf));
-       mt76u_deinit(&dev->mt76);
+       mt76_free_device(&dev->mt76);
 
-       ieee80211_free_hw(mdev->hw);
        return ret;
 }
 
@@ -297,8 +296,7 @@ static void mt76x0_disconnect(struct usb_interface *usb_intf)
        usb_set_intfdata(usb_intf, NULL);
        usb_put_dev(interface_to_usbdev(usb_intf));
 
-       mt76u_deinit(&dev->mt76);
-       ieee80211_free_hw(dev->mt76.hw);
+       mt76_free_device(&dev->mt76);
 }
 
 static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
index 4c9bbc7..4660b96 100644 (file)
@@ -80,7 +80,6 @@ struct mt76x02_dev {
 
        struct mutex phy_mutex;
 
-       u16 vif_mask;
        u16 chainmask;
 
        u8 txdone_seq;
index 5fda6e7..bacb1f1 100644 (file)
@@ -439,7 +439,7 @@ static void mt76x02_reset_state(struct mt76x02_dev *dev)
                memset(msta, 0, sizeof(*msta));
        }
 
-       dev->vif_mask = 0;
+       dev->mphy.vif_mask = 0;
        dev->mt76.beacon_mask = 0;
 }
 
index 0180b62..37321e6 100644 (file)
@@ -56,8 +56,9 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
         */
        info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
               FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
+       put_unaligned_le32(info, skb_push(skb, sizeof(info)));
 
-       return mt76u_skb_dma_info(skb, info);
+       return mt76_skb_adjust_pad(skb);
 }
 
 int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
index a30bb53..e43d13d 100644 (file)
@@ -87,8 +87,10 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
        u32 info;
        int ret;
 
-       if (test_bit(MT76_REMOVED, &dev->phy.state))
-               return 0;
+       if (test_bit(MT76_REMOVED, &dev->phy.state)) {
+               ret = 0;
+               goto out;
+       }
 
        if (wait_resp) {
                seq = ++dev->mcu.msg_seq & 0xf;
@@ -111,6 +113,7 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
        if (wait_resp)
                ret = mt76x02u_mcu_wait_resp(dev, seq);
 
+out:
        consume_skb(skb);
 
        return ret;
index 44822a8..dbd4077 100644 (file)
@@ -305,7 +305,7 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        unsigned int idx = 0;
 
        /* Allow to change address in HW if we create first interface. */
-       if (!dev->vif_mask &&
+       if (!dev->mphy.vif_mask &&
            (((vif->addr[0] ^ dev->mt76.macaddr[0]) & ~GENMASK(4, 1)) ||
             memcmp(vif->addr + 1, dev->mt76.macaddr + 1, ETH_ALEN - 1)))
                mt76x02_mac_setaddr(dev, vif->addr);
@@ -330,11 +330,11 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
                idx += 8;
 
        /* vif is already set or idx is 8 for AP/Mesh/... */
-       if (dev->vif_mask & BIT(idx) ||
+       if (dev->mphy.vif_mask & BIT(idx) ||
            (vif->type != NL80211_IFTYPE_STATION && idx > 7))
                return -EBUSY;
 
-       dev->vif_mask |= BIT(idx);
+       dev->mphy.vif_mask |= BIT(idx);
 
        mt76x02_vif_init(dev, vif, idx);
        return 0;
@@ -348,7 +348,7 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw,
        struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
 
        mt76_txq_remove(&dev->mt76, vif->txq);
-       dev->vif_mask &= ~BIT(mvif->idx);
+       dev->mphy.vif_mask &= ~BIT(mvif->idx);
 }
 EXPORT_SYMBOL_GPL(mt76x02_remove_interface);
 
index eca95b7..d01f47c 100644 (file)
@@ -39,6 +39,7 @@ static inline bool mt76x2_channel_silent(struct mt76x02_dev *dev)
 extern const struct ieee80211_ops mt76x2_ops;
 
 int mt76x2_register_device(struct mt76x02_dev *dev);
+int mt76x2_resume_device(struct mt76x02_dev *dev);
 
 void mt76x2_phy_power_on(struct mt76x02_dev *dev);
 void mt76x2_stop_hardware(struct mt76x02_dev *dev);
index 53ca0ce..6dfb0df 100644 (file)
@@ -9,7 +9,7 @@
 
 #include "mt76x2.h"
 
-static const struct pci_device_id mt76pci_device_table[] = {
+static const struct pci_device_id mt76x2e_device_table[] = {
        { PCI_DEVICE(0x14c3, 0x7662) },
        { PCI_DEVICE(0x14c3, 0x7612) },
        { PCI_DEVICE(0x14c3, 0x7602) },
@@ -17,7 +17,7 @@ static const struct pci_device_id mt76pci_device_table[] = {
 };
 
 static int
-mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        static const struct mt76_driver_ops drv_ops = {
                .txwi_size = sizeof(struct mt76x02_txwi),
@@ -93,7 +93,7 @@ error:
 }
 
 static void
-mt76pci_remove(struct pci_dev *pdev)
+mt76x2e_remove(struct pci_dev *pdev)
 {
        struct mt76_dev *mdev = pci_get_drvdata(pdev);
        struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
@@ -103,16 +103,72 @@ mt76pci_remove(struct pci_dev *pdev)
        mt76_free_device(mdev);
 }
 
-MODULE_DEVICE_TABLE(pci, mt76pci_device_table);
+static int __maybe_unused
+mt76x2e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct mt76_dev *mdev = pci_get_drvdata(pdev);
+       int i, err;
+
+       napi_disable(&mdev->tx_napi);
+       tasklet_kill(&mdev->pre_tbtt_tasklet);
+       tasklet_kill(&mdev->tx_tasklet);
+
+       mt76_for_each_q_rx(mdev, i)
+               napi_disable(&mdev->napi[i]);
+
+       pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
+       pci_save_state(pdev);
+       err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
+       if (err)
+               goto restore;
+
+       return 0;
+
+restore:
+       mt76_for_each_q_rx(mdev, i)
+               napi_enable(&mdev->napi[i]);
+       napi_enable(&mdev->tx_napi);
+
+       return err;
+}
+
+static int __maybe_unused
+mt76x2e_resume(struct pci_dev *pdev)
+{
+       struct mt76_dev *mdev = pci_get_drvdata(pdev);
+       struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+       int i, err;
+
+       err = pci_set_power_state(pdev, PCI_D0);
+       if (err)
+               return err;
+
+       pci_restore_state(pdev);
+
+       mt76_for_each_q_rx(mdev, i) {
+               napi_enable(&mdev->napi[i]);
+               napi_schedule(&mdev->napi[i]);
+       }
+       napi_enable(&mdev->tx_napi);
+       napi_schedule(&mdev->tx_napi);
+
+       return mt76x2_resume_device(dev);
+}
+
+MODULE_DEVICE_TABLE(pci, mt76x2e_device_table);
 MODULE_FIRMWARE(MT7662_FIRMWARE);
 MODULE_FIRMWARE(MT7662_ROM_PATCH);
 MODULE_LICENSE("Dual BSD/GPL");
 
 static struct pci_driver mt76pci_driver = {
        .name           = KBUILD_MODNAME,
-       .id_table       = mt76pci_device_table,
-       .probe          = mt76pci_probe,
-       .remove         = mt76pci_remove,
+       .id_table       = mt76x2e_device_table,
+       .probe          = mt76x2e_probe,
+       .remove         = mt76x2e_remove,
+#ifdef CONFIG_PM
+       .suspend        = mt76x2e_suspend,
+       .resume         = mt76x2e_resume,
+#endif /* CONFIG_PM */
 };
 
 module_pci_driver(mt76pci_driver);
index f27774f..101a0fe 100644 (file)
@@ -217,6 +217,23 @@ mt76x2_power_on(struct mt76x02_dev *dev)
        mt76x2_power_on_rf(dev, 1);
 }
 
+int mt76x2_resume_device(struct mt76x02_dev *dev)
+{
+       int err;
+
+       mt76x02_dma_disable(dev);
+       mt76x2_reset_wlan(dev, true);
+       mt76x2_power_on(dev);
+
+       err = mt76x2_mac_reset(dev, true);
+       if (err)
+               return err;
+
+       mt76x02_mac_start(dev);
+
+       return mt76x2_mcu_init(dev);
+}
+
 static int mt76x2_init_hardware(struct mt76x02_dev *dev)
 {
        int ret;
index 3a4e417..4e003c7 100644 (file)
@@ -16,6 +16,7 @@ static const struct usb_device_id mt76x2u_device_table[] = {
        { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USBAC1200 - Alfa AWUS036ACM */
        { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */
        { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
+       { USB_DEVICE(0x0e8d, 0x7632) }, /* HC-M7662BU1 */
        { USB_DEVICE(0x2c4e, 0x0103) }, /* Mercury UD13 */
        { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
        { USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */
@@ -74,8 +75,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
        return 0;
 
 err:
-       ieee80211_free_hw(mt76_hw(dev));
-       mt76u_deinit(&dev->mt76);
+       mt76_free_device(&dev->mt76);
        usb_set_intfdata(intf, NULL);
        usb_put_dev(udev);
 
@@ -91,9 +91,7 @@ static void mt76x2u_disconnect(struct usb_interface *intf)
        set_bit(MT76_REMOVED, &dev->mphy.state);
        ieee80211_unregister_hw(hw);
        mt76x2u_cleanup(dev);
-       mt76u_deinit(&dev->mt76);
-
-       ieee80211_free_hw(hw);
+       mt76_free_device(&dev->mt76);
        usb_set_intfdata(intf, NULL);
        usb_put_dev(udev);
 }
index 5278bee..38f473d 100644 (file)
@@ -178,7 +178,14 @@ mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s)
        seq_printf(s, "Tx Beamformee feedback triggered counts: %ld\n",
                   FIELD_GET(MT_ETBF_TX_FB_TRI, cnt));
 
-       /* Tx SU counters */
+       /* Tx SU & MU counters */
+       cnt = mt76_rr(dev, MT_MIB_SDR34(ext_phy));
+       seq_printf(s, "Tx multi-user Beamforming counts: %ld\n",
+                  FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt));
+       cnt = mt76_rr(dev, MT_MIB_DR8(ext_phy));
+       seq_printf(s, "Tx multi-user MPDU counts: %d\n", cnt);
+       cnt = mt76_rr(dev, MT_MIB_DR9(ext_phy));
+       seq_printf(s, "Tx multi-user successful MPDU counts: %d\n", cnt);
        cnt = mt76_rr(dev, MT_MIB_DR11(ext_phy));
        seq_printf(s, "Tx single-user successful MPDU counts: %d\n", cnt);
 
@@ -384,6 +391,7 @@ int mt7915_init_debugfs(struct mt7915_dev *dev)
        return 0;
 }
 
+#ifdef CONFIG_MAC80211_DEBUGFS
 /** per-station debugfs **/
 
 /* usage: <tx mode> <ldpc> <stbc> <bw> <gi> <nss> <mcs> */
@@ -461,3 +469,4 @@ void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        debugfs_create_file("fixed_rate", 0600, dir, sta, &fops_fixed_rate);
        debugfs_create_file("stats", 0400, dir, sta, &fops_sta_stats);
 }
+#endif
index 766185d..a8832c5 100644 (file)
@@ -79,26 +79,27 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
        }
 }
 
+static void
+mt7915_tx_cleanup(struct mt7915_dev *dev)
+{
+       mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
+       mt76_queue_tx_cleanup(dev, MT_TXQ_MCU_WA, false);
+       mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
+       mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
+}
+
 static int mt7915_poll_tx(struct napi_struct *napi, int budget)
 {
-       static const u8 queue_map[] = {
-               MT_TXQ_MCU,
-               MT_TXQ_MCU_WA,
-               MT_TXQ_BE
-       };
        struct mt7915_dev *dev;
-       int i;
 
        dev = container_of(napi, struct mt7915_dev, mt76.tx_napi);
 
-       for (i = 0; i < ARRAY_SIZE(queue_map); i++)
-               mt76_queue_tx_cleanup(dev, queue_map[i], false);
+       mt7915_tx_cleanup(dev);
 
        if (napi_complete_done(napi, 0))
                mt7915_irq_enable(dev, MT_INT_TX_DONE_ALL);
 
-       for (i = 0; i < ARRAY_SIZE(queue_map); i++)
-               mt76_queue_tx_cleanup(dev, queue_map[i], false);
+       mt7915_tx_cleanup(dev);
 
        mt7915_mac_sta_poll(dev);
 
index aadf56e..e90d008 100644 (file)
@@ -417,11 +417,6 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
 
                he_cap_elem->mac_cap_info[0] =
                        IEEE80211_HE_MAC_CAP0_HTC_HE;
-               he_cap_elem->mac_cap_info[1] =
-                       IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US |
-                       IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_1;
-               he_cap_elem->mac_cap_info[2] =
-                       IEEE80211_HE_MAC_CAP2_BSR;
                he_cap_elem->mac_cap_info[3] =
                        IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
                        IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED;
@@ -443,27 +438,27 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
                        IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
                        IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
 
-               /* TODO: OFDMA */
-
                switch (i) {
                case NL80211_IFTYPE_AP:
                        he_cap_elem->mac_cap_info[0] |=
                                IEEE80211_HE_MAC_CAP0_TWT_RES;
+                       he_cap_elem->mac_cap_info[2] |=
+                               IEEE80211_HE_MAC_CAP2_BSR;
                        he_cap_elem->mac_cap_info[4] |=
                                IEEE80211_HE_MAC_CAP4_BQR;
+                       he_cap_elem->mac_cap_info[5] |=
+                               IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX;
                        he_cap_elem->phy_cap_info[3] |=
                                IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
                                IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
                        he_cap_elem->phy_cap_info[6] |=
                                IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
-                       he_cap_elem->phy_cap_info[9] |=
-                               IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
                        break;
                case NL80211_IFTYPE_STATION:
                        he_cap_elem->mac_cap_info[0] |=
                                IEEE80211_HE_MAC_CAP0_TWT_REQ;
-                       he_cap_elem->mac_cap_info[3] |=
-                               IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED;
+                       he_cap_elem->mac_cap_info[1] |=
+                               IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
 
                        if (band == NL80211_BAND_2GHZ)
                                he_cap_elem->phy_cap_info[0] |=
@@ -473,18 +468,31 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
                                        IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G;
 
                        he_cap_elem->phy_cap_info[1] |=
-                               IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A;
+                               IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+                               IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
+                       he_cap_elem->phy_cap_info[3] |=
+                               IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
+                               IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
+                       he_cap_elem->phy_cap_info[6] |=
+                               IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
+                               IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
+                               IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
+                       he_cap_elem->phy_cap_info[7] |=
+                               IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
+                               IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
                        he_cap_elem->phy_cap_info[8] |=
                                IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
                                IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
-                               IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
+                               IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
+                               IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484;
                        he_cap_elem->phy_cap_info[9] |=
-                               IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
+                               IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
+                               IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
+                               IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+                               IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
+                               IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+                               IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
                        break;
-#ifdef CONFIG_MAC80211_MESH
-               case NL80211_IFTYPE_MESH_POINT:
-                       break;
-#endif
                }
 
                he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map);
index a264e30..6825afc 100644 (file)
@@ -178,14 +178,14 @@ void mt7915_mac_sta_poll(struct mt7915_dev *dev)
 
 static void
 mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
-                                struct mt7915_rxv *rxv,
-                                struct ieee80211_radiotap_he *he)
+                                struct ieee80211_radiotap_he *he,
+                                __le32 *rxv)
 {
        u32 ru_h, ru_l;
        u8 ru, offs = 0;
 
-       ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv->v[0]));
-       ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv->v[1]));
+       ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0]));
+       ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1]));
        ru = (u8)(ru_l | ru_h << 4);
 
        status->bw = RATE_INFO_BW_HE_RU;
@@ -228,7 +228,7 @@ mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
 static void
 mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
                              struct mt76_rx_status *status,
-                             struct mt7915_rxv *rxv)
+                             __le32 *rxv, u32 phy)
 {
        /* TODO: struct ieee80211_radiotap_he_mu */
        static const struct ieee80211_radiotap_he known = {
@@ -245,48 +245,45 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
                         HE_BITS(DATA2_TXOP_KNOWN),
        };
        struct ieee80211_radiotap_he *he = NULL;
-       __le32 v2 = rxv->v[2];
-       __le32 v11 = rxv->v[11];
-       __le32 v14 = rxv->v[14];
-       u32 ltf_size = le32_get_bits(v2, MT_CRXV_HE_LTF_SIZE) + 1;
+       u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1;
 
        he = skb_push(skb, sizeof(known));
        memcpy(he, &known, sizeof(known));
 
-       he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, v14) |
-                   HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, v2);
-       he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, v2) |
+       he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
+                   HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
+       he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
                    le16_encode_bits(ltf_size,
                                     IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
-       he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, v14) |
-                   HE_PREP(DATA6_DOPPLER, DOPPLER, v14);
+       he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
+                   HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);
 
-       switch (rxv->phy) {
+       switch (phy) {
        case MT_PHY_TYPE_HE_SU:
                he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
                             HE_BITS(DATA1_UL_DL_KNOWN) |
                             HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
                             HE_BITS(DATA1_SPTL_REUSE_KNOWN);
 
-               he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, v14) |
-                            HE_PREP(DATA3_UL_DL, UPLINK, v2);
-               he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, v11);
+               he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
+                            HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
+               he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
                break;
        case MT_PHY_TYPE_HE_EXT_SU:
                he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
                             HE_BITS(DATA1_UL_DL_KNOWN);
 
-               he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, v2);
+               he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
                break;
        case MT_PHY_TYPE_HE_MU:
                he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
                             HE_BITS(DATA1_UL_DL_KNOWN) |
                             HE_BITS(DATA1_SPTL_REUSE_KNOWN);
 
-               he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, v2);
-               he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, v11);
+               he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
+               he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
 
-               mt7915_mac_decode_he_radiotap_ru(status, rxv, he);
+               mt7915_mac_decode_he_radiotap_ru(status, he, rxv);
                break;
        case MT_PHY_TYPE_HE_TB:
                he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
@@ -295,12 +292,12 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
                             HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
                             HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
 
-               he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, v11) |
-                            HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, v11) |
-                            HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, v11) |
-                            HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, v11);
+               he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
+                            HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
+                            HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
+                            HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);
 
-               mt7915_mac_decode_he_radiotap_ru(status, rxv, he);
+               mt7915_mac_decode_he_radiotap_ru(status, he, rxv);
                break;
        default:
                break;
@@ -314,8 +311,9 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
        struct mt7915_phy *phy = &dev->phy;
        struct ieee80211_supported_band *sband;
        struct ieee80211_hdr *hdr;
-       struct mt7915_rxv rxv = {};
        __le32 *rxd = (__le32 *)skb->data;
+       __le32 *rxv = NULL;
+       u32 mode = 0;
        u32 rxd1 = le32_to_cpu(rxd[1]);
        u32 rxd2 = le32_to_cpu(rxd[2]);
        u32 rxd3 = le32_to_cpu(rxd[3]);
@@ -427,15 +425,14 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
        if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
                u32 v0, v1, v2;
 
-               memcpy(rxv.v, rxd, sizeof(rxv.v));
-
+               rxv = rxd;
                rxd += 2;
                if ((u8 *)rxd - skb->data >= skb->len)
                        return -EINVAL;
 
-               v0 = le32_to_cpu(rxv.v[0]);
-               v1 = le32_to_cpu(rxv.v[1]);
-               v2 = le32_to_cpu(rxv.v[2]);
+               v0 = le32_to_cpu(rxv[0]);
+               v1 = le32_to_cpu(rxv[1]);
+               v2 = le32_to_cpu(rxv[2]);
 
                if (v0 & MT_PRXV_HT_AD_CODE)
                        status->enc_flags |= RX_ENC_FLAG_LDPC;
@@ -466,9 +463,9 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
                                return -EINVAL;
 
                        idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0);
-                       rxv.phy = FIELD_GET(MT_CRXV_TX_MODE, v2);
+                       mode = FIELD_GET(MT_CRXV_TX_MODE, v2);
 
-                       switch (rxv.phy) {
+                       switch (mode) {
                        case MT_PHY_TYPE_CCK:
                                cck = true;
                                /* fall through */
@@ -503,8 +500,7 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
                                if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
                                        status->he_gi = gi;
 
-                               if (idx & MT_PRXV_TX_DCM)
-                                       status->he_dcm = true;
+                               status->he_dcm = !!(idx & MT_PRXV_TX_DCM);
                                break;
                        default:
                                return -EINVAL;
@@ -515,7 +511,7 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
                        case IEEE80211_STA_RX_BW_20:
                                break;
                        case IEEE80211_STA_RX_BW_40:
-                               if (rxv.phy & MT_PHY_TYPE_HE_EXT_SU &&
+                               if (mode & MT_PHY_TYPE_HE_EXT_SU &&
                                    (idx & MT_PRXV_TX_ER_SU_106T)) {
                                        status->bw = RATE_INFO_BW_HE_RU;
                                        status->he_ru =
@@ -535,7 +531,7 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
                        }
 
                        status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
-                       if (rxv.phy < MT_PHY_TYPE_HE_SU && gi)
+                       if (mode < MT_PHY_TYPE_HE_SU && gi)
                                status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
                }
        }
@@ -548,8 +544,8 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
                mt76_insert_ccmp_hdr(skb, key_id);
        }
 
-       if (status->flag & RX_FLAG_RADIOTAP_HE)
-               mt7915_mac_decode_he_radiotap(skb, status, &rxv);
+       if (rxv && status->flag & RX_FLAG_RADIOTAP_HE)
+               mt7915_mac_decode_he_radiotap(skb, status, rxv, mode);
 
        hdr = mt76_skb_get_hdr(skb);
        if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
@@ -591,16 +587,16 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
        fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
        fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
 
-       if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) {
-               q_idx = wmm_idx * MT7915_MAX_WMM_SETS +
-                       skb_get_queue_mapping(skb);
-               p_fmt = MT_TX_TYPE_CT;
-       } else if (beacon) {
-               q_idx = MT_LMAC_BCN0;
+       if (beacon) {
                p_fmt = MT_TX_TYPE_FW;
-       } else {
+               q_idx = MT_LMAC_BCN0;
+       } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
+               p_fmt = MT_TX_TYPE_CT;
                q_idx = MT_LMAC_ALTX0;
+       } else {
                p_fmt = MT_TX_TYPE_CT;
+               q_idx = wmm_idx * MT7915_MAX_WMM_SETS +
+                       mt7915_lmac_mapping(dev, skb_get_queue_mapping(skb));
        }
 
        val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
@@ -616,6 +612,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
              FIELD_PREP(MT_TXD1_TID,
                         skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
              FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
+
        if (ext_phy && q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
                val |= MT_TXD1_TGID;
 
index b9bc8b2..c8bb5ea 100644 (file)
@@ -128,13 +128,6 @@ enum rx_pkt_type {
 #define MT_CRXV_HE_BEAM_CHNG           BIT(13)
 #define MT_CRXV_HE_DOPPLER             BIT(16)
 
-struct mt7915_rxv {
-       u32 phy;
-
-       /* P-RXV: bit 0~1, C-RXV: bit 2~19 */
-       __le32 v[20];
-};
-
 enum tx_header_format {
        MT_HDR_FORMAT_802_3,
        MT_HDR_FORMAT_CMD,
@@ -149,16 +142,6 @@ enum tx_pkt_type {
        MT_TX_TYPE_FW,
 };
 
-enum tx_pkt_queue_idx {
-       MT_LMAC_AC00,
-       MT_LMAC_AC01,
-       MT_LMAC_AC02,
-       MT_LMAC_AC03,
-       MT_LMAC_ALTX0 = 0x10,
-       MT_LMAC_BMC0 = 0x10,
-       MT_LMAC_BCN0 = 0x12,
-};
-
 enum tx_port_idx {
        MT_TX_PORT_IDX_LMAC,
        MT_TX_PORT_IDX_MCU
index 05b5650..f95a0b5 100644 (file)
@@ -125,7 +125,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
 
        mutex_lock(&dev->mt76.mutex);
 
-       mvif->idx = ffs(~phy->vif_mask) - 1;
+       mvif->idx = ffs(~phy->mt76->vif_mask) - 1;
        if (mvif->idx >= MT7915_MAX_INTERFACES) {
                ret = -ENOSPC;
                goto out;
@@ -150,7 +150,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
        if (ret)
                goto out;
 
-       phy->vif_mask |= BIT(mvif->idx);
+       phy->mt76->vif_mask |= BIT(mvif->idx);
        phy->omac_mask |= BIT(mvif->omac_idx);
 
        idx = MT7915_WTBL_RESERVED - mvif->idx;
@@ -194,7 +194,7 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
                mt76_txq_remove(&dev->mt76, vif->txq);
 
        mutex_lock(&dev->mt76.mutex);
-       phy->vif_mask &= ~BIT(mvif->idx);
+       phy->mt76->vif_mask &= ~BIT(mvif->idx);
        phy->omac_mask &= ~BIT(mvif->omac_idx);
        mutex_unlock(&dev->mt76.mutex);
 
@@ -350,13 +350,12 @@ static int
 mt7915_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
               const struct ieee80211_tx_queue_params *params)
 {
+       struct mt7915_dev *dev = mt7915_hw_dev(hw);
        struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
 
        /* no need to update right away, we'll get BSS_CHANGED_QOS */
-       mvif->wmm[queue].cw_min = params->cw_min;
-       mvif->wmm[queue].cw_max = params->cw_max;
-       mvif->wmm[queue].aifs = params->aifs;
-       mvif->wmm[queue].txop = params->txop;
+       queue = mt7915_lmac_mapping(dev, queue);
+       mvif->queue_params[queue] = *params;
 
        return 0;
 }
index c8c12c7..eaed5ef 100644 (file)
@@ -312,8 +312,10 @@ mt7915_mcu_parse_response(struct mt7915_dev *dev, int cmd,
        struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
        int ret = 0;
 
-       if (seq != rxd->seq)
-               return -EAGAIN;
+       if (seq != rxd->seq) {
+               ret = -EAGAIN;
+               goto out;
+       }
 
        switch (cmd) {
        case -MCU_CMD_PATCH_SEM_CONTROL:
@@ -330,6 +332,7 @@ mt7915_mcu_parse_response(struct mt7915_dev *dev, int cmd,
        default:
                break;
        }
+out:
        dev_kfree_skb(skb);
 
        return ret;
@@ -505,15 +508,22 @@ static void
 mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb)
 {
        struct mt7915_mcu_ra_info *ra = (struct mt7915_mcu_ra_info *)skb->data;
-       u16 wcidx = le16_to_cpu(ra->wlan_idx);
-       struct mt76_wcid *wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
-       struct mt7915_sta *msta = container_of(wcid, struct mt7915_sta, wcid);
-       struct mt7915_sta_stats *stats = &msta->stats;
-       struct mt76_phy *mphy = &dev->mphy;
        struct rate_info rate = {}, prob_rate = {};
+       u16 probe = le16_to_cpu(ra->prob_up_rate);
        u16 attempts = le16_to_cpu(ra->attempts);
        u16 curr = le16_to_cpu(ra->curr_rate);
-       u16 probe = le16_to_cpu(ra->prob_up_rate);
+       u16 wcidx = le16_to_cpu(ra->wlan_idx);
+       struct mt76_phy *mphy = &dev->mphy;
+       struct mt7915_sta_stats *stats;
+       struct mt7915_sta *msta;
+       struct mt76_wcid *wcid;
+
+       if (wcidx >= MT76_N_WCIDS)
+               return;
+
+       wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
+       msta = container_of(wcid, struct mt7915_sta, wcid);
+       stats = &msta->stats;
 
        if (msta->wcid.ext_phy && dev->mt76.phy2)
                mphy = dev->mt76.phy2;
@@ -1166,19 +1176,31 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev,
        struct wtbl_req_hdr *wtbl_hdr;
        struct tlv *sta_wtbl;
        struct sk_buff *skb;
+       int ret;
 
        skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
                                       MT7915_STA_UPDATE_MAX_SIZE);
        if (IS_ERR(skb))
                return PTR_ERR(skb);
 
-       mt7915_mcu_sta_ba_tlv(skb, params, enable, tx);
        sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
 
        wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl,
                                             &skb);
        mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr);
 
+       ret = __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+                                     MCU_EXT_CMD_STA_REC_UPDATE, true);
+       if (ret)
+               return ret;
+
+       skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
+                                      MT7915_STA_UPDATE_MAX_SIZE);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       mt7915_mcu_sta_ba_tlv(skb, params, enable, tx);
+
        return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
                                       MCU_EXT_CMD_STA_REC_UPDATE, true);
 }
@@ -1466,16 +1488,39 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
                HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]);
 }
 
+static int
+mt7915_mcu_add_mu(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+                 struct ieee80211_sta *sta)
+{
+       struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+       struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+       struct sk_buff *skb;
+       int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_muru);
+
+       if (!sta->vht_cap.vht_supported && !sta->he_cap.has_he)
+               return 0;
+
+       skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       /* starec muru */
+       mt7915_mcu_sta_muru_tlv(skb, sta);
+
+       return __mt76_mcu_skb_send_msg(&dev->mt76, skb,
+                                      MCU_EXT_CMD_STA_REC_UPDATE, true);
+}
+
 static void
 mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
                   struct ieee80211_sta *sta)
 {
        struct tlv *tlv;
 
+       /* starec ht */
        if (sta->ht_cap.ht_supported) {
                struct sta_rec_ht *ht;
 
-               /* starec ht */
                tlv = mt7915_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
                ht = (struct sta_rec_ht *)tlv;
                ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
@@ -1495,10 +1540,6 @@ mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
        /* starec he */
        if (sta->he_cap.has_he)
                mt7915_mcu_sta_he_tlv(skb, sta);
-
-       /* starec muru */
-       if (sta->he_cap.has_he || sta->vht_cap.vht_supported)
-               mt7915_mcu_sta_muru_tlv(skb, sta);
 }
 
 static void
@@ -2064,6 +2105,32 @@ int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
                                       MCU_EXT_CMD_STA_REC_UPDATE, true);
 }
 
+static int
+mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+                    struct ieee80211_sta *sta)
+{
+#define MT_STA_BSS_GROUP               1
+       struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+       struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+       struct {
+               __le32 action;
+               u8 wlan_idx_lo;
+               u8 status;
+               u8 wlan_idx_hi;
+               u8 rsv0[5];
+               __le32 val;
+               u8 rsv1[8];
+       } __packed req = {
+               .action = cpu_to_le32(MT_STA_BSS_GROUP),
+               .wlan_idx_lo = to_wcid_lo(msta->wcid.idx),
+               .wlan_idx_hi = to_wcid_hi(msta->wcid.idx),
+               .val = cpu_to_le32(mvif->idx),
+       };
+
+       return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_DRR_CTRL,
+                                  &req, sizeof(req), true);
+}
+
 int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif,
                           struct ieee80211_sta *sta, bool enable)
 {
@@ -2073,10 +2140,18 @@ int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif,
                return 0;
 
        /* must keep the order */
+       ret = mt7915_mcu_add_group(dev, vif, sta);
+       if (ret)
+               return ret;
+
        ret = mt7915_mcu_add_txbf(dev, vif, sta, enable);
        if (ret)
                return ret;
 
+       ret = mt7915_mcu_add_mu(dev, vif, sta);
+       if (ret)
+               return ret;
+
        if (enable)
                return mt7915_mcu_add_rate_ctrl(dev, vif, sta);
 
@@ -2823,23 +2898,23 @@ int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif)
        int ac;
 
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+               struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
                struct edca *e = &req.edca[ac];
 
                e->queue = ac + mvif->wmm_idx * MT7915_MAX_WMM_SETS;
-               e->aifs = mvif->wmm[ac].aifs;
-               e->txop = cpu_to_le16(mvif->wmm[ac].txop);
+               e->aifs = q->aifs;
+               e->txop = cpu_to_le16(q->txop);
 
-               if (mvif->wmm[ac].cw_min)
-                       e->cw_min = fls(mvif->wmm[ac].cw_max);
+               if (q->cw_min)
+                       e->cw_min = fls(q->cw_min);
                else
                        e->cw_min = 5;
 
-               if (mvif->wmm[ac].cw_max)
-                       e->cw_max = cpu_to_le16(fls(mvif->wmm[ac].cw_max));
+               if (q->cw_max)
+                       e->cw_max = cpu_to_le16(fls(q->cw_max));
                else
                        e->cw_max = cpu_to_le16(10);
        }
-
        return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE,
                                  &req, sizeof(req), true);
 }
index c241dd7..cb35e71 100644 (file)
@@ -201,6 +201,7 @@ enum {
        MCU_EXT_CMD_EDCA_UPDATE = 0x27,
        MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A,
        MCU_EXT_CMD_THERMAL_CTRL = 0x2c,
+       MCU_EXT_CMD_SET_DRR_CTRL = 0x36,
        MCU_EXT_CMD_SET_RDD_CTRL = 0x3a,
        MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
        MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
@@ -653,7 +654,7 @@ struct sta_rec_muru {
                bool ofdma_ul_en;
                bool mimo_dl_en;
                bool mimo_ul_en;
-               bool rsv[4];
+               u8 rsv[4];
        } cfg;
 
        struct {
@@ -664,7 +665,7 @@ struct sta_rec_muru {
                bool lt16_sigb;
                bool rx_su_comp_sigb;
                bool rx_su_non_comp_sigb;
-               bool rsv;
+               u8 rsv;
        } ofdma_dl;
 
        struct {
@@ -951,7 +952,6 @@ enum {
                                         sizeof(struct sta_rec_ba) +    \
                                         sizeof(struct sta_rec_vht) +   \
                                         sizeof(struct tlv) +           \
-                                        sizeof(struct sta_rec_muru) +  \
                                         MT7915_WTBL_UPDATE_MAX_SIZE)
 
 #define MT7915_WTBL_UPDATE_BA_SIZE     (sizeof(struct wtbl_req_hdr) +  \
index 85d74ec..d8a13b4 100644 (file)
@@ -99,15 +99,10 @@ struct mt7915_vif {
        u8 band_idx;
        u8 wmm_idx;
 
-       struct {
-               u16 cw_min;
-               u16 cw_max;
-               u16 txop;
-               u8 aifs;
-       } wmm[IEEE80211_NUM_ACS];
-
        struct mt7915_sta sta;
        struct mt7915_dev *dev;
+
+       struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
 };
 
 struct mib_stats {
@@ -125,7 +120,6 @@ struct mt7915_phy {
        struct ieee80211_sband_iftype_data iftype[2][NUM_NL80211_IFTYPES];
 
        u32 rxfilter;
-       u32 vif_mask;
        u32 omac_mask;
 
        u16 noise;
@@ -200,6 +194,16 @@ enum {
 };
 
 enum {
+       MT_LMAC_AC00,
+       MT_LMAC_AC01,
+       MT_LMAC_AC02,
+       MT_LMAC_AC03,
+       MT_LMAC_ALTX0 = 0x10,
+       MT_LMAC_BMC0,
+       MT_LMAC_BCN0,
+};
+
+enum {
        MT_RX_SEL0,
        MT_RX_SEL1,
 };
@@ -254,6 +258,21 @@ mt7915_ext_phy(struct mt7915_dev *dev)
        return phy->priv;
 }
 
+static inline u8 mt7915_lmac_mapping(struct mt7915_dev *dev, u8 ac)
+{
+       static const u8 lmac_queue_map[] = {
+               [IEEE80211_AC_BK] = MT_LMAC_AC00,
+               [IEEE80211_AC_BE] = MT_LMAC_AC01,
+               [IEEE80211_AC_VI] = MT_LMAC_AC02,
+               [IEEE80211_AC_VO] = MT_LMAC_AC03,
+       };
+
+       if (WARN_ON_ONCE(ac >= ARRAY_SIZE(lmac_queue_map)))
+               return MT_LMAC_AC01; /* BE */
+
+       return lmac_queue_map[ac];
+}
+
 static inline void
 mt7915_set_aggr_state(struct mt7915_sta *msta, u8 tid,
                      enum mt7915_ampdu_state state)
index 7937c69..0ec4e18 100644 (file)
@@ -103,7 +103,7 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
        static const struct mt76_driver_ops drv_ops = {
                /* txwi_size = txd size + txp size */
                .txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp),
-               .drv_flags = MT_DRV_TXWI_NO_FREE,
+               .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
                .survey_flags = SURVEY_INFO_TIME_TX |
                                SURVEY_INFO_TIME_RX |
                                SURVEY_INFO_TIME_BSS_RX,
index c121715..e098914 100644 (file)
 #define MT_MIB_SDR16(_band)            MT_WF_MIB(_band, 0x048)
 #define MT_MIB_SDR16_BUSY_MASK         GENMASK(23, 0)
 
+#define MT_MIB_SDR34(_band)            MT_WF_MIB(_band, 0x090)
+#define MT_MIB_MU_BF_TX_CNT            GENMASK(15, 0)
+
 #define MT_MIB_SDR36(_band)            MT_WF_MIB(_band, 0x098)
 #define MT_MIB_SDR36_TXTIME_MASK       GENMASK(23, 0)
 #define MT_MIB_SDR37(_band)            MT_WF_MIB(_band, 0x09c)
 #define MT_MIB_SDR37_RXTIME_MASK       GENMASK(23, 0)
 
+#define MT_MIB_DR8(_band)              MT_WF_MIB(_band, 0x0c0)
+#define MT_MIB_DR9(_band)              MT_WF_MIB(_band, 0x0c4)
 #define MT_MIB_DR11(_band)             MT_WF_MIB(_band, 0x0cc)
 
 #define MT_MIB_MB_SDR0(_band, n)       MT_WF_MIB(_band, 0x100 + ((n) << 4))
index 04c5a69..4c1c159 100644 (file)
@@ -3,6 +3,7 @@
  * Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
  */
 
+#include "mt76.h"
 #include <linux/pci.h>
 
 void mt76_pci_disable_aspm(struct pci_dev *pdev)
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
new file mode 100644 (file)
index 0000000..d2b38ed
--- /dev/null
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc.
+ *
+ * This file is written based on mt76/usb.c.
+ *
+ * Author: Felix Fietkau <nbd@nbd.name>
+ *        Lorenzo Bianconi <lorenzo@kernel.org>
+ *        Sean Wang <sean.wang@mediatek.com>
+ */
+
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+
+#include "mt76.h"
+
+static int
+mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
+{
+       struct mt76_queue *q = &dev->q_rx[qid];
+
+       spin_lock_init(&q->lock);
+       q->entry = devm_kcalloc(dev->dev,
+                               MT_NUM_RX_ENTRIES, sizeof(*q->entry),
+                               GFP_KERNEL);
+       if (!q->entry)
+               return -ENOMEM;
+
+       q->ndesc = MT_NUM_RX_ENTRIES;
+       q->head = q->tail = 0;
+       q->queued = 0;
+
+       return 0;
+}
+
+static int mt76s_alloc_tx(struct mt76_dev *dev)
+{
+       struct mt76_queue *q;
+       int i;
+
+       for (i = 0; i < MT_TXQ_MCU_WA; i++) {
+               INIT_LIST_HEAD(&dev->q_tx[i].swq);
+
+               q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
+               if (!q)
+                       return -ENOMEM;
+
+               spin_lock_init(&q->lock);
+               q->hw_idx = i;
+               dev->q_tx[i].q = q;
+
+               q->entry = devm_kcalloc(dev->dev,
+                                       MT_NUM_TX_ENTRIES, sizeof(*q->entry),
+                                       GFP_KERNEL);
+               if (!q->entry)
+                       return -ENOMEM;
+
+               q->ndesc = MT_NUM_TX_ENTRIES;
+       }
+
+       return 0;
+}
+
+void mt76s_stop_txrx(struct mt76_dev *dev)
+{
+       struct mt76_sdio *sdio = &dev->sdio;
+
+       cancel_work_sync(&sdio->stat_work);
+       clear_bit(MT76_READING_STATS, &dev->phy.state);
+
+       mt76_tx_status_check(dev, NULL, true);
+}
+EXPORT_SYMBOL_GPL(mt76s_stop_txrx);
+
+int mt76s_alloc_queues(struct mt76_dev *dev)
+{
+       int err;
+
+       err = mt76s_alloc_rx_queue(dev, MT_RXQ_MAIN);
+       if (err < 0)
+               return err;
+
+       return mt76s_alloc_tx(dev);
+}
+EXPORT_SYMBOL_GPL(mt76s_alloc_queues);
+
+static struct mt76_queue_entry *
+mt76s_get_next_rx_entry(struct mt76_queue *q)
+{
+       struct mt76_queue_entry *e = NULL;
+
+       spin_lock_bh(&q->lock);
+       if (q->queued > 0) {
+               e = &q->entry[q->head];
+               q->head = (q->head + 1) % q->ndesc;
+               q->queued--;
+       }
+       spin_unlock_bh(&q->lock);
+
+       return e;
+}
+
+static int
+mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
+       int qid = q - &dev->q_rx[MT_RXQ_MAIN];
+       int nframes = 0;
+
+       while (true) {
+               struct mt76_queue_entry *e;
+
+               if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
+                       break;
+
+               e = mt76s_get_next_rx_entry(q);
+               if (!e || !e->skb)
+                       break;
+
+               dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb);
+               e->skb = NULL;
+               nframes++;
+       }
+       if (qid == MT_RXQ_MAIN)
+               mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
+
+       return nframes;
+}
+
+static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
+{
+       struct mt76_sw_queue *sq = &dev->q_tx[qid];
+       u32 n_dequeued = 0, n_sw_dequeued = 0;
+       struct mt76_queue_entry entry;
+       struct mt76_queue *q = sq->q;
+       bool wake;
+
+       while (q->queued > n_dequeued) {
+               if (!q->entry[q->head].done)
+                       break;
+
+               if (q->entry[q->head].schedule) {
+                       q->entry[q->head].schedule = false;
+                       n_sw_dequeued++;
+               }
+
+               entry = q->entry[q->head];
+               q->entry[q->head].done = false;
+               q->head = (q->head + 1) % q->ndesc;
+               n_dequeued++;
+
+               if (qid == MT_TXQ_MCU)
+                       dev_kfree_skb(entry.skb);
+               else
+                       dev->drv->tx_complete_skb(dev, qid, &entry);
+       }
+
+       spin_lock_bh(&q->lock);
+
+       sq->swq_queued -= n_sw_dequeued;
+       q->queued -= n_dequeued;
+
+       wake = q->stopped && q->queued < q->ndesc - 8;
+       if (wake)
+               q->stopped = false;
+
+       if (!q->queued)
+               wake_up(&dev->tx_wait);
+
+       spin_unlock_bh(&q->lock);
+
+       if (qid == MT_TXQ_MCU)
+               goto out;
+
+       mt76_txq_schedule(&dev->phy, qid);
+
+       if (wake)
+               ieee80211_wake_queue(dev->hw, qid);
+
+       wake_up_process(dev->sdio.tx_kthread);
+out:
+       return n_dequeued;
+}
+
+static void mt76s_tx_status_data(struct work_struct *work)
+{
+       struct mt76_sdio *sdio;
+       struct mt76_dev *dev;
+       u8 update = 1;
+       u16 count = 0;
+
+       sdio = container_of(work, struct mt76_sdio, stat_work);
+       dev = container_of(sdio, struct mt76_dev, sdio);
+
+       while (true) {
+               if (test_bit(MT76_REMOVED, &dev->phy.state))
+                       break;
+
+               if (!dev->drv->tx_status_data(dev, &update))
+                       break;
+               count++;
+       }
+
+       if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
+               queue_work(dev->wq, &sdio->stat_work);
+       else
+               clear_bit(MT76_READING_STATS, &dev->phy.state);
+}
+
+static int
+mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
+                  struct sk_buff *skb, struct mt76_wcid *wcid,
+                  struct ieee80211_sta *sta)
+{
+       struct mt76_queue *q = dev->q_tx[qid].q;
+       struct mt76_tx_info tx_info = {
+               .skb = skb,
+       };
+       int err, len = skb->len;
+       u16 idx = q->tail;
+
+       if (q->queued == q->ndesc)
+               return -ENOSPC;
+
+       skb->prev = skb->next = NULL;
+       err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
+       if (err < 0)
+               return err;
+
+       q->entry[q->tail].skb = tx_info.skb;
+       q->entry[q->tail].buf_sz = len;
+       q->tail = (q->tail + 1) % q->ndesc;
+       q->queued++;
+
+       return idx;
+}
+
+static int
+mt76s_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
+                      struct sk_buff *skb, u32 tx_info)
+{
+       struct mt76_queue *q = dev->q_tx[qid].q;
+       int ret = -ENOSPC, len = skb->len;
+
+       spin_lock_bh(&q->lock);
+       if (q->queued == q->ndesc)
+               goto out;
+
+       ret = mt76_skb_adjust_pad(skb);
+       if (ret)
+               goto out;
+
+       q->entry[q->tail].buf_sz = len;
+       q->entry[q->tail].skb = skb;
+       q->tail = (q->tail + 1) % q->ndesc;
+       q->queued++;
+
+out:
+       spin_unlock_bh(&q->lock);
+
+       return ret;
+}
+
+static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
+{
+       struct mt76_sdio *sdio = &dev->sdio;
+
+       wake_up_process(sdio->tx_kthread);
+}
+
+static const struct mt76_queue_ops sdio_queue_ops = {
+       .tx_queue_skb = mt76s_tx_queue_skb,
+       .kick = mt76s_tx_kick,
+       .tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
+};
+
+static int mt76s_kthread_run(void *data)
+{
+       struct mt76_dev *dev = data;
+       struct mt76_phy *mphy = &dev->phy;
+
+       while (!kthread_should_stop()) {
+               int i, nframes = 0;
+
+               cond_resched();
+
+               /* rx processing */
+               local_bh_disable();
+               rcu_read_lock();
+
+               mt76_for_each_q_rx(dev, i)
+                       nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]);
+
+               rcu_read_unlock();
+               local_bh_enable();
+
+               /* tx processing */
+               for (i = 0; i < MT_TXQ_MCU_WA; i++)
+                       nframes += mt76s_process_tx_queue(dev, i);
+
+               if (dev->drv->tx_status_data &&
+                   !test_and_set_bit(MT76_READING_STATS, &mphy->state))
+                       queue_work(dev->wq, &dev->sdio.stat_work);
+
+               if (!nframes || !test_bit(MT76_STATE_RUNNING, &mphy->state)) {
+                       set_current_state(TASK_INTERRUPTIBLE);
+                       schedule();
+               }
+       }
+
+       return 0;
+}
+
+void mt76s_deinit(struct mt76_dev *dev)
+{
+       struct mt76_sdio *sdio = &dev->sdio;
+       int i;
+
+       kthread_stop(sdio->kthread);
+       kthread_stop(sdio->tx_kthread);
+       mt76s_stop_txrx(dev);
+
+       sdio_claim_host(sdio->func);
+       sdio_release_irq(sdio->func);
+       sdio_release_host(sdio->func);
+
+       mt76_for_each_q_rx(dev, i) {
+               struct mt76_queue *q = &dev->q_rx[i];
+               int j;
+
+               for (j = 0; j < q->ndesc; j++) {
+                       struct mt76_queue_entry *e = &q->entry[j];
+
+                       if (!e->skb)
+                               continue;
+
+                       dev_kfree_skb(e->skb);
+                       e->skb = NULL;
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(mt76s_deinit);
+
+int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
+              const struct mt76_bus_ops *bus_ops)
+{
+       struct mt76_sdio *sdio = &dev->sdio;
+
+       sdio->kthread = kthread_create(mt76s_kthread_run, dev, "mt76s");
+       if (IS_ERR(sdio->kthread))
+               return PTR_ERR(sdio->kthread);
+
+       INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);
+
+       mutex_init(&sdio->sched.lock);
+       dev->queue_ops = &sdio_queue_ops;
+       dev->bus = bus_ops;
+       dev->sdio.func = func;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt76s_init);
+
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
new file mode 100644 (file)
index 0000000..75bb02c
--- /dev/null
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+#include "mt76.h"
+
+static const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
+       [MT76_TM_ATTR_RESET] = { .type = NLA_FLAG },
+       [MT76_TM_ATTR_STATE] = { .type = NLA_U8 },
+       [MT76_TM_ATTR_TX_COUNT] = { .type = NLA_U32 },
+       [MT76_TM_ATTR_TX_RATE_MODE] = { .type = NLA_U8 },
+       [MT76_TM_ATTR_TX_RATE_NSS] = { .type = NLA_U8 },
+       [MT76_TM_ATTR_TX_RATE_IDX] = { .type = NLA_U8 },
+       [MT76_TM_ATTR_TX_RATE_SGI] = { .type = NLA_U8 },
+       [MT76_TM_ATTR_TX_RATE_LDPC] = { .type = NLA_U8 },
+       [MT76_TM_ATTR_TX_ANTENNA] = { .type = NLA_U8 },
+       [MT76_TM_ATTR_TX_POWER_CONTROL] = { .type = NLA_U8 },
+       [MT76_TM_ATTR_TX_POWER] = { .type = NLA_NESTED },
+       [MT76_TM_ATTR_FREQ_OFFSET] = { .type = NLA_U32 },
+};
+
+void mt76_testmode_tx_pending(struct mt76_dev *dev)
+{
+       struct mt76_testmode_data *td = &dev->test;
+       struct mt76_wcid *wcid = &dev->global_wcid;
+       struct sk_buff *skb = td->tx_skb;
+       struct mt76_queue *q;
+       int qid;
+
+       if (!skb || !td->tx_pending)
+               return;
+
+       qid = skb_get_queue_mapping(skb);
+       q = dev->q_tx[qid].q;
+
+       spin_lock_bh(&q->lock);
+
+       while (td->tx_pending > 0 && q->queued < q->ndesc / 2) {
+               int ret;
+
+               ret = dev->queue_ops->tx_queue_skb(dev, qid, skb_get(skb), wcid, NULL);
+               if (ret < 0)
+                       break;
+
+               td->tx_pending--;
+               td->tx_queued++;
+       }
+
+       dev->queue_ops->kick(dev, q);
+
+       spin_unlock_bh(&q->lock);
+}
+
+
+static int
+mt76_testmode_tx_init(struct mt76_dev *dev)
+{
+       struct mt76_testmode_data *td = &dev->test;
+       struct ieee80211_tx_info *info;
+       struct ieee80211_hdr *hdr;
+       struct sk_buff *skb;
+       u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
+                IEEE80211_FCTL_FROMDS;
+       struct ieee80211_tx_rate *rate;
+       u8 max_nss = hweight8(dev->phy.antenna_mask);
+
+       if (td->tx_antenna_mask)
+               max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask));
+
+       skb = alloc_skb(td->tx_msdu_len, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       dev_kfree_skb(td->tx_skb);
+       td->tx_skb = skb;
+       hdr = __skb_put_zero(skb, td->tx_msdu_len);
+       hdr->frame_control = cpu_to_le16(fc);
+       memcpy(hdr->addr1, dev->macaddr, sizeof(dev->macaddr));
+       memcpy(hdr->addr2, dev->macaddr, sizeof(dev->macaddr));
+       memcpy(hdr->addr3, dev->macaddr, sizeof(dev->macaddr));
+
+       info = IEEE80211_SKB_CB(skb);
+       info->flags = IEEE80211_TX_CTL_INJECTED |
+                     IEEE80211_TX_CTL_NO_ACK |
+                     IEEE80211_TX_CTL_NO_PS_BUFFER;
+       rate = &info->control.rates[0];
+       rate->count = 1;
+       rate->idx = td->tx_rate_idx;
+
+       switch (td->tx_rate_mode) {
+       case MT76_TM_TX_MODE_CCK:
+               if (dev->phy.chandef.chan->band != NL80211_BAND_2GHZ)
+                       return -EINVAL;
+
+               if (rate->idx > 4)
+                       return -EINVAL;
+               break;
+       case MT76_TM_TX_MODE_OFDM:
+               if (dev->phy.chandef.chan->band != NL80211_BAND_2GHZ)
+                       break;
+
+               if (rate->idx > 8)
+                       return -EINVAL;
+
+               rate->idx += 4;
+               break;
+       case MT76_TM_TX_MODE_HT:
+               if (rate->idx > 8 * max_nss &&
+                       !(rate->idx == 32 &&
+                         dev->phy.chandef.width >= NL80211_CHAN_WIDTH_40))
+                       return -EINVAL;
+
+               rate->flags |= IEEE80211_TX_RC_MCS;
+               break;
+       case MT76_TM_TX_MODE_VHT:
+               if (rate->idx > 9)
+                       return -EINVAL;
+
+               if (td->tx_rate_nss > max_nss)
+                       return -EINVAL;
+
+               ieee80211_rate_set_vht(rate, td->tx_rate_idx, td->tx_rate_nss);
+               rate->flags |= IEEE80211_TX_RC_VHT_MCS;
+               break;
+       default:
+               break;
+       }
+
+       if (td->tx_rate_sgi)
+               rate->flags |= IEEE80211_TX_RC_SHORT_GI;
+
+       if (td->tx_rate_ldpc)
+               info->flags |= IEEE80211_TX_CTL_LDPC;
+
+       if (td->tx_rate_mode >= MT76_TM_TX_MODE_HT) {
+               switch (dev->phy.chandef.width) {
+               case NL80211_CHAN_WIDTH_40:
+                       rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+                       break;
+               case NL80211_CHAN_WIDTH_80:
+                       rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+                       break;
+               case NL80211_CHAN_WIDTH_80P80:
+               case NL80211_CHAN_WIDTH_160:
+                       rate->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       skb_set_queue_mapping(skb, IEEE80211_AC_BE);
+
+       return 0;
+}
+
+static void
+mt76_testmode_tx_start(struct mt76_dev *dev)
+{
+       struct mt76_testmode_data *td = &dev->test;
+
+       td->tx_queued = 0;
+       td->tx_done = 0;
+       td->tx_pending = td->tx_count;
+       tasklet_schedule(&dev->tx_tasklet);
+}
+
+static void
+mt76_testmode_tx_stop(struct mt76_dev *dev)
+{
+       struct mt76_testmode_data *td = &dev->test;
+
+       tasklet_disable(&dev->tx_tasklet);
+
+       td->tx_pending = 0;
+
+       tasklet_enable(&dev->tx_tasklet);
+
+       wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued, 10 * HZ);
+
+       dev_kfree_skb(td->tx_skb);
+       td->tx_skb = NULL;
+}
+
+static inline void
+mt76_testmode_param_set(struct mt76_testmode_data *td, u16 idx)
+{
+       td->param_set[idx / 32] |= BIT(idx % 32);
+}
+
+static inline bool
+mt76_testmode_param_present(struct mt76_testmode_data *td, u16 idx)
+{
+       return td->param_set[idx / 32] & BIT(idx % 32);
+}
+
+static void
+mt76_testmode_init_defaults(struct mt76_dev *dev)
+{
+       struct mt76_testmode_data *td = &dev->test;
+
+       if (td->tx_msdu_len > 0)
+               return;
+
+       td->tx_msdu_len = 1024;
+       td->tx_count = 1;
+       td->tx_rate_mode = MT76_TM_TX_MODE_OFDM;
+       td->tx_rate_nss = 1;
+}
+
+static int
+__mt76_testmode_set_state(struct mt76_dev *dev, enum mt76_testmode_state state)
+{
+       enum mt76_testmode_state prev_state = dev->test.state;
+       int err;
+
+       if (prev_state == MT76_TM_STATE_TX_FRAMES)
+               mt76_testmode_tx_stop(dev);
+
+       if (state == MT76_TM_STATE_TX_FRAMES) {
+               err = mt76_testmode_tx_init(dev);
+               if (err)
+                       return err;
+       }
+
+       err = dev->test_ops->set_state(dev, state);
+       if (err) {
+               if (state == MT76_TM_STATE_TX_FRAMES)
+                       mt76_testmode_tx_stop(dev);
+
+               return err;
+       }
+
+       if (state == MT76_TM_STATE_TX_FRAMES)
+               mt76_testmode_tx_start(dev);
+       else if (state == MT76_TM_STATE_RX_FRAMES) {
+               memset(&dev->test.rx_stats, 0, sizeof(dev->test.rx_stats));
+       }
+
+       dev->test.state = state;
+
+       return 0;
+}
+
+int mt76_testmode_set_state(struct mt76_dev *dev, enum mt76_testmode_state state)
+{
+       struct mt76_testmode_data *td = &dev->test;
+       struct ieee80211_hw *hw = dev->phy.hw;
+
+       if (state == td->state && state == MT76_TM_STATE_OFF)
+               return 0;
+
+       if (state > MT76_TM_STATE_OFF &&
+           (!test_bit(MT76_STATE_RUNNING, &dev->phy.state) ||
+            !(hw->conf.flags & IEEE80211_CONF_MONITOR)))
+               return -ENOTCONN;
+
+       if (state != MT76_TM_STATE_IDLE &&
+           td->state != MT76_TM_STATE_IDLE) {
+               int ret;
+
+               ret = __mt76_testmode_set_state(dev, MT76_TM_STATE_IDLE);
+               if (ret)
+                       return ret;
+       }
+
+       return __mt76_testmode_set_state(dev, state);
+
+}
+EXPORT_SYMBOL(mt76_testmode_set_state);
+
+static int
+mt76_tm_get_u8(struct nlattr *attr, u8 *dest, u8 min, u8 max)
+{
+       u8 val;
+
+       if (!attr)
+               return 0;
+
+       val = nla_get_u8(attr);
+       if (val < min || val > max)
+               return -EINVAL;
+
+       *dest = val;
+       return 0;
+}
+
+int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                     void *data, int len)
+{
+       struct mt76_phy *phy = hw->priv;
+       struct mt76_dev *dev = phy->dev;
+       struct mt76_testmode_data *td = &dev->test;
+       struct nlattr *tb[NUM_MT76_TM_ATTRS];
+       u32 state;
+       int err;
+       int i;
+
+       if (!dev->test_ops)
+               return -EOPNOTSUPP;
+
+       err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len,
+                                  mt76_tm_policy, NULL);
+       if (err)
+               return err;
+
+       err = -EINVAL;
+
+       mutex_lock(&dev->mutex);
+
+       if (tb[MT76_TM_ATTR_RESET]) {
+               mt76_testmode_set_state(dev, MT76_TM_STATE_OFF);
+               memset(td, 0, sizeof(*td));
+       }
+
+       mt76_testmode_init_defaults(dev);
+
+       if (tb[MT76_TM_ATTR_TX_COUNT])
+               td->tx_count = nla_get_u32(tb[MT76_TM_ATTR_TX_COUNT]);
+
+       if (tb[MT76_TM_ATTR_TX_LENGTH]) {
+               u32 val = nla_get_u32(tb[MT76_TM_ATTR_TX_LENGTH]);
+
+               if (val > IEEE80211_MAX_FRAME_LEN ||
+                   val < sizeof(struct ieee80211_hdr))
+                       goto out;
+
+               td->tx_msdu_len = val;
+       }
+
+       if (tb[MT76_TM_ATTR_TX_RATE_IDX])
+               td->tx_rate_idx = nla_get_u8(tb[MT76_TM_ATTR_TX_RATE_IDX]);
+
+       if (mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_MODE], &td->tx_rate_mode,
+                          0, MT76_TM_TX_MODE_MAX) ||
+           mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_NSS], &td->tx_rate_nss,
+                          1, hweight8(phy->antenna_mask)) ||
+           mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_SGI], &td->tx_rate_sgi, 0, 1) ||
+           mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_LDPC], &td->tx_rate_ldpc, 0, 1) ||
+           mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_ANTENNA], &td->tx_antenna_mask, 1,
+                          phy->antenna_mask) ||
+           mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_POWER_CONTROL],
+                          &td->tx_power_control, 0, 1))
+               goto out;
+
+       if (tb[MT76_TM_ATTR_FREQ_OFFSET])
+               td->freq_offset = nla_get_u32(tb[MT76_TM_ATTR_FREQ_OFFSET]);
+
+       if (tb[MT76_TM_ATTR_STATE]) {
+               state = nla_get_u32(tb[MT76_TM_ATTR_STATE]);
+               if (state > MT76_TM_STATE_MAX)
+                       goto out;
+       } else {
+               state = td->state;
+       }
+
+       if (tb[MT76_TM_ATTR_TX_POWER]) {
+               struct nlattr *cur;
+               int idx = 0;
+               int rem;
+
+               nla_for_each_nested(cur, tb[MT76_TM_ATTR_TX_POWER], rem) {
+                       if (nla_len(cur) != 1 ||
+                           idx >= ARRAY_SIZE(td->tx_power))
+                               goto out;
+
+                       td->tx_power[idx++] = nla_get_u8(cur);
+               }
+       }
+
+       if (dev->test_ops->set_params) {
+               err = dev->test_ops->set_params(dev, tb, state);
+               if (err)
+                       goto out;
+       }
+
+       for (i = MT76_TM_ATTR_STATE; i < ARRAY_SIZE(tb); i++)
+               if (tb[i])
+                       mt76_testmode_param_set(td, i);
+
+       err = 0;
+       if (tb[MT76_TM_ATTR_STATE])
+               err = mt76_testmode_set_state(dev, state);
+
+out:
+       mutex_unlock(&dev->mutex);
+
+       return err;
+}
+EXPORT_SYMBOL(mt76_testmode_cmd);
+
+static int
+mt76_testmode_dump_stats(struct mt76_dev *dev, struct sk_buff *msg)
+{
+       struct mt76_testmode_data *td = &dev->test;
+       u64 rx_packets = 0;
+       u64 rx_fcs_error = 0;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(td->rx_stats.packets); i++) {
+               rx_packets += td->rx_stats.packets[i];
+               rx_fcs_error += td->rx_stats.fcs_error[i];
+       }
+
+       if (nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_PENDING, td->tx_pending) ||
+           nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_QUEUED, td->tx_queued) ||
+           nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_DONE, td->tx_done) ||
+           nla_put_u64_64bit(msg, MT76_TM_STATS_ATTR_RX_PACKETS, rx_packets,
+                             MT76_TM_STATS_ATTR_PAD) ||
+           nla_put_u64_64bit(msg, MT76_TM_STATS_ATTR_RX_FCS_ERROR, rx_fcs_error,
+                             MT76_TM_STATS_ATTR_PAD))
+               return -EMSGSIZE;
+
+       if (dev->test_ops->dump_stats)
+               return dev->test_ops->dump_stats(dev, msg);
+
+       return 0;
+}
+
+int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
+                      struct netlink_callback *cb, void *data, int len)
+{
+       struct mt76_phy *phy = hw->priv;
+       struct mt76_dev *dev = phy->dev;
+       struct mt76_testmode_data *td = &dev->test;
+       struct nlattr *tb[NUM_MT76_TM_ATTRS] = {};
+       int err = 0;
+       void *a;
+       int i;
+
+       if (!dev->test_ops)
+               return -EOPNOTSUPP;
+
+       if (cb->args[2]++ > 0)
+               return -ENOENT;
+
+       if (data) {
+               err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len,
+                                          mt76_tm_policy, NULL);
+               if (err)
+                       return err;
+       }
+
+       mutex_lock(&dev->mutex);
+
+       if (tb[MT76_TM_ATTR_STATS]) {
+               a = nla_nest_start(msg, MT76_TM_ATTR_STATS);
+               err = mt76_testmode_dump_stats(dev, msg);
+               nla_nest_end(msg, a);
+
+               goto out;
+       }
+
+       mt76_testmode_init_defaults(dev);
+
+       err = -EMSGSIZE;
+       if (nla_put_u32(msg, MT76_TM_ATTR_STATE, td->state))
+               goto out;
+
+       if (td->mtd_name &&
+           (nla_put_string(msg, MT76_TM_ATTR_MTD_PART, td->mtd_name) ||
+            nla_put_u32(msg, MT76_TM_ATTR_MTD_OFFSET, td->mtd_offset)))
+               goto out;
+
+       if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) ||
+           nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_msdu_len) ||
+           nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_MODE, td->tx_rate_mode) ||
+           nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, td->tx_rate_nss) ||
+           nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, td->tx_rate_idx) ||
+           nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_SGI, td->tx_rate_sgi) ||
+           nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, td->tx_rate_ldpc) ||
+           (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_ANTENNA) &&
+            nla_put_u8(msg, MT76_TM_ATTR_TX_ANTENNA, td->tx_antenna_mask)) ||
+           (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER_CONTROL) &&
+            nla_put_u8(msg, MT76_TM_ATTR_TX_POWER_CONTROL, td->tx_power_control)) ||
+           (mt76_testmode_param_present(td, MT76_TM_ATTR_FREQ_OFFSET) &&
+            nla_put_u8(msg, MT76_TM_ATTR_FREQ_OFFSET, td->freq_offset)))
+               goto out;
+
+       if (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER)) {
+               a = nla_nest_start(msg, MT76_TM_ATTR_TX_POWER);
+               if (!a)
+                       goto out;
+
+               for (i = 0; i < ARRAY_SIZE(td->tx_power); i++)
+                       if (nla_put_u8(msg, i, td->tx_power[i]))
+                               goto out;
+
+               nla_nest_end(msg, a);
+       }
+
+       err = 0;
+
+out:
+       mutex_unlock(&dev->mutex);
+
+       return err;
+}
+EXPORT_SYMBOL(mt76_testmode_dump);
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.h b/drivers/net/wireless/mediatek/mt76/testmode.h
new file mode 100644 (file)
index 0000000..691fe57
--- /dev/null
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
+ */
+#ifndef __MT76_TESTMODE_H
+#define __MT76_TESTMODE_H
+
+/**
+ * enum mt76_testmode_attr - testmode attributes inside NL80211_ATTR_TESTDATA
+ *
+ * @MT76_TM_ATTR_UNSPEC: (invalid attribute)
+ *
+ * @MT76_TM_ATTR_RESET: reset parameters to default (flag)
+ * @MT76_TM_ATTR_STATE: test state (u32), see &enum mt76_testmode_state
+ *
+ * @MT76_TM_ATTR_MTD_PART: mtd partition used for eeprom data (string)
+ * @MT76_TM_ATTR_MTD_OFFSET: offset of eeprom data within the partition (u32)
+ *
+ * @MT76_TM_ATTR_TX_COUNT: configured number of frames to send when setting
+ *     state to MT76_TM_STATE_TX_FRAMES (u32)
+ * @MT76_TM_ATTR_TX_PENDING: pending frames during MT76_TM_STATE_TX_FRAMES (u32)
+ * @MT76_TM_ATTR_TX_LENGTH: packet tx msdu length (u32)
+ * @MT76_TM_ATTR_TX_RATE_MODE: packet tx mode (u8, see &enum mt76_testmode_tx_mode)
+ * @MT76_TM_ATTR_TX_RATE_NSS: packet tx number of spatial streams (u8)
+ * @MT76_TM_ATTR_TX_RATE_IDX: packet tx rate/MCS index (u8)
+ * @MT76_TM_ATTR_TX_RATE_SGI: packet tx use short guard interval (u8)
+ * @MT76_TM_ATTR_TX_RATE_LDPC: packet tx enable LDPC (u8)
+ *
+ * @MT76_TM_ATTR_TX_ANTENNA: tx antenna mask (u8)
+ * @MT76_TM_ATTR_TX_POWER_CONTROL: enable tx power control (u8)
+ * @MT76_TM_ATTR_TX_POWER: per-antenna tx power array (nested, u8 attrs)
+ *
+ * @MT76_TM_ATTR_FREQ_OFFSET: RF frequency offset (u32)
+ *
+ * @MT76_TM_ATTR_STATS: statistics (nested, see &enum mt76_testmode_stats_attr)
+ */
+enum mt76_testmode_attr {
+       MT76_TM_ATTR_UNSPEC,
+
+       MT76_TM_ATTR_RESET,
+       MT76_TM_ATTR_STATE,
+
+       MT76_TM_ATTR_MTD_PART,
+       MT76_TM_ATTR_MTD_OFFSET,
+
+       MT76_TM_ATTR_TX_COUNT,
+       MT76_TM_ATTR_TX_LENGTH,
+       MT76_TM_ATTR_TX_RATE_MODE,
+       MT76_TM_ATTR_TX_RATE_NSS,
+       MT76_TM_ATTR_TX_RATE_IDX,
+       MT76_TM_ATTR_TX_RATE_SGI,
+       MT76_TM_ATTR_TX_RATE_LDPC,
+
+       MT76_TM_ATTR_TX_ANTENNA,
+       MT76_TM_ATTR_TX_POWER_CONTROL,
+       MT76_TM_ATTR_TX_POWER,
+
+       MT76_TM_ATTR_FREQ_OFFSET,
+
+       MT76_TM_ATTR_STATS,
+
+       /* keep last */
+       NUM_MT76_TM_ATTRS,
+       MT76_TM_ATTR_MAX = NUM_MT76_TM_ATTRS - 1,
+};
+
+/**
+ * enum mt76_testmode_stats_attr - statistics attributes
+ *
+ * @MT76_TM_STATS_ATTR_TX_PENDING: pending tx frames (u32)
+ * @MT76_TM_STATS_ATTR_TX_QUEUED: queued tx frames (u32)
+ * @MT76_TM_STATS_ATTR_TX_DONE: completed tx frames (u32)
+ *
+ * @MT76_TM_STATS_ATTR_RX_PACKETS: number of rx packets (u64)
+ * @MT76_TM_STATS_ATTR_RX_FCS_ERROR: number of rx packets with FCS error (u64)
+ * @MT76_TM_STATS_ATTR_LAST_RX: information about the last received packet
+ *     see &enum mt76_testmode_rx_attr
+ */
+enum mt76_testmode_stats_attr {
+       MT76_TM_STATS_ATTR_UNSPEC,
+       MT76_TM_STATS_ATTR_PAD,
+
+       MT76_TM_STATS_ATTR_TX_PENDING,
+       MT76_TM_STATS_ATTR_TX_QUEUED,
+       MT76_TM_STATS_ATTR_TX_DONE,
+
+       MT76_TM_STATS_ATTR_RX_PACKETS,
+       MT76_TM_STATS_ATTR_RX_FCS_ERROR,
+       MT76_TM_STATS_ATTR_LAST_RX,
+
+       /* keep last */
+       NUM_MT76_TM_STATS_ATTRS,
+       MT76_TM_STATS_ATTR_MAX = NUM_MT76_TM_STATS_ATTRS - 1,
+};
+
+
+/**
+ * enum mt76_testmode_rx_attr - packet rx information
+ *
+ * @MT76_TM_RX_ATTR_FREQ_OFFSET: frequency offset (s32)
+ * @MT76_TM_RX_ATTR_RCPI: received channel power indicator (array, u8)
+ * @MT76_TM_RX_ATTR_IB_RSSI: internal inband RSSI (s8)
+ * @MT76_TM_RX_ATTR_WB_RSSI: internal wideband RSSI (s8)
+ */
+enum mt76_testmode_rx_attr {
+       MT76_TM_RX_ATTR_UNSPEC,
+
+       MT76_TM_RX_ATTR_FREQ_OFFSET,
+       MT76_TM_RX_ATTR_RCPI,
+       MT76_TM_RX_ATTR_IB_RSSI,
+       MT76_TM_RX_ATTR_WB_RSSI,
+
+       /* keep last */
+       NUM_MT76_TM_RX_ATTRS,
+       MT76_TM_RX_ATTR_MAX = NUM_MT76_TM_RX_ATTRS - 1,
+};
+
+/**
+ * enum mt76_testmode_state - phy test state
+ *
+ * @MT76_TM_STATE_OFF: test mode disabled (normal operation)
+ * @MT76_TM_STATE_IDLE: test mode enabled, but idle
+ * @MT76_TM_STATE_TX_FRAMES: send a fixed number of test frames
+ * @MT76_TM_STATE_RX_FRAMES: receive packets and keep statistics
+ */
+enum mt76_testmode_state {
+       MT76_TM_STATE_OFF,
+       MT76_TM_STATE_IDLE,
+       MT76_TM_STATE_TX_FRAMES,
+       MT76_TM_STATE_RX_FRAMES,
+
+       /* keep last */
+       NUM_MT76_TM_STATES,
+       MT76_TM_STATE_MAX = NUM_MT76_TM_STATES - 1,
+};
+
+/**
+ * enum mt76_testmode_tx_mode - packet tx phy mode
+ *
+ * @MT76_TM_TX_MODE_CCK: legacy CCK mode
+ * @MT76_TM_TX_MODE_OFDM: legacy OFDM mode
+ * @MT76_TM_TX_MODE_HT: 802.11n MCS
+ * @MT76_TM_TX_MODE_VHT: 802.11ac MCS
+ */
+enum mt76_testmode_tx_mode {
+       MT76_TM_TX_MODE_CCK,
+       MT76_TM_TX_MODE_OFDM,
+       MT76_TM_TX_MODE_HT,
+       MT76_TM_TX_MODE_VHT,
+
+       /* keep last */
+       NUM_MT76_TM_TX_MODES,
+       MT76_TM_TX_MODE_MAX = NUM_MT76_TM_TX_MODES - 1,
+};
+
+#endif
index f10c98a..3afd89e 100644 (file)
@@ -236,6 +236,14 @@ void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
        struct ieee80211_hw *hw;
        struct sk_buff_head list;
 
+#ifdef CONFIG_NL80211_TESTMODE
+       if (skb == dev->test.tx_skb) {
+               dev->test.tx_done++;
+               if (dev->test.tx_queued == dev->test.tx_done)
+                       wake_up(&dev->tx_wait);
+       }
+#endif
+
        if (!skb->prev) {
                hw = mt76_tx_status_get_hw(dev, skb);
                ieee80211_free_txskb(hw, skb);
@@ -259,6 +267,11 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
        int qid = skb_get_queue_mapping(skb);
        bool ext_phy = phy != &dev->phy;
 
+       if (mt76_testmode_enabled(dev)) {
+               ieee80211_free_txskb(phy->hw, skb);
+               return;
+       }
+
        if (WARN_ON(qid >= MT_TXQ_PSD)) {
                qid = MT_TXQ_BE;
                skb_set_queue_mapping(skb, qid);
@@ -579,6 +592,11 @@ void mt76_tx_tasklet(unsigned long data)
        mt76_txq_schedule_all(&dev->phy);
        if (dev->phy2)
                mt76_txq_schedule_all(dev->phy2);
+
+#ifdef CONFIG_NL80211_TESTMODE
+       if (dev->test.tx_pending)
+               mt76_testmode_tx_pending(dev);
+#endif
 }
 
 void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
@@ -659,3 +677,32 @@ u8 mt76_ac_to_hwq(u8 ac)
        return wmm_queue_map[ac];
 }
 EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);
+
+int mt76_skb_adjust_pad(struct sk_buff *skb)
+{
+       struct sk_buff *iter, *last = skb;
+       u32 pad;
+
+       /* Add zero pad of 4 - 7 bytes */
+       pad = round_up(skb->len, 4) + 4 - skb->len;
+
+       /* First packet of a A-MSDU burst keeps track of the whole burst
+        * length, need to update length of it and the last packet.
+        */
+       skb_walk_frags(skb, iter) {
+               last = iter;
+               if (!iter->next) {
+                       skb->data_len += pad;
+                       skb->len += pad;
+                       break;
+               }
+       }
+
+       if (skb_pad(last, pad))
+               return -ENOMEM;
+
+       __skb_put(last, pad);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);
index 87382b2..dcab599 100644 (file)
@@ -672,17 +672,11 @@ mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 static void mt76u_rx_tasklet(unsigned long data)
 {
        struct mt76_dev *dev = (struct mt76_dev *)data;
-       struct mt76_queue *q;
        int i;
 
        rcu_read_lock();
-       for (i = 0; i < __MT_RXQ_MAX; i++) {
-               q = &dev->q_rx[i];
-               if (!q->ndesc)
-                       continue;
-
-               mt76u_process_rx_queue(dev, q);
-       }
+       mt76_for_each_q_rx(dev, i)
+               mt76u_process_rx_queue(dev, &dev->q_rx[i]);
        rcu_read_unlock();
 }
 
@@ -756,27 +750,19 @@ mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 
 static void mt76u_free_rx(struct mt76_dev *dev)
 {
-       struct mt76_queue *q;
        int i;
 
-       for (i = 0; i < __MT_RXQ_MAX; i++) {
-               q = &dev->q_rx[i];
-               if (!q->ndesc)
-                       continue;
-
-               mt76u_free_rx_queue(dev, q);
-       }
+       mt76_for_each_q_rx(dev, i)
+               mt76u_free_rx_queue(dev, &dev->q_rx[i]);
 }
 
 void mt76u_stop_rx(struct mt76_dev *dev)
 {
-       struct mt76_queue *q;
-       int i, j;
+       int i;
 
-       for (i = 0; i < __MT_RXQ_MAX; i++) {
-               q = &dev->q_rx[i];
-               if (!q->ndesc)
-                       continue;
+       mt76_for_each_q_rx(dev, i) {
+               struct mt76_queue *q = &dev->q_rx[i];
+               int j;
 
                for (j = 0; j < q->ndesc; j++)
                        usb_poison_urb(q->entry[j].urb);
@@ -788,14 +774,11 @@ EXPORT_SYMBOL_GPL(mt76u_stop_rx);
 
 int mt76u_resume_rx(struct mt76_dev *dev)
 {
-       struct mt76_queue *q;
-       int i, j, err;
-
-       for (i = 0; i < __MT_RXQ_MAX; i++) {
-               q = &dev->q_rx[i];
+       int i;
 
-               if (!q->ndesc)
-                       continue;
+       mt76_for_each_q_rx(dev, i) {
+               struct mt76_queue *q = &dev->q_rx[i];
+               int err, j;
 
                for (j = 0; j < q->ndesc; j++)
                        usb_unpoison_urb(q->entry[j].urb);
@@ -859,7 +842,7 @@ static void mt76u_tx_tasklet(unsigned long data)
 
                if (dev->drv->tx_status_data &&
                    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
-                       queue_work(dev->usb.wq, &dev->usb.stat_work);
+                       queue_work(dev->wq, &dev->usb.stat_work);
                if (wake)
                        ieee80211_wake_queue(dev->hw, i);
        }
@@ -885,7 +868,7 @@ static void mt76u_tx_status_data(struct work_struct *work)
        }
 
        if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
-               queue_work(usb->wq, &usb->stat_work);
+               queue_work(dev->wq, &usb->stat_work);
        else
                clear_bit(MT76_READING_STATS, &dev->phy.state);
 }
@@ -921,35 +904,6 @@ mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
        return urb->num_sgs;
 }
 
-int mt76u_skb_dma_info(struct sk_buff *skb, u32 info)
-{
-       struct sk_buff *iter, *last = skb;
-       u32 pad;
-
-       put_unaligned_le32(info, skb_push(skb, sizeof(info)));
-       /* Add zero pad of 4 - 7 bytes */
-       pad = round_up(skb->len, 4) + 4 - skb->len;
-
-       /* First packet of a A-MSDU burst keeps track of the whole burst
-        * length, need to update length of it and the last packet.
-        */
-       skb_walk_frags(skb, iter) {
-               last = iter;
-               if (!iter->next) {
-                       skb->data_len += pad;
-                       skb->len += pad;
-                       break;
-               }
-       }
-
-       if (skb_pad(last, pad))
-               return -ENOMEM;
-       __skb_put(last, pad);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);
-
 static int
 mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
                   struct sk_buff *skb, struct mt76_wcid *wcid,
@@ -1161,15 +1115,6 @@ static const struct mt76_queue_ops usb_queue_ops = {
        .kick = mt76u_tx_kick,
 };
 
-void mt76u_deinit(struct mt76_dev *dev)
-{
-       if (dev->usb.wq) {
-               destroy_workqueue(dev->usb.wq);
-               dev->usb.wq = NULL;
-       }
-}
-EXPORT_SYMBOL_GPL(mt76u_deinit);
-
 int mt76u_init(struct mt76_dev *dev,
               struct usb_interface *intf, bool ext)
 {
@@ -1192,10 +1137,6 @@ int mt76u_init(struct mt76_dev *dev,
        tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
        INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
 
-       usb->wq = alloc_workqueue("mt76u", WQ_UNBOUND, 0);
-       if (!usb->wq)
-               return -ENOMEM;
-
        usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
        if (usb->data_len < 32)
                usb->data_len = 32;
@@ -1219,7 +1160,8 @@ int mt76u_init(struct mt76_dev *dev,
        return 0;
 
 error:
-       mt76u_deinit(dev);
+       destroy_workqueue(dev->wq);
+
        return err;
 }
 EXPORT_SYMBOL_GPL(mt76u_init);
index ecde874..f53bb4a 100644 (file)
@@ -13,7 +13,7 @@ bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
 
        timeout /= 10;
        do {
-               cur = dev->bus->rr(dev, offset) & mask;
+               cur = __mt76_rr(dev, offset) & mask;
                if (cur == val)
                        return true;
 
@@ -31,7 +31,7 @@ bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
 
        timeout /= 10;
        do {
-               cur = dev->bus->rr(dev, offset) & mask;
+               cur = __mt76_rr(dev, offset) & mask;
                if (cur == val)
                        return true;
 
index af55ed8..1b5cc27 100644 (file)
@@ -116,8 +116,10 @@ mt7601u_mcu_msg_send(struct mt7601u_dev *dev, struct sk_buff *skb,
        int sent, ret;
        u8 seq = 0;
 
-       if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
+       if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) {
+               consume_skb(skb);
                return 0;
+       }
 
        mutex_lock(&dev->mcu.mutex);
 
diff --git a/drivers/net/wireless/microchip/Kconfig b/drivers/net/wireless/microchip/Kconfig
new file mode 100644 (file)
index 0000000..a6b46fb
--- /dev/null
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+config WLAN_VENDOR_MICROCHIP
+       bool "Microchip devices"
+       default y
+       help
+       If you have a wireless card belonging to this class, say Y.
+
+       Note that the answer to this question doesn't directly affect the
+       kernel: saying N will just cause the configurator to skip all the
+       questions about these cards. If you say Y, you will be asked for
+       your specific card in the following questions.
+
+if WLAN_VENDOR_MICROCHIP
+source "drivers/net/wireless/microchip/wilc1000/Kconfig"
+endif # WLAN_VENDOR_MICROCHIP
diff --git a/drivers/net/wireless/microchip/Makefile b/drivers/net/wireless/microchip/Makefile
new file mode 100644 (file)
index 0000000..73b763c
--- /dev/null
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_WILC1000)         += wilc1000/
similarity index 72%
rename from drivers/staging/wilc1000/Makefile
rename to drivers/net/wireless/microchip/wilc1000/Makefile
index a3305a0..c3c9e34 100644 (file)
@@ -1,9 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_WILC1000) += wilc1000.o
 
-ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \
-               -DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\"
-
 wilc1000-objs := cfg80211.o netdev.o mon.o \
                        hif.o wlan_cfg.o wlan.o
 
similarity index 99%
rename from drivers/staging/wilc1000/cfg80211.c
rename to drivers/net/wireless/microchip/wilc1000/cfg80211.c
index b6065a0..c1ac1d8 100644 (file)
@@ -46,9 +46,11 @@ static const struct ieee80211_txrx_stypes
        }
 };
 
+#ifdef CONFIG_PM
 static const struct wiphy_wowlan_support wowlan_support = {
        .flags = WIPHY_WOWLAN_ANY
 };
+#endif
 
 struct wilc_p2p_mgmt_data {
        int size;
similarity index 98%
rename from drivers/staging/wilc1000/mon.c
rename to drivers/net/wireless/microchip/wilc1000/mon.c
index 6033141..358ac86 100644 (file)
@@ -229,8 +229,7 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
                return NULL;
 
        wl->monitor_dev->type = ARPHRD_IEEE80211_RADIOTAP;
-       strncpy(wl->monitor_dev->name, name, IFNAMSIZ);
-       wl->monitor_dev->name[IFNAMSIZ - 1] = 0;
+       strlcpy(wl->monitor_dev->name, name, IFNAMSIZ);
        wl->monitor_dev->netdev_ops = &wilc_wfi_netdev_ops;
        wl->monitor_dev->needs_free_netdev = true;
 
similarity index 96%
rename from drivers/staging/wilc1000/netdev.c
rename to drivers/net/wireless/microchip/wilc1000/netdev.c
index fda0ab9..20615c7 100644 (file)
 
 #define WILC_MULTICAST_TABLE_SIZE      8
 
+/* latest API version supported */
+#define WILC1000_API_VER               1
+
+#define WILC1000_FW_PREFIX             "atmel/wilc1000_wifi_firmware-"
+#define __WILC1000_FW(api)             WILC1000_FW_PREFIX #api ".bin"
+#define WILC1000_FW(api)               __WILC1000_FW(api)
+
 static irqreturn_t isr_uh_routine(int irq, void *user_data)
 {
        struct net_device *dev = user_data;
@@ -176,23 +183,22 @@ static int wilc_wlan_get_firmware(struct net_device *dev)
        struct wilc_vif *vif = netdev_priv(dev);
        struct wilc *wilc = vif->wilc;
        int chip_id;
-       const struct firmware *wilc_firmware;
-       char *firmware;
+       const struct firmware *wilc_fw;
+       int ret;
 
        chip_id = wilc_get_chipid(wilc, false);
 
-       if (chip_id < 0x1003a0)
-               firmware = FIRMWARE_1002;
-       else
-               firmware = FIRMWARE_1003;
-
-       netdev_info(dev, "loading firmware %s\n", firmware);
+       netdev_info(dev, "ChipID [%x] loading firmware [%s]\n", chip_id,
+                   WILC1000_FW(WILC1000_API_VER));
 
-       if (request_firmware(&wilc_firmware, firmware, wilc->dev) != 0) {
-               netdev_err(dev, "%s - firmware not available\n", firmware);
+       ret = request_firmware(&wilc_fw, WILC1000_FW(WILC1000_API_VER),
+                              wilc->dev);
+       if (ret != 0) {
+               netdev_err(dev, "%s - firmware not available\n",
+                          WILC1000_FW(WILC1000_API_VER));
                return -EINVAL;
        }
-       wilc->firmware = wilc_firmware;
+       wilc->firmware = wilc_fw;
 
        return 0;
 }
@@ -678,14 +684,14 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
 
        if (skb->dev != ndev) {
                netdev_err(ndev, "Packet not destined to this device\n");
-               return 0;
+               return NETDEV_TX_OK;
        }
 
        tx_data = kmalloc(sizeof(*tx_data), GFP_ATOMIC);
        if (!tx_data) {
                dev_kfree_skb(skb);
                netif_wake_queue(ndev);
-               return 0;
+               return NETDEV_TX_OK;
        }
 
        tx_data->buff = skb->data;
@@ -710,7 +716,7 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
                srcu_read_unlock(&wilc->srcu, srcu_idx);
        }
 
-       return 0;
+       return NETDEV_TX_OK;
 }
 
 static int wilc_mac_close(struct net_device *ndev)
@@ -929,3 +935,4 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
 }
 
 MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(WILC1000_FW(WILC1000_API_VER));
similarity index 99%
rename from drivers/staging/wilc1000/sdio.c
rename to drivers/net/wireless/microchip/wilc1000/sdio.c
index 36eb589..3ece7b0 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <linux/clk.h>
 #include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/sdio.h>
 #include <linux/of_irq.h>
 
 #define SDIO_MODALIAS "wilc1000_sdio"
 
-#define SDIO_VENDOR_ID_WILC 0x0296
-#define SDIO_DEVICE_ID_WILC 0x5347
-
 static const struct sdio_device_id wilc_sdio_ids[] = {
-       { SDIO_DEVICE(SDIO_VENDOR_ID_WILC, SDIO_DEVICE_ID_WILC) },
+       { SDIO_DEVICE(SDIO_VENDOR_ID_MICROCHIP_WILC, SDIO_DEVICE_ID_MICROCHIP_WILC1000) },
        { },
 };
 
index eea777f..6aafff9 100644 (file)
@@ -446,8 +446,11 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
        }
 
        wiphy = qtnf_wiphy_allocate(bus, pdev);
-       if (!wiphy)
+       if (!wiphy) {
+               if (pdev)
+                       platform_device_unregister(pdev);
                return ERR_PTR(-ENOMEM);
+       }
 
        mac = wiphy_priv(wiphy);
 
index 4d44509..c1ac933 100644 (file)
@@ -1834,8 +1834,7 @@ static struct pci_driver rt2400pci_driver = {
        .id_table       = rt2400pci_device_table,
        .probe          = rt2400pci_probe,
        .remove         = rt2x00pci_remove,
-       .suspend        = rt2x00pci_suspend,
-       .resume         = rt2x00pci_resume,
+       .driver.pm      = &rt2x00pci_pm_ops,
 };
 
 module_pci_driver(rt2400pci_driver);
index 4620990..0859ade 100644 (file)
@@ -2132,8 +2132,7 @@ static struct pci_driver rt2500pci_driver = {
        .id_table       = rt2500pci_device_table,
        .probe          = rt2500pci_probe,
        .remove         = rt2x00pci_remove,
-       .suspend        = rt2x00pci_suspend,
-       .resume         = rt2x00pci_resume,
+       .driver.pm      = &rt2x00pci_pm_ops,
 };
 
 module_pci_driver(rt2500pci_driver);
index 3868c07..9a33baa 100644 (file)
@@ -455,8 +455,7 @@ static struct pci_driver rt2800pci_driver = {
        .id_table       = rt2800pci_device_table,
        .probe          = rt2800pci_probe,
        .remove         = rt2x00pci_remove,
-       .suspend        = rt2x00pci_suspend,
-       .resume         = rt2x00pci_resume,
+       .driver.pm      = &rt2x00pci_pm_ops,
 };
 
 module_pci_driver(rt2800pci_driver);
index ea8a34e..ecc60d8 100644 (file)
@@ -1487,9 +1487,8 @@ bool rt2x00mac_tx_frames_pending(struct ieee80211_hw *hw);
  */
 int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev);
 void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev);
-#ifdef CONFIG_PM
-int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state);
+
+int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev);
 int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev);
-#endif /* CONFIG_PM */
 
 #endif /* RT2X00_H */
index 7f9e43a..8c6d309 100644 (file)
@@ -1556,8 +1556,7 @@ EXPORT_SYMBOL_GPL(rt2x00lib_remove_dev);
 /*
  * Device state handlers
  */
-#ifdef CONFIG_PM
-int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state)
+int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev)
 {
        rt2x00_dbg(rt2x00dev, "Going to sleep\n");
 
@@ -1614,7 +1613,6 @@ int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
        return 0;
 }
 EXPORT_SYMBOL_GPL(rt2x00lib_resume);
-#endif /* CONFIG_PM */
 
 /*
  * rt2x00lib module information.
index 7f9baa9..cabeef0 100644 (file)
@@ -169,39 +169,24 @@ void rt2x00pci_remove(struct pci_dev *pci_dev)
 }
 EXPORT_SYMBOL_GPL(rt2x00pci_remove);
 
-#ifdef CONFIG_PM
-int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
+static int __maybe_unused rt2x00pci_suspend(struct device *dev)
 {
-       struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
+       struct ieee80211_hw *hw = dev_get_drvdata(dev);
        struct rt2x00_dev *rt2x00dev = hw->priv;
-       int retval;
-
-       retval = rt2x00lib_suspend(rt2x00dev, state);
-       if (retval)
-               return retval;
 
-       pci_save_state(pci_dev);
-       pci_disable_device(pci_dev);
-       return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
+       return rt2x00lib_suspend(rt2x00dev);
 }
-EXPORT_SYMBOL_GPL(rt2x00pci_suspend);
 
-int rt2x00pci_resume(struct pci_dev *pci_dev)
+static int __maybe_unused rt2x00pci_resume(struct device *dev)
 {
-       struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
+       struct ieee80211_hw *hw = dev_get_drvdata(dev);
        struct rt2x00_dev *rt2x00dev = hw->priv;
 
-       if (pci_set_power_state(pci_dev, PCI_D0) ||
-           pci_enable_device(pci_dev)) {
-               rt2x00_err(rt2x00dev, "Failed to resume device\n");
-               return -EIO;
-       }
-
-       pci_restore_state(pci_dev);
        return rt2x00lib_resume(rt2x00dev);
 }
-EXPORT_SYMBOL_GPL(rt2x00pci_resume);
-#endif /* CONFIG_PM */
+
+SIMPLE_DEV_PM_OPS(rt2x00pci_pm_ops, rt2x00pci_suspend, rt2x00pci_resume);
+EXPORT_SYMBOL_GPL(rt2x00pci_pm_ops);
 
 /*
  * rt2x00pci module information.
index fd955cc..27f7b2b 100644 (file)
  */
 int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops);
 void rt2x00pci_remove(struct pci_dev *pci_dev);
-#ifdef CONFIG_PM
-int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state);
-int rt2x00pci_resume(struct pci_dev *pci_dev);
-#else
-#define rt2x00pci_suspend      NULL
-#define rt2x00pci_resume       NULL
-#endif /* CONFIG_PM */
+
+extern const struct dev_pm_ops rt2x00pci_pm_ops;
 
 #endif /* RT2X00PCI_H */
index 596b8a4..eface61 100644 (file)
@@ -130,7 +130,7 @@ int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state)
        struct ieee80211_hw *hw = platform_get_drvdata(pdev);
        struct rt2x00_dev *rt2x00dev = hw->priv;
 
-       return rt2x00lib_suspend(rt2x00dev, state);
+       return rt2x00lib_suspend(rt2x00dev);
 }
 EXPORT_SYMBOL_GPL(rt2x00soc_suspend);
 
index 92e9e02..e4473a5 100644 (file)
@@ -886,7 +886,7 @@ int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
        struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
        struct rt2x00_dev *rt2x00dev = hw->priv;
 
-       return rt2x00lib_suspend(rt2x00dev, state);
+       return rt2x00lib_suspend(rt2x00dev);
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_suspend);
 
index d83288b..eefce76 100644 (file)
@@ -3009,8 +3009,7 @@ static struct pci_driver rt61pci_driver = {
        .id_table       = rt61pci_device_table,
        .probe          = rt61pci_probe,
        .remove         = rt2x00pci_remove,
-       .suspend        = rt2x00pci_suspend,
-       .resume         = rt2x00pci_resume,
+       .driver.pm      = &rt2x00pci_pm_ops,
 };
 
 module_pci_driver(rt61pci_driver);
index d5f6537..ba3286f 100644 (file)
@@ -1966,32 +1966,17 @@ static void rtl8180_remove(struct pci_dev *pdev)
        ieee80211_free_hw(dev);
 }
 
-#ifdef CONFIG_PM
-static int rtl8180_suspend(struct pci_dev *pdev, pm_message_t state)
-{
-       pci_save_state(pdev);
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
-       return 0;
-}
-
-static int rtl8180_resume(struct pci_dev *pdev)
-{
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-       return 0;
-}
+#define rtl8180_suspend NULL
+#define rtl8180_resume NULL
 
-#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(rtl8180_pm_ops, rtl8180_suspend, rtl8180_resume);
 
 static struct pci_driver rtl8180_driver = {
        .name           = KBUILD_MODNAME,
        .id_table       = rtl8180_table,
        .probe          = rtl8180_probe,
        .remove         = rtl8180_remove,
-#ifdef CONFIG_PM
-       .suspend        = rtl8180_suspend,
-       .resume         = rtl8180_resume,
-#endif /* CONFIG_PM */
+       .driver.pm      = &rtl8180_pm_ops,
 };
 
 module_pci_driver(rtl8180_driver);
index a4940a3..2b140c1 100644 (file)
@@ -894,11 +894,9 @@ static void halbtc_display_wifi_status(struct btc_coexist *btcoexist,
                   (low_power ? ", 32k" : ""));
 
        seq_printf(m,
-                  "\n %-35s = %02x %02x %02x %02x %02x %02x (0x%x/0x%x)",
+                  "\n %-35s = %6ph (0x%x/0x%x)",
                   "Power mode cmd(lps/rpwm)",
-                  btcoexist->pwr_mode_val[0], btcoexist->pwr_mode_val[1],
-                  btcoexist->pwr_mode_val[2], btcoexist->pwr_mode_val[3],
-                  btcoexist->pwr_mode_val[4], btcoexist->pwr_mode_val[5],
+                  btcoexist->pwr_mode_val,
                   btcoexist->bt_info.lps_val,
                   btcoexist->bt_info.rpwm_val);
 }
@@ -1318,7 +1316,7 @@ bool exhalbtc_bind_bt_coex_withadapter(void *adapter)
 {
        struct rtl_priv *rtlpriv = adapter;
        struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv);
-       u8 ant_num = 2, chip_type, single_ant_path = 0;
+       u8 ant_num, chip_type, single_ant_path;
 
        if (!btcoexist)
                return false;
index bc0ac96..90f9272 100644 (file)
@@ -769,13 +769,13 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
                                         *(u8 *)(ie + index);
                                        index += 1;
                                        p2pinfo->noa_duration[i] =
-                                        le32_to_cpu(*(__le32 *)ie + index);
+                                        le32_to_cpu(*(__le32 *)(ie + index));
                                        index += 4;
                                        p2pinfo->noa_interval[i] =
-                                        le32_to_cpu(*(__le32 *)ie + index);
+                                        le32_to_cpu(*(__le32 *)(ie + index));
                                        index += 4;
                                        p2pinfo->noa_start_time[i] =
-                                        le32_to_cpu(*(__le32 *)ie + index);
+                                        le32_to_cpu(*(__le32 *)(ie + index));
                                        index += 4;
                                }
 
@@ -864,13 +864,13 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
                                         *(u8 *)(ie + index);
                                        index += 1;
                                        p2pinfo->noa_duration[i] =
-                                        le32_to_cpu(*(__le32 *)ie + index);
+                                        le32_to_cpu(*(__le32 *)(ie + index));
                                        index += 4;
                                        p2pinfo->noa_interval[i] =
-                                        le32_to_cpu(*(__le32 *)ie + index);
+                                        le32_to_cpu(*(__le32 *)(ie + index));
                                        index += 4;
                                        p2pinfo->noa_start_time[i] =
-                                        le32_to_cpu(*(__le32 *)ie + index);
+                                        le32_to_cpu(*(__le32 *)(ie + index));
                                        index += 4;
                                }
 
index dceb04a..1ffa188 100644 (file)
@@ -870,11 +870,11 @@ static void dm_txpower_track_cb_therm(struct ieee80211_hw *hw)
        /*0.1 the following TWO tables decide the
         *final index of OFDM/CCK swing table
         */
-       s8 delta_swing_table_idx[2][15]  = {
+       static const s8 delta_swing_table_idx[2][15]  = {
                {0, 0, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11},
                {0, 0, -1, -2, -3, -4, -4, -4, -4, -5, -7, -8, -9, -9, -10}
        };
-       u8 thermal_threshold[2][15] = {
+       static const u8 thermal_threshold[2][15] = {
                {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 27},
                {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 25, 25, 25}
        };
index aa2e9e8..a5d2d6e 100644 (file)
@@ -497,7 +497,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
        dma_addr_t mapping;
        u8 bw_40 = 0;
        u8 short_gi = 0;
-       __le32 *pdesc = (u32 *)pdesc8;
+       __le32 *pdesc = (__le32 *)pdesc8;
 
        if (mac->opmode == NL80211_IFTYPE_STATION) {
                bw_40 = mac->bw_40;
index f070f25..5b071b7 100644 (file)
@@ -592,7 +592,7 @@ static void _rtl92cu_init_chipn_one_out_ep_priority(struct ieee80211_hw *hw,
                                                    bool wmm_enable,
                                                    u8 queue_sel)
 {
-       u16 uninitialized_var(value);
+       u16 value;
 
        switch (queue_sel) {
        case TX_SELE_HQ:
@@ -606,7 +606,7 @@ static void _rtl92cu_init_chipn_one_out_ep_priority(struct ieee80211_hw *hw,
                break;
        default:
                WARN_ON(1); /* Shall not reach here! */
-               break;
+               return;
        }
        _rtl92c_init_chipn_reg_priority(hw, value, value, value, value,
                                        value, value);
index b13fd3c..c9b3d9d 100644 (file)
@@ -736,11 +736,11 @@ static void rtl8723be_dm_txpower_tracking_callback_thermalmeter(
        u8 ofdm_min_index = 6;
        u8 index_for_channel = 0;
 
-       s8 delta_swing_table_idx_tup_a[TXSCALE_TABLE_SIZE] = {
+       static const s8 delta_swing_table_idx_tup_a[TXSCALE_TABLE_SIZE] = {
                0, 0, 1, 2, 2, 2, 3, 3, 3, 4,  5,
                5, 6, 6, 7, 7, 8, 8, 9, 9, 9, 10,
                10, 11, 11, 12, 12, 13, 14, 15};
-       s8 delta_swing_table_idx_tdown_a[TXSCALE_TABLE_SIZE] = {
+       static const s8 delta_swing_table_idx_tdown_a[TXSCALE_TABLE_SIZE] = {
                0, 0, 1, 2, 2, 2, 3, 3, 3, 4,  5,
                5, 6, 6, 6, 6, 7, 7, 7, 8, 8,  9,
                9, 10, 10, 11, 12, 13, 14, 15};
index f57e879..97a30cc 100644 (file)
@@ -115,47 +115,47 @@ static const u32 edca_setting_ul[PEER_MAX] = {
        0x5ea44f,       /* 7 MARV */
 };
 
-static u8 rtl8818e_delta_swing_table_idx_24gb_p[] = {
+static const u8 rtl8818e_delta_swing_table_idx_24gb_p[] = {
        0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 4,
        4, 4, 4, 5, 5, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9};
 
-static u8 rtl8818e_delta_swing_table_idx_24gb_n[] = {
+static const u8 rtl8818e_delta_swing_table_idx_24gb_n[] = {
        0, 0, 0, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6,
        7, 7, 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 11, 11};
 
-static u8 rtl8812ae_delta_swing_table_idx_24gb_n[]  = {
+static const u8 rtl8812ae_delta_swing_table_idx_24gb_n[]  = {
        0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
        6, 6, 7, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11};
 
-static u8 rtl8812ae_delta_swing_table_idx_24gb_p[] = {
+static const u8 rtl8812ae_delta_swing_table_idx_24gb_p[] = {
        0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6,
        6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9};
 
-static u8 rtl8812ae_delta_swing_table_idx_24ga_n[] = {
+static const u8 rtl8812ae_delta_swing_table_idx_24ga_n[] = {
        0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
        6, 6, 7, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11, 11};
 
-static u8 rtl8812ae_delta_swing_table_idx_24ga_p[] = {
+static const u8 rtl8812ae_delta_swing_table_idx_24ga_p[] = {
        0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6,
        6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9};
 
-static u8 rtl8812ae_delta_swing_table_idx_24gcckb_n[] = {
+static const u8 rtl8812ae_delta_swing_table_idx_24gcckb_n[] = {
        0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
        6, 6, 7, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11};
 
-static u8 rtl8812ae_delta_swing_table_idx_24gcckb_p[] = {
+static const u8 rtl8812ae_delta_swing_table_idx_24gcckb_p[] = {
        0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6,
        6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9};
 
-static u8 rtl8812ae_delta_swing_table_idx_24gccka_n[] = {
+static const u8 rtl8812ae_delta_swing_table_idx_24gccka_n[] = {
        0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6,
        6, 6, 7, 8, 8, 9, 9, 9, 10, 10, 10, 10, 11, 11};
 
-static u8 rtl8812ae_delta_swing_table_idx_24gccka_p[] = {
+static const u8 rtl8812ae_delta_swing_table_idx_24gccka_p[] = {
        0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6,
        6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 9, 9, 9};
 
-static u8 rtl8812ae_delta_swing_table_idx_5gb_n[][DEL_SW_IDX_SZ] = {
+static const u8 rtl8812ae_delta_swing_table_idx_5gb_n[][DEL_SW_IDX_SZ] = {
        {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7,
        7, 8, 8, 9, 9, 9, 10, 10, 11, 11, 12, 12, 13},
        {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7,
@@ -164,7 +164,7 @@ static u8 rtl8812ae_delta_swing_table_idx_5gb_n[][DEL_SW_IDX_SZ] = {
        12, 12, 13, 14, 14, 14, 15, 16, 17, 17, 17, 18, 18, 18},
 };
 
-static u8 rtl8812ae_delta_swing_table_idx_5gb_p[][DEL_SW_IDX_SZ] = {
+static const u8 rtl8812ae_delta_swing_table_idx_5gb_p[][DEL_SW_IDX_SZ] = {
        {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 8,
        8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11},
        {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8,
@@ -173,7 +173,7 @@ static u8 rtl8812ae_delta_swing_table_idx_5gb_p[][DEL_SW_IDX_SZ] = {
        9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11},
 };
 
-static u8 rtl8812ae_delta_swing_table_idx_5ga_n[][DEL_SW_IDX_SZ] = {
+static const u8 rtl8812ae_delta_swing_table_idx_5ga_n[][DEL_SW_IDX_SZ] = {
        {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8,
        8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 13, 13, 13},
        {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 9,
@@ -182,7 +182,7 @@ static u8 rtl8812ae_delta_swing_table_idx_5ga_n[][DEL_SW_IDX_SZ] = {
        12, 13, 14, 14, 15, 15, 15, 16, 16, 16, 17, 17, 18, 18},
 };
 
-static u8 rtl8812ae_delta_swing_table_idx_5ga_p[][DEL_SW_IDX_SZ] = {
+static const u8 rtl8812ae_delta_swing_table_idx_5ga_p[][DEL_SW_IDX_SZ] = {
        {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 7, 7, 8,
        8, 9, 9, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11},
        {0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8,
@@ -191,57 +191,23 @@ static u8 rtl8812ae_delta_swing_table_idx_5ga_p[][DEL_SW_IDX_SZ] = {
        10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11},
 };
 
-static u8 rtl8821ae_delta_swing_table_idx_24gb_n[] = {
+static const u8 rtl8821ae_delta_swing_table_idx_24ga_n[]  = {
        0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6,
        6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10};
 
-static u8 rtl8821ae_delta_swing_table_idx_24gb_p[]  = {
+static const u8 rtl8821ae_delta_swing_table_idx_24ga_p[] = {
        0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
        8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12};
 
-static u8 rtl8821ae_delta_swing_table_idx_24ga_n[]  = {
+static const u8 rtl8821ae_delta_swing_table_idx_24gccka_n[] = {
        0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6,
        6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10};
 
-static u8 rtl8821ae_delta_swing_table_idx_24ga_p[] = {
+static const u8 rtl8821ae_delta_swing_table_idx_24gccka_p[] = {
        0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
        8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12};
 
-static u8 rtl8821ae_delta_swing_table_idx_24gcckb_n[] = {
-       0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6,
-       6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10};
-
-static u8 rtl8821ae_delta_swing_table_idx_24gcckb_p[] = {
-       0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
-       8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12};
-
-static u8 rtl8821ae_delta_swing_table_idx_24gccka_n[] = {
-       0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6,
-       6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10};
-
-static u8 rtl8821ae_delta_swing_table_idx_24gccka_p[] = {
-       0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
-       8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 12, 12};
-
-static u8 rtl8821ae_delta_swing_table_idx_5gb_n[][DEL_SW_IDX_SZ] = {
-       {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
-       12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
-       {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
-       12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
-       {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
-       12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
-};
-
-static u8 rtl8821ae_delta_swing_table_idx_5gb_p[][DEL_SW_IDX_SZ] = {
-       {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
-       12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
-       {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
-       12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
-       {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
-       12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
-};
-
-static u8 rtl8821ae_delta_swing_table_idx_5ga_n[][DEL_SW_IDX_SZ] = {
+static const u8 rtl8821ae_delta_swing_table_idx_5ga_n[][DEL_SW_IDX_SZ] = {
        {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
        12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
        {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
@@ -250,7 +216,7 @@ static u8 rtl8821ae_delta_swing_table_idx_5ga_n[][DEL_SW_IDX_SZ] = {
        12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
 };
 
-static u8 rtl8821ae_delta_swing_table_idx_5ga_p[][DEL_SW_IDX_SZ] = {
+static const u8 rtl8821ae_delta_swing_table_idx_5ga_p[][DEL_SW_IDX_SZ] = {
        {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
        12, 12, 13, 14, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16},
        {0, 0, 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 11,
@@ -962,8 +928,10 @@ static void rtl8821ae_dm_iq_calibrate(struct ieee80211_hw *hw)
 }
 
 static void rtl8812ae_get_delta_swing_table(struct ieee80211_hw *hw,
-                                           u8 **up_a, u8 **down_a,
-                                           u8 **up_b, u8 **down_b)
+                                           const u8 **up_a,
+                                           const u8 **down_a,
+                                           const u8 **up_b,
+                                           const u8 **down_b)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_phy *rtlphy = &rtlpriv->phy;
@@ -999,10 +967,10 @@ static void rtl8812ae_get_delta_swing_table(struct ieee80211_hw *hw,
                *up_b = rtl8812ae_delta_swing_table_idx_5gb_p[2];
                *down_b = rtl8812ae_delta_swing_table_idx_5gb_n[2];
        } else {
-           *up_a = (u8 *)rtl8818e_delta_swing_table_idx_24gb_p;
-           *down_a = (u8 *)rtl8818e_delta_swing_table_idx_24gb_n;
-           *up_b = (u8 *)rtl8818e_delta_swing_table_idx_24gb_p;
-           *down_b = (u8 *)rtl8818e_delta_swing_table_idx_24gb_n;
+               *up_a = rtl8818e_delta_swing_table_idx_24gb_p;
+               *down_a = rtl8818e_delta_swing_table_idx_24gb_n;
+               *up_b = rtl8818e_delta_swing_table_idx_24gb_p;
+               *down_b = rtl8818e_delta_swing_table_idx_24gb_n;
        }
 }
 
@@ -1492,17 +1460,17 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
        /* 1. The following TWO tables decide
         * the final index of OFDM/CCK swing table.
         */
-       u8 *delta_swing_table_idx_tup_a;
-       u8 *delta_swing_table_idx_tdown_a;
-       u8 *delta_swing_table_idx_tup_b;
-       u8 *delta_swing_table_idx_tdown_b;
+       const u8 *delta_swing_table_idx_tup_a;
+       const u8 *delta_swing_table_idx_tdown_a;
+       const u8 *delta_swing_table_idx_tup_b;
+       const u8 *delta_swing_table_idx_tdown_b;
 
        /*2. Initilization ( 7 steps in total )*/
        rtl8812ae_get_delta_swing_table(hw,
-               (u8 **)&delta_swing_table_idx_tup_a,
-               (u8 **)&delta_swing_table_idx_tdown_a,
-               (u8 **)&delta_swing_table_idx_tup_b,
-               (u8 **)&delta_swing_table_idx_tdown_b);
+               &delta_swing_table_idx_tup_a,
+               &delta_swing_table_idx_tdown_a,
+               &delta_swing_table_idx_tup_b,
+               &delta_swing_table_idx_tdown_b);
 
        rtldm->txpower_trackinginit = true;
 
@@ -1830,8 +1798,9 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
                 "<===rtl8812ae_dm_txpower_tracking_callback_thermalmeter\n");
 }
 
-static void rtl8821ae_get_delta_swing_table(struct ieee80211_hw *hw, u8 **up_a,
-                                           u8 **down_a, u8 **up_b, u8 **down_b)
+static void rtl8821ae_get_delta_swing_table(struct ieee80211_hw *hw,
+                                           const u8 **up_a,
+                                           const u8 **down_a)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_phy *rtlphy = &rtlpriv->phy;
@@ -1843,34 +1812,22 @@ static void rtl8821ae_get_delta_swing_table(struct ieee80211_hw *hw, u8 **up_a,
                if (RTL8821AE_RX_HAL_IS_CCK_RATE(rate)) {
                        *up_a = rtl8821ae_delta_swing_table_idx_24gccka_p;
                        *down_a = rtl8821ae_delta_swing_table_idx_24gccka_n;
-                       *up_b = rtl8821ae_delta_swing_table_idx_24gcckb_p;
-                       *down_b = rtl8821ae_delta_swing_table_idx_24gcckb_n;
                } else {
                        *up_a = rtl8821ae_delta_swing_table_idx_24ga_p;
                        *down_a = rtl8821ae_delta_swing_table_idx_24ga_n;
-                       *up_b = rtl8821ae_delta_swing_table_idx_24gb_p;
-                       *down_b = rtl8821ae_delta_swing_table_idx_24gb_n;
                }
        } else if (36 <= channel && channel <= 64) {
                *up_a = rtl8821ae_delta_swing_table_idx_5ga_p[0];
                *down_a = rtl8821ae_delta_swing_table_idx_5ga_n[0];
-               *up_b = rtl8821ae_delta_swing_table_idx_5gb_p[0];
-               *down_b = rtl8821ae_delta_swing_table_idx_5gb_n[0];
        } else if (100 <= channel && channel <= 140) {
                *up_a = rtl8821ae_delta_swing_table_idx_5ga_p[1];
                *down_a = rtl8821ae_delta_swing_table_idx_5ga_n[1];
-               *up_b = rtl8821ae_delta_swing_table_idx_5gb_p[1];
-               *down_b = rtl8821ae_delta_swing_table_idx_5gb_n[1];
        } else if (149 <= channel && channel <= 173) {
                *up_a = rtl8821ae_delta_swing_table_idx_5ga_p[2];
                *down_a = rtl8821ae_delta_swing_table_idx_5ga_n[2];
-               *up_b = rtl8821ae_delta_swing_table_idx_5gb_p[2];
-               *down_b = rtl8821ae_delta_swing_table_idx_5gb_n[2];
        } else {
-           *up_a = (u8 *)rtl8818e_delta_swing_table_idx_24gb_p;
-           *down_a = (u8 *)rtl8818e_delta_swing_table_idx_24gb_n;
-           *up_b = (u8 *)rtl8818e_delta_swing_table_idx_24gb_p;
-           *down_b = (u8 *)rtl8818e_delta_swing_table_idx_24gb_n;
+               *up_a = rtl8818e_delta_swing_table_idx_24gb_p;
+               *down_a = rtl8818e_delta_swing_table_idx_24gb_n;
        }
        return;
 }
@@ -2075,16 +2032,13 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
        /* 1. The following TWO tables decide the final
         * index of OFDM/CCK swing table.
         */
-       u8 *delta_swing_table_idx_tup_a;
-       u8 *delta_swing_table_idx_tdown_a;
-       u8 *delta_swing_table_idx_tup_b;
-       u8 *delta_swing_table_idx_tdown_b;
+       const u8 *delta_swing_table_idx_tup_a;
+       const u8 *delta_swing_table_idx_tdown_a;
 
        /*2. Initilization ( 7 steps in total )*/
-       rtl8821ae_get_delta_swing_table(hw, (u8 **)&delta_swing_table_idx_tup_a,
-                                       (u8 **)&delta_swing_table_idx_tdown_a,
-                                       (u8 **)&delta_swing_table_idx_tup_b,
-                                       (u8 **)&delta_swing_table_idx_tdown_b);
+       rtl8821ae_get_delta_swing_table(hw,
+                                       &delta_swing_table_idx_tup_a,
+                                       &delta_swing_table_idx_tdown_a);
 
        rtldm->txpower_trackinginit = true;
 
index c66c6dc..d05e709 100644 (file)
@@ -680,8 +680,10 @@ static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw)
        tasklet_kill(&rtlusb->rx_work_tasklet);
        cancel_work_sync(&rtlpriv->works.lps_change_work);
 
-       flush_workqueue(rtlpriv->works.rtl_wq);
-       destroy_workqueue(rtlpriv->works.rtl_wq);
+       if (rtlpriv->works.rtl_wq) {
+               destroy_workqueue(rtlpriv->works.rtl_wq);
+               rtlpriv->works.rtl_wq = NULL;
+       }
 
        skb_queue_purge(&rtlusb->rx_queue);
 
@@ -718,8 +720,11 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw)
 
                usb_anchor_urb(urb, &rtlusb->rx_submitted);
                err = usb_submit_urb(urb, GFP_KERNEL);
-               if (err)
+               if (err) {
+                       usb_unanchor_urb(urb);
+                       usb_free_urb(urb);
                        goto err_out;
+               }
                usb_free_urb(urb);
        }
        return 0;
@@ -1082,6 +1087,7 @@ error_out2:
        usb_put_dev(udev);
        complete(&rtlpriv->firmware_loading_complete);
        kfree(rtlpriv->usb_data);
+       ieee80211_free_hw(hw);
        return -ENODEV;
 }
 EXPORT_SYMBOL(rtl_usb_probe);
index ca894c4..e3d7cb6 100644 (file)
@@ -25,6 +25,9 @@ config RTW88_8822C
 config RTW88_8723D
        tristate
 
+config RTW88_8821C
+       tristate
+
 config RTW88_8822BE
        tristate "Realtek 8822BE PCI wireless network adapter"
        depends on PCI
@@ -58,6 +61,17 @@ config RTW88_8723DE
 
          802.11n PCIe wireless network adapter
 
+config RTW88_8821CE
+       tristate "Realtek 8821CE PCI wireless network adapter"
+       depends on PCI
+       select RTW88_CORE
+       select RTW88_PCI
+       select RTW88_8821C
+       help
+         Select this option will enable support for 8821CE chipset
+
+         802.11ac PCIe wireless network adapter
+
 config RTW88_DEBUG
        bool "Realtek rtw88 debug support"
        depends on RTW88_CORE
index f31e78a..c0e4b11 100644 (file)
@@ -37,5 +37,11 @@ rtw88_8723d-objs             := rtw8723d.o rtw8723d_table.o
 obj-$(CONFIG_RTW88_8723DE)     += rtw88_8723de.o
 rtw88_8723de-objs              := rtw8723de.o
 
+obj-$(CONFIG_RTW88_8821C)      += rtw88_8821c.o
+rtw88_8821c-objs               := rtw8821c.o rtw8821c_table.o
+
+obj-$(CONFIG_RTW88_8821CE)     += rtw88_8821ce.o
+rtw88_8821ce-objs              := rtw8821ce.o
+
 obj-$(CONFIG_RTW88_PCI)                += rtw88_pci.o
 rtw88_pci-objs                 := pci.o
index 8a070d5..aff70e4 100644 (file)
@@ -183,7 +183,7 @@ void rtw_bf_del_sounding(struct rtw_dev *rtwdev)
 void rtw_bf_enable_bfee_su(struct rtw_dev *rtwdev, struct rtw_vif *vif,
                           struct rtw_bfee *bfee)
 {
-       u8 nc_index = 1;
+       u8 nc_index = hweight8(rtwdev->hal.antenna_rx) - 1;
        u8 nr_index = bfee->sound_dim;
        u8 grouping = 0, codebookinfo = 1, coefficientsize = 3;
        u32 addr_bfer_info, addr_csi_rpt, csi_param;
@@ -231,7 +231,8 @@ void rtw_bf_enable_bfee_mu(struct rtw_dev *rtwdev, struct rtw_vif *vif,
 {
        struct rtw_bf_info *bf_info = &rtwdev->bf_info;
        struct mu_bfer_init_para param;
-       u8 nc_index = 1, nr_index = 1;
+       u8 nc_index = hweight8(rtwdev->hal.antenna_rx) - 1;
+       u8 nr_index = 1;
        u8 grouping = 0, codebookinfo = 1, coefficientsize = 0;
        u32 csi_param;
 
index cbf3d50..aa08fd7 100644 (file)
@@ -378,6 +378,7 @@ static void rtw_coex_update_wl_link_info(struct rtw_dev *rtwdev, u8 reason)
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_traffic_stats *stats = &rtwdev->stats;
        bool is_5G = false;
+       bool wl_busy = false;
        bool scan = false, link = false;
        int i;
        u8 rssi_state;
@@ -386,7 +387,16 @@ static void rtw_coex_update_wl_link_info(struct rtw_dev *rtwdev, u8 reason)
 
        scan = test_bit(RTW_FLAG_SCANNING, rtwdev->flags);
        coex_stat->wl_connected = !!rtwdev->sta_cnt;
-       coex_stat->wl_gl_busy = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
+
+       wl_busy = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
+       if (wl_busy != coex_stat->wl_gl_busy) {
+               if (wl_busy)
+                       coex_stat->wl_gl_busy = true;
+               else
+                       ieee80211_queue_delayed_work(rtwdev->hw,
+                                                    &coex->wl_remain_work,
+                                                    12 * HZ);
+       }
 
        if (stats->tx_throughput > stats->rx_throughput)
                coex_stat->wl_tput_dir = COEX_WL_TPUT_TX;
@@ -888,10 +898,12 @@ static void rtw_coex_tdma(struct rtw_dev *rtwdev, bool force, u32 tcase)
 {
        struct rtw_coex *coex = &rtwdev->coex;
        struct rtw_coex_dm *coex_dm = &coex->dm;
+       struct rtw_coex_stat *coex_stat = &coex->stat;
        struct rtw_chip_info *chip = rtwdev->chip;
        struct rtw_efuse *efuse = &rtwdev->efuse;
        u8 n, type;
        bool turn_on;
+       bool wl_busy = false;
 
        if (tcase & TDMA_4SLOT)/* 4-slot (50ms) mode */
                rtw_coex_tdma_timer_base(rtwdev, 3);
@@ -909,13 +921,18 @@ static void rtw_coex_tdma(struct rtw_dev *rtwdev, bool force, u32 tcase)
                }
        }
 
-       if (turn_on) {
-               /* enable TBTT interrupt */
+       /* enable TBTT interrupt */
+       if (turn_on)
                rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
-               rtw_coex_write_scbd(rtwdev, COEX_SCBD_TDMA, true);
-       } else {
+
+       wl_busy = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
+
+       if ((coex_stat->bt_a2dp_exist &&
+            (coex_stat->bt_inq_remain || coex_stat->bt_multi_link)) ||
+           !wl_busy)
                rtw_coex_write_scbd(rtwdev, COEX_SCBD_TDMA, false);
-       }
+       else
+               rtw_coex_write_scbd(rtwdev, COEX_SCBD_TDMA, true);
 
        if (efuse->share_ant) {
                if (type < chip->tdma_sant_num)
@@ -1323,20 +1340,31 @@ static void rtw_coex_action_bt_inquiry(struct rtw_dev *rtwdev)
                /* Shared-Ant */
                if (wl_hi_pri) {
                        table_case = 15;
-                       if (coex_stat->bt_a2dp_exist &&
-                           !coex_stat->bt_pan_exist) {
-                               slot_type = TDMA_4SLOT;
-                               tdma_case = 11;
-                       } else if (coex_stat->wl_hi_pri_task1) {
+                       if (coex_stat->bt_profile_num > 0)
+                               tdma_case = 10;
+                       else if (coex_stat->wl_hi_pri_task1)
                                tdma_case = 6;
-                       } else if (!coex_stat->bt_page) {
+                       else if (!coex_stat->bt_page)
                                tdma_case = 8;
-                       } else {
+                       else
                                tdma_case = 9;
+               } else if (coex_stat->wl_gl_busy) {
+                       if (coex_stat->bt_profile_num == 0) {
+                               table_case = 12;
+                               tdma_case = 18;
+                       } else if (coex_stat->bt_profile_num == 1 &&
+                                  !coex_stat->bt_a2dp_exist) {
+                               slot_type = TDMA_4SLOT;
+                               table_case = 12;
+                               tdma_case = 20;
+                       } else {
+                               slot_type = TDMA_4SLOT;
+                               table_case = 12;
+                               tdma_case = 26;
                        }
                } else if (coex_stat->wl_connected) {
-                       table_case = 10;
-                       tdma_case = 10;
+                       table_case = 9;
+                       tdma_case = 27;
                } else {
                        table_case = 1;
                        tdma_case = 0;
@@ -1934,7 +1962,8 @@ static void rtw_coex_run_coex(struct rtw_dev *rtwdev, u8 reason)
        if (coex_stat->wl_under_ips)
                return;
 
-       if (coex->freeze && !coex_stat->bt_setup_link)
+       if (coex->freeze && coex_dm->reason == COEX_RSN_BTINFO &&
+           !coex_stat->bt_setup_link)
                return;
 
        coex_stat->cnt_wl[COEX_CNT_WL_COEXRUN]++;
@@ -2277,6 +2306,7 @@ void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
        struct rtw_chip_info *chip = rtwdev->chip;
        unsigned long bt_relink_time;
        u8 i, rsp_source = 0, type;
+       bool inq_page = false;
 
        rsp_source = buf[0] & 0xf;
        if (rsp_source >= COEX_BTINFO_SRC_MAX)
@@ -2343,7 +2373,20 @@ void rtw_coex_bt_info_notify(struct rtw_dev *rtwdev, u8 *buf, u8 length)
 
        /* 0xff means BT is under WHCK test */
        coex_stat->bt_whck_test = (coex_stat->bt_info_lb2 == 0xff);
-       coex_stat->bt_inq_page = ((coex_stat->bt_info_lb2 & BIT(2)) == BIT(2));
+
+       inq_page = ((coex_stat->bt_info_lb2 & BIT(2)) == BIT(2));
+
+       if (inq_page != coex_stat->bt_inq_page) {
+               cancel_delayed_work_sync(&coex->bt_remain_work);
+               coex_stat->bt_inq_page = inq_page;
+
+               if (inq_page)
+                       coex_stat->bt_inq_remain = true;
+               else
+                       ieee80211_queue_delayed_work(rtwdev->hw,
+                                                    &coex->bt_remain_work,
+                                                    4 * HZ);
+       }
        coex_stat->bt_acl_busy = ((coex_stat->bt_info_lb2 & BIT(3)) == BIT(3));
        coex_stat->cnt_bt[COEX_CNT_BT_RETRY] = coex_stat->bt_info_lb3 & 0xf;
        if (coex_stat->cnt_bt[COEX_CNT_BT_RETRY] >= 1)
@@ -2518,6 +2561,30 @@ void rtw_coex_defreeze_work(struct work_struct *work)
        mutex_unlock(&rtwdev->mutex);
 }
 
+void rtw_coex_wl_remain_work(struct work_struct *work)
+{
+       struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
+                                             coex.wl_remain_work.work);
+       struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
+
+       mutex_lock(&rtwdev->mutex);
+       coex_stat->wl_gl_busy = test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
+       rtw_coex_run_coex(rtwdev, COEX_RSN_WLSTATUS);
+       mutex_unlock(&rtwdev->mutex);
+}
+
+void rtw_coex_bt_remain_work(struct work_struct *work)
+{
+       struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
+                                             coex.bt_remain_work.work);
+       struct rtw_coex_stat *coex_stat = &rtwdev->coex.stat;
+
+       mutex_lock(&rtwdev->mutex);
+       coex_stat->bt_inq_remain = coex_stat->bt_inq_page;
+       rtw_coex_run_coex(rtwdev, COEX_RSN_BTSTATUS);
+       mutex_unlock(&rtwdev->mutex);
+}
+
 #ifdef CONFIG_RTW88_DEBUGFS
 #define INFO_SIZE      80
 
index 4c3a019..44720fd 100644 (file)
@@ -95,6 +95,7 @@ enum coex_runreason {
        COEX_RSN_BTINFO         = 12,
        COEX_RSN_LPS            = 13,
        COEX_RSN_WLSTATUS       = 14,
+       COEX_RSN_BTSTATUS       = 15,
 
        COEX_RSN_MAX
 };
@@ -362,6 +363,8 @@ void rtw_coex_write_scbd(struct rtw_dev *rtwdev, u16 bitpos, bool set);
 void rtw_coex_bt_relink_work(struct work_struct *work);
 void rtw_coex_bt_reenable_work(struct work_struct *work);
 void rtw_coex_defreeze_work(struct work_struct *work);
+void rtw_coex_wl_remain_work(struct work_struct *work);
+void rtw_coex_bt_remain_work(struct work_struct *work);
 
 void rtw_coex_power_on_setting(struct rtw_dev *rtwdev);
 void rtw_coex_init_hw_config(struct rtw_dev *rtwdev, bool wifi_only);
index 09f04fe..f769c98 100644 (file)
@@ -344,6 +344,31 @@ static ssize_t rtw_debugfs_set_write_reg(struct file *filp,
        return count;
 }
 
+static ssize_t rtw_debugfs_set_h2c(struct file *filp,
+                                  const char __user *buffer,
+                                  size_t count, loff_t *loff)
+{
+       struct rtw_debugfs_priv *debugfs_priv = filp->private_data;
+       struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+       char tmp[32 + 1];
+       u8 param[8];
+       int num;
+
+       rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 3);
+
+       num = sscanf(tmp, "%hhx,%hhx,%hhx,%hhx,%hhx,%hhx,%hhx,%hhx",
+                    &param[0], &param[1], &param[2], &param[3],
+                    &param[4], &param[5], &param[6], &param[7]);
+       if (num != 8) {
+               rtw_info(rtwdev, "invalid H2C command format for debug\n");
+               return -EINVAL;
+       }
+
+       rtw_fw_h2c_cmd_dbg(rtwdev, param);
+
+       return count;
+}
+
 static ssize_t rtw_debugfs_set_rf_write(struct file *filp,
                                        const char __user *buffer,
                                        size_t count, loff_t *loff)
@@ -808,6 +833,10 @@ static struct rtw_debugfs_priv rtw_debug_priv_write_reg = {
        .cb_write = rtw_debugfs_set_write_reg,
 };
 
+static struct rtw_debugfs_priv rtw_debug_priv_h2c = {
+       .cb_write = rtw_debugfs_set_h2c,
+};
+
 static struct rtw_debugfs_priv rtw_debug_priv_rf_write = {
        .cb_write = rtw_debugfs_set_rf_write,
 };
@@ -877,6 +906,7 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
        rtw_debugfs_add_r(phy_info);
        rtw_debugfs_add_r(coex_info);
        rtw_debugfs_add_rw(coex_enable);
+       rtw_debugfs_add_w(h2c);
        rtw_debugfs_add_r(mac_0);
        rtw_debugfs_add_r(mac_1);
        rtw_debugfs_add_r(mac_2);
index 6478fd7..63b00bc 100644 (file)
@@ -253,6 +253,11 @@ out:
        spin_unlock(&rtwdev->h2c.lock);
 }
 
+void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c)
+{
+       rtw_fw_send_h2c_command(rtwdev, h2c);
+}
+
 static void rtw_fw_send_h2c_packet(struct rtw_dev *rtwdev, u8 *h2c_pkt)
 {
        int ret;
@@ -456,7 +461,7 @@ void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
        SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv);
        SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable);
        SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode);
-       SET_RA_INFO_LDPC(h2c_pkt, si->ldpc_en);
+       SET_RA_INFO_LDPC(h2c_pkt, !!si->ldpc_en);
        SET_RA_INFO_NO_UPDATE(h2c_pkt, no_update);
        SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable);
        SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt);
@@ -915,14 +920,14 @@ static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
        return skb_new;
 }
 
-static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb)
+static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
+                                   enum rtw_rsvd_packet_type type)
 {
-       struct rtw_tx_pkt_info pkt_info;
+       struct rtw_tx_pkt_info pkt_info = {0};
        struct rtw_chip_info *chip = rtwdev->chip;
        u8 *pkt_desc;
 
-       memset(&pkt_info, 0, sizeof(pkt_info));
-       rtw_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb);
+       rtw_tx_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb, type);
        pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
        memset(pkt_desc, 0, chip->tx_pkt_desc_sz);
        rtw_tx_fill_tx_desc(&pkt_info, skb);
@@ -1261,7 +1266,7 @@ static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev, u32 *size)
                 * And iter->len will be added with size of tx_desc_sz.
                 */
                if (rsvd_pkt->add_txdesc)
-                       rtw_fill_rsvd_page_desc(rtwdev, iter);
+                       rtw_fill_rsvd_page_desc(rtwdev, iter, rsvd_pkt->type);
 
                rsvd_pkt->skb = iter;
                rsvd_pkt->page = total_page;
index 470e180..686dcd3 100644 (file)
@@ -563,4 +563,6 @@ void rtw_fw_set_nlo_info(struct rtw_dev *rtwdev, bool enable);
 void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
                                 struct cfg80211_ssid *ssid);
 void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable);
+void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c);
+
 #endif
index c412bc5..6b19915 100644 (file)
@@ -231,6 +231,23 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
        mutex_unlock(&rtwdev->mutex);
 }
 
+static int rtw_ops_change_interface(struct ieee80211_hw *hw,
+                                   struct ieee80211_vif *vif,
+                                   enum nl80211_iftype type, bool p2p)
+{
+       struct rtw_dev *rtwdev = hw->priv;
+
+       rtw_info(rtwdev, "change vif %pM (%d)->(%d), p2p (%d)->(%d)\n",
+                vif->addr, vif->type, type, vif->p2p, p2p);
+
+       rtw_ops_remove_interface(hw, vif);
+
+       vif->type = type;
+       vif->p2p = p2p;
+
+       return rtw_ops_add_interface(hw, vif);
+}
+
 static void rtw_ops_configure_filter(struct ieee80211_hw *hw,
                                     unsigned int changed_flags,
                                     unsigned int *new_flags,
@@ -373,6 +390,15 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
        if (changed & BSS_CHANGED_BEACON)
                rtw_fw_download_rsvd_page(rtwdev);
 
+       if (changed & BSS_CHANGED_BEACON_ENABLED) {
+               if (conf->enable_beacon)
+                       rtw_write32_set(rtwdev, REG_FWHW_TXQ_CTRL,
+                                       BIT_EN_BCNQ_DL);
+               else
+                       rtw_write32_clr(rtwdev, REG_FWHW_TXQ_CTRL,
+                                       BIT_EN_BCNQ_DL);
+       }
+
        if (changed & BSS_CHANGED_MU_GROUPS)
                rtw_chip_set_gid_table(rtwdev, vif, conf);
 
@@ -827,6 +853,7 @@ const struct ieee80211_ops rtw_ops = {
        .config                 = rtw_ops_config,
        .add_interface          = rtw_ops_add_interface,
        .remove_interface       = rtw_ops_remove_interface,
+       .change_interface       = rtw_ops_change_interface,
        .configure_filter       = rtw_ops_configure_filter,
        .bss_info_changed       = rtw_ops_bss_info_changed,
        .conf_tx                = rtw_ops_conf_tx,
index 0eefafc..54044ab 100644 (file)
@@ -722,8 +722,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
                        stbc_en = VHT_STBC_EN;
                if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
                        ldpc_en = VHT_LDPC_EN;
-               if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
-                       is_support_sgi = true;
        } else if (sta->ht_cap.ht_supported) {
                ra_mask |= (sta->ht_cap.mcs.rx_mask[1] << 20) |
                           (sta->ht_cap.mcs.rx_mask[0] << 12);
@@ -731,9 +729,6 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
                        stbc_en = HT_STBC_EN;
                if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
                        ldpc_en = HT_LDPC_EN;
-               if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20 ||
-                   sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
-                       is_support_sgi = true;
        }
 
        if (efuse->hw_cap.nss == 1)
@@ -775,12 +770,18 @@ void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
        switch (sta->bandwidth) {
        case IEEE80211_STA_RX_BW_80:
                bw_mode = RTW_CHANNEL_WIDTH_80;
+               is_support_sgi = sta->vht_cap.vht_supported &&
+                                (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
                break;
        case IEEE80211_STA_RX_BW_40:
                bw_mode = RTW_CHANNEL_WIDTH_40;
+               is_support_sgi = sta->ht_cap.ht_supported &&
+                                (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
                break;
        default:
                bw_mode = RTW_CHANNEL_WIDTH_20;
+               is_support_sgi = sta->ht_cap.ht_supported &&
+                                (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
                break;
        }
 
@@ -935,6 +936,8 @@ void rtw_core_stop(struct rtw_dev *rtwdev)
        cancel_delayed_work_sync(&coex->bt_relink_work);
        cancel_delayed_work_sync(&coex->bt_reenable_work);
        cancel_delayed_work_sync(&coex->defreeze_work);
+       cancel_delayed_work_sync(&coex->wl_remain_work);
+       cancel_delayed_work_sync(&coex->bt_remain_work);
 
        mutex_lock(&rtwdev->mutex);
 
@@ -989,12 +992,12 @@ static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
        vht_cap->vht_supported = true;
        vht_cap->cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
                       IEEE80211_VHT_CAP_SHORT_GI_80 |
-                      IEEE80211_VHT_CAP_TXSTBC |
                       IEEE80211_VHT_CAP_RXSTBC_1 |
                       IEEE80211_VHT_CAP_HTC_VHT |
                       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
                       0;
-
+       if (rtwdev->hal.rf_path_num > 1)
+               vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
        vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
                        IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
        vht_cap->cap |= (rtwdev->hal.bfee_sts_cap <<
@@ -1326,6 +1329,10 @@ static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev)
                efuse->share_ant = true;
        if (efuse->regd == 0xff)
                efuse->regd = 0;
+       if (efuse->tx_bb_swing_setting_2g == 0xff)
+               efuse->tx_bb_swing_setting_2g = 0;
+       if (efuse->tx_bb_swing_setting_5g == 0xff)
+               efuse->tx_bb_swing_setting_5g = 0;
 
        efuse->btcoex = (efuse->rf_board_option & 0xe0) == 0x20;
        efuse->ext_pa_2g = efuse->pa_type_2g & BIT(4) ? 1 : 0;
@@ -1422,6 +1429,8 @@ int rtw_core_init(struct rtw_dev *rtwdev)
        INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work);
        INIT_DELAYED_WORK(&coex->bt_reenable_work, rtw_coex_bt_reenable_work);
        INIT_DELAYED_WORK(&coex->defreeze_work, rtw_coex_defreeze_work);
+       INIT_DELAYED_WORK(&coex->wl_remain_work, rtw_coex_wl_remain_work);
+       INIT_DELAYED_WORK(&coex->bt_remain_work, rtw_coex_bt_remain_work);
        INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
        INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work);
        skb_queue_head_init(&rtwdev->c2h_queue);
index 0841f5f..276b5d3 100644 (file)
@@ -183,6 +183,7 @@ enum rtw_chip_type {
        RTW_CHIP_TYPE_8822B,
        RTW_CHIP_TYPE_8822C,
        RTW_CHIP_TYPE_8723D,
+       RTW_CHIP_TYPE_8821C,
 };
 
 enum rtw_tx_queue_type {
@@ -591,6 +592,8 @@ struct rtw_tx_pkt_info {
        bool dis_qselseq;
        bool en_hwseq;
        u8 hw_ssn_sel;
+       bool nav_use_hdr;
+       bool bt_null;
 };
 
 struct rtw_rx_pkt_stat {
@@ -1147,6 +1150,9 @@ struct rtw_chip_info {
        const struct wiphy_wowlan_support *wowlan_stub;
        const u8 max_sched_scan_ssids;
 
+       /* for 8821c set channel */
+       u32 ch_param[3];
+
        /* coex paras */
        u32 coex_para_ver;
        u8 bt_desired_ver;
@@ -1263,6 +1269,7 @@ struct rtw_coex_stat {
        bool bt_link_exist;
        bool bt_whck_test;
        bool bt_inq_page;
+       bool bt_inq_remain;
        bool bt_inq;
        bool bt_page;
        bool bt_ble_voice;
@@ -1363,6 +1370,8 @@ struct rtw_coex {
        struct delayed_work bt_relink_work;
        struct delayed_work bt_reenable_work;
        struct delayed_work defreeze_work;
+       struct delayed_work wl_remain_work;
+       struct delayed_work bt_remain_work;
 };
 
 #define DPK_RF_REG_NUM 7
@@ -1462,6 +1471,7 @@ struct rtw_dm_info {
        u8 thermal_avg[RTW_RF_PATH_MAX];
        u8 thermal_meter_k;
        s8 delta_power_index[RTW_RF_PATH_MAX];
+       s8 delta_power_index_last[RTW_RF_PATH_MAX];
        u8 default_ofdm_index;
        bool pwr_trk_triggered;
        bool pwr_trk_init_trigger;
@@ -1479,6 +1489,7 @@ struct rtw_dm_info {
        /* [bandwidth 0:20M/1:40M][number of path] */
        u8 cck_pd_lv[2][RTW_RF_PATH_MAX];
        u32 cck_fa_avg;
+       u8 cck_pd_default;
 
        /* save the last rx phy status for debug */
        s8 rx_snr[RTW_RF_PATH_MAX];
@@ -1526,6 +1537,8 @@ struct rtw_efuse {
        u8 apa_type;
        bool ext_pa_2g;
        bool ext_pa_5g;
+       u8 tx_bb_swing_setting_2g;
+       u8 tx_bb_swing_setting_5g;
 
        bool btcoex;
        /* bt share antenna with wifi */
index 8228db9..3413973 100644 (file)
 #include "debug.h"
 
 static bool rtw_disable_msi;
+static bool rtw_pci_disable_aspm;
 module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
+module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644);
 MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
+MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support");
 
 static u32 rtw_pci_tx_queue_idx_addr[] = {
        [RTW_TX_QUEUE_BK]       = RTK_PCI_TXBD_IDX_BKQ,
@@ -1200,6 +1203,9 @@ static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
        u8 value;
        int ret;
 
+       if (rtw_pci_disable_aspm)
+               return;
+
        ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
        if (ret) {
                rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
@@ -1219,6 +1225,9 @@ static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
        u8 value;
        int ret;
 
+       if (rtw_pci_disable_aspm)
+               return;
+
        ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
        if (ret) {
                rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
index 5a3e9cc..8f468d6 100644 (file)
@@ -43,6 +43,8 @@
 #define BITS_EF_ADDR           (BIT_MASK_EF_ADDR << BIT_SHIFT_EF_ADDR)
 #define BITS_PLL               0xf0
 
+#define REG_AFE_XTAL_CTRL      0x24
+#define REG_AFE_PLL_CTRL       0x28
 #define REG_AFE_CTRL3          0x2c
 #define BIT_MASK_XTAL          0x00FFF000
 #define BIT_XTAL_GMP_BIT4      BIT(28)
@@ -59,6 +61,7 @@
 #define BIT_FSPI_EN            BIT(19)
 #define BIT_EN_SIC             BIT(12)
 #define BIT_BT_AOD_GPIO3       BIT(9)
+#define BIT_PO_BT_PTA_PINS     BIT(9)
 #define BIT_BT_PTA_EN          BIT(5)
 #define BIT_WLRFE_4_5_EN       BIT(2)
 
 #define REG_RFE_CTRL_E         0x0974
 #define REG_2ND_CCA_CTRL       0x0976
 
+#define REG_CCK0_FAREPORT      0xa2c
+
 #define REG_DIS_DPD            0x0a70
 #define DIS_DPD_MASK           GENMASK(9, 0)
 #define DIS_DPD_RATE6M         BIT(0)
index 4700195..3ddd170 100644 (file)
@@ -1956,13 +1956,13 @@ static const struct coex_table_para table_sant_8723d[] = {
        {0xa5555555, 0xaaaa5aaa},
        {0x6a5a5a5a, 0x5a5a5a5a},
        {0x6a5a5a5a, 0x6a5a5a5a},
-       {0x65555555, 0x5a5a5a5a},
+       {0x66555555, 0x5a5a5a5a},
        {0x65555555, 0x6a5a5a5a}, /* case-10 */
        {0x65555555, 0xfafafafa},
-       {0x65555555, 0x6a5a5aaa},
+       {0x66555555, 0x5a5a5aaa},
        {0x65555555, 0x5aaa5aaa},
        {0x65555555, 0xaaaa5aaa},
-       {0x65555555, 0xaaaaaaaa}, /* case-15 */
+       {0x66555555, 0xaaaaaaaa}, /* case-15 */
        {0xffff55ff, 0xfafafafa},
        {0xffff55ff, 0x6afa5afa},
        {0xaaffffaa, 0xfafafafa},
@@ -2034,8 +2034,9 @@ static const struct coex_tdma_para tdma_sant_8723d[] = {
        { {0x51, 0x0c, 0x03, 0x10, 0x54} },
        { {0x55, 0x08, 0x03, 0x10, 0x54} },
        { {0x65, 0x10, 0x03, 0x11, 0x11} },
-       { {0x51, 0x10, 0x03, 0x10, 0x51} },
-       { {0x61, 0x15, 0x03, 0x11, 0x10} }
+       { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */
+       { {0x51, 0x08, 0x03, 0x10, 0x50} },
+       { {0x61, 0x08, 0x03, 0x11, 0x11} }
 };
 
 /* Non-Shared-Antenna TDMA */
@@ -2714,7 +2715,7 @@ struct rtw_chip_info rtw8723d_hw_spec = {
        .pwr_track_tbl = &rtw8723d_rtw_pwr_track_tbl,
        .iqk_threshold = 8,
 
-       .coex_para_ver = 0x1905302f,
+       .coex_para_ver = 0x2007022f,
        .bt_desired_ver = 0x2f,
        .scbd_support = true,
        .new_scbd10_def = true,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
new file mode 100644 (file)
index 0000000..d8863d8
--- /dev/null
@@ -0,0 +1,1853 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019  Realtek Corporation
+ */
+
+#include "main.h"
+#include "coex.h"
+#include "fw.h"
+#include "tx.h"
+#include "rx.h"
+#include "phy.h"
+#include "rtw8821c.h"
+#include "rtw8821c_table.h"
+#include "mac.h"
+#include "reg.h"
+#include "debug.h"
+#include "bf.h"
+
+static void rtw8821ce_efuse_parsing(struct rtw_efuse *efuse,
+                                   struct rtw8821c_efuse *map)
+{
+       ether_addr_copy(efuse->addr, map->e.mac_addr);
+}
+
+static int rtw8821c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
+{
+       struct rtw_efuse *efuse = &rtwdev->efuse;
+       struct rtw8821c_efuse *map;
+       int i;
+
+       map = (struct rtw8821c_efuse *)log_map;
+
+       efuse->rfe_option = map->rfe_option;
+       efuse->rf_board_option = map->rf_board_option;
+       efuse->crystal_cap = map->xtal_k;
+       efuse->pa_type_2g = map->pa_type;
+       efuse->pa_type_5g = map->pa_type;
+       efuse->lna_type_2g = map->lna_type_2g[0];
+       efuse->lna_type_5g = map->lna_type_5g[0];
+       efuse->channel_plan = map->channel_plan;
+       efuse->country_code[0] = map->country_code[0];
+       efuse->country_code[1] = map->country_code[1];
+       efuse->bt_setting = map->rf_bt_setting;
+       efuse->regd = map->rf_board_option & 0x7;
+       efuse->thermal_meter[0] = map->thermal_meter;
+       efuse->thermal_meter_k = map->thermal_meter;
+       efuse->tx_bb_swing_setting_2g = map->tx_bb_swing_setting_2g;
+       efuse->tx_bb_swing_setting_5g = map->tx_bb_swing_setting_5g;
+
+       for (i = 0; i < 4; i++)
+               efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
+
+       switch (rtw_hci_type(rtwdev)) {
+       case RTW_HCI_TYPE_PCIE:
+               rtw8821ce_efuse_parsing(efuse, map);
+               break;
+       default:
+               /* unsupported now */
+               return -ENOTSUPP;
+       }
+
+       return 0;
+}
+
+static const u32 rtw8821c_txscale_tbl[] = {
+       0x081, 0x088, 0x090, 0x099, 0x0a2, 0x0ac, 0x0b6, 0x0c0, 0x0cc, 0x0d8,
+       0x0e5, 0x0f2, 0x101, 0x110, 0x120, 0x131, 0x143, 0x156, 0x16a, 0x180,
+       0x197, 0x1af, 0x1c8, 0x1e3, 0x200, 0x21e, 0x23e, 0x261, 0x285, 0x2ab,
+       0x2d3, 0x2fe, 0x32b, 0x35c, 0x38e, 0x3c4, 0x3fe
+};
+
+static const u8 rtw8821c_get_swing_index(struct rtw_dev *rtwdev)
+{
+       u8 i = 0;
+       u32 swing, table_value;
+
+       swing = rtw_read32_mask(rtwdev, REG_TXSCALE_A, 0xffe00000);
+       for (i = 0; i < ARRAY_SIZE(rtw8821c_txscale_tbl); i++) {
+               table_value = rtw8821c_txscale_tbl[i];
+               if (swing == table_value)
+                       break;
+       }
+
+       return i;
+}
+
+static void rtw8821c_pwrtrack_init(struct rtw_dev *rtwdev)
+{
+       struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+       u8 swing_idx = rtw8821c_get_swing_index(rtwdev);
+
+       if (swing_idx >= ARRAY_SIZE(rtw8821c_txscale_tbl))
+               dm_info->default_ofdm_index = 24;
+       else
+               dm_info->default_ofdm_index = swing_idx;
+
+       ewma_thermal_init(&dm_info->avg_thermal[RF_PATH_A]);
+       dm_info->delta_power_index[RF_PATH_A] = 0;
+       dm_info->delta_power_index_last[RF_PATH_A] = 0;
+       dm_info->pwr_trk_triggered = false;
+       dm_info->pwr_trk_init_trigger = true;
+       dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k;
+}
+
+static void rtw8821c_phy_bf_init(struct rtw_dev *rtwdev)
+{
+       rtw_bf_phy_init(rtwdev);
+       /* Grouping bitmap parameters */
+       rtw_write32(rtwdev, 0x1C94, 0xAFFFAFFF);
+}
+
+static void rtw8821c_phy_set_param(struct rtw_dev *rtwdev)
+{
+       u8 crystal_cap, val;
+
+       /* power on BB/RF domain */
+       val = rtw_read8(rtwdev, REG_SYS_FUNC_EN);
+       val |= BIT_FEN_PCIEA;
+       rtw_write8(rtwdev, REG_SYS_FUNC_EN, val);
+
+       /* toggle BB reset */
+       val |= BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST;
+       rtw_write8(rtwdev, REG_SYS_FUNC_EN, val);
+       val &= ~(BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST);
+       rtw_write8(rtwdev, REG_SYS_FUNC_EN, val);
+       val |= BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST;
+       rtw_write8(rtwdev, REG_SYS_FUNC_EN, val);
+
+       rtw_write8(rtwdev, REG_RF_CTRL,
+                  BIT_RF_EN | BIT_RF_RSTB | BIT_RF_SDM_RSTB);
+       usleep_range(10, 11);
+       rtw_write8(rtwdev, REG_WLRF1 + 3,
+                  BIT_RF_EN | BIT_RF_RSTB | BIT_RF_SDM_RSTB);
+       usleep_range(10, 11);
+
+       /* pre init before header files config */
+       rtw_write32_clr(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST);
+
+       rtw_phy_load_tables(rtwdev);
+
+       crystal_cap = rtwdev->efuse.crystal_cap & 0x3F;
+       rtw_write32_mask(rtwdev, REG_AFE_XTAL_CTRL, 0x7e000000, crystal_cap);
+       rtw_write32_mask(rtwdev, REG_AFE_PLL_CTRL, 0x7e, crystal_cap);
+       rtw_write32_mask(rtwdev, REG_CCK0_FAREPORT, BIT(18) | BIT(22), 0);
+
+       /* post init after header files config */
+       rtw_write32_set(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST);
+       rtwdev->chip->ch_param[0] = rtw_read32_mask(rtwdev, REG_TXSF2, MASKDWORD);
+       rtwdev->chip->ch_param[1] = rtw_read32_mask(rtwdev, REG_TXSF6, MASKDWORD);
+       rtwdev->chip->ch_param[2] = rtw_read32_mask(rtwdev, REG_TXFILTER, MASKDWORD);
+
+       rtw_phy_init(rtwdev);
+       rtwdev->dm_info.cck_pd_default = rtw_read8(rtwdev, REG_CSRATIO) & 0x1f;
+
+       rtw8821c_pwrtrack_init(rtwdev);
+
+       rtw8821c_phy_bf_init(rtwdev);
+}
+
+static int rtw8821c_mac_init(struct rtw_dev *rtwdev)
+{
+       u32 value32;
+       u16 pre_txcnt;
+
+       /* protocol configuration */
+       rtw_write8(rtwdev, REG_AMPDU_MAX_TIME_V1, WLAN_AMPDU_MAX_TIME);
+       rtw_write8_set(rtwdev, REG_TX_HANG_CTRL, BIT_EN_EOF_V1);
+       pre_txcnt = WLAN_PRE_TXCNT_TIME_TH | BIT_EN_PRECNT;
+       rtw_write8(rtwdev, REG_PRECNT_CTRL, (u8)(pre_txcnt & 0xFF));
+       rtw_write8(rtwdev, REG_PRECNT_CTRL + 1, (u8)(pre_txcnt >> 8));
+       value32 = WLAN_RTS_LEN_TH | (WLAN_RTS_TX_TIME_TH << 8) |
+                 (WLAN_MAX_AGG_PKT_LIMIT << 16) |
+                 (WLAN_RTS_MAX_AGG_PKT_LIMIT << 24);
+       rtw_write32(rtwdev, REG_PROT_MODE_CTRL, value32);
+       rtw_write16(rtwdev, REG_BAR_MODE_CTRL + 2,
+                   WLAN_BAR_RETRY_LIMIT | WLAN_RA_TRY_RATE_AGG_LIMIT << 8);
+       rtw_write8(rtwdev, REG_FAST_EDCA_VOVI_SETTING, FAST_EDCA_VO_TH);
+       rtw_write8(rtwdev, REG_FAST_EDCA_VOVI_SETTING + 2, FAST_EDCA_VI_TH);
+       rtw_write8(rtwdev, REG_FAST_EDCA_BEBK_SETTING, FAST_EDCA_BE_TH);
+       rtw_write8(rtwdev, REG_FAST_EDCA_BEBK_SETTING + 2, FAST_EDCA_BK_TH);
+       rtw_write8_set(rtwdev, REG_INIRTS_RATE_SEL, BIT(5));
+
+       /* EDCA configuration */
+       rtw_write8_clr(rtwdev, REG_TIMER0_SRC_SEL, BIT_TSFT_SEL_TIMER0);
+       rtw_write16(rtwdev, REG_TXPAUSE, 0);
+       rtw_write8(rtwdev, REG_SLOT, WLAN_SLOT_TIME);
+       rtw_write8(rtwdev, REG_PIFS, WLAN_PIFS_TIME);
+       rtw_write32(rtwdev, REG_SIFS, WLAN_SIFS_CFG);
+       rtw_write16(rtwdev, REG_EDCA_VO_PARAM + 2, WLAN_VO_TXOP_LIMIT);
+       rtw_write16(rtwdev, REG_EDCA_VI_PARAM + 2, WLAN_VI_TXOP_LIMIT);
+       rtw_write32(rtwdev, REG_RD_NAV_NXT, WLAN_NAV_CFG);
+       rtw_write16(rtwdev, REG_RXTSF_OFFSET_CCK, WLAN_RX_TSF_CFG);
+
+       /* Set beacon cotnrol - enable TSF and other related functions */
+       rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
+
+       /* Set send beacon related registers */
+       rtw_write32(rtwdev, REG_TBTT_PROHIBIT, WLAN_TBTT_TIME);
+       rtw_write8(rtwdev, REG_DRVERLYINT, WLAN_DRV_EARLY_INT);
+       rtw_write8(rtwdev, REG_BCNDMATIM, WLAN_BCN_DMA_TIME);
+       rtw_write8_clr(rtwdev, REG_TX_PTCL_CTRL + 1, BIT_SIFS_BK_EN >> 8);
+
+       /* WMAC configuration */
+       rtw_write32(rtwdev, REG_RXFLTMAP0, WLAN_RX_FILTER0);
+       rtw_write16(rtwdev, REG_RXFLTMAP2, WLAN_RX_FILTER2);
+       rtw_write32(rtwdev, REG_RCR, WLAN_RCR_CFG);
+       rtw_write8(rtwdev, REG_RX_PKT_LIMIT, WLAN_RXPKT_MAX_SZ_512);
+       rtw_write8(rtwdev, REG_TCR + 2, WLAN_TX_FUNC_CFG2);
+       rtw_write8(rtwdev, REG_TCR + 1, WLAN_TX_FUNC_CFG1);
+       rtw_write8(rtwdev, REG_ACKTO_CCK, 0x40);
+       rtw_write8_set(rtwdev, REG_WMAC_TRXPTCL_CTL_H, BIT(1));
+       rtw_write8_set(rtwdev, REG_SND_PTCL_CTRL, BIT(6));
+       rtw_write32(rtwdev, REG_WMAC_OPTION_FUNCTION + 8, WLAN_MAC_OPT_FUNC2);
+       rtw_write8(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, WLAN_MAC_OPT_NORM_FUNC1);
+
+       return 0;
+}
+
+static void rtw8821c_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
+{
+       u8 ldo_pwr;
+
+       ldo_pwr = rtw_read8(rtwdev, REG_LDO_EFUSE_CTRL + 3);
+       ldo_pwr = enable ? ldo_pwr | BIT(7) : ldo_pwr & ~BIT(7);
+       rtw_write8(rtwdev, REG_LDO_EFUSE_CTRL + 3, ldo_pwr);
+}
+
+static void rtw8821c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
+{
+       u32 rf_reg18;
+
+       rf_reg18 = rtw_read_rf(rtwdev, RF_PATH_A, 0x18, RFREG_MASK);
+
+       rf_reg18 &= ~(RF18_BAND_MASK | RF18_CHANNEL_MASK | RF18_RFSI_MASK |
+                     RF18_BW_MASK);
+
+       rf_reg18 |= (channel <= 14 ? RF18_BAND_2G : RF18_BAND_5G);
+       rf_reg18 |= (channel & RF18_CHANNEL_MASK);
+
+       if (channel >= 100 && channel <= 140)
+               rf_reg18 |= RF18_RFSI_GE;
+       else if (channel > 140)
+               rf_reg18 |= RF18_RFSI_GT;
+
+       switch (bw) {
+       case RTW_CHANNEL_WIDTH_5:
+       case RTW_CHANNEL_WIDTH_10:
+       case RTW_CHANNEL_WIDTH_20:
+       default:
+               rf_reg18 |= RF18_BW_20M;
+               break;
+       case RTW_CHANNEL_WIDTH_40:
+               rf_reg18 |= RF18_BW_40M;
+               break;
+       case RTW_CHANNEL_WIDTH_80:
+               rf_reg18 |= RF18_BW_80M;
+               break;
+       }
+
+       if (channel <= 14) {
+               rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, BIT(6), 0x1);
+               rtw_write_rf(rtwdev, RF_PATH_A, 0x64, 0xf, 0xf);
+       } else {
+               rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, BIT(6), 0x0);
+       }
+
+       rtw_write_rf(rtwdev, RF_PATH_A, 0x18, RFREG_MASK, rf_reg18);
+
+       rtw_write_rf(rtwdev, RF_PATH_A, RF_XTALX2, BIT(19), 0);
+       rtw_write_rf(rtwdev, RF_PATH_A, RF_XTALX2, BIT(19), 1);
+}
+
+static void rtw8821c_set_channel_rxdfir(struct rtw_dev *rtwdev, u8 bw)
+{
+       if (bw == RTW_CHANNEL_WIDTH_40) {
+               /* RX DFIR for BW40 */
+               rtw_write32_mask(rtwdev, REG_ACBB0, BIT(29) | BIT(28), 0x2);
+               rtw_write32_mask(rtwdev, REG_ACBBRXFIR, BIT(29) | BIT(28), 0x2);
+               rtw_write32_mask(rtwdev, REG_TXDFIR, BIT(31), 0x0);
+               rtw_write32_mask(rtwdev, REG_CHFIR, BIT(31), 0x0);
+       } else if (bw == RTW_CHANNEL_WIDTH_80) {
+               /* RX DFIR for BW80 */
+               rtw_write32_mask(rtwdev, REG_ACBB0, BIT(29) | BIT(28), 0x2);
+               rtw_write32_mask(rtwdev, REG_ACBBRXFIR, BIT(29) | BIT(28), 0x1);
+               rtw_write32_mask(rtwdev, REG_TXDFIR, BIT(31), 0x0);
+               rtw_write32_mask(rtwdev, REG_CHFIR, BIT(31), 0x1);
+       } else {
+               /* RX DFIR for BW20, BW10 and BW5 */
+               rtw_write32_mask(rtwdev, REG_ACBB0, BIT(29) | BIT(28), 0x2);
+               rtw_write32_mask(rtwdev, REG_ACBBRXFIR, BIT(29) | BIT(28), 0x2);
+               rtw_write32_mask(rtwdev, REG_TXDFIR, BIT(31), 0x1);
+               rtw_write32_mask(rtwdev, REG_CHFIR, BIT(31), 0x0);
+       }
+}
+
+static void rtw8821c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+                                   u8 primary_ch_idx)
+{
+       u32 val32;
+
+       if (channel <= 14) {
+               rtw_write32_mask(rtwdev, REG_RXPSEL, BIT(28), 0x1);
+               rtw_write32_mask(rtwdev, REG_CCK_CHECK, BIT(7), 0x0);
+               rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x0);
+               rtw_write32_mask(rtwdev, REG_RXCCAMSK, 0x0000FC00, 15);
+
+               rtw_write32_mask(rtwdev, REG_TXSCALE_A, 0xf00, 0x0);
+               rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x96a);
+               if (channel == 14) {
+                       rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD, 0x0000b81c);
+                       rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD, 0x0000);
+                       rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD, 0x00003667);
+               } else {
+                       rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD,
+                                        rtwdev->chip->ch_param[0]);
+                       rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD,
+                                        rtwdev->chip->ch_param[1] & MASKLWORD);
+                       rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD,
+                                        rtwdev->chip->ch_param[2]);
+               }
+       } else if (channel > 35) {
+               rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x1);
+               rtw_write32_mask(rtwdev, REG_CCK_CHECK, BIT(7), 0x1);
+               rtw_write32_mask(rtwdev, REG_RXPSEL, BIT(28), 0x0);
+               rtw_write32_mask(rtwdev, REG_RXCCAMSK, 0x0000FC00, 15);
+
+               if (channel >= 36 && channel <= 64)
+                       rtw_write32_mask(rtwdev, REG_TXSCALE_A, 0xf00, 0x1);
+               else if (channel >= 100 && channel <= 144)
+                       rtw_write32_mask(rtwdev, REG_TXSCALE_A, 0xf00, 0x2);
+               else if (channel >= 149)
+                       rtw_write32_mask(rtwdev, REG_TXSCALE_A, 0xf00, 0x3);
+
+               if (channel >= 36 && channel <= 48)
+                       rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x494);
+               else if (channel >= 52 && channel <= 64)
+                       rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x453);
+               else if (channel >= 100 && channel <= 116)
+                       rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x452);
+               else if (channel >= 118 && channel <= 177)
+                       rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x412);
+       }
+
+       switch (bw) {
+       case RTW_CHANNEL_WIDTH_20:
+       default:
+               val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD);
+               val32 &= 0xffcffc00;
+               val32 |= 0x10010000;
+               rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32);
+
+               rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1);
+               break;
+       case RTW_CHANNEL_WIDTH_40:
+               if (primary_ch_idx == 1)
+                       rtw_write32_set(rtwdev, REG_RXSB, BIT(4));
+               else
+                       rtw_write32_clr(rtwdev, REG_RXSB, BIT(4));
+
+               val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD);
+               val32 &= 0xff3ff300;
+               val32 |= 0x20020000 | ((primary_ch_idx & 0xf) << 2) |
+                        RTW_CHANNEL_WIDTH_40;
+               rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32);
+
+               rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1);
+               break;
+       case RTW_CHANNEL_WIDTH_80:
+               val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD);
+               val32 &= 0xfcffcf00;
+               val32 |= 0x40040000 | ((primary_ch_idx & 0xf) << 2) |
+                        RTW_CHANNEL_WIDTH_80;
+               rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32);
+
+               rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1);
+               break;
+       case RTW_CHANNEL_WIDTH_5:
+               val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD);
+               val32 &= 0xefcefc00;
+               val32 |= 0x200240;
+               rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32);
+
+               rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x0);
+               rtw_write32_mask(rtwdev, REG_ADC40, BIT(31), 0x1);
+               break;
+       case RTW_CHANNEL_WIDTH_10:
+               val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD);
+               val32 &= 0xefcefc00;
+               val32 |= 0x300380;
+               rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32);
+
+               rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x0);
+               rtw_write32_mask(rtwdev, REG_ADC40, BIT(31), 0x1);
+               break;
+       }
+}
+
+static u32 rtw8821c_get_bb_swing(struct rtw_dev *rtwdev, u8 channel)
+{
+       struct rtw_efuse efuse = rtwdev->efuse;
+       u8 tx_bb_swing;
+       u32 swing2setting[4] = {0x200, 0x16a, 0x101, 0x0b6};
+
+       tx_bb_swing = channel <= 14 ? efuse.tx_bb_swing_setting_2g :
+                                     efuse.tx_bb_swing_setting_5g;
+       if (tx_bb_swing > 9)
+               tx_bb_swing = 0;
+
+       return swing2setting[(tx_bb_swing / 3)];
+}
+
+static void rtw8821c_set_channel_bb_swing(struct rtw_dev *rtwdev, u8 channel,
+                                         u8 bw, u8 primary_ch_idx)
+{
+       rtw_write32_mask(rtwdev, REG_TXSCALE_A, GENMASK(31, 21),
+                        rtw8821c_get_bb_swing(rtwdev, channel));
+       rtw8821c_pwrtrack_init(rtwdev);
+}
+
+static void rtw8821c_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+                                u8 primary_chan_idx)
+{
+       rtw8821c_set_channel_bb(rtwdev, channel, bw, primary_chan_idx);
+       rtw8821c_set_channel_bb_swing(rtwdev, channel, bw, primary_chan_idx);
+       rtw_set_channel_mac(rtwdev, channel, bw, primary_chan_idx);
+       rtw8821c_set_channel_rf(rtwdev, channel, bw);
+       rtw8821c_set_channel_rxdfir(rtwdev, bw);
+}
+
+static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
+                                  struct rtw_rx_pkt_stat *pkt_stat)
+{
+       s8 min_rx_power = -120;
+       u8 pwdb = GET_PHY_STAT_P0_PWDB(phy_status);
+
+       pkt_stat->rx_power[RF_PATH_A] = pwdb - 100;
+       pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+       pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
+       pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
+                                    min_rx_power);
+}
+
+static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
+                                  struct rtw_rx_pkt_stat *pkt_stat)
+{
+       u8 rxsc, bw;
+       s8 min_rx_power = -120;
+
+       if (pkt_stat->rate > DESC_RATE11M && pkt_stat->rate < DESC_RATEMCS0)
+               rxsc = GET_PHY_STAT_P1_L_RXSC(phy_status);
+       else
+               rxsc = GET_PHY_STAT_P1_HT_RXSC(phy_status);
+
+       if (rxsc >= 1 && rxsc <= 8)
+               bw = RTW_CHANNEL_WIDTH_20;
+       else if (rxsc >= 9 && rxsc <= 12)
+               bw = RTW_CHANNEL_WIDTH_40;
+       else if (rxsc >= 13)
+               bw = RTW_CHANNEL_WIDTH_80;
+       else
+               bw = GET_PHY_STAT_P1_RF_MODE(phy_status);
+
+       pkt_stat->rx_power[RF_PATH_A] = GET_PHY_STAT_P1_PWDB_A(phy_status) - 110;
+       pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+       pkt_stat->bw = bw;
+       pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
+                                    min_rx_power);
+}
+
+static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
+                            struct rtw_rx_pkt_stat *pkt_stat)
+{
+       u8 page;
+
+       page = *phy_status & 0xf;
+
+       switch (page) {
+       case 0:
+               query_phy_status_page0(rtwdev, phy_status, pkt_stat);
+               break;
+       case 1:
+               query_phy_status_page1(rtwdev, phy_status, pkt_stat);
+               break;
+       default:
+               rtw_warn(rtwdev, "unused phy status page (%d)\n", page);
+               return;
+       }
+}
+
+static void rtw8821c_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc,
+                                  struct rtw_rx_pkt_stat *pkt_stat,
+                                  struct ieee80211_rx_status *rx_status)
+{
+       struct ieee80211_hdr *hdr;
+       u32 desc_sz = rtwdev->chip->rx_pkt_desc_sz;
+       u8 *phy_status = NULL;
+
+       memset(pkt_stat, 0, sizeof(*pkt_stat));
+
+       pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc);
+       pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc);
+       pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc);
+       pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc);
+       pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc);
+       pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc);
+       pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc);
+       pkt_stat->shift = GET_RX_DESC_SHIFT(rx_desc);
+       pkt_stat->rate = GET_RX_DESC_RX_RATE(rx_desc);
+       pkt_stat->cam_id = GET_RX_DESC_MACID(rx_desc);
+       pkt_stat->ppdu_cnt = GET_RX_DESC_PPDU_CNT(rx_desc);
+       pkt_stat->tsf_low = GET_RX_DESC_TSFL(rx_desc);
+
+       /* drv_info_sz is in unit of 8-bytes */
+       pkt_stat->drv_info_sz *= 8;
+
+       /* c2h cmd pkt's rx/phy status is not interested */
+       if (pkt_stat->is_c2h)
+               return;
+
+       hdr = (struct ieee80211_hdr *)(rx_desc + desc_sz + pkt_stat->shift +
+                                      pkt_stat->drv_info_sz);
+       if (pkt_stat->phy_status) {
+               phy_status = rx_desc + desc_sz + pkt_stat->shift;
+               query_phy_status(rtwdev, phy_status, pkt_stat);
+       }
+
+       rtw_rx_fill_rx_status(rtwdev, pkt_stat, hdr, rx_status, phy_status);
+}
+
+static void
+rtw8821c_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
+{
+       struct rtw_hal *hal = &rtwdev->hal;
+       static const u32 offset_txagc[2] = {0x1d00, 0x1d80};
+       static u32 phy_pwr_idx;
+       u8 rate, rate_idx, pwr_index, shift;
+       int j;
+
+       for (j = 0; j < rtw_rate_size[rs]; j++) {
+               rate = rtw_rate_section[rs][j];
+               pwr_index = hal->tx_pwr_tbl[path][rate];
+               shift = rate & 0x3;
+               phy_pwr_idx |= ((u32)pwr_index << (shift * 8));
+               if (shift == 0x3 || rate == DESC_RATEVHT1SS_MCS9) {
+                       rate_idx = rate & 0xfc;
+                       rtw_write32(rtwdev, offset_txagc[path] + rate_idx,
+                                   phy_pwr_idx);
+                       phy_pwr_idx = 0;
+               }
+       }
+}
+
+static void rtw8821c_set_tx_power_index(struct rtw_dev *rtwdev)
+{
+       struct rtw_hal *hal = &rtwdev->hal;
+       int rs, path;
+
+       for (path = 0; path < hal->rf_path_num; path++) {
+               for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) {
+                       if (rs == RTW_RATE_SECTION_HT_2S ||
+                           rs == RTW_RATE_SECTION_VHT_2S)
+                               continue;
+                       rtw8821c_set_tx_power_index_by_rate(rtwdev, path, rs);
+               }
+       }
+}
+
+static void rtw8821c_false_alarm_statistics(struct rtw_dev *rtwdev)
+{
+       struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+       u32 cck_enable;
+       u32 cck_fa_cnt;
+       u32 ofdm_fa_cnt;
+       u32 crc32_cnt;
+       u32 cca32_cnt;
+
+       cck_enable = rtw_read32(rtwdev, REG_RXPSEL) & BIT(28);
+       cck_fa_cnt = rtw_read16(rtwdev, REG_FA_CCK);
+       ofdm_fa_cnt = rtw_read16(rtwdev, REG_FA_OFDM);
+
+       dm_info->cck_fa_cnt = cck_fa_cnt;
+       dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
+       if (cck_enable)
+               dm_info->total_fa_cnt += cck_fa_cnt;
+       dm_info->total_fa_cnt = ofdm_fa_cnt;
+
+       crc32_cnt = rtw_read32(rtwdev, REG_CRC_CCK);
+       dm_info->cck_ok_cnt = FIELD_GET(GENMASK(15, 0), crc32_cnt);
+       dm_info->cck_err_cnt = FIELD_GET(GENMASK(31, 16), crc32_cnt);
+
+       crc32_cnt = rtw_read32(rtwdev, REG_CRC_OFDM);
+       dm_info->ofdm_ok_cnt = FIELD_GET(GENMASK(15, 0), crc32_cnt);
+       dm_info->ofdm_err_cnt = FIELD_GET(GENMASK(31, 16), crc32_cnt);
+
+       crc32_cnt = rtw_read32(rtwdev, REG_CRC_HT);
+       dm_info->ht_ok_cnt = FIELD_GET(GENMASK(15, 0), crc32_cnt);
+       dm_info->ht_err_cnt = FIELD_GET(GENMASK(31, 16), crc32_cnt);
+
+       crc32_cnt = rtw_read32(rtwdev, REG_CRC_VHT);
+       dm_info->vht_ok_cnt = FIELD_GET(GENMASK(15, 0), crc32_cnt);
+       dm_info->vht_err_cnt = FIELD_GET(GENMASK(31, 16), crc32_cnt);
+
+       cca32_cnt = rtw_read32(rtwdev, REG_CCA_OFDM);
+       dm_info->ofdm_cca_cnt = FIELD_GET(GENMASK(31, 16), cca32_cnt);
+       dm_info->total_cca_cnt = dm_info->ofdm_cca_cnt;
+       if (cck_enable) {
+               cca32_cnt = rtw_read32(rtwdev, REG_CCA_CCK);
+               dm_info->cck_cca_cnt = FIELD_GET(GENMASK(15, 0), cca32_cnt);
+               dm_info->total_cca_cnt += dm_info->cck_cca_cnt;
+       }
+
+       rtw_write32_set(rtwdev, REG_FAS, BIT(17));
+       rtw_write32_clr(rtwdev, REG_FAS, BIT(17));
+       rtw_write32_clr(rtwdev, REG_RXDESC, BIT(15));
+       rtw_write32_set(rtwdev, REG_RXDESC, BIT(15));
+       rtw_write32_set(rtwdev, REG_CNTRST, BIT(0));
+       rtw_write32_clr(rtwdev, REG_CNTRST, BIT(0));
+}
+
+static void rtw8821c_do_iqk(struct rtw_dev *rtwdev)
+{
+       static int do_iqk_cnt;
+       struct rtw_iqk_para para = {.clear = 0, .segment_iqk = 0};
+       u32 rf_reg, iqk_fail_mask;
+       int counter;
+       bool reload;
+
+       if (rtw_is_assoc(rtwdev))
+               para.segment_iqk = 1;
+
+       rtw_fw_do_iqk(rtwdev, &para);
+
+       for (counter = 0; counter < 300; counter++) {
+               rf_reg = rtw_read_rf(rtwdev, RF_PATH_A, RF_DTXLOK, RFREG_MASK);
+               if (rf_reg == 0xabcde)
+                       break;
+               msleep(20);
+       }
+       rtw_write_rf(rtwdev, RF_PATH_A, RF_DTXLOK, RFREG_MASK, 0x0);
+
+       reload = !!rtw_read32_mask(rtwdev, REG_IQKFAILMSK, BIT(16));
+       iqk_fail_mask = rtw_read32_mask(rtwdev, REG_IQKFAILMSK, GENMASK(7, 0));
+       rtw_dbg(rtwdev, RTW_DBG_PHY,
+               "iqk counter=%d reload=%d do_iqk_cnt=%d n_iqk_fail(mask)=0x%02x\n",
+               counter, reload, ++do_iqk_cnt, iqk_fail_mask);
+}
+
+static void rtw8821c_phy_calibration(struct rtw_dev *rtwdev)
+{
+       rtw8821c_do_iqk(rtwdev);
+}
+
+/* for coex */
+static void rtw8821c_coex_cfg_init(struct rtw_dev *rtwdev)
+{
+       /* enable TBTT nterrupt */
+       rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
+
+       /* BT report packet sample rate */
+       rtw_write8_mask(rtwdev, REG_BT_TDMA_TIME, SAMPLE_RATE_MASK,
+                       SAMPLE_RATE);
+
+       /* enable BT counter statistics */
+       rtw_write8(rtwdev, REG_BT_STAT_CTRL, BT_CNT_ENABLE);
+
+       /* enable PTA (3-wire function form BT side) */
+       rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN);
+       rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_PO_BT_PTA_PINS);
+
+       /* enable PTA (tx/rx signal form WiFi side) */
+       rtw_write8_set(rtwdev, REG_QUEUE_CTRL, BIT_PTA_WL_TX_EN);
+       /* wl tx signal to PTA not case EDCCA */
+       rtw_write8_clr(rtwdev, REG_QUEUE_CTRL, BIT_PTA_EDCCA_EN);
+       /* GNT_BT=1 while select both */
+       rtw_write16_set(rtwdev, REG_BT_COEX_V2, BIT_GNT_BT_POLARITY);
+
+       /* beacon queue always hi-pri  */
+       rtw_write8_mask(rtwdev, REG_BT_COEX_TABLE_H + 3, BIT_BCN_QUEUE,
+                       BCN_PRI_EN);
+}
+
+static void rtw8821c_coex_cfg_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type,
+                                        u8 pos_type)
+{
+       struct rtw_coex *coex = &rtwdev->coex;
+       struct rtw_coex_dm *coex_dm = &coex->dm;
+       struct rtw_coex_rfe *coex_rfe = &coex->rfe;
+       u32 switch_status = FIELD_PREP(CTRL_TYPE_MASK, ctrl_type) | pos_type;
+       bool polarity_inverse;
+       u8 regval = 0;
+
+       if (switch_status == coex_dm->cur_switch_status)
+               return;
+
+       coex_dm->cur_switch_status = switch_status;
+
+       if (coex_rfe->ant_switch_diversity &&
+           ctrl_type == COEX_SWITCH_CTRL_BY_BBSW)
+               ctrl_type = COEX_SWITCH_CTRL_BY_ANTDIV;
+
+       polarity_inverse = (coex_rfe->ant_switch_polarity == 1);
+
+       switch (ctrl_type) {
+       default:
+       case COEX_SWITCH_CTRL_BY_BBSW:
+               rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN);
+               rtw_write32_set(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL);
+               /* BB SW, DPDT use RFE_ctrl8 and RFE_ctrl9 as ctrl pin */
+               rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_RFE_SEL89,
+                               DPDT_CTRL_PIN);
+
+               if (pos_type == COEX_SWITCH_TO_WLG_BT) {
+                       if (coex_rfe->rfe_module_type != 0x4 &&
+                           coex_rfe->rfe_module_type != 0x2)
+                               regval = 0x3;
+                       else
+                               regval = (!polarity_inverse ? 0x2 : 0x1);
+               } else if (pos_type == COEX_SWITCH_TO_WLG) {
+                       regval = (!polarity_inverse ? 0x2 : 0x1);
+               } else {
+                       regval = (!polarity_inverse ? 0x1 : 0x2);
+               }
+
+               rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_R_RFE_SEL_15,
+                               regval);
+               break;
+       case COEX_SWITCH_CTRL_BY_PTA:
+               rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN);
+               rtw_write32_set(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL);
+               /* PTA,  DPDT use RFE_ctrl8 and RFE_ctrl9 as ctrl pin */
+               rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_RFE_SEL89,
+                               PTA_CTRL_PIN);
+
+               regval = (!polarity_inverse ? 0x2 : 0x1);
+               rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_R_RFE_SEL_15,
+                               regval);
+               break;
+       case COEX_SWITCH_CTRL_BY_ANTDIV:
+               rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN);
+               rtw_write32_set(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL);
+               rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_RFE_SEL89,
+                               ANTDIC_CTRL_PIN);
+               break;
+       case COEX_SWITCH_CTRL_BY_MAC:
+               rtw_write32_set(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN);
+
+               regval = (!polarity_inverse ? 0x0 : 0x1);
+               rtw_write8_mask(rtwdev, REG_PAD_CTRL1, BIT_SW_DPDT_SEL_DATA,
+                               regval);
+               break;
+       case COEX_SWITCH_CTRL_BY_FW:
+               rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN);
+               rtw_write32_set(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL);
+               break;
+       case COEX_SWITCH_CTRL_BY_BT:
+               rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN);
+               rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL);
+               break;
+       }
+
+       if (ctrl_type == COEX_SWITCH_CTRL_BY_BT) {
+               rtw_write32_clr(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE1);
+               rtw_write32_clr(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE2);
+       } else {
+               rtw_write32_set(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE1);
+               rtw_write32_set(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE2);
+       }
+}
+
+static void rtw8821c_coex_cfg_gnt_fix(struct rtw_dev *rtwdev)
+{}
+
+static void rtw8821c_coex_cfg_gnt_debug(struct rtw_dev *rtwdev)
+{
+       rtw_write32_clr(rtwdev, REG_PAD_CTRL1, BIT_BTGP_SPI_EN);
+       rtw_write32_clr(rtwdev, REG_PAD_CTRL1, BIT_BTGP_JTAG_EN);
+       rtw_write32_clr(rtwdev, REG_GPIO_MUXCFG, BIT_FSPI_EN);
+       rtw_write32_clr(rtwdev, REG_PAD_CTRL1, BIT_LED1DIS);
+       rtw_write32_clr(rtwdev, REG_SYS_SDIO_CTRL, BIT_SDIO_INT);
+       rtw_write32_clr(rtwdev, REG_SYS_SDIO_CTRL, BIT_DBG_GNT_WL_BT);
+}
+
+/* Derive the coex RF front-end description from the efuse RFE option:
+ * whether WLAN-G shares the BT-G path, the antenna-switch polarity, and
+ * whether an antenna switch exists at all.
+ */
+static void rtw8821c_coex_cfg_rfe_type(struct rtw_dev *rtwdev)
+{
+       struct rtw_coex_rfe *rfe = &rtwdev->coex.rfe;
+
+       /* Defaults: switch present, normal polarity, WLG on its own path. */
+       rfe->rfe_module_type = rtwdev->efuse.rfe_option;
+       rfe->ant_switch_polarity = 0;
+       rfe->ant_switch_exist = true;
+       rfe->wlg_at_btg = false;
+
+       switch (rfe->rfe_module_type) {
+       case 2:
+       case 10: /* 1-Ant, Main, BTG */
+       case 7:
+       case 15: /* 2-Ant, DPDT, BTG */
+               rfe->wlg_at_btg = true;
+               break;
+       case 3:
+       case 11: /* 1-Ant, Aux, WLG */
+               rfe->ant_switch_polarity = 1;
+               break;
+       case 4:
+       case 12: /* 1-Ant, Aux, BTG */
+               rfe->wlg_at_btg = true;
+               rfe->ant_switch_polarity = 1;
+               break;
+       case 5:
+       case 13: /* 2-Ant, no switch, WLG */
+       case 6:
+       case 14: /* 2-Ant, no antenna switch, WLG */
+               rfe->ant_switch_exist = false;
+               break;
+       default: /* 0/8: 1-Ant, Main, WLG; 1/9 and others: 2-Ant, DPDT, WLG */
+               break;
+       }
+}
+
+/* Record the requested coex WLAN TX power level.  Shared-antenna designs
+ * are left alone, as is a request that matches the current level.
+ */
+static void rtw8821c_coex_cfg_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr)
+{
+       struct rtw_coex_dm *coex_dm = &rtwdev->coex.dm;
+
+       if (rtwdev->efuse.share_ant)
+               return;
+       if (coex_dm->cur_wl_pwr_lvl == wl_pwr)
+               return;
+
+       coex_dm->cur_wl_pwr_lvl = wl_pwr;
+}
+
+/* The 8821c does not adjust WLAN RX gain for coex; stub only satisfies
+ * the chip-ops coex interface.
+ */
+static void rtw8821c_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
+{}
+
+/* Split path-A's power-tracking delta into a TX AGC index and a BB swing
+ * index.
+ *
+ * @pwr_idx_offset: largest positive step the TX AGC may absorb (clamped
+ *                  to 0xf)
+ * @pwr_idx_offset_lower: most negative step the TX AGC may absorb
+ *                        (clamped to -15)
+ * @txagc_idx: resulting TX AGC index; carries the sign of the delta
+ * @swing_idx: resulting index into rtw8821c_txscale_tbl; any part of the
+ *             delta the AGC range cannot absorb spills into this index
+ */
+static void
+rtw8821c_txagc_swing_offset(struct rtw_dev *rtwdev, u8 pwr_idx_offset,
+                           s8 pwr_idx_offset_lower,
+                           s8 *txagc_idx, u8 *swing_idx)
+{
+       struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+       s8 delta_pwr_idx = dm_info->delta_power_index[RF_PATH_A];
+       u8 swing_upper_bound = dm_info->default_ofdm_index + 10;
+       u8 swing_lower_bound = 0;
+       u8 max_pwr_idx_offset = 0xf;
+       s8 agc_index = 0;
+       u8 swing_index = dm_info->default_ofdm_index;
+
+       pwr_idx_offset = min_t(u8, pwr_idx_offset, max_pwr_idx_offset);
+       pwr_idx_offset_lower = max_t(s8, pwr_idx_offset_lower, -15);
+
+       if (delta_pwr_idx >= 0) {
+               /* Positive delta: AGC takes as much as it can, excess
+                * raises the swing index (capped at upper bound).
+                */
+               if (delta_pwr_idx <= pwr_idx_offset) {
+                       agc_index = delta_pwr_idx;
+                       swing_index = dm_info->default_ofdm_index;
+               } else if (delta_pwr_idx > pwr_idx_offset) {
+                       agc_index = pwr_idx_offset;
+                       swing_index = dm_info->default_ofdm_index +
+                                       delta_pwr_idx - pwr_idx_offset;
+                       swing_index = min_t(u8, swing_index, swing_upper_bound);
+               }
+       } else if (delta_pwr_idx < 0) {
+               /* Negative delta: symmetric, excess lowers the swing
+                * index (floored at zero).
+                */
+               if (delta_pwr_idx >= pwr_idx_offset_lower) {
+                       agc_index = delta_pwr_idx;
+                       swing_index = dm_info->default_ofdm_index;
+               } else if (delta_pwr_idx < pwr_idx_offset_lower) {
+                       if (dm_info->default_ofdm_index >
+                               (pwr_idx_offset_lower - delta_pwr_idx))
+                               swing_index = dm_info->default_ofdm_index +
+                                       delta_pwr_idx - pwr_idx_offset_lower;
+                       else
+                               swing_index = swing_lower_bound;
+
+                       agc_index = pwr_idx_offset_lower;
+               }
+       }
+
+       /* Never index past the end of the TX scale table. */
+       if (swing_index >= ARRAY_SIZE(rtw8821c_txscale_tbl)) {
+               rtw_warn(rtwdev, "swing index overflow\n");
+               swing_index = ARRAY_SIZE(rtw8821c_txscale_tbl) - 1;
+       }
+
+       *txagc_idx = agc_index;
+       *swing_idx = swing_index;
+}
+
+/* Program the power-tracking result: write the TX AGC index and the BB
+ * swing (TX scale) value derived from the current delta.
+ */
+static void rtw8821c_pwrtrack_set_pwr(struct rtw_dev *rtwdev, u8 pwr_idx_offset,
+                                     s8 pwr_idx_offset_lower)
+{
+       s8 agc;
+       u8 swing;
+
+       rtw8821c_txagc_swing_offset(rtwdev, pwr_idx_offset,
+                                   pwr_idx_offset_lower, &agc, &swing);
+
+       rtw_write32_mask(rtwdev, REG_TXAGCIDX, GENMASK(6, 1), agc);
+       rtw_write32_mask(rtwdev, REG_TXSCALE_A, GENMASK(31, 21),
+                        rtw8821c_txscale_tbl[swing]);
+}
+
+/* Compute the allowed positive/negative TX AGC adjustment range for the
+ * current rate/bandwidth/channel and apply the power-tracking result.
+ */
+static void rtw8821c_pwrtrack_set(struct rtw_dev *rtwdev)
+{
+       struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+       u8 limit = rtwdev->chip->max_power_index;
+       u8 base_pwr;
+       s8 lower_off;
+
+       /* Regulatory power index for the active configuration, capped at
+        * the chip maximum.
+        */
+       base_pwr = rtw_phy_get_tx_power_index(rtwdev, RF_PATH_A,
+                                             dm_info->tx_rate,
+                                             rtwdev->hal.current_band_width,
+                                             rtwdev->hal.current_channel,
+                                             rtwdev->regd.txpwr_regd);
+       if (base_pwr > limit)
+               base_pwr = limit;
+
+       /* Headroom up to the chip maximum; full range down to zero. */
+       lower_off = 0 - base_pwr;
+       rtw8821c_pwrtrack_set_pwr(rtwdev, limit - base_pwr, lower_off);
+}
+
+/* One thermal power-tracking pass on RF path A: read the thermal meter,
+ * fold it into the running average, and if the temperature moved enough,
+ * recompute and apply the per-path power delta.  Finally run IQK if the
+ * tracking logic asks for it.
+ */
+static void rtw8821c_phy_pwrtrack(struct rtw_dev *rtwdev)
+{
+       struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+       struct rtw_swing_table swing_table;
+       u8 thermal_value, delta;
+
+       rtw_phy_config_swing_table(rtwdev, &swing_table);
+
+       /* 0xff means the efuse has no thermal calibration value. */
+       if (rtwdev->efuse.thermal_meter[0] == 0xff)
+               return;
+
+       thermal_value = rtw_read_rf(rtwdev, RF_PATH_A, RF_T_METER, 0xfc00);
+
+       rtw_phy_pwrtrack_avg(rtwdev, thermal_value, RF_PATH_A);
+
+       /* First pass after (re)trigger always recomputes; afterwards only
+        * when the averaged thermal value actually changed.
+        */
+       if (dm_info->pwr_trk_init_trigger)
+               dm_info->pwr_trk_init_trigger = false;
+       else if (!rtw_phy_pwrtrack_thermal_changed(rtwdev, thermal_value,
+                                                  RF_PATH_A))
+               goto iqk;
+
+       delta = rtw_phy_pwrtrack_get_delta(rtwdev, RF_PATH_A);
+
+       delta = min_t(u8, delta, RTW_PWR_TRK_TBL_SZ - 1);
+
+       dm_info->delta_power_index[RF_PATH_A] =
+               rtw_phy_pwrtrack_get_pwridx(rtwdev, &swing_table, RF_PATH_A,
+                                           RF_PATH_A, delta);
+       /* Skip the register writes when the delta index is unchanged. */
+       if (dm_info->delta_power_index[RF_PATH_A] ==
+                       dm_info->delta_power_index_last[RF_PATH_A])
+               goto iqk;
+       else
+               dm_info->delta_power_index_last[RF_PATH_A] =
+                       dm_info->delta_power_index[RF_PATH_A];
+       rtw8821c_pwrtrack_set(rtwdev);
+
+iqk:
+       if (rtw_phy_pwrtrack_need_iqk(rtwdev))
+               rtw8821c_do_iqk(rtwdev);
+}
+
+/* Two-phase power-tracking entry point: the first call arms the RF
+ * thermal meter; the next call reads it back and runs the tracking pass.
+ */
+static void rtw8821c_pwr_track(struct rtw_dev *rtwdev)
+{
+       struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+       /* Only thermal-meter based tracking (type 0) is handled here. */
+       if (rtwdev->efuse.power_track_type != 0)
+               return;
+
+       if (dm_info->pwr_trk_triggered) {
+               rtw8821c_phy_pwrtrack(rtwdev);
+               dm_info->pwr_trk_triggered = false;
+               return;
+       }
+
+       /* Arm a thermal measurement and wait for the next round. */
+       rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER,
+                    GENMASK(17, 16), 0x03);
+       dm_info->pwr_trk_triggered = true;
+}
+
+/* Enable or tear down single-user beamformee operation. */
+static void rtw8821c_bf_config_bfee_su(struct rtw_dev *rtwdev,
+                                      struct rtw_vif *vif,
+                                      struct rtw_bfee *bfee, bool enable)
+{
+       if (!enable) {
+               rtw_bf_remove_bfee_su(rtwdev, bfee);
+               return;
+       }
+
+       rtw_bf_enable_bfee_su(rtwdev, vif, bfee);
+}
+
+/* Enable or tear down multi-user beamformee operation. */
+static void rtw8821c_bf_config_bfee_mu(struct rtw_dev *rtwdev,
+                                      struct rtw_vif *vif,
+                                      struct rtw_bfee *bfee, bool enable)
+{
+       if (!enable) {
+               rtw_bf_remove_bfee_mu(rtwdev, bfee);
+               return;
+       }
+
+       rtw_bf_enable_bfee_mu(rtwdev, vif, bfee);
+}
+
+/* Dispatch beamformee configuration by role (SU or MU). */
+static void rtw8821c_bf_config_bfee(struct rtw_dev *rtwdev, struct rtw_vif *vif,
+                                   struct rtw_bfee *bfee, bool enable)
+{
+       switch (bfee->role) {
+       case RTW_BFEE_SU:
+               rtw8821c_bf_config_bfee_su(rtwdev, vif, bfee, enable);
+               break;
+       case RTW_BFEE_MU:
+               rtw8821c_bf_config_bfee_mu(rtwdev, vif, bfee, enable);
+               break;
+       default:
+               rtw_warn(rtwdev, "wrong bfee role\n");
+               break;
+       }
+}
+
+/* Set the CCK packet-detection (PD) level.  pd[] holds the PD threshold
+ * per level; a strong averaged RSSI (> 60) forces the top level with a
+ * relaxed threshold of 0x1d.
+ * NOTE(review): new_lvl is assumed to be < CCK_PD_LV_MAX (the pd[] bound);
+ * there is no bounds check here, so callers must guarantee it — confirm.
+ */
+static void rtw8821c_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl)
+{
+       struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+       u8 pd[CCK_PD_LV_MAX] = {3, 7, 13, 13, 13};
+
+       if (dm_info->min_rssi > 60) {
+               new_lvl = 4;
+               pd[4] = 0x1d;
+               goto set_cck_pd;
+       }
+
+       /* Nothing to do if the 20 MHz path-A level is already current. */
+       if (dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A] == new_lvl)
+               return;
+
+       /* Level changed: restart the false-alarm averaging. */
+       dm_info->cck_fa_avg = CCK_FA_AVG_RESET;
+
+set_cck_pd:
+       dm_info->cck_pd_lv[RTW_CHANNEL_WIDTH_20][RF_PATH_A] = new_lvl;
+       rtw_write32_mask(rtwdev, REG_PWRTH, 0x3f0000, pd[new_lvl]);
+       rtw_write32_mask(rtwdev, REG_PWRTH2, 0x1f0000,
+                        dm_info->cck_pd_default + new_lvl * 2);
+}
+
+/* Power sequence: card-disable -> card-emulation state.  Entry layout:
+ * {offset, cut mask, interface mask, address space, command, mask, value};
+ * the 0xFFFF/RTW_PWR_CMD_END entry terminates the table.
+ */
+static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8821c[] = {
+       {0x0086,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_SDIO,
+        RTW_PWR_CMD_WRITE, BIT(0), 0},
+       {0x0086,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_SDIO,
+        RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
+       {0x004A,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_USB_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(0), 0},
+       {0x0005,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(3) | BIT(4) | BIT(7), 0},
+       {0x0300,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_PCI_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, 0xFF, 0},
+       {0x0301,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_PCI_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, 0xFF, 0},
+       {0xFFFF,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        0,
+        RTW_PWR_CMD_END, 0, 0},
+};
+
+/* Power sequence: card-emulation -> active state (power-on).  Same entry
+ * layout as the other rtw_pwr_seq_cmd tables; terminated by the
+ * 0xFFFF/RTW_PWR_CMD_END entry.
+ */
+static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8821c[] = {
+       {0x0020,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+       {0x0001,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_DELAY, 1, RTW_PWR_DELAY_MS},
+       {0x0000,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(5), 0},
+       {0x0005,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3) | BIT(2)), 0},
+       {0x0075,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_PCI_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+       {0x0006,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
+       {0x0075,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_PCI_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(0), 0},
+       {0x0006,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+       {0x0005,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(7), 0},
+       {0x0005,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3)), 0},
+       {0x10C3,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_USB_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+       {0x0005,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+       {0x0005,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_POLLING, BIT(0), 0},
+       {0x0020,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(3), BIT(3)},
+       {0x0074,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_PCI_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+       {0x0022,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_PCI_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(1), 0},
+       {0x0062,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_PCI_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, (BIT(7) | BIT(6) | BIT(5)),
+        (BIT(7) | BIT(6) | BIT(5))},
+       {0x0061,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_PCI_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, (BIT(7) | BIT(6) | BIT(5)), 0},
+       {0x007C,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(1), 0},
+       {0xFFFF,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        0,
+        RTW_PWR_CMD_END, 0, 0},
+};
+
+/* Power sequence: active -> card-emulation state (power-down first half).
+ * Terminated by the 0xFFFF/RTW_PWR_CMD_END entry.
+ */
+static struct rtw_pwr_seq_cmd trans_act_to_cardemu_8821c[] = {
+       {0x0093,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(3), 0},
+       {0x001F,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, 0xFF, 0},
+       {0x0049,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(1), 0},
+       {0x0006,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+       {0x0002,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(1), 0},
+       {0x10C3,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_USB_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(0), 0},
+       {0x0005,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+       {0x0005,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_POLLING, BIT(1), 0},
+       {0x0020,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(3), 0},
+       {0x0000,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+       {0xFFFF,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        0,
+        RTW_PWR_CMD_END, 0, 0},
+};
+
+/* Power sequence: card-emulation -> card-disable state (power-down second
+ * half).  Terminated by the 0xFFFF/RTW_PWR_CMD_END entry.
+ */
+static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8821c[] = {
+       {0x0007,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, 0xFF, 0x20},
+       {0x0067,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(5), 0},
+       {0x0005,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_PCI_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
+       {0x004A,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_USB_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(0), 0},
+       {0x0067,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(5), 0},
+       {0x0067,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(4), 0},
+       {0x004F,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(0), 0},
+       {0x0067,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(1), 0},
+       {0x0046,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(6), BIT(6)},
+       {0x0067,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(2), 0},
+       {0x0046,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(7), BIT(7)},
+       {0x0062,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(4), BIT(4)},
+       {0x0081,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(7) | BIT(6), 0},
+       {0x0005,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)},
+       {0x0086,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_SDIO,
+        RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+       {0x0086,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_SDIO,
+        RTW_PWR_CMD_POLLING, BIT(1), 0},
+       {0x0090,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_PCI_MSK,
+        RTW_PWR_ADDR_MAC,
+        RTW_PWR_CMD_WRITE, BIT(1), 0},
+       {0x0044,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_SDIO,
+        RTW_PWR_CMD_WRITE, 0xFF, 0},
+       {0x0040,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_SDIO,
+        RTW_PWR_CMD_WRITE, 0xFF, 0x90},
+       {0x0041,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_SDIO,
+        RTW_PWR_CMD_WRITE, 0xFF, 0x00},
+       {0x0042,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_SDIO_MSK,
+        RTW_PWR_ADDR_SDIO,
+        RTW_PWR_CMD_WRITE, 0xFF, 0x04},
+       {0xFFFF,
+        RTW_PWR_CUT_ALL_MSK,
+        RTW_PWR_INTF_ALL_MSK,
+        0,
+        RTW_PWR_CMD_END, 0, 0},
+};
+
+/* NULL-terminated lists of power sequences executed in order on card
+ * enable and disable.
+ */
+static const struct rtw_pwr_seq_cmd *card_enable_flow_8821c[] = {
+       trans_carddis_to_cardemu_8821c,
+       trans_cardemu_to_act_8821c,
+       NULL
+};
+
+static const struct rtw_pwr_seq_cmd *card_disable_flow_8821c[] = {
+       trans_act_to_cardemu_8821c,
+       trans_cardemu_to_carddis_8821c,
+       NULL
+};
+
+/* Interface PHY parameter tables (USB2/USB3, PCIe gen1/gen2); an entry
+ * with address 0xFFFF terminates each table.
+ */
+static const struct rtw_intf_phy_para usb2_param_8821c[] = {
+       {0xFFFF, 0x00,
+        RTW_IP_SEL_PHY,
+        RTW_INTF_PHY_CUT_ALL,
+        RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static const struct rtw_intf_phy_para usb3_param_8821c[] = {
+       {0xFFFF, 0x0000,
+        RTW_IP_SEL_PHY,
+        RTW_INTF_PHY_CUT_ALL,
+        RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static const struct rtw_intf_phy_para pcie_gen1_param_8821c[] = {
+       {0x0009, 0x6380,
+        RTW_IP_SEL_PHY,
+        RTW_INTF_PHY_CUT_ALL,
+        RTW_INTF_PHY_PLATFORM_ALL},
+       {0xFFFF, 0x0000,
+        RTW_IP_SEL_PHY,
+        RTW_INTF_PHY_CUT_ALL,
+        RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static const struct rtw_intf_phy_para pcie_gen2_param_8821c[] = {
+       {0xFFFF, 0x0000,
+        RTW_IP_SEL_PHY,
+        RTW_INTF_PHY_CUT_ALL,
+        RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+/* Interface PHY parameter table set.  Each n_* count must be derived from
+ * its own parameter array so the consumer iterates the right number of
+ * entries.
+ */
+static const struct rtw_intf_phy_para_table phy_para_table_8821c = {
+       .usb2_para      = usb2_param_8821c,
+       .usb3_para      = usb3_param_8821c,
+       .gen1_para      = pcie_gen1_param_8821c,
+       .gen2_para      = pcie_gen2_param_8821c,
+       .n_usb2_para    = ARRAY_SIZE(usb2_param_8821c),
+       /* Was ARRAY_SIZE(usb2_param_8821c) — copy-paste slip; harmless only
+        * while both arrays happen to have one entry.
+        */
+       .n_usb3_para    = ARRAY_SIZE(usb3_param_8821c),
+       .n_gen1_para    = ARRAY_SIZE(pcie_gen1_param_8821c),
+       .n_gen2_para    = ARRAY_SIZE(pcie_gen2_param_8821c),
+};
+
+/* RF front-end definitions, indexed by the efuse rfe_option value. */
+static const struct rtw_rfe_def rtw8821c_rfe_defs[] = {
+       [0] = RTW_DEF_RFE(8821c, 0, 0),
+};
+
+/* Gain register used by the DIG algorithm: 0xc50, bits [6:0]. */
+static struct rtw_hw_reg rtw8821c_dig[] = {
+       [0] = { .addr = 0xc50, .mask = 0x7f },
+};
+
+/* Indirect-access register triplet for the LTE coex block. */
+static const struct rtw_ltecoex_addr rtw8821c_ltecoex_addr = {
+       .ctrl = LTECOEX_ACCESS_CTRL,
+       .wdata = LTECOEX_WRITE_DATA,
+       .rdata = LTECOEX_READ_DATA,
+};
+
+/* TX FIFO page allocation per profile.  Columns presumably map to the
+ * struct rtw_page_table fields (per-priority queue page counts) — confirm
+ * against that struct's declaration.
+ */
+static struct rtw_page_table page_table_8821c[] = {
+       /* not sure what [0] stands for */
+       {16, 16, 16, 14, 1},
+       {16, 16, 16, 14, 1},
+       {16, 16, 0, 0, 1},
+       {16, 16, 16, 0, 1},
+       {16, 16, 16, 14, 1},
+};
+
+/* DMA ring mapping per TX queue, one row per profile; columns presumably
+ * follow the struct rtw_rqpn field order — confirm against that struct.
+ */
+static struct rtw_rqpn rqpn_table_8821c[] = {
+       /* not sure what [0] stands for */
+       {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+        RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+        RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+       {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+        RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+        RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+       {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+        RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_HIGH,
+        RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+       {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+        RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+        RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+       {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+        RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+        RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+};
+
+/* Reserved/available page-count register pairs per DMA priority queue;
+ * the available count sits 2 bytes after the reserved count.
+ */
+static struct rtw_prioq_addrs prioq_addrs_8821c = {
+       .prio[RTW_DMA_MAPPING_EXTRA] = {
+               .rsvd = REG_FIFOPAGE_INFO_4, .avail = REG_FIFOPAGE_INFO_4 + 2,
+       },
+       .prio[RTW_DMA_MAPPING_LOW] = {
+               .rsvd = REG_FIFOPAGE_INFO_2, .avail = REG_FIFOPAGE_INFO_2 + 2,
+       },
+       .prio[RTW_DMA_MAPPING_NORMAL] = {
+               .rsvd = REG_FIFOPAGE_INFO_3, .avail = REG_FIFOPAGE_INFO_3 + 2,
+       },
+       .prio[RTW_DMA_MAPPING_HIGH] = {
+               .rsvd = REG_FIFOPAGE_INFO_1, .avail = REG_FIFOPAGE_INFO_1 + 2,
+       },
+       .wsize = true,
+};
+
+/* Chip operation callbacks for the 8821c.  .set_antenna is left NULL,
+ * i.e. no antenna-selection op is provided for this chip.
+ */
+static struct rtw_chip_ops rtw8821c_ops = {
+       .phy_set_param          = rtw8821c_phy_set_param,
+       .read_efuse             = rtw8821c_read_efuse,
+       .query_rx_desc          = rtw8821c_query_rx_desc,
+       .set_channel            = rtw8821c_set_channel,
+       .mac_init               = rtw8821c_mac_init,
+       .read_rf                = rtw_phy_read_rf,
+       .write_rf               = rtw_phy_write_rf_reg_sipi,
+       .set_antenna            = NULL,
+       .set_tx_power_index     = rtw8821c_set_tx_power_index,
+       .cfg_ldo25              = rtw8821c_cfg_ldo25,
+       .false_alarm_statistics = rtw8821c_false_alarm_statistics,
+       .phy_calibration        = rtw8821c_phy_calibration,
+       .cck_pd_set             = rtw8821c_phy_cck_pd_set,
+       .pwr_track              = rtw8821c_pwr_track,
+       .config_bfee            = rtw8821c_bf_config_bfee,
+       .set_gid_table          = rtw_bf_set_gid_table,
+       .cfg_csi_rate           = rtw_bf_cfg_csi_rate,
+
+       /* Coex callbacks */
+       .coex_set_init          = rtw8821c_coex_cfg_init,
+       .coex_set_ant_switch    = rtw8821c_coex_cfg_ant_switch,
+       .coex_set_gnt_fix       = rtw8821c_coex_cfg_gnt_fix,
+       .coex_set_gnt_debug     = rtw8821c_coex_cfg_gnt_debug,
+       .coex_set_rfe_type      = rtw8821c_coex_cfg_rfe_type,
+       .coex_set_wl_tx_power   = rtw8821c_coex_cfg_wl_tx_power,
+       .coex_set_wl_rx_gain    = rtw8821c_coex_cfg_wl_rx_gain,
+};
+
+/* rssi in percentage % (dbm = % - 100); 101 is above any reachable value,
+ * presumably meaning "step disabled" — confirm against the coex core.
+ */
+static const u8 wl_rssi_step_8821c[] = {101, 45, 101, 40};
+static const u8 bt_rssi_step_8821c[] = {101, 101, 101, 101};
+
+/* Shared-Antenna Coex Table: {WL slot pattern, BT slot pattern} pairs,
+ * selected by coex case number (see the case-N markers).
+ */
+static const struct coex_table_para table_sant_8821c[] = {
+       {0x55555555, 0x55555555}, /* case-0 */
+       {0x55555555, 0x55555555},
+       {0x66555555, 0x66555555},
+       {0xaaaaaaaa, 0xaaaaaaaa},
+       {0x5a5a5a5a, 0x5a5a5a5a},
+       {0xfafafafa, 0xfafafafa}, /* case-5 */
+       {0x6a5a5555, 0xaaaaaaaa},
+       {0x6a5a56aa, 0x6a5a56aa},
+       {0x6a5a5a5a, 0x6a5a5a5a},
+       {0x66555555, 0x5a5a5a5a},
+       {0x66555555, 0x6a5a5a5a}, /* case-10 */
+       {0x66555555, 0xaaaaaaaa},
+       {0x66555555, 0x6a5a5aaa},
+       {0x66555555, 0x6aaa6aaa},
+       {0x66555555, 0x6a5a5aaa},
+       {0x66555555, 0xaaaaaaaa}, /* case-15 */
+       {0xffff55ff, 0xfafafafa},
+       {0xffff55ff, 0x6afa5afa},
+       {0xaaffffaa, 0xfafafafa},
+       {0xaa5555aa, 0x5a5a5a5a},
+       {0xaa5555aa, 0x6a5a5a5a}, /* case-20 */
+       {0xaa5555aa, 0xaaaaaaaa},
+       {0xffffffff, 0x55555555},
+       {0xffffffff, 0x5a5a5a5a},
+       {0xffffffff, 0x5a5a5a5a},
+       {0xffffffff, 0x5a5a5aaa}, /* case-25 */
+       {0x55555555, 0x5a5a5a5a},
+       {0x55555555, 0xaaaaaaaa},
+       {0x66555555, 0x6a5a6a5a},
+       {0x66556655, 0x66556655},
+       {0x66556aaa, 0x6a5a6aaa}, /* case-30 */
+       {0xffffffff, 0x5aaa5aaa},
+       {0x56555555, 0x5a5a5aaa}
+};
+
+/* Non-Shared-Antenna Coex Table: same layout, cases numbered from 100. */
+static const struct coex_table_para table_nsant_8821c[] = {
+       {0xffffffff, 0xffffffff}, /* case-100 */
+       {0xffff55ff, 0xfafafafa},
+       {0x66555555, 0x66555555},
+       {0xaaaaaaaa, 0xaaaaaaaa},
+       {0x5a5a5a5a, 0x5a5a5a5a},
+       {0xffffffff, 0xffffffff}, /* case-105 */
+       {0x5afa5afa, 0x5afa5afa},
+       {0x55555555, 0xfafafafa},
+       {0x66555555, 0xfafafafa},
+       {0x66555555, 0x5a5a5a5a},
+       {0x66555555, 0x6a5a5a5a}, /* case-110 */
+       {0x66555555, 0xaaaaaaaa},
+       {0xffff55ff, 0xfafafafa},
+       {0xffff55ff, 0x5afa5afa},
+       {0xffff55ff, 0xaaaaaaaa},
+       {0xffff55ff, 0xffff55ff}, /* case-115 */
+       {0xaaffffaa, 0x5afa5afa},
+       {0xaaffffaa, 0xaaaaaaaa},
+       {0xffffffff, 0xfafafafa},
+       {0xffff55ff, 0xfafafafa},
+       {0xffffffff, 0xaaaaaaaa}, /* case-120 */
+       {0xffff55ff, 0x5afa5afa},
+       {0xffff55ff, 0x5afa5afa},
+       {0x55ff55ff, 0x55ff55ff}
+};
+
+/* Shared-Antenna TDMA: 5-byte firmware TDMA parameter blocks, selected by
+ * coex case number (see the case-N markers).
+ */
+static const struct coex_tdma_para tdma_sant_8821c[] = {
+       { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-0 */
+       { {0x61, 0x45, 0x03, 0x11, 0x11} }, /* case-1 */
+       { {0x61, 0x3a, 0x03, 0x11, 0x11} },
+       { {0x61, 0x35, 0x03, 0x11, 0x11} },
+       { {0x61, 0x20, 0x03, 0x11, 0x11} },
+       { {0x61, 0x3a, 0x03, 0x11, 0x11} }, /* case-5 */
+       { {0x61, 0x45, 0x03, 0x11, 0x10} },
+       { {0x61, 0x35, 0x03, 0x11, 0x10} },
+       { {0x61, 0x30, 0x03, 0x11, 0x10} },
+       { {0x61, 0x20, 0x03, 0x11, 0x10} },
+       { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-10 */
+       { {0x61, 0x08, 0x03, 0x11, 0x15} },
+       { {0x61, 0x08, 0x03, 0x10, 0x14} },
+       { {0x51, 0x08, 0x03, 0x10, 0x54} },
+       { {0x51, 0x08, 0x03, 0x10, 0x55} },
+       { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-15 */
+       { {0x51, 0x45, 0x03, 0x10, 0x50} },
+       { {0x51, 0x3a, 0x03, 0x11, 0x50} },
+       { {0x51, 0x30, 0x03, 0x10, 0x50} },
+       { {0x51, 0x21, 0x03, 0x10, 0x50} },
+       { {0x51, 0x10, 0x03, 0x10, 0x50} }, /* case-20 */
+       { {0x51, 0x4a, 0x03, 0x10, 0x50} },
+       { {0x51, 0x08, 0x03, 0x30, 0x54} },
+       { {0x55, 0x08, 0x03, 0x10, 0x54} },
+       { {0x65, 0x10, 0x03, 0x11, 0x10} },
+       { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */
+       { {0x51, 0x21, 0x03, 0x10, 0x50} },
+       { {0x61, 0x08, 0x03, 0x11, 0x11} }
+};
+
+/* Non-Shared-Antenna TDMA: same layout, cases numbered from 100. */
+static const struct coex_tdma_para tdma_nsant_8821c[] = {
+       { {0x00, 0x00, 0x00, 0x40, 0x00} }, /* case-100 */
+       { {0x61, 0x45, 0x03, 0x11, 0x11} },
+       { {0x61, 0x25, 0x03, 0x11, 0x11} },
+       { {0x61, 0x35, 0x03, 0x11, 0x11} },
+       { {0x61, 0x20, 0x03, 0x11, 0x11} },
+       { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-105 */
+       { {0x61, 0x45, 0x03, 0x11, 0x10} },
+       { {0x61, 0x30, 0x03, 0x11, 0x10} },
+       { {0x61, 0x30, 0x03, 0x11, 0x10} },
+       { {0x61, 0x20, 0x03, 0x11, 0x10} },
+       { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-110 */
+       { {0x61, 0x10, 0x03, 0x11, 0x11} },
+       { {0x61, 0x08, 0x03, 0x10, 0x14} },
+       { {0x51, 0x08, 0x03, 0x10, 0x54} },
+       { {0x51, 0x08, 0x03, 0x10, 0x55} },
+       { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-115 */
+       { {0x51, 0x45, 0x03, 0x10, 0x50} },
+       { {0x51, 0x3a, 0x03, 0x10, 0x50} },
+       { {0x51, 0x30, 0x03, 0x10, 0x50} },
+       { {0x51, 0x21, 0x03, 0x10, 0x50} },
+       { {0x51, 0x21, 0x03, 0x10, 0x50} }, /* case-120 */
+       { {0x51, 0x10, 0x03, 0x10, 0x50} }
+};
+
+/* No 5 GHz AFH mapping is defined for this chip (single all-zero entry). */
+static const struct coex_5g_afh_map afh_5g_8821c[] = { {0, 0, 0} };
+
+/* wl_tx_dec_power, bt_tx_dec_power, wl_rx_gain, bt_rx_lna_constrain */
+static const struct coex_rf_para rf_para_tx_8821c[] = {
+       {0, 0, false, 7},  /* for normal */
+       {0, 20, false, 7}, /* for WL-CPT */
+       {8, 17, true, 4},
+       {7, 18, true, 4},
+       {6, 19, true, 4},
+       {5, 20, true, 4}
+};
+
+static const struct coex_rf_para rf_para_rx_8821c[] = {
+       {0, 0, false, 7},  /* for normal */
+       {0, 20, false, 7}, /* for WL-CPT */
+       {3, 24, true, 5},
+       {2, 26, true, 5},
+       {1, 27, true, 5},
+       {0, 28, true, 5}
+};
+
+/* TX and RX tables are indexed by the same level and must stay in sync. */
+static_assert(ARRAY_SIZE(rf_para_tx_8821c) == ARRAY_SIZE(rf_para_rx_8821c));
+
+/* 5 GHz power-tracking compensation tables, one row per 5G band segment,
+ * indexed by thermal delta.  _n = temperature below calibration (negative
+ * delta), _p = above; a/b presumably select RF path A/B — confirm against
+ * rtw_phy_pwrtrack_get_pwridx().
+ */
+static const u8 rtw8821c_pwrtrk_5gb_n[][RTW_PWR_TRK_TBL_SZ] = {
+       {0, 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8, 8, 8, 9, 9, 9, 10, 10,
+        11, 11, 12, 12, 12, 12, 12},
+       {0, 1, 1, 1, 2, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10, 10, 11,
+        11, 12, 12, 12, 12, 12, 12, 12},
+       {0, 1, 2, 2, 3, 4, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11,
+        11, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8 rtw8821c_pwrtrk_5gb_p[][RTW_PWR_TRK_TBL_SZ] = {
+       {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 11, 11,
+        12, 12, 12, 12, 12, 12, 12},
+       {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, 8, 8, 9, 10, 10, 11, 11,
+        12, 12, 12, 12, 12, 12, 12, 12},
+       {0, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 6, 6, 7, 7, 8, 8, 9, 10, 10, 11,
+        11, 12, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8 rtw8821c_pwrtrk_5ga_n[][RTW_PWR_TRK_TBL_SZ] = {
+       {0, 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8, 8, 8, 9, 9, 9, 10, 10,
+        11, 11, 12, 12, 12, 12, 12},
+       {0, 1, 1, 1, 2, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10, 10, 11,
+        11, 12, 12, 12, 12, 12, 12, 12},
+       {0, 1, 2, 2, 3, 4, 4, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11,
+        11, 12, 12, 12, 12, 12, 12},
+};
+
+static const u8 rtw8821c_pwrtrk_5ga_p[][RTW_PWR_TRK_TBL_SZ] = {
+       {0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 11, 11,
+        12, 12, 12, 12, 12, 12, 12},
+       {0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, 8, 8, 9, 10, 10, 11, 11,
+        12, 12, 12, 12, 12, 12, 12, 12},
+       {0, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 6, 6, 7, 7, 8, 8, 9, 10, 10, 11,
+        11, 12, 12, 12, 12, 12, 12, 12},
+};
+
+/* 2.4 GHz power-tracking compensation tables (OFDM and CCK variants),
+ * indexed by thermal delta.  _n = temperature below calibration, _p =
+ * above; a/b presumably select RF path A/B — confirm against
+ * rtw_phy_pwrtrack_get_pwridx().
+ */
+static const u8 rtw8821c_pwrtrk_2gb_n[] = {
+       0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4,
+       4, 4, 5, 5, 5, 5, 6, 6, 6, 7, 7, 8, 8, 9
+};
+
+static const u8 rtw8821c_pwrtrk_2gb_p[] = {
+       0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 5,
+       5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9
+};
+
+static const u8 rtw8821c_pwrtrk_2ga_n[] = {
+       0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4,
+       4, 4, 5, 5, 5, 5, 6, 6, 6, 7, 7, 8, 8, 9
+};
+
+static const u8 rtw8821c_pwrtrk_2ga_p[] = {
+       0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 5,
+       5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9
+};
+
+static const u8 rtw8821c_pwrtrk_2g_cck_b_n[] = {
+       0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
+       4, 5, 5, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 9
+};
+
+static const u8 rtw8821c_pwrtrk_2g_cck_b_p[] = {
+       0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5,
+       5, 6, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9, 9, 9
+};
+
+static const u8 rtw8821c_pwrtrk_2g_cck_a_n[] = {
+       0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
+       4, 5, 5, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 9
+};
+
+static const u8 rtw8821c_pwrtrk_2g_cck_a_p[] = {
+       0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5,
+       5, 6, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9, 9, 9
+};
+
+static const struct rtw_pwr_track_tbl rtw8821c_rtw_pwr_track_tbl = {
+       .pwrtrk_5gb_n[0] = rtw8821c_pwrtrk_5gb_n[0],
+       .pwrtrk_5gb_n[1] = rtw8821c_pwrtrk_5gb_n[1],
+       .pwrtrk_5gb_n[2] = rtw8821c_pwrtrk_5gb_n[2],
+       .pwrtrk_5gb_p[0] = rtw8821c_pwrtrk_5gb_p[0],
+       .pwrtrk_5gb_p[1] = rtw8821c_pwrtrk_5gb_p[1],
+       .pwrtrk_5gb_p[2] = rtw8821c_pwrtrk_5gb_p[2],
+       .pwrtrk_5ga_n[0] = rtw8821c_pwrtrk_5ga_n[0],
+       .pwrtrk_5ga_n[1] = rtw8821c_pwrtrk_5ga_n[1],
+       .pwrtrk_5ga_n[2] = rtw8821c_pwrtrk_5ga_n[2],
+       .pwrtrk_5ga_p[0] = rtw8821c_pwrtrk_5ga_p[0],
+       .pwrtrk_5ga_p[1] = rtw8821c_pwrtrk_5ga_p[1],
+       .pwrtrk_5ga_p[2] = rtw8821c_pwrtrk_5ga_p[2],
+       .pwrtrk_2gb_n = rtw8821c_pwrtrk_2gb_n,
+       .pwrtrk_2gb_p = rtw8821c_pwrtrk_2gb_p,
+       .pwrtrk_2ga_n = rtw8821c_pwrtrk_2ga_n,
+       .pwrtrk_2ga_p = rtw8821c_pwrtrk_2ga_p,
+       .pwrtrk_2g_cckb_n = rtw8821c_pwrtrk_2g_cck_b_n,
+       .pwrtrk_2g_cckb_p = rtw8821c_pwrtrk_2g_cck_b_p,
+       .pwrtrk_2g_ccka_n = rtw8821c_pwrtrk_2g_cck_a_n,
+       .pwrtrk_2g_ccka_p = rtw8821c_pwrtrk_2g_cck_a_p,
+};
+
+static const struct rtw_reg_domain coex_info_hw_regs_8821c[] = {
+       {0xCB0, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+       {0xCB4, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+       {0xCBA, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+       {0, 0, RTW_REG_DOMAIN_NL},
+       {0x430, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+       {0x434, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+       {0x42a, MASKLWORD, RTW_REG_DOMAIN_MAC16},
+       {0x426, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+       {0x45e, BIT(3), RTW_REG_DOMAIN_MAC8},
+       {0x454, MASKLWORD, RTW_REG_DOMAIN_MAC16},
+       {0, 0, RTW_REG_DOMAIN_NL},
+       {0x4c, BIT(24) | BIT(23), RTW_REG_DOMAIN_MAC32},
+       {0x64, BIT(0), RTW_REG_DOMAIN_MAC8},
+       {0x4c6, BIT(4), RTW_REG_DOMAIN_MAC8},
+       {0x40, BIT(5), RTW_REG_DOMAIN_MAC8},
+       {0x1, RFREG_MASK, RTW_REG_DOMAIN_RF_A},
+       {0, 0, RTW_REG_DOMAIN_NL},
+       {0x550, MASKDWORD, RTW_REG_DOMAIN_MAC32},
+       {0x522, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+       {0x953, BIT(1), RTW_REG_DOMAIN_MAC8},
+       {0xc50,  MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+       {0x60A, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
+};
+
+struct rtw_chip_info rtw8821c_hw_spec = {
+       .ops = &rtw8821c_ops,
+       .id = RTW_CHIP_TYPE_8821C,
+       .fw_name = "rtw88/rtw8821c_fw.bin",
+       .wlan_cpu = RTW_WCPU_11AC,
+       .tx_pkt_desc_sz = 48,
+       .tx_buf_desc_sz = 16,
+       .rx_pkt_desc_sz = 24,
+       .rx_buf_desc_sz = 8,
+       .phy_efuse_size = 512,
+       .log_efuse_size = 512,
+       .ptct_efuse_size = 96,
+       .txff_size = 65536,
+       .rxff_size = 16384,
+       .txgi_factor = 1,
+       .is_pwr_by_rate_dec = true,
+       .max_power_index = 0x3f,
+       .csi_buf_pg_num = 0,
+       .band = RTW_BAND_2G | RTW_BAND_5G,
+       .page_size = 128,
+       .dig_min = 0x1c,
+       .ht_supported = true,
+       .vht_supported = true,
+       .lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK),
+       .sys_func_en = 0xD8,
+       .pwr_on_seq = card_enable_flow_8821c,
+       .pwr_off_seq = card_disable_flow_8821c,
+       .page_table = page_table_8821c,
+       .rqpn_table = rqpn_table_8821c,
+       .prioq_addrs = &prioq_addrs_8821c,
+       .intf_table = &phy_para_table_8821c,
+       .dig = rtw8821c_dig,
+       .rf_base_addr = {0x2800, 0x2c00},
+       .rf_sipi_addr = {0xc90, 0xe90},
+       .ltecoex_addr = &rtw8821c_ltecoex_addr,
+       .mac_tbl = &rtw8821c_mac_tbl,
+       .agc_tbl = &rtw8821c_agc_tbl,
+       .bb_tbl = &rtw8821c_bb_tbl,
+       .rf_tbl = {&rtw8821c_rf_a_tbl},
+       .rfe_defs = rtw8821c_rfe_defs,
+       .rfe_defs_size = ARRAY_SIZE(rtw8821c_rfe_defs),
+       .rx_ldpc = false,
+       .pwr_track_tbl = &rtw8821c_rtw_pwr_track_tbl,
+       .iqk_threshold = 8,
+       .bfer_su_max_num = 2,
+       .bfer_mu_max_num = 1,
+
+       .coex_para_ver = 0x19092746,
+       .bt_desired_ver = 0x46,
+       .scbd_support = true,
+       .new_scbd10_def = false,
+       .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
+       .bt_rssi_type = COEX_BTRSSI_RATIO,
+       .ant_isolation = 15,
+       .rssi_tolerance = 2,
+       .wl_rssi_step = wl_rssi_step_8821c,
+       .bt_rssi_step = bt_rssi_step_8821c,
+       .table_sant_num = ARRAY_SIZE(table_sant_8821c),
+       .table_sant = table_sant_8821c,
+       .table_nsant_num = ARRAY_SIZE(table_nsant_8821c),
+       .table_nsant = table_nsant_8821c,
+       .tdma_sant_num = ARRAY_SIZE(tdma_sant_8821c),
+       .tdma_sant = tdma_sant_8821c,
+       .tdma_nsant_num = ARRAY_SIZE(tdma_nsant_8821c),
+       .tdma_nsant = tdma_nsant_8821c,
+       .wl_rf_para_num = ARRAY_SIZE(rf_para_tx_8821c),
+       .wl_rf_para_tx = rf_para_tx_8821c,
+       .wl_rf_para_rx = rf_para_rx_8821c,
+       .bt_afh_span_bw20 = 0x24,
+       .bt_afh_span_bw40 = 0x36,
+       .afh_5g_num = ARRAY_SIZE(afh_5g_8821c),
+       .afh_5g = afh_5g_8821c,
+
+       .coex_info_hw_regs_num = ARRAY_SIZE(coex_info_hw_regs_8821c),
+       .coex_info_hw_regs = coex_info_hw_regs_8821c,
+};
+EXPORT_SYMBOL(rtw8821c_hw_spec);
+
+MODULE_FIRMWARE("rtw88/rtw8821c_fw.bin");
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ac wireless 8821c driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.h b/drivers/net/wireless/realtek/rtw88/rtw8821c.h
new file mode 100644 (file)
index 0000000..bd01e82
--- /dev/null
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019  Realtek Corporation
+ */
+
+#ifndef __RTW8821C_H__
+#define __RTW8821C_H__
+
+#include <asm/byteorder.h>
+
+#define RCR_VHT_ACK            BIT(26)
+
+struct rtw8821ce_efuse {
+       u8 mac_addr[ETH_ALEN];          /* 0xd0 */
+       u8 vender_id[2];
+       u8 device_id[2];
+       u8 sub_vender_id[2];
+       u8 sub_device_id[2];
+       u8 pmc[2];
+       u8 exp_device_cap[2];
+       u8 msi_cap;
+       u8 ltr_cap;                     /* 0xe3 */
+       u8 exp_link_control[2];
+       u8 link_cap[4];
+       u8 link_control[2];
+       u8 serial_number[8];
+       u8 res0:2;                      /* 0xf4 */
+       u8 ltr_en:1;
+       u8 res1:2;
+       u8 obff:2;
+       u8 res2:3;
+       u8 obff_cap:2;
+       u8 res3:4;
+       u8 res4[3];
+       u8 class_code[3];
+       u8 pci_pm_L1_2_supp:1;
+       u8 pci_pm_L1_1_supp:1;
+       u8 aspm_pm_L1_2_supp:1;
+       u8 aspm_pm_L1_1_supp:1;
+       u8 L1_pm_substates_supp:1;
+       u8 res5:3;
+       u8 port_common_mode_restore_time;
+       u8 port_t_power_on_scale:2;
+       u8 res6:1;
+       u8 port_t_power_on_value:5;
+       u8 res7;
+};
+
+struct rtw8821c_efuse {
+       __le16 rtl_id;
+       u8 res0[0x0e];
+
+       /* power index for four RF paths */
+       struct rtw_txpwr_idx txpwr_idx_table[4];
+
+       u8 channel_plan;                /* 0xb8 */
+       u8 xtal_k;
+       u8 thermal_meter;
+       u8 iqk_lck;
+       u8 pa_type;                     /* 0xbc */
+       u8 lna_type_2g[2];              /* 0xbd */
+       u8 lna_type_5g[2];
+       u8 rf_board_option;
+       u8 rf_feature_option;
+       u8 rf_bt_setting;
+       u8 eeprom_version;
+       u8 eeprom_customer_id;
+       u8 tx_bb_swing_setting_2g;
+       u8 tx_bb_swing_setting_5g;
+       u8 tx_pwr_calibrate_rate;
+       u8 rf_antenna_option;           /* 0xc9 */
+       u8 rfe_option;
+       u8 country_code[2];
+       u8 res[3];
+       union {
+               struct rtw8821ce_efuse e;
+       };
+};
+
+static inline void
+_rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
+{
+       /* 0xC00-0xCFF and 0xE00-0xEFF have the same layout */
+       rtw_write32_mask(rtwdev, addr, mask, data);
+       rtw_write32_mask(rtwdev, addr + 0x200, mask, data);
+}
+
+#define rtw_write32s_mask(rtwdev, addr, mask, data)                           \
+       do {                                                                   \
+               BUILD_BUG_ON((addr) < 0xC00 || (addr) >= 0xD00);               \
+                                                                              \
+               _rtw_write32s_mask(rtwdev, addr, mask, data);                  \
+       } while (0)
+
+#define BIT_FEN_PCIEA BIT(6)
+#define WLAN_SLOT_TIME         0x09
+#define WLAN_PIFS_TIME         0x19
+#define WLAN_SIFS_CCK_CONT_TX  0xA
+#define WLAN_SIFS_OFDM_CONT_TX 0xE
+#define WLAN_SIFS_CCK_TRX      0x10
+#define WLAN_SIFS_OFDM_TRX     0x10
+#define WLAN_VO_TXOP_LIMIT     0x186
+#define WLAN_VI_TXOP_LIMIT     0x3BC
+#define WLAN_RDG_NAV           0x05
+#define WLAN_TXOP_NAV          0x1B
+#define WLAN_CCK_RX_TSF                0x30
+#define WLAN_OFDM_RX_TSF       0x30
+#define WLAN_TBTT_PROHIBIT     0x04
+#define WLAN_TBTT_HOLD_TIME    0x064
+#define WLAN_DRV_EARLY_INT     0x04
+#define WLAN_BCN_DMA_TIME      0x02
+
+#define WLAN_RX_FILTER0                0x0FFFFFFF
+#define WLAN_RX_FILTER2                0xFFFF
+#define WLAN_RCR_CFG           0xE400220E
+#define WLAN_RXPKT_MAX_SZ      12288
+#define WLAN_RXPKT_MAX_SZ_512  (WLAN_RXPKT_MAX_SZ >> 9)
+
+#define WLAN_AMPDU_MAX_TIME            0x70
+#define WLAN_RTS_LEN_TH                        0xFF
+#define WLAN_RTS_TX_TIME_TH            0x08
+#define WLAN_MAX_AGG_PKT_LIMIT         0x20
+#define WLAN_RTS_MAX_AGG_PKT_LIMIT     0x20
+#define FAST_EDCA_VO_TH                0x06
+#define FAST_EDCA_VI_TH                0x06
+#define FAST_EDCA_BE_TH                0x06
+#define FAST_EDCA_BK_TH                0x06
+#define WLAN_BAR_RETRY_LIMIT           0x01
+#define WLAN_RA_TRY_RATE_AGG_LIMIT     0x08
+
+#define WLAN_TX_FUNC_CFG1              0x30
+#define WLAN_TX_FUNC_CFG2              0x30
+#define WLAN_MAC_OPT_NORM_FUNC1                0x98
+#define WLAN_MAC_OPT_LB_FUNC1          0x80
+#define WLAN_MAC_OPT_FUNC2             0x30810041
+
+#define WLAN_SIFS_CFG  (WLAN_SIFS_CCK_CONT_TX | \
+                       (WLAN_SIFS_OFDM_CONT_TX << BIT_SHIFT_SIFS_OFDM_CTX) | \
+                       (WLAN_SIFS_CCK_TRX << BIT_SHIFT_SIFS_CCK_TRX) | \
+                       (WLAN_SIFS_OFDM_TRX << BIT_SHIFT_SIFS_OFDM_TRX))
+
+#define WLAN_TBTT_TIME (WLAN_TBTT_PROHIBIT |\
+                       (WLAN_TBTT_HOLD_TIME << BIT_SHIFT_TBTT_HOLD_TIME_AP))
+
+#define WLAN_NAV_CFG           (WLAN_RDG_NAV | (WLAN_TXOP_NAV << 16))
+#define WLAN_RX_TSF_CFG                (WLAN_CCK_RX_TSF | (WLAN_OFDM_RX_TSF) << 8)
+#define WLAN_PRE_TXCNT_TIME_TH         0x1E4
+
+/* phy status page0 */
+#define GET_PHY_STAT_P0_PWDB(phy_stat)                                         \
+       le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
+
+/* phy status page1 */
+#define GET_PHY_STAT_P1_PWDB_A(phy_stat)                                       \
+       le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_PWDB_B(phy_stat)                                       \
+       le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(23, 16))
+#define GET_PHY_STAT_P1_RF_MODE(phy_stat)                                      \
+       le32_get_bits(*((__le32 *)(phy_stat) + 0x03), GENMASK(29, 28))
+#define GET_PHY_STAT_P1_L_RXSC(phy_stat)                                       \
+       le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(11, 8))
+#define GET_PHY_STAT_P1_HT_RXSC(phy_stat)                                      \
+       le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(15, 12))
+#define GET_PHY_STAT_P1_RXEVM_A(phy_stat)                                      \
+       le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_RXEVM_B(phy_stat)                                      \
+       le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_CFO_TAIL_A(phy_stat)                                 \
+       le32_get_bits(*((__le32 *)(phy_stat) + 0x05), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_CFO_TAIL_B(phy_stat)                                 \
+       le32_get_bits(*((__le32 *)(phy_stat) + 0x05), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_RXSNR_A(phy_stat)                                      \
+       le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(7, 0))
+#define GET_PHY_STAT_P1_RXSNR_B(phy_stat)                                      \
+       le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(15, 8))
+
+#define REG_INIRTS_RATE_SEL 0x0480
+#define REG_HTSTFWT    0x800
+#define REG_RXPSEL     0x808
+#define BIT_RX_PSEL_RST                (BIT(28) | BIT(29))
+#define REG_TXPSEL     0x80c
+#define REG_RXCCAMSK   0x814
+#define REG_CCASEL     0x82c
+#define REG_PDMFTH     0x830
+#define REG_CCA2ND     0x838
+#define REG_L1WT       0x83c
+#define REG_L1PKWT     0x840
+#define REG_MRC                0x850
+#define REG_CLKTRK     0x860
+#define REG_ADCCLK     0x8ac
+#define REG_ADC160     0x8c4
+#define REG_ADC40      0x8c8
+#define REG_CHFIR      0x8f0
+#define REG_CDDTXP     0x93c
+#define REG_TXPSEL1    0x940
+#define REG_ACBB0      0x948
+#define REG_ACBBRXFIR  0x94c
+#define REG_ACGG2TBL   0x958
+#define REG_FAS                0x9a4
+#define REG_RXSB       0xa00
+#define REG_ADCINI     0xa04
+#define REG_PWRTH      0xa08
+#define REG_TXSF2      0xa24
+#define REG_TXSF6      0xa28
+#define REG_FA_CCK     0xa5c
+#define REG_RXDESC     0xa2c
+#define REG_ENTXCCK    0xa80
+#define REG_PWRTH2     0xaa8
+#define REG_CSRATIO    0xaaa
+#define REG_TXFILTER   0xaac
+#define REG_CNTRST     0xb58
+#define REG_AGCTR_A    0xc08
+#define REG_TXSCALE_A  0xc1c
+#define REG_TXDFIR     0xc20
+#define REG_RXIGI_A    0xc50
+#define REG_TXAGCIDX   0xc94
+#define REG_TRSW       0xca0
+#define REG_RFESEL0    0xcb0
+#define REG_RFESEL8    0xcb4
+#define REG_RFECTL     0xcb8
+#define REG_RFEINV     0xcbc
+#define REG_AGCTR_B    0xe08
+#define REG_RXIGI_B    0xe50
+#define REG_CRC_CCK    0xf04
+#define REG_CRC_OFDM   0xf14
+#define REG_CRC_HT     0xf10
+#define REG_CRC_VHT    0xf0c
+#define REG_CCA_OFDM   0xf08
+#define REG_FA_OFDM    0xf48
+#define REG_CCA_CCK    0xfcc
+#define REG_ANTWT      0x1904
+#define REG_IQKFAILMSK 0x1bf0
+#define BIT_MASK_R_RFE_SEL_15  GENMASK(31, 28)
+#define BIT_SDIO_INT BIT(18)
+#define SAMPLE_RATE_MASK GENMASK(5, 0)
+#define SAMPLE_RATE    0x5
+#define BT_CNT_ENABLE  0x1
+#define BIT_BCN_QUEUE  BIT(3)
+#define BCN_PRI_EN     0x1
+#define PTA_CTRL_PIN   0x66
+#define DPDT_CTRL_PIN  0x77
+#define ANTDIC_CTRL_PIN        0x88
+#define REG_CTRL_TYPE  0x67
+#define BIT_CTRL_TYPE1 BIT(5)
+#define BIT_CTRL_TYPE2 BIT(4)
+#define CTRL_TYPE_MASK GENMASK(15, 8)
+
+#define RF18_BAND_MASK         (BIT(16) | BIT(9) | BIT(8))
+#define RF18_BAND_2G           (0)
+#define RF18_BAND_5G           (BIT(16) | BIT(8))
+#define RF18_CHANNEL_MASK      (MASKBYTE0)
+#define RF18_RFSI_MASK         (BIT(18) | BIT(17))
+#define RF18_RFSI_GE           (BIT(17))
+#define RF18_RFSI_GT           (BIT(18))
+#define RF18_BW_MASK           (BIT(11) | BIT(10))
+#define RF18_BW_20M            (BIT(11) | BIT(10))
+#define RF18_BW_40M            (BIT(11))
+#define RF18_BW_80M            (BIT(10))
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8821c_table.c
new file mode 100644 (file)
index 0000000..970f903
--- /dev/null
@@ -0,0 +1,6611 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019  Realtek Corporation
+ */
+
+#include "main.h"
+#include "phy.h"
+#include "rtw8821c_table.h"
+
+static const u32 rtw8821c_mac[] = {
+       0x010, 0x00000043,
+       0x025, 0x0000001D,
+       0x026, 0x000000CE,
+       0x04F, 0x00000001,
+       0x029, 0x000000F9,
+       0x420, 0x00000080,
+       0x421, 0x0000000F,
+       0x428, 0x0000000A,
+       0x429, 0x00000010,
+       0x430, 0x00000000,
+       0x431, 0x00000000,
+       0x432, 0x00000000,
+       0x433, 0x00000001,
+       0x434, 0x00000004,
+       0x435, 0x00000005,
+       0x436, 0x00000007,
+       0x437, 0x00000008,
+       0x43C, 0x00000004,
+       0x43D, 0x00000005,
+       0x43E, 0x00000007,
+       0x43F, 0x00000008,
+       0x440, 0x0000005D,
+       0x441, 0x00000001,
+       0x442, 0x00000000,
+       0x444, 0x00000010,
+       0x445, 0x000000F0,
+       0x446, 0x00000001,
+       0x447, 0x000000FE,
+       0x448, 0x00000000,
+       0x449, 0x00000000,
+       0x44A, 0x00000000,
+       0x44B, 0x00000040,
+       0x44C, 0x00000010,
+       0x44D, 0x000000F0,
+       0x44E, 0x0000003F,
+       0x44F, 0x00000000,
+       0x450, 0x00000000,
+       0x451, 0x00000000,
+       0x452, 0x00000000,
+       0x453, 0x00000040,
+       0x455, 0x00000070,
+       0x45E, 0x00000004,
+       0x49C, 0x00000010,
+       0x49D, 0x000000F0,
+       0x49E, 0x00000000,
+       0x49F, 0x00000006,
+       0x4A0, 0x000000E0,
+       0x4A1, 0x00000003,
+       0x4A2, 0x00000000,
+       0x4A3, 0x00000040,
+       0x4A4, 0x00000015,
+       0x4A5, 0x000000F0,
+       0x4A6, 0x00000000,
+       0x4A7, 0x00000006,
+       0x4A8, 0x000000E0,
+       0x4A9, 0x00000000,
+       0x4AA, 0x00000000,
+       0x4AB, 0x00000000,
+       0x7DA, 0x00000008,
+       0x1448, 0x00000006,
+       0x144A, 0x00000006,
+       0x144C, 0x00000006,
+       0x144E, 0x00000006,
+       0x4C8, 0x000000FF,
+       0x4C9, 0x00000008,
+       0x4CC, 0x000000FF,
+       0x4CD, 0x000000FF,
+       0x4CE, 0x00000001,
+       0x4CF, 0x00000008,
+       0x500, 0x00000026,
+       0x501, 0x000000A2,
+       0x502, 0x0000002F,
+       0x503, 0x00000000,
+       0x504, 0x00000028,
+       0x505, 0x000000A3,
+       0x506, 0x0000005E,
+       0x507, 0x00000000,
+       0x508, 0x0000002B,
+       0x509, 0x000000A4,
+       0x50A, 0x0000005E,
+       0x50B, 0x00000000,
+       0x50C, 0x0000004F,
+       0x50D, 0x000000A4,
+       0x50E, 0x00000000,
+       0x50F, 0x00000000,
+       0x512, 0x0000001C,
+       0x514, 0x0000000A,
+       0x516, 0x0000000A,
+       0x521, 0x0000002F,
+       0x525, 0x0000004F,
+       0x551, 0x00000010,
+       0x559, 0x00000002,
+       0x55C, 0x00000050,
+       0x55D, 0x000000FF,
+       0x577, 0x0000000B,
+       0x578, 0x00000014,
+       0x579, 0x00000014,
+       0x57A, 0x00000014,
+       0x5BE, 0x00000064,
+       0x605, 0x00000030,
+       0x608, 0x0000000E,
+       0x609, 0x00000022,
+       0x60C, 0x00000018,
+       0x6A0, 0x000000FF,
+       0x6A1, 0x000000FF,
+       0x6A2, 0x000000FF,
+       0x6A3, 0x000000FF,
+       0x6A4, 0x000000FF,
+       0x6A5, 0x000000FF,
+       0x6DE, 0x00000084,
+       0x620, 0x000000FF,
+       0x621, 0x000000FF,
+       0x622, 0x000000FF,
+       0x623, 0x000000FF,
+       0x624, 0x000000FF,
+       0x625, 0x000000FF,
+       0x626, 0x000000FF,
+       0x627, 0x000000FF,
+       0x638, 0x00000050,
+       0x63C, 0x0000000A,
+       0x63D, 0x0000000A,
+       0x63E, 0x0000000E,
+       0x63F, 0x0000000E,
+       0x640, 0x00000040,
+       0x642, 0x00000040,
+       0x643, 0x00000000,
+       0x652, 0x000000C8,
+       0x66E, 0x00000005,
+       0x700, 0x00000021,
+       0x701, 0x00000043,
+       0x702, 0x00000065,
+       0x703, 0x00000087,
+       0x708, 0x00000021,
+       0x709, 0x00000043,
+       0x70A, 0x00000065,
+       0x70B, 0x00000087,
+       0x718, 0x00000040,
+       0x7D4, 0x00000098,
+
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8821c_mac, rtw_phy_cfg_mac);
+
+static const u32 rtw8821c_agc[] = {
+       0x80001004,     0x00000000,     0x40000000,     0x00000000,
+       0x81C, 0xFB000003,
+       0x81C, 0xFA020003,
+       0x81C, 0xF9040003,
+       0x81C, 0xF8060003,
+       0x81C, 0xF7080003,
+       0x81C, 0xF60A0003,
+       0x81C, 0xF50C0003,
+       0x81C, 0xF40E0003,
+       0x81C, 0xF3100003,
+       0x81C, 0xF2120003,
+       0x81C, 0xF1140003,
+       0x81C, 0xF0160003,
+       0x81C, 0xEF180003,
+       0x81C, 0xEE1A0003,
+       0x81C, 0xED1C0003,
+       0x81C, 0xEC1E0003,
+       0x81C, 0xEB200003,
+       0x81C, 0xEA220003,
+       0x81C, 0xE9240003,
+       0x81C, 0xE8260003,
+       0x81C, 0xE7280003,
+       0x81C, 0xE62A0003,
+       0x81C, 0xE52C0003,
+       0x81C, 0xE42E0003,
+       0x81C, 0xE3300003,
+       0x81C, 0xE2320003,
+       0x81C, 0xE1340003,
+       0x81C, 0xC4360003,
+       0x81C, 0xC3380003,
+       0x81C, 0xC23A0003,
+       0x81C, 0xC13C0003,
+       0x81C, 0x883E0003,
+       0x81C, 0x87400003,
+       0x81C, 0x86420003,
+       0x81C, 0x85440003,
+       0x81C, 0x84460003,
+       0x81C, 0x83480003,
+       0x81C, 0x824A0003,
+       0x81C, 0x814C0003,
+       0x81C, 0x804E0003,
+       0x81C, 0x64500003,
+       0x81C, 0x63520003,
+       0x81C, 0x62540003,
+       0x81C, 0x61560003,
+       0x81C, 0x60580003,
+       0x81C, 0x475A0003,
+       0x81C, 0x465C0003,
+       0x81C, 0x455E0003,
+       0x81C, 0x44600003,
+       0x81C, 0x43620003,
+       0x81C, 0x42640003,
+       0x81C, 0x41660003,
+       0x81C, 0x40680003,
+       0x81C, 0x236A0003,
+       0x81C, 0x226C0003,
+       0x81C, 0x056E0003,
+       0x81C, 0x04700003,
+       0x81C, 0x03720003,
+       0x81C, 0x02740003,
+       0x81C, 0x01760003,
+       0x81C, 0x01780003,
+       0x81C, 0x017A0003,
+       0x81C, 0x017C0003,
+       0x81C, 0x017E0003,
+       0x90001005,     0x00000000,     0x40000000,     0x00000000,
+       0x81C, 0xFB000003,
+       0x81C, 0xFA020003,
+       0x81C, 0xF9040003,
+       0x81C, 0xF8060003,
+       0x81C, 0xF7080003,
+       0x81C, 0xF60A0003,
+       0x81C, 0xF50C0003,
+       0x81C, 0xF40E0003,
+       0x81C, 0xF3100003,
+       0x81C, 0xF2120003,
+       0x81C, 0xF1140003,
+       0x81C, 0xF0160003,
+       0x81C, 0xEF180003,
+       0x81C, 0xEE1A0003,
+       0x81C, 0xED1C0003,
+       0x81C, 0xEC1E0003,
+       0x81C, 0xEB200003,
+       0x81C, 0xEA220003,
+       0x81C, 0xE9240003,
+       0x81C, 0xE8260003,
+       0x81C, 0xE7280003,
+       0x81C, 0xE62A0003,
+       0x81C, 0xE52C0003,
+       0x81C, 0xE42E0003,
+       0x81C, 0xE3300003,
+       0x81C, 0xE2320003,
+       0x81C, 0xE1340003,
+       0x81C, 0xC4360003,
+       0x81C, 0xC3380003,
+       0x81C, 0xC23A0003,
+       0x81C, 0xC13C0003,
+       0x81C, 0x883E0003,
+       0x81C, 0x87400003,
+       0x81C, 0x86420003,
+       0x81C, 0x85440003,
+       0x81C, 0x84460003,
+       0x81C, 0x83480003,
+       0x81C, 0x824A0003,
+       0x81C, 0x814C0003,
+       0x81C, 0x804E0003,
+       0x81C, 0x64500003,
+       0x81C, 0x63520003,
+       0x81C, 0x62540003,
+       0x81C, 0x61560003,
+       0x81C, 0x60580003,
+       0x81C, 0x475A0003,
+       0x81C, 0x465C0003,
+       0x81C, 0x455E0003,
+       0x81C, 0x44600003,
+       0x81C, 0x43620003,
+       0x81C, 0x42640003,
+       0x81C, 0x41660003,
+       0x81C, 0x40680003,
+       0x81C, 0x236A0003,
+       0x81C, 0x226C0003,
+       0x81C, 0x056E0003,
+       0x81C, 0x04700003,
+       0x81C, 0x03720003,
+       0x81C, 0x02740003,
+       0x81C, 0x01760003,
+       0x81C, 0x01780003,
+       0x81C, 0x017A0003,
+       0x81C, 0x017C0003,
+       0x81C, 0x017E0003,
+       0xA0000000,     0x00000000,
+       0x81C, 0xFB000003,
+       0x81C, 0xFA020003,
+       0x81C, 0xF9040003,
+       0x81C, 0xF8060003,
+       0x81C, 0xF7080003,
+       0x81C, 0xF60A0003,
+       0x81C, 0xF50C0003,
+       0x81C, 0xF40E0003,
+       0x81C, 0xF3100003,
+       0x81C, 0xF2120003,
+       0x81C, 0xF1140003,
+       0x81C, 0xF0160003,
+       0x81C, 0xEF180003,
+       0x81C, 0xEE1A0003,
+       0x81C, 0xED1C0003,
+       0x81C, 0xEC1E0003,
+       0x81C, 0xEB200003,
+       0x81C, 0xEA220003,
+       0x81C, 0xE9240003,
+       0x81C, 0xE8260003,
+       0x81C, 0xE7280003,
+       0x81C, 0xE62A0003,
+       0x81C, 0xCA2C0003,
+       0x81C, 0xC92E0003,
+       0x81C, 0xC8300003,
+       0x81C, 0xC7320003,
+       0x81C, 0xC6340003,
+       0x81C, 0xC5360003,
+       0x81C, 0xC4380003,
+       0x81C, 0xC33A0003,
+       0x81C, 0xC23C0003,
+       0x81C, 0xC13E0003,
+       0x81C, 0x88400003,
+       0x81C, 0x87420003,
+       0x81C, 0x86440003,
+       0x81C, 0x85460003,
+       0x81C, 0x84480003,
+       0x81C, 0x834A0003,
+       0x81C, 0x674C0003,
+       0x81C, 0x664E0003,
+       0x81C, 0x65500003,
+       0x81C, 0x64520003,
+       0x81C, 0x63540003,
+       0x81C, 0x62560003,
+       0x81C, 0x61580003,
+       0x81C, 0x455A0003,
+       0x81C, 0x445C0003,
+       0x81C, 0x435E0003,
+       0x81C, 0x42600003,
+       0x81C, 0x41620003,
+       0x81C, 0x25640003,
+       0x81C, 0x24660003,
+       0x81C, 0x23680003,
+       0x81C, 0x226A0003,
+       0x81C, 0x216C0003,
+       0x81C, 0x016E0003,
+       0x81C, 0x01700003,
+       0x81C, 0x01720003,
+       0x81C, 0x01740003,
+       0x81C, 0x01760003,
+       0x81C, 0x01780003,
+       0x81C, 0x017A0003,
+       0x81C, 0x017C0003,
+       0x81C, 0x017E0003,
+       0xB0000000,     0x00000000,
+       0x80001004,     0x00000000,     0x40000000,     0x00000000,
+       0x81C, 0xFD000103,
+       0x81C, 0xFC020103,
+       0x81C, 0xFB040103,
+       0x81C, 0xFA060103,
+       0x81C, 0xF9080103,
+       0x81C, 0xF80A0103,
+       0x81C, 0xF70C0103,
+       0x81C, 0xF60E0103,
+       0x81C, 0xF5100103,
+       0x81C, 0xF4120103,
+       0x81C, 0xF3140103,
+       0x81C, 0xF2160103,
+       0x81C, 0xF1180103,
+       0x81C, 0xF01A0103,
+       0x81C, 0xEF1C0103,
+       0x81C, 0xEE1E0103,
+       0x81C, 0xED200103,
+       0x81C, 0xEC220103,
+       0x81C, 0xEB240103,
+       0x81C, 0xEA260103,
+       0x81C, 0xE9280103,
+       0x81C, 0xE82A0103,
+       0x81C, 0xE72C0103,
+       0x81C, 0xE62E0103,
+       0x81C, 0xE5300103,
+       0x81C, 0xE4320103,
+       0x81C, 0xE3340103,
+       0x81C, 0xE2360103,
+       0x81C, 0xE1380103,
+       0x81C, 0xE03A0103,
+       0x81C, 0xC33C0103,
+       0x81C, 0xC23E0103,
+       0x81C, 0xC1400103,
+       0x81C, 0xC0420103,
+       0x81C, 0xA3440103,
+       0x81C, 0xA2460103,
+       0x81C, 0xA1480103,
+       0x81C, 0xA04A0103,
+       0x81C, 0x824C0103,
+       0x81C, 0x814E0103,
+       0x81C, 0x80500103,
+       0x81C, 0x62520103,
+       0x81C, 0x61540103,
+       0x81C, 0x60560103,
+       0x81C, 0x24580103,
+       0x81C, 0x235A0103,
+       0x81C, 0x225C0103,
+       0x81C, 0x215E0103,
+       0x81C, 0x20600103,
+       0x81C, 0x03620103,
+       0x81C, 0x02640103,
+       0x81C, 0x01660103,
+       0x81C, 0x01680103,
+       0x81C, 0x016A0103,
+       0x81C, 0x016C0103,
+       0x81C, 0x016E0103,
+       0x81C, 0x01700103,
+       0x81C, 0x01720103,
+       0x81C, 0x01740103,
+       0x81C, 0x01760103,
+       0x81C, 0x01780103,
+       0x81C, 0x017A0103,
+       0x81C, 0x017C0103,
+       0x81C, 0x017E0103,
+       0x90001005,     0x00000000,     0x40000000,     0x00000000,
+       0x81C, 0xF6000103,
+       0x81C, 0xF5020103,
+       0x81C, 0xF4040103,
+       0x81C, 0xF3060103,
+       0x81C, 0xF2080103,
+       0x81C, 0xF10A0103,
+       0x81C, 0xF00C0103,
+       0x81C, 0xEF0E0103,
+       0x81C, 0xEE100103,
+       0x81C, 0xED120103,
+       0x81C, 0xEC140103,
+       0x81C, 0xCE160103,
+       0x81C, 0xEA180103,
+       0x81C, 0xE91A0103,
+       0x81C, 0xE81C0103,
+       0x81C, 0xE71E0103,
+       0x81C, 0xE6200103,
+       0x81C, 0xE5220103,
+       0x81C, 0xE4240103,
+       0x81C, 0xE3260103,
+       0x81C, 0xE2280103,
+       0x81C, 0xE12A0103,
+       0x81C, 0xC32C0103,
+       0x81C, 0xA62E0103,
+       0x81C, 0xC1300103,
+       0x81C, 0xA4320103,
+       0x81C, 0xA3340103,
+       0x81C, 0xA2360103,
+       0x81C, 0xA1380103,
+       0x81C, 0x833A0103,
+       0x81C, 0x823C0103,
+       0x81C, 0x813E0103,
+       0x81C, 0x63400103,
+       0x81C, 0x62420103,
+       0x81C, 0x61440103,
+       0x81C, 0x60460103,
+       0x81C, 0x25480103,
+       0x81C, 0x244A0103,
+       0x81C, 0x234C0103,
+       0x81C, 0x064E0103,
+       0x81C, 0x21500103,
+       0x81C, 0x04520103,
+       0x81C, 0x03540103,
+       0x81C, 0x02560103,
+       0x81C, 0x01580103,
+       0x81C, 0x005A0103,
+       0x81C, 0x005C0103,
+       0x81C, 0x005E0103,
+       0x81C, 0x00600103,
+       0x81C, 0x00620103,
+       0x81C, 0x00640103,
+       0x81C, 0x00660103,
+       0x81C, 0x00680103,
+       0x81C, 0x006A0103,
+       0x81C, 0x006C0103,
+       0x81C, 0x006E0103,
+       0x81C, 0x00700103,
+       0x81C, 0x00720103,
+       0x81C, 0x00740103,
+       0x81C, 0x00760103,
+       0x81C, 0x00780103,
+       0x81C, 0x007A0103,
+       0x81C, 0x007C0103,
+       0x81C, 0x007E0103,
+       0xA0000000,     0x00000000,
+       0x81C, 0xFD000103,
+       0x81C, 0xFC020103,
+       0x81C, 0xFB040103,
+       0x81C, 0xFA060103,
+       0x81C, 0xF9080103,
+       0x81C, 0xF80A0103,
+       0x81C, 0xF70C0103,
+       0x81C, 0xF60E0103,
+       0x81C, 0xF5100103,
+       0x81C, 0xF4120103,
+       0x81C, 0xF3140103,
+       0x81C, 0xF2160103,
+       0x81C, 0xF1180103,
+       0x81C, 0xF01A0103,
+       0x81C, 0xEF1C0103,
+       0x81C, 0xEE1E0103,
+       0x81C, 0xED200103,
+       0x81C, 0xEC220103,
+       0x81C, 0xEB240103,
+       0x81C, 0xEA260103,
+       0x81C, 0xE9280103,
+       0x81C, 0xE82A0103,
+       0x81C, 0xE72C0103,
+       0x81C, 0xE62E0103,
+       0x81C, 0xE5300103,
+       0x81C, 0xE4320103,
+       0x81C, 0xE3340103,
+       0x81C, 0xE2360103,
+       0x81C, 0xE1380103,
+       0x81C, 0xE03A0103,
+       0x81C, 0xA83C0103,
+       0x81C, 0xA73E0103,
+       0x81C, 0xA6400103,
+       0x81C, 0xA5420103,
+       0x81C, 0xA4440103,
+       0x81C, 0xA3460103,
+       0x81C, 0xA2480103,
+       0x81C, 0xA14A0103,
+       0x81C, 0x834C0103,
+       0x81C, 0x824E0103,
+       0x81C, 0x81500103,
+       0x81C, 0x63520103,
+       0x81C, 0x62540103,
+       0x81C, 0x61560103,
+       0x81C, 0x25580103,
+       0x81C, 0x245A0103,
+       0x81C, 0x235C0103,
+       0x81C, 0x225E0103,
+       0x81C, 0x04600103,
+       0x81C, 0x03620103,
+       0x81C, 0x02640103,
+       0x81C, 0x01660103,
+       0x81C, 0x01680103,
+       0x81C, 0x016A0103,
+       0x81C, 0x016C0103,
+       0x81C, 0x016E0103,
+       0x81C, 0x01700103,
+       0x81C, 0x01720103,
+       0x81C, 0x01740103,
+       0x81C, 0x01760103,
+       0x81C, 0x01780103,
+       0x81C, 0x017A0103,
+       0x81C, 0x017C0103,
+       0x81C, 0x017E0103,
+       0xB0000000,     0x00000000,
+       0x80001004,     0x00000000,     0x40000000,     0x00000000,
+       0x81C, 0xFB000203,
+       0x81C, 0xFA020203,
+       0x81C, 0xF9040203,
+       0x81C, 0xF8060203,
+       0x81C, 0xF7080203,
+       0x81C, 0xF60A0203,
+       0x81C, 0xF50C0203,
+       0x81C, 0xF40E0203,
+       0x81C, 0xF3100203,
+       0x81C, 0xF2120203,
+       0x81C, 0xF1140203,
+       0x81C, 0xF0160203,
+       0x81C, 0xEF180203,
+       0x81C, 0xEE1A0203,
+       0x81C, 0xED1C0203,
+       0x81C, 0xEC1E0203,
+       0x81C, 0xEB200203,
+       0x81C, 0xEA220203,
+       0x81C, 0xE9240203,
+       0x81C, 0xE8260203,
+       0x81C, 0xE7280203,
+       0x81C, 0xE62A0203,
+       0x81C, 0xE52C0203,
+       0x81C, 0xE42E0203,
+       0x81C, 0xE3300203,
+       0x81C, 0xE2320203,
+       0x81C, 0xE1340203,
+       0x81C, 0xC5360203,
+       0x81C, 0xC4380203,
+       0x81C, 0xC33A0203,
+       0x81C, 0xC23C0203,
+       0x81C, 0xC13E0203,
+       0x81C, 0xA4400203,
+       0x81C, 0xA3420203,
+       0x81C, 0xA2440203,
+       0x81C, 0xA1460203,
+       0x81C, 0xA0480203,
+       0x81C, 0x834A0203,
+       0x81C, 0x824C0203,
+       0x81C, 0x814E0203,
+       0x81C, 0x63500203,
+       0x81C, 0x62520203,
+       0x81C, 0x61540203,
+       0x81C, 0x60560203,
+       0x81C, 0x23580203,
+       0x81C, 0x225A0203,
+       0x81C, 0x215C0203,
+       0x81C, 0x205E0203,
+       0x81C, 0x04600203,
+       0x81C, 0x03620203,
+       0x81C, 0x02640203,
+       0x81C, 0x01660203,
+       0x81C, 0x01680203,
+       0x81C, 0x016A0203,
+       0x81C, 0x016C0203,
+       0x81C, 0x016E0203,
+       0x81C, 0x01700203,
+       0x81C, 0x01720203,
+       0x81C, 0x01740203,
+       0x81C, 0x01760203,
+       0x81C, 0x01780203,
+       0x81C, 0x017A0203,
+       0x81C, 0x017C0203,
+       0x81C, 0x017E0203,
+       0x90001005,     0x00000000,     0x40000000,     0x00000000,
+       0x81C, 0xF6000203,
+       0x81C, 0xF5020203,
+       0x81C, 0xF4040203,
+       0x81C, 0xF3060203,
+       0x81C, 0xF2080203,
+       0x81C, 0xF10A0203,
+       0x81C, 0xF00C0203,
+       0x81C, 0xEF0E0203,
+       0x81C, 0xEE100203,
+       0x81C, 0xED120203,
+       0x81C, 0xEC140203,
+       0x81C, 0xEB160203,
+       0x81C, 0xEA180203,
+       0x81C, 0xE91A0203,
+       0x81C, 0xE81C0203,
+       0x81C, 0xE71E0203,
+       0x81C, 0xE6200203,
+       0x81C, 0xE5220203,
+       0x81C, 0xE4240203,
+       0x81C, 0xE3260203,
+       0x81C, 0xE2280203,
+       0x81C, 0xE12A0203,
+       0x81C, 0xE02C0203,
+       0x81C, 0xC22E0203,
+       0x81C, 0xC1300203,
+       0x81C, 0xC0320203,
+       0x81C, 0xA3340203,
+       0x81C, 0xA2360203,
+       0x81C, 0xA1380203,
+       0x81C, 0xA03A0203,
+       0x81C, 0x833C0203,
+       0x81C, 0x823E0203,
+       0x81C, 0x81400203,
+       0x81C, 0x80420203,
+       0x81C, 0x62440203,
+       0x81C, 0x61460203,
+       0x81C, 0x42480203,
+       0x81C, 0x414A0203,
+       0x81C, 0x234C0203,
+       0x81C, 0x224E0203,
+       0x81C, 0x21500203,
+       0x81C, 0x20520203,
+       0x81C, 0x03540203,
+       0x81C, 0x02560203,
+       0x81C, 0x01580203,
+       0x81C, 0x005A0203,
+       0x81C, 0x005C0203,
+       0x81C, 0x005E0203,
+       0x81C, 0x00600203,
+       0x81C, 0x00620203,
+       0x81C, 0x00640203,
+       0x81C, 0x00660203,
+       0x81C, 0x00680203,
+       0x81C, 0x006A0203,
+       0x81C, 0x006C0203,
+       0x81C, 0x006E0203,
+       0x81C, 0x00700203,
+       0x81C, 0x00720203,
+       0x81C, 0x00740203,
+       0x81C, 0x00760203,
+       0x81C, 0x00780203,
+       0x81C, 0x007A0203,
+       0x81C, 0x007C0203,
+       0x81C, 0x007E0203,
+       0xA0000000,     0x00000000,
+       0x81C, 0xFC000203,
+       0x81C, 0xFB020203,
+       0x81C, 0xFA040203,
+       0x81C, 0xF9060203,
+       0x81C, 0xF8080203,
+       0x81C, 0xF70A0203,
+       0x81C, 0xF60C0203,
+       0x81C, 0xF50E0203,
+       0x81C, 0xF4100203,
+       0x81C, 0xF3120203,
+       0x81C, 0xF2140203,
+       0x81C, 0xF1160203,
+       0x81C, 0xF0180203,
+       0x81C, 0xEF1A0203,
+       0x81C, 0xEE1C0203,
+       0x81C, 0xED1E0203,
+       0x81C, 0xEC200203,
+       0x81C, 0xEB220203,
+       0x81C, 0xEA240203,
+       0x81C, 0xE9260203,
+       0x81C, 0xE8280203,
+       0x81C, 0xE72A0203,
+       0x81C, 0xE62C0203,
+       0x81C, 0xE52E0203,
+       0x81C, 0xE4300203,
+       0x81C, 0xE3320203,
+       0x81C, 0xE2340203,
+       0x81C, 0xE1360203,
+       0x81C, 0xC5380203,
+       0x81C, 0xC43A0203,
+       0x81C, 0xC33C0203,
+       0x81C, 0xC23E0203,
+       0x81C, 0xA6400203,
+       0x81C, 0xA5420203,
+       0x81C, 0xA4440203,
+       0x81C, 0xA3460203,
+       0x81C, 0xA2480203,
+       0x81C, 0x844A0203,
+       0x81C, 0x834C0203,
+       0x81C, 0x824E0203,
+       0x81C, 0x64500203,
+       0x81C, 0x63520203,
+       0x81C, 0x62540203,
+       0x81C, 0x61560203,
+       0x81C, 0x60580203,
+       0x81C, 0x235A0203,
+       0x81C, 0x225C0203,
+       0x81C, 0x215E0203,
+       0x81C, 0x04600203,
+       0x81C, 0x03620203,
+       0x81C, 0x02640203,
+       0x81C, 0x01660203,
+       0x81C, 0x01680203,
+       0x81C, 0x016A0203,
+       0x81C, 0x016C0203,
+       0x81C, 0x016E0203,
+       0x81C, 0x01700203,
+       0x81C, 0x01720203,
+       0x81C, 0x01740203,
+       0x81C, 0x01760203,
+       0x81C, 0x01780203,
+       0x81C, 0x017A0203,
+       0x81C, 0x017C0203,
+       0x81C, 0x017E0203,
+       0xB0000000,     0x00000000,
+       0x80001004,     0x00000000,     0x40000000,     0x00000000,
+       0x81C, 0xFB000303,
+       0x81C, 0xFA020303,
+       0x81C, 0xF9040303,
+       0x81C, 0xF8060303,
+       0x81C, 0xF7080303,
+       0x81C, 0xF60A0303,
+       0x81C, 0xF50C0303,
+       0x81C, 0xF40E0303,
+       0x81C, 0xF3100303,
+       0x81C, 0xF2120303,
+       0x81C, 0xF1140303,
+       0x81C, 0xF0160303,
+       0x81C, 0xEF180303,
+       0x81C, 0xEE1A0303,
+       0x81C, 0xED1C0303,
+       0x81C, 0xEC1E0303,
+       0x81C, 0xEB200303,
+       0x81C, 0xEA220303,
+       0x81C, 0xE9240303,
+       0x81C, 0xE8260303,
+       0x81C, 0xE7280303,
+       0x81C, 0xE62A0303,
+       0x81C, 0xE52C0303,
+       0x81C, 0xE42E0303,
+       0x81C, 0xE3300303,
+       0x81C, 0xE2320303,
+       0x81C, 0xE1340303,
+       0x81C, 0xC4360303,
+       0x81C, 0xC3380303,
+       0x81C, 0xC23A0303,
+       0x81C, 0xC13C0303,
+       0x81C, 0xA53E0303,
+       0x81C, 0xA4400303,
+       0x81C, 0xA3420303,
+       0x81C, 0xA2440303,
+       0x81C, 0xA1460303,
+       0x81C, 0x83480303,
+       0x81C, 0x824A0303,
+       0x81C, 0x814C0303,
+       0x81C, 0x644E0303,
+       0x81C, 0x63500303,
+       0x81C, 0x62520303,
+       0x81C, 0x61540303,
+       0x81C, 0x60560303,
+       0x81C, 0x23580303,
+       0x81C, 0x225A0303,
+       0x81C, 0x215C0303,
+       0x81C, 0x045E0303,
+       0x81C, 0x03600303,
+       0x81C, 0x02620303,
+       0x81C, 0x01640303,
+       0x81C, 0x01660303,
+       0x81C, 0x01680303,
+       0x81C, 0x016A0303,
+       0x81C, 0x016C0303,
+       0x81C, 0x016E0303,
+       0x81C, 0x01700303,
+       0x81C, 0x01720303,
+       0x81C, 0x01740303,
+       0x81C, 0x01760303,
+       0x81C, 0x01780303,
+       0x81C, 0x017A0303,
+       0x81C, 0x017C0303,
+       0x81C, 0x017E0303,
+       0x90001005,     0x00000000,     0x40000000,     0x00000000,
+       0x81C, 0xF5000303,
+       0x81C, 0xF4020303,
+       0x81C, 0xF3040303,
+       0x81C, 0xF2060303,
+       0x81C, 0xF1080303,
+       0x81C, 0xF00A0303,
+       0x81C, 0xEF0C0303,
+       0x81C, 0xEE0E0303,
+       0x81C, 0xED100303,
+       0x81C, 0xEC120303,
+       0x81C, 0xEB140303,
+       0x81C, 0xEA160303,
+       0x81C, 0xE9180303,
+       0x81C, 0xE81A0303,
+       0x81C, 0xE71C0303,
+       0x81C, 0xE61E0303,
+       0x81C, 0xE5200303,
+       0x81C, 0xE4220303,
+       0x81C, 0xE3240303,
+       0x81C, 0xE2260303,
+       0x81C, 0xE1280303,
+       0x81C, 0xE02A0303,
+       0x81C, 0xA72C0303,
+       0x81C, 0xA62E0303,
+       0x81C, 0xA5300303,
+       0x81C, 0xA4320303,
+       0x81C, 0xA3340303,
+       0x81C, 0xA2360303,
+       0x81C, 0xA1380303,
+       0x81C, 0xA03A0303,
+       0x81C, 0x823C0303,
+       0x81C, 0x643E0303,
+       0x81C, 0x63400303,
+       0x81C, 0x62420303,
+       0x81C, 0x61440303,
+       0x81C, 0x60460303,
+       0x81C, 0x24480303,
+       0x81C, 0x234A0303,
+       0x81C, 0x224C0303,
+       0x81C, 0x054E0303,
+       0x81C, 0x04500303,
+       0x81C, 0x03520303,
+       0x81C, 0x02540303,
+       0x81C, 0x01560303,
+       0x81C, 0x00580303,
+       0x81C, 0x005A0303,
+       0x81C, 0x005C0303,
+       0x81C, 0x005E0303,
+       0x81C, 0x00600303,
+       0x81C, 0x00620303,
+       0x81C, 0x00640303,
+       0x81C, 0x00660303,
+       0x81C, 0x00680303,
+       0x81C, 0x006A0303,
+       0x81C, 0x006C0303,
+       0x81C, 0x006E0303,
+       0x81C, 0x00700303,
+       0x81C, 0x00720303,
+       0x81C, 0x00740303,
+       0x81C, 0x00760303,
+       0x81C, 0x00780303,
+       0x81C, 0x007A0303,
+       0x81C, 0x007C0303,
+       0x81C, 0x007E0303,
+       0xA0000000,     0x00000000,
+       0x81C, 0xFC000303,
+       0x81C, 0xFB020303,
+       0x81C, 0xFA040303,
+       0x81C, 0xF9060303,
+       0x81C, 0xF8080303,
+       0x81C, 0xF70A0303,
+       0x81C, 0xF60C0303,
+       0x81C, 0xF50E0303,
+       0x81C, 0xF4100303,
+       0x81C, 0xF3120303,
+       0x81C, 0xF2140303,
+       0x81C, 0xF1160303,
+       0x81C, 0xF0180303,
+       0x81C, 0xEF1A0303,
+       0x81C, 0xEE1C0303,
+       0x81C, 0xED1E0303,
+       0x81C, 0xEC200303,
+       0x81C, 0xEB220303,
+       0x81C, 0xEA240303,
+       0x81C, 0xE9260303,
+       0x81C, 0xE8280303,
+       0x81C, 0xE72A0303,
+       0x81C, 0xE62C0303,
+       0x81C, 0xE52E0303,
+       0x81C, 0xE4300303,
+       0x81C, 0xE3320303,
+       0x81C, 0xE2340303,
+       0x81C, 0xE1360303,
+       0x81C, 0xC4380303,
+       0x81C, 0xC33A0303,
+       0x81C, 0xC23C0303,
+       0x81C, 0xC13E0303,
+       0x81C, 0xA5400303,
+       0x81C, 0xA4420303,
+       0x81C, 0xA3440303,
+       0x81C, 0xA2460303,
+       0x81C, 0xA1480303,
+       0x81C, 0x834A0303,
+       0x81C, 0x824C0303,
+       0x81C, 0x814E0303,
+       0x81C, 0x64500303,
+       0x81C, 0x63520303,
+       0x81C, 0x62540303,
+       0x81C, 0x61560303,
+       0x81C, 0x24580303,
+       0x81C, 0x235A0303,
+       0x81C, 0x225C0303,
+       0x81C, 0x215E0303,
+       0x81C, 0x04600303,
+       0x81C, 0x03620303,
+       0x81C, 0x02640303,
+       0x81C, 0x01660303,
+       0x81C, 0x01680303,
+       0x81C, 0x016A0303,
+       0x81C, 0x016C0303,
+       0x81C, 0x016E0303,
+       0x81C, 0x01700303,
+       0x81C, 0x01720303,
+       0x81C, 0x01740303,
+       0x81C, 0x01760303,
+       0x81C, 0x01780303,
+       0x81C, 0x017A0303,
+       0x81C, 0x017C0303,
+       0x81C, 0x017E0303,
+       0xB0000000,     0x00000000,
+       0x80001004,     0x00000000,     0x40000000,     0x00000000,
+       0x81C, 0xFC000803,
+       0x81C, 0xFB020803,
+       0x81C, 0xFA040803,
+       0x81C, 0xF9060803,
+       0x81C, 0xF8080803,
+       0x81C, 0xF70A0803,
+       0x81C, 0xF60C0803,
+       0x81C, 0xF50E0803,
+       0x81C, 0xF4100803,
+       0x81C, 0xF3120803,
+       0x81C, 0xF2140803,
+       0x81C, 0xF1160803,
+       0x81C, 0xF0180803,
+       0x81C, 0xEF1A0803,
+       0x81C, 0xEE1C0803,
+       0x81C, 0xED1E0803,
+       0x81C, 0xB5200803,
+       0x81C, 0xB4220803,
+       0x81C, 0xB3240803,
+       0x81C, 0xB2260803,
+       0x81C, 0xB1280803,
+       0x81C, 0xB02A0803,
+       0x81C, 0xAF2C0803,
+       0x81C, 0xAE2E0803,
+       0x81C, 0xAD300803,
+       0x81C, 0xAC320803,
+       0x81C, 0xAB340803,
+       0x81C, 0xAA360803,
+       0x81C, 0xA9380803,
+       0x81C, 0xA83A0803,
+       0x81C, 0xA73C0803,
+       0x81C, 0xA63E0803,
+       0x81C, 0x88400803,
+       0x81C, 0x87420803,
+       0x81C, 0x86440803,
+       0x81C, 0x85460803,
+       0x81C, 0x84480803,
+       0x81C, 0x834A0803,
+       0x81C, 0x674C0803,
+       0x81C, 0x664E0803,
+       0x81C, 0x65500803,
+       0x81C, 0x64520803,
+       0x81C, 0x63540803,
+       0x81C, 0x62560803,
+       0x81C, 0x61580803,
+       0x81C, 0x455A0803,
+       0x81C, 0x445C0803,
+       0x81C, 0x435E0803,
+       0x81C, 0x42600803,
+       0x81C, 0x41620803,
+       0x81C, 0x25640803,
+       0x81C, 0x24660803,
+       0x81C, 0x23680803,
+       0x81C, 0x226A0803,
+       0x81C, 0x216C0803,
+       0x81C, 0x016E0803,
+       0x81C, 0x01700803,
+       0x81C, 0x01720803,
+       0x81C, 0x01740803,
+       0x81C, 0x01760803,
+       0x81C, 0x01780803,
+       0x81C, 0x017A0803,
+       0x81C, 0x017C0803,
+       0x81C, 0x017E0803,
+       0x90001005,     0x00000000,     0x40000000,     0x00000000,
+       0x81C, 0xFC000803,
+       0x81C, 0xFB020803,
+       0x81C, 0xFA040803,
+       0x81C, 0xF9060803,
+       0x81C, 0xF8080803,
+       0x81C, 0xF70A0803,
+       0x81C, 0xF60C0803,
+       0x81C, 0xF50E0803,
+       0x81C, 0xF4100803,
+       0x81C, 0xF3120803,
+       0x81C, 0xF2140803,
+       0x81C, 0xF1160803,
+       0x81C, 0xF0180803,
+       0x81C, 0xEF1A0803,
+       0x81C, 0xEE1C0803,
+       0x81C, 0xED1E0803,
+       0x81C, 0xB5200803,
+       0x81C, 0xB4220803,
+       0x81C, 0xB3240803,
+       0x81C, 0xB2260803,
+       0x81C, 0xB1280803,
+       0x81C, 0xB02A0803,
+       0x81C, 0xAF2C0803,
+       0x81C, 0xAE2E0803,
+       0x81C, 0xAD300803,
+       0x81C, 0xAC320803,
+       0x81C, 0xAB340803,
+       0x81C, 0xAA360803,
+       0x81C, 0xA9380803,
+       0x81C, 0xA83A0803,
+       0x81C, 0xA73C0803,
+       0x81C, 0xA63E0803,
+       0x81C, 0x88400803,
+       0x81C, 0x87420803,
+       0x81C, 0x86440803,
+       0x81C, 0x85460803,
+       0x81C, 0x84480803,
+       0x81C, 0x834A0803,
+       0x81C, 0x674C0803,
+       0x81C, 0x664E0803,
+       0x81C, 0x65500803,
+       0x81C, 0x64520803,
+       0x81C, 0x63540803,
+       0x81C, 0x62560803,
+       0x81C, 0x61580803,
+       0x81C, 0x455A0803,
+       0x81C, 0x445C0803,
+       0x81C, 0x435E0803,
+       0x81C, 0x42600803,
+       0x81C, 0x41620803,
+       0x81C, 0x25640803,
+       0x81C, 0x24660803,
+       0x81C, 0x23680803,
+       0x81C, 0x226A0803,
+       0x81C, 0x216C0803,
+       0x81C, 0x016E0803,
+       0x81C, 0x01700803,
+       0x81C, 0x01720803,
+       0x81C, 0x01740803,
+       0x81C, 0x01760803,
+       0x81C, 0x01780803,
+       0x81C, 0x017A0803,
+       0x81C, 0x017C0803,
+       0x81C, 0x017E0803,
+       0xA0000000,     0x00000000,
+       0x81C, 0xFC000803,
+       0x81C, 0xFB020803,
+       0x81C, 0xFA040803,
+       0x81C, 0xF9060803,
+       0x81C, 0xF8080803,
+       0x81C, 0xF70A0803,
+       0x81C, 0xF60C0803,
+       0x81C, 0xF50E0803,
+       0x81C, 0xF4100803,
+       0x81C, 0xF3120803,
+       0x81C, 0xF2140803,
+       0x81C, 0xF1160803,
+       0x81C, 0xF0180803,
+       0x81C, 0xEF1A0803,
+       0x81C, 0xEE1C0803,
+       0x81C, 0xED1E0803,
+       0x81C, 0xB5200803,
+       0x81C, 0xB4220803,
+       0x81C, 0xB3240803,
+       0x81C, 0xB2260803,
+       0x81C, 0xB1280803,
+       0x81C, 0xB02A0803,
+       0x81C, 0xAF2C0803,
+       0x81C, 0xAE2E0803,
+       0x81C, 0xAD300803,
+       0x81C, 0xAC320803,
+       0x81C, 0xAB340803,
+       0x81C, 0xAA360803,
+       0x81C, 0xA9380803,
+       0x81C, 0xA83A0803,
+       0x81C, 0xA73C0803,
+       0x81C, 0xA63E0803,
+       0x81C, 0x88400803,
+       0x81C, 0x87420803,
+       0x81C, 0x86440803,
+       0x81C, 0x85460803,
+       0x81C, 0x84480803,
+       0x81C, 0x834A0803,
+       0x81C, 0x674C0803,
+       0x81C, 0x664E0803,
+       0x81C, 0x65500803,
+       0x81C, 0x64520803,
+       0x81C, 0x63540803,
+       0x81C, 0x62560803,
+       0x81C, 0x61580803,
+       0x81C, 0x455A0803,
+       0x81C, 0x445C0803,
+       0x81C, 0x435E0803,
+       0x81C, 0x42600803,
+       0x81C, 0x41620803,
+       0x81C, 0x25640803,
+       0x81C, 0x24660803,
+       0x81C, 0x23680803,
+       0x81C, 0x226A0803,
+       0x81C, 0x216C0803,
+       0x81C, 0x016E0803,
+       0x81C, 0x01700803,
+       0x81C, 0x01720803,
+       0x81C, 0x01740803,
+       0x81C, 0x01760803,
+       0x81C, 0x01780803,
+       0x81C, 0x017A0803,
+       0x81C, 0x017C0803,
+       0x81C, 0x017E0803,
+       0xB0000000,     0x00000000,
+       0x80001004,     0x00000000,     0x40000000,     0x00000000,
+       0x81C, 0xFF000913,
+       0x81C, 0xFE020913,
+       0x81C, 0xFD040913,
+       0x81C, 0xFC060913,
+       0x81C, 0xFB080913,
+       0x81C, 0xFA0A0913,
+       0x81C, 0xF90C0913,
+       0x81C, 0xF80E0913,
+       0x81C, 0xF7100913,
+       0x81C, 0xF6120913,
+       0x81C, 0xF5140913,
+       0x81C, 0xF4160913,
+       0x81C, 0xF3180913,
+       0x81C, 0xF21A0913,
+       0x81C, 0xF11C0913,
+       0x81C, 0x941E0913,
+       0x81C, 0x93200913,
+       0x81C, 0x92220913,
+       0x81C, 0x91240913,
+       0x81C, 0x90260913,
+       0x81C, 0x8F280913,
+       0x81C, 0x8E2A0913,
+       0x81C, 0x8D2C0913,
+       0x81C, 0x8C2E0913,
+       0x81C, 0x8B300913,
+       0x81C, 0x8A320913,
+       0x81C, 0x89340913,
+       0x81C, 0x88360913,
+       0x81C, 0x87380913,
+       0x81C, 0x863A0913,
+       0x81C, 0x853C0913,
+       0x81C, 0x843E0913,
+       0x81C, 0x83400913,
+       0x81C, 0x82420913,
+       0x81C, 0x81440913,
+       0x81C, 0x07460913,
+       0x81C, 0x06480913,
+       0x81C, 0x054A0913,
+       0x81C, 0x044C0913,
+       0x81C, 0x034E0913,
+       0x81C, 0x02500913,
+       0x81C, 0x01520913,
+       0x81C, 0x88540903,
+       0x81C, 0x87560903,
+       0x81C, 0x86580903,
+       0x81C, 0x855A0903,
+       0x81C, 0x845C0903,
+       0x81C, 0x835E0903,
+       0x81C, 0x82600903,
+       0x81C, 0x81620903,
+       0x81C, 0x07640903,
+       0x81C, 0x06660903,
+       0x81C, 0x05680903,
+       0x81C, 0x046A0903,
+       0x81C, 0x036C0903,
+       0x81C, 0x026E0903,
+       0x81C, 0x01700903,
+       0x81C, 0x01720903,
+       0x81C, 0x01740903,
+       0x81C, 0x01760903,
+       0x81C, 0x01780903,
+       0x81C, 0x017A0903,
+       0x81C, 0x017C0903,
+       0x81C, 0x017E0903,
+       0x90001005,     0x00000000,     0x40000000,     0x00000000,
+       0x81C, 0xFF000913,
+       0x81C, 0xFE020913,
+       0x81C, 0xFD040913,
+       0x81C, 0xFC060913,
+       0x81C, 0xFB080913,
+       0x81C, 0xFA0A0913,
+       0x81C, 0xF90C0913,
+       0x81C, 0xF80E0913,
+       0x81C, 0xF7100913,
+       0x81C, 0xF6120913,
+       0x81C, 0xF5140913,
+       0x81C, 0xF4160913,
+       0x81C, 0xF3180913,
+       0x81C, 0xF21A0913,
+       0x81C, 0xF11C0913,
+       0x81C, 0x941E0913,
+       0x81C, 0x93200913,
+       0x81C, 0x92220913,
+       0x81C, 0x91240913,
+       0x81C, 0x90260913,
+       0x81C, 0x8F280913,
+       0x81C, 0x8E2A0913,
+       0x81C, 0x8D2C0913,
+       0x81C, 0x8C2E0913,
+       0x81C, 0x8B300913,
+       0x81C, 0x8A320913,
+       0x81C, 0x89340913,
+       0x81C, 0x88360913,
+       0x81C, 0x87380913,
+       0x81C, 0x863A0913,
+       0x81C, 0x853C0913,
+       0x81C, 0x843E0913,
+       0x81C, 0x83400913,
+       0x81C, 0x82420913,
+       0x81C, 0x81440913,
+       0x81C, 0x07460913,
+       0x81C, 0x06480913,
+       0x81C, 0x054A0913,
+       0x81C, 0x044C0913,
+       0x81C, 0x034E0913,
+       0x81C, 0x02500913,
+       0x81C, 0x01520913,
+       0x81C, 0x88540903,
+       0x81C, 0x87560903,
+       0x81C, 0x86580903,
+       0x81C, 0x855A0903,
+       0x81C, 0x845C0903,
+       0x81C, 0x835E0903,
+       0x81C, 0x82600903,
+       0x81C, 0x81620903,
+       0x81C, 0x07640903,
+       0x81C, 0x06660903,
+       0x81C, 0x05680903,
+       0x81C, 0x046A0903,
+       0x81C, 0x036C0903,
+       0x81C, 0x026E0903,
+       0x81C, 0x01700903,
+       0x81C, 0x01720903,
+       0x81C, 0x01740903,
+       0x81C, 0x01760903,
+       0x81C, 0x01780903,
+       0x81C, 0x017A0903,
+       0x81C, 0x017C0903,
+       0x81C, 0x017E0903,
+       0xA0000000,     0x00000000,
+       0x81C, 0xFF000913,
+       0x81C, 0xFE020913,
+       0x81C, 0xFD040913,
+       0x81C, 0xFC060913,
+       0x81C, 0xFB080913,
+       0x81C, 0xFA0A0913,
+       0x81C, 0xF90C0913,
+       0x81C, 0xF80E0913,
+       0x81C, 0xF7100913,
+       0x81C, 0xF6120913,
+       0x81C, 0xF5140913,
+       0x81C, 0xF4160913,
+       0x81C, 0xF3180913,
+       0x81C, 0xF21A0913,
+       0x81C, 0xF11C0913,
+       0x81C, 0x941E0913,
+       0x81C, 0x93200913,
+       0x81C, 0x92220913,
+       0x81C, 0x91240913,
+       0x81C, 0x90260913,
+       0x81C, 0x8F280913,
+       0x81C, 0x8E2A0913,
+       0x81C, 0x8D2C0913,
+       0x81C, 0x8C2E0913,
+       0x81C, 0x8B300913,
+       0x81C, 0x8A320913,
+       0x81C, 0x89340913,
+       0x81C, 0x88360913,
+       0x81C, 0x87380913,
+       0x81C, 0x863A0913,
+       0x81C, 0x853C0913,
+       0x81C, 0x843E0913,
+       0x81C, 0x83400913,
+       0x81C, 0x82420913,
+       0x81C, 0x81440913,
+       0x81C, 0x07460913,
+       0x81C, 0x06480913,
+       0x81C, 0x054A0913,
+       0x81C, 0x044C0913,
+       0x81C, 0x034E0913,
+       0x81C, 0x02500913,
+       0x81C, 0x01520913,
+       0x81C, 0x88540903,
+       0x81C, 0x87560903,
+       0x81C, 0x86580903,
+       0x81C, 0x855A0903,
+       0x81C, 0x845C0903,
+       0x81C, 0x835E0903,
+       0x81C, 0x82600903,
+       0x81C, 0x81620903,
+       0x81C, 0x07640903,
+       0x81C, 0x06660903,
+       0x81C, 0x05680903,
+       0x81C, 0x046A0903,
+       0x81C, 0x036C0903,
+       0x81C, 0x026E0903,
+       0x81C, 0x01700903,
+       0x81C, 0x01720903,
+       0x81C, 0x01740903,
+       0x81C, 0x01760903,
+       0x81C, 0x01780903,
+       0x81C, 0x017A0903,
+       0x81C, 0x017C0903,
+       0x81C, 0x017E0903,
+       0xB0000000,     0x00000000,
+       0x80001004,     0x00000000,     0x40000000,     0x00000000,
+       0xC50, 0x00000022,
+       0xC50, 0x00000020,
+       0x90001005,     0x00000000,     0x40000000,     0x00000000,
+       0xC50, 0x00000022,
+       0xC50, 0x00000022,
+       0xA0000000,     0x00000000,
+       0xC50, 0x00000022,
+       0xC50, 0x00000020,
+       0xB0000000,     0x00000000,
+
+};
+
+/* Registers the conditional AGC table above with the rtw88 PHY framework,
+ * pairing the rtw8821c_agc data with rtw_phy_cfg_agc — presumably the
+ * per-entry write handler invoked while parsing the table; TODO confirm
+ * against the RTW_DECL_TABLE_PHY_COND macro definition in main.h.
+ */
+RTW_DECL_TABLE_PHY_COND(rtw8821c_agc, rtw_phy_cfg_agc);
+
+static const u32 rtw8821c_bb[] = {
+       0x800, 0x9020D010,
+       0x804, 0x80018180,
+       0x808, 0x04028211,
+       0x80C, 0x13D10011,
+       0x810, 0x21104255,
+       0x814, 0x020C3D10,
+       0x818, 0x84A10385,
+       0x81C, 0x1E1E081F,
+       0x820, 0x0001AAAA,
+       0x824, 0x00030FE0,
+       0x828, 0x0000CCCC,
+       0x82C, 0x75CB7010,
+       0x830, 0x79A0EAAA,
+       0x834, 0x072E698A,
+       0x838, 0x87766461,
+       0x83C, 0x9194B2B6,
+       0x840, 0x171740E0,
+       0x844, 0x4D3D7CDB,
+       0x848, 0x4AD0408B,
+       0x84C, 0x6AFBF7A5,
+       0x850, 0x28A74706,
+       0x854, 0x0001520C,
+       0x858, 0x4060C000,
+       0x85C, 0x74010160,
+       0x860, 0x68A7C321,
+       0x864, 0x79F27432,
+       0x868, 0x8CA7A314,
+       0x86C, 0x558C2878,
+       0x870, 0x55555555,
+       0x874, 0x27612C2E,
+       0x878, 0xC0003152,
+       0x87C, 0x5C8FC000,
+       0x880, 0x00000000,
+       0x884, 0x00000000,
+       0x888, 0x00000000,
+       0x88C, 0x00000000,
+       0x890, 0x00000000,
+       0x894, 0x00000000,
+       0x898, 0x00000000,
+       0x89C, 0x00000000,
+       0x8A0, 0x00000013,
+       0x8A4, 0x7F7F7F7F,
+       0x8A8, 0x2202033E,
+       0x8AC, 0xF00F000A,
+       0x8B0, 0x00000600,
+       0x8B4, 0x000FC080,
+       0x8B8, 0xEC0057FF,
+       0x8BC, 0x2CB520A3,
+       0x8C0, 0xFFE04020,
+       0x8C4, 0x47C00000,
+       0x8C8, 0x00025165,
+       0x8CC, 0x08188492,
+       0x8D0, 0x0000B800,
+       0x8D4, 0x860308A0,
+       0x8D8, 0x290B5612,
+       0x8DC, 0x00000000,
+       0x8E0, 0x32D16777,
+       0x8E4, 0x49092925,
+       0x8E8, 0xFFFFC42C,
+       0x8EC, 0x99999999,
+       0x8F0, 0x00009999,
+       0x8F4, 0x00D80FA1,
+       0x8F8, 0x400000C0,
+       0x8FC, 0x00000130,
+       0x900, 0x00C00000,
+       0x904, 0x0FFF0FFF,
+       0x908, 0x00000000,
+       0x90C, 0x13000000,
+       0x910, 0x0000FC00,
+       0x914, 0xC6380000,
+       0x918, 0x1C1028C0,
+       0x91C, 0x64B11A1C,
+       0x920, 0xE0767233,
+       0x924, 0x855A2500,
+       0x928, 0x4AB0E4E4,
+       0x92C, 0xFFFEB200,
+       0x930, 0xFFFFFFFE,
+       0x934, 0x001FFFFF,
+       0x938, 0x00008480,
+       0x93C, 0xE41C0642,
+       0x940, 0x0E470430,
+       0x944, 0x00000000,
+       0x948, 0xAC000000,
+       0x94C, 0x10000083,
+       0x950, 0xB2010080,
+       0x954, 0x86510080,
+       0x958, 0x00000181,
+       0x95C, 0x04248000,
+       0x960, 0x00000000,
+       0x964, 0x00000000,
+       0x968, 0x00000000,
+       0x96C, 0x00000000,
+       0x970, 0x00001FFF,
+       0x974, 0x04000FFF,
+       0x978, 0x00000000,
+       0x97C, 0x00000000,
+       0x980, 0x00000000,
+       0x984, 0x00000000,
+       0x988, 0x00000000,
+       0x98C, 0x23440000,
+       0x990, 0x27100000,
+       0x994, 0xFFFF0100,
+       0x998, 0xFFFFFF5C,
+       0x99C, 0xFFFFFFFF,
+       0x9A0, 0x000000FF,
+       0x9A4, 0x80000088,
+       0x9A8, 0x0C2F0000,
+       0x9AC, 0x01560000,
+       0x9B0, 0x70000000,
+       0x9B4, 0x00000000,
+       0x9B8, 0x00000000,
+       0x9BC, 0x00000000,
+       0x9C0, 0x00000000,
+       0x9C4, 0x00000000,
+       0x9C8, 0x00000000,
+       0x9CC, 0x00000000,
+       0x9D0, 0x00000000,
+       0x9D4, 0x00000000,
+       0x9D8, 0x00000000,
+       0x9DC, 0x00000000,
+       0x9E0, 0x00000000,
+       0x9E4, 0x02000402,
+       0x9E8, 0x000022D4,
+       0x9EC, 0x00000000,
+       0x9F0, 0x00000000,
+       0x9F4, 0x00000000,
+       0x9F8, 0x00000000,
+       0x9FC, 0xEFFFF7FF,
+       0xA00, 0x00D040C8,
+       0xA04, 0x80FF800C,
+       0xA08, 0x9C838300,
+       0xA0C, 0x297E000F,
+       0xA10, 0x9500BB78,
+       0xA14, 0x1114D028,
+       0xA18, 0x00881117,
+       0xA1C, 0x89140F00,
+       0xA20, 0xE82C0000,
+       0xA24, 0x64B80C1C,
+       0xA28, 0x00008810,
+       0xA2C, 0x00D20000,
+       0xA70, 0x101FBF00,
+       0xA74, 0x00000107,
+       0xA78, 0x00008900,
+       0xA7C, 0x225B0606,
+       0xA80, 0x21807532,
+       0xA84, 0x80120000,
+       0xA88, 0x048C0000,
+       0xA8C, 0x12345678,
+       0xA90, 0xABCDEF00,
+       0xA94, 0x001B1B89,
+       0xA98, 0x00000000,
+       0xA9C, 0x3F000000,
+       0xAA0, 0x00000000,
+       0xAA4, 0x00080000,
+       0xAA8, 0xEACF0004,
+       0xAAC, 0x01235667,
+       0xAB0, 0x00000000,
+       0xB00, 0xE1000440,
+       0xB04, 0x00800000,
+       0xB08, 0xFF02030B,
+       0xB0C, 0x01EAA406,
+       0xB10, 0x00030690,
+       0xB14, 0x006000FA,
+       0xB18, 0x00000002,
+       0xB1C, 0x00000002,
+       0xB20, 0x4B00001F,
+       0xB24, 0x4E8E3E40,
+       0xB28, 0x03020100,
+       0xB2C, 0x07060504,
+       0xB30, 0x0B0A0908,
+       0xB34, 0x0F0E0D0C,
+       0xB38, 0x13121110,
+       0xB3C, 0x0000003A,
+       0xB40, 0x00000000,
+       0xB44, 0x80000000,
+       0xB48, 0x3F0000FA,
+       0xB4C, 0x88C80020,
+       0xB50, 0x00000000,
+       0xB54, 0x00004241,
+       0xB58, 0xE0008208,
+       0xB5C, 0x41EFFFF9,
+       0xB60, 0x00000000,
+       0xB64, 0x00200063,
+       0xB68, 0x0000003A,
+       0xB6C, 0x00000102,
+       0xB70, 0x4E6D1870,
+       0xB74, 0x03020100,
+       0xB78, 0x07060504,
+       0xB7C, 0x0B0A0908,
+       0xB80, 0x0F0E0D0C,
+       0xB84, 0x13121110,
+       0xB88, 0x00000000,
+       0xB8C, 0x00000000,
+       0xC00, 0x00000007,
+       0xC04, 0x03050020,
+       0xC08, 0x60403231,
+       0xC0C, 0x00012345,
+       0xC10, 0x00000100,
+       0xC14, 0x01000000,
+       0xC18, 0x00000000,
+       0xC1C, 0x40040053,
+       0xC20, 0x400503A3,
+       0xC24, 0x00000000,
+       0xC28, 0x00000000,
+       0xC2C, 0x00000000,
+       0xC30, 0x00000000,
+       0xC34, 0x00000000,
+       0xC38, 0x00000000,
+       0xC3C, 0x00000000,
+       0xC40, 0x00000000,
+       0xC44, 0x00000000,
+       0xC48, 0x00000000,
+       0xC4C, 0x00000000,
+       0xC50, 0x00000020,
+       0xC54, 0x00000000,
+       0xC58, 0xD8020402,
+       0xC5C, 0xDE000120,
+       0xC68, 0x0000003F,
+       0xC6C, 0x0000122A,
+       0xC70, 0x00000000,
+       0xC74, 0x00000000,
+       0xC78, 0x00000000,
+       0xC7C, 0x00000000,
+       0xC80, 0x00000000,
+       0xC84, 0x00000000,
+       0xC88, 0x00000000,
+       0xC8C, 0x07000000,
+       0xC94, 0x01000100,
+       0xC98, 0x201C8000,
+       0xC9C, 0x00000000,
+       0xCA0, 0x0000A555,
+       0xCA4, 0x08040201,
+       0xCA8, 0x80402010,
+       0xCAC, 0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0xCB0, 0x77777717,
+       0xCB4, 0x00000073,
+       0xA0000000,     0x00000000,
+       0xCB0, 0x77775747,
+       0xCB4, 0x10000077,
+       0xB0000000,     0x00000000,
+       0xCB8, 0x00000000,
+       0xCBC, 0x00000000,
+       0xCC0, 0x00000000,
+       0xCC4, 0x00000000,
+       0xCC8, 0x00000000,
+       0xCCC, 0x00000000,
+       0xCD0, 0x00000000,
+       0xCD4, 0x00000000,
+       0xCD8, 0x00000000,
+       0xCDC, 0x00000000,
+       0xCE0, 0x00000000,
+       0xCE4, 0x00000000,
+       0xCE8, 0x00000000,
+       0xCEC, 0x00000000,
+       0xE00, 0x00000007,
+       0xE04, 0x00000020,
+       0xE08, 0x60403231,
+       0xE0C, 0x00012345,
+       0xE10, 0x00000100,
+       0xE14, 0x01000000,
+       0xE18, 0x00000000,
+       0xE1C, 0x40040053,
+       0xE20, 0x00020103,
+       0xE24, 0x00000000,
+       0xE28, 0x00000000,
+       0xE2C, 0x00000000,
+       0xE30, 0x00000000,
+       0xE34, 0x00000000,
+       0xE38, 0x00000000,
+       0xE3C, 0x00000000,
+       0xE40, 0x00000000,
+       0xE44, 0x00000000,
+       0xE48, 0x00000000,
+       0xE4C, 0x00000000,
+       0xE50, 0x00000020,
+       0xE54, 0x00000000,
+       0xE58, 0xD8020402,
+       0xE5C, 0xDE000120,
+       0xE68, 0x59799979,
+       0xE6C, 0x0000122A,
+       0xE70, 0x99795979,
+       0xE74, 0x99795979,
+       0xE78, 0x99799979,
+       0xE7C, 0x99791979,
+       0xE80, 0x19791979,
+       0xE84, 0x19791979,
+       0xE88, 0x00000000,
+       0xE8C, 0x07000000,
+       0xE94, 0x01000100,
+       0xE98, 0x201C8000,
+       0xE9C, 0x00000000,
+       0xEA0, 0x0000A555,
+       0xEA4, 0x08040201,
+       0xEA8, 0x80402010,
+       0xEAC, 0x00000000,
+       0xEB0, 0x98543210,
+       0xEB4, 0x000000BA,
+       0xEB8, 0x00000000,
+       0xEBC, 0x00000000,
+       0xEC0, 0x00000000,
+       0xEC4, 0x00000000,
+       0xEC8, 0x00000000,
+       0xECC, 0x00000000,
+       0xED0, 0x00000000,
+       0xED4, 0x00000000,
+       0xED8, 0x00000000,
+       0xEDC, 0x00000000,
+       0xEE0, 0x00000000,
+       0xEE4, 0x00000000,
+       0xEE8, 0x00000000,
+       0xEEC, 0x00000000,
+       0x1900, 0x00000000,
+       0x1904, 0x00238000,
+       0x1908, 0x00000000,
+       0x190C, 0x00000000,
+       0x1910, 0x00001800,
+       0x1914, 0x00000000,
+       0x1918, 0x00000000,
+       0x191C, 0x00000000,
+       0x1920, 0x00000000,
+       0x1924, 0x00000000,
+       0x1928, 0x00000000,
+       0x192C, 0x00000000,
+       0x1930, 0x00000000,
+       0x1934, 0x00000000,
+       0x1938, 0x00000000,
+       0x193C, 0x00000000,
+       0x1940, 0x00000000,
+       0x1944, 0x00000000,
+       0x1948, 0x00000000,
+       0x194C, 0x00000000,
+       0x1950, 0x00000000,
+       0x1954, 0x00000000,
+       0x1958, 0x00000000,
+       0x195C, 0x00000000,
+       0x1960, 0x00000000,
+       0x1964, 0x00000000,
+       0x1968, 0x00000000,
+       0x196C, 0x00000000,
+       0x1970, 0x00000000,
+       0x1974, 0x00000000,
+       0x1978, 0x00000000,
+       0x197C, 0x00000000,
+       0x1980, 0x00000000,
+       0x1984, 0x03000000,
+       0x1988, 0x21401E88,
+       0x198C, 0x00004000,
+       0x1990, 0x00000000,
+       0x1994, 0x00000000,
+       0x1998, 0x00000053,
+       0x199C, 0x00000000,
+       0x19A0, 0x00000000,
+       0x19A4, 0x00000000,
+       0x19A8, 0x010A0000,
+       0x19AC, 0x0E47E47F,
+       0x19B0, 0x00008000,
+       0x19B4, 0x0E47E47F,
+       0x19B8, 0x00000000,
+       0x19BC, 0x00000000,
+       0x19C0, 0x00000000,
+       0x19C4, 0x00000000,
+       0x19C8, 0x00000000,
+       0x19CC, 0x00000000,
+       0x19D0, 0x00000000,
+       0x19D4, 0x77777777,
+       0x19D8, 0x00000777,
+       0x19DC, 0x133E0F37,
+       0x19E0, 0x00000000,
+       0x19E4, 0x00000000,
+       0x19E8, 0x00000000,
+       0x19EC, 0x00000000,
+       0x19F0, 0x00000000,
+       0x19F4, 0x00000000,
+       0x19F8, 0x01A00000,
+       0x19FC, 0x00000000,
+       0x1C00, 0x00000100,
+       0x1C04, 0x01000000,
+       0x1C08, 0x00000100,
+       0x1C0C, 0x01000000,
+       0x1C10, 0x00000100,
+       0x1C14, 0x01000000,
+       0x1C18, 0x00000100,
+       0x1C1C, 0x01000000,
+       0x1C20, 0x00000100,
+       0x1C24, 0x01000000,
+       0x1C28, 0x00000100,
+       0x1C2C, 0x01000000,
+       0x1C30, 0x00000100,
+       0x1C34, 0x01000000,
+       0x1C38, 0x00000000,
+       0x1C3C, 0x00008000,
+       0x1C40, 0x000C0100,
+       0x1C44, 0x000000F3,
+       0x1C48, 0x1A8249A8,
+       0x1C4C, 0x1461C826,
+       0x1C50, 0x0001469E,
+       0x1C54, 0x58D158D1,
+       0x1C58, 0x04490088,
+       0x1C5C, 0x04004400,
+       0x1C60, 0x00000000,
+       0x1C64, 0x04004400,
+       0x1C68, 0x0B7B7B75,
+       0x1C6C, 0x01000000,
+       0x1C70, 0x00A08145,
+       0x1C74, 0x2080E0E0,
+       0x1C78, 0x00000000,
+       0x1C7C, 0x00000010,
+       0x1C80, 0x00000100,
+       0x1C84, 0x01000000,
+       0x1C88, 0x00000100,
+       0x1C8C, 0x01000000,
+       0x1C90, 0x00000100,
+       0x1C94, 0x01000000,
+       0x1C98, 0x00000100,
+       0x1C9C, 0x01000000,
+       0x1CA0, 0x00000100,
+       0x1CA4, 0x01000000,
+       0x1CA8, 0x00000100,
+       0x1CAC, 0x01000000,
+       0x1CB0, 0x00000100,
+       0x1CB4, 0x01000000,
+       0x1CB8, 0x00000000,
+       0x1CBC, 0x00000000,
+       0x1CC0, 0x201B0100,
+       0x1CC4, 0x00308000,
+       0x1CC8, 0x5B74B6E9,
+       0x1CCC, 0x01000000,
+       0x1CD0, 0x00000400,
+       0x1CD4, 0x01000000,
+       0x1CD8, 0x01B8ADEB,
+       0x1CDC, 0x01000000,
+       0x1CE0, 0x00030003,
+       0x1CE4, 0x4E4A0306,
+       0x1CE8, 0x00000100,
+       0x1CEC, 0x01000000,
+       0x1CF0, 0x00000100,
+       0x1CF4, 0x01000000,
+       0x1CF8, 0x01B8ADEB,
+       0x1CFC, 0x00000000,
+       0xC60, 0x700B8040,
+       0xC60, 0x700B8040,
+       0xC60, 0x70146040,
+       0xC60, 0x70246040,
+       0xC60, 0x70346040,
+       0xC60, 0x70446040,
+       0xC60, 0x705B2040,
+       0xC60, 0x70646040,
+       0xC60, 0x707B8040,
+       0xC60, 0x708B8040,
+       0xC60, 0x709B8040,
+       0xC60, 0x70AB8040,
+       0xC60, 0x70BB6040,
+       0xC60, 0x70C06040,
+       0xC60, 0x70D06040,
+       0xC60, 0x70EF6040,
+       0xC60, 0x70F06040,
+       0xE60, 0x700B8040,
+       0xE60, 0x700B8040,
+       0xE60, 0x70146040,
+       0xE60, 0x70246040,
+       0xE60, 0x70346040,
+       0xE60, 0x70446040,
+       0xE60, 0x705B2040,
+       0xE60, 0x70646040,
+       0xE60, 0x707B8040,
+       0xE60, 0x708B8040,
+       0xE60, 0x709B8040,
+       0xE60, 0x70AB8040,
+       0xE60, 0x70BB6040,
+       0xE60, 0x70C06040,
+       0xE60, 0x70D06040,
+       0xE60, 0x70EF6040,
+       0xE60, 0x70F06040,
+       0xC64, 0x00800000,
+       0xC64, 0x08800001,
+       0xC64, 0x00800002,
+       0xC64, 0x00800003,
+       0xC64, 0x00800004,
+       0xC64, 0x00800005,
+       0xC64, 0x00800006,
+       0xC64, 0x08800007,
+       0xC64, 0x00004000,
+       0xE64, 0x00800000,
+       0xE64, 0x08800001,
+       0xE64, 0x00800002,
+       0xE64, 0x00800003,
+       0xE64, 0x00800004,
+       0xE64, 0x00800005,
+       0xE64, 0x00800006,
+       0xE64, 0x08800007,
+       0xE64, 0x00004000,
+       0x1B00, 0xF8000008,
+       0x1B00, 0xF80A7008,
+       0x1B00, 0xF8015008,
+       0x1B00, 0xF8000008,
+       0x1B04, 0xE24629D2,
+       0x1B08, 0x00000080,
+       0x1B0C, 0x00000000,
+       0x1B10, 0x00011C00,
+       0x1B14, 0x00000000,
+       0x1B18, 0x00292903,
+       0x1B1C, 0xA2193C32,
+       0x1B20, 0x01840008,
+       0x1B24, 0x01860008,
+       0x1B28, 0x80060300,
+       0x1B2C, 0x00000003,
+       0x1B30, 0x20000000,
+       0x1B34, 0x00000800,
+       0x1B3C, 0x20000000,
+       0x1BC0, 0x01000000,
+       0x1BCC, 0x00000000,
+       0x1B90, 0x0001E018,
+       0x1B94, 0xF76D9F84,
+       0x1BC8, 0x000C44AA,
+       0x1BCC, 0x11978200,
+       0x1B8C, 0x00002000,
+       0x1B9C, 0x5B554F48,
+       0x1BA0, 0x6F6B6661,
+       0x1BA4, 0x817D7874,
+       0x1BA8, 0x908C8884,
+       0x1BAC, 0x9D9A9793,
+       0x1BB0, 0xAAA7A4A1,
+       0x1BB4, 0xB6B3B0AD,
+       0x1B40, 0x02CE03E8,
+       0x1B44, 0x01FD024C,
+       0x1B48, 0x01A101C9,
+       0x1B4C, 0x016A0183,
+       0x1B50, 0x01430153,
+       0x1B54, 0x01280134,
+       0x1B58, 0x0112011C,
+       0x1B5C, 0x01000107,
+       0x1B60, 0x00F200F9,
+       0x1B64, 0x00E500EB,
+       0x1B68, 0x00DA00E0,
+       0x1B6C, 0x00D200D6,
+       0x1B70, 0x00C900CD,
+       0x1B74, 0x00C200C5,
+       0x1B78, 0x00BB00BE,
+       0x1B7C, 0x00B500B8,
+       0x1BDC, 0x40CAFFE1,
+       0x1BDC, 0x4080A1E3,
+       0x1BDC, 0x405165E5,
+       0x1BDC, 0x403340E7,
+       0x1BDC, 0x402028E9,
+       0x1BDC, 0x401419EB,
+       0x1BDC, 0x400D10ED,
+       0x1BDC, 0x40080AEF,
+       0x1BDC, 0x400506F1,
+       0x1BDC, 0x400304F3,
+       0x1BDC, 0x400203F5,
+       0x1BDC, 0x400102F7,
+       0x1BDC, 0x400101F9,
+       0x1BDC, 0x400101FB,
+       0x1BDC, 0x400101FD,
+       0x1BDC, 0x400101FF,
+       0x1BDC, 0x40CAFF81,
+       0x1BDC, 0x4080A183,
+       0x1BDC, 0x40516585,
+       0x1BDC, 0x40334087,
+       0x1BDC, 0x40202889,
+       0x1BDC, 0x4014198B,
+       0x1BDC, 0x400D108D,
+       0x1BDC, 0x40080A8F,
+       0x1BDC, 0x40050691,
+       0x1BDC, 0x40030493,
+       0x1BDC, 0x40020395,
+       0x1BDC, 0x40010297,
+       0x1BDC, 0x40010199,
+       0x1BDC, 0x4001019B,
+       0x1BDC, 0x4001019D,
+       0x1BDC, 0x4001019F,
+       0x1BDC, 0x00000000,
+       0x1BDC, 0xD0000001,
+       0x1BDC, 0xD0000003,
+       0x1BDC, 0xD0000005,
+       0x1BDC, 0xD0000007,
+       0x1BDC, 0xD0000009,
+       0x1BDC, 0xD000000B,
+       0x1BDC, 0xD000000D,
+       0x1BDC, 0xD000000F,
+       0x1BDC, 0xD0000011,
+       0x1BDC, 0xD0000013,
+       0x1BDC, 0xD0000015,
+       0x1BDC, 0xD0000017,
+       0x1BDC, 0xD0000019,
+       0x1BDC, 0xD000001B,
+       0x1BDC, 0xD000001D,
+       0x1BDC, 0xD000001F,
+       0x1BDC, 0xD0000021,
+       0x1BDC, 0xD0000023,
+       0x1BDC, 0xD0000025,
+       0x1BDC, 0xD0000027,
+       0x1BDC, 0xD0000029,
+       0x1BDC, 0xD000002B,
+       0x1BDC, 0xD000002D,
+       0x1BDC, 0xD000002F,
+       0x1BDC, 0xD0000031,
+       0x1BDC, 0xD0000033,
+       0x1BDC, 0xD0000035,
+       0x1BDC, 0xD0000037,
+       0x1BDC, 0xD0000039,
+       0x1BDC, 0xD000003B,
+       0x1BDC, 0xD000003D,
+       0x1BDC, 0xD000003F,
+       0x1BDC, 0xD0000041,
+       0x1BDC, 0xD0000043,
+       0x1BDC, 0xD0000045,
+       0x1BDC, 0xD0000047,
+       0x1BDC, 0xD0000049,
+       0x1BDC, 0xD000004B,
+       0x1BDC, 0xD000004D,
+       0x1BDC, 0xD000004F,
+       0x1BDC, 0xD0000051,
+       0x1BDC, 0xD0000053,
+       0x1BDC, 0xD0000055,
+       0x1BDC, 0xD0000057,
+       0x1BDC, 0xD0000059,
+       0x1BDC, 0xD000005B,
+       0x1BDC, 0xD000005D,
+       0x1BDC, 0xD000005F,
+       0x1BDC, 0xD0000061,
+       0x1BDC, 0xD0000063,
+       0x1BDC, 0xD0000065,
+       0x1BDC, 0xD0000067,
+       0x1BDC, 0xD0000069,
+       0x1BDC, 0xD000006B,
+       0x1BDC, 0xD000006D,
+       0x1BDC, 0xD000006F,
+       0x1BDC, 0xD0000071,
+       0x1BDC, 0xD0000073,
+       0x1BDC, 0xD0000075,
+       0x1BDC, 0xD0000077,
+       0x1BDC, 0xD0000079,
+       0x1BDC, 0xD000007B,
+       0x1BDC, 0xD000007D,
+       0x1BDC, 0xD000007F,
+       0x1BDC, 0x90000081,
+       0x1BDC, 0x90000083,
+       0x1BDC, 0x90000085,
+       0x1BDC, 0x90000087,
+       0x1BDC, 0x90000089,
+       0x1BDC, 0x9000008B,
+       0x1BDC, 0x9000008D,
+       0x1BDC, 0x9000008F,
+       0x1BDC, 0x90000091,
+       0x1BDC, 0x90000093,
+       0x1BDC, 0x90000095,
+       0x1BDC, 0x90000097,
+       0x1BDC, 0x90000099,
+       0x1BDC, 0x9000009B,
+       0x1BDC, 0x9000009D,
+       0x1BDC, 0x9000009F,
+       0x1BDC, 0x900000A1,
+       0x1BDC, 0x900000A3,
+       0x1BDC, 0x900000A5,
+       0x1BDC, 0x900000A7,
+       0x1BDC, 0x900000A9,
+       0x1BDC, 0x900000AB,
+       0x1BDC, 0x900000AD,
+       0x1BDC, 0x900000AF,
+       0x1BDC, 0x900000B1,
+       0x1BDC, 0x900000B3,
+       0x1BDC, 0x900000B5,
+       0x1BDC, 0x900000B7,
+       0x1BDC, 0x900000B9,
+       0x1BDC, 0x900000BB,
+       0x1BDC, 0x900000BD,
+       0x1BDC, 0x900000BF,
+       0x1BDC, 0x900000C1,
+       0x1BDC, 0x900000C3,
+       0x1BDC, 0x900000C5,
+       0x1BDC, 0x900000C7,
+       0x1BDC, 0x900000C9,
+       0x1BDC, 0x900000CB,
+       0x1BDC, 0x900000CD,
+       0x1BDC, 0x900000CF,
+       0x1BDC, 0x900000D1,
+       0x1BDC, 0x900000D3,
+       0x1BDC, 0x900000D5,
+       0x1BDC, 0x900000D7,
+       0x1BDC, 0x900000D9,
+       0x1BDC, 0x900000DB,
+       0x1BDC, 0x900000DD,
+       0x1BDC, 0x900000DF,
+       0x1BDC, 0x900000E1,
+       0x1BDC, 0x900000E3,
+       0x1BDC, 0x900000E5,
+       0x1BDC, 0x900000E7,
+       0x1BDC, 0x900000E9,
+       0x1BDC, 0x900000EB,
+       0x1BDC, 0x900000ED,
+       0x1BDC, 0x900000EF,
+       0x1BDC, 0x900000F1,
+       0x1BDC, 0x900000F3,
+       0x1BDC, 0x900000F5,
+       0x1BDC, 0x900000F7,
+       0x1BDC, 0x900000F9,
+       0x1BDC, 0x900000FB,
+       0x1BDC, 0x900000FD,
+       0x1BDC, 0x900000FF,
+       0x1BDC, 0x00000000,
+       0x1B00, 0xF8000000,
+       0x1B80, 0x00000007,
+       0x1B80, 0x090A0005,
+       0x1B80, 0x090A0007,
+       0x1B80, 0x0FFE0015,
+       0x1B80, 0x0FFE0017,
+       0x1B80, 0x00220025,
+       0x1B80, 0x00220027,
+       0x1B80, 0x00040035,
+       0x1B80, 0x00040037,
+       0x1B80, 0x05C00045,
+       0x1B80, 0x05C00047,
+       0x1B80, 0x00070055,
+       0x1B80, 0x00070057,
+       0x1B80, 0x64000065,
+       0x1B80, 0x64000067,
+       0x1B80, 0x00020075,
+       0x1B80, 0x00020077,
+       0x1B80, 0x00080085,
+       0x1B80, 0x00080087,
+       0x1B80, 0x80000095,
+       0x1B80, 0x80000097,
+       0x1B80, 0x090800A5,
+       0x1B80, 0x090800A7,
+       0x1B80, 0x0F0200B5,
+       0x1B80, 0x0F0200B7,
+       0x1B80, 0x002200C5,
+       0x1B80, 0x002200C7,
+       0x1B80, 0x000400D5,
+       0x1B80, 0x000400D7,
+       0x1B80, 0x05C000E5,
+       0x1B80, 0x05C000E7,
+       0x1B80, 0x000700F5,
+       0x1B80, 0x000700F7,
+       0x1B80, 0x64020105,
+       0x1B80, 0x64020107,
+       0x1B80, 0x00020115,
+       0x1B80, 0x00020117,
+       0x1B80, 0x00040125,
+       0x1B80, 0x00040127,
+       0x1B80, 0x4A000135,
+       0x1B80, 0x4A000137,
+       0x1B80, 0x4B040145,
+       0x1B80, 0x4B040147,
+       0x1B80, 0x85030155,
+       0x1B80, 0x85030157,
+       0x1B80, 0x40090165,
+       0x1B80, 0x40090167,
+       0x1B80, 0xE02A0175,
+       0x1B80, 0xE02A0177,
+       0x1B80, 0x4B050185,
+       0x1B80, 0x4B050187,
+       0x1B80, 0x86030195,
+       0x1B80, 0x86030197,
+       0x1B80, 0x400B01A5,
+       0x1B80, 0x400B01A7,
+       0x1B80, 0xE02A01B5,
+       0x1B80, 0xE02A01B7,
+       0x1B80, 0x4B0001C5,
+       0x1B80, 0x4B0001C7,
+       0x1B80, 0x000701D5,
+       0x1B80, 0x000701D7,
+       0x1B80, 0x4C0001E5,
+       0x1B80, 0x4C0001E7,
+       0x1B80, 0x000401F5,
+       0x1B80, 0x000401F7,
+       0x1B80, 0x4D040205,
+       0x1B80, 0x4D040207,
+       0x1B80, 0x2EE00215,
+       0x1B80, 0x2EE00217,
+       0x1B80, 0x00000225,
+       0x1B80, 0x00000227,
+       0x1B80, 0x2EF00235,
+       0x1B80, 0x2EF00237,
+       0x1B80, 0x00000245,
+       0x1B80, 0x00000247,
+       0x1B80, 0x20810255,
+       0x1B80, 0x20810257,
+       0x1B80, 0x23450265,
+       0x1B80, 0x23450267,
+       0x1B80, 0x4D000275,
+       0x1B80, 0x4D000277,
+       0x1B80, 0x00040285,
+       0x1B80, 0x00040287,
+       0x1B80, 0x30000295,
+       0x1B80, 0x30000297,
+       0x1B80, 0xE1D602A5,
+       0x1B80, 0xE1D602A7,
+       0x1B80, 0xF01102B5,
+       0x1B80, 0xF01102B7,
+       0x1B80, 0xF11102C5,
+       0x1B80, 0xF11102C7,
+       0x1B80, 0xF21102D5,
+       0x1B80, 0xF21102D7,
+       0x1B80, 0xF31102E5,
+       0x1B80, 0xF31102E7,
+       0x1B80, 0xF41102F5,
+       0x1B80, 0xF41102F7,
+       0x1B80, 0xF5110305,
+       0x1B80, 0xF5110307,
+       0x1B80, 0xF6110315,
+       0x1B80, 0xF6110317,
+       0x1B80, 0xF7110325,
+       0x1B80, 0xF7110327,
+       0x1B80, 0xF8110335,
+       0x1B80, 0xF8110337,
+       0x1B80, 0xF9110345,
+       0x1B80, 0xF9110347,
+       0x1B80, 0xFA110355,
+       0x1B80, 0xFA110357,
+       0x1B80, 0xFB110365,
+       0x1B80, 0xFB110367,
+       0x1B80, 0xFC110375,
+       0x1B80, 0xFC110377,
+       0x1B80, 0xFD110385,
+       0x1B80, 0xFD110387,
+       0x1B80, 0xFE110395,
+       0x1B80, 0xFE110397,
+       0x1B80, 0xFF1103A5,
+       0x1B80, 0xFF1103A7,
+       0x1B80, 0x000103B5,
+       0x1B80, 0x000103B7,
+       0x1B80, 0x305503C5,
+       0x1B80, 0x305503C7,
+       0x1B80, 0x306D03D5,
+       0x1B80, 0x306D03D7,
+       0x1B80, 0x30B803E5,
+       0x1B80, 0x30B803E7,
+       0x1B80, 0x30BB03F5,
+       0x1B80, 0x30BB03F7,
+       0x1B80, 0x306F0405,
+       0x1B80, 0x306F0407,
+       0x1B80, 0x307A0415,
+       0x1B80, 0x307A0417,
+       0x1B80, 0x30850425,
+       0x1B80, 0x30850427,
+       0x1B80, 0x30C50435,
+       0x1B80, 0x30C50437,
+       0x1B80, 0x30BF0445,
+       0x1B80, 0x30BF0447,
+       0x1B80, 0x30D30455,
+       0x1B80, 0x30D30457,
+       0x1B80, 0x30DE0465,
+       0x1B80, 0x30DE0467,
+       0x1B80, 0x30E90475,
+       0x1B80, 0x30E90477,
+       0x1B80, 0x304C0485,
+       0x1B80, 0x304C0487,
+       0x1B80, 0x31180495,
+       0x1B80, 0x31180497,
+       0x1B80, 0x312904A5,
+       0x1B80, 0x312904A7,
+       0x1B80, 0x313E04B5,
+       0x1B80, 0x313E04B7,
+       0x1B80, 0x4D0404C5,
+       0x1B80, 0x4D0404C7,
+       0x1B80, 0x2EE004D5,
+       0x1B80, 0x2EE004D7,
+       0x1B80, 0x000004E5,
+       0x1B80, 0x000004E7,
+       0x1B80, 0x2EF004F5,
+       0x1B80, 0x2EF004F7,
+       0x1B80, 0x00000505,
+       0x1B80, 0x00000507,
+       0x1B80, 0x20810515,
+       0x1B80, 0x20810517,
+       0x1B80, 0xA3B50525,
+       0x1B80, 0xA3B50527,
+       0x1B80, 0x4D000535,
+       0x1B80, 0x4D000537,
+       0x1B80, 0x30000545,
+       0x1B80, 0x30000547,
+       0x1B80, 0xE1690555,
+       0x1B80, 0xE1690557,
+       0x1B80, 0x4D040565,
+       0x1B80, 0x4D040567,
+       0x1B80, 0x20800575,
+       0x1B80, 0x20800577,
+       0x1B80, 0x00000585,
+       0x1B80, 0x00000587,
+       0x1B80, 0x4D000595,
+       0x1B80, 0x4D000597,
+       0x1B80, 0x550705A5,
+       0x1B80, 0x550705A7,
+       0x1B80, 0xE16105B5,
+       0x1B80, 0xE16105B7,
+       0x1B80, 0xE16105C5,
+       0x1B80, 0xE16105C7,
+       0x1B80, 0x4D0405D5,
+       0x1B80, 0x4D0405D7,
+       0x1B80, 0x208805E5,
+       0x1B80, 0x208805E7,
+       0x1B80, 0x020005F5,
+       0x1B80, 0x020005F7,
+       0x1B80, 0x4D000605,
+       0x1B80, 0x4D000607,
+       0x1B80, 0x550F0615,
+       0x1B80, 0x550F0617,
+       0x1B80, 0xE1610625,
+       0x1B80, 0xE1610627,
+       0x1B80, 0x4F020635,
+       0x1B80, 0x4F020637,
+       0x1B80, 0x4E000645,
+       0x1B80, 0x4E000647,
+       0x1B80, 0x53020655,
+       0x1B80, 0x53020657,
+       0x1B80, 0x52010665,
+       0x1B80, 0x52010667,
+       0x1B80, 0xE1650675,
+       0x1B80, 0xE1650677,
+       0x1B80, 0x4D080685,
+       0x1B80, 0x4D080687,
+       0x1B80, 0x57100695,
+       0x1B80, 0x57100697,
+       0x1B80, 0x570006A5,
+       0x1B80, 0x570006A7,
+       0x1B80, 0x4D0006B5,
+       0x1B80, 0x4D0006B7,
+       0x1B80, 0x000106C5,
+       0x1B80, 0x000106C7,
+       0x1B80, 0xE16906D5,
+       0x1B80, 0xE16906D7,
+       0x1B80, 0x000106E5,
+       0x1B80, 0x000106E7,
+       0x1B80, 0x308F06F5,
+       0x1B80, 0x308F06F7,
+       0x1B80, 0x00230705,
+       0x1B80, 0x00230707,
+       0x1B80, 0xE1C90715,
+       0x1B80, 0xE1C90717,
+       0x1B80, 0x00020725,
+       0x1B80, 0x00020727,
+       0x1B80, 0x54E90735,
+       0x1B80, 0x54E90737,
+       0x1B80, 0x0BA60745,
+       0x1B80, 0x0BA60747,
+       0x1B80, 0x00230755,
+       0x1B80, 0x00230757,
+       0x1B80, 0xE1C90765,
+       0x1B80, 0xE1C90767,
+       0x1B80, 0x00020775,
+       0x1B80, 0x00020777,
+       0x1B80, 0x4D300785,
+       0x1B80, 0x4D300787,
+       0x1B80, 0x30A80795,
+       0x1B80, 0x30A80797,
+       0x1B80, 0x308B07A5,
+       0x1B80, 0x308B07A7,
+       0x1B80, 0x002207B5,
+       0x1B80, 0x002207B7,
+       0x1B80, 0xE1C907C5,
+       0x1B80, 0xE1C907C7,
+       0x1B80, 0x000207D5,
+       0x1B80, 0x000207D7,
+       0x1B80, 0x54E807E5,
+       0x1B80, 0x54E807E7,
+       0x1B80, 0x0BA607F5,
+       0x1B80, 0x0BA607F7,
+       0x1B80, 0x00220805,
+       0x1B80, 0x00220807,
+       0x1B80, 0xE1C90815,
+       0x1B80, 0xE1C90817,
+       0x1B80, 0x00020825,
+       0x1B80, 0x00020827,
+       0x1B80, 0x4D300835,
+       0x1B80, 0x4D300837,
+       0x1B80, 0x30A80845,
+       0x1B80, 0x30A80847,
+       0x1B80, 0x63F10855,
+       0x1B80, 0x63F10857,
+       0x1B80, 0xE1690865,
+       0x1B80, 0xE1690867,
+       0x1B80, 0xE1C90875,
+       0x1B80, 0xE1C90877,
+       0x1B80, 0x63F40885,
+       0x1B80, 0x63F40887,
+       0x1B80, 0xE1690895,
+       0x1B80, 0xE1690897,
+       0x1B80, 0xE1C908A5,
+       0x1B80, 0xE1C908A7,
+       0x1B80, 0x0BA808B5,
+       0x1B80, 0x0BA808B7,
+       0x1B80, 0x63F808C5,
+       0x1B80, 0x63F808C7,
+       0x1B80, 0xE16908D5,
+       0x1B80, 0xE16908D7,
+       0x1B80, 0xE1C908E5,
+       0x1B80, 0xE1C908E7,
+       0x1B80, 0x0BA908F5,
+       0x1B80, 0x0BA908F7,
+       0x1B80, 0x63FC0905,
+       0x1B80, 0x63FC0907,
+       0x1B80, 0xE1690915,
+       0x1B80, 0xE1690917,
+       0x1B80, 0xE1C90925,
+       0x1B80, 0xE1C90927,
+       0x1B80, 0x63FF0935,
+       0x1B80, 0x63FF0937,
+       0x1B80, 0xE1690945,
+       0x1B80, 0xE1690947,
+       0x1B80, 0xE1C90955,
+       0x1B80, 0xE1C90957,
+       0x1B80, 0x63000965,
+       0x1B80, 0x63000967,
+       0x1B80, 0xE1690975,
+       0x1B80, 0xE1690977,
+       0x1B80, 0xE1C90985,
+       0x1B80, 0xE1C90987,
+       0x1B80, 0x63030995,
+       0x1B80, 0x63030997,
+       0x1B80, 0xE16909A5,
+       0x1B80, 0xE16909A7,
+       0x1B80, 0xE1C909B5,
+       0x1B80, 0xE1C909B7,
+       0x1B80, 0xF4D409C5,
+       0x1B80, 0xF4D409C7,
+       0x1B80, 0x630709D5,
+       0x1B80, 0x630709D7,
+       0x1B80, 0xE16909E5,
+       0x1B80, 0xE16909E7,
+       0x1B80, 0xE1C909F5,
+       0x1B80, 0xE1C909F7,
+       0x1B80, 0xF5DB0A05,
+       0x1B80, 0xF5DB0A07,
+       0x1B80, 0x630B0A15,
+       0x1B80, 0x630B0A17,
+       0x1B80, 0xE1690A25,
+       0x1B80, 0xE1690A27,
+       0x1B80, 0xE1C90A35,
+       0x1B80, 0xE1C90A37,
+       0x1B80, 0x630E0A45,
+       0x1B80, 0x630E0A47,
+       0x1B80, 0xE1690A55,
+       0x1B80, 0xE1690A57,
+       0x1B80, 0xE1C90A65,
+       0x1B80, 0xE1C90A67,
+       0x1B80, 0x4D300A75,
+       0x1B80, 0x4D300A77,
+       0x1B80, 0x55010A85,
+       0x1B80, 0x55010A87,
+       0x1B80, 0x57040A95,
+       0x1B80, 0x57040A97,
+       0x1B80, 0x57000AA5,
+       0x1B80, 0x57000AA7,
+       0x1B80, 0x96000AB5,
+       0x1B80, 0x96000AB7,
+       0x1B80, 0x57080AC5,
+       0x1B80, 0x57080AC7,
+       0x1B80, 0x57000AD5,
+       0x1B80, 0x57000AD7,
+       0x1B80, 0x95000AE5,
+       0x1B80, 0x95000AE7,
+       0x1B80, 0x4D000AF5,
+       0x1B80, 0x4D000AF7,
+       0x1B80, 0x6C070B05,
+       0x1B80, 0x6C070B07,
+       0x1B80, 0x7B200B15,
+       0x1B80, 0x7B200B17,
+       0x1B80, 0x7A000B25,
+       0x1B80, 0x7A000B27,
+       0x1B80, 0x79000B35,
+       0x1B80, 0x79000B37,
+       0x1B80, 0x7F200B45,
+       0x1B80, 0x7F200B47,
+       0x1B80, 0x7E000B55,
+       0x1B80, 0x7E000B57,
+       0x1B80, 0x7D000B65,
+       0x1B80, 0x7D000B67,
+       0x1B80, 0x00010B75,
+       0x1B80, 0x00010B77,
+       0x1B80, 0x62850B85,
+       0x1B80, 0x62850B87,
+       0x1B80, 0xE1690B95,
+       0x1B80, 0xE1690B97,
+       0x1B80, 0x00010BA5,
+       0x1B80, 0x00010BA7,
+       0x1B80, 0x5C320BB5,
+       0x1B80, 0x5C320BB7,
+       0x1B80, 0xE1C50BC5,
+       0x1B80, 0xE1C50BC7,
+       0x1B80, 0xE1950BD5,
+       0x1B80, 0xE1950BD7,
+       0x1B80, 0x00010BE5,
+       0x1B80, 0x00010BE7,
+       0x1B80, 0x5C320BF5,
+       0x1B80, 0x5C320BF7,
+       0x1B80, 0x63F40C05,
+       0x1B80, 0x63F40C07,
+       0x1B80, 0x62850C15,
+       0x1B80, 0x62850C17,
+       0x1B80, 0x0BB00C25,
+       0x1B80, 0x0BB00C27,
+       0x1B80, 0xE1690C35,
+       0x1B80, 0xE1690C37,
+       0x1B80, 0xE1C90C45,
+       0x1B80, 0xE1C90C47,
+       0x1B80, 0x5C320C55,
+       0x1B80, 0x5C320C57,
+       0x1B80, 0x63FC0C65,
+       0x1B80, 0x63FC0C67,
+       0x1B80, 0x62850C75,
+       0x1B80, 0x62850C77,
+       0x1B80, 0x0BB10C85,
+       0x1B80, 0x0BB10C87,
+       0x1B80, 0xE1690C95,
+       0x1B80, 0xE1690C97,
+       0x1B80, 0xE1C90CA5,
+       0x1B80, 0xE1C90CA7,
+       0x1B80, 0x63030CB5,
+       0x1B80, 0x63030CB7,
+       0x1B80, 0xE1690CC5,
+       0x1B80, 0xE1690CC7,
+       0x1B80, 0xE1C90CD5,
+       0x1B80, 0xE1C90CD7,
+       0x1B80, 0xF7040CE5,
+       0x1B80, 0xF7040CE7,
+       0x1B80, 0x630B0CF5,
+       0x1B80, 0x630B0CF7,
+       0x1B80, 0xE1690D05,
+       0x1B80, 0xE1690D07,
+       0x1B80, 0xE1C90D15,
+       0x1B80, 0xE1C90D17,
+       0x1B80, 0x00010D25,
+       0x1B80, 0x00010D27,
+       0x1B80, 0x30F70D35,
+       0x1B80, 0x30F70D37,
+       0x1B80, 0x00230D45,
+       0x1B80, 0x00230D47,
+       0x1B80, 0xE1CE0D55,
+       0x1B80, 0xE1CE0D57,
+       0x1B80, 0x00020D65,
+       0x1B80, 0x00020D67,
+       0x1B80, 0x54E90D75,
+       0x1B80, 0x54E90D77,
+       0x1B80, 0x0BA60D85,
+       0x1B80, 0x0BA60D87,
+       0x1B80, 0x00230D95,
+       0x1B80, 0x00230D97,
+       0x1B80, 0xE1CE0DA5,
+       0x1B80, 0xE1CE0DA7,
+       0x1B80, 0x00020DB5,
+       0x1B80, 0x00020DB7,
+       0x1B80, 0x4D100DC5,
+       0x1B80, 0x4D100DC7,
+       0x1B80, 0x30A80DD5,
+       0x1B80, 0x30A80DD7,
+       0x1B80, 0x30F10DE5,
+       0x1B80, 0x30F10DE7,
+       0x1B80, 0x00220DF5,
+       0x1B80, 0x00220DF7,
+       0x1B80, 0xE1CE0E05,
+       0x1B80, 0xE1CE0E07,
+       0x1B80, 0x00020E15,
+       0x1B80, 0x00020E17,
+       0x1B80, 0x54E80E25,
+       0x1B80, 0x54E80E27,
+       0x1B80, 0x0BA60E35,
+       0x1B80, 0x0BA60E37,
+       0x1B80, 0x00220E45,
+       0x1B80, 0x00220E47,
+       0x1B80, 0xE1CE0E55,
+       0x1B80, 0xE1CE0E57,
+       0x1B80, 0x00020E65,
+       0x1B80, 0x00020E67,
+       0x1B80, 0x4D100E75,
+       0x1B80, 0x4D100E77,
+       0x1B80, 0x30A80E85,
+       0x1B80, 0x30A80E87,
+       0x1B80, 0x5C320E95,
+       0x1B80, 0x5C320E97,
+       0x1B80, 0x54F00EA5,
+       0x1B80, 0x54F00EA7,
+       0x1B80, 0x67F10EB5,
+       0x1B80, 0x67F10EB7,
+       0x1B80, 0xE1950EC5,
+       0x1B80, 0xE1950EC7,
+       0x1B80, 0xE1CE0ED5,
+       0x1B80, 0xE1CE0ED7,
+       0x1B80, 0x67F40EE5,
+       0x1B80, 0x67F40EE7,
+       0x1B80, 0xE1950EF5,
+       0x1B80, 0xE1950EF7,
+       0x1B80, 0xE1CE0F05,
+       0x1B80, 0xE1CE0F07,
+       0x1B80, 0x5C320F15,
+       0x1B80, 0x5C320F17,
+       0x1B80, 0x54F10F25,
+       0x1B80, 0x54F10F27,
+       0x1B80, 0x0BA80F35,
+       0x1B80, 0x0BA80F37,
+       0x1B80, 0x67F80F45,
+       0x1B80, 0x67F80F47,
+       0x1B80, 0xE1950F55,
+       0x1B80, 0xE1950F57,
+       0x1B80, 0xE1CE0F65,
+       0x1B80, 0xE1CE0F67,
+       0x1B80, 0x5C320F75,
+       0x1B80, 0x5C320F77,
+       0x1B80, 0x54F10F85,
+       0x1B80, 0x54F10F87,
+       0x1B80, 0x0BA90F95,
+       0x1B80, 0x0BA90F97,
+       0x1B80, 0x67FC0FA5,
+       0x1B80, 0x67FC0FA7,
+       0x1B80, 0xE1950FB5,
+       0x1B80, 0xE1950FB7,
+       0x1B80, 0xE1CE0FC5,
+       0x1B80, 0xE1CE0FC7,
+       0x1B80, 0x67FF0FD5,
+       0x1B80, 0x67FF0FD7,
+       0x1B80, 0xE1950FE5,
+       0x1B80, 0xE1950FE7,
+       0x1B80, 0xE1CE0FF5,
+       0x1B80, 0xE1CE0FF7,
+       0x1B80, 0x5C321005,
+       0x1B80, 0x5C321007,
+       0x1B80, 0x54F21015,
+       0x1B80, 0x54F21017,
+       0x1B80, 0x67001025,
+       0x1B80, 0x67001027,
+       0x1B80, 0xE1951035,
+       0x1B80, 0xE1951037,
+       0x1B80, 0xE1CE1045,
+       0x1B80, 0xE1CE1047,
+       0x1B80, 0x67031055,
+       0x1B80, 0x67031057,
+       0x1B80, 0xE1951065,
+       0x1B80, 0xE1951067,
+       0x1B80, 0xE1CE1075,
+       0x1B80, 0xE1CE1077,
+       0x1B80, 0xF9CC1085,
+       0x1B80, 0xF9CC1087,
+       0x1B80, 0x67071095,
+       0x1B80, 0x67071097,
+       0x1B80, 0xE19510A5,
+       0x1B80, 0xE19510A7,
+       0x1B80, 0xE1CE10B5,
+       0x1B80, 0xE1CE10B7,
+       0x1B80, 0xFAD310C5,
+       0x1B80, 0xFAD310C7,
+       0x1B80, 0x5C3210D5,
+       0x1B80, 0x5C3210D7,
+       0x1B80, 0x54F310E5,
+       0x1B80, 0x54F310E7,
+       0x1B80, 0x670B10F5,
+       0x1B80, 0x670B10F7,
+       0x1B80, 0xE1951105,
+       0x1B80, 0xE1951107,
+       0x1B80, 0xE1CE1115,
+       0x1B80, 0xE1CE1117,
+       0x1B80, 0x670E1125,
+       0x1B80, 0x670E1127,
+       0x1B80, 0xE1951135,
+       0x1B80, 0xE1951137,
+       0x1B80, 0xE1CE1145,
+       0x1B80, 0xE1CE1147,
+       0x1B80, 0x4D101155,
+       0x1B80, 0x4D101157,
+       0x1B80, 0x30A81165,
+       0x1B80, 0x30A81167,
+       0x1B80, 0x00011175,
+       0x1B80, 0x00011177,
+       0x1B80, 0x6C001185,
+       0x1B80, 0x6C001187,
+       0x1B80, 0x00061195,
+       0x1B80, 0x00061197,
+       0x1B80, 0x530011A5,
+       0x1B80, 0x530011A7,
+       0x1B80, 0x57F711B5,
+       0x1B80, 0x57F711B7,
+       0x1B80, 0x582111C5,
+       0x1B80, 0x582111C7,
+       0x1B80, 0x592E11D5,
+       0x1B80, 0x592E11D7,
+       0x1B80, 0x5A3811E5,
+       0x1B80, 0x5A3811E7,
+       0x1B80, 0x5B4111F5,
+       0x1B80, 0x5B4111F7,
+       0x1B80, 0x00071205,
+       0x1B80, 0x00071207,
+       0x1B80, 0x5C001215,
+       0x1B80, 0x5C001217,
+       0x1B80, 0x4B001225,
+       0x1B80, 0x4B001227,
+       0x1B80, 0x4E8F1235,
+       0x1B80, 0x4E8F1237,
+       0x1B80, 0x4F151245,
+       0x1B80, 0x4F151247,
+       0x1B80, 0x00041255,
+       0x1B80, 0x00041257,
+       0x1B80, 0xE1B31265,
+       0x1B80, 0xE1B31267,
+       0x1B80, 0xAB001275,
+       0x1B80, 0xAB001277,
+       0x1B80, 0x00011285,
+       0x1B80, 0x00011287,
+       0x1B80, 0x6C001295,
+       0x1B80, 0x6C001297,
+       0x1B80, 0x000612A5,
+       0x1B80, 0x000612A7,
+       0x1B80, 0x530012B5,
+       0x1B80, 0x530012B7,
+       0x1B80, 0x57F712C5,
+       0x1B80, 0x57F712C7,
+       0x1B80, 0x582112D5,
+       0x1B80, 0x582112D7,
+       0x1B80, 0x592E12E5,
+       0x1B80, 0x592E12E7,
+       0x1B80, 0x5A3812F5,
+       0x1B80, 0x5A3812F7,
+       0x1B80, 0x5B411305,
+       0x1B80, 0x5B411307,
+       0x1B80, 0x00071315,
+       0x1B80, 0x00071317,
+       0x1B80, 0x5C001325,
+       0x1B80, 0x5C001327,
+       0x1B80, 0x4B401335,
+       0x1B80, 0x4B401337,
+       0x1B80, 0x4E971345,
+       0x1B80, 0x4E971347,
+       0x1B80, 0x4F111355,
+       0x1B80, 0x4F111357,
+       0x1B80, 0x00041365,
+       0x1B80, 0x00041367,
+       0x1B80, 0xE1B31375,
+       0x1B80, 0xE1B31377,
+       0x1B80, 0xAB001385,
+       0x1B80, 0xAB001387,
+       0x1B80, 0x8B001395,
+       0x1B80, 0x8B001397,
+       0x1B80, 0xAB0013A5,
+       0x1B80, 0xAB0013A7,
+       0x1B80, 0x8A1913B5,
+       0x1B80, 0x8A1913B7,
+       0x1B80, 0x301D13C5,
+       0x1B80, 0x301D13C7,
+       0x1B80, 0x000113D5,
+       0x1B80, 0x000113D7,
+       0x1B80, 0x6C0113E5,
+       0x1B80, 0x6C0113E7,
+       0x1B80, 0x000613F5,
+       0x1B80, 0x000613F7,
+       0x1B80, 0x53011405,
+       0x1B80, 0x53011407,
+       0x1B80, 0x57F71415,
+       0x1B80, 0x57F71417,
+       0x1B80, 0x58211425,
+       0x1B80, 0x58211427,
+       0x1B80, 0x592E1435,
+       0x1B80, 0x592E1437,
+       0x1B80, 0x5A381445,
+       0x1B80, 0x5A381447,
+       0x1B80, 0x5B411455,
+       0x1B80, 0x5B411457,
+       0x1B80, 0x00071465,
+       0x1B80, 0x00071467,
+       0x1B80, 0x5C001475,
+       0x1B80, 0x5C001477,
+       0x1B80, 0x4B001485,
+       0x1B80, 0x4B001487,
+       0x1B80, 0x4E871495,
+       0x1B80, 0x4E871497,
+       0x1B80, 0x4F1114A5,
+       0x1B80, 0x4F1114A7,
+       0x1B80, 0x000414B5,
+       0x1B80, 0x000414B7,
+       0x1B80, 0xE1B314C5,
+       0x1B80, 0xE1B314C7,
+       0x1B80, 0xAB0014D5,
+       0x1B80, 0xAB0014D7,
+       0x1B80, 0x000614E5,
+       0x1B80, 0x000614E7,
+       0x1B80, 0x577714F5,
+       0x1B80, 0x577714F7,
+       0x1B80, 0x00071505,
+       0x1B80, 0x00071507,
+       0x1B80, 0x4E861515,
+       0x1B80, 0x4E861517,
+       0x1B80, 0x00041525,
+       0x1B80, 0x00041527,
+       0x1B80, 0x00011535,
+       0x1B80, 0x00011537,
+       0x1B80, 0x00011545,
+       0x1B80, 0x00011547,
+       0x1B80, 0x7B241555,
+       0x1B80, 0x7B241557,
+       0x1B80, 0x7A401565,
+       0x1B80, 0x7A401567,
+       0x1B80, 0x79001575,
+       0x1B80, 0x79001577,
+       0x1B80, 0x55031585,
+       0x1B80, 0x55031587,
+       0x1B80, 0x31611595,
+       0x1B80, 0x31611597,
+       0x1B80, 0x7B1C15A5,
+       0x1B80, 0x7B1C15A7,
+       0x1B80, 0x7A4015B5,
+       0x1B80, 0x7A4015B7,
+       0x1B80, 0x550B15C5,
+       0x1B80, 0x550B15C7,
+       0x1B80, 0x316115D5,
+       0x1B80, 0x316115D7,
+       0x1B80, 0x7B2015E5,
+       0x1B80, 0x7B2015E7,
+       0x1B80, 0x7A0015F5,
+       0x1B80, 0x7A0015F7,
+       0x1B80, 0x55131605,
+       0x1B80, 0x55131607,
+       0x1B80, 0x74011615,
+       0x1B80, 0x74011617,
+       0x1B80, 0x74001625,
+       0x1B80, 0x74001627,
+       0x1B80, 0x8E001635,
+       0x1B80, 0x8E001637,
+       0x1B80, 0x00011645,
+       0x1B80, 0x00011647,
+       0x1B80, 0x57021655,
+       0x1B80, 0x57021657,
+       0x1B80, 0x57001665,
+       0x1B80, 0x57001667,
+       0x1B80, 0x97001675,
+       0x1B80, 0x97001677,
+       0x1B80, 0x00011685,
+       0x1B80, 0x00011687,
+       0x1B80, 0x4F781695,
+       0x1B80, 0x4F781697,
+       0x1B80, 0x538816A5,
+       0x1B80, 0x538816A7,
+       0x1B80, 0xE17516B5,
+       0x1B80, 0xE17516B7,
+       0x1B80, 0x548016C5,
+       0x1B80, 0x548016C7,
+       0x1B80, 0x540016D5,
+       0x1B80, 0x540016D7,
+       0x1B80, 0x548116E5,
+       0x1B80, 0x548116E7,
+       0x1B80, 0x540016F5,
+       0x1B80, 0x540016F7,
+       0x1B80, 0x54821705,
+       0x1B80, 0x54821707,
+       0x1B80, 0x54001715,
+       0x1B80, 0x54001717,
+       0x1B80, 0xE1801725,
+       0x1B80, 0xE1801727,
+       0x1B80, 0xBF1D1735,
+       0x1B80, 0xBF1D1737,
+       0x1B80, 0x301D1745,
+       0x1B80, 0x301D1747,
+       0x1B80, 0xE1551755,
+       0x1B80, 0xE1551757,
+       0x1B80, 0xE15A1765,
+       0x1B80, 0xE15A1767,
+       0x1B80, 0xE15E1775,
+       0x1B80, 0xE15E1777,
+       0x1B80, 0xE1651785,
+       0x1B80, 0xE1651787,
+       0x1B80, 0xE1C51795,
+       0x1B80, 0xE1C51797,
+       0x1B80, 0x551317A5,
+       0x1B80, 0x551317A7,
+       0x1B80, 0xE16117B5,
+       0x1B80, 0xE16117B7,
+       0x1B80, 0x551517C5,
+       0x1B80, 0x551517C7,
+       0x1B80, 0xE16517D5,
+       0x1B80, 0xE16517D7,
+       0x1B80, 0xE1C517E5,
+       0x1B80, 0xE1C517E7,
+       0x1B80, 0x000117F5,
+       0x1B80, 0x000117F7,
+       0x1B80, 0x54BF1805,
+       0x1B80, 0x54BF1807,
+       0x1B80, 0x54C01815,
+       0x1B80, 0x54C01817,
+       0x1B80, 0x54A31825,
+       0x1B80, 0x54A31827,
+       0x1B80, 0x54C11835,
+       0x1B80, 0x54C11837,
+       0x1B80, 0x54A41845,
+       0x1B80, 0x54A41847,
+       0x1B80, 0x4C181855,
+       0x1B80, 0x4C181857,
+       0x1B80, 0xBF071865,
+       0x1B80, 0xBF071867,
+       0x1B80, 0x54C21875,
+       0x1B80, 0x54C21877,
+       0x1B80, 0x54A41885,
+       0x1B80, 0x54A41887,
+       0x1B80, 0xBF041895,
+       0x1B80, 0xBF041897,
+       0x1B80, 0x54C118A5,
+       0x1B80, 0x54C118A7,
+       0x1B80, 0x54A318B5,
+       0x1B80, 0x54A318B7,
+       0x1B80, 0xBF0118C5,
+       0x1B80, 0xBF0118C7,
+       0x1B80, 0xE1D318D5,
+       0x1B80, 0xE1D318D7,
+       0x1B80, 0x54DF18E5,
+       0x1B80, 0x54DF18E7,
+       0x1B80, 0x000118F5,
+       0x1B80, 0x000118F7,
+       0x1B80, 0x54BF1905,
+       0x1B80, 0x54BF1907,
+       0x1B80, 0x54E51915,
+       0x1B80, 0x54E51917,
+       0x1B80, 0x050A1925,
+       0x1B80, 0x050A1927,
+       0x1B80, 0x54DF1935,
+       0x1B80, 0x54DF1937,
+       0x1B80, 0x00011945,
+       0x1B80, 0x00011947,
+       0x1B80, 0x7F201955,
+       0x1B80, 0x7F201957,
+       0x1B80, 0x7E001965,
+       0x1B80, 0x7E001967,
+       0x1B80, 0x7D001975,
+       0x1B80, 0x7D001977,
+       0x1B80, 0x55011985,
+       0x1B80, 0x55011987,
+       0x1B80, 0x5C311995,
+       0x1B80, 0x5C311997,
+       0x1B80, 0xE16119A5,
+       0x1B80, 0xE16119A7,
+       0x1B80, 0xE16519B5,
+       0x1B80, 0xE16519B7,
+       0x1B80, 0x548019C5,
+       0x1B80, 0x548019C7,
+       0x1B80, 0x540019D5,
+       0x1B80, 0x540019D7,
+       0x1B80, 0x548119E5,
+       0x1B80, 0x548119E7,
+       0x1B80, 0x540019F5,
+       0x1B80, 0x540019F7,
+       0x1B80, 0x54821A05,
+       0x1B80, 0x54821A07,
+       0x1B80, 0x54001A15,
+       0x1B80, 0x54001A17,
+       0x1B80, 0xE1801A25,
+       0x1B80, 0xE1801A27,
+       0x1B80, 0xBFED1A35,
+       0x1B80, 0xBFED1A37,
+       0x1B80, 0x301D1A45,
+       0x1B80, 0x301D1A47,
+       0x1B80, 0x00231A55,
+       0x1B80, 0x00231A57,
+       0x1B80, 0x7B201A65,
+       0x1B80, 0x7B201A67,
+       0x1B80, 0x7A001A75,
+       0x1B80, 0x7A001A77,
+       0x1B80, 0x79001A85,
+       0x1B80, 0x79001A87,
+       0x1B80, 0xE1C91A95,
+       0x1B80, 0xE1C91A97,
+       0x1B80, 0x00021AA5,
+       0x1B80, 0x00021AA7,
+       0x1B80, 0x00011AB5,
+       0x1B80, 0x00011AB7,
+       0x1B80, 0x00221AC5,
+       0x1B80, 0x00221AC7,
+       0x1B80, 0x7B201AD5,
+       0x1B80, 0x7B201AD7,
+       0x1B80, 0x7A001AE5,
+       0x1B80, 0x7A001AE7,
+       0x1B80, 0x79001AF5,
+       0x1B80, 0x79001AF7,
+       0x1B80, 0xE1C91B05,
+       0x1B80, 0xE1C91B07,
+       0x1B80, 0x00021B15,
+       0x1B80, 0x00021B17,
+       0x1B80, 0x00011B25,
+       0x1B80, 0x00011B27,
+       0x1B80, 0x74021B35,
+       0x1B80, 0x74021B37,
+       0x1B80, 0x003F1B45,
+       0x1B80, 0x003F1B47,
+       0x1B80, 0x74001B55,
+       0x1B80, 0x74001B57,
+       0x1B80, 0x00021B65,
+       0x1B80, 0x00021B67,
+       0x1B80, 0x00011B75,
+       0x1B80, 0x00011B77,
+       0x1B80, 0x4D041B85,
+       0x1B80, 0x4D041B87,
+       0x1B80, 0x2EF81B95,
+       0x1B80, 0x2EF81B97,
+       0x1B80, 0x00001BA5,
+       0x1B80, 0x00001BA7,
+       0x1B80, 0x23301BB5,
+       0x1B80, 0x23301BB7,
+       0x1B80, 0x00241BC5,
+       0x1B80, 0x00241BC7,
+       0x1B80, 0x23E01BD5,
+       0x1B80, 0x23E01BD7,
+       0x1B80, 0x003F1BE5,
+       0x1B80, 0x003F1BE7,
+       0x1B80, 0x23FC1BF5,
+       0x1B80, 0x23FC1BF7,
+       0x1B80, 0xBFCE1C05,
+       0x1B80, 0xBFCE1C07,
+       0x1B80, 0x2EF01C15,
+       0x1B80, 0x2EF01C17,
+       0x1B80, 0x00001C25,
+       0x1B80, 0x00001C27,
+       0x1B80, 0x4D001C35,
+       0x1B80, 0x4D001C37,
+       0x1B80, 0x00011C45,
+       0x1B80, 0x00011C47,
+       0x1B80, 0x549F1C55,
+       0x1B80, 0x549F1C57,
+       0x1B80, 0x54FF1C65,
+       0x1B80, 0x54FF1C67,
+       0x1B80, 0x54001C75,
+       0x1B80, 0x54001C77,
+       0x1B80, 0x00011C85,
+       0x1B80, 0x00011C87,
+       0x1B80, 0x5C311C95,
+       0x1B80, 0x5C311C97,
+       0x1B80, 0x07141CA5,
+       0x1B80, 0x07141CA7,
+       0x1B80, 0x54001CB5,
+       0x1B80, 0x54001CB7,
+       0x1B80, 0x5C321CC5,
+       0x1B80, 0x5C321CC7,
+       0x1B80, 0x00011CD5,
+       0x1B80, 0x00011CD7,
+       0x1B80, 0x5C321CE5,
+       0x1B80, 0x5C321CE7,
+       0x1B80, 0x07141CF5,
+       0x1B80, 0x07141CF7,
+       0x1B80, 0x54001D05,
+       0x1B80, 0x54001D07,
+       0x1B80, 0x5C311D15,
+       0x1B80, 0x5C311D17,
+       0x1B80, 0x00011D25,
+       0x1B80, 0x00011D27,
+       0x1B80, 0x4C981D35,
+       0x1B80, 0x4C981D37,
+       0x1B80, 0x4C181D45,
+       0x1B80, 0x4C181D47,
+       0x1B80, 0x00011D55,
+       0x1B80, 0x00011D57,
+       0x1B80, 0x5C321D65,
+       0x1B80, 0x5C321D67,
+       0x1B80, 0x62841D75,
+       0x1B80, 0x62841D77,
+       0x1B80, 0x66861D85,
+       0x1B80, 0x66861D87,
+       0x1B80, 0x6C031D95,
+       0x1B80, 0x6C031D97,
+       0x1B80, 0x7B201DA5,
+       0x1B80, 0x7B201DA7,
+       0x1B80, 0x7A001DB5,
+       0x1B80, 0x7A001DB7,
+       0x1B80, 0x79001DC5,
+       0x1B80, 0x79001DC7,
+       0x1B80, 0x7F201DD5,
+       0x1B80, 0x7F201DD7,
+       0x1B80, 0x7E001DE5,
+       0x1B80, 0x7E001DE7,
+       0x1B80, 0x7D001DF5,
+       0x1B80, 0x7D001DF7,
+       0x1B80, 0x09011E05,
+       0x1B80, 0x09011E07,
+       0x1B80, 0x0C011E15,
+       0x1B80, 0x0C011E17,
+       0x1B80, 0x0BA61E25,
+       0x1B80, 0x0BA61E27,
+       0x1B80, 0x00011E35,
+       0x1B80, 0x00011E37,
+       0x1B80, 0x00000006,
+       0x1B80, 0x00000002,
+
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8821c_bb, rtw_phy_cfg_bb);
+
+static const struct rtw_phy_pg_cfg_pair rtw8821c_bb_pg_type0[] = {
+       { 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638, },
+       { 0, 0, 0, 0x00000c24, 0xffffffff, 0x36363636, },
+       { 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234, },
+       { 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363636, },
+       { 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032, },
+       { 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363636, },
+       { 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032, },
+       { 0, 0, 0, 0x00000c44, 0xffffffff, 0x22222224, },
+       { 1, 0, 0, 0x00000c24, 0xffffffff, 0x34343434, },
+       { 1, 0, 0, 0x00000c28, 0xffffffff, 0x26283032, },
+       { 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32343434, },
+       { 1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830, },
+       { 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32343434, },
+       { 1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830, },
+       { 1, 0, 0, 0x00000c44, 0xffffffff, 0x20202022, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8821c_bb_pg_type0);
+
+static const u32 rtw8821c_rf_a[] = {
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x000, 0x00010000,
+       0x018, 0x00010D24,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x000, 0x00010000,
+       0x018, 0x00010D24,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x000, 0x00010000,
+       0x018, 0x00010D24,
+       0xA0000000,     0x00000000,
+       0x000, 0x00010000,
+       0x018, 0x00010D24,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00080000,
+       0x033, 0x00000002,
+       0x03E, 0x0000003F,
+       0x03F, 0x000C0F4E,
+       0x033, 0x00000001,
+       0x03E, 0x00000034,
+       0x03F, 0x0004080E,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00080000,
+       0x033, 0x00000002,
+       0x03E, 0x0000003F,
+       0x03F, 0x000C0F4E,
+       0x033, 0x00000001,
+       0x03E, 0x00000034,
+       0x03F, 0x0004080E,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00080000,
+       0x033, 0x00000002,
+       0x03E, 0x0000003F,
+       0x03F, 0x000C0F4E,
+       0x033, 0x00000001,
+       0x03E, 0x00000034,
+       0x03F, 0x0004080E,
+       0xA0000000,     0x00000000,
+       0x0EF, 0x00080000,
+       0x033, 0x00000002,
+       0x03E, 0x0000003F,
+       0x03F, 0x000C0F4E,
+       0x033, 0x00000001,
+       0x03E, 0x00000034,
+       0x03F, 0x0004080E,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00002000,
+       0x033, 0x00000000,
+       0x03F, 0x000005DF,
+       0x0EF, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00002000,
+       0x033, 0x00000000,
+       0x03F, 0x000005DF,
+       0x0EF, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00002000,
+       0x033, 0x00000000,
+       0x03F, 0x000005DF,
+       0x0EF, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EF, 0x00002000,
+       0x033, 0x00000000,
+       0x03F, 0x000005DF,
+       0x0EF, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00000400,
+       0x033, 0x00000000,
+       0x03F, 0x000005DF,
+       0x0EE, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00000400,
+       0x033, 0x00000000,
+       0x03F, 0x000005DF,
+       0x0EE, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00000400,
+       0x033, 0x00000000,
+       0x03F, 0x000005DF,
+       0x0EE, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EE, 0x00000400,
+       0x033, 0x00000000,
+       0x03F, 0x000005DF,
+       0x0EE, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0B0, 0x000FF0F8,
+       0x0B1, 0x0007DBE4,
+       0x0B2, 0x000225D1,
+       0x0B3, 0x000FC760,
+       0x0B4, 0x00099DD0,
+       0x0B5, 0x000400FC,
+       0x0B6, 0x000187F0,
+       0x0B7, 0x00030018,
+       0x0B8, 0x00080800,
+       0x0B9, 0x00000000,
+       0x0BA, 0x00008000,
+       0x0BB, 0x00000004,
+       0x0BC, 0x00040000,
+       0x0BD, 0x00000000,
+       0x0BE, 0x00000000,
+       0x0BF, 0x00000000,
+       0x0C0, 0x00000000,
+       0x0C1, 0x00000000,
+       0x0C2, 0x00000000,
+       0x0C3, 0x00000000,
+       0x0C4, 0x00002402,
+       0x0C5, 0x00000009,
+       0x0C6, 0x00040299,
+       0x0C7, 0x00055555,
+       0x0C8, 0x0000C16C,
+       0x0C9, 0x0001C140,
+       0x0CA, 0x00000000,
+       0x0CB, 0x00000000,
+       0x0CC, 0x00000000,
+       0x0CD, 0x00000000,
+       0x0CE, 0x00090C00,
+       0x0CF, 0x0006D200,
+       0x0DF, 0x00000009,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0B0, 0x000FF0F8,
+       0x0B1, 0x0007DBE4,
+       0x0B2, 0x000225D1,
+       0x0B3, 0x000FC760,
+       0x0B4, 0x00099DD0,
+       0x0B5, 0x000400FC,
+       0x0B6, 0x000187F0,
+       0x0B7, 0x00030018,
+       0x0B8, 0x00080800,
+       0x0B9, 0x00000000,
+       0x0BA, 0x00008000,
+       0x0BB, 0x00000004,
+       0x0BC, 0x00040000,
+       0x0BD, 0x00000000,
+       0x0BE, 0x00000000,
+       0x0BF, 0x00000000,
+       0x0C0, 0x00000000,
+       0x0C1, 0x00000000,
+       0x0C2, 0x00000000,
+       0x0C3, 0x00000000,
+       0x0C4, 0x00002402,
+       0x0C5, 0x00000009,
+       0x0C6, 0x00040299,
+       0x0C7, 0x00055555,
+       0x0C8, 0x0000C16C,
+       0x0C9, 0x0001C140,
+       0x0CA, 0x00000000,
+       0x0CB, 0x00000000,
+       0x0CC, 0x00000000,
+       0x0CD, 0x00000000,
+       0x0CE, 0x00090C00,
+       0x0CF, 0x0006D200,
+       0x0DF, 0x00000009,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0B0, 0x000FF0F8,
+       0x0B1, 0x0007DBE4,
+       0x0B2, 0x000225D1,
+       0x0B3, 0x000FC760,
+       0x0B4, 0x00099DD0,
+       0x0B5, 0x000400FC,
+       0x0B6, 0x000187F0,
+       0x0B7, 0x00030018,
+       0x0B8, 0x00080800,
+       0x0B9, 0x00000000,
+       0x0BA, 0x00008000,
+       0x0BB, 0x00000004,
+       0x0BC, 0x00040000,
+       0x0BD, 0x00000000,
+       0x0BE, 0x00000000,
+       0x0BF, 0x00000000,
+       0x0C0, 0x00000000,
+       0x0C1, 0x00000000,
+       0x0C2, 0x00000000,
+       0x0C3, 0x00000000,
+       0x0C4, 0x00002402,
+       0x0C5, 0x00000009,
+       0x0C6, 0x00040299,
+       0x0C7, 0x00055555,
+       0x0C8, 0x0000C16C,
+       0x0C9, 0x0001C140,
+       0x0CA, 0x00000000,
+       0x0CB, 0x00000000,
+       0x0CC, 0x00000000,
+       0x0CD, 0x00000000,
+       0x0CE, 0x00090C00,
+       0x0CF, 0x0006D200,
+       0x0DF, 0x00000009,
+       0xA0000000,     0x00000000,
+       0x0B0, 0x000FF0F8,
+       0x0B1, 0x0007DBE4,
+       0x0B2, 0x000225D1,
+       0x0B3, 0x000FC760,
+       0x0B4, 0x00099DD0,
+       0x0B5, 0x000400FC,
+       0x0B6, 0x000187F0,
+       0x0B7, 0x00030018,
+       0x0B8, 0x00080800,
+       0x0B9, 0x00000000,
+       0x0BA, 0x00008000,
+       0x0BB, 0x00000004,
+       0x0BC, 0x00040000,
+       0x0BD, 0x00000000,
+       0x0BE, 0x00000000,
+       0x0BF, 0x00000000,
+       0x0C0, 0x00000000,
+       0x0C1, 0x00000000,
+       0x0C2, 0x00000000,
+       0x0C3, 0x00000000,
+       0x0C4, 0x00002402,
+       0x0C5, 0x00000009,
+       0x0C6, 0x00040299,
+       0x0C7, 0x00055555,
+       0x0C8, 0x0000C16C,
+       0x0C9, 0x0001C140,
+       0x0CA, 0x00000000,
+       0x0CB, 0x00000000,
+       0x0CC, 0x00000000,
+       0x0CD, 0x00000000,
+       0x0CE, 0x00090C00,
+       0x0CF, 0x0006D200,
+       0x0DF, 0x00000009,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00010000,
+       0x033, 0x00000058,
+       0x03F, 0x0000001C,
+       0x0EE, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00010000,
+       0x033, 0x00000058,
+       0x03F, 0x0000001C,
+       0x0EE, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00010000,
+       0x033, 0x00000058,
+       0x03F, 0x0000001C,
+       0x0EE, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EE, 0x00010000,
+       0x033, 0x00000058,
+       0x03F, 0x0000001C,
+       0x0EE, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x018, 0x00010524,
+       0x081, 0x0000FCC1,
+       0x089, 0x00000004,
+       0x08A, 0x0008A186,
+       0x08B, 0x0006FFFC,
+       0x08C, 0x000312C7,
+       0x08D, 0x00020888,
+       0x08E, 0x00064140,
+       0x08F, 0x000A8010,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x018, 0x00010524,
+       0x081, 0x0000FCC1,
+       0x089, 0x00000004,
+       0x08A, 0x0008A186,
+       0x08B, 0x0006FFFC,
+       0x08C, 0x000312C7,
+       0x08D, 0x00020888,
+       0x08E, 0x00064140,
+       0x08F, 0x000A8010,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x018, 0x00010524,
+       0x081, 0x0000FCC1,
+       0x089, 0x00000004,
+       0x08A, 0x0008A186,
+       0x08B, 0x0007060C,
+       0x08C, 0x000312C7,
+       0x08D, 0x00020888,
+       0x08E, 0x00064140,
+       0x08F, 0x000A8010,
+       0xA0000000,     0x00000000,
+       0x018, 0x00010524,
+       0x081, 0x0000FCC1,
+       0x089, 0x00000004,
+       0x08A, 0x0008A186,
+       0x08B, 0x0007060C,
+       0x08C, 0x000312C7,
+       0x08D, 0x00020888,
+       0x08E, 0x00064140,
+       0x08F, 0x000A8010,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0DD, 0x00000020,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0DD, 0x00000020,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0DD, 0x00000020,
+       0xA0000000,     0x00000000,
+       0x0DD, 0x00000020,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00020000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00020000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00020000,
+       0xA0000000,     0x00000000,
+       0x0EF, 0x00020000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x033, 0x00000007,
+       0x03E, 0x00038000,
+       0x03F, 0x000C3186,
+       0x033, 0x00000006,
+       0x03E, 0x00038080,
+       0x03F, 0x000C3186,
+       0x033, 0x00000005,
+       0x03E, 0x000380C8,
+       0x03F, 0x000C3186,
+       0x033, 0x00000004,
+       0x03E, 0x00038190,
+       0x03F, 0x000C3186,
+       0x033, 0x00000003,
+       0x03E, 0x00038998,
+       0x03F, 0x000C3186,
+       0x033, 0x00000002,
+       0x03E, 0x00039840,
+       0x03F, 0x000C3186,
+       0x033, 0x00000001,
+       0x03E, 0x000398C4,
+       0x03F, 0x000C3186,
+       0x033, 0x00000000,
+       0x03E, 0x00039930,
+       0x03F, 0x000C3186,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x033, 0x00000007,
+       0x03E, 0x00038000,
+       0x03F, 0x000C3186,
+       0x033, 0x00000006,
+       0x03E, 0x00038080,
+       0x03F, 0x000C3186,
+       0x033, 0x00000005,
+       0x03E, 0x000380C8,
+       0x03F, 0x000C3186,
+       0x033, 0x00000004,
+       0x03E, 0x00038190,
+       0x03F, 0x000C3186,
+       0x033, 0x00000003,
+       0x03E, 0x00038998,
+       0x03F, 0x000C3186,
+       0x033, 0x00000002,
+       0x03E, 0x00039840,
+       0x03F, 0x000C3186,
+       0x033, 0x00000001,
+       0x03E, 0x000398C4,
+       0x03F, 0x000C3186,
+       0x033, 0x00000000,
+       0x03E, 0x00039930,
+       0x03F, 0x000C3186,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x033, 0x00000007,
+       0x03E, 0x00038000,
+       0x03F, 0x000C3186,
+       0x033, 0x00000006,
+       0x03E, 0x00038080,
+       0x03F, 0x000C3186,
+       0x033, 0x00000005,
+       0x03E, 0x000380C8,
+       0x03F, 0x000C3186,
+       0x033, 0x00000004,
+       0x03E, 0x00038190,
+       0x03F, 0x000C3186,
+       0x033, 0x00000003,
+       0x03E, 0x00038998,
+       0x03F, 0x000C3186,
+       0x033, 0x00000002,
+       0x03E, 0x00039840,
+       0x03F, 0x000C3186,
+       0x033, 0x00000001,
+       0x03E, 0x000398C4,
+       0x03F, 0x000C3186,
+       0x033, 0x00000000,
+       0x03E, 0x00039930,
+       0x03F, 0x000C3186,
+       0xA0000000,     0x00000000,
+       0x033, 0x00000007,
+       0x03E, 0x00038000,
+       0x03F, 0x000C3186,
+       0x033, 0x00000006,
+       0x03E, 0x00038080,
+       0x03F, 0x000C3186,
+       0x033, 0x00000005,
+       0x03E, 0x000380C8,
+       0x03F, 0x000C3186,
+       0x033, 0x00000004,
+       0x03E, 0x00038190,
+       0x03F, 0x000C3186,
+       0x033, 0x00000003,
+       0x03E, 0x00038998,
+       0x03F, 0x000C3186,
+       0x033, 0x00000002,
+       0x03E, 0x00039840,
+       0x03F, 0x000C3186,
+       0x033, 0x00000001,
+       0x03E, 0x000398C4,
+       0x03F, 0x000C3186,
+       0x033, 0x00000000,
+       0x03E, 0x00039930,
+       0x03F, 0x000C3186,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x033, 0x0000000F,
+       0x03E, 0x00038000,
+       0x03F, 0x000C3186,
+       0x033, 0x0000000E,
+       0x03E, 0x00038080,
+       0x03F, 0x000C3186,
+       0x033, 0x0000000D,
+       0x03E, 0x000380C8,
+       0x03F, 0x000C3186,
+       0x033, 0x0000000C,
+       0x03E, 0x00038190,
+       0x03F, 0x000C3186,
+       0x033, 0x0000000B,
+       0x03E, 0x00038998,
+       0x03F, 0x000C3186,
+       0x033, 0x0000000A,
+       0x03E, 0x00039840,
+       0x03F, 0x000C3186,
+       0x033, 0x00000009,
+       0x03E, 0x000398C4,
+       0x03F, 0x000C3186,
+       0x033, 0x00000008,
+       0x03E, 0x00039930,
+       0x03F, 0x000C3186,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x033, 0x0000000F,
+       0x03E, 0x00038000,
+       0x03F, 0x000C3186,
+       0x033, 0x0000000E,
+       0x03E, 0x00038080,
+       0x03F, 0x000C3186,
+       0x033, 0x0000000D,
+       0x03E, 0x000380C8,
+       0x03F, 0x000C3186,
+       0x033, 0x0000000C,
+       0x03E, 0x00038190,
+       0x03F, 0x000C3186,
+       0x033, 0x0000000B,
+       0x03E, 0x00038998,
+       0x03F, 0x000C3186,
+       0x033, 0x0000000A,
+       0x03E, 0x00039840,
+       0x03F, 0x000C3186,
+       0x033, 0x00000009,
+       0x03E, 0x000398C4,
+       0x03F, 0x000C3186,
+       0x033, 0x00000008,
+       0x03E, 0x00039930,
+       0x03F, 0x000C3186,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x033, 0x0000000F,
+       0x03E, 0x00038000,
+       0x03F, 0x000C3186,
+       0x033, 0x0000000E,
+       0x03E, 0x00038080,
+       0x03F, 0x000C3186,
+       0x033, 0x0000000D,
+       0x03E, 0x000380C8,
+       0x03F, 0x000C3186,
+       0x033, 0x0000000C,
+       0x03E, 0x00038190,
+       0x03F, 0x000C3186,
+       0x033, 0x0000000B,
+       0x03E, 0x00038998,
+       0x03F, 0x000C3186,
+       0x033, 0x0000000A,
+       0x03E, 0x00039840,
+       0x03F, 0x000C3186,
+       0x033, 0x00000009,
+       0x03E, 0x000398C4,
+       0x03F, 0x000C3186,
+       0x033, 0x00000008,
+       0x03E, 0x00039930,
+       0x03F, 0x000C3186,
+       0xA0000000,     0x00000000,
+       0x033, 0x0000000F,
+       0x03E, 0x00038000,
+       0x03F, 0x000C3186,
+       0x033, 0x0000000E,
+       0x03E, 0x00038080,
+       0x03F, 0x000C3186,
+       0x033, 0x0000000D,
+       0x03E, 0x000380C8,
+       0x03F, 0x000C3186,
+       0x033, 0x0000000C,
+       0x03E, 0x00038190,
+       0x03F, 0x000C3186,
+       0x033, 0x0000000B,
+       0x03E, 0x00038998,
+       0x03F, 0x000C3186,
+       0x033, 0x0000000A,
+       0x03E, 0x00039840,
+       0x03F, 0x000C3186,
+       0x033, 0x00000009,
+       0x03E, 0x000398C4,
+       0x03F, 0x000C3186,
+       0x033, 0x00000008,
+       0x03E, 0x00039930,
+       0x03F, 0x000C3186,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x033, 0x00000017,
+       0x03E, 0x00038000,
+       0x03F, 0x000C3186,
+       0x033, 0x00000016,
+       0x03E, 0x00038080,
+       0x03F, 0x000C3186,
+       0x033, 0x00000015,
+       0x03E, 0x000380C8,
+       0x03F, 0x000C3186,
+       0x033, 0x00000014,
+       0x03E, 0x00038190,
+       0x03F, 0x000C3186,
+       0x033, 0x00000013,
+       0x03E, 0x00038998,
+       0x03F, 0x000C3186,
+       0x033, 0x00000012,
+       0x03E, 0x00039840,
+       0x03F, 0x000C3186,
+       0x033, 0x00000011,
+       0x03E, 0x000398C4,
+       0x03F, 0x000C3186,
+       0x033, 0x00000010,
+       0x03E, 0x00039930,
+       0x03F, 0x000C3186,
+       0x0EF, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x033, 0x00000017,
+       0x03E, 0x00038000,
+       0x03F, 0x000C3186,
+       0x033, 0x00000016,
+       0x03E, 0x00038080,
+       0x03F, 0x000C3186,
+       0x033, 0x00000015,
+       0x03E, 0x000380C8,
+       0x03F, 0x000C3186,
+       0x033, 0x00000014,
+       0x03E, 0x00038190,
+       0x03F, 0x000C3186,
+       0x033, 0x00000013,
+       0x03E, 0x00038998,
+       0x03F, 0x000C3186,
+       0x033, 0x00000012,
+       0x03E, 0x00039840,
+       0x03F, 0x000C3186,
+       0x033, 0x00000011,
+       0x03E, 0x000398C4,
+       0x03F, 0x000C3186,
+       0x033, 0x00000010,
+       0x03E, 0x00039930,
+       0x03F, 0x000C3186,
+       0x0EF, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x033, 0x00000017,
+       0x03E, 0x00038000,
+       0x03F, 0x000C3186,
+       0x033, 0x00000016,
+       0x03E, 0x00038080,
+       0x03F, 0x000C3186,
+       0x033, 0x00000015,
+       0x03E, 0x000380C8,
+       0x03F, 0x000C3186,
+       0x033, 0x00000014,
+       0x03E, 0x00038190,
+       0x03F, 0x000C3186,
+       0x033, 0x00000013,
+       0x03E, 0x00038998,
+       0x03F, 0x000C3186,
+       0x033, 0x00000012,
+       0x03E, 0x00039840,
+       0x03F, 0x000C3186,
+       0x033, 0x00000011,
+       0x03E, 0x000398C4,
+       0x03F, 0x000C3186,
+       0x033, 0x00000010,
+       0x03E, 0x00039930,
+       0x03F, 0x000C3186,
+       0x0EF, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x033, 0x00000017,
+       0x03E, 0x00038000,
+       0x03F, 0x000C3186,
+       0x033, 0x00000016,
+       0x03E, 0x00038080,
+       0x03F, 0x000C3186,
+       0x033, 0x00000015,
+       0x03E, 0x000380C8,
+       0x03F, 0x000C3186,
+       0x033, 0x00000014,
+       0x03E, 0x00038190,
+       0x03F, 0x000C3186,
+       0x033, 0x00000013,
+       0x03E, 0x00038998,
+       0x03F, 0x000C3186,
+       0x033, 0x00000012,
+       0x03E, 0x00039840,
+       0x03F, 0x000C3186,
+       0x033, 0x00000011,
+       0x03E, 0x000398C4,
+       0x03F, 0x000C3186,
+       0x033, 0x00000010,
+       0x03E, 0x00039930,
+       0x03F, 0x000C3186,
+       0x0EF, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00004000,
+       0x033, 0x00000000,
+       0x03F, 0x0000000F,
+       0x033, 0x00000001,
+       0x03F, 0x0000000A,
+       0x033, 0x00000002,
+       0x03F, 0x00000005,
+       0x0EF, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00004000,
+       0x033, 0x00000000,
+       0x03F, 0x0000000F,
+       0x033, 0x00000001,
+       0x03F, 0x0000000A,
+       0x033, 0x00000002,
+       0x03F, 0x00000005,
+       0x0EF, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00004000,
+       0x033, 0x00000000,
+       0x03F, 0x0000000F,
+       0x033, 0x00000001,
+       0x03F, 0x0000000A,
+       0x033, 0x00000002,
+       0x03F, 0x00000005,
+       0x0EF, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EF, 0x00004000,
+       0x033, 0x00000000,
+       0x03F, 0x0000000F,
+       0x033, 0x00000001,
+       0x03F, 0x0000000A,
+       0x033, 0x00000002,
+       0x03F, 0x00000005,
+       0x0EF, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x018, 0x00000401,
+       0x084, 0x00001209,
+       0x086, 0x000001A0,
+       0x087, 0x000E8180,
+       0x088, 0x00006020,
+       0x0DF, 0x00008009,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x018, 0x00000401,
+       0x084, 0x00001209,
+       0x086, 0x000001A0,
+       0x087, 0x000E8180,
+       0x088, 0x00006020,
+       0x0DF, 0x00008009,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x018, 0x00000401,
+       0x084, 0x00001209,
+       0x086, 0x000001A0,
+       0x087, 0x000E8180,
+       0x088, 0x00006020,
+       0x0DF, 0x00008009,
+       0xA0000000,     0x00000000,
+       0x018, 0x00000401,
+       0x084, 0x00001209,
+       0x086, 0x000001A0,
+       0x087, 0x000E8180,
+       0x088, 0x00006020,
+       0x0DF, 0x00008009,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00008000,
+       0x033, 0x0000000F,
+       0x03F, 0x0000003C,
+       0x033, 0x0000000E,
+       0x03F, 0x00000038,
+       0x033, 0x0000000D,
+       0x03F, 0x00000030,
+       0x033, 0x0000000C,
+       0x03F, 0x00000028,
+       0x033, 0x0000000B,
+       0x03F, 0x00000020,
+       0x033, 0x0000000A,
+       0x03F, 0x00000018,
+       0x033, 0x00000009,
+       0x03F, 0x00000010,
+       0x033, 0x00000008,
+       0x03F, 0x00000008,
+       0x033, 0x00000007,
+       0x03F, 0x0000003C,
+       0x033, 0x00000006,
+       0x03F, 0x00000038,
+       0x033, 0x00000005,
+       0x03F, 0x00000030,
+       0x033, 0x00000004,
+       0x03F, 0x00000028,
+       0x033, 0x00000003,
+       0x03F, 0x00000020,
+       0x033, 0x00000002,
+       0x03F, 0x00000018,
+       0x033, 0x00000001,
+       0x03F, 0x00000010,
+       0x033, 0x00000000,
+       0x03F, 0x00000008,
+       0x0EF, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00008000,
+       0x033, 0x0000000F,
+       0x03F, 0x0000003C,
+       0x033, 0x0000000E,
+       0x03F, 0x00000038,
+       0x033, 0x0000000D,
+       0x03F, 0x00000030,
+       0x033, 0x0000000C,
+       0x03F, 0x00000028,
+       0x033, 0x0000000B,
+       0x03F, 0x00000020,
+       0x033, 0x0000000A,
+       0x03F, 0x00000018,
+       0x033, 0x00000009,
+       0x03F, 0x00000010,
+       0x033, 0x00000008,
+       0x03F, 0x00000008,
+       0x033, 0x00000007,
+       0x03F, 0x0000003C,
+       0x033, 0x00000006,
+       0x03F, 0x00000038,
+       0x033, 0x00000005,
+       0x03F, 0x00000030,
+       0x033, 0x00000004,
+       0x03F, 0x00000028,
+       0x033, 0x00000003,
+       0x03F, 0x00000020,
+       0x033, 0x00000002,
+       0x03F, 0x00000018,
+       0x033, 0x00000001,
+       0x03F, 0x00000010,
+       0x033, 0x00000000,
+       0x03F, 0x00000008,
+       0x0EF, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00008000,
+       0x033, 0x0000000F,
+       0x03F, 0x0000003C,
+       0x033, 0x0000000E,
+       0x03F, 0x00000038,
+       0x033, 0x0000000D,
+       0x03F, 0x00000030,
+       0x033, 0x0000000C,
+       0x03F, 0x00000028,
+       0x033, 0x0000000B,
+       0x03F, 0x00000020,
+       0x033, 0x0000000A,
+       0x03F, 0x00000018,
+       0x033, 0x00000009,
+       0x03F, 0x00000010,
+       0x033, 0x00000008,
+       0x03F, 0x00000008,
+       0x033, 0x00000007,
+       0x03F, 0x0000003C,
+       0x033, 0x00000006,
+       0x03F, 0x00000038,
+       0x033, 0x00000005,
+       0x03F, 0x00000030,
+       0x033, 0x00000004,
+       0x03F, 0x00000028,
+       0x033, 0x00000003,
+       0x03F, 0x00000020,
+       0x033, 0x00000002,
+       0x03F, 0x00000018,
+       0x033, 0x00000001,
+       0x03F, 0x00000010,
+       0x033, 0x00000000,
+       0x03F, 0x00000008,
+       0x0EF, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EF, 0x00008000,
+       0x033, 0x0000000F,
+       0x03F, 0x0000003C,
+       0x033, 0x0000000E,
+       0x03F, 0x00000038,
+       0x033, 0x0000000D,
+       0x03F, 0x00000030,
+       0x033, 0x0000000C,
+       0x03F, 0x00000028,
+       0x033, 0x0000000B,
+       0x03F, 0x00000020,
+       0x033, 0x0000000A,
+       0x03F, 0x00000018,
+       0x033, 0x00000009,
+       0x03F, 0x00000010,
+       0x033, 0x00000008,
+       0x03F, 0x00000008,
+       0x033, 0x00000007,
+       0x03F, 0x0000003C,
+       0x033, 0x00000006,
+       0x03F, 0x00000038,
+       0x033, 0x00000005,
+       0x03F, 0x00000030,
+       0x033, 0x00000004,
+       0x03F, 0x00000028,
+       0x033, 0x00000003,
+       0x03F, 0x00000020,
+       0x033, 0x00000002,
+       0x03F, 0x00000018,
+       0x033, 0x00000001,
+       0x03F, 0x00000010,
+       0x033, 0x00000000,
+       0x03F, 0x00000008,
+       0x0EF, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00000002,
+       0x033, 0x0000001E,
+       0x03F, 0x00000000,
+       0x033, 0x0000001C,
+       0x03F, 0x00000000,
+       0x033, 0x0000000E,
+       0x03F, 0x00000000,
+       0x033, 0x0000000C,
+       0x03F, 0x00000000,
+       0x033, 0x0000000A,
+       0x03F, 0x00000002,
+       0x033, 0x00000008,
+       0x03F, 0x00000000,
+       0x033, 0x00000036,
+       0x03F, 0x00000000,
+       0x033, 0x00000037,
+       0x03F, 0x00000000,
+       0x033, 0x00000034,
+       0x03F, 0x00000000,
+       0x033, 0x00000026,
+       0x03F, 0x00000006,
+       0x033, 0x00000027,
+       0x03F, 0x00000006,
+       0x033, 0x00000024,
+       0x03F, 0x00000006,
+       0x033, 0x00000022,
+       0x03F, 0x00000006,
+       0x033, 0x00000020,
+       0x03F, 0x00000006,
+       0x033, 0x00000006,
+       0x03F, 0x00000000,
+       0x033, 0x00000007,
+       0x03F, 0x00000006,
+       0x033, 0x00000004,
+       0x03F, 0x00000006,
+       0x033, 0x00000002,
+       0x03F, 0x00000006,
+       0x033, 0x00000000,
+       0x03F, 0x00000006,
+       0x0EE, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00000002,
+       0x033, 0x0000001E,
+       0x03F, 0x00000000,
+       0x033, 0x0000001C,
+       0x03F, 0x00000000,
+       0x033, 0x0000000E,
+       0x03F, 0x00000000,
+       0x033, 0x0000000C,
+       0x03F, 0x00000000,
+       0x033, 0x0000000A,
+       0x03F, 0x00000002,
+       0x033, 0x00000008,
+       0x03F, 0x00000000,
+       0x033, 0x00000036,
+       0x03F, 0x00000000,
+       0x033, 0x00000037,
+       0x03F, 0x00000000,
+       0x033, 0x00000034,
+       0x03F, 0x00000000,
+       0x033, 0x00000026,
+       0x03F, 0x00000006,
+       0x033, 0x00000027,
+       0x03F, 0x00000006,
+       0x033, 0x00000024,
+       0x03F, 0x00000006,
+       0x033, 0x00000022,
+       0x03F, 0x00000006,
+       0x033, 0x00000020,
+       0x03F, 0x00000006,
+       0x033, 0x00000006,
+       0x03F, 0x00000000,
+       0x033, 0x00000007,
+       0x03F, 0x00000006,
+       0x033, 0x00000004,
+       0x03F, 0x00000006,
+       0x033, 0x00000002,
+       0x03F, 0x00000006,
+       0x033, 0x00000000,
+       0x03F, 0x00000006,
+       0x0EE, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00000002,
+       0x033, 0x0000001E,
+       0x03F, 0x00000000,
+       0x033, 0x0000001C,
+       0x03F, 0x00000000,
+       0x033, 0x0000000E,
+       0x03F, 0x00000000,
+       0x033, 0x0000000C,
+       0x03F, 0x00000000,
+       0x033, 0x0000000A,
+       0x03F, 0x00000002,
+       0x033, 0x00000008,
+       0x03F, 0x00000000,
+       0x033, 0x00000036,
+       0x03F, 0x00000000,
+       0x033, 0x00000037,
+       0x03F, 0x00000000,
+       0x033, 0x00000034,
+       0x03F, 0x00000000,
+       0x033, 0x00000026,
+       0x03F, 0x00000006,
+       0x033, 0x00000027,
+       0x03F, 0x00000006,
+       0x033, 0x00000024,
+       0x03F, 0x00000006,
+       0x033, 0x00000022,
+       0x03F, 0x00000006,
+       0x033, 0x00000020,
+       0x03F, 0x00000006,
+       0x033, 0x00000006,
+       0x03F, 0x00000000,
+       0x033, 0x00000007,
+       0x03F, 0x00000006,
+       0x033, 0x00000004,
+       0x03F, 0x00000006,
+       0x033, 0x00000002,
+       0x03F, 0x00000006,
+       0x033, 0x00000000,
+       0x03F, 0x00000006,
+       0x0EE, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EE, 0x00000002,
+       0x033, 0x0000001E,
+       0x03F, 0x00000000,
+       0x033, 0x0000001C,
+       0x03F, 0x00000000,
+       0x033, 0x0000000E,
+       0x03F, 0x00000000,
+       0x033, 0x0000000C,
+       0x03F, 0x00000000,
+       0x033, 0x0000000A,
+       0x03F, 0x00000002,
+       0x033, 0x00000008,
+       0x03F, 0x00000000,
+       0x033, 0x00000036,
+       0x03F, 0x00000000,
+       0x033, 0x00000037,
+       0x03F, 0x00000000,
+       0x033, 0x00000034,
+       0x03F, 0x00000000,
+       0x033, 0x00000026,
+       0x03F, 0x00000006,
+       0x033, 0x00000027,
+       0x03F, 0x00000006,
+       0x033, 0x00000024,
+       0x03F, 0x00000006,
+       0x033, 0x00000022,
+       0x03F, 0x00000006,
+       0x033, 0x00000020,
+       0x03F, 0x00000006,
+       0x033, 0x00000006,
+       0x03F, 0x00000000,
+       0x033, 0x00000007,
+       0x03F, 0x00000006,
+       0x033, 0x00000004,
+       0x03F, 0x00000006,
+       0x033, 0x00000002,
+       0x03F, 0x00000006,
+       0x033, 0x00000000,
+       0x03F, 0x00000006,
+       0x0EE, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0A0, 0x000F0005,
+       0x0A1, 0x0006C000,
+       0x0A2, 0x0000161B,
+       0x0A3, 0x000B9CBD,
+       0x0AF, 0x00070000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0A0, 0x000F0005,
+       0x0A1, 0x0006C000,
+       0x0A2, 0x0000161B,
+       0x0A3, 0x000B9CBD,
+       0x0AF, 0x00070000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0A0, 0x000F0005,
+       0x0A1, 0x0006C000,
+       0x0A2, 0x0000161B,
+       0x0A3, 0x000B9CBD,
+       0x0AF, 0x00070000,
+       0xA0000000,     0x00000000,
+       0x0A0, 0x000F0005,
+       0x0A1, 0x0006C000,
+       0x0A2, 0x0000161B,
+       0x0A3, 0x000B9CBD,
+       0x0AF, 0x00070000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0DE, 0x00000200,
+       0x0EE, 0x00000100,
+       0x033, 0x00000007,
+       0x03F, 0x00000043,
+       0x033, 0x00000006,
+       0x03F, 0x0000007A,
+       0x033, 0x00000005,
+       0x03F, 0x00000041,
+       0x033, 0x00000004,
+       0x03F, 0x00000079,
+       0x033, 0x00000003,
+       0x03F, 0x00000043,
+       0x033, 0x00000002,
+       0x03F, 0x0000007A,
+       0x033, 0x00000001,
+       0x03F, 0x00000041,
+       0x033, 0x00000000,
+       0x03F, 0x00000079,
+       0x0EE, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0DE, 0x00000200,
+       0x0EE, 0x00000100,
+       0x033, 0x00000007,
+       0x03F, 0x00000043,
+       0x033, 0x00000006,
+       0x03F, 0x0000007A,
+       0x033, 0x00000005,
+       0x03F, 0x00000041,
+       0x033, 0x00000004,
+       0x03F, 0x00000079,
+       0x033, 0x00000003,
+       0x03F, 0x00000043,
+       0x033, 0x00000002,
+       0x03F, 0x0000007A,
+       0x033, 0x00000001,
+       0x03F, 0x00000041,
+       0x033, 0x00000000,
+       0x03F, 0x00000079,
+       0x0EE, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0DE, 0x00000200,
+       0x0EE, 0x00000100,
+       0x033, 0x00000007,
+       0x03F, 0x00000043,
+       0x033, 0x00000006,
+       0x03F, 0x0000007A,
+       0x033, 0x00000005,
+       0x03F, 0x00000041,
+       0x033, 0x00000004,
+       0x03F, 0x00000079,
+       0x033, 0x00000003,
+       0x03F, 0x00000043,
+       0x033, 0x00000002,
+       0x03F, 0x0000007A,
+       0x033, 0x00000001,
+       0x03F, 0x00000041,
+       0x033, 0x00000000,
+       0x03F, 0x00000079,
+       0x0EE, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0DE, 0x00000200,
+       0x0EE, 0x00000100,
+       0x033, 0x00000007,
+       0x03F, 0x00000043,
+       0x033, 0x00000006,
+       0x03F, 0x0000007A,
+       0x033, 0x00000005,
+       0x03F, 0x00000041,
+       0x033, 0x00000004,
+       0x03F, 0x00000079,
+       0x033, 0x00000003,
+       0x03F, 0x00000043,
+       0x033, 0x00000002,
+       0x03F, 0x0000007A,
+       0x033, 0x00000001,
+       0x03F, 0x00000041,
+       0x033, 0x00000000,
+       0x03F, 0x00000079,
+       0x0EE, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0B8, 0x00080A00,
+       0x0B0, 0x000FF0FA,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0B8, 0x00080A00,
+       0x0B0, 0x000FF0FA,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0B8, 0x00080A00,
+       0x0B0, 0x000FF0FA,
+       0xA0000000,     0x00000000,
+       0x0B8, 0x00080A00,
+       0x0B0, 0x000FF0FA,
+       0xB0000000,     0x00000000,
+       0xFFE, 0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0CA, 0x00080000,
+       0x0C9, 0x0001C141,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0CA, 0x00080000,
+       0x0C9, 0x0001C141,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0CA, 0x00080000,
+       0x0C9, 0x0001C141,
+       0xA0000000,     0x00000000,
+       0x0CA, 0x00080000,
+       0x0C9, 0x0001C141,
+       0xB0000000,     0x00000000,
+       0xFFE, 0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0B0, 0x000FF0F8,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0B0, 0x000FF0F8,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0B0, 0x000FF0F8,
+       0xA0000000,     0x00000000,
+       0x0B0, 0x000FF0F8,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x018, 0x00018D24,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x018, 0x00018D24,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x018, 0x00018D24,
+       0xA0000000,     0x00000000,
+       0x018, 0x00018D24,
+       0xB0000000,     0x00000000,
+       0xFFE, 0x00000000,
+       0xFFE, 0x00000000,
+       0xFFE, 0x00000000,
+       0xFFE, 0x00000000,
+       0xFFE, 0x00000000,
+       0xFFE, 0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x018, 0x00010D24,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x018, 0x00010D24,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x018, 0x00010D24,
+       0xA0000000,     0x00000000,
+       0x018, 0x00010D24,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x01B, 0x00003A40,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x01B, 0x00003A40,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x01B, 0x00003A40,
+       0xA0000000,     0x00000000,
+       0x01B, 0x00003A40,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x061, 0x0004D3A3,
+       0x062, 0x0000D303,
+       0x063, 0x00000002,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x061, 0x0004D3A3,
+       0x062, 0x0000D303,
+       0x063, 0x00000002,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x061, 0x0004D3A1,
+       0x062, 0x0000D3A3,
+       0x063, 0x00000002,
+       0xA0000000,     0x00000000,
+       0x061, 0x0004D3A1,
+       0x062, 0x0000D3A3,
+       0x063, 0x00000002,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000200,
+       0x030, 0x00000000,
+       0x03F, 0x00033303,
+       0x030, 0x00001000,
+       0x03F, 0x00033303,
+       0x030, 0x00002000,
+       0x03F, 0x00033303,
+       0x030, 0x00003000,
+       0x03F, 0x00033303,
+       0x030, 0x00004000,
+       0x03F, 0x00033303,
+       0x030, 0x00005000,
+       0x03F, 0x00033303,
+       0x030, 0x00006000,
+       0x03F, 0x00033303,
+       0x030, 0x00007000,
+       0x03F, 0x00033303,
+       0x030, 0x00008000,
+       0x03F, 0x00033303,
+       0x030, 0x00009000,
+       0x03F, 0x00033303,
+       0x030, 0x0000A000,
+       0x03F, 0x00033303,
+       0x030, 0x0000B000,
+       0x03F, 0x00033303,
+       0x0EF, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000200,
+       0x030, 0x00000000,
+       0x03F, 0x000333A3,
+       0x030, 0x00001000,
+       0x03F, 0x000333A3,
+       0x030, 0x00002000,
+       0x03F, 0x000333A3,
+       0x030, 0x00003000,
+       0x03F, 0x000333A3,
+       0x030, 0x00004000,
+       0x03F, 0x000313A3,
+       0x030, 0x00005000,
+       0x03F, 0x000313A3,
+       0x030, 0x00006000,
+       0x03F, 0x000313A3,
+       0x030, 0x00007000,
+       0x03F, 0x000313A3,
+       0x030, 0x00008000,
+       0x03F, 0x000333A3,
+       0x030, 0x00009000,
+       0x03F, 0x000333A3,
+       0x030, 0x0000A000,
+       0x03F, 0x000333A3,
+       0x030, 0x0000B000,
+       0x03F, 0x000333A3,
+       0x0EF, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000200,
+       0x030, 0x00000000,
+       0x03F, 0x000335A3,
+       0x030, 0x00001000,
+       0x03F, 0x000335A3,
+       0x030, 0x00002000,
+       0x03F, 0x000335A3,
+       0x030, 0x00003000,
+       0x03F, 0x000335A3,
+       0x030, 0x00004000,
+       0x03F, 0x000335A3,
+       0x030, 0x00005000,
+       0x03F, 0x000335A3,
+       0x030, 0x00006000,
+       0x03F, 0x000335A3,
+       0x030, 0x00007000,
+       0x03F, 0x000335A3,
+       0x030, 0x00008000,
+       0x03F, 0x000335A3,
+       0x030, 0x00009000,
+       0x03F, 0x000335A3,
+       0x030, 0x0000A000,
+       0x03F, 0x000335A3,
+       0x030, 0x0000B000,
+       0x03F, 0x000335A3,
+       0x0EF, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EF, 0x00000200,
+       0x030, 0x00000000,
+       0x03F, 0x000335A3,
+       0x030, 0x00001000,
+       0x03F, 0x000335A3,
+       0x030, 0x00002000,
+       0x03F, 0x000335A3,
+       0x030, 0x00003000,
+       0x03F, 0x000335A3,
+       0x030, 0x00004000,
+       0x03F, 0x000335A3,
+       0x030, 0x00005000,
+       0x03F, 0x000335A3,
+       0x030, 0x00006000,
+       0x03F, 0x000335A3,
+       0x030, 0x00007000,
+       0x03F, 0x000335A3,
+       0x030, 0x00008000,
+       0x03F, 0x000335A3,
+       0x030, 0x00009000,
+       0x03F, 0x000335A3,
+       0x030, 0x0000A000,
+       0x03F, 0x000335A3,
+       0x030, 0x0000B000,
+       0x03F, 0x000335A3,
+       0x0EF, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000080,
+       0x033, 0x00000000,
+       0x03F, 0x00033303,
+       0x033, 0x00000001,
+       0x03F, 0x00033303,
+       0x033, 0x00000002,
+       0x03F, 0x00033303,
+       0x033, 0x00000003,
+       0x03F, 0x00033303,
+       0x033, 0x00000004,
+       0x03F, 0x00033303,
+       0x033, 0x00000005,
+       0x03F, 0x00033303,
+       0x033, 0x00000006,
+       0x03F, 0x00033303,
+       0x033, 0x00000007,
+       0x03F, 0x00033303,
+       0x033, 0x00000008,
+       0x03F, 0x00033303,
+       0x033, 0x00000009,
+       0x03F, 0x00033303,
+       0x033, 0x0000000A,
+       0x03F, 0x00033303,
+       0x033, 0x0000000B,
+       0x03F, 0x00033303,
+       0x033, 0x0000000C,
+       0x03F, 0x00033303,
+       0x033, 0x0000000D,
+       0x03F, 0x00033303,
+       0x033, 0x0000000E,
+       0x03F, 0x00033303,
+       0x033, 0x0000000F,
+       0x03F, 0x00033303,
+       0x033, 0x00000010,
+       0x03F, 0x00033303,
+       0x033, 0x00000011,
+       0x03F, 0x00033303,
+       0x033, 0x00000012,
+       0x03F, 0x00033303,
+       0x0EF, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000080,
+       0x033, 0x00000000,
+       0x03F, 0x000333A3,
+       0x033, 0x00000001,
+       0x03F, 0x000333A3,
+       0x033, 0x00000002,
+       0x03F, 0x000333A3,
+       0x033, 0x00000003,
+       0x03F, 0x000333A3,
+       0x033, 0x00000004,
+       0x03F, 0x000333A3,
+       0x033, 0x00000005,
+       0x03F, 0x000333A3,
+       0x033, 0x00000006,
+       0x03F, 0x000333A3,
+       0x033, 0x00000007,
+       0x03F, 0x000333A3,
+       0x033, 0x00000008,
+       0x03F, 0x000313A3,
+       0x033, 0x00000009,
+       0x03F, 0x000313A3,
+       0x033, 0x0000000A,
+       0x03F, 0x000313A3,
+       0x033, 0x0000000B,
+       0x03F, 0x000313A3,
+       0x033, 0x0000000C,
+       0x03F, 0x000313A3,
+       0x033, 0x0000000D,
+       0x03F, 0x000333A3,
+       0x033, 0x0000000E,
+       0x03F, 0x000333A3,
+       0x033, 0x0000000F,
+       0x03F, 0x000333A3,
+       0x033, 0x00000010,
+       0x03F, 0x000333A3,
+       0x033, 0x00000011,
+       0x03F, 0x000333A3,
+       0x033, 0x00000012,
+       0x03F, 0x000333A3,
+       0x0EF, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000080,
+       0x033, 0x00000000,
+       0x03F, 0x000335A3,
+       0x033, 0x00000001,
+       0x03F, 0x000335A3,
+       0x033, 0x00000002,
+       0x03F, 0x000335A3,
+       0x033, 0x00000003,
+       0x03F, 0x000335A3,
+       0x033, 0x00000004,
+       0x03F, 0x000335A3,
+       0x033, 0x00000005,
+       0x03F, 0x000335A3,
+       0x033, 0x00000006,
+       0x03F, 0x000335A3,
+       0x033, 0x00000007,
+       0x03F, 0x000335A3,
+       0x033, 0x00000008,
+       0x03F, 0x000335A3,
+       0x033, 0x00000009,
+       0x03F, 0x000335A3,
+       0x033, 0x0000000A,
+       0x03F, 0x000335A3,
+       0x033, 0x0000000B,
+       0x03F, 0x000335A3,
+       0x033, 0x0000000C,
+       0x03F, 0x000335A3,
+       0x033, 0x0000000D,
+       0x03F, 0x000335A3,
+       0x033, 0x0000000E,
+       0x03F, 0x000335A3,
+       0x033, 0x0000000F,
+       0x03F, 0x000335A3,
+       0x033, 0x00000010,
+       0x03F, 0x000335A3,
+       0x033, 0x00000011,
+       0x03F, 0x000335A3,
+       0x033, 0x00000012,
+       0x03F, 0x000335A3,
+       0x0EF, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EF, 0x00000080,
+       0x033, 0x00000000,
+       0x03F, 0x000335A3,
+       0x033, 0x00000001,
+       0x03F, 0x000335A3,
+       0x033, 0x00000002,
+       0x03F, 0x000335A3,
+       0x033, 0x00000003,
+       0x03F, 0x000335A3,
+       0x033, 0x00000004,
+       0x03F, 0x000335A3,
+       0x033, 0x00000005,
+       0x03F, 0x000335A3,
+       0x033, 0x00000006,
+       0x03F, 0x000335A3,
+       0x033, 0x00000007,
+       0x03F, 0x000335A3,
+       0x033, 0x00000008,
+       0x03F, 0x000335A3,
+       0x033, 0x00000009,
+       0x03F, 0x000335A3,
+       0x033, 0x0000000A,
+       0x03F, 0x000335A3,
+       0x033, 0x0000000B,
+       0x03F, 0x000335A3,
+       0x033, 0x0000000C,
+       0x03F, 0x000335A3,
+       0x033, 0x0000000D,
+       0x03F, 0x000335A3,
+       0x033, 0x0000000E,
+       0x03F, 0x000335A3,
+       0x033, 0x0000000F,
+       0x03F, 0x000335A3,
+       0x033, 0x00000010,
+       0x03F, 0x000335A3,
+       0x033, 0x00000011,
+       0x03F, 0x000335A3,
+       0x033, 0x00000012,
+       0x03F, 0x000335A3,
+       0x0EF, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000040,
+       0x030, 0x00000644,
+       0x030, 0x00001135,
+       0x030, 0x00002133,
+       0x030, 0x00004000,
+       0x030, 0x00005000,
+       0x030, 0x00006000,
+       0x0EF, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000040,
+       0x030, 0x00000644,
+       0x030, 0x00001412,
+       0x030, 0x00002202,
+       0x030, 0x00004000,
+       0x030, 0x00005000,
+       0x030, 0x00006000,
+       0x0EF, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000040,
+       0x030, 0x00000640,
+       0x030, 0x00001512,
+       0x030, 0x00002202,
+       0x030, 0x00004000,
+       0x030, 0x00005000,
+       0x030, 0x00006000,
+       0x0EF, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EF, 0x00000040,
+       0x030, 0x00000640,
+       0x030, 0x00001512,
+       0x030, 0x00002202,
+       0x030, 0x00004000,
+       0x030, 0x00005000,
+       0x030, 0x00006000,
+       0x0EF, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000800,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000800,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000800,
+       0xA0000000,     0x00000000,
+       0x0EF, 0x00000800,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x033, 0x00000020,
+       0x03F, 0x00000001,
+       0x033, 0x00000021,
+       0x03F, 0x00000004,
+       0x033, 0x00000022,
+       0x03F, 0x00000007,
+       0x033, 0x00000023,
+       0x03F, 0x00000024,
+       0x033, 0x00000024,
+       0x03F, 0x00000027,
+       0x033, 0x00000025,
+       0x03F, 0x0000002A,
+       0x033, 0x00000026,
+       0x03F, 0x0000002D,
+       0x033, 0x00000027,
+       0x03F, 0x00000030,
+       0x033, 0x00000028,
+       0x03F, 0x00000033,
+       0x033, 0x00000029,
+       0x03F, 0x00000036,
+       0x033, 0x0000002A,
+       0x03F, 0x00000039,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x033, 0x00000020,
+       0x03F, 0x00000E42,
+       0x033, 0x00000021,
+       0x03F, 0x00000E45,
+       0x033, 0x00000022,
+       0x03F, 0x00000E65,
+       0x033, 0x00000023,
+       0x03F, 0x00000E68,
+       0x033, 0x00000024,
+       0x03F, 0x00000EE4,
+       0x033, 0x00000025,
+       0x03F, 0x00000EE7,
+       0x033, 0x00000026,
+       0x03F, 0x00000EEA,
+       0x033, 0x00000027,
+       0x03F, 0x00000EED,
+       0x033, 0x00000028,
+       0x03F, 0x00000EF0,
+       0x033, 0x00000029,
+       0x03F, 0x00000EF3,
+       0x033, 0x0000002A,
+       0x03F, 0x00000EF6,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x033, 0x00000020,
+       0x03F, 0x00000E42,
+       0x033, 0x00000021,
+       0x03F, 0x00000E45,
+       0x033, 0x00000022,
+       0x03F, 0x00000E48,
+       0x033, 0x00000023,
+       0x03F, 0x00000E68,
+       0x033, 0x00000024,
+       0x03F, 0x00000E6B,
+       0x033, 0x00000025,
+       0x03F, 0x00000EAA,
+       0x033, 0x00000026,
+       0x03F, 0x00000EEA,
+       0x033, 0x00000027,
+       0x03F, 0x00000EED,
+       0x033, 0x00000028,
+       0x03F, 0x00000EF0,
+       0x033, 0x00000029,
+       0x03F, 0x00000EF3,
+       0x033, 0x0000002A,
+       0x03F, 0x00000EF6,
+       0xA0000000,     0x00000000,
+       0x033, 0x00000020,
+       0x03F, 0x00000E42,
+       0x033, 0x00000021,
+       0x03F, 0x00000E45,
+       0x033, 0x00000022,
+       0x03F, 0x00000E65,
+       0x033, 0x00000023,
+       0x03F, 0x00000E68,
+       0x033, 0x00000024,
+       0x03F, 0x00000EE4,
+       0x033, 0x00000025,
+       0x03F, 0x00000EE7,
+       0x033, 0x00000026,
+       0x03F, 0x00000EEA,
+       0x033, 0x00000027,
+       0x03F, 0x00000EED,
+       0x033, 0x00000028,
+       0x03F, 0x00000EF0,
+       0x033, 0x00000029,
+       0x03F, 0x00000EF3,
+       0x033, 0x0000002A,
+       0x03F, 0x00000EF6,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x033, 0x00000060,
+       0x03F, 0x00000001,
+       0x033, 0x00000061,
+       0x03F, 0x00000004,
+       0x033, 0x00000062,
+       0x03F, 0x00000007,
+       0x033, 0x00000063,
+       0x03F, 0x00000024,
+       0x033, 0x00000064,
+       0x03F, 0x00000027,
+       0x033, 0x00000065,
+       0x03F, 0x0000002A,
+       0x033, 0x00000066,
+       0x03F, 0x0000002D,
+       0x033, 0x00000067,
+       0x03F, 0x00000030,
+       0x033, 0x00000068,
+       0x03F, 0x00000033,
+       0x033, 0x00000069,
+       0x03F, 0x00000036,
+       0x033, 0x0000006A,
+       0x03F, 0x00000039,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x033, 0x00000060,
+       0x03F, 0x00000E42,
+       0x033, 0x00000061,
+       0x03F, 0x00000E45,
+       0x033, 0x00000062,
+       0x03F, 0x00000E65,
+       0x033, 0x00000063,
+       0x03F, 0x00000E68,
+       0x033, 0x00000064,
+       0x03F, 0x00000EE5,
+       0x033, 0x00000065,
+       0x03F, 0x00000EE8,
+       0x033, 0x00000066,
+       0x03F, 0x00000EEB,
+       0x033, 0x00000067,
+       0x03F, 0x00000EEE,
+       0x033, 0x00000068,
+       0x03F, 0x00000EF1,
+       0x033, 0x00000069,
+       0x03F, 0x00000EF4,
+       0x033, 0x0000006A,
+       0x03F, 0x00000EF7,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x033, 0x00000060,
+       0x03F, 0x00000E09,
+       0x033, 0x00000061,
+       0x03F, 0x00000E43,
+       0x033, 0x00000062,
+       0x03F, 0x00000E46,
+       0x033, 0x00000063,
+       0x03F, 0x00000E49,
+       0x033, 0x00000064,
+       0x03F, 0x00000E88,
+       0x033, 0x00000065,
+       0x03F, 0x00000E8B,
+       0x033, 0x00000066,
+       0x03F, 0x00000ECB,
+       0x033, 0x00000067,
+       0x03F, 0x00000ECE,
+       0x033, 0x00000068,
+       0x03F, 0x00000EF0,
+       0x033, 0x00000069,
+       0x03F, 0x00000EF3,
+       0x033, 0x0000006A,
+       0x03F, 0x00000EF6,
+       0xA0000000,     0x00000000,
+       0x033, 0x00000060,
+       0x03F, 0x00000E42,
+       0x033, 0x00000061,
+       0x03F, 0x00000E45,
+       0x033, 0x00000062,
+       0x03F, 0x00000E65,
+       0x033, 0x00000063,
+       0x03F, 0x00000E68,
+       0x033, 0x00000064,
+       0x03F, 0x00000EE5,
+       0x033, 0x00000065,
+       0x03F, 0x00000EE8,
+       0x033, 0x00000066,
+       0x03F, 0x00000EEB,
+       0x033, 0x00000067,
+       0x03F, 0x00000EEE,
+       0x033, 0x00000068,
+       0x03F, 0x00000EF1,
+       0x033, 0x00000069,
+       0x03F, 0x00000EF4,
+       0x033, 0x0000006A,
+       0x03F, 0x00000EF7,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x033, 0x000000A0,
+       0x03F, 0x00000001,
+       0x033, 0x000000A1,
+       0x03F, 0x00000004,
+       0x033, 0x000000A2,
+       0x03F, 0x00000007,
+       0x033, 0x000000A3,
+       0x03F, 0x00000025,
+       0x033, 0x000000A4,
+       0x03F, 0x00000028,
+       0x033, 0x000000A5,
+       0x03F, 0x0000002B,
+       0x033, 0x000000A6,
+       0x03F, 0x0000002E,
+       0x033, 0x000000A7,
+       0x03F, 0x00000031,
+       0x033, 0x000000A8,
+       0x03F, 0x00000034,
+       0x033, 0x000000A9,
+       0x03F, 0x00000037,
+       0x033, 0x000000AA,
+       0x03F, 0x0000003A,
+       0x0EF, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x033, 0x000000A0,
+       0x03F, 0x00000E09,
+       0x033, 0x000000A1,
+       0x03F, 0x00000E43,
+       0x033, 0x000000A2,
+       0x03F, 0x00000E64,
+       0x033, 0x000000A3,
+       0x03F, 0x00000E67,
+       0x033, 0x000000A4,
+       0x03F, 0x00000EE4,
+       0x033, 0x000000A5,
+       0x03F, 0x00000EE7,
+       0x033, 0x000000A6,
+       0x03F, 0x00000EEA,
+       0x033, 0x000000A7,
+       0x03F, 0x00000EED,
+       0x033, 0x000000A8,
+       0x03F, 0x00000EF0,
+       0x033, 0x000000A9,
+       0x03F, 0x00000EF3,
+       0x033, 0x000000AA,
+       0x03F, 0x00000EF6,
+       0x0EF, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x033, 0x000000A0,
+       0x03F, 0x00000E08,
+       0x033, 0x000000A1,
+       0x03F, 0x00000E42,
+       0x033, 0x000000A2,
+       0x03F, 0x00000E45,
+       0x033, 0x000000A3,
+       0x03F, 0x00000E48,
+       0x033, 0x000000A4,
+       0x03F, 0x00000EA5,
+       0x033, 0x000000A5,
+       0x03F, 0x00000EA8,
+       0x033, 0x000000A6,
+       0x03F, 0x00000ECA,
+       0x033, 0x000000A7,
+       0x03F, 0x00000ECD,
+       0x033, 0x000000A8,
+       0x03F, 0x00000EEF,
+       0x033, 0x000000A9,
+       0x03F, 0x00000EF2,
+       0x033, 0x000000AA,
+       0x03F, 0x00000EF5,
+       0x0EF, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x033, 0x000000A0,
+       0x03F, 0x00000E09,
+       0x033, 0x000000A1,
+       0x03F, 0x00000E43,
+       0x033, 0x000000A2,
+       0x03F, 0x00000E64,
+       0x033, 0x000000A3,
+       0x03F, 0x00000E67,
+       0x033, 0x000000A4,
+       0x03F, 0x00000EE4,
+       0x033, 0x000000A5,
+       0x03F, 0x00000EE7,
+       0x033, 0x000000A6,
+       0x03F, 0x00000EEA,
+       0x033, 0x000000A7,
+       0x03F, 0x00000EED,
+       0x033, 0x000000A8,
+       0x03F, 0x00000EF0,
+       0x033, 0x000000A9,
+       0x03F, 0x00000EF3,
+       0x033, 0x000000AA,
+       0x03F, 0x00000EF6,
+       0x0EF, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000400,
+       0x033, 0x00000000,
+       0x03F, 0x0006AC00,
+       0x033, 0x00000001,
+       0x03F, 0x00060C00,
+       0x033, 0x00000002,
+       0x03F, 0x0006AC00,
+       0x033, 0x00000003,
+       0x03F, 0x00086A00,
+       0x0EF, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000400,
+       0x033, 0x00000000,
+       0x03F, 0x0006AC00,
+       0x033, 0x00000001,
+       0x03F, 0x00060C00,
+       0x033, 0x00000002,
+       0x03F, 0x0006AC00,
+       0x033, 0x00000003,
+       0x03F, 0x00086A00,
+       0x0EF, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000400,
+       0x033, 0x00000000,
+       0x03F, 0x0006AC00,
+       0x033, 0x00000001,
+       0x03F, 0x00060C00,
+       0x033, 0x00000002,
+       0x03F, 0x0006AC00,
+       0x033, 0x00000003,
+       0x03F, 0x00086A00,
+       0x0EF, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EF, 0x00000400,
+       0x033, 0x00000000,
+       0x03F, 0x0006AC00,
+       0x033, 0x00000001,
+       0x03F, 0x00060C00,
+       0x033, 0x00000002,
+       0x03F, 0x0006AC00,
+       0x033, 0x00000003,
+       0x03F, 0x00086A00,
+       0x0EF, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000100,
+       0x033, 0x00000000,
+       0x03F, 0x00000040,
+       0x033, 0x00000001,
+       0x03F, 0x00000040,
+       0x033, 0x00000002,
+       0x03F, 0x00000040,
+       0x033, 0x00000003,
+       0x03F, 0x00000040,
+       0x0EF, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000100,
+       0x033, 0x00000000,
+       0x03F, 0x00000040,
+       0x033, 0x00000001,
+       0x03F, 0x00000040,
+       0x033, 0x00000002,
+       0x03F, 0x00000040,
+       0x033, 0x00000003,
+       0x03F, 0x00000040,
+       0x0EF, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000100,
+       0x033, 0x00000000,
+       0x03F, 0x00000040,
+       0x033, 0x00000001,
+       0x03F, 0x00000040,
+       0x033, 0x00000002,
+       0x03F, 0x00000040,
+       0x033, 0x00000003,
+       0x03F, 0x00000040,
+       0x0EF, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EF, 0x00000100,
+       0x033, 0x00000000,
+       0x03F, 0x00000040,
+       0x033, 0x00000001,
+       0x03F, 0x00000040,
+       0x033, 0x00000002,
+       0x03F, 0x00000040,
+       0x033, 0x00000003,
+       0x03F, 0x00000040,
+       0x0EF, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00040000,
+       0x033, 0x00000000,
+       0x03F, 0x00086A40,
+       0x033, 0x00000001,
+       0x03F, 0x00086A40,
+       0x033, 0x00000002,
+       0x03F, 0x00086A40,
+       0x033, 0x00000003,
+       0x03F, 0x00086A40,
+       0x033, 0x00000004,
+       0x03F, 0x00086A40,
+       0x033, 0x00000005,
+       0x03F, 0x00086A40,
+       0x033, 0x00000006,
+       0x03F, 0x00084A40,
+       0x033, 0x00000007,
+       0x03F, 0x00084A40,
+       0x0EF, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00040000,
+       0x033, 0x00000000,
+       0x03F, 0x00086A40,
+       0x033, 0x00000001,
+       0x03F, 0x00086A40,
+       0x033, 0x00000002,
+       0x03F, 0x00086A40,
+       0x033, 0x00000003,
+       0x03F, 0x00086A40,
+       0x033, 0x00000004,
+       0x03F, 0x00086A40,
+       0x033, 0x00000005,
+       0x03F, 0x00086A40,
+       0x033, 0x00000006,
+       0x03F, 0x00084A40,
+       0x033, 0x00000007,
+       0x03F, 0x00084A40,
+       0x0EF, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00040000,
+       0x033, 0x00000000,
+       0x03F, 0x00086A40,
+       0x033, 0x00000001,
+       0x03F, 0x00086A40,
+       0x033, 0x00000002,
+       0x03F, 0x00086A40,
+       0x033, 0x00000003,
+       0x03F, 0x00086A40,
+       0x033, 0x00000004,
+       0x03F, 0x00086A40,
+       0x033, 0x00000005,
+       0x03F, 0x00086A40,
+       0x033, 0x00000006,
+       0x03F, 0x00084A40,
+       0x033, 0x00000007,
+       0x03F, 0x00084A40,
+       0x0EF, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EF, 0x00040000,
+       0x033, 0x00000000,
+       0x03F, 0x00086A40,
+       0x033, 0x00000001,
+       0x03F, 0x00086A40,
+       0x033, 0x00000002,
+       0x03F, 0x00086A40,
+       0x033, 0x00000003,
+       0x03F, 0x00086A40,
+       0x033, 0x00000004,
+       0x03F, 0x00086A40,
+       0x033, 0x00000005,
+       0x03F, 0x00086A40,
+       0x033, 0x00000006,
+       0x03F, 0x00084A40,
+       0x033, 0x00000007,
+       0x03F, 0x00084A40,
+       0x0EF, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x051, 0x000801A8,
+       0x052, 0x000972E3,
+       0x053, 0x00008069,
+       0x054, 0x00030032,
+       0x055, 0x00082003,
+       0x056, 0x00051CCB,
+       0x057, 0x0000CFC2,
+       0x058, 0x00000010,
+       0x059, 0x00030000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x051, 0x000801A8,
+       0x052, 0x000972E3,
+       0x053, 0x00008069,
+       0x054, 0x00030032,
+       0x055, 0x00082003,
+       0x056, 0x00051CCB,
+       0x057, 0x0000CFC2,
+       0x058, 0x00000010,
+       0x059, 0x00030000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x051, 0x000801A8,
+       0x052, 0x000972E3,
+       0x053, 0x00008069,
+       0x054, 0x00030032,
+       0x055, 0x00082003,
+       0x056, 0x00051CCB,
+       0x057, 0x0000CFC2,
+       0x058, 0x00000010,
+       0x059, 0x00030000,
+       0xA0000000,     0x00000000,
+       0x051, 0x000801A8,
+       0x052, 0x000972E3,
+       0x053, 0x00008069,
+       0x054, 0x00030032,
+       0x055, 0x00082003,
+       0x056, 0x00051CCB,
+       0x057, 0x0000CFC2,
+       0x058, 0x00000010,
+       0x059, 0x00030000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000800,
+       0x033, 0x00000000,
+       0x03F, 0x00051429,
+       0x033, 0x00000001,
+       0x03F, 0x00051449,
+       0x033, 0x00000002,
+       0x03F, 0x0005144C,
+       0x033, 0x00000003,
+       0x03F, 0x00051C66,
+       0x033, 0x00000004,
+       0x03F, 0x00051C69,
+       0x033, 0x00000005,
+       0x03F, 0x00051C6C,
+       0x033, 0x00000006,
+       0x03F, 0x00051CE8,
+       0x033, 0x00000007,
+       0x03F, 0x00051CEB,
+       0x033, 0x00000008,
+       0x03F, 0x00051CEE,
+       0x033, 0x00000009,
+       0x03F, 0x00051CF1,
+       0x033, 0x0000000A,
+       0x03F, 0x00051CF4,
+       0x0EF, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000800,
+       0x033, 0x00000000,
+       0x03F, 0x00051429,
+       0x033, 0x00000001,
+       0x03F, 0x00051449,
+       0x033, 0x00000002,
+       0x03F, 0x0005144C,
+       0x033, 0x00000003,
+       0x03F, 0x00051C66,
+       0x033, 0x00000004,
+       0x03F, 0x00051C69,
+       0x033, 0x00000005,
+       0x03F, 0x00051C6C,
+       0x033, 0x00000006,
+       0x03F, 0x00051CE8,
+       0x033, 0x00000007,
+       0x03F, 0x00051CEB,
+       0x033, 0x00000008,
+       0x03F, 0x00051CEE,
+       0x033, 0x00000009,
+       0x03F, 0x00051CF1,
+       0x033, 0x0000000A,
+       0x03F, 0x00051CF4,
+       0x0EF, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000800,
+       0x033, 0x00000000,
+       0x03F, 0x00051427,
+       0x033, 0x00000001,
+       0x03F, 0x00051446,
+       0x033, 0x00000002,
+       0x03F, 0x00051449,
+       0x033, 0x00000003,
+       0x03F, 0x0005144C,
+       0x033, 0x00000004,
+       0x03F, 0x00051C67,
+       0x033, 0x00000005,
+       0x03F, 0x00051C6A,
+       0x033, 0x00000006,
+       0x03F, 0x00051C8B,
+       0x033, 0x00000007,
+       0x03F, 0x00051CE9,
+       0x033, 0x00000008,
+       0x03F, 0x00051CEC,
+       0x033, 0x00000009,
+       0x03F, 0x00051CEF,
+       0x033, 0x0000000A,
+       0x03F, 0x00051CF2,
+       0x0EF, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EF, 0x00000800,
+       0x033, 0x00000000,
+       0x03F, 0x00051427,
+       0x033, 0x00000001,
+       0x03F, 0x00051446,
+       0x033, 0x00000002,
+       0x03F, 0x00051449,
+       0x033, 0x00000003,
+       0x03F, 0x0005144C,
+       0x033, 0x00000004,
+       0x03F, 0x00051C67,
+       0x033, 0x00000005,
+       0x03F, 0x00051C6A,
+       0x033, 0x00000006,
+       0x03F, 0x00051C8B,
+       0x033, 0x00000007,
+       0x03F, 0x00051CE9,
+       0x033, 0x00000008,
+       0x03F, 0x00051CEC,
+       0x033, 0x00000009,
+       0x03F, 0x00051CEF,
+       0x033, 0x0000000A,
+       0x03F, 0x00051CF2,
+       0x0EF, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00004000,
+       0x033, 0x00000000,
+       0x03F, 0x00048400,
+       0x033, 0x00000001,
+       0x03F, 0x00086E00,
+       0x033, 0x00000002,
+       0x03F, 0x00048400,
+       0x033, 0x00000003,
+       0x03F, 0x00048400,
+       0x0EE, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00004000,
+       0x033, 0x00000000,
+       0x03F, 0x00048400,
+       0x033, 0x00000001,
+       0x03F, 0x00086E00,
+       0x033, 0x00000002,
+       0x03F, 0x00048400,
+       0x033, 0x00000003,
+       0x03F, 0x00048400,
+       0x0EE, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00004000,
+       0x033, 0x00000000,
+       0x03F, 0x00048400,
+       0x033, 0x00000001,
+       0x03F, 0x00086E00,
+       0x033, 0x00000002,
+       0x03F, 0x00048400,
+       0x033, 0x00000003,
+       0x03F, 0x00048400,
+       0x0EE, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EE, 0x00004000,
+       0x033, 0x00000000,
+       0x03F, 0x00048400,
+       0x033, 0x00000001,
+       0x03F, 0x00086E00,
+       0x033, 0x00000002,
+       0x03F, 0x00048400,
+       0x033, 0x00000003,
+       0x03F, 0x00048400,
+       0x0EE, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00002000,
+       0x033, 0x00000000,
+       0x03F, 0x00000000,
+       0x033, 0x00000001,
+       0x03F, 0x00000000,
+       0x033, 0x00000002,
+       0x03F, 0x00000000,
+       0x033, 0x00000003,
+       0x03F, 0x00000000,
+       0x0EE, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00002000,
+       0x033, 0x00000000,
+       0x03F, 0x00000000,
+       0x033, 0x00000001,
+       0x03F, 0x00000000,
+       0x033, 0x00000002,
+       0x03F, 0x00000000,
+       0x033, 0x00000003,
+       0x03F, 0x00000000,
+       0x0EE, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00002000,
+       0x033, 0x00000000,
+       0x03F, 0x00000000,
+       0x033, 0x00000001,
+       0x03F, 0x00000000,
+       0x033, 0x00000002,
+       0x03F, 0x00000000,
+       0x033, 0x00000003,
+       0x03F, 0x00000000,
+       0x0EE, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EE, 0x00002000,
+       0x033, 0x00000000,
+       0x03F, 0x00000000,
+       0x033, 0x00000001,
+       0x03F, 0x00000000,
+       0x033, 0x00000002,
+       0x03F, 0x00000000,
+       0x033, 0x00000003,
+       0x03F, 0x00000000,
+       0x0EE, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00080000,
+       0x033, 0x00000000,
+       0x03F, 0x00048400,
+       0x033, 0x00000001,
+       0x03F, 0x00048400,
+       0x033, 0x00000002,
+       0x03F, 0x00048400,
+       0x033, 0x00000003,
+       0x03F, 0x00048400,
+       0x033, 0x00000004,
+       0x03F, 0x00048400,
+       0x033, 0x00000005,
+       0x03F, 0x00048400,
+       0x033, 0x00000006,
+       0x03F, 0x00048400,
+       0x033, 0x00000007,
+       0x03F, 0x00048400,
+       0x0EE, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00080000,
+       0x033, 0x00000000,
+       0x03F, 0x00048400,
+       0x033, 0x00000001,
+       0x03F, 0x00048400,
+       0x033, 0x00000002,
+       0x03F, 0x00048400,
+       0x033, 0x00000003,
+       0x03F, 0x00048400,
+       0x033, 0x00000004,
+       0x03F, 0x00048400,
+       0x033, 0x00000005,
+       0x03F, 0x00048400,
+       0x033, 0x00000006,
+       0x03F, 0x00048400,
+       0x033, 0x00000007,
+       0x03F, 0x00048400,
+       0x0EE, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00080000,
+       0x033, 0x00000000,
+       0x03F, 0x00048400,
+       0x033, 0x00000001,
+       0x03F, 0x00048400,
+       0x033, 0x00000002,
+       0x03F, 0x00048400,
+       0x033, 0x00000003,
+       0x03F, 0x00048400,
+       0x033, 0x00000004,
+       0x03F, 0x00048400,
+       0x033, 0x00000005,
+       0x03F, 0x00048400,
+       0x033, 0x00000006,
+       0x03F, 0x00048400,
+       0x033, 0x00000007,
+       0x03F, 0x00048400,
+       0x0EE, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EE, 0x00080000,
+       0x033, 0x00000000,
+       0x03F, 0x00048400,
+       0x033, 0x00000001,
+       0x03F, 0x00048400,
+       0x033, 0x00000002,
+       0x03F, 0x00048400,
+       0x033, 0x00000003,
+       0x03F, 0x00048400,
+       0x033, 0x00000004,
+       0x03F, 0x00048400,
+       0x033, 0x00000005,
+       0x03F, 0x00048400,
+       0x033, 0x00000006,
+       0x03F, 0x00048400,
+       0x033, 0x00000007,
+       0x03F, 0x00048400,
+       0x0EE, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x070, 0x00008000,
+       0x075, 0x000027DA,
+       0x076, 0x00006997,
+       0x077, 0x00070418,
+       0x078, 0x000BB000,
+       0x07D, 0x00007600,
+       0x07F, 0x00000000,
+       0x06A, 0x000F4C00,
+       0x065, 0x00082030,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x070, 0x00008000,
+       0x075, 0x000027DA,
+       0x076, 0x00006997,
+       0x077, 0x00070418,
+       0x078, 0x000BB000,
+       0x07D, 0x00007600,
+       0x07F, 0x00000000,
+       0x06A, 0x000F4C00,
+       0x065, 0x00082030,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x070, 0x00008000,
+       0x075, 0x000027DA,
+       0x076, 0x00006997,
+       0x077, 0x00070418,
+       0x078, 0x000BB000,
+       0x07D, 0x00007600,
+       0x07F, 0x00000000,
+       0x06A, 0x000F4C00,
+       0x065, 0x00082030,
+       0xA0000000,     0x00000000,
+       0x070, 0x00008000,
+       0x075, 0x000027DA,
+       0x076, 0x00006997,
+       0x077, 0x00070418,
+       0x078, 0x000BB000,
+       0x07D, 0x00007600,
+       0x07F, 0x00000000,
+       0x06A, 0x000F4C00,
+       0x065, 0x00082030,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00008000,
+       0x033, 0x00000000,
+       0x03F, 0x00051427,
+       0x033, 0x00000001,
+       0x03F, 0x00051446,
+       0x033, 0x00000002,
+       0x03F, 0x00051449,
+       0x033, 0x00000003,
+       0x03F, 0x0005144C,
+       0x033, 0x00000004,
+       0x03F, 0x00051C69,
+       0x033, 0x00000005,
+       0x03F, 0x00051C6C,
+       0x033, 0x00000006,
+       0x03F, 0x00051C8D,
+       0x033, 0x00000007,
+       0x03F, 0x00051CEB,
+       0x033, 0x00000008,
+       0x03F, 0x00051CEE,
+       0x033, 0x00000009,
+       0x03F, 0x00051CF1,
+       0x033, 0x0000000A,
+       0x03F, 0x00051CF4,
+       0x0EE, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00008000,
+       0x033, 0x00000000,
+       0x03F, 0x00051427,
+       0x033, 0x00000001,
+       0x03F, 0x00051446,
+       0x033, 0x00000002,
+       0x03F, 0x00051449,
+       0x033, 0x00000003,
+       0x03F, 0x0005144C,
+       0x033, 0x00000004,
+       0x03F, 0x00051C69,
+       0x033, 0x00000005,
+       0x03F, 0x00051C6C,
+       0x033, 0x00000006,
+       0x03F, 0x00051C8D,
+       0x033, 0x00000007,
+       0x03F, 0x00051CEB,
+       0x033, 0x00000008,
+       0x03F, 0x00051CEE,
+       0x033, 0x00000009,
+       0x03F, 0x00051CF1,
+       0x033, 0x0000000A,
+       0x03F, 0x00051CF4,
+       0x0EE, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00008000,
+       0x033, 0x00000000,
+       0x03F, 0x00051427,
+       0x033, 0x00000001,
+       0x03F, 0x00051446,
+       0x033, 0x00000002,
+       0x03F, 0x00051449,
+       0x033, 0x00000003,
+       0x03F, 0x0005144C,
+       0x033, 0x00000004,
+       0x03F, 0x00051C69,
+       0x033, 0x00000005,
+       0x03F, 0x00051C6C,
+       0x033, 0x00000006,
+       0x03F, 0x00051C8D,
+       0x033, 0x00000007,
+       0x03F, 0x00051CEB,
+       0x033, 0x00000008,
+       0x03F, 0x00051CEE,
+       0x033, 0x00000009,
+       0x03F, 0x00051CF1,
+       0x033, 0x0000000A,
+       0x03F, 0x00051CF4,
+       0x0EE, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EE, 0x00008000,
+       0x033, 0x00000000,
+       0x03F, 0x00051427,
+       0x033, 0x00000001,
+       0x03F, 0x00051446,
+       0x033, 0x00000002,
+       0x03F, 0x00051449,
+       0x033, 0x00000003,
+       0x03F, 0x0005144C,
+       0x033, 0x00000004,
+       0x03F, 0x00051C69,
+       0x033, 0x00000005,
+       0x03F, 0x00051C6C,
+       0x033, 0x00000006,
+       0x03F, 0x00051C8D,
+       0x033, 0x00000007,
+       0x03F, 0x00051CEB,
+       0x033, 0x00000008,
+       0x03F, 0x00051CEE,
+       0x033, 0x00000009,
+       0x03F, 0x00051CF1,
+       0x033, 0x0000000A,
+       0x03F, 0x00051CF4,
+       0x0EE, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000010,
+       0x033, 0x00000000,
+       0x008, 0x0009C060,
+       0x033, 0x00000001,
+       0x008, 0x0009C060,
+       0x0EF, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000010,
+       0x033, 0x00000000,
+       0x008, 0x0009C060,
+       0x033, 0x00000001,
+       0x008, 0x0009C060,
+       0x0EF, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00000010,
+       0x033, 0x00000000,
+       0x008, 0x0009C060,
+       0x033, 0x00000001,
+       0x008, 0x0009C060,
+       0x0EF, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EF, 0x00000010,
+       0x033, 0x00000000,
+       0x008, 0x0009C060,
+       0x033, 0x00000001,
+       0x008, 0x0009C060,
+       0x0EF, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00080000,
+       0x033, 0x00000024,
+       0x03E, 0x0000003F,
+       0x03F, 0x00060FDE,
+       0x0EF, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00080000,
+       0x033, 0x00000024,
+       0x03E, 0x0000003F,
+       0x03F, 0x00060FDE,
+       0x0EF, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00080000,
+       0x033, 0x00000024,
+       0x03E, 0x0000003F,
+       0x03F, 0x00060FDE,
+       0x0EF, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EF, 0x00080000,
+       0x033, 0x00000024,
+       0x03E, 0x0000003F,
+       0x03F, 0x00060FDE,
+       0x0EF, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00080000,
+       0x033, 0x00000025,
+       0x03E, 0x00000037,
+       0x03F, 0x0007EFCE,
+       0x0EF, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00080000,
+       0x033, 0x00000025,
+       0x03E, 0x00000037,
+       0x03F, 0x0007EFCE,
+       0x0EF, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00080000,
+       0x033, 0x00000025,
+       0x03E, 0x00000037,
+       0x03F, 0x0007EFCE,
+       0x0EF, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EF, 0x00080000,
+       0x033, 0x00000025,
+       0x03E, 0x00000037,
+       0x03F, 0x0007EFCE,
+       0x0EF, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00080000,
+       0x033, 0x00000026,
+       0x03E, 0x00000037,
+       0x03F, 0x0005EFCE,
+       0x0EF, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00080000,
+       0x033, 0x00000026,
+       0x03E, 0x00000037,
+       0x03F, 0x0005EFCE,
+       0x0EF, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EF, 0x00080000,
+       0x033, 0x00000026,
+       0x03E, 0x00000037,
+       0x03F, 0x0005EFCE,
+       0x0EF, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EF, 0x00080000,
+       0x033, 0x00000026,
+       0x03E, 0x00000037,
+       0x03F, 0x0005EFCE,
+       0x0EF, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00001000,
+       0x033, 0x00000004,
+       0x03F, 0x00001EC1,
+       0x0EE, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00001000,
+       0x033, 0x00000004,
+       0x03F, 0x00001EC1,
+       0x0EE, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00001000,
+       0x033, 0x00000004,
+       0x03F, 0x00001EC1,
+       0x0EE, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EE, 0x00001000,
+       0x033, 0x00000004,
+       0x03F, 0x00001EC1,
+       0x0EE, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00001000,
+       0x033, 0x00000005,
+       0x03F, 0x00001ECF,
+       0x0EE, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00001000,
+       0x033, 0x00000005,
+       0x03F, 0x00001ECF,
+       0x0EE, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00001000,
+       0x033, 0x00000005,
+       0x03F, 0x00001ECF,
+       0x0EE, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EE, 0x00001000,
+       0x033, 0x00000005,
+       0x03F, 0x00001ECF,
+       0x0EE, 0x00000000,
+       0xB0000000,     0x00000000,
+       0x80001005,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00001000,
+       0x033, 0x00000006,
+       0x03F, 0x00001F9D,
+       0x0EE, 0x00000000,
+       0x90001004,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00001000,
+       0x033, 0x00000006,
+       0x03F, 0x00001F9D,
+       0x0EE, 0x00000000,
+       0x90000400,     0x00000000,     0x40000000,     0x00000000,
+       0x0EE, 0x00001000,
+       0x033, 0x00000006,
+       0x03F, 0x00001F9D,
+       0x0EE, 0x00000000,
+       0xA0000000,     0x00000000,
+       0x0EE, 0x00001000,
+       0x033, 0x00000006,
+       0x03F, 0x00001F9D,
+       0x0EE, 0x00000000,
+       0xB0000000,     0x00000000,
+
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8821c_rf_a, A);
+
+static const struct rtw_txpwr_lmt_cfg_pair rtw8821c_txpwr_lmt_type0[] = {
+       { 0, 0, 0, 0, 1, 30, },
+       { 2, 0, 0, 0, 1, 30, },
+       { 0, 0, 0, 0, 2, 32, },
+       { 2, 0, 0, 0, 2, 30, },
+       { 0, 0, 0, 0, 3, 32, },
+       { 2, 0, 0, 0, 3, 30, },
+       { 0, 0, 0, 0, 4, 32, },
+       { 2, 0, 0, 0, 4, 30, },
+       { 0, 0, 0, 0, 5, 32, },
+       { 2, 0, 0, 0, 5, 30, },
+       { 0, 0, 0, 0, 6, 32, },
+       { 2, 0, 0, 0, 6, 30, },
+       { 0, 0, 0, 0, 7, 32, },
+       { 2, 0, 0, 0, 7, 30, },
+       { 0, 0, 0, 0, 8, 32, },
+       { 2, 0, 0, 0, 8, 30, },
+       { 0, 0, 0, 0, 9, 32, },
+       { 2, 0, 0, 0, 9, 30, },
+       { 0, 0, 0, 0, 10, 32, },
+       { 2, 0, 0, 0, 10, 30, },
+       { 0, 0, 0, 0, 11, 32, },
+       { 2, 0, 0, 0, 11, 30, },
+       { 0, 0, 0, 0, 12, 24, },
+       { 2, 0, 0, 0, 12, 30, },
+       { 0, 0, 0, 0, 13, 16, },
+       { 2, 0, 0, 0, 13, 30, },
+       { 0, 0, 0, 0, 14, 63, },
+       { 2, 0, 0, 0, 14, 63, },
+       { 0, 0, 0, 1, 1, 30, },
+       { 2, 0, 0, 1, 1, 30, },
+       { 0, 0, 0, 1, 2, 32, },
+       { 2, 0, 0, 1, 2, 30, },
+       { 0, 0, 0, 1, 3, 34, },
+       { 2, 0, 0, 1, 3, 30, },
+       { 0, 0, 0, 1, 4, 34, },
+       { 2, 0, 0, 1, 4, 30, },
+       { 0, 0, 0, 1, 5, 34, },
+       { 2, 0, 0, 1, 5, 30, },
+       { 0, 0, 0, 1, 6, 34, },
+       { 2, 0, 0, 1, 6, 30, },
+       { 0, 0, 0, 1, 7, 34, },
+       { 2, 0, 0, 1, 7, 30, },
+       { 0, 0, 0, 1, 8, 34, },
+       { 2, 0, 0, 1, 8, 30, },
+       { 0, 0, 0, 1, 9, 34, },
+       { 2, 0, 0, 1, 9, 30, },
+       { 0, 0, 0, 1, 10, 32, },
+       { 2, 0, 0, 1, 10, 30, },
+       { 0, 0, 0, 1, 11, 30, },
+       { 2, 0, 0, 1, 11, 30, },
+       { 0, 0, 0, 1, 12, 28, },
+       { 2, 0, 0, 1, 12, 30, },
+       { 0, 0, 0, 1, 13, 16, },
+       { 2, 0, 0, 1, 13, 30, },
+       { 0, 0, 0, 1, 14, 63, },
+       { 2, 0, 0, 1, 14, 63, },
+       { 0, 0, 0, 2, 1, 26, },
+       { 2, 0, 0, 2, 1, 30, },
+       { 0, 0, 0, 2, 2, 30, },
+       { 2, 0, 0, 2, 2, 30, },
+       { 0, 0, 0, 2, 3, 32, },
+       { 2, 0, 0, 2, 3, 30, },
+       { 0, 0, 0, 2, 4, 34, },
+       { 2, 0, 0, 2, 4, 30, },
+       { 0, 0, 0, 2, 5, 34, },
+       { 2, 0, 0, 2, 5, 30, },
+       { 0, 0, 0, 2, 6, 34, },
+       { 2, 0, 0, 2, 6, 30, },
+       { 0, 0, 0, 2, 7, 34, },
+       { 2, 0, 0, 2, 7, 30, },
+       { 0, 0, 0, 2, 8, 34, },
+       { 2, 0, 0, 2, 8, 30, },
+       { 0, 0, 0, 2, 9, 32, },
+       { 2, 0, 0, 2, 9, 30, },
+       { 0, 0, 0, 2, 10, 30, },
+       { 2, 0, 0, 2, 10, 30, },
+       { 0, 0, 0, 2, 11, 28, },
+       { 2, 0, 0, 2, 11, 30, },
+       { 0, 0, 0, 2, 12, 26, },
+       { 2, 0, 0, 2, 12, 30, },
+       { 0, 0, 0, 2, 13, 12, },
+       { 2, 0, 0, 2, 13, 30, },
+       { 0, 0, 0, 2, 14, 63, },
+       { 2, 0, 0, 2, 14, 63, },
+       { 0, 0, 1, 2, 1, 63, },
+       { 2, 0, 1, 2, 1, 63, },
+       { 0, 0, 1, 2, 2, 63, },
+       { 2, 0, 1, 2, 2, 63, },
+       { 0, 0, 1, 2, 3, 26, },
+       { 2, 0, 1, 2, 3, 30, },
+       { 0, 0, 1, 2, 4, 26, },
+       { 2, 0, 1, 2, 4, 30, },
+       { 0, 0, 1, 2, 5, 30, },
+       { 2, 0, 1, 2, 5, 30, },
+       { 0, 0, 1, 2, 6, 30, },
+       { 2, 0, 1, 2, 6, 30, },
+       { 0, 0, 1, 2, 7, 30, },
+       { 2, 0, 1, 2, 7, 30, },
+       { 0, 0, 1, 2, 8, 26, },
+       { 2, 0, 1, 2, 8, 30, },
+       { 0, 0, 1, 2, 9, 26, },
+       { 2, 0, 1, 2, 9, 30, },
+       { 0, 0, 1, 2, 10, 28, },
+       { 2, 0, 1, 2, 10, 30, },
+       { 0, 0, 1, 2, 11, 20, },
+       { 2, 0, 1, 2, 11, 30, },
+       { 0, 0, 1, 2, 12, 63, },
+       { 2, 0, 1, 2, 12, 63, },
+       { 0, 0, 1, 2, 13, 63, },
+       { 2, 0, 1, 2, 13, 63, },
+       { 0, 0, 1, 2, 14, 63, },
+       { 2, 0, 1, 2, 14, 63, },
+       { 0, 1, 0, 1, 36, 31, },
+       { 2, 1, 0, 1, 36, 32, },
+       { 0, 1, 0, 1, 40, 33, },
+       { 2, 1, 0, 1, 40, 32, },
+       { 0, 1, 0, 1, 44, 33, },
+       { 2, 1, 0, 1, 44, 32, },
+       { 0, 1, 0, 1, 48, 31, },
+       { 2, 1, 0, 1, 48, 32, },
+       { 0, 1, 0, 1, 52, 33, },
+       { 2, 1, 0, 1, 52, 32, },
+       { 0, 1, 0, 1, 56, 33, },
+       { 2, 1, 0, 1, 56, 32, },
+       { 0, 1, 0, 1, 60, 33, },
+       { 2, 1, 0, 1, 60, 32, },
+       { 0, 1, 0, 1, 64, 30, },
+       { 2, 1, 0, 1, 64, 32, },
+       { 0, 1, 0, 1, 100, 30, },
+       { 2, 1, 0, 1, 100, 32, },
+       { 0, 1, 0, 1, 104, 33, },
+       { 2, 1, 0, 1, 104, 32, },
+       { 0, 1, 0, 1, 108, 33, },
+       { 2, 1, 0, 1, 108, 32, },
+       { 0, 1, 0, 1, 112, 33, },
+       { 2, 1, 0, 1, 112, 32, },
+       { 0, 1, 0, 1, 116, 33, },
+       { 2, 1, 0, 1, 116, 32, },
+       { 0, 1, 0, 1, 120, 33, },
+       { 2, 1, 0, 1, 120, 32, },
+       { 0, 1, 0, 1, 124, 33, },
+       { 2, 1, 0, 1, 124, 32, },
+       { 0, 1, 0, 1, 128, 33, },
+       { 2, 1, 0, 1, 128, 32, },
+       { 0, 1, 0, 1, 132, 33, },
+       { 2, 1, 0, 1, 132, 32, },
+       { 0, 1, 0, 1, 136, 33, },
+       { 2, 1, 0, 1, 136, 32, },
+       { 0, 1, 0, 1, 140, 31, },
+       { 2, 1, 0, 1, 140, 32, },
+       { 0, 1, 0, 1, 144, 30, },
+       { 2, 1, 0, 1, 144, 63, },
+       { 0, 1, 0, 1, 149, 33, },
+       { 2, 1, 0, 1, 149, 63, },
+       { 0, 1, 0, 1, 153, 33, },
+       { 2, 1, 0, 1, 153, 63, },
+       { 0, 1, 0, 1, 157, 33, },
+       { 2, 1, 0, 1, 157, 63, },
+       { 0, 1, 0, 1, 161, 33, },
+       { 2, 1, 0, 1, 161, 63, },
+       { 0, 1, 0, 1, 165, 33, },
+       { 2, 1, 0, 1, 165, 63, },
+       { 0, 1, 0, 2, 36, 30, },
+       { 2, 1, 0, 2, 36, 32, },
+       { 0, 1, 0, 2, 40, 33, },
+       { 2, 1, 0, 2, 40, 32, },
+       { 0, 1, 0, 2, 44, 33, },
+       { 2, 1, 0, 2, 44, 32, },
+       { 0, 1, 0, 2, 48, 33, },
+       { 2, 1, 0, 2, 48, 32, },
+       { 0, 1, 0, 2, 52, 33, },
+       { 2, 1, 0, 2, 52, 32, },
+       { 0, 1, 0, 2, 56, 33, },
+       { 2, 1, 0, 2, 56, 32, },
+       { 0, 1, 0, 2, 60, 33, },
+       { 2, 1, 0, 2, 60, 32, },
+       { 0, 1, 0, 2, 64, 30, },
+       { 2, 1, 0, 2, 64, 32, },
+       { 0, 1, 0, 2, 100, 30, },
+       { 2, 1, 0, 2, 100, 32, },
+       { 0, 1, 0, 2, 104, 33, },
+       { 2, 1, 0, 2, 104, 32, },
+       { 0, 1, 0, 2, 108, 33, },
+       { 2, 1, 0, 2, 108, 32, },
+       { 0, 1, 0, 2, 112, 33, },
+       { 2, 1, 0, 2, 112, 32, },
+       { 0, 1, 0, 2, 116, 33, },
+       { 2, 1, 0, 2, 116, 32, },
+       { 0, 1, 0, 2, 120, 33, },
+       { 2, 1, 0, 2, 120, 32, },
+       { 0, 1, 0, 2, 124, 33, },
+       { 2, 1, 0, 2, 124, 32, },
+       { 0, 1, 0, 2, 128, 33, },
+       { 2, 1, 0, 2, 128, 32, },
+       { 0, 1, 0, 2, 132, 33, },
+       { 2, 1, 0, 2, 132, 32, },
+       { 0, 1, 0, 2, 136, 33, },
+       { 2, 1, 0, 2, 136, 32, },
+       { 0, 1, 0, 2, 140, 29, },
+       { 2, 1, 0, 2, 140, 32, },
+       { 0, 1, 0, 2, 144, 27, },
+       { 2, 1, 0, 2, 144, 63, },
+       { 0, 1, 0, 2, 149, 33, },
+       { 2, 1, 0, 2, 149, 63, },
+       { 0, 1, 0, 2, 153, 33, },
+       { 2, 1, 0, 2, 153, 63, },
+       { 0, 1, 0, 2, 157, 33, },
+       { 2, 1, 0, 2, 157, 63, },
+       { 0, 1, 0, 2, 161, 33, },
+       { 2, 1, 0, 2, 161, 63, },
+       { 0, 1, 0, 2, 165, 33, },
+       { 2, 1, 0, 2, 165, 63, },
+       { 0, 1, 1, 2, 38, 22, },
+       { 2, 1, 1, 2, 38, 32, },
+       { 0, 1, 1, 2, 46, 32, },
+       { 2, 1, 1, 2, 46, 32, },
+       { 0, 1, 1, 2, 54, 32, },
+       { 2, 1, 1, 2, 54, 32, },
+       { 0, 1, 1, 2, 62, 23, },
+       { 2, 1, 1, 2, 62, 32, },
+       { 0, 1, 1, 2, 102, 21, },
+       { 2, 1, 1, 2, 102, 32, },
+       { 0, 1, 1, 2, 110, 32, },
+       { 2, 1, 1, 2, 110, 32, },
+       { 0, 1, 1, 2, 118, 32, },
+       { 2, 1, 1, 2, 118, 32, },
+       { 0, 1, 1, 2, 126, 32, },
+       { 2, 1, 1, 2, 126, 32, },
+       { 0, 1, 1, 2, 134, 32, },
+       { 2, 1, 1, 2, 134, 32, },
+       { 0, 1, 1, 2, 142, 29, },
+       { 2, 1, 1, 2, 142, 63, },
+       { 0, 1, 1, 2, 151, 32, },
+       { 2, 1, 1, 2, 151, 63, },
+       { 0, 1, 1, 2, 159, 32, },
+       { 2, 1, 1, 2, 159, 63, },
+       { 0, 1, 2, 4, 42, 19, },
+       { 2, 1, 2, 4, 42, 32, },
+       { 0, 1, 2, 4, 58, 22, },
+       { 2, 1, 2, 4, 58, 32, },
+       { 0, 1, 2, 4, 106, 18, },
+       { 2, 1, 2, 4, 106, 32, },
+       { 0, 1, 2, 4, 122, 32, },
+       { 2, 1, 2, 4, 122, 32, },
+       { 0, 1, 2, 4, 138, 28, },
+       { 2, 1, 2, 4, 138, 63, },
+       { 0, 1, 2, 4, 155, 32, },
+       { 2, 1, 2, 4, 155, 63, },
+       { 1, 0, 0, 0, 1, 34, },
+       { 3, 0, 0, 0, 1, 30, },
+       { 4, 0, 0, 0, 1, 34, },
+       { 5, 0, 0, 0, 1, 30, },
+       { 6, 0, 0, 0, 1, 30, },
+       { 7, 0, 0, 0, 1, 30, },
+       { 1, 0, 0, 0, 2, 34, },
+       { 3, 0, 0, 0, 2, 32, },
+       { 4, 0, 0, 0, 2, 34, },
+       { 5, 0, 0, 0, 2, 30, },
+       { 6, 0, 0, 0, 2, 32, },
+       { 7, 0, 0, 0, 2, 30, },
+       { 1, 0, 0, 0, 3, 34, },
+       { 3, 0, 0, 0, 3, 32, },
+       { 4, 0, 0, 0, 3, 34, },
+       { 5, 0, 0, 0, 3, 30, },
+       { 6, 0, 0, 0, 3, 32, },
+       { 7, 0, 0, 0, 3, 30, },
+       { 1, 0, 0, 0, 4, 34, },
+       { 3, 0, 0, 0, 4, 32, },
+       { 4, 0, 0, 0, 4, 34, },
+       { 5, 0, 0, 0, 4, 30, },
+       { 6, 0, 0, 0, 4, 32, },
+       { 7, 0, 0, 0, 4, 30, },
+       { 1, 0, 0, 0, 5, 34, },
+       { 3, 0, 0, 0, 5, 32, },
+       { 4, 0, 0, 0, 5, 34, },
+       { 5, 0, 0, 0, 5, 30, },
+       { 6, 0, 0, 0, 5, 32, },
+       { 7, 0, 0, 0, 5, 30, },
+       { 1, 0, 0, 0, 6, 34, },
+       { 3, 0, 0, 0, 6, 32, },
+       { 4, 0, 0, 0, 6, 34, },
+       { 5, 0, 0, 0, 6, 30, },
+       { 6, 0, 0, 0, 6, 32, },
+       { 7, 0, 0, 0, 6, 30, },
+       { 1, 0, 0, 0, 7, 34, },
+       { 3, 0, 0, 0, 7, 32, },
+       { 4, 0, 0, 0, 7, 34, },
+       { 5, 0, 0, 0, 7, 30, },
+       { 6, 0, 0, 0, 7, 32, },
+       { 7, 0, 0, 0, 7, 30, },
+       { 1, 0, 0, 0, 8, 34, },
+       { 3, 0, 0, 0, 8, 32, },
+       { 4, 0, 0, 0, 8, 34, },
+       { 5, 0, 0, 0, 8, 30, },
+       { 6, 0, 0, 0, 8, 32, },
+       { 7, 0, 0, 0, 8, 30, },
+       { 1, 0, 0, 0, 9, 34, },
+       { 3, 0, 0, 0, 9, 32, },
+       { 4, 0, 0, 0, 9, 34, },
+       { 5, 0, 0, 0, 9, 30, },
+       { 6, 0, 0, 0, 9, 32, },
+       { 7, 0, 0, 0, 9, 30, },
+       { 1, 0, 0, 0, 10, 34, },
+       { 3, 0, 0, 0, 10, 32, },
+       { 4, 0, 0, 0, 10, 34, },
+       { 5, 0, 0, 0, 10, 30, },
+       { 6, 0, 0, 0, 10, 32, },
+       { 7, 0, 0, 0, 10, 30, },
+       { 1, 0, 0, 0, 11, 34, },
+       { 3, 0, 0, 0, 11, 32, },
+       { 4, 0, 0, 0, 11, 34, },
+       { 5, 0, 0, 0, 11, 30, },
+       { 6, 0, 0, 0, 11, 32, },
+       { 7, 0, 0, 0, 11, 30, },
+       { 1, 0, 0, 0, 12, 34, },
+       { 3, 0, 0, 0, 12, 24, },
+       { 4, 0, 0, 0, 12, 34, },
+       { 5, 0, 0, 0, 12, 30, },
+       { 6, 0, 0, 0, 12, 24, },
+       { 7, 0, 0, 0, 12, 30, },
+       { 1, 0, 0, 0, 13, 34, },
+       { 3, 0, 0, 0, 13, 16, },
+       { 4, 0, 0, 0, 13, 34, },
+       { 5, 0, 0, 0, 13, 30, },
+       { 6, 0, 0, 0, 13, 16, },
+       { 7, 0, 0, 0, 13, 30, },
+       { 1, 0, 0, 0, 14, 34, },
+       { 3, 0, 0, 0, 14, 63, },
+       { 4, 0, 0, 0, 14, 63, },
+       { 5, 0, 0, 0, 14, 63, },
+       { 6, 0, 0, 0, 14, 63, },
+       { 7, 0, 0, 0, 14, 63, },
+       { 1, 0, 0, 1, 1, 34, },
+       { 3, 0, 0, 1, 1, 30, },
+       { 4, 0, 0, 1, 1, 32, },
+       { 5, 0, 0, 1, 1, 30, },
+       { 6, 0, 0, 1, 1, 30, },
+       { 7, 0, 0, 1, 1, 30, },
+       { 1, 0, 0, 1, 2, 34, },
+       { 3, 0, 0, 1, 2, 32, },
+       { 4, 0, 0, 1, 2, 34, },
+       { 5, 0, 0, 1, 2, 30, },
+       { 6, 0, 0, 1, 2, 32, },
+       { 7, 0, 0, 1, 2, 30, },
+       { 1, 0, 0, 1, 3, 34, },
+       { 3, 0, 0, 1, 3, 34, },
+       { 4, 0, 0, 1, 3, 34, },
+       { 5, 0, 0, 1, 3, 30, },
+       { 6, 0, 0, 1, 3, 34, },
+       { 7, 0, 0, 1, 3, 30, },
+       { 1, 0, 0, 1, 4, 34, },
+       { 3, 0, 0, 1, 4, 34, },
+       { 4, 0, 0, 1, 4, 34, },
+       { 5, 0, 0, 1, 4, 30, },
+       { 6, 0, 0, 1, 4, 34, },
+       { 7, 0, 0, 1, 4, 30, },
+       { 1, 0, 0, 1, 5, 34, },
+       { 3, 0, 0, 1, 5, 34, },
+       { 4, 0, 0, 1, 5, 34, },
+       { 5, 0, 0, 1, 5, 30, },
+       { 6, 0, 0, 1, 5, 34, },
+       { 7, 0, 0, 1, 5, 30, },
+       { 1, 0, 0, 1, 6, 34, },
+       { 3, 0, 0, 1, 6, 34, },
+       { 4, 0, 0, 1, 6, 34, },
+       { 5, 0, 0, 1, 6, 30, },
+       { 6, 0, 0, 1, 6, 34, },
+       { 7, 0, 0, 1, 6, 30, },
+       { 1, 0, 0, 1, 7, 34, },
+       { 3, 0, 0, 1, 7, 34, },
+       { 4, 0, 0, 1, 7, 34, },
+       { 5, 0, 0, 1, 7, 30, },
+       { 6, 0, 0, 1, 7, 34, },
+       { 7, 0, 0, 1, 7, 30, },
+       { 1, 0, 0, 1, 8, 34, },
+       { 3, 0, 0, 1, 8, 34, },
+       { 4, 0, 0, 1, 8, 34, },
+       { 5, 0, 0, 1, 8, 30, },
+       { 6, 0, 0, 1, 8, 34, },
+       { 7, 0, 0, 1, 8, 30, },
+       { 1, 0, 0, 1, 9, 34, },
+       { 3, 0, 0, 1, 9, 34, },
+       { 4, 0, 0, 1, 9, 34, },
+       { 5, 0, 0, 1, 9, 30, },
+       { 6, 0, 0, 1, 9, 34, },
+       { 7, 0, 0, 1, 9, 30, },
+       { 1, 0, 0, 1, 10, 34, },
+       { 3, 0, 0, 1, 10, 32, },
+       { 4, 0, 0, 1, 10, 34, },
+       { 5, 0, 0, 1, 10, 30, },
+       { 6, 0, 0, 1, 10, 32, },
+       { 7, 0, 0, 1, 10, 30, },
+       { 1, 0, 0, 1, 11, 34, },
+       { 3, 0, 0, 1, 11, 30, },
+       { 4, 0, 0, 1, 11, 34, },
+       { 5, 0, 0, 1, 11, 30, },
+       { 6, 0, 0, 1, 11, 30, },
+       { 7, 0, 0, 1, 11, 30, },
+       { 1, 0, 0, 1, 12, 34, },
+       { 3, 0, 0, 1, 12, 28, },
+       { 4, 0, 0, 1, 12, 34, },
+       { 5, 0, 0, 1, 12, 30, },
+       { 6, 0, 0, 1, 12, 28, },
+       { 7, 0, 0, 1, 12, 30, },
+       { 1, 0, 0, 1, 13, 34, },
+       { 3, 0, 0, 1, 13, 16, },
+       { 4, 0, 0, 1, 13, 32, },
+       { 5, 0, 0, 1, 13, 30, },
+       { 6, 0, 0, 1, 13, 16, },
+       { 7, 0, 0, 1, 13, 30, },
+       { 1, 0, 0, 1, 14, 63, },
+       { 3, 0, 0, 1, 14, 63, },
+       { 4, 0, 0, 1, 14, 63, },
+       { 5, 0, 0, 1, 14, 63, },
+       { 6, 0, 0, 1, 14, 63, },
+       { 7, 0, 0, 1, 14, 63, },
+       { 1, 0, 0, 2, 1, 34, },
+       { 3, 0, 0, 2, 1, 26, },
+       { 4, 0, 0, 2, 1, 32, },
+       { 5, 0, 0, 2, 1, 30, },
+       { 6, 0, 0, 2, 1, 26, },
+       { 7, 0, 0, 2, 1, 30, },
+       { 1, 0, 0, 2, 2, 34, },
+       { 3, 0, 0, 2, 2, 30, },
+       { 4, 0, 0, 2, 2, 34, },
+       { 5, 0, 0, 2, 2, 30, },
+       { 6, 0, 0, 2, 2, 30, },
+       { 7, 0, 0, 2, 2, 30, },
+       { 1, 0, 0, 2, 3, 34, },
+       { 3, 0, 0, 2, 3, 32, },
+       { 4, 0, 0, 2, 3, 34, },
+       { 5, 0, 0, 2, 3, 30, },
+       { 6, 0, 0, 2, 3, 32, },
+       { 7, 0, 0, 2, 3, 30, },
+       { 1, 0, 0, 2, 4, 34, },
+       { 3, 0, 0, 2, 4, 34, },
+       { 4, 0, 0, 2, 4, 34, },
+       { 5, 0, 0, 2, 4, 30, },
+       { 6, 0, 0, 2, 4, 34, },
+       { 7, 0, 0, 2, 4, 30, },
+       { 1, 0, 0, 2, 5, 34, },
+       { 3, 0, 0, 2, 5, 34, },
+       { 4, 0, 0, 2, 5, 34, },
+       { 5, 0, 0, 2, 5, 30, },
+       { 6, 0, 0, 2, 5, 34, },
+       { 7, 0, 0, 2, 5, 30, },
+       { 1, 0, 0, 2, 6, 34, },
+       { 3, 0, 0, 2, 6, 34, },
+       { 4, 0, 0, 2, 6, 34, },
+       { 5, 0, 0, 2, 6, 30, },
+       { 6, 0, 0, 2, 6, 34, },
+       { 7, 0, 0, 2, 6, 30, },
+       { 1, 0, 0, 2, 7, 34, },
+       { 3, 0, 0, 2, 7, 34, },
+       { 4, 0, 0, 2, 7, 34, },
+       { 5, 0, 0, 2, 7, 30, },
+       { 6, 0, 0, 2, 7, 34, },
+       { 7, 0, 0, 2, 7, 30, },
+       { 1, 0, 0, 2, 8, 34, },
+       { 3, 0, 0, 2, 8, 34, },
+       { 4, 0, 0, 2, 8, 34, },
+       { 5, 0, 0, 2, 8, 30, },
+       { 6, 0, 0, 2, 8, 34, },
+       { 7, 0, 0, 2, 8, 30, },
+       { 1, 0, 0, 2, 9, 34, },
+       { 3, 0, 0, 2, 9, 32, },
+       { 4, 0, 0, 2, 9, 34, },
+       { 5, 0, 0, 2, 9, 30, },
+       { 6, 0, 0, 2, 9, 32, },
+       { 7, 0, 0, 2, 9, 30, },
+       { 1, 0, 0, 2, 10, 34, },
+       { 3, 0, 0, 2, 10, 30, },
+       { 4, 0, 0, 2, 10, 34, },
+       { 5, 0, 0, 2, 10, 30, },
+       { 6, 0, 0, 2, 10, 30, },
+       { 7, 0, 0, 2, 10, 30, },
+       { 1, 0, 0, 2, 11, 34, },
+       { 3, 0, 0, 2, 11, 28, },
+       { 4, 0, 0, 2, 11, 34, },
+       { 5, 0, 0, 2, 11, 30, },
+       { 6, 0, 0, 2, 11, 28, },
+       { 7, 0, 0, 2, 11, 30, },
+       { 1, 0, 0, 2, 12, 34, },
+       { 3, 0, 0, 2, 12, 26, },
+       { 4, 0, 0, 2, 12, 34, },
+       { 5, 0, 0, 2, 12, 30, },
+       { 6, 0, 0, 2, 12, 26, },
+       { 7, 0, 0, 2, 12, 30, },
+       { 1, 0, 0, 2, 13, 34, },
+       { 3, 0, 0, 2, 13, 12, },
+       { 4, 0, 0, 2, 13, 32, },
+       { 5, 0, 0, 2, 13, 30, },
+       { 6, 0, 0, 2, 13, 12, },
+       { 7, 0, 0, 2, 13, 30, },
+       { 1, 0, 0, 2, 14, 63, },
+       { 3, 0, 0, 2, 14, 63, },
+       { 4, 0, 0, 2, 14, 63, },
+       { 5, 0, 0, 2, 14, 63, },
+       { 6, 0, 0, 2, 14, 63, },
+       { 7, 0, 0, 2, 14, 63, },
+       { 1, 0, 1, 2, 1, 63, },
+       { 3, 0, 1, 2, 1, 63, },
+       { 4, 0, 1, 2, 1, 63, },
+       { 5, 0, 1, 2, 1, 63, },
+       { 6, 0, 1, 2, 1, 63, },
+       { 7, 0, 1, 2, 1, 63, },
+       { 1, 0, 1, 2, 2, 63, },
+       { 3, 0, 1, 2, 2, 63, },
+       { 4, 0, 1, 2, 2, 63, },
+       { 5, 0, 1, 2, 2, 63, },
+       { 6, 0, 1, 2, 2, 63, },
+       { 7, 0, 1, 2, 2, 63, },
+       { 1, 0, 1, 2, 3, 30, },
+       { 3, 0, 1, 2, 3, 26, },
+       { 4, 0, 1, 2, 3, 30, },
+       { 5, 0, 1, 2, 3, 30, },
+       { 6, 0, 1, 2, 3, 26, },
+       { 7, 0, 1, 2, 3, 30, },
+       { 1, 0, 1, 2, 4, 30, },
+       { 3, 0, 1, 2, 4, 26, },
+       { 4, 0, 1, 2, 4, 30, },
+       { 5, 0, 1, 2, 4, 30, },
+       { 6, 0, 1, 2, 4, 26, },
+       { 7, 0, 1, 2, 4, 30, },
+       { 1, 0, 1, 2, 5, 30, },
+       { 3, 0, 1, 2, 5, 30, },
+       { 4, 0, 1, 2, 5, 30, },
+       { 5, 0, 1, 2, 5, 30, },
+       { 6, 0, 1, 2, 5, 30, },
+       { 7, 0, 1, 2, 5, 30, },
+       { 1, 0, 1, 2, 6, 30, },
+       { 3, 0, 1, 2, 6, 30, },
+       { 4, 0, 1, 2, 6, 30, },
+       { 5, 0, 1, 2, 6, 30, },
+       { 6, 0, 1, 2, 6, 30, },
+       { 7, 0, 1, 2, 6, 30, },
+       { 1, 0, 1, 2, 7, 30, },
+       { 3, 0, 1, 2, 7, 30, },
+       { 4, 0, 1, 2, 7, 30, },
+       { 5, 0, 1, 2, 7, 30, },
+       { 6, 0, 1, 2, 7, 30, },
+       { 7, 0, 1, 2, 7, 30, },
+       { 1, 0, 1, 2, 8, 30, },
+       { 3, 0, 1, 2, 8, 26, },
+       { 4, 0, 1, 2, 8, 30, },
+       { 5, 0, 1, 2, 8, 30, },
+       { 6, 0, 1, 2, 8, 26, },
+       { 7, 0, 1, 2, 8, 30, },
+       { 1, 0, 1, 2, 9, 30, },
+       { 3, 0, 1, 2, 9, 26, },
+       { 4, 0, 1, 2, 9, 30, },
+       { 5, 0, 1, 2, 9, 30, },
+       { 6, 0, 1, 2, 9, 26, },
+       { 7, 0, 1, 2, 9, 30, },
+       { 1, 0, 1, 2, 10, 30, },
+       { 3, 0, 1, 2, 10, 28, },
+       { 4, 0, 1, 2, 10, 30, },
+       { 5, 0, 1, 2, 10, 30, },
+       { 6, 0, 1, 2, 10, 28, },
+       { 7, 0, 1, 2, 10, 30, },
+       { 1, 0, 1, 2, 11, 30, },
+       { 3, 0, 1, 2, 11, 20, },
+       { 4, 0, 1, 2, 11, 30, },
+       { 5, 0, 1, 2, 11, 30, },
+       { 6, 0, 1, 2, 11, 20, },
+       { 7, 0, 1, 2, 11, 30, },
+       { 1, 0, 1, 2, 12, 63, },
+       { 3, 0, 1, 2, 12, 63, },
+       { 4, 0, 1, 2, 12, 63, },
+       { 5, 0, 1, 2, 12, 63, },
+       { 6, 0, 1, 2, 12, 63, },
+       { 7, 0, 1, 2, 12, 63, },
+       { 1, 0, 1, 2, 13, 63, },
+       { 3, 0, 1, 2, 13, 63, },
+       { 4, 0, 1, 2, 13, 63, },
+       { 5, 0, 1, 2, 13, 63, },
+       { 6, 0, 1, 2, 13, 63, },
+       { 7, 0, 1, 2, 13, 63, },
+       { 1, 0, 1, 2, 14, 63, },
+       { 3, 0, 1, 2, 14, 63, },
+       { 4, 0, 1, 2, 14, 63, },
+       { 5, 0, 1, 2, 14, 63, },
+       { 6, 0, 1, 2, 14, 63, },
+       { 7, 0, 1, 2, 14, 63, },
+       { 1, 1, 0, 1, 36, 33, },
+       { 3, 1, 0, 1, 36, 31, },
+       { 4, 1, 0, 1, 36, 29, },
+       { 5, 1, 0, 1, 36, 32, },
+       { 6, 1, 0, 1, 36, 29, },
+       { 7, 1, 0, 1, 36, 27, },
+       { 1, 1, 0, 1, 40, 33, },
+       { 3, 1, 0, 1, 40, 31, },
+       { 4, 1, 0, 1, 40, 28, },
+       { 5, 1, 0, 1, 40, 32, },
+       { 6, 1, 0, 1, 40, 29, },
+       { 7, 1, 0, 1, 40, 27, },
+       { 1, 1, 0, 1, 44, 33, },
+       { 3, 1, 0, 1, 44, 31, },
+       { 4, 1, 0, 1, 44, 28, },
+       { 5, 1, 0, 1, 44, 32, },
+       { 6, 1, 0, 1, 44, 30, },
+       { 7, 1, 0, 1, 44, 27, },
+       { 1, 1, 0, 1, 48, 33, },
+       { 3, 1, 0, 1, 48, 31, },
+       { 4, 1, 0, 1, 48, 27, },
+       { 5, 1, 0, 1, 48, 32, },
+       { 6, 1, 0, 1, 48, 30, },
+       { 7, 1, 0, 1, 48, 27, },
+       { 1, 1, 0, 1, 52, 33, },
+       { 3, 1, 0, 1, 52, 32, },
+       { 4, 1, 0, 1, 52, 16, },
+       { 5, 1, 0, 1, 52, 32, },
+       { 6, 1, 0, 1, 52, 30, },
+       { 7, 1, 0, 1, 52, 27, },
+       { 1, 1, 0, 1, 56, 33, },
+       { 3, 1, 0, 1, 56, 32, },
+       { 4, 1, 0, 1, 56, 33, },
+       { 5, 1, 0, 1, 56, 32, },
+       { 6, 1, 0, 1, 56, 30, },
+       { 7, 1, 0, 1, 56, 27, },
+       { 1, 1, 0, 1, 60, 33, },
+       { 3, 1, 0, 1, 60, 32, },
+       { 4, 1, 0, 1, 60, 33, },
+       { 5, 1, 0, 1, 60, 32, },
+       { 6, 1, 0, 1, 60, 30, },
+       { 7, 1, 0, 1, 60, 27, },
+       { 1, 1, 0, 1, 64, 33, },
+       { 3, 1, 0, 1, 64, 30, },
+       { 4, 1, 0, 1, 64, 33, },
+       { 5, 1, 0, 1, 64, 32, },
+       { 6, 1, 0, 1, 64, 29, },
+       { 7, 1, 0, 1, 64, 27, },
+       { 1, 1, 0, 1, 100, 33, },
+       { 3, 1, 0, 1, 100, 30, },
+       { 4, 1, 0, 1, 100, 33, },
+       { 5, 1, 0, 1, 100, 32, },
+       { 6, 1, 0, 1, 100, 30, },
+       { 7, 1, 0, 1, 100, 27, },
+       { 1, 1, 0, 1, 104, 33, },
+       { 3, 1, 0, 1, 104, 33, },
+       { 4, 1, 0, 1, 104, 33, },
+       { 5, 1, 0, 1, 104, 32, },
+       { 6, 1, 0, 1, 104, 30, },
+       { 7, 1, 0, 1, 104, 27, },
+       { 1, 1, 0, 1, 108, 33, },
+       { 3, 1, 0, 1, 108, 33, },
+       { 4, 1, 0, 1, 108, 33, },
+       { 5, 1, 0, 1, 108, 32, },
+       { 6, 1, 0, 1, 108, 30, },
+       { 7, 1, 0, 1, 108, 27, },
+       { 1, 1, 0, 1, 112, 33, },
+       { 3, 1, 0, 1, 112, 33, },
+       { 4, 1, 0, 1, 112, 33, },
+       { 5, 1, 0, 1, 112, 32, },
+       { 6, 1, 0, 1, 112, 30, },
+       { 7, 1, 0, 1, 112, 27, },
+       { 1, 1, 0, 1, 116, 33, },
+       { 3, 1, 0, 1, 116, 33, },
+       { 4, 1, 0, 1, 116, 33, },
+       { 5, 1, 0, 1, 116, 32, },
+       { 6, 1, 0, 1, 116, 30, },
+       { 7, 1, 0, 1, 116, 27, },
+       { 1, 1, 0, 1, 120, 33, },
+       { 3, 1, 0, 1, 120, 63, },
+       { 4, 1, 0, 1, 120, 33, },
+       { 5, 1, 0, 1, 120, 63, },
+       { 6, 1, 0, 1, 120, 30, },
+       { 7, 1, 0, 1, 120, 27, },
+       { 1, 1, 0, 1, 124, 33, },
+       { 3, 1, 0, 1, 124, 63, },
+       { 4, 1, 0, 1, 124, 33, },
+       { 5, 1, 0, 1, 124, 63, },
+       { 6, 1, 0, 1, 124, 30, },
+       { 7, 1, 0, 1, 124, 27, },
+       { 1, 1, 0, 1, 128, 33, },
+       { 3, 1, 0, 1, 128, 63, },
+       { 4, 1, 0, 1, 128, 63, },
+       { 5, 1, 0, 1, 128, 63, },
+       { 6, 1, 0, 1, 128, 30, },
+       { 7, 1, 0, 1, 128, 27, },
+       { 1, 1, 0, 1, 132, 33, },
+       { 3, 1, 0, 1, 132, 33, },
+       { 4, 1, 0, 1, 132, 63, },
+       { 5, 1, 0, 1, 132, 32, },
+       { 6, 1, 0, 1, 132, 30, },
+       { 7, 1, 0, 1, 132, 27, },
+       { 1, 1, 0, 1, 136, 33, },
+       { 3, 1, 0, 1, 136, 33, },
+       { 4, 1, 0, 1, 136, 63, },
+       { 5, 1, 0, 1, 136, 32, },
+       { 6, 1, 0, 1, 136, 30, },
+       { 7, 1, 0, 1, 136, 63, },
+       { 1, 1, 0, 1, 140, 33, },
+       { 3, 1, 0, 1, 140, 31, },
+       { 4, 1, 0, 1, 140, 63, },
+       { 5, 1, 0, 1, 140, 32, },
+       { 6, 1, 0, 1, 140, 30, },
+       { 7, 1, 0, 1, 140, 63, },
+       { 1, 1, 0, 1, 144, 63, },
+       { 3, 1, 0, 1, 144, 30, },
+       { 4, 1, 0, 1, 144, 63, },
+       { 5, 1, 0, 1, 144, 63, },
+       { 6, 1, 0, 1, 144, 30, },
+       { 7, 1, 0, 1, 144, 63, },
+       { 1, 1, 0, 1, 149, 63, },
+       { 3, 1, 0, 1, 149, 30, },
+       { 4, 1, 0, 1, 149, 33, },
+       { 5, 1, 0, 1, 149, 33, },
+       { 6, 1, 0, 1, 149, 30, },
+       { 7, 1, 0, 1, 149, 27, },
+       { 1, 1, 0, 1, 153, 63, },
+       { 3, 1, 0, 1, 153, 33, },
+       { 4, 1, 0, 1, 153, 33, },
+       { 5, 1, 0, 1, 153, 33, },
+       { 6, 1, 0, 1, 153, 30, },
+       { 7, 1, 0, 1, 153, 27, },
+       { 1, 1, 0, 1, 157, 63, },
+       { 3, 1, 0, 1, 157, 33, },
+       { 4, 1, 0, 1, 157, 33, },
+       { 5, 1, 0, 1, 157, 33, },
+       { 6, 1, 0, 1, 157, 30, },
+       { 7, 1, 0, 1, 157, 27, },
+       { 1, 1, 0, 1, 161, 63, },
+       { 3, 1, 0, 1, 161, 33, },
+       { 4, 1, 0, 1, 161, 31, },
+       { 5, 1, 0, 1, 161, 33, },
+       { 6, 1, 0, 1, 161, 30, },
+       { 7, 1, 0, 1, 161, 27, },
+       { 1, 1, 0, 1, 165, 63, },
+       { 3, 1, 0, 1, 165, 33, },
+       { 4, 1, 0, 1, 165, 63, },
+       { 5, 1, 0, 1, 165, 33, },
+       { 6, 1, 0, 1, 165, 30, },
+       { 7, 1, 0, 1, 165, 27, },
+       { 1, 1, 0, 2, 36, 33, },
+       { 3, 1, 0, 2, 36, 30, },
+       { 4, 1, 0, 2, 36, 27, },
+       { 5, 1, 0, 2, 36, 32, },
+       { 6, 1, 0, 2, 36, 30, },
+       { 7, 1, 0, 2, 36, 27, },
+       { 1, 1, 0, 2, 40, 33, },
+       { 3, 1, 0, 2, 40, 31, },
+       { 4, 1, 0, 2, 40, 29, },
+       { 5, 1, 0, 2, 40, 32, },
+       { 6, 1, 0, 2, 40, 30, },
+       { 7, 1, 0, 2, 40, 27, },
+       { 1, 1, 0, 2, 44, 33, },
+       { 3, 1, 0, 2, 44, 31, },
+       { 4, 1, 0, 2, 44, 29, },
+       { 5, 1, 0, 2, 44, 32, },
+       { 6, 1, 0, 2, 44, 30, },
+       { 7, 1, 0, 2, 44, 27, },
+       { 1, 1, 0, 2, 48, 33, },
+       { 3, 1, 0, 2, 48, 31, },
+       { 4, 1, 0, 2, 48, 26, },
+       { 5, 1, 0, 2, 48, 32, },
+       { 6, 1, 0, 2, 48, 30, },
+       { 7, 1, 0, 2, 48, 27, },
+       { 1, 1, 0, 2, 52, 33, },
+       { 3, 1, 0, 2, 52, 32, },
+       { 4, 1, 0, 2, 52, 7, },
+       { 5, 1, 0, 2, 52, 32, },
+       { 6, 1, 0, 2, 52, 30, },
+       { 7, 1, 0, 2, 52, 27, },
+       { 1, 1, 0, 2, 56, 33, },
+       { 3, 1, 0, 2, 56, 32, },
+       { 4, 1, 0, 2, 56, 33, },
+       { 5, 1, 0, 2, 56, 32, },
+       { 6, 1, 0, 2, 56, 30, },
+       { 7, 1, 0, 2, 56, 27, },
+       { 1, 1, 0, 2, 60, 33, },
+       { 3, 1, 0, 2, 60, 32, },
+       { 4, 1, 0, 2, 60, 33, },
+       { 5, 1, 0, 2, 60, 32, },
+       { 6, 1, 0, 2, 60, 30, },
+       { 7, 1, 0, 2, 60, 27, },
+       { 1, 1, 0, 2, 64, 33, },
+       { 3, 1, 0, 2, 64, 30, },
+       { 4, 1, 0, 2, 64, 33, },
+       { 5, 1, 0, 2, 64, 32, },
+       { 6, 1, 0, 2, 64, 30, },
+       { 7, 1, 0, 2, 64, 27, },
+       { 1, 1, 0, 2, 100, 33, },
+       { 3, 1, 0, 2, 100, 30, },
+       { 4, 1, 0, 2, 100, 33, },
+       { 5, 1, 0, 2, 100, 32, },
+       { 6, 1, 0, 2, 100, 30, },
+       { 7, 1, 0, 2, 100, 27, },
+       { 1, 1, 0, 2, 104, 33, },
+       { 3, 1, 0, 2, 104, 33, },
+       { 4, 1, 0, 2, 104, 33, },
+       { 5, 1, 0, 2, 104, 32, },
+       { 6, 1, 0, 2, 104, 30, },
+       { 7, 1, 0, 2, 104, 27, },
+       { 1, 1, 0, 2, 108, 33, },
+       { 3, 1, 0, 2, 108, 33, },
+       { 4, 1, 0, 2, 108, 33, },
+       { 5, 1, 0, 2, 108, 32, },
+       { 6, 1, 0, 2, 108, 30, },
+       { 7, 1, 0, 2, 108, 27, },
+       { 1, 1, 0, 2, 112, 33, },
+       { 3, 1, 0, 2, 112, 33, },
+       { 4, 1, 0, 2, 112, 33, },
+       { 5, 1, 0, 2, 112, 32, },
+       { 6, 1, 0, 2, 112, 30, },
+       { 7, 1, 0, 2, 112, 27, },
+       { 1, 1, 0, 2, 116, 33, },
+       { 3, 1, 0, 2, 116, 33, },
+       { 4, 1, 0, 2, 116, 33, },
+       { 5, 1, 0, 2, 116, 32, },
+       { 6, 1, 0, 2, 116, 30, },
+       { 7, 1, 0, 2, 116, 27, },
+       { 1, 1, 0, 2, 120, 33, },
+       { 3, 1, 0, 2, 120, 63, },
+       { 4, 1, 0, 2, 120, 33, },
+       { 5, 1, 0, 2, 120, 63, },
+       { 6, 1, 0, 2, 120, 30, },
+       { 7, 1, 0, 2, 120, 27, },
+       { 1, 1, 0, 2, 124, 33, },
+       { 3, 1, 0, 2, 124, 63, },
+       { 4, 1, 0, 2, 124, 33, },
+       { 5, 1, 0, 2, 124, 63, },
+       { 6, 1, 0, 2, 124, 30, },
+       { 7, 1, 0, 2, 124, 27, },
+       { 1, 1, 0, 2, 128, 33, },
+       { 3, 1, 0, 2, 128, 63, },
+       { 4, 1, 0, 2, 128, 63, },
+       { 5, 1, 0, 2, 128, 63, },
+       { 6, 1, 0, 2, 128, 30, },
+       { 7, 1, 0, 2, 128, 27, },
+       { 1, 1, 0, 2, 132, 33, },
+       { 3, 1, 0, 2, 132, 33, },
+       { 4, 1, 0, 2, 132, 63, },
+       { 5, 1, 0, 2, 132, 32, },
+       { 6, 1, 0, 2, 132, 30, },
+       { 7, 1, 0, 2, 132, 27, },
+       { 1, 1, 0, 2, 136, 33, },
+       { 3, 1, 0, 2, 136, 33, },
+       { 4, 1, 0, 2, 136, 63, },
+       { 5, 1, 0, 2, 136, 32, },
+       { 6, 1, 0, 2, 136, 30, },
+       { 7, 1, 0, 2, 136, 63, },
+       { 1, 1, 0, 2, 140, 33, },
+       { 3, 1, 0, 2, 140, 29, },
+       { 4, 1, 0, 2, 140, 63, },
+       { 5, 1, 0, 2, 140, 32, },
+       { 6, 1, 0, 2, 140, 30, },
+       { 7, 1, 0, 2, 140, 63, },
+       { 1, 1, 0, 2, 144, 63, },
+       { 3, 1, 0, 2, 144, 27, },
+       { 4, 1, 0, 2, 144, 63, },
+       { 5, 1, 0, 2, 144, 63, },
+       { 6, 1, 0, 2, 144, 30, },
+       { 7, 1, 0, 2, 144, 63, },
+       { 1, 1, 0, 2, 149, 63, },
+       { 3, 1, 0, 2, 149, 33, },
+       { 4, 1, 0, 2, 149, 33, },
+       { 5, 1, 0, 2, 149, 33, },
+       { 6, 1, 0, 2, 149, 30, },
+       { 7, 1, 0, 2, 149, 27, },
+       { 1, 1, 0, 2, 153, 63, },
+       { 3, 1, 0, 2, 153, 33, },
+       { 4, 1, 0, 2, 153, 33, },
+       { 5, 1, 0, 2, 153, 33, },
+       { 6, 1, 0, 2, 153, 30, },
+       { 7, 1, 0, 2, 153, 27, },
+       { 1, 1, 0, 2, 157, 63, },
+       { 3, 1, 0, 2, 157, 33, },
+       { 4, 1, 0, 2, 157, 33, },
+       { 5, 1, 0, 2, 157, 33, },
+       { 6, 1, 0, 2, 157, 30, },
+       { 7, 1, 0, 2, 157, 27, },
+       { 1, 1, 0, 2, 161, 63, },
+       { 3, 1, 0, 2, 161, 33, },
+       { 4, 1, 0, 2, 161, 31, },
+       { 5, 1, 0, 2, 161, 33, },
+       { 6, 1, 0, 2, 161, 30, },
+       { 7, 1, 0, 2, 161, 27, },
+       { 1, 1, 0, 2, 165, 63, },
+       { 3, 1, 0, 2, 165, 33, },
+       { 4, 1, 0, 2, 165, 63, },
+       { 5, 1, 0, 2, 165, 33, },
+       { 6, 1, 0, 2, 165, 30, },
+       { 7, 1, 0, 2, 165, 27, },
+       { 1, 1, 1, 2, 38, 32, },
+       { 3, 1, 1, 2, 38, 22, },
+       { 4, 1, 1, 2, 38, 26, },
+       { 5, 1, 1, 2, 38, 32, },
+       { 6, 1, 1, 2, 38, 22, },
+       { 7, 1, 1, 2, 38, 27, },
+       { 1, 1, 1, 2, 46, 32, },
+       { 3, 1, 1, 2, 46, 32, },
+       { 4, 1, 1, 2, 46, 28, },
+       { 5, 1, 1, 2, 46, 32, },
+       { 6, 1, 1, 2, 46, 30, },
+       { 7, 1, 1, 2, 46, 27, },
+       { 1, 1, 1, 2, 54, 32, },
+       { 3, 1, 1, 2, 54, 32, },
+       { 4, 1, 1, 2, 54, 22, },
+       { 5, 1, 1, 2, 54, 32, },
+       { 6, 1, 1, 2, 54, 30, },
+       { 7, 1, 1, 2, 54, 27, },
+       { 1, 1, 1, 2, 62, 32, },
+       { 3, 1, 1, 2, 62, 23, },
+       { 4, 1, 1, 2, 62, 31, },
+       { 5, 1, 1, 2, 62, 32, },
+       { 6, 1, 1, 2, 62, 23, },
+       { 7, 1, 1, 2, 62, 27, },
+       { 1, 1, 1, 2, 102, 32, },
+       { 3, 1, 1, 2, 102, 21, },
+       { 4, 1, 1, 2, 102, 31, },
+       { 5, 1, 1, 2, 102, 32, },
+       { 6, 1, 1, 2, 102, 30, },
+       { 7, 1, 1, 2, 102, 27, },
+       { 1, 1, 1, 2, 110, 32, },
+       { 3, 1, 1, 2, 110, 32, },
+       { 4, 1, 1, 2, 110, 32, },
+       { 5, 1, 1, 2, 110, 32, },
+       { 6, 1, 1, 2, 110, 30, },
+       { 7, 1, 1, 2, 110, 27, },
+       { 1, 1, 1, 2, 118, 32, },
+       { 3, 1, 1, 2, 118, 63, },
+       { 4, 1, 1, 2, 118, 32, },
+       { 5, 1, 1, 2, 118, 63, },
+       { 6, 1, 1, 2, 118, 30, },
+       { 7, 1, 1, 2, 118, 27, },
+       { 1, 1, 1, 2, 126, 32, },
+       { 3, 1, 1, 2, 126, 63, },
+       { 4, 1, 1, 2, 126, 63, },
+       { 5, 1, 1, 2, 126, 63, },
+       { 6, 1, 1, 2, 126, 30, },
+       { 7, 1, 1, 2, 126, 27, },
+       { 1, 1, 1, 2, 134, 32, },
+       { 3, 1, 1, 2, 134, 32, },
+       { 4, 1, 1, 2, 134, 63, },
+       { 5, 1, 1, 2, 134, 32, },
+       { 6, 1, 1, 2, 134, 30, },
+       { 7, 1, 1, 2, 134, 63, },
+       { 1, 1, 1, 2, 142, 63, },
+       { 3, 1, 1, 2, 142, 29, },
+       { 4, 1, 1, 2, 142, 63, },
+       { 5, 1, 1, 2, 142, 63, },
+       { 6, 1, 1, 2, 142, 30, },
+       { 7, 1, 1, 2, 142, 63, },
+       { 1, 1, 1, 2, 151, 63, },
+       { 3, 1, 1, 2, 151, 32, },
+       { 4, 1, 1, 2, 151, 27, },
+       { 5, 1, 1, 2, 151, 32, },
+       { 6, 1, 1, 2, 151, 30, },
+       { 7, 1, 1, 2, 151, 27, },
+       { 1, 1, 1, 2, 159, 63, },
+       { 3, 1, 1, 2, 159, 32, },
+       { 4, 1, 1, 2, 159, 26, },
+       { 5, 1, 1, 2, 159, 32, },
+       { 6, 1, 1, 2, 159, 30, },
+       { 7, 1, 1, 2, 159, 27, },
+       { 1, 1, 2, 4, 42, 28, },
+       { 3, 1, 2, 4, 42, 19, },
+       { 4, 1, 2, 4, 42, 25, },
+       { 5, 1, 2, 4, 42, 32, },
+       { 6, 1, 2, 4, 42, 19, },
+       { 7, 1, 2, 4, 42, 27, },
+       { 1, 1, 2, 4, 58, 28, },
+       { 3, 1, 2, 4, 58, 22, },
+       { 4, 1, 2, 4, 58, 28, },
+       { 5, 1, 2, 4, 58, 32, },
+       { 6, 1, 2, 4, 58, 22, },
+       { 7, 1, 2, 4, 58, 27, },
+       { 1, 1, 2, 4, 106, 32, },
+       { 3, 1, 2, 4, 106, 18, },
+       { 4, 1, 2, 4, 106, 30, },
+       { 5, 1, 2, 4, 106, 32, },
+       { 6, 1, 2, 4, 106, 30, },
+       { 7, 1, 2, 4, 106, 27, },
+       { 1, 1, 2, 4, 122, 32, },
+       { 3, 1, 2, 4, 122, 63, },
+       { 4, 1, 2, 4, 122, 26, },
+       { 5, 1, 2, 4, 122, 63, },
+       { 6, 1, 2, 4, 122, 30, },
+       { 7, 1, 2, 4, 122, 27, },
+       { 1, 1, 2, 4, 138, 63, },
+       { 3, 1, 2, 4, 138, 28, },
+       { 4, 1, 2, 4, 138, 63, },
+       { 5, 1, 2, 4, 138, 63, },
+       { 6, 1, 2, 4, 138, 30, },
+       { 7, 1, 2, 4, 138, 63, },
+       { 1, 1, 2, 4, 155, 63, },
+       { 3, 1, 2, 4, 155, 32, },
+       { 4, 1, 2, 4, 155, 27, },
+       { 5, 1, 2, 4, 155, 32, },
+       { 6, 1, 2, 4, 155, 30, },
+       { 7, 1, 2, 4, 155, 27, },
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8821c_txpwr_lmt_type0);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c_table.h b/drivers/net/wireless/realtek/rtw88/rtw8821c_table.h
new file mode 100644 (file)
index 0000000..5ea8b4f
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019  Realtek Corporation
+ */
+
+#ifndef __RTW8821C_TABLE_H__
+#define __RTW8821C_TABLE_H__
+
+extern const struct rtw_table rtw8821c_mac_tbl;
+extern const struct rtw_table rtw8821c_agc_tbl;
+extern const struct rtw_table rtw8821c_bb_tbl;
+extern const struct rtw_table rtw8821c_bb_pg_type0_tbl;
+extern const struct rtw_table rtw8821c_rf_a_tbl;
+extern const struct rtw_table rtw8821c_txpwr_lmt_type0_tbl;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821ce.c b/drivers/net/wireless/realtek/rtw88/rtw8821ce.c
new file mode 100644 (file)
index 0000000..616fdcf
--- /dev/null
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019  Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "rtw8821ce.h"
+
+static const struct pci_device_id rtw_8821ce_id_table[] = {
+       {
+               PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC821),
+               .driver_data = (kernel_ulong_t)&rtw8821c_hw_spec
+       },
+       {}
+};
+MODULE_DEVICE_TABLE(pci, rtw_8821ce_id_table);
+
+static struct pci_driver rtw_8821ce_driver = {
+       .name = "rtw_8821ce",
+       .id_table = rtw_8821ce_id_table,
+       .probe = rtw_pci_probe,
+       .remove = rtw_pci_remove,
+       .driver.pm = &rtw_pm_ops,
+       .shutdown = rtw_pci_shutdown,
+};
+module_pci_driver(rtw_8821ce_driver);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ac wireless 8821ce driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821ce.h b/drivers/net/wireless/realtek/rtw88/rtw8821ce.h
new file mode 100644 (file)
index 0000000..8d3eb77
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019  Realtek Corporation
+ */
+
+#ifndef __RTW_8821CE_H_
+#define __RTW_8821CE_H_
+
+extern const struct dev_pm_ops rtw_pm_ops;
+extern struct rtw_chip_info rtw8821c_hw_spec;
+int rtw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+void rtw_pci_remove(struct pci_dev *pdev);
+void rtw_pci_shutdown(struct pci_dev *pdev);
+
+#endif
index e49bdd7..351cd05 100644 (file)
@@ -2147,7 +2147,7 @@ static const struct coex_table_para table_sant_8822b[] = {
        {0x66555555, 0x5a5a5a5a},
        {0x66555555, 0x6a5a5a5a}, /* case-10 */
        {0x66555555, 0xfafafafa},
-       {0x66555555, 0x6a5a5aaa},
+       {0x66555555, 0x5a5a5aaa},
        {0x66555555, 0x5aaa5aaa},
        {0x66555555, 0xaaaa5aaa},
        {0x66555555, 0xaaaaaaaa}, /* case-15 */
@@ -2223,7 +2223,8 @@ static const struct coex_tdma_para tdma_sant_8822b[] = {
        { {0x55, 0x08, 0x03, 0x10, 0x54} },
        { {0x65, 0x10, 0x03, 0x11, 0x11} },
        { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */
-       { {0x51, 0x08, 0x03, 0x10, 0x50} }
+       { {0x51, 0x08, 0x03, 0x10, 0x50} },
+       { {0x61, 0x08, 0x03, 0x11, 0x11} }
 };
 
 /* Non-Shared-Antenna TDMA */
@@ -2475,7 +2476,7 @@ struct rtw_chip_info rtw8822b_hw_spec = {
        .bfer_mu_max_num = 1,
        .rx_ldpc = true,
 
-       .coex_para_ver = 0x19062706,
+       .coex_para_ver = 0x20070206,
        .bt_desired_ver = 0x6,
        .scbd_support = true,
        .new_scbd10_def = false,
index c3d72ef..4268084 100644 (file)
@@ -3899,6 +3899,7 @@ static const struct rtw_rfe_def rtw8822c_rfe_defs[] = {
        [1] = RTW_DEF_RFE(8822c, 0, 0),
        [2] = RTW_DEF_RFE(8822c, 0, 0),
        [5] = RTW_DEF_RFE(8822c, 0, 5),
+       [6] = RTW_DEF_RFE(8822c, 0, 0),
 };
 
 static const struct rtw_hw_reg rtw8822c_dig[] = {
@@ -3997,7 +3998,7 @@ static const struct coex_table_para table_sant_8822c[] = {
        {0x66555555, 0x5a5a5a5a},
        {0x66555555, 0x6a5a5a5a}, /* case-10 */
        {0x66555555, 0xfafafafa},
-       {0x66555555, 0x6a5a5aaa},
+       {0x66555555, 0x5a5a5aaa},
        {0x66555555, 0x5aaa5aaa},
        {0x66555555, 0xaaaa5aaa},
        {0x66555555, 0xaaaaaaaa}, /* case-15 */
@@ -4073,7 +4074,8 @@ static const struct coex_tdma_para tdma_sant_8822c[] = {
        { {0x55, 0x08, 0x03, 0x10, 0x54} },
        { {0x65, 0x10, 0x03, 0x11, 0x11} },
        { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */
-       { {0x51, 0x08, 0x03, 0x10, 0x50} }
+       { {0x51, 0x08, 0x03, 0x10, 0x50} },
+       { {0x61, 0x08, 0x03, 0x11, 0x11} }
 };
 
 /* Non-Shared-Antenna TDMA */
@@ -4343,8 +4345,8 @@ struct rtw_chip_info rtw8822c_hw_spec = {
        .wowlan_stub = &rtw_wowlan_stub_8822c,
        .max_sched_scan_ssids = 4,
 #endif
-       .coex_para_ver = 0x19062706,
-       .bt_desired_ver = 0x6,
+       .coex_para_ver = 0x20070217,
+       .bt_desired_ver = 0x17,
        .scbd_support = true,
        .new_scbd10_def = true,
        .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
index 7b6bd99..026ac49 100644 (file)
@@ -11,6 +11,10 @@ static const struct pci_device_id rtw_8822ce_id_table[] = {
                PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822),
                .driver_data = (kernel_ulong_t)&rtw8822c_hw_spec
        },
+       {
+               PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC82F),
+               .driver_data = (kernel_ulong_t)&rtw8822c_hw_spec
+       },
        {}
 };
 MODULE_DEVICE_TABLE(pci, rtw_8822ce_id_table);
index 79c4211..7fcc992 100644 (file)
@@ -61,6 +61,8 @@ void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
        SET_TX_DESC_DISQSELSEQ(txdesc, pkt_info->dis_qselseq);
        SET_TX_DESC_EN_HWSEQ(txdesc, pkt_info->en_hwseq);
        SET_TX_DESC_HW_SSN_SEL(txdesc, pkt_info->hw_ssn_sel);
+       SET_TX_DESC_NAVUSEHDR(txdesc, pkt_info->nav_use_hdr);
+       SET_TX_DESC_BT_NULL(txdesc, pkt_info->bt_null);
 }
 EXPORT_SYMBOL(rtw_tx_fill_tx_desc);
 
@@ -227,17 +229,58 @@ void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb, int src)
        spin_unlock_irqrestore(&tx_report->q_lock, flags);
 }
 
-static void rtw_tx_mgmt_pkt_info_update(struct rtw_dev *rtwdev,
+static void rtw_tx_pkt_info_update_rate(struct rtw_dev *rtwdev,
                                        struct rtw_tx_pkt_info *pkt_info,
-                                       struct ieee80211_sta *sta,
                                        struct sk_buff *skb)
 {
+       if (rtwdev->hal.current_band_type == RTW_BAND_2G) {
+               pkt_info->rate_id = RTW_RATEID_B_20M;
+               pkt_info->rate = DESC_RATE1M;
+       } else {
+               pkt_info->rate_id = RTW_RATEID_G;
+               pkt_info->rate = DESC_RATE6M;
+       }
        pkt_info->use_rate = true;
-       pkt_info->rate_id = 6;
        pkt_info->dis_rate_fallback = true;
+}
+
+static void rtw_tx_pkt_info_update_sec(struct rtw_dev *rtwdev,
+                                      struct rtw_tx_pkt_info *pkt_info,
+                                      struct sk_buff *skb)
+{
+       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       u8 sec_type = 0;
+
+       if (info && info->control.hw_key) {
+               struct ieee80211_key_conf *key = info->control.hw_key;
+
+               switch (key->cipher) {
+               case WLAN_CIPHER_SUITE_WEP40:
+               case WLAN_CIPHER_SUITE_WEP104:
+               case WLAN_CIPHER_SUITE_TKIP:
+                       sec_type = 0x01;
+                       break;
+               case WLAN_CIPHER_SUITE_CCMP:
+                       sec_type = 0x03;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       pkt_info->sec_type = sec_type;
+}
+
+static void rtw_tx_mgmt_pkt_info_update(struct rtw_dev *rtwdev,
+                                       struct rtw_tx_pkt_info *pkt_info,
+                                       struct ieee80211_sta *sta,
+                                       struct sk_buff *skb)
+{
+       rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb);
        pkt_info->dis_qselseq = true;
        pkt_info->en_hwseq = true;
        pkt_info->hw_ssn_sel = 0;
+       /* TODO: need to change hw port and hw ssn sel for multiple vifs */
 }
 
 static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
@@ -312,7 +355,6 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
        struct rtw_sta_info *si;
        struct ieee80211_vif *vif = NULL;
        __le16 fc = hdr->frame_control;
-       u8 sec_type = 0;
        bool bmc;
 
        if (sta) {
@@ -325,23 +367,6 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
        else if (ieee80211_is_data(fc))
                rtw_tx_data_pkt_info_update(rtwdev, pkt_info, sta, skb);
 
-       if (info->control.hw_key) {
-               struct ieee80211_key_conf *key = info->control.hw_key;
-
-               switch (key->cipher) {
-               case WLAN_CIPHER_SUITE_WEP40:
-               case WLAN_CIPHER_SUITE_WEP104:
-               case WLAN_CIPHER_SUITE_TKIP:
-                       sec_type = 0x01;
-                       break;
-               case WLAN_CIPHER_SUITE_CCMP:
-                       sec_type = 0x03;
-                       break;
-               default:
-                       break;
-               }
-       }
-
        bmc = is_broadcast_ether_addr(hdr->addr1) ||
              is_multicast_ether_addr(hdr->addr1);
 
@@ -349,7 +374,7 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
                rtw_tx_report_enable(rtwdev, pkt_info);
 
        pkt_info->bmc = bmc;
-       pkt_info->sec_type = sec_type;
+       rtw_tx_pkt_info_update_sec(rtwdev, pkt_info, skb);
        pkt_info->tx_pkt_size = skb->len;
        pkt_info->offset = chip->tx_pkt_desc_sz;
        pkt_info->qsel = skb->priority;
@@ -359,24 +384,42 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
        rtw_tx_stats(rtwdev, vif, skb);
 }
 
-void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
-                                  struct rtw_tx_pkt_info *pkt_info,
-                                  struct sk_buff *skb)
+void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
+                                     struct rtw_tx_pkt_info *pkt_info,
+                                     struct sk_buff *skb,
+                                     enum rtw_rsvd_packet_type type)
 {
        struct rtw_chip_info *chip = rtwdev->chip;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        bool bmc;
 
+       /* A beacon or dummy reserved page packet indicates that it is the first
+        * reserved page, and the qsel of it will be set in each hci.
+        */
+       if (type != RSVD_BEACON && type != RSVD_DUMMY)
+               pkt_info->qsel = TX_DESC_QSEL_MGMT;
+
+       rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb);
+
        bmc = is_broadcast_ether_addr(hdr->addr1) ||
              is_multicast_ether_addr(hdr->addr1);
-       pkt_info->use_rate = true;
-       pkt_info->rate_id = 6;
-       pkt_info->dis_rate_fallback = true;
        pkt_info->bmc = bmc;
        pkt_info->tx_pkt_size = skb->len;
        pkt_info->offset = chip->tx_pkt_desc_sz;
-       pkt_info->qsel = TX_DESC_QSEL_MGMT;
        pkt_info->ls = true;
+       if (type == RSVD_PS_POLL) {
+               pkt_info->nav_use_hdr = true;
+       } else {
+               pkt_info->dis_qselseq = true;
+               pkt_info->en_hwseq = true;
+               pkt_info->hw_ssn_sel = 0;
+       }
+       if (type == RSVD_QOS_NULL)
+               pkt_info->bt_null = true;
+
+       rtw_tx_pkt_info_update_sec(rtwdev, pkt_info, skb);
+
+       /* TODO: need to change hw port and hw ssn sel for multiple vifs */
 }
 
 struct sk_buff *
@@ -399,8 +442,7 @@ rtw_tx_write_data_rsvd_page_get(struct rtw_dev *rtwdev,
 
        skb_reserve(skb, tx_pkt_desc_sz);
        skb_put_data(skb, buf, size);
-       pkt_info->tx_pkt_size = size;
-       pkt_info->offset = tx_pkt_desc_sz;
+       rtw_tx_rsvd_page_pkt_info_update(rtwdev, pkt_info, skb, RSVD_BEACON);
 
        return skb;
 }
index 72dfd40..cfe84ee 100644 (file)
        le32p_replace_bits((__le32 *)(txdesc) + 0x08, value, BIT(15))
 #define SET_TX_DESC_HW_SSN_SEL(txdesc, value)                                 \
        le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, GENMASK(7, 6))
+#define SET_TX_DESC_NAVUSEHDR(txdesc, value)                                  \
+       le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, BIT(15))
+#define SET_TX_DESC_BT_NULL(txdesc, value)                                    \
+       le32p_replace_bits((__le32 *)(txdesc) + 0x02, value, BIT(23))
 
 enum rtw_tx_desc_queue_select {
        TX_DESC_QSEL_TID0       = 0,
@@ -83,6 +87,8 @@ enum rtw_tx_desc_queue_select {
        TX_DESC_QSEL_H2C        = 19,
 };
 
+enum rtw_rsvd_packet_type;
+
 void rtw_tx(struct rtw_dev *rtwdev,
            struct ieee80211_tx_control *control,
            struct sk_buff *skb);
@@ -96,9 +102,10 @@ void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
 void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb);
 void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn);
 void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb, int src);
-void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
-                                  struct rtw_tx_pkt_info *pkt_info,
-                                  struct sk_buff *skb);
+void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
+                                     struct rtw_tx_pkt_info *pkt_info,
+                                     struct sk_buff *skb,
+                                     enum rtw_rsvd_packet_type type);
 struct sk_buff *
 rtw_tx_write_data_rsvd_page_get(struct rtw_dev *rtwdev,
                                struct rtw_tx_pkt_info *pkt_info,
index 850864d..e6d426e 100644 (file)
@@ -70,7 +70,7 @@ static int wl1251_event_ps_report(struct wl1251 *wl,
                break;
        }
 
-       return 0;
+       return ret;
 }
 
 static void wl1251_event_mbox_dump(struct event_mailbox *mbox)
index de6c8a7..821ad1a 100644 (file)
@@ -521,6 +521,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
        int ret = 0;
        u32 intr;
        int loopcount = WL1271_IRQ_MAX_LOOPS;
+       bool run_tx_queue = true;
        bool done = false;
        unsigned int defer_count;
        unsigned long flags;
@@ -586,19 +587,22 @@ static int wlcore_irq_locked(struct wl1271 *wl)
                                goto err_ret;
 
                        /* Check if any tx blocks were freed */
-                       spin_lock_irqsave(&wl->wl_lock, flags);
-                       if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
-                           wl1271_tx_total_queue_count(wl) > 0) {
-                               spin_unlock_irqrestore(&wl->wl_lock, flags);
+                       if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
+                               if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
+                                       if (!wl1271_tx_total_queue_count(wl))
+                                               run_tx_queue = false;
+                                       spin_unlock_irqrestore(&wl->wl_lock, flags);
+                               }
+
                                /*
                                 * In order to avoid starvation of the TX path,
                                 * call the work function directly.
                                 */
-                               ret = wlcore_tx_work_locked(wl);
-                               if (ret < 0)
-                                       goto err_ret;
-                       } else {
-                               spin_unlock_irqrestore(&wl->wl_lock, flags);
+                               if (run_tx_queue) {
+                                       ret = wlcore_tx_work_locked(wl);
+                                       if (ret < 0)
+                                               goto err_ret;
+                               }
                        }
 
                        /* check for tx results */
@@ -648,25 +652,28 @@ static irqreturn_t wlcore_irq(int irq, void *cookie)
        int ret;
        unsigned long flags;
        struct wl1271 *wl = cookie;
+       bool queue_tx_work = true;
 
-       /* complete the ELP completion */
-       spin_lock_irqsave(&wl->wl_lock, flags);
        set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
-       if (wl->elp_compl) {
-               complete(wl->elp_compl);
-               wl->elp_compl = NULL;
+
+       /* complete the ELP completion */
+       if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) {
+               spin_lock_irqsave(&wl->wl_lock, flags);
+               if (wl->elp_compl)
+                       complete(wl->elp_compl);
+               spin_unlock_irqrestore(&wl->wl_lock, flags);
        }
 
        if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
                /* don't enqueue a work right now. mark it as pending */
                set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
                wl1271_debug(DEBUG_IRQ, "should not enqueue work");
+               spin_lock_irqsave(&wl->wl_lock, flags);
                disable_irq_nosync(wl->irq);
                pm_wakeup_event(wl->dev, 0);
                spin_unlock_irqrestore(&wl->wl_lock, flags);
                goto out_handled;
        }
-       spin_unlock_irqrestore(&wl->wl_lock, flags);
 
        /* TX might be handled here, avoid redundant work */
        set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
@@ -678,20 +685,22 @@ static irqreturn_t wlcore_irq(int irq, void *cookie)
        if (ret)
                wl12xx_queue_recovery_work(wl);
 
-       spin_lock_irqsave(&wl->wl_lock, flags);
-       /* In case TX was not handled here, queue TX work */
+       /* In case TX was not handled in wlcore_irq_locked(), queue TX work */
        clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
-       if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
-           wl1271_tx_total_queue_count(wl) > 0)
-               ieee80211_queue_work(wl->hw, &wl->tx_work);
-       spin_unlock_irqrestore(&wl->wl_lock, flags);
+       if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
+               if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
+                       if (!wl1271_tx_total_queue_count(wl))
+                               queue_tx_work = false;
+                       spin_unlock_irqrestore(&wl->wl_lock, flags);
+               }
+               if (queue_tx_work)
+                       ieee80211_queue_work(wl->hw, &wl->tx_work);
+       }
 
        mutex_unlock(&wl->mutex);
 
 out_handled:
-       spin_lock_irqsave(&wl->wl_lock, flags);
        clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
-       spin_unlock_irqrestore(&wl->wl_lock, flags);
 
        return IRQ_HANDLED;
 }
@@ -6732,7 +6741,6 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev)
        unsigned long flags;
        int ret;
        unsigned long start_time = jiffies;
-       bool pending = false;
        bool recovery = false;
 
        /* Nothing to do if no ELP mode requested */
@@ -6742,49 +6750,35 @@ static int __maybe_unused wlcore_runtime_resume(struct device *dev)
        wl1271_debug(DEBUG_PSM, "waking up chip from elp");
 
        spin_lock_irqsave(&wl->wl_lock, flags);
-       if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
-               pending = true;
-       else
-               wl->elp_compl = &compl;
+       wl->elp_compl = &compl;
        spin_unlock_irqrestore(&wl->wl_lock, flags);
 
        ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
        if (ret < 0) {
                recovery = true;
-               goto err;
-       }
-
-       if (!pending) {
+       } else if (!test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) {
                ret = wait_for_completion_timeout(&compl,
                        msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
                if (ret == 0) {
                        wl1271_warning("ELP wakeup timeout!");
-
-                       /* Return no error for runtime PM for recovery */
-                       ret = 0;
                        recovery = true;
-                       goto err;
                }
        }
 
-       clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
-
-       wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
-                    jiffies_to_msecs(jiffies - start_time));
-
-       return 0;
-
-err:
        spin_lock_irqsave(&wl->wl_lock, flags);
        wl->elp_compl = NULL;
        spin_unlock_irqrestore(&wl->wl_lock, flags);
+       clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
 
        if (recovery) {
                set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
                wl12xx_queue_recovery_work(wl);
+       } else {
+               wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
+                            jiffies_to_msecs(jiffies - start_time));
        }
 
-       return ret;
+       return 0;
 }
 
 static const struct dev_pm_ops wlcore_pm_ops = {
index 8ff0374..65b5985 100644 (file)
@@ -600,9 +600,7 @@ void zd_usb_disable_int(struct zd_usb *usb)
        dev_dbg_f(zd_usb_dev(usb), "urb %p killed\n", urb);
        usb_free_urb(urb);
 
-       if (buffer)
-               usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER,
-                                 buffer, buffer_dma);
+       usb_free_coherent(udev, USB_MAX_EP_INT_BUFFER, buffer, buffer_dma);
 }
 
 static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
index 05847eb..ae477f7 100644 (file)
@@ -281,6 +281,9 @@ struct xenvif {
        u8 ipv6_csum:1;
        u8 multicast_control:1;
 
+       /* headroom requested by xen-netfront */
+       u16 xdp_headroom;
+
        /* Is this interface disabled? True when backend discovers
         * frontend is rogue.
         */
@@ -395,6 +398,7 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
 irqreturn_t xenvif_interrupt(int irq, void *dev_id);
 
 extern bool separate_tx_rx_irq;
+extern bool provides_xdp_headroom;
 
 extern unsigned int rx_drain_timeout_msecs;
 extern unsigned int rx_stall_timeout_msecs;
index 0c8a02a..8af4972 100644 (file)
@@ -483,6 +483,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        vif->queues = NULL;
        vif->num_queues = 0;
 
+       vif->xdp_headroom = 0;
+
        spin_lock_init(&vif->lock);
        INIT_LIST_HEAD(&vif->fe_mcast_addr);
 
index 315dfc6..6dfca72 100644 (file)
@@ -96,6 +96,13 @@ unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
 module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
 MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
 
+/* The module parameter tells that we have to put data
+ * for xen-netfront with the XDP_PACKET_HEADROOM offset
+ * needed for XDP processing
+ */
+bool provides_xdp_headroom = true;
+module_param(provides_xdp_headroom, bool, 0644);
+
 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
                               u8 status);
 
index ef58870..ac034f6 100644 (file)
@@ -258,6 +258,19 @@ static void xenvif_rx_next_skb(struct xenvif_queue *queue,
                pkt->extra_count++;
        }
 
+       if (queue->vif->xdp_headroom) {
+               struct xen_netif_extra_info *extra;
+
+               extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
+
+               memset(extra, 0, sizeof(struct xen_netif_extra_info));
+               extra->u.xdp.headroom = queue->vif->xdp_headroom;
+               extra->type = XEN_NETIF_EXTRA_TYPE_XDP;
+               extra->flags = 0;
+
+               pkt->extra_count++;
+       }
+
        if (skb->sw_hash) {
                struct xen_netif_extra_info *extra;
 
@@ -356,7 +369,7 @@ static void xenvif_rx_data_slot(struct xenvif_queue *queue,
                                struct xen_netif_rx_request *req,
                                struct xen_netif_rx_response *rsp)
 {
-       unsigned int offset = 0;
+       unsigned int offset = queue->vif->xdp_headroom;
        unsigned int flags;
 
        do {
index 286054b..7e62a6e 100644 (file)
@@ -393,6 +393,24 @@ static void set_backend_state(struct backend_info *be,
        }
 }
 
+static void read_xenbus_frontend_xdp(struct backend_info *be,
+                                     struct xenbus_device *dev)
+{
+       struct xenvif *vif = be->vif;
+       u16 headroom;
+       int err;
+
+       err = xenbus_scanf(XBT_NIL, dev->otherend,
+                          "xdp-headroom", "%hu", &headroom);
+       if (err != 1) {
+               vif->xdp_headroom = 0;
+               return;
+       }
+       if (headroom > XEN_NETIF_MAX_XDP_HEADROOM)
+               headroom = XEN_NETIF_MAX_XDP_HEADROOM;
+       vif->xdp_headroom = headroom;
+}
+
 /**
  * Callback received when the frontend's state changes.
  */
@@ -417,6 +435,11 @@ static void frontend_changed(struct xenbus_device *dev,
                set_backend_state(be, XenbusStateConnected);
                break;
 
+       case XenbusStateReconfiguring:
+               read_xenbus_frontend_xdp(be, dev);
+               xenbus_switch_state(dev, XenbusStateReconfigured);
+               break;
+
        case XenbusStateClosing:
                set_backend_state(be, XenbusStateClosing);
                break;
@@ -947,6 +970,8 @@ static int read_xenbus_vif_flags(struct backend_info *be)
        vif->ipv6_csum = !!xenbus_read_unsigned(dev->otherend,
                                                "feature-ipv6-csum-offload", 0);
 
+       read_xenbus_frontend_xdp(be, dev);
+
        return 0;
 }
 
@@ -1036,6 +1061,15 @@ static int netback_probe(struct xenbus_device *dev,
                        goto abort_transaction;
                }
 
+               /* we can adjust a headroom for netfront XDP processing */
+               err = xenbus_printf(xbt, dev->nodename,
+                                   "feature-xdp-headroom", "%d",
+                                   provides_xdp_headroom);
+               if (err) {
+                       message = "writing feature-xdp-headroom";
+                       goto abort_transaction;
+               }
+
                /* We don't support rx-flip path (except old guests who
                 * don't grok this feature flag).
                 */
index 482c6c8..ed995df 100644 (file)
@@ -44,6 +44,9 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <net/ip.h>
+#include <linux/bpf.h>
+#include <net/page_pool.h>
+#include <linux/bpf_trace.h>
 
 #include <xen/xen.h>
 #include <xen/xenbus.h>
@@ -102,6 +105,8 @@ struct netfront_queue {
        char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
        struct netfront_info *info;
 
+       struct bpf_prog __rcu *xdp_prog;
+
        struct napi_struct napi;
 
        /* Split event channels support, tx_* == rx_* when using
@@ -144,6 +149,9 @@ struct netfront_queue {
        struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
        grant_ref_t gref_rx_head;
        grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
+
+       struct page_pool *page_pool;
+       struct xdp_rxq_info xdp_rxq;
 };
 
 struct netfront_info {
@@ -159,6 +167,10 @@ struct netfront_info {
        struct netfront_stats __percpu *rx_stats;
        struct netfront_stats __percpu *tx_stats;
 
+       /* XDP state */
+       bool netback_has_xdp_headroom;
+       bool netfront_xdp_enabled;
+
        atomic_t rx_gso_checksum_fixup;
 };
 
@@ -265,8 +277,8 @@ static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
        if (unlikely(!skb))
                return NULL;
 
-       page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
-       if (!page) {
+       page = page_pool_dev_alloc_pages(queue->page_pool);
+       if (unlikely(!page)) {
                kfree_skb(skb);
                return NULL;
        }
@@ -560,6 +572,66 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
        return queue_idx;
 }
 
+static int xennet_xdp_xmit_one(struct net_device *dev,
+                              struct netfront_queue *queue,
+                              struct xdp_frame *xdpf)
+{
+       struct netfront_info *np = netdev_priv(dev);
+       struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
+       int notify;
+
+       xennet_make_first_txreq(queue, NULL,
+                               virt_to_page(xdpf->data),
+                               offset_in_page(xdpf->data),
+                               xdpf->len);
+
+       RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
+       if (notify)
+               notify_remote_via_irq(queue->tx_irq);
+
+       u64_stats_update_begin(&tx_stats->syncp);
+       tx_stats->bytes += xdpf->len;
+       tx_stats->packets++;
+       u64_stats_update_end(&tx_stats->syncp);
+
+       xennet_tx_buf_gc(queue);
+
+       return 0;
+}
+
+static int xennet_xdp_xmit(struct net_device *dev, int n,
+                          struct xdp_frame **frames, u32 flags)
+{
+       unsigned int num_queues = dev->real_num_tx_queues;
+       struct netfront_info *np = netdev_priv(dev);
+       struct netfront_queue *queue = NULL;
+       unsigned long irq_flags;
+       int drops = 0;
+       int i, err;
+
+       if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+               return -EINVAL;
+
+       queue = &np->queues[smp_processor_id() % num_queues];
+
+       spin_lock_irqsave(&queue->tx_lock, irq_flags);
+       for (i = 0; i < n; i++) {
+               struct xdp_frame *xdpf = frames[i];
+
+               if (!xdpf)
+                       continue;
+               err = xennet_xdp_xmit_one(dev, queue, xdpf);
+               if (err) {
+                       xdp_return_frame_rx_napi(xdpf);
+                       drops++;
+               }
+       }
+       spin_unlock_irqrestore(&queue->tx_lock, irq_flags);
+
+       return n - drops;
+}
+
+
 #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
 
 static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -682,6 +754,9 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
        /* First request has the packet length. */
        first_tx->size = skb->len;
 
+       /* timestamp packet in software */
+       skb_tx_timestamp(skb);
+
        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
        if (notify)
                notify_remote_via_irq(queue->tx_irq);
@@ -778,23 +853,82 @@ static int xennet_get_extras(struct netfront_queue *queue,
        return err;
 }
 
+static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
+                  struct xen_netif_rx_response *rx, struct bpf_prog *prog,
+                  struct xdp_buff *xdp, bool *need_xdp_flush)
+{
+       struct xdp_frame *xdpf;
+       u32 len = rx->status;
+       u32 act;
+       int err;
+
+       xdp->data_hard_start = page_address(pdata);
+       xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
+       xdp_set_data_meta_invalid(xdp);
+       xdp->data_end = xdp->data + len;
+       xdp->rxq = &queue->xdp_rxq;
+       xdp->frame_sz = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
+
+       act = bpf_prog_run_xdp(prog, xdp);
+       switch (act) {
+       case XDP_TX:
+               get_page(pdata);
+               xdpf = xdp_convert_buff_to_frame(xdp);
+               err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
+               if (unlikely(err < 0))
+                       trace_xdp_exception(queue->info->netdev, prog, act);
+               break;
+       case XDP_REDIRECT:
+               get_page(pdata);
+               err = xdp_do_redirect(queue->info->netdev, xdp, prog);
+               *need_xdp_flush = true;
+               if (unlikely(err))
+                       trace_xdp_exception(queue->info->netdev, prog, act);
+               break;
+       case XDP_PASS:
+       case XDP_DROP:
+               break;
+
+       case XDP_ABORTED:
+               trace_xdp_exception(queue->info->netdev, prog, act);
+               break;
+
+       default:
+               bpf_warn_invalid_xdp_action(act);
+       }
+
+       return act;
+}
+
 static int xennet_get_responses(struct netfront_queue *queue,
                                struct netfront_rx_info *rinfo, RING_IDX rp,
-                               struct sk_buff_head *list)
+                               struct sk_buff_head *list,
+                               bool *need_xdp_flush)
 {
        struct xen_netif_rx_response *rx = &rinfo->rx;
-       struct xen_netif_extra_info *extras = rinfo->extras;
-       struct device *dev = &queue->info->netdev->dev;
+       int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
        RING_IDX cons = queue->rx.rsp_cons;
        struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
+       struct xen_netif_extra_info *extras = rinfo->extras;
        grant_ref_t ref = xennet_get_rx_ref(queue, cons);
-       int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
+       struct device *dev = &queue->info->netdev->dev;
+       struct bpf_prog *xdp_prog;
+       struct xdp_buff xdp;
+       unsigned long ret;
        int slots = 1;
        int err = 0;
-       unsigned long ret;
+       u32 verdict;
 
        if (rx->flags & XEN_NETRXF_extra_info) {
                err = xennet_get_extras(queue, extras, rp);
+               if (!err) {
+                       if (extras[XEN_NETIF_EXTRA_TYPE_XDP - 1].type) {
+                               struct xen_netif_extra_info *xdp;
+
+                               xdp = &extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];
+                               rx->offset = xdp->u.xdp.headroom;
+                       }
+               }
                cons = queue->rx.rsp_cons;
        }
 
@@ -827,9 +961,24 @@ static int xennet_get_responses(struct netfront_queue *queue,
 
                gnttab_release_grant_reference(&queue->gref_rx_head, ref);
 
-               __skb_queue_tail(list, skb);
-
+               rcu_read_lock();
+               xdp_prog = rcu_dereference(queue->xdp_prog);
+               if (xdp_prog) {
+                       if (!(rx->flags & XEN_NETRXF_more_data)) {
+                               /* currently only a single page contains data */
+                               verdict = xennet_run_xdp(queue,
+                                                        skb_frag_page(&skb_shinfo(skb)->frags[0]),
+                                                        rx, xdp_prog, &xdp, need_xdp_flush);
+                               if (verdict != XDP_PASS)
+                                       err = -EINVAL;
+                       } else {
+                               /* drop the frame */
+                               err = -EINVAL;
+                       }
+               }
+               rcu_read_unlock();
 next:
+               __skb_queue_tail(list, skb);
                if (!(rx->flags & XEN_NETRXF_more_data))
                        break;
 
@@ -998,6 +1147,7 @@ static int xennet_poll(struct napi_struct *napi, int budget)
        struct sk_buff_head errq;
        struct sk_buff_head tmpq;
        int err;
+       bool need_xdp_flush = false;
 
        spin_lock(&queue->rx_lock);
 
@@ -1014,7 +1164,8 @@ static int xennet_poll(struct napi_struct *napi, int budget)
                memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
                memset(extras, 0, sizeof(rinfo.extras));
 
-               err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
+               err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
+                                          &need_xdp_flush);
 
                if (unlikely(err)) {
 err:
@@ -1060,6 +1211,8 @@ err:
                i = ++queue->rx.rsp_cons;
                work_done++;
        }
+       if (need_xdp_flush)
+               xdp_do_flush();
 
        __skb_queue_purge(&errq);
 
@@ -1261,6 +1414,101 @@ static void xennet_poll_controller(struct net_device *dev)
 }
 #endif
 
+#define NETBACK_XDP_HEADROOM_DISABLE   0
+#define NETBACK_XDP_HEADROOM_ENABLE    1
+
+static int talk_to_netback_xdp(struct netfront_info *np, int xdp)
+{
+       int err;
+       unsigned short headroom;
+
+       headroom = xdp ? XDP_PACKET_HEADROOM : 0;
+       err = xenbus_printf(XBT_NIL, np->xbdev->nodename,
+                           "xdp-headroom", "%hu",
+                           headroom);
+       if (err)
+               pr_warn("Error writing xdp-headroom\n");
+
+       return err;
+}
+
+static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
+                         struct netlink_ext_ack *extack)
+{
+       unsigned long max_mtu = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM;
+       struct netfront_info *np = netdev_priv(dev);
+       struct bpf_prog *old_prog;
+       unsigned int i, err;
+
+       if (dev->mtu > max_mtu) {
+               netdev_warn(dev, "XDP requires MTU less than %lu\n", max_mtu);
+               return -EINVAL;
+       }
+
+       if (!np->netback_has_xdp_headroom)
+               return 0;
+
+       xenbus_switch_state(np->xbdev, XenbusStateReconfiguring);
+
+       err = talk_to_netback_xdp(np, prog ? NETBACK_XDP_HEADROOM_ENABLE :
+                                 NETBACK_XDP_HEADROOM_DISABLE);
+       if (err)
+               return err;
+
+       /* avoid the race with XDP headroom adjustment */
+       wait_event(module_wq,
+                  xenbus_read_driver_state(np->xbdev->otherend) ==
+                  XenbusStateReconfigured);
+       np->netfront_xdp_enabled = true;
+
+       old_prog = rtnl_dereference(np->queues[0].xdp_prog);
+
+       if (prog)
+               bpf_prog_add(prog, dev->real_num_tx_queues);
+
+       for (i = 0; i < dev->real_num_tx_queues; ++i)
+               rcu_assign_pointer(np->queues[i].xdp_prog, prog);
+
+       if (old_prog)
+               for (i = 0; i < dev->real_num_tx_queues; ++i)
+                       bpf_prog_put(old_prog);
+
+       xenbus_switch_state(np->xbdev, XenbusStateConnected);
+
+       return 0;
+}
+
+static u32 xennet_xdp_query(struct net_device *dev)
+{
+       unsigned int num_queues = dev->real_num_tx_queues;
+       struct netfront_info *np = netdev_priv(dev);
+       const struct bpf_prog *xdp_prog;
+       struct netfront_queue *queue;
+       unsigned int i;
+
+       for (i = 0; i < num_queues; ++i) {
+               queue = &np->queues[i];
+               xdp_prog = rtnl_dereference(queue->xdp_prog);
+               if (xdp_prog)
+                       return xdp_prog->aux->id;
+       }
+
+       return 0;
+}
+
+static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+       switch (xdp->command) {
+       case XDP_SETUP_PROG:
+               return xennet_xdp_set(dev, xdp->prog, xdp->extack);
+       case XDP_QUERY_PROG:
+               xdp->prog_id = xennet_xdp_query(dev);
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
 static const struct net_device_ops xennet_netdev_ops = {
        .ndo_open            = xennet_open,
        .ndo_stop            = xennet_close,
@@ -1272,6 +1520,8 @@ static const struct net_device_ops xennet_netdev_ops = {
        .ndo_fix_features    = xennet_fix_features,
        .ndo_set_features    = xennet_set_features,
        .ndo_select_queue    = xennet_select_queue,
+       .ndo_bpf            = xennet_xdp,
+       .ndo_xdp_xmit       = xennet_xdp_xmit,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = xennet_poll_controller,
 #endif
@@ -1331,6 +1581,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
        SET_NETDEV_DEV(netdev, &dev->dev);
 
        np->netdev = netdev;
+       np->netfront_xdp_enabled = false;
 
        netif_carrier_off(netdev);
 
@@ -1419,6 +1670,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
                queue->rx_ring_ref = GRANT_INVALID_REF;
                queue->tx.sring = NULL;
                queue->rx.sring = NULL;
+
+               page_pool_destroy(queue->page_pool);
        }
 }
 
@@ -1754,6 +2007,51 @@ static void xennet_destroy_queues(struct netfront_info *info)
        info->queues = NULL;
 }
 
+
+
+static int xennet_create_page_pool(struct netfront_queue *queue)
+{
+       int err;
+       struct page_pool_params pp_params = {
+               .order = 0,
+               .flags = 0,
+               .pool_size = NET_RX_RING_SIZE,
+               .nid = NUMA_NO_NODE,
+               .dev = &queue->info->netdev->dev,
+               .offset = XDP_PACKET_HEADROOM,
+               .max_len = XEN_PAGE_SIZE - XDP_PACKET_HEADROOM,
+       };
+
+       queue->page_pool = page_pool_create(&pp_params);
+       if (IS_ERR(queue->page_pool)) {
+               err = PTR_ERR(queue->page_pool);
+               queue->page_pool = NULL;
+               return err;
+       }
+
+       err = xdp_rxq_info_reg(&queue->xdp_rxq, queue->info->netdev,
+                              queue->id);
+       if (err) {
+               netdev_err(queue->info->netdev, "xdp_rxq_info_reg failed\n");
+               goto err_free_pp;
+       }
+
+       err = xdp_rxq_info_reg_mem_model(&queue->xdp_rxq,
+                                        MEM_TYPE_PAGE_POOL, queue->page_pool);
+       if (err) {
+               netdev_err(queue->info->netdev, "xdp_rxq_info_reg_mem_model failed\n");
+               goto err_unregister_rxq;
+       }
+       return 0;
+
+err_unregister_rxq:
+       xdp_rxq_info_unreg(&queue->xdp_rxq);
+err_free_pp:
+       page_pool_destroy(queue->page_pool);
+       queue->page_pool = NULL;
+       return err;
+}
+
 static int xennet_create_queues(struct netfront_info *info,
                                unsigned int *num_queues)
 {
@@ -1779,6 +2077,14 @@ static int xennet_create_queues(struct netfront_info *info,
                        break;
                }
 
+               /* use page pool recycling instead of buddy allocator */
+               ret = xennet_create_page_pool(queue);
+               if (ret < 0) {
+                       dev_err(&info->xbdev->dev, "can't allocate page pool\n");
+                       *num_queues = i;
+                       return ret;
+               }
+
                netif_napi_add(queue->info->netdev, &queue->napi,
                               xennet_poll, 64);
                if (netif_running(info->netdev))
@@ -1825,6 +2131,17 @@ static int talk_to_netback(struct xenbus_device *dev,
                goto out_unlocked;
        }
 
+       info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
+                                                             "feature-xdp-headroom", 0);
+       if (info->netback_has_xdp_headroom) {
+               /* set the current xen-netfront xdp state */
+               err = talk_to_netback_xdp(info, info->netfront_xdp_enabled ?
+                                         NETBACK_XDP_HEADROOM_ENABLE :
+                                         NETBACK_XDP_HEADROOM_DISABLE);
+               if (err)
+                       goto out_unlocked;
+       }
+
        rtnl_lock();
        if (info->queues)
                xennet_destroy_queues(info);
@@ -1959,6 +2276,8 @@ static int xennet_connect(struct net_device *dev)
        err = talk_to_netback(np->xbdev, np);
        if (err)
                return err;
+       if (np->netback_has_xdp_headroom)
+               pr_info("backend supports XDP headroom\n");
 
        /* talk_to_netback() sets the correct number of queues */
        num_queues = dev->real_num_tx_queues;
@@ -2095,6 +2414,7 @@ static const struct ethtool_ops xennet_ethtool_ops =
        .get_sset_count = xennet_get_sset_count,
        .get_ethtool_stats = xennet_get_ethtool_stats,
        .get_strings = xennet_get_strings,
+       .get_ts_info = ethtool_op_get_ts_info,
 };
 
 #ifdef CONFIG_SYSFS
index ccbb5b4..4502f9c 100644 (file)
@@ -679,18 +679,8 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
                return a->mode;
        }
 
-       if (a == &dev_attr_align.attr) {
-               int i;
-
-               for (i = 0; i < nd_region->ndr_mappings; i++) {
-                       struct nd_mapping *nd_mapping = &nd_region->mapping[i];
-                       struct nvdimm *nvdimm = nd_mapping->nvdimm;
-
-                       if (test_bit(NDD_LABELING, &nvdimm->flags))
-                               return a->mode;
-               }
-               return 0;
-       }
+       if (a == &dev_attr_align.attr)
+               return a->mode;
 
        if (a != &dev_attr_set_cookie.attr
                        && a != &dev_attr_available_size.attr)
index 89b8597..4cef69b 100644 (file)
@@ -95,7 +95,7 @@ static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
        struct encrypted_key_payload *epayload;
        struct device *dev = &nvdimm->dev;
 
-       keyref = lookup_user_key(id, 0, 0);
+       keyref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
        if (IS_ERR(keyref))
                return NULL;
 
index c2c5bc4..8410d03 100644 (file)
@@ -1116,10 +1116,16 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
                dev_warn(ctrl->device,
                        "Identify Descriptors failed (%d)\n", status);
                 /*
-                 * Don't treat an error as fatal, as we potentially already
-                 * have a NGUID or EUI-64.
+                 * Don't treat non-retryable errors as fatal, as we potentially
+                 * already have a NGUID or EUI-64.  If we failed with DNR set,
+                 * we want to silently ignore the error as we can still
+                 * identify the device, but if the status has DNR set, we want
+                 * to propagate the error back specifically for the disk
+                 * revalidation flow to make sure we don't abandon the
+                 * device just because of a temporal retry-able error (such
+                 * as path of transport errors).
                  */
-               if (status > 0 && !(status & NVME_SC_DNR))
+               if (status > 0 && (status & NVME_SC_DNR))
                        status = 0;
                goto free_data;
        }
@@ -1974,7 +1980,6 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
        if (ns->head->disk) {
                nvme_update_disk_info(ns->head->disk, ns, id);
                blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
-               revalidate_disk(ns->head->disk);
        }
 #endif
        return 0;
@@ -4174,6 +4179,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
        ctrl->dev = dev;
        ctrl->ops = ops;
        ctrl->quirks = quirks;
+       ctrl->numa_node = NUMA_NO_NODE;
        INIT_WORK(&ctrl->scan_work, nvme_scan_work);
        INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
        INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
index da78e49..6650947 100644 (file)
@@ -409,15 +409,14 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
 {
        struct nvme_ns_head *head = ns->head;
 
-       lockdep_assert_held(&ns->head->lock);
-
        if (!head->disk)
                return;
 
-       if (!(head->disk->flags & GENHD_FL_UP))
+       if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
                device_add_disk(&head->subsys->dev, head->disk,
                                nvme_ns_id_attr_groups);
 
+       mutex_lock(&head->lock);
        if (nvme_path_is_optimized(ns)) {
                int node, srcu_idx;
 
@@ -426,9 +425,10 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
                        __nvme_find_path(head, node);
                srcu_read_unlock(&head->srcu, srcu_idx);
        }
+       mutex_unlock(&head->lock);
 
-       synchronize_srcu(&ns->head->srcu);
-       kblockd_schedule_work(&ns->head->requeue_work);
+       synchronize_srcu(&head->srcu);
+       kblockd_schedule_work(&head->requeue_work);
 }
 
 static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
@@ -483,14 +483,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
 static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
                struct nvme_ns *ns)
 {
-       mutex_lock(&ns->head->lock);
        ns->ana_grpid = le32_to_cpu(desc->grpid);
        ns->ana_state = desc->state;
        clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
 
        if (nvme_state_is_live(ns->ana_state))
                nvme_mpath_set_live(ns);
-       mutex_unlock(&ns->head->lock);
 }
 
 static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
@@ -640,38 +638,45 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
 }
 DEVICE_ATTR_RO(ana_state);
 
-static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl,
+static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
                struct nvme_ana_group_desc *desc, void *data)
 {
-       struct nvme_ns *ns = data;
+       struct nvme_ana_group_desc *dst = data;
 
-       if (ns->ana_grpid == le32_to_cpu(desc->grpid)) {
-               nvme_update_ns_ana_state(desc, ns);
-               return -ENXIO; /* just break out of the loop */
-       }
+       if (desc->grpid != dst->grpid)
+               return 0;
 
-       return 0;
+       *dst = *desc;
+       return -ENXIO; /* just break out of the loop */
 }
 
 void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
 {
        if (nvme_ctrl_use_ana(ns->ctrl)) {
+               struct nvme_ana_group_desc desc = {
+                       .grpid = id->anagrpid,
+                       .state = 0,
+               };
+
                mutex_lock(&ns->ctrl->ana_lock);
                ns->ana_grpid = le32_to_cpu(id->anagrpid);
-               nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state);
+               nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
                mutex_unlock(&ns->ctrl->ana_lock);
+               if (desc.state) {
+                       /* found the group desc: update */
+                       nvme_update_ns_ana_state(&desc, ns);
+               }
        } else {
-               mutex_lock(&ns->head->lock);
                ns->ana_state = NVME_ANA_OPTIMIZED; 
                nvme_mpath_set_live(ns);
-               mutex_unlock(&ns->head->lock);
        }
 
        if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
-               struct backing_dev_info *info =
-                                       ns->head->disk->queue->backing_dev_info;
+               struct gendisk *disk = ns->head->disk;
 
-               info->capabilities |= BDI_CAP_STABLE_WRITES;
+               if (disk)
+                       disk->queue->backing_dev_info->capabilities |=
+                                       BDI_CAP_STABLE_WRITES;
        }
 }
 
@@ -686,6 +691,14 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
        kblockd_schedule_work(&head->requeue_work);
        flush_work(&head->requeue_work);
        blk_cleanup_queue(head->disk->queue);
+       if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
+               /*
+                * if device_add_disk wasn't called, prevent
+                * disk release to put a bogus reference on the
+                * request queue
+                */
+               head->disk->queue = NULL;
+       }
        put_disk(head->disk);
 }
 
index c0f4226..2ef8d50 100644 (file)
@@ -364,6 +364,8 @@ struct nvme_ns_head {
        spinlock_t              requeue_lock;
        struct work_struct      requeue_work;
        struct mutex            lock;
+       unsigned long           flags;
+#define NVME_NSHEAD_DISK_LIVE  0
        struct nvme_ns __rcu    *current_path[];
 #endif
 };
index e2bacd3..b1d18f0 100644 (file)
@@ -1593,7 +1593,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
 
                dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
                dev->admin_tagset.timeout = ADMIN_TIMEOUT;
-               dev->admin_tagset.numa_node = dev_to_node(dev->dev);
+               dev->admin_tagset.numa_node = dev->ctrl.numa_node;
                dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
                dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
                dev->admin_tagset.driver_data = dev;
@@ -1669,6 +1669,8 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
        if (result)
                return result;
 
+       dev->ctrl.numa_node = dev_to_node(dev->dev);
+
        nvmeq = &dev->queues[0];
        aqa = nvmeq->q_depth - 1;
        aqa |= aqa << 16;
@@ -2257,7 +2259,7 @@ static void nvme_dev_add(struct nvme_dev *dev)
                if (dev->io_queues[HCTX_TYPE_POLL])
                        dev->tagset.nr_maps++;
                dev->tagset.timeout = NVME_IO_TIMEOUT;
-               dev->tagset.numa_node = dev_to_node(dev->dev);
+               dev->tagset.numa_node = dev->ctrl.numa_node;
                dev->tagset.queue_depth =
                                min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
                dev->tagset.cmd_size = sizeof(struct nvme_iod);
index f8f856d..13506a8 100644 (file)
@@ -470,7 +470,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
         * Spread I/O queues completion vectors according their queue index.
         * Admin queues can always go on completion vector 0.
         */
-       comp_vector = idx == 0 ? idx : idx - 1;
+       comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
 
        /* Polling queues need direct cq polling context */
        if (nvme_rdma_poll_queue(queue))
index 3345ec7..79ef2b8 100644 (file)
@@ -1532,7 +1532,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
                set->ops = &nvme_tcp_admin_mq_ops;
                set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
                set->reserved_tags = 2; /* connect + keep-alive */
-               set->numa_node = NUMA_NO_NODE;
+               set->numa_node = nctrl->numa_node;
                set->flags = BLK_MQ_F_BLOCKING;
                set->cmd_size = sizeof(struct nvme_tcp_request);
                set->driver_data = ctrl;
@@ -1544,7 +1544,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
                set->ops = &nvme_tcp_mq_ops;
                set->queue_depth = nctrl->sqsize + 1;
                set->reserved_tags = 1; /* fabric connect */
-               set->numa_node = NUMA_NO_NODE;
+               set->numa_node = nctrl->numa_node;
                set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
                set->cmd_size = sizeof(struct nvme_tcp_request);
                set->driver_data = ctrl;
index 0d54e73..6344e73 100644 (file)
@@ -340,7 +340,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
        ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
        ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
        ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
-       ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
+       ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
        ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
                NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
        ctrl->admin_tag_set.driver_data = ctrl;
@@ -512,7 +512,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
        ctrl->tag_set.ops = &nvme_loop_mq_ops;
        ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
        ctrl->tag_set.reserved_tags = 1; /* fabric connect */
-       ctrl->tag_set.numa_node = NUMA_NO_NODE;
+       ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
        ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
                NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
index a04afe7..eb84507 100644 (file)
@@ -314,10 +314,15 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
                                 child, addr);
 
                        if (of_mdiobus_child_is_phy(child)) {
+                               /* -ENODEV is the return code that PHYLIB has
+                                * standardized on to indicate that bus
+                                * scanning should continue.
+                                */
                                rc = of_mdiobus_register_phy(mdio, child, addr);
-                               if (rc && rc != -ENODEV)
+                               if (!rc)
+                                       break;
+                               if (rc != -ENODEV)
                                        goto unregister;
-                               break;
                        }
                }
        }
@@ -380,7 +385,7 @@ struct phy_device *of_phy_connect(struct net_device *dev,
        if (!phy)
                return NULL;
 
-       phy->dev_flags = flags;
+       phy->dev_flags |= flags;
 
        ret = phy_connect_direct(dev, phy, hndlr, iface);
 
index e1d097e..31478c0 100644 (file)
@@ -33,7 +33,7 @@ void flush_cpu_work(void);
 struct op_sample {
        unsigned long eip;
        unsigned long event;
-       unsigned long data[0];
+       unsigned long data[];
 };
 
 struct op_entry;
index 2c1a7d7..77fb23b 100644 (file)
@@ -43,7 +43,7 @@ struct samsung_usb2_phy_driver {
        struct regmap *reg_pmu;
        struct regmap *reg_sys;
        spinlock_t lock;
-       struct samsung_usb2_phy_instance instances[0];
+       struct samsung_usb2_phy_instance instances[];
 };
 
 struct samsung_usb2_common_phy {
index cb7e0f0..1f81569 100644 (file)
@@ -824,13 +824,12 @@ int imx_pinctrl_probe(struct platform_device *pdev,
                                return -EINVAL;
                        }
 
-                       ipctl->input_sel_base = devm_of_iomap(&pdev->dev, np,
-                                                             0, NULL);
+                       ipctl->input_sel_base = of_iomap(np, 0);
                        of_node_put(np);
-                       if (IS_ERR(ipctl->input_sel_base)) {
+                       if (!ipctl->input_sel_base) {
                                dev_err(&pdev->dev,
                                        "iomuxc input select base address not found\n");
-                               return PTR_ERR(ipctl->input_sel_base);
+                               return -ENOMEM;
                        }
                }
        }
index 0ff7c55..615174a 100644 (file)
@@ -800,6 +800,21 @@ static void byt_gpio_disable_free(struct pinctrl_dev *pctl_dev,
        pm_runtime_put(vg->dev);
 }
 
+static void byt_gpio_direct_irq_check(struct intel_pinctrl *vg,
+                                     unsigned int offset)
+{
+       void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
+
+       /*
+        * Before making any direction modifications, do a check if gpio is set
+        * for direct IRQ. On Bay Trail, setting GPIO to output does not make
+        * sense, so let's at least inform the caller before they shoot
+        * themselves in the foot.
+        */
+       if (readl(conf_reg) & BYT_DIRECT_IRQ_EN)
+               dev_info_once(vg->dev, "Potential Error: Setting GPIO with direct_irq_en to output");
+}
+
 static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
                                  struct pinctrl_gpio_range *range,
                                  unsigned int offset,
@@ -807,7 +822,6 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
 {
        struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
        void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
-       void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
        unsigned long flags;
        u32 value;
 
@@ -817,14 +831,8 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
        value &= ~BYT_DIR_MASK;
        if (input)
                value |= BYT_OUTPUT_EN;
-       else if (readl(conf_reg) & BYT_DIRECT_IRQ_EN)
-               /*
-                * Before making any direction modifications, do a check if gpio
-                * is set for direct IRQ.  On baytrail, setting GPIO to output
-                * does not make sense, so let's at least inform the caller before
-                * they shoot themselves in the foot.
-                */
-               dev_info_once(vg->dev, "Potential Error: Setting GPIO with direct_irq_en to output");
+       else
+               byt_gpio_direct_irq_check(vg, offset);
 
        writel(value, val_reg);
 
@@ -1165,19 +1173,50 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
 
 static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
 {
-       return pinctrl_gpio_direction_input(chip->base + offset);
+       struct intel_pinctrl *vg = gpiochip_get_data(chip);
+       void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+       unsigned long flags;
+       u32 reg;
+
+       raw_spin_lock_irqsave(&byt_lock, flags);
+
+       reg = readl(val_reg);
+       reg &= ~BYT_DIR_MASK;
+       reg |= BYT_OUTPUT_EN;
+       writel(reg, val_reg);
+
+       raw_spin_unlock_irqrestore(&byt_lock, flags);
+       return 0;
 }
 
+/*
+ * Note despite the temptation this MUST NOT be converted into a call to
+ * pinctrl_gpio_direction_output() + byt_gpio_set() that does not work this
+ * MUST be done as a single BYT_VAL_REG register write.
+ * See the commit message of the commit adding this comment for details.
+ */
 static int byt_gpio_direction_output(struct gpio_chip *chip,
                                     unsigned int offset, int value)
 {
-       int ret = pinctrl_gpio_direction_output(chip->base + offset);
+       struct intel_pinctrl *vg = gpiochip_get_data(chip);
+       void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+       unsigned long flags;
+       u32 reg;
 
-       if (ret)
-               return ret;
+       raw_spin_lock_irqsave(&byt_lock, flags);
+
+       byt_gpio_direct_irq_check(vg, offset);
 
-       byt_gpio_set(chip, offset, value);
+       reg = readl(val_reg);
+       reg &= ~BYT_DIR_MASK;
+       if (value)
+               reg |= BYT_LEVEL;
+       else
+               reg &= ~BYT_LEVEL;
 
+       writel(reg, val_reg);
+
+       raw_spin_unlock_irqrestore(&byt_lock, flags);
        return 0;
 }
 
index 3e5760f..d4a192d 100644 (file)
@@ -252,7 +252,7 @@ static const struct amd_pingroup kerncz_groups[] = {
        {
                .name = "uart0",
                .pins = uart0_pins,
-               .npins = 9,
+               .npins = 5,
        },
        {
                .name = "uart1",
index e06fb88..1f47a66 100644 (file)
@@ -126,10 +126,7 @@ static int mcp23s08_spi_regmap_init(struct mcp23s08 *mcp, struct device *dev,
        copy->name = name;
 
        mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, copy);
-       if (IS_ERR(mcp->regmap))
-               return PTR_ERR(mcp->regmap);
-
-       return 0;
+       return PTR_ERR_OR_ZERO(mcp->regmap);
 }
 
 static int mcp23s08_probe(struct spi_device *spi)
index 1e0614d..f3a8a46 100644 (file)
@@ -958,7 +958,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
 }
 
 /**
- * smux_parse_one_pinctrl_entry() - parses a device tree mux entry
+ * pcs_parse_one_pinctrl_entry() - parses a device tree mux entry
  * @pctldev: pin controller device
  * @pcs: pinctrl driver instance
  * @np: device node of the mux entry
index 38c33a7..ec50a3b 100644 (file)
@@ -367,7 +367,8 @@ static const char * const wci20_groups[] = {
 
 static const char * const qpic_pad_groups[] = {
        "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio9", "gpio10",
-       "gpio11", "gpio17",
+       "gpio11", "gpio17", "gpio15", "gpio12", "gpio13", "gpio14", "gpio5",
+       "gpio6", "gpio7", "gpio8",
 };
 
 static const char * const burn0_groups[] = {
index fe0be8a..092a48e 100644 (file)
@@ -170,6 +170,7 @@ struct pmic_gpio_state {
        struct regmap   *map;
        struct pinctrl_dev *ctrl;
        struct gpio_chip chip;
+       struct irq_chip irq;
 };
 
 static const struct pinconf_generic_params pmic_gpio_bindings[] = {
@@ -917,16 +918,6 @@ static int pmic_gpio_populate(struct pmic_gpio_state *state,
        return 0;
 }
 
-static struct irq_chip pmic_gpio_irq_chip = {
-       .name = "spmi-gpio",
-       .irq_ack = irq_chip_ack_parent,
-       .irq_mask = irq_chip_mask_parent,
-       .irq_unmask = irq_chip_unmask_parent,
-       .irq_set_type = irq_chip_set_type_parent,
-       .irq_set_wake = irq_chip_set_wake_parent,
-       .flags = IRQCHIP_MASK_ON_SUSPEND,
-};
-
 static int pmic_gpio_domain_translate(struct irq_domain *domain,
                                      struct irq_fwspec *fwspec,
                                      unsigned long *hwirq,
@@ -1053,8 +1044,16 @@ static int pmic_gpio_probe(struct platform_device *pdev)
        if (!parent_domain)
                return -ENXIO;
 
+       state->irq.name = "spmi-gpio",
+       state->irq.irq_ack = irq_chip_ack_parent,
+       state->irq.irq_mask = irq_chip_mask_parent,
+       state->irq.irq_unmask = irq_chip_unmask_parent,
+       state->irq.irq_set_type = irq_chip_set_type_parent,
+       state->irq.irq_set_wake = irq_chip_set_wake_parent,
+       state->irq.flags = IRQCHIP_MASK_ON_SUSPEND,
+
        girq = &state->chip.irq;
-       girq->chip = &pmic_gpio_irq_chip;
+       girq->chip = &state->irq;
        girq->default_type = IRQ_TYPE_NONE;
        girq->handler = handle_level_irq;
        girq->fwnode = of_node_to_fwnode(state->dev->of_node);
index 21661f6..195cfe5 100644 (file)
@@ -731,8 +731,8 @@ static int tegra_pinctrl_resume(struct device *dev)
 }
 
 const struct dev_pm_ops tegra_pinctrl_pm = {
-       .suspend = &tegra_pinctrl_suspend,
-       .resume = &tegra_pinctrl_resume
+       .suspend_noirq = &tegra_pinctrl_suspend,
+       .resume_noirq = &tegra_pinctrl_resume
 };
 
 static bool tegra_pinctrl_gpio_node_has_range(struct tegra_pmx *pmx)
index 375cd6e..e0e6f85 100644 (file)
@@ -191,12 +191,33 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
                        err = -EFAULT;
                        break;
                }
-               if (((req.perout.flags & ~PTP_PEROUT_VALID_FLAGS) ||
-                       req.perout.rsv[0] || req.perout.rsv[1] ||
-                       req.perout.rsv[2] || req.perout.rsv[3]) &&
-                       cmd == PTP_PEROUT_REQUEST2) {
-                       err = -EINVAL;
-                       break;
+               if (cmd == PTP_PEROUT_REQUEST2) {
+                       struct ptp_perout_request *perout = &req.perout;
+
+                       if (perout->flags & ~PTP_PEROUT_VALID_FLAGS) {
+                               err = -EINVAL;
+                               break;
+                       }
+                       /*
+                        * The "on" field has undefined meaning if
+                        * PTP_PEROUT_DUTY_CYCLE isn't set, we must still treat
+                        * it as reserved, which must be set to zero.
+                        */
+                       if (!(perout->flags & PTP_PEROUT_DUTY_CYCLE) &&
+                           (perout->rsv[0] || perout->rsv[1] ||
+                            perout->rsv[2] || perout->rsv[3])) {
+                               err = -EINVAL;
+                               break;
+                       }
+                       if (perout->flags & PTP_PEROUT_DUTY_CYCLE) {
+                               /* The duty cycle must be subunitary. */
+                               if (perout->on.sec > perout->period.sec ||
+                                   (perout->on.sec == perout->period.sec &&
+                                    perout->on.nsec > perout->period.nsec)) {
+                                       err = -ERANGE;
+                                       break;
+                               }
+                       }
                } else if (cmd == PTP_PEROUT_REQUEST) {
                        req.perout.flags &= PTP_PEROUT_V1_VALID_FLAGS;
                        req.perout.rsv[0] = 0;
index dcd6e00..ce10ecd 100644 (file)
@@ -508,40 +508,8 @@ static const struct ptp_clock_info ptp_pch_caps = {
        .enable         = ptp_pch_enable,
 };
 
-
-#ifdef CONFIG_PM
-static s32 pch_suspend(struct pci_dev *pdev, pm_message_t state)
-{
-       pci_disable_device(pdev);
-       pci_enable_wake(pdev, PCI_D3hot, 0);
-
-       if (pci_save_state(pdev) != 0) {
-               dev_err(&pdev->dev, "could not save PCI config state\n");
-               return -ENOMEM;
-       }
-       pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
-       return 0;
-}
-
-static s32 pch_resume(struct pci_dev *pdev)
-{
-       s32 ret;
-
-       pci_set_power_state(pdev, PCI_D0);
-       pci_restore_state(pdev);
-       ret = pci_enable_device(pdev);
-       if (ret) {
-               dev_err(&pdev->dev, "pci_enable_device failed\n");
-               return ret;
-       }
-       pci_enable_wake(pdev, PCI_D3hot, 0);
-       return 0;
-}
-#else
 #define pch_suspend NULL
 #define pch_resume NULL
-#endif
 
 static void pch_remove(struct pci_dev *pdev)
 {
@@ -684,13 +652,14 @@ static const struct pci_device_id pch_ieee1588_pcidev_id[] = {
        {0}
 };
 
+static SIMPLE_DEV_PM_OPS(pch_pm_ops, pch_suspend, pch_resume);
+
 static struct pci_driver pch_driver = {
        .name = KBUILD_MODNAME,
        .id_table = pch_ieee1588_pcidev_id,
        .probe = pch_probe,
        .remove = pch_remove,
-       .suspend = pch_suspend,
-       .resume = pch_resume,
+       .driver.pm = &pch_pm_ops,
 };
 
 static void __exit ptp_pch_exit(void)
index 0e90c5d..eb8ed28 100644 (file)
@@ -39,7 +39,7 @@ struct rio_id_table {
        u16 start;      /* logical minimal id */
        u32 max;        /* max number of IDs in table */
        spinlock_t lock;
-       unsigned long table[0];
+       unsigned long table[];
 };
 
 static int next_destid = 0;
index 8f677f5..edb1c4f 100644 (file)
@@ -684,7 +684,7 @@ config REGULATOR_MT6323
 
 config REGULATOR_MT6358
        tristate "MediaTek MT6358 PMIC"
-       depends on MFD_MT6397 && BROKEN
+       depends on MFD_MT6397
        help
          Say y here to select this option to enable the power regulator of
          MediaTek MT6358 PMIC.
index e1d6c8f..fe65b5a 100644 (file)
@@ -512,7 +512,6 @@ static const struct da9063_regulator_info da9063_regulator_info[] = {
        },
        {
                DA9063_LDO(DA9063, LDO9, 950, 50, 3600),
-               .suspend = BFIELD(DA9063_REG_LDO9_CONT, DA9063_VLDO9_SEL),
        },
        {
                DA9063_LDO(DA9063, LDO11, 900, 50, 3600),
index e970e9d..e4bb09b 100644 (file)
@@ -486,7 +486,7 @@ int regulator_map_voltage_pickable_linear_range(struct regulator_dev *rdev,
                        continue;
                }
 
-               ret = selector + sel;
+               ret = selector + sel - range->min_sel;
 
                voltage = rdev->desc->ops->list_voltage(rdev, ret);
 
index 6895379..4c8e8b4 100644 (file)
@@ -209,6 +209,19 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {
 
 };
 
+static const struct regulator_ops pfuze3000_sw_regulator_ops = {
+       .enable = regulator_enable_regmap,
+       .disable = regulator_disable_regmap,
+       .is_enabled = regulator_is_enabled_regmap,
+       .list_voltage = regulator_list_voltage_table,
+       .map_voltage = regulator_map_voltage_ascend,
+       .set_voltage_sel = regulator_set_voltage_sel_regmap,
+       .get_voltage_sel = regulator_get_voltage_sel_regmap,
+       .set_voltage_time_sel = regulator_set_voltage_time_sel,
+       .set_ramp_delay = pfuze100_set_ramp_delay,
+
+};
+
 #define PFUZE100_FIXED_REG(_chip, _name, base, voltage)        \
        [_chip ## _ ## _name] = {       \
                .desc = {       \
@@ -318,23 +331,28 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {
        .stby_mask = 0x20,      \
 }
 
-
-#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step)  {       \
-       .desc = {       \
-               .name = #_name,\
-               .n_voltages = ((max) - (min)) / (step) + 1,     \
-               .ops = &pfuze100_sw_regulator_ops,      \
-               .type = REGULATOR_VOLTAGE,      \
-               .id = _chip ## _ ## _name,      \
-               .owner = THIS_MODULE,   \
-               .min_uV = (min),        \
-               .uV_step = (step),      \
-               .vsel_reg = (base) + PFUZE100_VOL_OFFSET,       \
-               .vsel_mask = 0x7,       \
-       },      \
-       .stby_reg = (base) + PFUZE100_STANDBY_OFFSET,   \
-       .stby_mask = 0x7,       \
-}
+/* No linar case for the some switches of PFUZE3000 */
+#define PFUZE3000_SW_REG(_chip, _name, base, mask, voltages)   \
+       [_chip ## _ ##  _name] = {      \
+               .desc = {       \
+                       .name = #_name, \
+                       .n_voltages = ARRAY_SIZE(voltages),     \
+                       .ops = &pfuze3000_sw_regulator_ops,     \
+                       .type = REGULATOR_VOLTAGE,      \
+                       .id = _chip ## _ ## _name,      \
+                       .owner = THIS_MODULE,   \
+                       .volt_table = voltages, \
+                       .vsel_reg = (base) + PFUZE100_VOL_OFFSET,       \
+                       .vsel_mask = (mask),    \
+                       .enable_reg = (base) + PFUZE100_MODE_OFFSET,    \
+                       .enable_mask = 0xf,     \
+                       .enable_val = 0x8,      \
+                       .enable_time = 500,     \
+               },      \
+               .stby_reg = (base) + PFUZE100_STANDBY_OFFSET,   \
+               .stby_mask = (mask),    \
+               .sw_reg = true,         \
+       }
 
 #define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step)  {       \
        .desc = {       \
@@ -391,9 +409,9 @@ static struct pfuze_regulator pfuze200_regulators[] = {
 };
 
 static struct pfuze_regulator pfuze3000_regulators[] = {
-       PFUZE100_SWB_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
+       PFUZE3000_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
        PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000),
-       PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+       PFUZE3000_SW_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
        PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
        PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
        PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
@@ -407,8 +425,8 @@ static struct pfuze_regulator pfuze3000_regulators[] = {
 };
 
 static struct pfuze_regulator pfuze3001_regulators[] = {
-       PFUZE100_SWB_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
-       PFUZE100_SWB_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+       PFUZE3000_SW_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
+       PFUZE3000_SW_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
        PFUZE3000_SW3_REG(PFUZE3001, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
        PFUZE100_SWB_REG(PFUZE3001, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
        PFUZE100_VGEN_REG(PFUZE3001, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000),
index eb13c47..bb1c840 100644 (file)
@@ -182,10 +182,9 @@ enum qdio_irq_poll_states {
 };
 
 struct qdio_input_q {
-       /* first ACK'ed buffer */
-       int ack_start;
-       /* how many SBALs are acknowledged */
-       int ack_count;
+       /* Batch of SBALs that we processed while polling the queue: */
+       unsigned int batch_start;
+       unsigned int batch_count;
        /* last time of noticing incoming data */
        u64 timestamp;
 };
index 286b044..da95c92 100644 (file)
@@ -110,8 +110,8 @@ static int qstat_show(struct seq_file *m, void *v)
        seq_printf(m, "nr_used: %d  ftc: %d\n",
                   atomic_read(&q->nr_buf_used), q->first_to_check);
        if (q->is_input_q) {
-               seq_printf(m, "ack start: %d  ack count: %d\n",
-                          q->u.in.ack_start, q->u.in.ack_count);
+               seq_printf(m, "batch start: %u  batch count: %u\n",
+                          q->u.in.batch_start, q->u.in.batch_count);
                seq_printf(m, "DSCI: %x   IRQs disabled: %u\n",
                           *(u8 *)q->irq_ptr->dsci,
                           test_bit(QDIO_IRQ_DISABLED,
index 610c05f..0c919a1 100644 (file)
@@ -254,10 +254,17 @@ static inline int set_buf_states(struct qdio_q *q, int bufnr,
        if (is_qebsm(q))
                return qdio_do_sqbs(q, state, bufnr, count);
 
+       /* Ensure that all preceding changes to the SBALs are visible: */
+       mb();
+
        for (i = 0; i < count; i++) {
-               xchg(&q->slsb.val[bufnr], state);
+               WRITE_ONCE(q->slsb.val[bufnr], state);
                bufnr = next_buf(bufnr);
        }
+
+       /* Make our SLSB changes visible: */
+       mb();
+
        return count;
 }
 
@@ -393,15 +400,15 @@ int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
 
 static inline void qdio_stop_polling(struct qdio_q *q)
 {
-       if (!q->u.in.ack_count)
+       if (!q->u.in.batch_count)
                return;
 
        qperf_inc(q, stop_polling);
 
        /* show the card that we are not polling anymore */
-       set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
-                      q->u.in.ack_count);
-       q->u.in.ack_count = 0;
+       set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT,
+                      q->u.in.batch_count);
+       q->u.in.batch_count = 0;
 }
 
 static inline void account_sbals(struct qdio_q *q, unsigned int count)
@@ -441,42 +448,13 @@ static void process_buffer_error(struct qdio_q *q, unsigned int start,
 static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
                                       int count, bool auto_ack)
 {
-       int new;
-
-       if (auto_ack) {
-               if (!q->u.in.ack_count) {
-                       q->u.in.ack_count = count;
-                       q->u.in.ack_start = start;
-                       return;
-               }
-
-               /* delete the previous ACK's */
-               set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
-                              q->u.in.ack_count);
-               q->u.in.ack_count = count;
-               q->u.in.ack_start = start;
-               return;
-       }
-
-       /*
-        * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
-        * or by the next inbound run.
-        */
-       new = add_buf(start, count - 1);
-       set_buf_state(q, new, SLSB_P_INPUT_ACK);
-
-       /* delete the previous ACKs */
-       if (q->u.in.ack_count)
-               set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
-                              q->u.in.ack_count);
+       /* ACK the newest SBAL: */
+       if (!auto_ack)
+               set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK);
 
-       q->u.in.ack_count = 1;
-       q->u.in.ack_start = new;
-       count--;
-       if (!count)
-               return;
-       /* need to change ALL buffers to get more interrupts */
-       set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count);
+       if (!q->u.in.batch_count)
+               q->u.in.batch_start = start;
+       q->u.in.batch_count += count;
 }
 
 static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
@@ -525,15 +503,18 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
                        account_sbals_error(q, count);
                return count;
        case SLSB_CU_INPUT_EMPTY:
-       case SLSB_P_INPUT_NOT_INIT:
-       case SLSB_P_INPUT_ACK:
                if (q->irq_ptr->perf_stat_enabled)
                        q->q_stats.nr_sbal_nop++;
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
                              q->nr, start);
                return 0;
+       case SLSB_P_INPUT_NOT_INIT:
+       case SLSB_P_INPUT_ACK:
+               /* We should never see this state, throw a WARN: */
        default:
-               WARN_ON_ONCE(1);
+               dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
+                             "found state %#x at index %u on queue %u\n",
+                             state, start, q->nr);
                return 0;
        }
 }
@@ -738,11 +719,14 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
                              q->nr);
                return 0;
-       case SLSB_P_OUTPUT_NOT_INIT:
        case SLSB_P_OUTPUT_HALTED:
                return 0;
+       case SLSB_P_OUTPUT_NOT_INIT:
+               /* We should never see this state, throw a WARN: */
        default:
-               WARN_ON_ONCE(1);
+               dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
+                             "found state %#x at index %u on queue %u\n",
+                             state, start, q->nr);
                return 0;
        }
 }
@@ -938,10 +922,10 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
        }
 }
 
-static void qdio_handle_activate_check(struct ccw_device *cdev,
-                               unsigned long intparm, int cstat, int dstat)
+static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
+                                      unsigned long intparm, int cstat,
+                                      int dstat)
 {
-       struct qdio_irq *irq_ptr = cdev->private->qdio_data;
        struct qdio_q *q;
 
        DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
@@ -968,11 +952,9 @@ no_handler:
        lgr_info_log();
 }
 
-static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
+static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
                                      int dstat)
 {
-       struct qdio_irq *irq_ptr = cdev->private->qdio_data;
-
        DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
 
        if (cstat)
@@ -1019,7 +1001,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
 
        switch (irq_ptr->state) {
        case QDIO_IRQ_STATE_INACTIVE:
-               qdio_establish_handle_irq(cdev, cstat, dstat);
+               qdio_establish_handle_irq(irq_ptr, cstat, dstat);
                break;
        case QDIO_IRQ_STATE_CLEANUP:
                qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
@@ -1031,7 +1013,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
                        return;
                }
                if (cstat || dstat)
-                       qdio_handle_activate_check(cdev, intparm, cstat,
+                       qdio_handle_activate_check(irq_ptr, intparm, cstat,
                                                   dstat);
                break;
        case QDIO_IRQ_STATE_STOPPED:
@@ -1446,12 +1428,12 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
 
        qperf_inc(q, inbound_call);
 
-       /* If any ACKed SBALs are returned to HW, adjust ACK tracking: */
-       overlap = min(count - sub_buf(q->u.in.ack_start, bufnr),
-                     q->u.in.ack_count);
+       /* If any processed SBALs are returned to HW, adjust our tracking: */
+       overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr),
+                            q->u.in.batch_count);
        if (overlap > 0) {
-               q->u.in.ack_start = add_buf(q->u.in.ack_start, overlap);
-               q->u.in.ack_count -= overlap;
+               q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap);
+               q->u.in.batch_count -= overlap;
        }
 
        count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
@@ -1535,12 +1517,11 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
            int q_nr, unsigned int bufnr, unsigned int count)
 {
-       struct qdio_irq *irq_ptr;
+       struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 
        if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
                return -EINVAL;
 
-       irq_ptr = cdev->private->qdio_data;
        if (!irq_ptr)
                return -ENODEV;
 
index a646fc8..13b26a1 100644 (file)
@@ -8,6 +8,7 @@
  *            Eric Farman <farman@linux.ibm.com>
  */
 
+#include <linux/slab.h>
 #include <linux/vfio.h>
 #include "vfio_ccw_private.h"
 
index 004ce02..3c3d403 100644 (file)
@@ -195,11 +195,10 @@ static inline struct ep11_cprb *alloc_cprb(size_t payload_len)
        size_t len = sizeof(struct ep11_cprb) + payload_len;
        struct ep11_cprb *cprb;
 
-       cprb = kmalloc(len, GFP_KERNEL);
+       cprb = kzalloc(len, GFP_KERNEL);
        if (!cprb)
                return NULL;
 
-       memset(cprb, 0, len);
        cprb->cprb_len = sizeof(struct ep11_cprb);
        cprb->cprb_ver_id = 0x04;
        memcpy(cprb->func_id, "T4", 2);
index 51ea56b..3d54c8b 100644 (file)
@@ -721,7 +721,6 @@ struct qeth_card_options {
        struct qeth_vnicc_info vnicc; /* VNICC options */
        enum qeth_discipline_id layer;
        enum qeth_ipa_isolation_modes isolation;
-       enum qeth_ipa_isolation_modes prev_isolation;
        int sniffer;
        enum qeth_cq cq;
        char hsuid[9];
@@ -804,14 +803,13 @@ struct qeth_card {
        struct workqueue_struct *event_wq;
        struct workqueue_struct *cmd_wq;
        wait_queue_head_t wait_q;
-       DECLARE_HASHTABLE(mac_htable, 4);
        DECLARE_HASHTABLE(ip_htable, 4);
        DECLARE_HASHTABLE(local_addrs4, 4);
        DECLARE_HASHTABLE(local_addrs6, 4);
        spinlock_t local_addrs4_lock;
        spinlock_t local_addrs6_lock;
        struct mutex ip_lock;
-       DECLARE_HASHTABLE(ip_mc_htable, 4);
+       DECLARE_HASHTABLE(rx_mode_addrs, 4);
        struct work_struct rx_mode_work;
        struct work_struct kernel_thread_starter;
        spinlock_t thread_mask_lock;
@@ -1071,6 +1069,9 @@ int qeth_query_switch_attributes(struct qeth_card *card,
                                  struct qeth_switch_info *sw_info);
 int qeth_query_card_info(struct qeth_card *card,
                         struct carrier_info *carrier_info);
+int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
+                                    enum qeth_ipa_isolation_modes mode);
+
 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset);
 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
                        struct sk_buff *skb, struct qeth_hdr *hdr,
@@ -1078,7 +1079,6 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
                        int elements_needed);
 int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
-int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback);
 int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
 int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
 void qeth_trace_features(struct qeth_card *);
index 18a0fb7..8a76022 100644 (file)
@@ -969,7 +969,7 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
 
        spin_lock_irqsave(&card->lock, flags);
        list_for_each_entry(iob, &card->cmd_waiter_list, list)
-               qeth_notify_cmd(iob, -EIO);
+               qeth_notify_cmd(iob, -ECANCELED);
        spin_unlock_irqrestore(&card->lock, flags);
 }
 EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
@@ -1647,6 +1647,7 @@ static void qeth_setup_card(struct qeth_card *card)
        qeth_init_qdio_info(card);
        INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
        INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
+       hash_init(card->rx_mode_addrs);
        hash_init(card->local_addrs4);
        hash_init(card->local_addrs6);
        spin_lock_init(&card->local_addrs4_lock);
@@ -2025,7 +2026,7 @@ static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
 }
 
 static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
-                                                 void *data,
+                                                 const void *data,
                                                  unsigned int data_length)
 {
        struct qeth_cmd_buffer *iob;
@@ -2436,6 +2437,17 @@ static int qeth_cm_setup(struct qeth_card *card)
        return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
 }
 
+static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
+{
+       if (link_type == QETH_LINK_TYPE_LANE_TR ||
+           link_type == QETH_LINK_TYPE_HSTR) {
+               dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
+               return false;
+       }
+
+       return true;
+}
+
 static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
 {
        struct net_device *dev = card->dev;
@@ -2495,8 +2507,8 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
 {
        __u16 mtu, framesize;
        __u16 len;
-       __u8 link_type;
        struct qeth_cmd_buffer *iob;
+       u8 link_type = 0;
 
        QETH_CARD_TEXT(card, 2, "ulpenacb");
 
@@ -2516,9 +2528,11 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
        if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
                memcpy(&link_type,
                       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
-               card->info.link_type = link_type;
-       } else
-               card->info.link_type = 0;
+               if (!qeth_is_supported_link_type(card, link_type))
+                       return -EPROTONOSUPPORT;
+       }
+
+       card->info.link_type = link_type;
        QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
        return 0;
 }
@@ -3100,7 +3114,6 @@ struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
                                           enum qeth_prot_versions prot,
                                           unsigned int data_length)
 {
-       enum qeth_link_types link_type = card->info.link_type;
        struct qeth_cmd_buffer *iob;
        struct qeth_ipacmd_hdr *hdr;
 
@@ -3116,7 +3129,7 @@ struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
        hdr->command = cmd_code;
        hdr->initiator = IPA_CMD_INITIATOR_HOST;
        /* hdr->seqno is set by qeth_send_control_data() */
-       hdr->adapter_type = (link_type == QETH_LINK_TYPE_HSTR) ? 2 : 1;
+       hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
        hdr->rel_adapter_no = (u8) card->dev->dev_port;
        hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
        hdr->param_count = 1;
@@ -3199,18 +3212,22 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card,
                struct qeth_reply *reply, unsigned long data)
 {
        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+       struct qeth_query_cmds_supp *query_cmd;
 
        QETH_CARD_TEXT(card, 3, "quyadpcb");
        if (qeth_setadpparms_inspect_rc(cmd))
                return -EIO;
 
-       if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
-               card->info.link_type =
-                     cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
+       query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
+       if (query_cmd->lan_type & 0x7f) {
+               if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
+                       return -EPROTONOSUPPORT;
+
+               card->info.link_type = query_cmd->lan_type;
                QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
        }
-       card->options.adp.supported =
-               cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
+
+       card->options.adp.supported = query_cmd->supported_cmds;
        return 0;
 }
 
@@ -4541,12 +4558,8 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
 {
        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
        struct qeth_set_access_ctrl *access_ctrl_req;
-       int fallback = *(int *)reply->param;
 
        QETH_CARD_TEXT(card, 4, "setaccb");
-       if (cmd->hdr.return_code)
-               return -EIO;
-       qeth_setadpparms_inspect_rc(cmd);
 
        access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
        QETH_CARD_TEXT_(card, 2, "rc=%d",
@@ -4556,72 +4569,56 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
                QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
                                 access_ctrl_req->subcmd_code, CARD_DEVID(card),
                                 cmd->data.setadapterparms.hdr.return_code);
-       switch (cmd->data.setadapterparms.hdr.return_code) {
+       switch (qeth_setadpparms_inspect_rc(cmd)) {
        case SET_ACCESS_CTRL_RC_SUCCESS:
-               if (card->options.isolation == ISOLATION_MODE_NONE) {
+               if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
                        dev_info(&card->gdev->dev,
                            "QDIO data connection isolation is deactivated\n");
-               } else {
+               else
                        dev_info(&card->gdev->dev,
                            "QDIO data connection isolation is activated\n");
-               }
-               break;
+               return 0;
        case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
                QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
                                 CARD_DEVID(card));
-               if (fallback)
-                       card->options.isolation = card->options.prev_isolation;
-               break;
+               return 0;
        case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
                QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
                                 CARD_DEVID(card));
-               if (fallback)
-                       card->options.isolation = card->options.prev_isolation;
-               break;
+               return 0;
        case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
                dev_err(&card->gdev->dev, "Adapter does not "
                        "support QDIO data connection isolation\n");
-               break;
+               return -EOPNOTSUPP;
        case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
                dev_err(&card->gdev->dev,
                        "Adapter is dedicated. "
                        "QDIO data connection isolation not supported\n");
-               if (fallback)
-                       card->options.isolation = card->options.prev_isolation;
-               break;
+               return -EOPNOTSUPP;
        case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
                dev_err(&card->gdev->dev,
                        "TSO does not permit QDIO data connection isolation\n");
-               if (fallback)
-                       card->options.isolation = card->options.prev_isolation;
-               break;
+               return -EPERM;
        case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
                dev_err(&card->gdev->dev, "The adjacent switch port does not "
                        "support reflective relay mode\n");
-               if (fallback)
-                       card->options.isolation = card->options.prev_isolation;
-               break;
+               return -EOPNOTSUPP;
        case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
                dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
                                        "enabled at the adjacent switch port");
-               if (fallback)
-                       card->options.isolation = card->options.prev_isolation;
-               break;
+               return -EREMOTEIO;
        case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
                dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
                                        "at the adjacent switch failed\n");
-               break;
+               /* benign error while disabling ISOLATION_MODE_FWD */
+               return 0;
        default:
-               /* this should never happen */
-               if (fallback)
-                       card->options.isolation = card->options.prev_isolation;
-               break;
+               return -EIO;
        }
-       return (cmd->hdr.return_code) ? -EIO : 0;
 }
 
-static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
-               enum qeth_ipa_isolation_modes isolation, int fallback)
+int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
+                                    enum qeth_ipa_isolation_modes mode)
 {
        int rc;
        struct qeth_cmd_buffer *iob;
@@ -4630,42 +4627,28 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
 
        QETH_CARD_TEXT(card, 4, "setacctl");
 
+       if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
+               dev_err(&card->gdev->dev,
+                       "Adapter does not support QDIO data connection isolation\n");
+               return -EOPNOTSUPP;
+       }
+
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
                                   SETADP_DATA_SIZEOF(set_access_ctrl));
        if (!iob)
                return -ENOMEM;
        cmd = __ipa_cmd(iob);
        access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
-       access_ctrl_req->subcmd_code = isolation;
+       access_ctrl_req->subcmd_code = mode;
 
        rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
-                              &fallback);
-       QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
-       return rc;
-}
-
-int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
-{
-       int rc = 0;
-
-       QETH_CARD_TEXT(card, 4, "setactlo");
-
-       if ((IS_OSD(card) || IS_OSX(card)) &&
-           qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
-               rc = qeth_setadpparms_set_access_ctrl(card,
-                       card->options.isolation, fallback);
-               if (rc) {
-                       QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
-                                        rc, CARD_DEVID(card));
-                       rc = -EOPNOTSUPP;
-               }
-       } else if (card->options.isolation != ISOLATION_MODE_NONE) {
-               card->options.isolation = ISOLATION_MODE_NONE;
-
-               dev_err(&card->gdev->dev, "Adapter does not "
-                       "support QDIO data connection isolation\n");
-               rc = -EOPNOTSUPP;
+                              NULL);
+       if (rc) {
+               QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
+               QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
+                                rc, CARD_DEVID(card));
        }
+
        return rc;
 }
 
@@ -4853,26 +4836,24 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
 }
 
 static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
-               struct qeth_reply *reply, unsigned long data)
+                                        struct qeth_reply *reply,
+                                        unsigned long data)
 {
        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
-       struct qeth_qoat_priv *priv;
-       char *resdata;
+       struct qeth_qoat_priv *priv = reply->param;
        int resdatalen;
 
        QETH_CARD_TEXT(card, 3, "qoatcb");
        if (qeth_setadpparms_inspect_rc(cmd))
                return -EIO;
 
-       priv = (struct qeth_qoat_priv *)reply->param;
        resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
-       resdata = (char *)data + 28;
 
        if (resdatalen > (priv->buffer_len - priv->response_len))
                return -ENOSPC;
 
-       memcpy((priv->buffer + priv->response_len), resdata,
-               resdatalen);
+       memcpy(priv->buffer + priv->response_len,
+              &cmd->data.setadapterparms.hdr, resdatalen);
        priv->response_len += resdatalen;
 
        if (cmd->data.setadapterparms.hdr.seq_no <
@@ -4893,24 +4874,17 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
 
        QETH_CARD_TEXT(card, 3, "qoatcmd");
 
-       if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
-               rc = -EOPNOTSUPP;
-               goto out;
-       }
+       if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
+               return -EOPNOTSUPP;
 
-       if (copy_from_user(&oat_data, udata,
-           sizeof(struct qeth_query_oat_data))) {
-                       rc = -EFAULT;
-                       goto out;
-       }
+       if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
+               return -EFAULT;
 
        priv.buffer_len = oat_data.buffer_len;
        priv.response_len = 0;
        priv.buffer = vzalloc(oat_data.buffer_len);
-       if (!priv.buffer) {
-               rc = -ENOMEM;
-               goto out;
-       }
+       if (!priv.buffer)
+               return -ENOMEM;
 
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
                                   SETADP_DATA_SIZEOF(query_oat));
@@ -4922,30 +4896,19 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
        oat_req = &cmd->data.setadapterparms.data.query_oat;
        oat_req->subcmd_code = oat_data.command;
 
-       rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb,
-                              &priv);
+       rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
        if (!rc) {
-               if (is_compat_task())
-                       tmp = compat_ptr(oat_data.ptr);
-               else
-                       tmp = (void __user *)(unsigned long)oat_data.ptr;
-
-               if (copy_to_user(tmp, priv.buffer,
-                   priv.response_len)) {
-                       rc = -EFAULT;
-                       goto out_free;
-               }
-
+               tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
+                                        u64_to_user_ptr(oat_data.ptr);
                oat_data.response_len = priv.response_len;
 
-               if (copy_to_user(udata, &oat_data,
-                   sizeof(struct qeth_query_oat_data)))
+               if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
+                   copy_to_user(udata, &oat_data, sizeof(oat_data)))
                        rc = -EFAULT;
        }
 
 out_free:
        vfree(priv.buffer);
-out:
        return rc;
 }
 
@@ -5334,9 +5297,12 @@ retriable:
            (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
                card->info.hwtrap = 0;
 
-       rc = qeth_set_access_ctrl_online(card, 0);
-       if (rc)
-               goto out;
+       if (card->options.isolation != ISOLATION_MODE_NONE) {
+               rc = qeth_setadpparms_set_access_ctrl(card,
+                                                     card->options.isolation);
+               if (rc)
+                       goto out;
+       }
 
        rc = qeth_init_qdio_queues(card);
        if (rc) {
@@ -6840,9 +6806,11 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
                                      struct net_device *dev,
                                      netdev_features_t features)
 {
+       struct qeth_card *card = dev->ml_priv;
+
        /* Traffic with local next-hop is not eligible for some offloads: */
-       if (skb->ip_summed == CHECKSUM_PARTIAL) {
-               struct qeth_card *card = dev->ml_priv;
+       if (skb->ip_summed == CHECKSUM_PARTIAL &&
+           READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
                netdev_features_t restricted = 0;
 
                if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
index e3f4866..68c2588 100644 (file)
@@ -10,7 +10,7 @@
 #include <asm/cio.h>
 #include "qeth_core_mpc.h"
 
-unsigned char IDX_ACTIVATE_READ[] = {
+const unsigned char IDX_ACTIVATE_READ[] = {
        0x00, 0x00, 0x80, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x19, 0x01, 0x01, 0x80,  0x00, 0x00, 0x00, 0x00,
        0x00, 0x00, 0x00, 0x00,  0x00, 0x00, 0xc8, 0xc1,
@@ -18,7 +18,7 @@ unsigned char IDX_ACTIVATE_READ[] = {
        0x00, 0x00
 };
 
-unsigned char IDX_ACTIVATE_WRITE[] = {
+const unsigned char IDX_ACTIVATE_WRITE[] = {
        0x00, 0x00, 0x80, 0x00,  0x00, 0x00, 0x00, 0x00,
        0x15, 0x01, 0x01, 0x80,  0x00, 0x00, 0x00, 0x00,
        0xff, 0xff, 0x00, 0x00,  0x00, 0x00, 0xc8, 0xc1,
@@ -26,7 +26,7 @@ unsigned char IDX_ACTIVATE_WRITE[] = {
        0x00, 0x00
 };
 
-unsigned char CM_ENABLE[] = {
+const unsigned char CM_ENABLE[] = {
        0x00, 0xe0, 0x00, 0x00,  0x00, 0x00, 0x00, 0x01,
        0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x63,
        0x10, 0x00, 0x00, 0x01,
@@ -45,7 +45,7 @@ unsigned char CM_ENABLE[] = {
        0xff, 0xff, 0xff
 };
 
-unsigned char CM_SETUP[] = {
+const unsigned char CM_SETUP[] = {
        0x00, 0xe0, 0x00, 0x00,  0x00, 0x00, 0x00, 0x02,
        0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x64,
        0x10, 0x00, 0x00, 0x01,
@@ -65,7 +65,7 @@ unsigned char CM_SETUP[] = {
        0x04, 0x06, 0xc8, 0x00
 };
 
-unsigned char ULP_ENABLE[] = {
+const unsigned char ULP_ENABLE[] = {
        0x00, 0xe0, 0x00, 0x00,  0x00, 0x00, 0x00, 0x03,
        0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x6b,
        0x10, 0x00, 0x00, 0x01,
@@ -85,7 +85,7 @@ unsigned char ULP_ENABLE[] = {
        0xf1, 0x00, 0x00
 };
 
-unsigned char ULP_SETUP[] = {
+const unsigned char ULP_SETUP[] = {
        0x00, 0xe0, 0x00, 0x00,  0x00, 0x00, 0x00, 0x04,
        0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x6c,
        0x10, 0x00, 0x00, 0x01,
@@ -107,7 +107,7 @@ unsigned char ULP_SETUP[] = {
        0x00, 0x00, 0x00, 0x00
 };
 
-unsigned char DM_ACT[] = {
+const unsigned char DM_ACT[] = {
        0x00, 0xe0, 0x00, 0x00,  0x00, 0x00, 0x00, 0x05,
        0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x55,
        0x10, 0x00, 0x00, 0x01,
@@ -123,7 +123,7 @@ unsigned char DM_ACT[] = {
        0x05, 0x40, 0x01, 0x01,  0x00
 };
 
-unsigned char IPA_PDU_HEADER[] = {
+const unsigned char IPA_PDU_HEADER[] = {
        0x00, 0xe0, 0x00, 0x00,  0x77, 0x77, 0x77, 0x77,
        0x00, 0x00, 0x00, 0x14,  0x00, 0x00, 0x00, 0x00,
        0x10, 0x00, 0x00, 0x01,  0x00, 0x00, 0x00, 0x00,
index 9d6f39d..b459def 100644 (file)
 #include <uapi/linux/if_ether.h>
 #include <uapi/linux/in6.h>
 
+extern const unsigned char IPA_PDU_HEADER[];
 #define IPA_PDU_HEADER_SIZE    0x40
 #define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e)
 #define QETH_IPA_PDU_LEN_PDU1(buffer) (buffer + 0x26)
 #define QETH_IPA_PDU_LEN_PDU2(buffer) (buffer + 0x29)
 #define QETH_IPA_PDU_LEN_PDU3(buffer) (buffer + 0x3a)
 
-extern unsigned char IPA_PDU_HEADER[];
 #define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer + 0x2c)
 
 #define QETH_SEQ_NO_LENGTH     4
@@ -858,7 +858,7 @@ extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
 /* END OF   IP Assist related definitions                                    */
 /*****************************************************************************/
 
-extern unsigned char CM_ENABLE[];
+extern const unsigned char CM_ENABLE[];
 #define CM_ENABLE_SIZE 0x63
 #define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer + 0x2c)
 #define QETH_CM_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53)
@@ -868,7 +868,7 @@ extern unsigned char CM_ENABLE[];
                (PDU_ENCAPSULATION(buffer) + 0x13)
 
 
-extern unsigned char CM_SETUP[];
+extern const unsigned char CM_SETUP[];
 #define CM_SETUP_SIZE 0x64
 #define QETH_CM_SETUP_DEST_ADDR(buffer) (buffer + 0x2c)
 #define QETH_CM_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51)
@@ -877,7 +877,7 @@ extern unsigned char CM_SETUP[];
 #define QETH_CM_SETUP_RESP_DEST_ADDR(buffer) \
                (PDU_ENCAPSULATION(buffer) + 0x1a)
 
-extern unsigned char ULP_ENABLE[];
+extern const unsigned char ULP_ENABLE[];
 #define ULP_ENABLE_SIZE 0x6b
 #define QETH_ULP_ENABLE_LINKNUM(buffer) (buffer + 0x61)
 #define QETH_ULP_ENABLE_DEST_ADDR(buffer) (buffer + 0x2c)
@@ -898,7 +898,7 @@ extern unsigned char ULP_ENABLE[];
 #define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer + 0x50)
 #define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer + 0x19)
 
-extern unsigned char ULP_SETUP[];
+extern const unsigned char ULP_SETUP[];
 #define ULP_SETUP_SIZE 0x6c
 #define QETH_ULP_SETUP_DEST_ADDR(buffer) (buffer + 0x2c)
 #define QETH_ULP_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51)
@@ -910,7 +910,7 @@ extern unsigned char ULP_SETUP[];
                (PDU_ENCAPSULATION(buffer) + 0x1a)
 
 
-extern unsigned char DM_ACT[];
+extern const unsigned char DM_ACT[];
 #define DM_ACT_SIZE 0x55
 #define QETH_DM_ACT_DEST_ADDR(buffer) (buffer + 0x2c)
 #define QETH_DM_ACT_CONNECTION_TOKEN(buffer) (buffer + 0x51)
@@ -921,9 +921,8 @@ extern unsigned char DM_ACT[];
 #define QETH_PDU_HEADER_SEQ_NO(buffer) (buffer + 0x1c)
 #define QETH_PDU_HEADER_ACK_SEQ_NO(buffer) (buffer + 0x20)
 
-extern unsigned char IDX_ACTIVATE_READ[];
-extern unsigned char IDX_ACTIVATE_WRITE[];
-
+extern const unsigned char IDX_ACTIVATE_READ[];
+extern const unsigned char IDX_ACTIVATE_WRITE[];
 #define IDX_ACTIVATE_SIZE      0x22
 #define QETH_IDX_ACT_PNO(buffer) (buffer+0x0b)
 #define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer + 0x0c)
index c901c94..8def823 100644 (file)
@@ -448,19 +448,17 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
                rc = -EINVAL;
                goto out;
        }
-       rc = count;
-
-       /* defer IP assist if device is offline (until discipline->set_online)*/
-       card->options.prev_isolation = card->options.isolation;
-       card->options.isolation = isolation;
-       if (qeth_card_hw_is_reachable(card)) {
-               int ipa_rc = qeth_set_access_ctrl_online(card, 1);
-               if (ipa_rc != 0)
-                       rc = ipa_rc;
-       }
+
+       if (qeth_card_hw_is_reachable(card))
+               rc = qeth_setadpparms_set_access_ctrl(card, isolation);
+
+       if (!rc)
+               WRITE_ONCE(card->options.isolation, isolation);
+
 out:
        mutex_unlock(&card->conf_mutex);
-       return rc;
+
+       return rc ? rc : count;
 }
 
 static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show,
index 2d3bca3..ef7a2db 100644 (file)
@@ -156,7 +156,7 @@ static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card)
        struct hlist_node *tmp;
        int i;
 
-       hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
+       hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) {
                hash_del(&mac->hnode);
                kfree(mac);
        }
@@ -438,7 +438,7 @@ static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha)
        u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2]));
        struct qeth_mac *mac;
 
-       hash_for_each_possible(card->mac_htable, mac, hnode, mac_hash) {
+       hash_for_each_possible(card->rx_mode_addrs, mac, hnode, mac_hash) {
                if (ether_addr_equal_64bits(ha->addr, mac->mac_addr)) {
                        mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
                        return;
@@ -452,7 +452,7 @@ static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha)
        ether_addr_copy(mac->mac_addr, ha->addr);
        mac->disp_flag = QETH_DISP_ADDR_ADD;
 
-       hash_add(card->mac_htable, &mac->hnode, mac_hash);
+       hash_add(card->rx_mode_addrs, &mac->hnode, mac_hash);
 }
 
 static void qeth_l2_rx_mode_work(struct work_struct *work)
@@ -475,7 +475,7 @@ static void qeth_l2_rx_mode_work(struct work_struct *work)
                qeth_l2_add_mac(card, ha);
        netif_addr_unlock_bh(dev);
 
-       hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
+       hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) {
                switch (mac->disp_flag) {
                case QETH_DISP_ADDR_DELETE:
                        qeth_l2_remove_mac(card, mac->mac_addr);
@@ -601,7 +601,6 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
                        return rc;
        }
 
-       hash_init(card->mac_htable);
        INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
        return 0;
 }
index 1e50aa0..15a1248 100644 (file)
@@ -58,7 +58,7 @@ static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
        struct qeth_ipaddr *addr;
 
        if (query->is_multicast) {
-               hash_for_each_possible(card->ip_mc_htable, addr, hnode, key)
+               hash_for_each_possible(card->rx_mode_addrs, addr, hnode, key)
                        if (qeth_l3_addr_match_ip(addr, query))
                                return addr;
        } else {
@@ -239,7 +239,7 @@ static void qeth_l3_drain_rx_mode_cache(struct qeth_card *card)
        struct hlist_node *tmp;
        int i;
 
-       hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+       hash_for_each_safe(card->rx_mode_addrs, i, tmp, addr, hnode) {
                hash_del(&addr->hnode);
                kfree(addr);
        }
@@ -1093,7 +1093,7 @@ static int qeth_l3_add_mcast_rtnl(struct net_device *dev, int vid, void *arg)
                if (!ipm)
                        continue;
 
-               hash_add(card->ip_mc_htable, &ipm->hnode,
+               hash_add(card->rx_mode_addrs, &ipm->hnode,
                         qeth_l3_ipaddr_hash(ipm));
        }
 
@@ -1124,8 +1124,8 @@ walk_ipv6:
                if (!ipm)
                        continue;
 
-               hash_add(card->ip_mc_htable,
-                               &ipm->hnode, qeth_l3_ipaddr_hash(ipm));
+               hash_add(card->rx_mode_addrs, &ipm->hnode,
+                        qeth_l3_ipaddr_hash(ipm));
 
        }
        read_unlock_bh(&in6_dev->lock);
@@ -1219,7 +1219,7 @@ static void qeth_l3_rx_mode_work(struct work_struct *work)
                        vlan_for_each(card->dev, qeth_l3_add_mcast_rtnl, card);
                rtnl_unlock();
 
-               hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+               hash_for_each_safe(card->rx_mode_addrs, i, tmp, addr, hnode) {
                        switch (addr->disp_flag) {
                        case QETH_DISP_ADDR_DELETE:
                                rc = qeth_l3_deregister_addr_entry(card, addr);
@@ -1919,12 +1919,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                return rc;
 
        if (IS_OSD(card) || IS_OSX(card)) {
-               if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
-                   (card->info.link_type == QETH_LINK_TYPE_HSTR)) {
-                       pr_info("qeth_l3: ignoring TR device\n");
-                       return -ENODEV;
-               }
-
                card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
 
                /*IPv6 address autoconfiguration stuff*/
@@ -2004,7 +1998,6 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
                }
        }
 
-       hash_init(card->ip_mc_htable);
        INIT_WORK(&card->rx_mode_work, qeth_l3_rx_mode_work);
        return 0;
 }
index db320da..79f6e8f 100644 (file)
@@ -577,7 +577,10 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
                                   ZFCP_STATUS_ERP_TIMEDOUT)) {
                        req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
                        zfcp_dbf_rec_run("erscf_1", act);
-                       req->erp_action = NULL;
+                       /* lock-free concurrent access with
+                        * zfcp_erp_timeout_handler()
+                        */
+                       WRITE_ONCE(req->erp_action, NULL);
                }
                if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
                        zfcp_dbf_rec_run("erscf_2", act);
@@ -613,8 +616,14 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
 void zfcp_erp_timeout_handler(struct timer_list *t)
 {
        struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
-       struct zfcp_erp_action *act = fsf_req->erp_action;
+       struct zfcp_erp_action *act;
 
+       if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
+               return;
+       /* lock-free concurrent access with zfcp_erp_strategy_check_fsfreq() */
+       act = READ_ONCE(fsf_req->erp_action);
+       if (!act)
+               return;
        zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT);
 }
 
index 957889a..5730572 100644 (file)
@@ -1372,27 +1372,6 @@ static struct ccw_device_id virtio_ids[] = {
        {},
 };
 
-#ifdef CONFIG_PM_SLEEP
-static int virtio_ccw_freeze(struct ccw_device *cdev)
-{
-       struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
-
-       return virtio_device_freeze(&vcdev->vdev);
-}
-
-static int virtio_ccw_restore(struct ccw_device *cdev)
-{
-       struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
-       int ret;
-
-       ret = virtio_ccw_set_transport_rev(vcdev);
-       if (ret)
-               return ret;
-
-       return virtio_device_restore(&vcdev->vdev);
-}
-#endif
-
 static struct ccw_driver virtio_ccw_driver = {
        .driver = {
                .owner = THIS_MODULE,
@@ -1405,11 +1384,6 @@ static struct ccw_driver virtio_ccw_driver = {
        .set_online = virtio_ccw_online,
        .notify = virtio_ccw_cio_notify,
        .int_class = IRQIO_VIR,
-#ifdef CONFIG_PM_SLEEP
-       .freeze = virtio_ccw_freeze,
-       .thaw = virtio_ccw_restore,
-       .restore = virtio_ccw_restore,
-#endif
 };
 
 static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
index d022407..bef47f3 100644 (file)
@@ -40,6 +40,7 @@ static struct scsi_host_template aic94xx_sht = {
        /* .name is initialized */
        .name                   = "aic94xx",
        .queuecommand           = sas_queuecommand,
+       .dma_need_drain         = ata_scsi_dma_need_drain,
        .target_alloc           = sas_target_alloc,
        .slave_configure        = sas_slave_configure,
        .scan_finished          = asd_scan_finished,
index 2e1718f..09a7669 100644 (file)
@@ -1756,6 +1756,7 @@ static struct scsi_host_template sht_v1_hw = {
        .proc_name              = DRV_NAME,
        .module                 = THIS_MODULE,
        .queuecommand           = sas_queuecommand,
+       .dma_need_drain         = ata_scsi_dma_need_drain,
        .target_alloc           = sas_target_alloc,
        .slave_configure        = hisi_sas_slave_configure,
        .scan_finished          = hisi_sas_scan_finished,
index e7e7849..968d387 100644 (file)
@@ -3532,6 +3532,7 @@ static struct scsi_host_template sht_v2_hw = {
        .proc_name              = DRV_NAME,
        .module                 = THIS_MODULE,
        .queuecommand           = sas_queuecommand,
+       .dma_need_drain         = ata_scsi_dma_need_drain,
        .target_alloc           = sas_target_alloc,
        .slave_configure        = hisi_sas_slave_configure,
        .scan_finished          = hisi_sas_scan_finished,
index 3e6b78a..55e2321 100644 (file)
@@ -3075,6 +3075,7 @@ static struct scsi_host_template sht_v3_hw = {
        .proc_name              = DRV_NAME,
        .module                 = THIS_MODULE,
        .queuecommand           = sas_queuecommand,
+       .dma_need_drain         = ata_scsi_dma_need_drain,
        .target_alloc           = sas_target_alloc,
        .slave_configure        = hisi_sas_slave_configure,
        .scan_finished          = hisi_sas_scan_finished,
index 7d77997..7d86f4c 100644 (file)
@@ -6731,6 +6731,7 @@ static struct scsi_host_template driver_template = {
        .compat_ioctl = ipr_ioctl,
 #endif
        .queuecommand = ipr_queuecommand,
+       .dma_need_drain = ata_scsi_dma_need_drain,
        .eh_abort_handler = ipr_eh_abort,
        .eh_device_reset_handler = ipr_eh_dev_reset,
        .eh_host_reset_handler = ipr_eh_host_reset,
index 974c3b9..085e285 100644 (file)
@@ -153,6 +153,7 @@ static struct scsi_host_template isci_sht = {
        .name                           = DRV_NAME,
        .proc_name                      = DRV_NAME,
        .queuecommand                   = sas_queuecommand,
+       .dma_need_drain                 = ata_scsi_dma_need_drain,
        .target_alloc                   = sas_target_alloc,
        .slave_configure                = sas_slave_configure,
        .scan_finished                  = isci_host_scan_finished,
index 773c45a..278d15f 100644 (file)
@@ -133,8 +133,10 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
        lockdep_assert_held(&lport->disc.disc_mutex);
 
        rdata = fc_rport_lookup(lport, port_id);
-       if (rdata)
+       if (rdata) {
+               kref_put(&rdata->kref, fc_rport_destroy);
                return rdata;
+       }
 
        if (lport->rport_priv_size > 0)
                rport_priv_size = lport->rport_priv_size;
@@ -481,10 +483,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
 
        fc_rport_state_enter(rdata, RPORT_ST_DELETE);
 
-       kref_get(&rdata->kref);
-       if (rdata->event == RPORT_EV_NONE &&
-           !queue_work(rport_event_queue, &rdata->event_work))
-               kref_put(&rdata->kref, fc_rport_destroy);
+       if (rdata->event == RPORT_EV_NONE) {
+               kref_get(&rdata->kref);
+               if (!queue_work(rport_event_queue, &rdata->event_work))
+                       kref_put(&rdata->kref, fc_rport_destroy);
+       }
 
        rdata->event = event;
 }
index 69a5249..6637f84 100644 (file)
@@ -11878,7 +11878,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
        lpfc_sli4_xri_exchange_busy_wait(phba);
 
        /* per-phba callback de-registration for hotplug event */
-       lpfc_cpuhp_remove(phba);
+       if (phba->pport)
+               lpfc_cpuhp_remove(phba);
 
        /* Disable PCI subsystem interrupt */
        lpfc_sli4_disable_intr(phba);
index 5973eed..b0de3bd 100644 (file)
@@ -33,6 +33,7 @@ static struct scsi_host_template mvs_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .queuecommand           = sas_queuecommand,
+       .dma_need_drain         = ata_scsi_dma_need_drain,
        .target_alloc           = sas_target_alloc,
        .slave_configure        = sas_slave_configure,
        .scan_finished          = mvs_scan_finished,
index a8f5344..9e99262 100644 (file)
@@ -87,6 +87,7 @@ static struct scsi_host_template pm8001_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .queuecommand           = sas_queuecommand,
+       .dma_need_drain         = ata_scsi_dma_need_drain,
        .target_alloc           = sas_target_alloc,
        .slave_configure        = sas_slave_configure,
        .scan_finished          = pm8001_scan_finished,
index 36b1ca2..6e77e49 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/interrupt.h>
 #include <linux/list.h>
 #include <linux/kthread.h>
+#include <linux/phylink.h>
 #include <scsi/libfc.h>
 #include <scsi/scsi_host.h>
 #include <scsi/fc_frame.h>
@@ -440,6 +441,7 @@ static void qedf_link_recovery(struct work_struct *work)
 static void qedf_update_link_speed(struct qedf_ctx *qedf,
        struct qed_link_output *link)
 {
+       __ETHTOOL_DECLARE_LINK_MODE_MASK(sup_caps);
        struct fc_lport *lport = qedf->lport;
 
        lport->link_speed = FC_PORTSPEED_UNKNOWN;
@@ -474,40 +476,60 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
         * Set supported link speed by querying the supported
         * capabilities of the link.
         */
-       if ((link->supported_caps & QED_LM_10000baseT_Full_BIT) ||
-           (link->supported_caps & QED_LM_10000baseKX4_Full_BIT) ||
-           (link->supported_caps & QED_LM_10000baseR_FEC_BIT) ||
-           (link->supported_caps & QED_LM_10000baseCR_Full_BIT) ||
-           (link->supported_caps & QED_LM_10000baseSR_Full_BIT) ||
-           (link->supported_caps & QED_LM_10000baseLR_Full_BIT) ||
-           (link->supported_caps & QED_LM_10000baseLRM_Full_BIT) ||
-           (link->supported_caps & QED_LM_10000baseKR_Full_BIT)) {
+
+       phylink_zero(sup_caps);
+       phylink_set(sup_caps, 10000baseT_Full);
+       phylink_set(sup_caps, 10000baseKX4_Full);
+       phylink_set(sup_caps, 10000baseR_FEC);
+       phylink_set(sup_caps, 10000baseCR_Full);
+       phylink_set(sup_caps, 10000baseSR_Full);
+       phylink_set(sup_caps, 10000baseLR_Full);
+       phylink_set(sup_caps, 10000baseLRM_Full);
+       phylink_set(sup_caps, 10000baseKR_Full);
+
+       if (linkmode_intersects(link->supported_caps, sup_caps))
                lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
-       }
-       if ((link->supported_caps & QED_LM_25000baseKR_Full_BIT) ||
-           (link->supported_caps & QED_LM_25000baseCR_Full_BIT) ||
-           (link->supported_caps & QED_LM_25000baseSR_Full_BIT)) {
+
+       phylink_zero(sup_caps);
+       phylink_set(sup_caps, 25000baseKR_Full);
+       phylink_set(sup_caps, 25000baseCR_Full);
+       phylink_set(sup_caps, 25000baseSR_Full);
+
+       if (linkmode_intersects(link->supported_caps, sup_caps))
                lport->link_supported_speeds |= FC_PORTSPEED_25GBIT;
-       }
-       if ((link->supported_caps & QED_LM_40000baseLR4_Full_BIT) ||
-           (link->supported_caps & QED_LM_40000baseKR4_Full_BIT) ||
-           (link->supported_caps & QED_LM_40000baseCR4_Full_BIT) ||
-           (link->supported_caps & QED_LM_40000baseSR4_Full_BIT)) {
+
+       phylink_zero(sup_caps);
+       phylink_set(sup_caps, 40000baseLR4_Full);
+       phylink_set(sup_caps, 40000baseKR4_Full);
+       phylink_set(sup_caps, 40000baseCR4_Full);
+       phylink_set(sup_caps, 40000baseSR4_Full);
+
+       if (linkmode_intersects(link->supported_caps, sup_caps))
                lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
-       }
-       if ((link->supported_caps & QED_LM_50000baseKR2_Full_BIT) ||
-           (link->supported_caps & QED_LM_50000baseCR2_Full_BIT) ||
-           (link->supported_caps & QED_LM_50000baseSR2_Full_BIT)) {
+
+       phylink_zero(sup_caps);
+       phylink_set(sup_caps, 50000baseKR2_Full);
+       phylink_set(sup_caps, 50000baseCR2_Full);
+       phylink_set(sup_caps, 50000baseSR2_Full);
+
+       if (linkmode_intersects(link->supported_caps, sup_caps))
                lport->link_supported_speeds |= FC_PORTSPEED_50GBIT;
-       }
-       if ((link->supported_caps & QED_LM_100000baseKR4_Full_BIT) ||
-           (link->supported_caps & QED_LM_100000baseSR4_Full_BIT) ||
-           (link->supported_caps & QED_LM_100000baseCR4_Full_BIT) ||
-           (link->supported_caps & QED_LM_100000baseLR4_ER4_Full_BIT)) {
+
+       phylink_zero(sup_caps);
+       phylink_set(sup_caps, 100000baseKR4_Full);
+       phylink_set(sup_caps, 100000baseSR4_Full);
+       phylink_set(sup_caps, 100000baseCR4_Full);
+       phylink_set(sup_caps, 100000baseLR4_ER4_Full);
+
+       if (linkmode_intersects(link->supported_caps, sup_caps))
                lport->link_supported_speeds |= FC_PORTSPEED_100GBIT;
-       }
-       if (link->supported_caps & QED_LM_20000baseKR2_Full_BIT)
+
+       phylink_zero(sup_caps);
+       phylink_set(sup_caps, 20000baseKR2_Full);
+
+       if (linkmode_intersects(link->supported_caps, sup_caps))
                lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
+
        fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
 }
 
index 42c3ad2..df670fb 100644 (file)
@@ -3496,7 +3496,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
                                qla2x00_clear_loop_id(fcport);
                                fcport->flags |= FCF_FABRIC_DEVICE;
                        } else if (fcport->d_id.b24 != rp->id.b24 ||
-                               fcport->scan_needed) {
+                                  (fcport->scan_needed &&
+                                   fcport->port_type != FCT_INITIATOR &&
+                                   fcport->port_type != FCT_NVME_INITIATOR)) {
                                qlt_schedule_sess_for_deletion(fcport);
                        }
                        fcport->d_id.b24 = rp->id.b24;
index 4576d3a..2436a17 100644 (file)
@@ -5944,7 +5944,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
                        break;
                }
 
-               if (NVME_TARGET(vha->hw, fcport)) {
+               if (found && NVME_TARGET(vha->hw, fcport)) {
                        if (fcport->disc_state == DSC_DELETE_PEND) {
                                qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
                                vha->fcport_count--;
index d66d47a..fa695a4 100644 (file)
@@ -139,11 +139,12 @@ static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
        sp->priv = NULL;
        if (priv->comp_status == QLA_SUCCESS) {
                fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
+               fd->status = NVME_SC_SUCCESS;
        } else {
                fd->rcv_rsplen = 0;
                fd->transferred_length = 0;
+               fd->status = NVME_SC_INTERNAL;
        }
-       fd->status = 0;
        spin_unlock_irqrestore(&priv->cmd_lock, flags);
 
        fd->done(fd);
index 53dd876..516a7f5 100644 (file)
@@ -106,8 +106,10 @@ static int ufs_bsg_request(struct bsg_job *job)
                desc_op = bsg_request->upiu_req.qr.opcode;
                ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff,
                                                &desc_len, desc_op);
-               if (ret)
+               if (ret) {
+                       pm_runtime_put_sync(hba->dev);
                        goto out;
+               }
 
                /* fall through */
        case UPIU_TRANSACTION_NOP_OUT:
index 7b0759a..cc57a38 100644 (file)
@@ -22,6 +22,8 @@
 #define OCOTP_UID_LOW                  0x410
 #define OCOTP_UID_HIGH                 0x420
 
+#define IMX8MP_OCOTP_UID_OFFSET                0x10
+
 /* Same as ANADIG_DIGPROG_IMX7D */
 #define ANADIG_DIGPROG_IMX8MM  0x800
 
@@ -87,6 +89,8 @@ static void __init imx8mm_soc_uid(void)
 {
        void __iomem *ocotp_base;
        struct device_node *np;
+       u32 offset = of_machine_is_compatible("fsl,imx8mp") ?
+                    IMX8MP_OCOTP_UID_OFFSET : 0;
 
        np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp");
        if (!np)
@@ -95,9 +99,9 @@ static void __init imx8mm_soc_uid(void)
        ocotp_base = of_iomap(np, 0);
        WARN_ON(!ocotp_base);
 
-       soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
+       soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset);
        soc_uid <<= 32;
-       soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
+       soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset);
 
        iounmap(ocotp_base);
        of_node_put(np);
@@ -146,7 +150,7 @@ static const struct imx8_soc_data imx8mp_soc_data = {
        .soc_revision = imx8mm_soc_revision,
 };
 
-static const struct of_device_id imx8_soc_match[] = {
+static __maybe_unused const struct of_device_id imx8_soc_match[] = {
        { .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, },
        { .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, },
        { .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, },
index 038aec3..a01eda7 100644 (file)
@@ -67,7 +67,7 @@ struct knav_reg_config {
        u32             link_ram_size0;
        u32             link_ram_base1;
        u32             __pad2[2];
-       u32             starvation[0];
+       u32             starvation[];
 };
 
 struct knav_reg_region {
index 96c6f77..c9b3f9e 100644 (file)
@@ -256,10 +256,10 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev,
                goto exit;
 
        /* wait for the status to be set */
-       ret = readl_relaxed_poll_timeout(reset->prm->base +
-                                        reset->prm->data->rstst,
-                                        v, v & BIT(st_bit), 1,
-                                        OMAP_RESET_MAX_WAIT);
+       ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
+                                                reset->prm->data->rstst,
+                                                v, v & BIT(st_bit), 1,
+                                                OMAP_RESET_MAX_WAIT);
        if (ret)
                pr_err("%s: timedout waiting for %s:%lu\n", __func__,
                       reset->prm->data->name, id);
index a35face..91c6aff 100644 (file)
@@ -588,14 +588,14 @@ static void dspi_release_dma(struct fsl_dspi *dspi)
                return;
 
        if (dma->chan_tx) {
-               dma_unmap_single(dma->chan_tx->device->dev, dma->tx_dma_phys,
-                                dma_bufsize, DMA_TO_DEVICE);
+               dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize,
+                                 dma->tx_dma_buf, dma->tx_dma_phys);
                dma_release_channel(dma->chan_tx);
        }
 
        if (dma->chan_rx) {
-               dma_unmap_single(dma->chan_rx->device->dev, dma->rx_dma_phys,
-                                dma_bufsize, DMA_FROM_DEVICE);
+               dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize,
+                                 dma->rx_dma_buf, dma->rx_dma_phys);
                dma_release_channel(dma->chan_rx);
        }
 }
@@ -1109,6 +1109,8 @@ static int dspi_suspend(struct device *dev)
        struct spi_controller *ctlr = dev_get_drvdata(dev);
        struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
 
+       if (dspi->irq)
+               disable_irq(dspi->irq);
        spi_controller_suspend(ctlr);
        clk_disable_unprepare(dspi->clk);
 
@@ -1129,6 +1131,8 @@ static int dspi_resume(struct device *dev)
        if (ret)
                return ret;
        spi_controller_resume(ctlr);
+       if (dspi->irq)
+               enable_irq(dspi->irq);
 
        return 0;
 }
@@ -1385,22 +1389,22 @@ static int dspi_probe(struct platform_device *pdev)
                goto poll_mode;
        }
 
-       ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt,
-                              IRQF_SHARED, pdev->name, dspi);
+       init_completion(&dspi->xfer_done);
+
+       ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL,
+                                  IRQF_SHARED, pdev->name, dspi);
        if (ret < 0) {
                dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
                goto out_clk_put;
        }
 
-       init_completion(&dspi->xfer_done);
-
 poll_mode:
 
        if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
                ret = dspi_request_dma(dspi, res->start);
                if (ret < 0) {
                        dev_err(&pdev->dev, "can't get dma channels\n");
-                       goto out_clk_put;
+                       goto out_free_irq;
                }
        }
 
@@ -1415,11 +1419,14 @@ poll_mode:
        ret = spi_register_controller(ctlr);
        if (ret != 0) {
                dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
-               goto out_clk_put;
+               goto out_free_irq;
        }
 
        return ret;
 
+out_free_irq:
+       if (dspi->irq)
+               free_irq(dspi->irq, dspi);
 out_clk_put:
        clk_disable_unprepare(dspi->clk);
 out_ctlr_put:
@@ -1434,18 +1441,8 @@ static int dspi_remove(struct platform_device *pdev)
        struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
 
        /* Disconnect from the SPI framework */
-       dspi_release_dma(dspi);
-       clk_disable_unprepare(dspi->clk);
        spi_unregister_controller(dspi->ctlr);
 
-       return 0;
-}
-
-static void dspi_shutdown(struct platform_device *pdev)
-{
-       struct spi_controller *ctlr = platform_get_drvdata(pdev);
-       struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
-
        /* Disable RX and TX */
        regmap_update_bits(dspi->regmap, SPI_MCR,
                           SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF,
@@ -1455,8 +1452,16 @@ static void dspi_shutdown(struct platform_device *pdev)
        regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT);
 
        dspi_release_dma(dspi);
+       if (dspi->irq)
+               free_irq(dspi->irq, dspi);
        clk_disable_unprepare(dspi->clk);
-       spi_unregister_controller(dspi->ctlr);
+
+       return 0;
+}
+
+static void dspi_shutdown(struct platform_device *pdev)
+{
+       dspi_remove(pdev);
 }
 
 static struct platform_driver fsl_dspi_driver = {
index 6721910..0040362 100644 (file)
@@ -1485,6 +1485,11 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
        { PCI_VDEVICE(INTEL, 0x4daa), LPSS_CNL_SSP },
        { PCI_VDEVICE(INTEL, 0x4dab), LPSS_CNL_SSP },
        { PCI_VDEVICE(INTEL, 0x4dfb), LPSS_CNL_SSP },
+       /* TGL-H */
+       { PCI_VDEVICE(INTEL, 0x43aa), LPSS_CNL_SSP },
+       { PCI_VDEVICE(INTEL, 0x43ab), LPSS_CNL_SSP },
+       { PCI_VDEVICE(INTEL, 0x43fb), LPSS_CNL_SSP },
+       { PCI_VDEVICE(INTEL, 0x43fd), LPSS_CNL_SSP },
        /* APL */
        { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
        { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
index 06192c9..cbc2387 100644 (file)
 
 struct rspi_data {
        void __iomem *addr;
-       u32 max_speed_hz;
+       u32 speed_hz;
        struct spi_controller *ctlr;
        struct platform_device *pdev;
        wait_queue_head_t wait;
@@ -258,8 +258,7 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
        rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
 
        /* Sets transfer bit rate */
-       spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk),
-                           2 * rspi->max_speed_hz) - 1;
+       spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->speed_hz) - 1;
        rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
 
        /* Disable dummy transmission, set 16-bit word access, 1 frame */
@@ -299,14 +298,14 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
 
        clksrc = clk_get_rate(rspi->clk);
        while (div < 3) {
-               if (rspi->max_speed_hz >= clksrc/4) /* 4=(CLK/2)/2 */
+               if (rspi->speed_hz >= clksrc/4) /* 4=(CLK/2)/2 */
                        break;
                div++;
                clksrc /= 2;
        }
 
        /* Sets transfer bit rate */
-       spbr = DIV_ROUND_UP(clksrc, 2 * rspi->max_speed_hz) - 1;
+       spbr = DIV_ROUND_UP(clksrc, 2 * rspi->speed_hz) - 1;
        rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
        rspi->spcmd |= div << 2;
 
@@ -341,7 +340,7 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
        rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
 
        /* Sets transfer bit rate */
-       spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->max_speed_hz);
+       spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->speed_hz);
        rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
 
        /* Disable dummy transmission, set byte access */
@@ -949,9 +948,24 @@ static int rspi_prepare_message(struct spi_controller *ctlr,
 {
        struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
        struct spi_device *spi = msg->spi;
+       const struct spi_transfer *xfer;
        int ret;
 
-       rspi->max_speed_hz = spi->max_speed_hz;
+       /*
+        * As the Bit Rate Register must not be changed while the device is
+        * active, all transfers in a message must use the same bit rate.
+        * In theory, the sequencer could be enabled, and each Command Register
+        * could divide the base bit rate by a different value.
+        * However, most RSPI variants do not have Transfer Data Length
+        * Multiplier Setting Registers, so each sequence step would be limited
+        * to a single word, making this feature unsuitable for large
+        * transfers, which would gain most from it.
+        */
+       rspi->speed_hz = spi->max_speed_hz;
+       list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+               if (xfer->speed_hz < rspi->speed_hz)
+                       rspi->speed_hz = xfer->speed_hz;
+       }
 
        rspi->spcmd = SPCMD_SSLKP;
        if (spi->mode & SPI_CPOL)
index 88e6543..bd23c46 100644 (file)
@@ -389,9 +389,9 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
        sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val);
 
        /* Load the watchdog timeout value, 50ms is always enough. */
+       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0);
        sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_LOW,
                       WDG_LOAD_VAL & WDG_LOAD_MASK);
-       sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0);
 
        /* Start the watchdog to reset system */
        sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val);
index 3c44bb2..a900962 100644 (file)
@@ -553,20 +553,6 @@ static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
        .exec_op = stm32_qspi_exec_op,
 };
 
-static void stm32_qspi_release(struct stm32_qspi *qspi)
-{
-       pm_runtime_get_sync(qspi->dev);
-       /* disable qspi */
-       writel_relaxed(0, qspi->io_base + QSPI_CR);
-       stm32_qspi_dma_free(qspi);
-       mutex_destroy(&qspi->lock);
-       pm_runtime_put_noidle(qspi->dev);
-       pm_runtime_disable(qspi->dev);
-       pm_runtime_set_suspended(qspi->dev);
-       pm_runtime_dont_use_autosuspend(qspi->dev);
-       clk_disable_unprepare(qspi->clk);
-}
-
 static int stm32_qspi_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -642,7 +628,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
        if (IS_ERR(rstc)) {
                ret = PTR_ERR(rstc);
                if (ret == -EPROBE_DEFER)
-                       goto err_qspi_release;
+                       goto err_clk_disable;
        } else {
                reset_control_assert(rstc);
                udelay(2);
@@ -653,7 +639,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, qspi);
        ret = stm32_qspi_dma_setup(qspi);
        if (ret)
-               goto err_qspi_release;
+               goto err_dma_free;
 
        mutex_init(&qspi->lock);
 
@@ -673,15 +659,26 @@ static int stm32_qspi_probe(struct platform_device *pdev)
 
        ret = devm_spi_register_master(dev, ctrl);
        if (ret)
-               goto err_qspi_release;
+               goto err_pm_runtime_free;
 
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
 
        return 0;
 
-err_qspi_release:
-       stm32_qspi_release(qspi);
+err_pm_runtime_free:
+       pm_runtime_get_sync(qspi->dev);
+       /* disable qspi */
+       writel_relaxed(0, qspi->io_base + QSPI_CR);
+       mutex_destroy(&qspi->lock);
+       pm_runtime_put_noidle(qspi->dev);
+       pm_runtime_disable(qspi->dev);
+       pm_runtime_set_suspended(qspi->dev);
+       pm_runtime_dont_use_autosuspend(qspi->dev);
+err_dma_free:
+       stm32_qspi_dma_free(qspi);
+err_clk_disable:
+       clk_disable_unprepare(qspi->clk);
 err_master_put:
        spi_master_put(qspi->ctrl);
 
@@ -692,7 +689,16 @@ static int stm32_qspi_remove(struct platform_device *pdev)
 {
        struct stm32_qspi *qspi = platform_get_drvdata(pdev);
 
-       stm32_qspi_release(qspi);
+       pm_runtime_get_sync(qspi->dev);
+       /* disable qspi */
+       writel_relaxed(0, qspi->io_base + QSPI_CR);
+       stm32_qspi_dma_free(qspi);
+       mutex_destroy(&qspi->lock);
+       pm_runtime_put_noidle(qspi->dev);
+       pm_runtime_disable(qspi->dev);
+       pm_runtime_set_suspended(qspi->dev);
+       pm_runtime_dont_use_autosuspend(qspi->dev);
+       clk_disable_unprepare(qspi->clk);
 
        return 0;
 }
index d753df7..59e0767 100644 (file)
@@ -609,15 +609,20 @@ err_find_dev:
 static int spidev_release(struct inode *inode, struct file *filp)
 {
        struct spidev_data      *spidev;
+       int                     dofree;
 
        mutex_lock(&device_list_lock);
        spidev = filp->private_data;
        filp->private_data = NULL;
 
+       spin_lock_irq(&spidev->spi_lock);
+       /* ... after we unbound from the underlying device? */
+       dofree = (spidev->spi == NULL);
+       spin_unlock_irq(&spidev->spi_lock);
+
        /* last close? */
        spidev->users--;
        if (!spidev->users) {
-               int             dofree;
 
                kfree(spidev->tx_buffer);
                spidev->tx_buffer = NULL;
@@ -625,19 +630,14 @@ static int spidev_release(struct inode *inode, struct file *filp)
                kfree(spidev->rx_buffer);
                spidev->rx_buffer = NULL;
 
-               spin_lock_irq(&spidev->spi_lock);
-               if (spidev->spi)
-                       spidev->speed_hz = spidev->spi->max_speed_hz;
-
-               /* ... after we unbound from the underlying device? */
-               dofree = (spidev->spi == NULL);
-               spin_unlock_irq(&spidev->spi_lock);
-
                if (dofree)
                        kfree(spidev);
+               else
+                       spidev->speed_hz = spidev->spi->max_speed_hz;
        }
 #ifdef CONFIG_SPI_SLAVE
-       spi_slave_abort(spidev->spi);
+       if (!dofree)
+               spi_slave_abort(spidev->spi);
 #endif
        mutex_unlock(&device_list_lock);
 
@@ -787,13 +787,13 @@ static int spidev_remove(struct spi_device *spi)
 {
        struct spidev_data      *spidev = spi_get_drvdata(spi);
 
+       /* prevent new opens */
+       mutex_lock(&device_list_lock);
        /* make sure ops on existing fds can abort cleanly */
        spin_lock_irq(&spidev->spi_lock);
        spidev->spi = NULL;
        spin_unlock_irq(&spidev->spi_lock);
 
-       /* prevent new opens */
-       mutex_lock(&device_list_lock);
        list_del(&spidev->device_entry);
        device_destroy(spidev_class, spidev->devt);
        clear_bit(MINOR(spidev->devt), minors);
index 3861cb6..85542bf 100644 (file)
@@ -238,7 +238,7 @@ static void chipco_powercontrol_init(struct ssb_chipcommon *cc)
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/PmuFastPwrupDelay */
+/* https://bcm-v4.sipsolutions.net/802.11/PmuFastPwrupDelay */
 static u16 pmu_fast_powerup_delay(struct ssb_chipcommon *cc)
 {
        struct ssb_bus *bus = cc->dev->bus;
@@ -255,7 +255,7 @@ static u16 pmu_fast_powerup_delay(struct ssb_chipcommon *cc)
        }
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/ClkctlFastPwrupDelay */
+/* https://bcm-v4.sipsolutions.net/802.11/ClkctlFastPwrupDelay */
 static void calc_fast_powerup_delay(struct ssb_chipcommon *cc)
 {
        struct ssb_bus *bus = cc->dev->bus;
index 0f60e90..888069e 100644 (file)
@@ -513,7 +513,7 @@ static void ssb_pmu_resources_init(struct ssb_chipcommon *cc)
                chipco_write32(cc, SSB_CHIPCO_PMU_MAXRES_MSK, max_msk);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/SSB/PmuInit */
+/* https://bcm-v4.sipsolutions.net/802.11/SSB/PmuInit */
 void ssb_pmu_init(struct ssb_chipcommon *cc)
 {
        u32 pmucap;
index 42d620c..7cd5531 100644 (file)
@@ -186,7 +186,7 @@ int ssb_fill_sprom_with_fallback(struct ssb_bus *bus, struct ssb_sprom *out)
        return get_fallback_sprom(bus, out);
 }
 
-/* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */
+/* https://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */
 bool ssb_is_sprom_available(struct ssb_bus *bus)
 {
        /* status register only exists on chipcomon rev >= 11 and we need check
index 4ec5528..b3fb4d4 100644 (file)
@@ -84,8 +84,6 @@ source "drivers/staging/fbtft/Kconfig"
 
 source "drivers/staging/fsl-dpaa2/Kconfig"
 
-source "drivers/staging/wilc1000/Kconfig"
-
 source "drivers/staging/most/Kconfig"
 
 source "drivers/staging/ks7010/Kconfig"
index 4d34198..3d8c7ea 100644 (file)
@@ -32,7 +32,6 @@ obj-$(CONFIG_UNISYSSPAR)      += unisys/
 obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD)  += clocking-wizard/
 obj-$(CONFIG_FB_TFT)           += fbtft/
 obj-$(CONFIG_FSL_DPAA2)                += fsl-dpaa2/
-obj-$(CONFIG_WILC1000)         += wilc1000/
 obj-$(CONFIG_MOST)             += most/
 obj-$(CONFIG_KS7010)           += ks7010/
 obj-$(CONFIG_GREYBUS)          += greybus/
index c798672..cfb673a 100644 (file)
@@ -163,7 +163,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
        of_node_put(phy_node);
 
        if (!phydev)
-               return -ENODEV;
+               return -EPROBE_DEFER;
 
        priv->last_link = 0;
        phy_start(phydev);
index e3771d4..7f6716e 100644 (file)
@@ -22,7 +22,5 @@
 
 extern const struct ethtool_ops cvm_oct_ethtool_ops;
 
-void octeon_mdiobus_force_mod_depencency(void);
-
 int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 int cvm_oct_phy_setup_device(struct net_device *dev);
index f42c381..204f0b1 100644 (file)
@@ -689,8 +689,6 @@ static int cvm_oct_probe(struct platform_device *pdev)
        mtu_overhead += VLAN_HLEN;
 #endif
 
-       octeon_mdiobus_force_mod_depencency();
-
        pip = pdev->dev.of_node;
        if (!pip) {
                pr_err("Error: No 'pip' in /aliases\n");
@@ -987,6 +985,7 @@ static struct platform_driver cvm_oct_driver = {
 
 module_platform_driver(cvm_oct_driver);
 
+MODULE_SOFTDEP("pre: mdio-cavium");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
 MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");
index 69bcd17..a3ea7ce 100644 (file)
@@ -1824,12 +1824,14 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l
        pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
        if (!pIE)
                return _FAIL;
+       if (ie_len > sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates))
+               return _FAIL;
 
        memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len);
        supportRateNum = ie_len;
 
        pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
-       if (pIE)
+       if (pIE && (ie_len <= sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates) - supportRateNum))
                memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len);
 
        return _SUCCESS;
index 893b67f..5110f9b 100644 (file)
@@ -240,7 +240,7 @@ int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
 }
 
 int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req,
-            int chan_start_idx, int chan_num)
+            int chan_start_idx, int chan_num, int *timeout)
 {
        int ret, i;
        struct hif_msg *hif;
@@ -289,11 +289,13 @@ int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req,
        tmo_chan_fg = 512 * USEC_PER_TU + body->probe_delay;
        tmo_chan_fg *= body->num_of_probe_requests;
        tmo = chan_num * max(tmo_chan_bg, tmo_chan_fg) + 512 * USEC_PER_TU;
+       if (timeout)
+               *timeout = usecs_to_jiffies(tmo);
 
        wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len);
        ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
        kfree(hif);
-       return ret ? ret : usecs_to_jiffies(tmo);
+       return ret;
 }
 
 int hif_stop_scan(struct wfx_vif *wvif)
index e9eca93..e1da28a 100644 (file)
@@ -42,7 +42,7 @@ int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
 int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
                  void *buf, size_t buf_size);
 int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req80211,
-            int chan_start, int chan_num);
+            int chan_start, int chan_num, int *timeout);
 int hif_stop_scan(struct wfx_vif *wvif);
 int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
             struct ieee80211_channel *channel, const u8 *ssid, int ssidlen);
index 3248ece..93ea2b7 100644 (file)
@@ -246,7 +246,7 @@ static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                sorted_queues[i] = &wdev->tx_queue[i];
                for (j = i; j > 0; j--)
-                       if (atomic_read(&sorted_queues[j]->pending_frames) >
+                       if (atomic_read(&sorted_queues[j]->pending_frames) <
                            atomic_read(&sorted_queues[j - 1]->pending_frames))
                                swap(sorted_queues[j - 1], sorted_queues[j]);
        }
@@ -291,15 +291,12 @@ struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
 
        if (atomic_read(&wdev->tx_lock))
                return NULL;
-
-       for (;;) {
-               skb = wfx_tx_queues_get_skb(wdev);
-               if (!skb)
-                       return NULL;
-               skb_queue_tail(&wdev->tx_pending, skb);
-               wake_up(&wdev->tx_dequeue);
-               tx_priv = wfx_skb_tx_priv(skb);
-               tx_priv->xmit_timestamp = ktime_get();
-               return (struct hif_msg *)skb->data;
-       }
+       skb = wfx_tx_queues_get_skb(wdev);
+       if (!skb)
+               return NULL;
+       skb_queue_tail(&wdev->tx_pending, skb);
+       wake_up(&wdev->tx_dequeue);
+       tx_priv = wfx_skb_tx_priv(skb);
+       tx_priv->xmit_timestamp = ktime_get();
+       return (struct hif_msg *)skb->data;
 }
index 57ea999..e9de197 100644 (file)
@@ -56,10 +56,10 @@ static int send_scan_req(struct wfx_vif *wvif,
        wfx_tx_lock_flush(wvif->wdev);
        wvif->scan_abort = false;
        reinit_completion(&wvif->scan_complete);
-       timeout = hif_scan(wvif, req, start_idx, i - start_idx);
-       if (timeout < 0) {
+       ret = hif_scan(wvif, req, start_idx, i - start_idx, &timeout);
+       if (ret) {
                wfx_tx_unlock(wvif->wdev);
-               return timeout;
+               return -EIO;
        }
        ret = wait_for_completion_timeout(&wvif->scan_complete, timeout);
        if (req->channels[start_idx]->max_power != wvif->vif->bss_conf.txpower)
index 9e12402..6c0e1b0 100644 (file)
@@ -123,12 +123,12 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
 {
        int i;
 
-       for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
-               if (power > cpufreq_cdev->em->table[i].power)
+       for (i = cpufreq_cdev->max_level; i >= 0; i--) {
+               if (power >= cpufreq_cdev->em->table[i].power)
                        break;
        }
 
-       return cpufreq_cdev->em->table[i + 1].frequency;
+       return cpufreq_cdev->em->table[i].frequency;
 }
 
 /**
index e761c9b..1b84ea6 100644 (file)
@@ -649,7 +649,7 @@ MODULE_DEVICE_TABLE(of, of_imx_thermal_match);
 static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data)
 {
        struct device_node *np;
-       int ret;
+       int ret = 0;
 
        data->policy = cpufreq_cpu_get(0);
        if (!data->policy) {
@@ -664,11 +664,12 @@ static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data)
                if (IS_ERR(data->cdev)) {
                        ret = PTR_ERR(data->cdev);
                        cpufreq_cpu_put(data->policy);
-                       return ret;
                }
        }
 
-       return 0;
+       of_node_put(np);
+
+       return ret;
 }
 
 static void imx_thermal_unregister_legacy_cooling(struct imx_thermal_data *data)
index 76e3060..6b7ef19 100644 (file)
@@ -211,6 +211,9 @@ enum {
 /* The total number of temperature sensors in the MT8183 */
 #define MT8183_NUM_SENSORS     6
 
+/* The number of banks in the MT8183 */
+#define MT8183_NUM_ZONES               1
+
 /* The number of sensing points per bank */
 #define MT8183_NUM_SENSORS_PER_ZONE     6
 
@@ -497,7 +500,7 @@ static const struct mtk_thermal_data mt7622_thermal_data = {
  */
 static const struct mtk_thermal_data mt8183_thermal_data = {
        .auxadc_channel = MT8183_TEMP_AUXADC_CHANNEL,
-       .num_banks = MT8183_NUM_SENSORS_PER_ZONE,
+       .num_banks = MT8183_NUM_ZONES,
        .num_sensors = MT8183_NUM_SENSORS,
        .vts_index = mt8183_vts_index,
        .cali_val = MT8183_CALIBRATION,
index 8d3e94d..39c4462 100644 (file)
@@ -382,7 +382,7 @@ static inline u32 masked_irq(u32 hw_id, u32 mask, enum tsens_ver ver)
  *
  * Return: IRQ_HANDLED
  */
-irqreturn_t tsens_critical_irq_thread(int irq, void *data)
+static irqreturn_t tsens_critical_irq_thread(int irq, void *data)
 {
        struct tsens_priv *priv = data;
        struct tsens_irq_data d;
@@ -452,7 +452,7 @@ irqreturn_t tsens_critical_irq_thread(int irq, void *data)
  *
  * Return: IRQ_HANDLED
  */
-irqreturn_t tsens_irq_thread(int irq, void *data)
+static irqreturn_t tsens_irq_thread(int irq, void *data)
 {
        struct tsens_priv *priv = data;
        struct tsens_irq_data d;
@@ -520,7 +520,7 @@ irqreturn_t tsens_irq_thread(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-int tsens_set_trips(void *_sensor, int low, int high)
+static int tsens_set_trips(void *_sensor, int low, int high)
 {
        struct tsens_sensor *s = _sensor;
        struct tsens_priv *priv = s->priv;
@@ -557,7 +557,7 @@ int tsens_set_trips(void *_sensor, int low, int high)
        return 0;
 }
 
-int tsens_enable_irq(struct tsens_priv *priv)
+static int tsens_enable_irq(struct tsens_priv *priv)
 {
        int ret;
        int val = tsens_version(priv) > VER_1_X ? 7 : 1;
@@ -570,7 +570,7 @@ int tsens_enable_irq(struct tsens_priv *priv)
        return ret;
 }
 
-void tsens_disable_irq(struct tsens_priv *priv)
+static void tsens_disable_irq(struct tsens_priv *priv)
 {
        regmap_field_write(priv->rf[INT_EN], 0);
 }
index 58fe7c1..c48c5e9 100644 (file)
@@ -167,7 +167,7 @@ static int rcar_gen3_thermal_get_temp(void *devdata, int *temp)
 {
        struct rcar_gen3_thermal_tsc *tsc = devdata;
        int mcelsius, val;
-       u32 reg;
+       int reg;
 
        /* Read register and convert to mili Celsius */
        reg = rcar_gen3_thermal_read(tsc, REG_GEN3_TEMP) & CTEMP_MASK;
index a340374..4cde70d 100644 (file)
@@ -348,8 +348,8 @@ static int sprd_thm_probe(struct platform_device *pdev)
 
        thm->var_data = pdata;
        thm->base = devm_platform_ioremap_resource(pdev, 0);
-       if (!thm->base)
-               return -ENOMEM;
+       if (IS_ERR(thm->base))
+               return PTR_ERR(thm->base);
 
        thm->nr_sensors = of_get_child_count(np);
        if (thm->nr_sensors == 0 || thm->nr_sensors > SPRD_THM_MAX_SENSOR) {
index 5022447..6004c0c 100644 (file)
@@ -50,7 +50,7 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
         * I/O utilities that messages sent to the console will automatically
         * be displayed on the dbg_io.
         */
-       dbg_io_ops->is_console = true;
+       dbg_io_ops->cons = co;
 
        return 0;
 }
index 4139698..84ffede 100644 (file)
@@ -45,7 +45,6 @@ static struct platform_device *kgdboc_pdev;
 
 #if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE)
 static struct kgdb_io          kgdboc_earlycon_io_ops;
-static struct console          *earlycon;
 static int                      (*earlycon_orig_exit)(struct console *con);
 #endif /* IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
 
@@ -145,7 +144,7 @@ static void kgdboc_unregister_kbd(void)
 #if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE)
 static void cleanup_earlycon(void)
 {
-       if (earlycon)
+       if (kgdboc_earlycon_io_ops.cons)
                kgdb_unregister_io_module(&kgdboc_earlycon_io_ops);
 }
 #else /* !IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
@@ -178,7 +177,7 @@ static int configure_kgdboc(void)
                goto noconfig;
        }
 
-       kgdboc_io_ops.is_console = 0;
+       kgdboc_io_ops.cons = NULL;
        kgdb_tty_driver = NULL;
 
        kgdboc_use_kms = 0;
@@ -198,7 +197,7 @@ static int configure_kgdboc(void)
                int idx;
                if (cons->device && cons->device(cons, &idx) == p &&
                    idx == tty_line) {
-                       kgdboc_io_ops.is_console = 1;
+                       kgdboc_io_ops.cons = cons;
                        break;
                }
        }
@@ -433,7 +432,8 @@ static int kgdboc_earlycon_get_char(void)
 {
        char c;
 
-       if (!earlycon->read(earlycon, &c, 1))
+       if (!kgdboc_earlycon_io_ops.cons->read(kgdboc_earlycon_io_ops.cons,
+                                              &c, 1))
                return NO_POLL_CHAR;
 
        return c;
@@ -441,7 +441,8 @@ static int kgdboc_earlycon_get_char(void)
 
 static void kgdboc_earlycon_put_char(u8 chr)
 {
-       earlycon->write(earlycon, &chr, 1);
+       kgdboc_earlycon_io_ops.cons->write(kgdboc_earlycon_io_ops.cons, &chr,
+                                          1);
 }
 
 static void kgdboc_earlycon_pre_exp_handler(void)
@@ -461,7 +462,7 @@ static void kgdboc_earlycon_pre_exp_handler(void)
         * boot if we detect this case.
         */
        for_each_console(con)
-               if (con == earlycon)
+               if (con == kgdboc_earlycon_io_ops.cons)
                        return;
 
        already_warned = true;
@@ -484,25 +485,25 @@ static int kgdboc_earlycon_deferred_exit(struct console *con)
 
 static void kgdboc_earlycon_deinit(void)
 {
-       if (!earlycon)
+       if (!kgdboc_earlycon_io_ops.cons)
                return;
 
-       if (earlycon->exit == kgdboc_earlycon_deferred_exit)
+       if (kgdboc_earlycon_io_ops.cons->exit == kgdboc_earlycon_deferred_exit)
                /*
                 * kgdboc_earlycon is exiting but original boot console exit
                 * was never called (AKA kgdboc_earlycon_deferred_exit()
                 * didn't ever run).  Undo our trap.
                 */
-               earlycon->exit = earlycon_orig_exit;
-       else if (earlycon->exit)
+               kgdboc_earlycon_io_ops.cons->exit = earlycon_orig_exit;
+       else if (kgdboc_earlycon_io_ops.cons->exit)
                /*
                 * We skipped calling the exit() routine so we could try to
                 * keep using the boot console even after it went away.  We're
                 * finally done so call the function now.
                 */
-               earlycon->exit(earlycon);
+               kgdboc_earlycon_io_ops.cons->exit(kgdboc_earlycon_io_ops.cons);
 
-       earlycon = NULL;
+       kgdboc_earlycon_io_ops.cons = NULL;
 }
 
 static struct kgdb_io kgdboc_earlycon_io_ops = {
@@ -511,7 +512,6 @@ static struct kgdb_io kgdboc_earlycon_io_ops = {
        .write_char             = kgdboc_earlycon_put_char,
        .pre_exception          = kgdboc_earlycon_pre_exp_handler,
        .deinit                 = kgdboc_earlycon_deinit,
-       .is_console             = true,
 };
 
 #define MAX_CONSOLE_NAME_LEN (sizeof((struct console *) 0)->name)
@@ -557,10 +557,10 @@ static int __init kgdboc_earlycon_init(char *opt)
                goto unlock;
        }
 
-       earlycon = con;
+       kgdboc_earlycon_io_ops.cons = con;
        pr_info("Going to register kgdb with earlycon '%s'\n", con->name);
        if (kgdb_register_io_module(&kgdboc_earlycon_io_ops) != 0) {
-               earlycon = NULL;
+               kgdboc_earlycon_io_ops.cons = NULL;
                pr_info("Failed to register kgdb with earlycon\n");
        } else {
                /* Trap exit so we can keep earlycon longer if needed. */
index 82645a2..61ec5bb 100644 (file)
@@ -327,7 +327,8 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev,
                if (!set || (tmode & 0xff) != 0)
                        return -EINVAL;
 
-               switch (tmode >> 8) {
+               tmode >>= 8;
+               switch (tmode) {
                case TEST_J:
                case TEST_K:
                case TEST_SE0_NAK:
@@ -704,15 +705,17 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
        int ret = 0;
        u8 zlp = 0;
 
+       spin_lock_irqsave(&priv_dev->lock, flags);
        trace_cdns3_ep0_queue(priv_dev, request);
 
        /* cancel the request if controller receive new SETUP packet. */
-       if (cdns3_check_new_setup(priv_dev))
+       if (cdns3_check_new_setup(priv_dev)) {
+               spin_unlock_irqrestore(&priv_dev->lock, flags);
                return -ECONNRESET;
+       }
 
        /* send STATUS stage. Should be called only for SET_CONFIGURATION */
        if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE) {
-               spin_lock_irqsave(&priv_dev->lock, flags);
                cdns3_select_ep(priv_dev, 0x00);
 
                erdy_sent = !priv_dev->hw_configured_flag;
@@ -737,7 +740,6 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
                return 0;
        }
 
-       spin_lock_irqsave(&priv_dev->lock, flags);
        if (!list_empty(&priv_ep->pending_req_list)) {
                dev_err(priv_dev->dev,
                        "can't handle multiple requests for ep0\n");
index 8d121e2..755c565 100644 (file)
@@ -156,7 +156,7 @@ DECLARE_EVENT_CLASS(cdns3_log_ep0_irq,
                __dynamic_array(char, str, CDNS3_MSG_MAX)
        ),
        TP_fast_assign(
-               __entry->ep_dir = priv_dev->ep0_data_dir;
+               __entry->ep_dir = priv_dev->selected_ep;
                __entry->ep_sts = ep_sts;
        ),
        TP_printk("%s", cdns3_decode_ep0_irq(__get_str(str),
index f67088b..d5187b5 100644 (file)
@@ -1689,6 +1689,8 @@ static int acm_pre_reset(struct usb_interface *intf)
 
 static const struct usb_device_id acm_ids[] = {
        /* quirky and broken devices */
+       { USB_DEVICE(0x0424, 0x274e), /* Microchip Technology, Inc. (formerly SMSC) */
+         .driver_info = DISABLE_ECHO, }, /* DISABLE ECHO in termios flag */
        { USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */
        .driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
        { USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
index 3e8efe7..e0b7767 100644 (file)
@@ -218,11 +218,12 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Logitech HD Webcam C270 */
        { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
 
-       /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
+       /* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */
        { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
        { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
        { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
        { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
+       { USB_DEVICE(0x046d, 0x085c), .driver_info = USB_QUIRK_DELAY_INIT },
 
        /* Logitech ConferenceCam CC3000e */
        { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
index 12b98b4..7faf5f8 100644 (file)
@@ -4920,12 +4920,6 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
                                          epnum, 0);
        }
 
-       ret = usb_add_gadget_udc(dev, &hsotg->gadget);
-       if (ret) {
-               dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
-                                          hsotg->ctrl_req);
-               return ret;
-       }
        dwc2_hsotg_dump(hsotg);
 
        return 0;
index e571c8a..c347d93 100644 (file)
@@ -575,6 +575,17 @@ static int dwc2_driver_probe(struct platform_device *dev)
        if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
                dwc2_lowlevel_hw_disable(hsotg);
 
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
+       IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+       /* Postponed adding a new gadget to the udc class driver list */
+       if (hsotg->gadget_enabled) {
+               retval = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget);
+               if (retval) {
+                       dwc2_hsotg_remove(hsotg);
+                       goto error_init;
+               }
+       }
+#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
        return 0;
 
 error_init:
index 48b68b6..90bb022 100644 (file)
@@ -162,12 +162,6 @@ static const struct dwc3_exynos_driverdata exynos5250_drvdata = {
        .suspend_clk_idx = -1,
 };
 
-static const struct dwc3_exynos_driverdata exynos5420_drvdata = {
-       .clk_names = { "usbdrd30", "usbdrd30_susp_clk"},
-       .num_clks = 2,
-       .suspend_clk_idx = 1,
-};
-
 static const struct dwc3_exynos_driverdata exynos5433_drvdata = {
        .clk_names = { "aclk", "susp_clk", "pipe_pclk", "phyclk" },
        .num_clks = 4,
@@ -185,9 +179,6 @@ static const struct of_device_id exynos_dwc3_match[] = {
                .compatible = "samsung,exynos5250-dwusb3",
                .data = &exynos5250_drvdata,
        }, {
-               .compatible = "samsung,exynos5420-dwusb3",
-               .data = &exynos5420_drvdata,
-       }, {
                .compatible = "samsung,exynos5433-dwusb3",
                .data = &exynos5433_drvdata,
        }, {
index b673727..96c05b1 100644 (file)
@@ -206,8 +206,10 @@ static void dwc3_pci_resume_work(struct work_struct *work)
        int ret;
 
        ret = pm_runtime_get_sync(&dwc3->dev);
-       if (ret)
+       if (ret) {
+               pm_runtime_put_sync_autosuspend(&dwc3->dev);
                return;
+       }
 
        pm_runtime_mark_last_busy(&dwc3->dev);
        pm_runtime_put_sync_autosuspend(&dwc3->dev);
index ea0d531..775cf70 100644 (file)
@@ -1058,7 +1058,8 @@ static int __init kgdbdbgp_parse_config(char *str)
                kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
        }
        kgdb_register_io_module(&kgdbdbgp_io_ops);
-       kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
+       if (early_dbgp_console.index != -1)
+               kgdbdbgp_io_ops.cons = &early_dbgp_console;
 
        return 0;
 }
index cafde05..80a1b52 100644 (file)
@@ -2313,7 +2313,8 @@ static int mv_udc_probe(struct platform_device *pdev)
        return 0;
 
 err_create_workqueue:
-       destroy_workqueue(udc->qwork);
+       if (udc->qwork)
+               destroy_workqueue(udc->qwork);
 err_destroy_dma:
        dma_pool_destroy(udc->dtd_pool);
 err_free_dma:
index a4e9abc..1a9b757 100644 (file)
@@ -203,9 +203,8 @@ static int exynos_ehci_probe(struct platform_device *pdev)
        hcd->rsrc_len = resource_size(res);
 
        irq = platform_get_irq(pdev, 0);
-       if (!irq) {
-               dev_err(&pdev->dev, "Failed to get IRQ\n");
-               err = -ENODEV;
+       if (irq < 0) {
+               err = irq;
                goto fail_io;
        }
 
index 3c3820a..af3c1b9 100644 (file)
@@ -216,6 +216,13 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
                ehci_info(ehci, "applying MosChip frame-index workaround\n");
                ehci->frame_index_bug = 1;
                break;
+       case PCI_VENDOR_ID_HUAWEI:
+               /* Synopsys HC bug */
+               if (pdev->device == 0xa239) {
+                       ehci_info(ehci, "applying Synopsys HC workaround\n");
+                       ehci->has_synopsys_hc_bug = 1;
+               }
+               break;
        }
 
        /* optional debug port, normally in the first BAR */
index cff9652..b91d50d 100644 (file)
@@ -191,6 +191,7 @@ static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev)
        struct resource *mem;
 
        usb_remove_hcd(hcd);
+       iounmap(hcd->regs);
        release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
        usb_put_hcd(hcd);
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
index bfbdb3c..4311d4c 100644 (file)
@@ -587,6 +587,9 @@ static int xhci_mtk_remove(struct platform_device *dev)
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct usb_hcd  *shared_hcd = xhci->shared_hcd;
 
+       pm_runtime_put_noidle(&dev->dev);
+       pm_runtime_disable(&dev->dev);
+
        usb_remove_hcd(shared_hcd);
        xhci->shared_hcd = NULL;
        device_init_wakeup(&dev->dev, false);
@@ -597,8 +600,6 @@ static int xhci_mtk_remove(struct platform_device *dev)
        xhci_mtk_sch_exit(mtk);
        xhci_mtk_clks_disable(mtk);
        xhci_mtk_ldos_disable(mtk);
-       pm_runtime_put_sync(&dev->dev);
-       pm_runtime_disable(&dev->dev);
 
        return 0;
 }
index bee5dec..ed468ee 100644 (file)
@@ -1430,6 +1430,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
                                xhci->devs[slot_id]->out_ctx, ep_index);
 
                ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
+               ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */
                ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
                ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
 
@@ -4390,6 +4391,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
        int             hird, exit_latency;
        int             ret;
 
+       if (xhci->quirks & XHCI_HW_LPM_DISABLE)
+               return -EPERM;
+
        if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
                        !udev->lpm_capable)
                return -EPERM;
@@ -4412,7 +4416,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
        xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
                        enable ? "enable" : "disable", port_num + 1);
 
-       if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) {
+       if (enable) {
                /* Host supports BESL timeout instead of HIRD */
                if (udev->usb2_hw_lpm_besl_capable) {
                        /* if device doesn't have a preferred BESL value use a
@@ -4471,6 +4475,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
                        mutex_lock(hcd->bandwidth_mutex);
                        xhci_change_max_exit_latency(xhci, udev, 0);
                        mutex_unlock(hcd->bandwidth_mutex);
+                       readl_poll_timeout(ports[port_num]->addr, pm_val,
+                                          (pm_val & PORT_PLS_MASK) == XDEV_U0,
+                                          100, 10000);
                        return 0;
                }
        }
index 2c6c4f8..c295e8a 100644 (file)
@@ -716,7 +716,7 @@ struct xhci_ep_ctx {
  * 4 - TRB error
  * 5-7 - reserved
  */
-#define EP_STATE_MASK          (0xf)
+#define EP_STATE_MASK          (0x7)
 #define EP_STATE_DISABLED      0
 #define EP_STATE_RUNNING       1
 #define EP_STATE_HALTED                2
index 98ada1a..bae8889 100644 (file)
@@ -2873,6 +2873,7 @@ static void usbtest_disconnect(struct usb_interface *intf)
 
        usb_set_intfdata(intf, NULL);
        dev_dbg(&intf->dev, "disconnect\n");
+       kfree(dev->buf);
        kfree(dev);
 }
 
index cffe2ac..03a3337 100644 (file)
@@ -1199,11 +1199,7 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, tegra_phy);
 
-       err = usb_add_phy_dev(&tegra_phy->u_phy);
-       if (err)
-               return err;
-
-       return 0;
+       return usb_add_phy_dev(&tegra_phy->u_phy);
 }
 
 static int tegra_usb_phy_remove(struct platform_device *pdev)
index 01c6a48..ac9a81a 100644 (file)
@@ -803,7 +803,8 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
        return info->dma_map_ctrl(chan->device->dev, pkt, map);
 }
 
-static void usbhsf_dma_complete(void *arg);
+static void usbhsf_dma_complete(void *arg,
+                               const struct dmaengine_result *result);
 static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
 {
        struct usbhs_pipe *pipe = pkt->pipe;
@@ -813,6 +814,7 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
        struct dma_chan *chan;
        struct device *dev = usbhs_priv_to_dev(priv);
        enum dma_transfer_direction dir;
+       dma_cookie_t cookie;
 
        fifo = usbhs_pipe_to_fifo(pipe);
        if (!fifo)
@@ -827,11 +829,11 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
        if (!desc)
                return;
 
-       desc->callback          = usbhsf_dma_complete;
-       desc->callback_param    = pipe;
+       desc->callback_result   = usbhsf_dma_complete;
+       desc->callback_param    = pkt;
 
-       pkt->cookie = dmaengine_submit(desc);
-       if (pkt->cookie < 0) {
+       cookie = dmaengine_submit(desc);
+       if (cookie < 0) {
                dev_err(dev, "Failed to submit dma descriptor\n");
                return;
        }
@@ -1152,12 +1154,10 @@ static size_t usbhs_dma_calc_received_size(struct usbhs_pkt *pkt,
                                           struct dma_chan *chan, int dtln)
 {
        struct usbhs_pipe *pipe = pkt->pipe;
-       struct dma_tx_state state;
        size_t received_size;
        int maxp = usbhs_pipe_get_maxpacket(pipe);
 
-       dmaengine_tx_status(chan, pkt->cookie, &state);
-       received_size = pkt->length - state.residue;
+       received_size = pkt->length - pkt->dma_result->residue;
 
        if (dtln) {
                received_size -= USBHS_USB_DMAC_XFER_SIZE;
@@ -1363,13 +1363,16 @@ static int usbhsf_irq_ready(struct usbhs_priv *priv,
        return 0;
 }
 
-static void usbhsf_dma_complete(void *arg)
+static void usbhsf_dma_complete(void *arg,
+                               const struct dmaengine_result *result)
 {
-       struct usbhs_pipe *pipe = arg;
+       struct usbhs_pkt *pkt = arg;
+       struct usbhs_pipe *pipe = pkt->pipe;
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
        struct device *dev = usbhs_priv_to_dev(priv);
        int ret;
 
+       pkt->dma_result = result;
        ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
        if (ret < 0)
                dev_err(dev, "dma_complete run_error %d : %d\n",
index 7d3700b..039a2b9 100644 (file)
@@ -50,7 +50,7 @@ struct usbhs_pkt {
                     struct usbhs_pkt *pkt);
        struct work_struct work;
        dma_addr_t dma;
-       dma_cookie_t cookie;
+       const struct dmaengine_result *dma_result;
        void *buf;
        int length;
        int trans;
index 962bc69..70ddc9d 100644 (file)
@@ -148,7 +148,8 @@ pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_mux_state *state)
        msg[0] = PMC_USB_DP_HPD;
        msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
 
-       msg[1] = PMC_USB_DP_HPD_IRQ;
+       if (data->status & DP_STATUS_IRQ_HPD)
+               msg[1] = PMC_USB_DP_HPD_IRQ;
 
        if (data->status & DP_STATUS_HPD_STATE)
                msg[1] |= PMC_USB_DP_HPD_LVL;
@@ -161,6 +162,7 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
 {
        struct typec_displayport_data *data = state->data;
        struct altmode_req req = { };
+       int ret;
 
        if (data->status & DP_STATUS_IRQ_HPD)
                return pmc_usb_mux_dp_hpd(port, state);
@@ -181,7 +183,14 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
        if (data->status & DP_STATUS_HPD_STATE)
                req.mode_data |= PMC_USB_ALTMODE_HPD_HIGH;
 
-       return pmc_usb_command(port, (void *)&req, sizeof(req));
+       ret = pmc_usb_command(port, (void *)&req, sizeof(req));
+       if (ret)
+               return ret;
+
+       if (data->status & DP_STATUS_HPD_STATE)
+               return pmc_usb_mux_dp_hpd(port, state);
+
+       return 0;
 }
 
 static int
index 0173890..b56a088 100644 (file)
@@ -179,26 +179,6 @@ out:
        return tcpci_irq(chip->tcpci);
 }
 
-static int rt1711h_init_alert(struct rt1711h_chip *chip,
-                             struct i2c_client *client)
-{
-       int ret;
-
-       /* Disable chip interrupts before requesting irq */
-       ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0);
-       if (ret < 0)
-               return ret;
-
-       ret = devm_request_threaded_irq(chip->dev, client->irq, NULL,
-                                       rt1711h_irq,
-                                       IRQF_ONESHOT | IRQF_TRIGGER_LOW,
-                                       dev_name(chip->dev), chip);
-       if (ret < 0)
-               return ret;
-       enable_irq_wake(client->irq);
-       return 0;
-}
-
 static int rt1711h_sw_reset(struct rt1711h_chip *chip)
 {
        int ret;
@@ -260,7 +240,8 @@ static int rt1711h_probe(struct i2c_client *client,
        if (ret < 0)
                return ret;
 
-       ret = rt1711h_init_alert(chip, client);
+       /* Disable chip interrupts before requesting irq */
+       ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0);
        if (ret < 0)
                return ret;
 
@@ -271,6 +252,14 @@ static int rt1711h_probe(struct i2c_client *client,
        if (IS_ERR_OR_NULL(chip->tcpci))
                return PTR_ERR(chip->tcpci);
 
+       ret = devm_request_threaded_irq(chip->dev, client->irq, NULL,
+                                       rt1711h_irq,
+                                       IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+                                       dev_name(chip->dev), chip);
+       if (ret < 0)
+               return ret;
+       enable_irq_wake(client->irq);
+
        return 0;
 }
 
index ff6562f..de211ef 100644 (file)
@@ -63,7 +63,7 @@ static void vdpa_release_dev(struct device *d)
  * @config: the bus operations that is supported by this device
  * @size: size of the parent structure that contains private data
  *
- * Drvier should use vdap_alloc_device() wrapper macro instead of
+ * Driver should use vdpa_alloc_device() wrapper macro instead of
  * using this directly.
  *
  * Returns an error when parent/config/dma_dev is not set or fail to get
index 7c07790..f634c81 100644 (file)
@@ -521,10 +521,14 @@ static void vfio_pci_release(void *device_data)
                vfio_pci_vf_token_user_add(vdev, -1);
                vfio_spapr_pci_eeh_release(vdev->pdev);
                vfio_pci_disable(vdev);
-               if (vdev->err_trigger)
+               if (vdev->err_trigger) {
                        eventfd_ctx_put(vdev->err_trigger);
-               if (vdev->req_trigger)
+                       vdev->err_trigger = NULL;
+               }
+               if (vdev->req_trigger) {
                        eventfd_ctx_put(vdev->req_trigger);
+                       vdev->req_trigger = NULL;
+               }
        }
 
        mutex_unlock(&vdev->reflck->lock);
index 8746c94..d98843f 100644 (file)
@@ -398,9 +398,15 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
 /* Caller should hold memory_lock semaphore */
 bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
 {
+       struct pci_dev *pdev = vdev->pdev;
        u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
 
-       return cmd & PCI_COMMAND_MEMORY;
+       /*
+        * SR-IOV VF memory enable is handled by the MSE bit in the
+        * PF SR-IOV capability, there's therefore no need to trigger
+        * faults based on the virtual value.
+        */
+       return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY);
 }
 
 /*
@@ -1728,6 +1734,15 @@ int vfio_config_init(struct vfio_pci_device *vdev)
                                 vconfig[PCI_INTERRUPT_PIN]);
 
                vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
+
+               /*
+                * VFs do not implement the memory enable bit of the COMMAND
+                * register therefore we'll not have it set in our initial
+                * copy of config space after pci_enable_device().  For
+                * consistency with PFs, set the virtual enable bit here.
+                */
+               *(__le16 *)&vconfig[PCI_COMMAND] |=
+                                       cpu_to_le16(PCI_COMMAND_MEMORY);
        }
 
        if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
index 0466921..a09dedc 100644 (file)
@@ -263,9 +263,62 @@ static int vhost_test_set_features(struct vhost_test *n, u64 features)
        return 0;
 }
 
+static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd)
+{
+       static void *backend;
+
+       const bool enable = fd != -1;
+       struct vhost_virtqueue *vq;
+       int r;
+
+       mutex_lock(&n->dev.mutex);
+       r = vhost_dev_check_owner(&n->dev);
+       if (r)
+               goto err;
+
+       if (index >= VHOST_TEST_VQ_MAX) {
+               r = -ENOBUFS;
+               goto err;
+       }
+       vq = &n->vqs[index];
+       mutex_lock(&vq->mutex);
+
+       /* Verify that ring has been setup correctly. */
+       if (!vhost_vq_access_ok(vq)) {
+               r = -EFAULT;
+               goto err_vq;
+       }
+       if (!enable) {
+               vhost_poll_stop(&vq->poll);
+               backend = vhost_vq_get_backend(vq);
+               vhost_vq_set_backend(vq, NULL);
+       } else {
+               vhost_vq_set_backend(vq, backend);
+               r = vhost_vq_init_access(vq);
+               if (r == 0)
+                       r = vhost_poll_start(&vq->poll, vq->kick);
+       }
+
+       mutex_unlock(&vq->mutex);
+
+       if (enable) {
+               vhost_test_flush_vq(n, index);
+       }
+
+       mutex_unlock(&n->dev.mutex);
+       return 0;
+
+err_vq:
+       mutex_unlock(&vq->mutex);
+err:
+       mutex_unlock(&n->dev.mutex);
+       return r;
+}
+
 static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
                             unsigned long arg)
 {
+       struct vhost_vring_file backend;
        struct vhost_test *n = f->private_data;
        void __user *argp = (void __user *)arg;
        u64 __user *featurep = argp;
@@ -277,6 +330,10 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
                if (copy_from_user(&test, argp, sizeof test))
                        return -EFAULT;
                return vhost_test_run(n, test);
+       case VHOST_TEST_SET_BACKEND:
+               if (copy_from_user(&backend, argp, sizeof backend))
+                       return -EFAULT;
+               return vhost_test_set_backend(n, backend.index, backend.fd);
        case VHOST_GET_FEATURES:
                features = VHOST_FEATURES;
                if (copy_to_user(featurep, &features, sizeof features))
index 7dd265b..822bc4b 100644 (file)
@@ -4,5 +4,6 @@
 
 /* Start a given test on the virtio null device. 0 stops all tests. */
 #define VHOST_TEST_RUN _IOW(VHOST_VIRTIO, 0x31, int)
+#define VHOST_TEST_SET_BACKEND _IOW(VHOST_VIRTIO, 0x32, int)
 
 #endif
index 7580e34..a54b60d 100644 (file)
@@ -818,7 +818,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vdpa_notification_area notify;
-       int index = vma->vm_pgoff;
+       unsigned long index = vma->vm_pgoff;
 
        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;
index e8ab583..113116d 100644 (file)
@@ -107,7 +107,7 @@ static void tosa_lcd_tg_on(struct tosa_lcd_data *data)
        /* TG LCD GVSS */
        tosa_tg_send(spi, TG_PINICTL, 0x0);
 
-       if (!data->i2c) {
+       if (IS_ERR_OR_NULL(data->i2c)) {
                /*
                 * after the pannel is powered up the first time,
                 * we can access the i2c bus so probe for the DAC
@@ -119,7 +119,7 @@ static void tosa_lcd_tg_on(struct tosa_lcd_data *data)
                        .addr   = DAC_BASE,
                        .platform_data = data->spi,
                };
-               data->i2c = i2c_new_device(adap, &info);
+               data->i2c = i2c_new_client_device(adap, &info);
        }
 }
 
index 9d28a8e..e2a490c 100644 (file)
@@ -2402,7 +2402,8 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
                ops->graphics = 1;
 
                if (!blank) {
-                       var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
+                       var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE |
+                               FB_ACTIVATE_KD_TEXT;
                        fb_set_var(info, &var);
                        ops->graphics = 0;
                        ops->var = info->var;
index f02be0d..8d418ab 100644 (file)
@@ -402,7 +402,7 @@ int __init hpfb_init(void)
        if (err)
                return err;
 
-       err = probe_kernel_read(&i, (unsigned char *)INTFBVADDR + DIO_IDOFF, 1);
+       err = copy_from_kernel_nofault(&i, (unsigned char *)INTFBVADDR + DIO_IDOFF, 1);
 
        if (!err && (i == DIO_ID_FBUFFER) && topcat_sid_ok(sid = DIO_SECID(INTFBVADDR))) {
                if (!request_mem_region(INTFBPADDR, DIO_DEVSIZE, "Internal Topcat"))
index bee29aa..def14ac 100644 (file)
@@ -1836,7 +1836,7 @@ static int uvesafb_setup(char *options)
                else if (!strcmp(this_opt, "noedid"))
                        noedid = true;
                else if (!strcmp(this_opt, "noblank"))
-                       blank = true;
+                       blank = false;
                else if (!strncmp(this_opt, "vtotal:", 7))
                        vram_total = simple_strtoul(this_opt + 7, NULL, 0);
                else if (!strncmp(this_opt, "vremap:", 7))
index 50c689f..f26f5f6 100644 (file)
@@ -101,6 +101,11 @@ struct virtio_mem {
 
        /* The parent resource for all memory added via this device. */
        struct resource *parent_resource;
+       /*
+        * Copy of "System RAM (virtio_mem)" to be used for
+        * add_memory_driver_managed().
+        */
+       const char *resource_name;
 
        /* Summary of all memory block states. */
        unsigned long nb_mb_state[VIRTIO_MEM_MB_STATE_COUNT];
@@ -414,8 +419,20 @@ static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id)
        if (nid == NUMA_NO_NODE)
                nid = memory_add_physaddr_to_nid(addr);
 
+       /*
+        * When force-unloading the driver and we still have memory added to
+        * Linux, the resource name has to stay.
+        */
+       if (!vm->resource_name) {
+               vm->resource_name = kstrdup_const("System RAM (virtio_mem)",
+                                                 GFP_KERNEL);
+               if (!vm->resource_name)
+                       return -ENOMEM;
+       }
+
        dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id);
-       return add_memory(nid, addr, memory_block_size_bytes());
+       return add_memory_driver_managed(nid, addr, memory_block_size_bytes(),
+                                        vm->resource_name);
 }
 
 /*
@@ -1192,7 +1209,7 @@ static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id,
                                                VIRTIO_MEM_MB_STATE_OFFLINE);
        }
 
-       return rc;
+       return 0;
 }
 
 /*
@@ -1890,10 +1907,12 @@ static void virtio_mem_remove(struct virtio_device *vdev)
            vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL] ||
            vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE] ||
            vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL] ||
-           vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE])
+           vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE]) {
                dev_warn(&vdev->dev, "device still has system memory added\n");
-       else
+       } else {
                virtio_mem_delete_resource(vm);
+               kfree_const(vm->resource_name);
+       }
 
        /* remove all tracking data - no locking needed */
        vfree(vm->mb_state);
index 3041092..449680a 100644 (file)
@@ -73,7 +73,7 @@ struct w1_netlink_msg
                        __u32           res;
                } mst;
        } id;
-       __u8                            data[0];
+       __u8                            data[];
 };
 
 /**
@@ -122,7 +122,7 @@ struct w1_netlink_cmd
        __u8                            cmd;
        __u8                            res;
        __u16                           len;
-       __u8                            data[0];
+       __u8                            data[];
 };
 
 #ifdef __KERNEL__
index 040d2a4..4f168b4 100644 (file)
@@ -69,11 +69,27 @@ struct xenbus_map_node {
        unsigned int   nr_handles;
 };
 
+struct map_ring_valloc {
+       struct xenbus_map_node *node;
+
+       /* Why do we need two arrays? See comment of __xenbus_map_ring */
+       union {
+               unsigned long addrs[XENBUS_MAX_RING_GRANTS];
+               pte_t *ptes[XENBUS_MAX_RING_GRANTS];
+       };
+       phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
+
+       struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
+       struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
+
+       unsigned int idx;       /* HVM only. */
+};
+
 static DEFINE_SPINLOCK(xenbus_valloc_lock);
 static LIST_HEAD(xenbus_valloc_pages);
 
 struct xenbus_ring_ops {
-       int (*map)(struct xenbus_device *dev,
+       int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
                   grant_ref_t *gnt_refs, unsigned int nr_grefs,
                   void **vaddr);
        int (*unmap)(struct xenbus_device *dev, void *vaddr);
@@ -440,8 +456,7 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
  * Map @nr_grefs pages of memory into this domain from another
  * domain's grant table.  xenbus_map_ring_valloc allocates @nr_grefs
  * pages of virtual address space, maps the pages to that address, and
- * sets *vaddr to that address.  Returns 0 on success, and GNTST_*
- * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
+ * sets *vaddr to that address.  Returns 0 on success, and -errno on
  * error. If an error is returned, device will switch to
  * XenbusStateClosing and the error message will be saved in XenStore.
  */
@@ -449,12 +464,25 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
                           unsigned int nr_grefs, void **vaddr)
 {
        int err;
+       struct map_ring_valloc *info;
+
+       *vaddr = NULL;
+
+       if (nr_grefs > XENBUS_MAX_RING_GRANTS)
+               return -EINVAL;
+
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
 
-       err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
-       /* Some hypervisors are buggy and can return 1. */
-       if (err > 0)
-               err = GNTST_general_error;
+       info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
+       if (!info->node)
+               err = -ENOMEM;
+       else
+               err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);
 
+       kfree(info->node);
+       kfree(info);
        return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
@@ -466,62 +494,57 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
                             grant_ref_t *gnt_refs,
                             unsigned int nr_grefs,
                             grant_handle_t *handles,
-                            phys_addr_t *addrs,
+                            struct map_ring_valloc *info,
                             unsigned int flags,
                             bool *leaked)
 {
-       struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
-       struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
        int i, j;
-       int err = GNTST_okay;
 
        if (nr_grefs > XENBUS_MAX_RING_GRANTS)
                return -EINVAL;
 
        for (i = 0; i < nr_grefs; i++) {
-               memset(&map[i], 0, sizeof(map[i]));
-               gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
-                                 dev->otherend_id);
+               gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
+                                 gnt_refs[i], dev->otherend_id);
                handles[i] = INVALID_GRANT_HANDLE;
        }
 
-       gnttab_batch_map(map, i);
+       gnttab_batch_map(info->map, i);
 
        for (i = 0; i < nr_grefs; i++) {
-               if (map[i].status != GNTST_okay) {
-                       err = map[i].status;
-                       xenbus_dev_fatal(dev, map[i].status,
+               if (info->map[i].status != GNTST_okay) {
+                       xenbus_dev_fatal(dev, info->map[i].status,
                                         "mapping in shared page %d from domain %d",
                                         gnt_refs[i], dev->otherend_id);
                        goto fail;
                } else
-                       handles[i] = map[i].handle;
+                       handles[i] = info->map[i].handle;
        }
 
-       return GNTST_okay;
+       return 0;
 
  fail:
        for (i = j = 0; i < nr_grefs; i++) {
                if (handles[i] != INVALID_GRANT_HANDLE) {
-                       memset(&unmap[j], 0, sizeof(unmap[j]));
-                       gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
+                       gnttab_set_unmap_op(&info->unmap[j],
+                                           info->phys_addrs[i],
                                            GNTMAP_host_map, handles[i]);
                        j++;
                }
        }
 
-       if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
+       if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j))
                BUG();
 
        *leaked = false;
        for (i = 0; i < j; i++) {
-               if (unmap[i].status != GNTST_okay) {
+               if (info->unmap[i].status != GNTST_okay) {
                        *leaked = true;
                        break;
                }
        }
 
-       return err;
+       return -ENOENT;
 }
 
 /**
@@ -566,21 +589,12 @@ static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
        return err;
 }
 
-struct map_ring_valloc_hvm
-{
-       unsigned int idx;
-
-       /* Why do we need two arrays? See comment of __xenbus_map_ring */
-       phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
-       unsigned long addrs[XENBUS_MAX_RING_GRANTS];
-};
-
 static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
                                            unsigned int goffset,
                                            unsigned int len,
                                            void *data)
 {
-       struct map_ring_valloc_hvm *info = data;
+       struct map_ring_valloc *info = data;
        unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);
 
        info->phys_addrs[info->idx] = vaddr;
@@ -589,39 +603,28 @@ static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
        info->idx++;
 }
 
-static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
-                                     grant_ref_t *gnt_ref,
-                                     unsigned int nr_grefs,
-                                     void **vaddr)
+static int xenbus_map_ring_hvm(struct xenbus_device *dev,
+                              struct map_ring_valloc *info,
+                              grant_ref_t *gnt_ref,
+                              unsigned int nr_grefs,
+                              void **vaddr)
 {
-       struct xenbus_map_node *node;
+       struct xenbus_map_node *node = info->node;
        int err;
        void *addr;
        bool leaked = false;
-       struct map_ring_valloc_hvm info = {
-               .idx = 0,
-       };
        unsigned int nr_pages = XENBUS_PAGES(nr_grefs);
 
-       if (nr_grefs > XENBUS_MAX_RING_GRANTS)
-               return -EINVAL;
-
-       *vaddr = NULL;
-
-       node = kzalloc(sizeof(*node), GFP_KERNEL);
-       if (!node)
-               return -ENOMEM;
-
        err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
        if (err)
                goto out_err;
 
        gnttab_foreach_grant(node->hvm.pages, nr_grefs,
                             xenbus_map_ring_setup_grant_hvm,
-                            &info);
+                            info);
 
        err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
-                               info.phys_addrs, GNTMAP_host_map, &leaked);
+                               info, GNTMAP_host_map, &leaked);
        node->nr_handles = nr_grefs;
 
        if (err)
@@ -641,11 +644,13 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
        spin_unlock(&xenbus_valloc_lock);
 
        *vaddr = addr;
+       info->node = NULL;
+
        return 0;
 
  out_xenbus_unmap_ring:
        if (!leaked)
-               xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs);
+               xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
        else
                pr_alert("leaking %p size %u page(s)",
                         addr, nr_pages);
@@ -653,7 +658,6 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
        if (!leaked)
                free_xenballooned_pages(nr_pages, node->hvm.pages);
  out_err:
-       kfree(node);
        return err;
 }
 
@@ -676,40 +680,30 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
 EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
 
 #ifdef CONFIG_XEN_PV
-static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
-                                    grant_ref_t *gnt_refs,
-                                    unsigned int nr_grefs,
-                                    void **vaddr)
+static int xenbus_map_ring_pv(struct xenbus_device *dev,
+                             struct map_ring_valloc *info,
+                             grant_ref_t *gnt_refs,
+                             unsigned int nr_grefs,
+                             void **vaddr)
 {
-       struct xenbus_map_node *node;
+       struct xenbus_map_node *node = info->node;
        struct vm_struct *area;
-       pte_t *ptes[XENBUS_MAX_RING_GRANTS];
-       phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
        int err = GNTST_okay;
        int i;
        bool leaked;
 
-       *vaddr = NULL;
-
-       if (nr_grefs > XENBUS_MAX_RING_GRANTS)
-               return -EINVAL;
-
-       node = kzalloc(sizeof(*node), GFP_KERNEL);
-       if (!node)
-               return -ENOMEM;
-
-       area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
+       area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, info->ptes);
        if (!area) {
                kfree(node);
                return -ENOMEM;
        }
 
        for (i = 0; i < nr_grefs; i++)
-               phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
+               info->phys_addrs[i] =
+                       arbitrary_virt_to_machine(info->ptes[i]).maddr;
 
        err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
-                               phys_addrs,
-                               GNTMAP_host_map | GNTMAP_contains_pte,
+                               info, GNTMAP_host_map | GNTMAP_contains_pte,
                                &leaked);
        if (err)
                goto failed;
@@ -722,6 +716,8 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
        spin_unlock(&xenbus_valloc_lock);
 
        *vaddr = area->addr;
+       info->node = NULL;
+
        return 0;
 
 failed:
@@ -730,11 +726,10 @@ failed:
        else
                pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
 
-       kfree(node);
        return err;
 }
 
-static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
+static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
 {
        struct xenbus_map_node *node;
        struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
@@ -798,12 +793,12 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
 }
 
 static const struct xenbus_ring_ops ring_ops_pv = {
-       .map = xenbus_map_ring_valloc_pv,
-       .unmap = xenbus_unmap_ring_vfree_pv,
+       .map = xenbus_map_ring_pv,
+       .unmap = xenbus_unmap_ring_pv,
 };
 #endif
 
-struct unmap_ring_vfree_hvm
+struct unmap_ring_hvm
 {
        unsigned int idx;
        unsigned long addrs[XENBUS_MAX_RING_GRANTS];
@@ -814,19 +809,19 @@ static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
                                              unsigned int len,
                                              void *data)
 {
-       struct unmap_ring_vfree_hvm *info = data;
+       struct unmap_ring_hvm *info = data;
 
        info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);
 
        info->idx++;
 }
 
-static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
+static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
 {
        int rv;
        struct xenbus_map_node *node;
        void *addr;
-       struct unmap_ring_vfree_hvm info = {
+       struct unmap_ring_hvm info = {
                .idx = 0,
        };
        unsigned int nr_pages;
@@ -887,8 +882,8 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
 EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
 
 static const struct xenbus_ring_ops ring_ops_hvm = {
-       .map = xenbus_map_ring_valloc_hvm,
-       .unmap = xenbus_unmap_ring_vfree_hvm,
+       .map = xenbus_map_ring_hvm,
+       .unmap = xenbus_unmap_ring_hvm,
 };
 
 void __init xenbus_ring_ops_init(void)
index 005921e..5b79cdc 100644 (file)
@@ -154,10 +154,17 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
                return ERR_PTR(-ENOMEM);
        }
 
+       cell->name = kmalloc(namelen + 1, GFP_KERNEL);
+       if (!cell->name) {
+               kfree(cell);
+               return ERR_PTR(-ENOMEM);
+       }
+
        cell->net = net;
        cell->name_len = namelen;
        for (i = 0; i < namelen; i++)
                cell->name[i] = tolower(name[i]);
+       cell->name[i] = 0;
 
        atomic_set(&cell->usage, 2);
        INIT_WORK(&cell->manager, afs_manage_cell);
@@ -207,6 +214,7 @@ parse_failed:
        if (ret == -EINVAL)
                printk(KERN_ERR "kAFS: bad VL server IP address\n");
 error:
+       kfree(cell->name);
        kfree(cell);
        _leave(" = %d", ret);
        return ERR_PTR(ret);
@@ -489,6 +497,7 @@ static void afs_cell_destroy(struct rcu_head *rcu)
        afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
        afs_put_cell(cell->net, cell->alias_of);
        key_put(cell->anonymous_key);
+       kfree(cell->name);
        kfree(cell);
 
        _leave(" [destroyed]");
index aa1d341..96757f3 100644 (file)
@@ -648,7 +648,7 @@ static void afs_do_lookup_success(struct afs_operation *op)
                        vp = &op->file[0];
                        abort_code = vp->scb.status.abort_code;
                        if (abort_code != 0) {
-                               op->abort_code = abort_code;
+                               op->ac.abort_code = abort_code;
                                op->error = afs_abort_to_error(abort_code);
                        }
                        break;
@@ -696,10 +696,11 @@ static const struct afs_operation_ops afs_inline_bulk_status_operation = {
        .success        = afs_do_lookup_success,
 };
 
-static const struct afs_operation_ops afs_fetch_status_operation = {
+static const struct afs_operation_ops afs_lookup_fetch_status_operation = {
        .issue_afs_rpc  = afs_fs_fetch_status,
        .issue_yfs_rpc  = yfs_fs_fetch_status,
        .success        = afs_do_lookup_success,
+       .aborted        = afs_check_for_remote_deletion,
 };
 
 /*
@@ -844,7 +845,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
                 * to FS.FetchStatus for op->file[1].
                 */
                op->fetch_status.which = 1;
-               op->ops = &afs_fetch_status_operation;
+               op->ops = &afs_lookup_fetch_status_operation;
                afs_begin_vnode_operation(op);
                afs_wait_for_operation(op);
        }
@@ -1236,6 +1237,17 @@ void afs_d_release(struct dentry *dentry)
        _enter("%pd", dentry);
 }
 
+void afs_check_for_remote_deletion(struct afs_operation *op)
+{
+       struct afs_vnode *vnode = op->file[0].vnode;
+
+       switch (op->ac.abort_code) {
+       case VNOVNODE:
+               set_bit(AFS_VNODE_DELETED, &vnode->flags);
+               afs_break_callback(vnode, afs_cb_break_for_deleted);
+       }
+}
+
 /*
  * Create a new inode for create/mkdir/symlink
  */
@@ -1268,7 +1280,7 @@ static void afs_vnode_new_inode(struct afs_operation *op)
 static void afs_create_success(struct afs_operation *op)
 {
        _enter("op=%08x", op->debug_id);
-       afs_check_for_remote_deletion(op, op->file[0].vnode);
+       op->ctime = op->file[0].scb.status.mtime_client;
        afs_vnode_commit_status(op, &op->file[0]);
        afs_update_dentry_version(op, &op->file[0], op->dentry);
        afs_vnode_new_inode(op);
@@ -1302,6 +1314,7 @@ static const struct afs_operation_ops afs_mkdir_operation = {
        .issue_afs_rpc  = afs_fs_make_dir,
        .issue_yfs_rpc  = yfs_fs_make_dir,
        .success        = afs_create_success,
+       .aborted        = afs_check_for_remote_deletion,
        .edit_dir       = afs_create_edit_dir,
        .put            = afs_create_put,
 };
@@ -1325,6 +1338,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 
        afs_op_set_vnode(op, 0, dvnode);
        op->file[0].dv_delta = 1;
+       op->file[0].update_ctime = true;
        op->dentry      = dentry;
        op->create.mode = S_IFDIR | mode;
        op->create.reason = afs_edit_dir_for_mkdir;
@@ -1350,7 +1364,7 @@ static void afs_dir_remove_subdir(struct dentry *dentry)
 static void afs_rmdir_success(struct afs_operation *op)
 {
        _enter("op=%08x", op->debug_id);
-       afs_check_for_remote_deletion(op, op->file[0].vnode);
+       op->ctime = op->file[0].scb.status.mtime_client;
        afs_vnode_commit_status(op, &op->file[0]);
        afs_update_dentry_version(op, &op->file[0], op->dentry);
 }
@@ -1382,6 +1396,7 @@ static const struct afs_operation_ops afs_rmdir_operation = {
        .issue_afs_rpc  = afs_fs_remove_dir,
        .issue_yfs_rpc  = yfs_fs_remove_dir,
        .success        = afs_rmdir_success,
+       .aborted        = afs_check_for_remote_deletion,
        .edit_dir       = afs_rmdir_edit_dir,
        .put            = afs_rmdir_put,
 };
@@ -1404,6 +1419,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
 
        afs_op_set_vnode(op, 0, dvnode);
        op->file[0].dv_delta = 1;
+       op->file[0].update_ctime = true;
 
        op->dentry      = dentry;
        op->ops         = &afs_rmdir_operation;
@@ -1479,7 +1495,8 @@ static void afs_dir_remove_link(struct afs_operation *op)
 static void afs_unlink_success(struct afs_operation *op)
 {
        _enter("op=%08x", op->debug_id);
-       afs_check_for_remote_deletion(op, op->file[0].vnode);
+       op->ctime = op->file[0].scb.status.mtime_client;
+       afs_check_dir_conflict(op, &op->file[0]);
        afs_vnode_commit_status(op, &op->file[0]);
        afs_vnode_commit_status(op, &op->file[1]);
        afs_update_dentry_version(op, &op->file[0], op->dentry);
@@ -1511,6 +1528,7 @@ static const struct afs_operation_ops afs_unlink_operation = {
        .issue_afs_rpc  = afs_fs_remove_file,
        .issue_yfs_rpc  = yfs_fs_remove_file,
        .success        = afs_unlink_success,
+       .aborted        = afs_check_for_remote_deletion,
        .edit_dir       = afs_unlink_edit_dir,
        .put            = afs_unlink_put,
 };
@@ -1537,6 +1555,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
 
        afs_op_set_vnode(op, 0, dvnode);
        op->file[0].dv_delta = 1;
+       op->file[0].update_ctime = true;
 
        /* Try to make sure we have a callback promise on the victim. */
        ret = afs_validate(vnode, op->key);
@@ -1561,9 +1580,25 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
        spin_unlock(&dentry->d_lock);
 
        op->file[1].vnode = vnode;
+       op->file[1].update_ctime = true;
+       op->file[1].op_unlinked = true;
        op->dentry      = dentry;
        op->ops         = &afs_unlink_operation;
-       return afs_do_sync_operation(op);
+       afs_begin_vnode_operation(op);
+       afs_wait_for_operation(op);
+
+       /* If there was a conflict with a third party, check the status of the
+        * unlinked vnode.
+        */
+       if (op->error == 0 && (op->flags & AFS_OPERATION_DIR_CONFLICT)) {
+               op->file[1].update_ctime = false;
+               op->fetch_status.which = 1;
+               op->ops = &afs_fetch_status_operation;
+               afs_begin_vnode_operation(op);
+               afs_wait_for_operation(op);
+       }
+
+       return afs_put_operation(op);
 
 error:
        return afs_put_operation(op);
@@ -1573,6 +1608,7 @@ static const struct afs_operation_ops afs_create_operation = {
        .issue_afs_rpc  = afs_fs_create_file,
        .issue_yfs_rpc  = yfs_fs_create_file,
        .success        = afs_create_success,
+       .aborted        = afs_check_for_remote_deletion,
        .edit_dir       = afs_create_edit_dir,
        .put            = afs_create_put,
 };
@@ -1601,6 +1637,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 
        afs_op_set_vnode(op, 0, dvnode);
        op->file[0].dv_delta = 1;
+       op->file[0].update_ctime = true;
 
        op->dentry      = dentry;
        op->create.mode = S_IFREG | mode;
@@ -1620,6 +1657,7 @@ static void afs_link_success(struct afs_operation *op)
        struct afs_vnode_param *vp = &op->file[1];
 
        _enter("op=%08x", op->debug_id);
+       op->ctime = dvp->scb.status.mtime_client;
        afs_vnode_commit_status(op, dvp);
        afs_vnode_commit_status(op, vp);
        afs_update_dentry_version(op, dvp, op->dentry);
@@ -1640,6 +1678,7 @@ static const struct afs_operation_ops afs_link_operation = {
        .issue_afs_rpc  = afs_fs_link,
        .issue_yfs_rpc  = yfs_fs_link,
        .success        = afs_link_success,
+       .aborted        = afs_check_for_remote_deletion,
        .edit_dir       = afs_create_edit_dir,
        .put            = afs_link_put,
 };
@@ -1672,6 +1711,8 @@ static int afs_link(struct dentry *from, struct inode *dir,
        afs_op_set_vnode(op, 0, dvnode);
        afs_op_set_vnode(op, 1, vnode);
        op->file[0].dv_delta = 1;
+       op->file[0].update_ctime = true;
+       op->file[1].update_ctime = true;
 
        op->dentry              = dentry;
        op->dentry_2            = from;
@@ -1689,6 +1730,7 @@ static const struct afs_operation_ops afs_symlink_operation = {
        .issue_afs_rpc  = afs_fs_symlink,
        .issue_yfs_rpc  = yfs_fs_symlink,
        .success        = afs_create_success,
+       .aborted        = afs_check_for_remote_deletion,
        .edit_dir       = afs_create_edit_dir,
        .put            = afs_create_put,
 };
@@ -1740,9 +1782,13 @@ static void afs_rename_success(struct afs_operation *op)
 {
        _enter("op=%08x", op->debug_id);
 
+       op->ctime = op->file[0].scb.status.mtime_client;
+       afs_check_dir_conflict(op, &op->file[1]);
        afs_vnode_commit_status(op, &op->file[0]);
-       if (op->file[1].vnode != op->file[0].vnode)
+       if (op->file[1].vnode != op->file[0].vnode) {
+               op->ctime = op->file[1].scb.status.mtime_client;
                afs_vnode_commit_status(op, &op->file[1]);
+       }
 }
 
 static void afs_rename_edit_dir(struct afs_operation *op)
@@ -1860,6 +1906,8 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
        afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */
        op->file[0].dv_delta = 1;
        op->file[1].dv_delta = 1;
+       op->file[0].update_ctime = true;
+       op->file[1].update_ctime = true;
 
        op->dentry              = old_dentry;
        op->dentry_2            = new_dentry;
index b14e3d9..04f75a4 100644 (file)
@@ -16,6 +16,7 @@ static void afs_silly_rename_success(struct afs_operation *op)
 {
        _enter("op=%08x", op->debug_id);
 
+       afs_check_dir_conflict(op, &op->file[0]);
        afs_vnode_commit_status(op, &op->file[0]);
 }
 
@@ -69,6 +70,11 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
                return PTR_ERR(op);
 
        afs_op_set_vnode(op, 0, dvnode);
+       afs_op_set_vnode(op, 1, dvnode);
+       op->file[0].dv_delta = 1;
+       op->file[1].dv_delta = 1;
+       op->file[0].update_ctime = true;
+       op->file[1].update_ctime = true;
 
        op->dentry              = old;
        op->dentry_2            = new;
@@ -129,6 +135,7 @@ int afs_sillyrename(struct afs_vnode *dvnode, struct afs_vnode *vnode,
        switch (ret) {
        case 0:
                /* The rename succeeded. */
+               set_bit(AFS_VNODE_SILLY_DELETED, &vnode->flags);
                d_move(dentry, sdentry);
                break;
        case -ERESTARTSYS:
@@ -148,19 +155,11 @@ out:
 
 static void afs_silly_unlink_success(struct afs_operation *op)
 {
-       struct afs_vnode *vnode = op->file[1].vnode;
-
        _enter("op=%08x", op->debug_id);
-       afs_check_for_remote_deletion(op, op->file[0].vnode);
+       afs_check_dir_conflict(op, &op->file[0]);
        afs_vnode_commit_status(op, &op->file[0]);
        afs_vnode_commit_status(op, &op->file[1]);
        afs_update_dentry_version(op, &op->file[0], op->dentry);
-
-       drop_nlink(&vnode->vfs_inode);
-       if (vnode->vfs_inode.i_nlink == 0) {
-               set_bit(AFS_VNODE_DELETED, &vnode->flags);
-               clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
-       }
 }
 
 static void afs_silly_unlink_edit_dir(struct afs_operation *op)
@@ -181,6 +180,7 @@ static const struct afs_operation_ops afs_silly_unlink_operation = {
        .issue_afs_rpc  = afs_fs_remove_file,
        .issue_yfs_rpc  = yfs_fs_remove_file,
        .success        = afs_silly_unlink_success,
+       .aborted        = afs_check_for_remote_deletion,
        .edit_dir       = afs_silly_unlink_edit_dir,
 };
 
@@ -200,12 +200,30 @@ static int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode
 
        afs_op_set_vnode(op, 0, dvnode);
        afs_op_set_vnode(op, 1, vnode);
+       op->file[0].dv_delta = 1;
+       op->file[0].update_ctime = true;
+       op->file[1].op_unlinked = true;
+       op->file[1].update_ctime = true;
 
        op->dentry      = dentry;
        op->ops         = &afs_silly_unlink_operation;
 
        trace_afs_silly_rename(vnode, true);
-       return afs_do_sync_operation(op);
+       afs_begin_vnode_operation(op);
+       afs_wait_for_operation(op);
+
+       /* If there was a conflict with a third party, check the status of the
+        * unlinked vnode.
+        */
+       if (op->error == 0 && (op->flags & AFS_OPERATION_DIR_CONFLICT)) {
+               op->file[1].update_ctime = false;
+               op->fetch_status.which = 1;
+               op->ops = &afs_fetch_status_operation;
+               afs_begin_vnode_operation(op);
+               afs_wait_for_operation(op);
+       }
+
+       return afs_put_operation(op);
 }
 
 /*
index 506c474..6f6ed16 100644 (file)
@@ -225,7 +225,6 @@ static void afs_fetch_data_success(struct afs_operation *op)
        struct afs_vnode *vnode = op->file[0].vnode;
 
        _enter("op=%08x", op->debug_id);
-       afs_check_for_remote_deletion(op, vnode);
        afs_vnode_commit_status(op, &op->file[0]);
        afs_stat_v(vnode, n_fetches);
        atomic_long_add(op->fetch.req->actual_len, &op->net->n_fetch_bytes);
@@ -240,6 +239,7 @@ static const struct afs_operation_ops afs_fetch_data_operation = {
        .issue_afs_rpc  = afs_fs_fetch_data,
        .issue_yfs_rpc  = yfs_fs_fetch_data,
        .success        = afs_fetch_data_success,
+       .aborted        = afs_check_for_remote_deletion,
        .put            = afs_fetch_data_put,
 };
 
index 71eea2a..ffb8575 100644 (file)
@@ -175,10 +175,7 @@ static void afs_kill_lockers_enoent(struct afs_vnode *vnode)
 
 static void afs_lock_success(struct afs_operation *op)
 {
-       struct afs_vnode *vnode = op->file[0].vnode;
-
        _enter("op=%08x", op->debug_id);
-       afs_check_for_remote_deletion(op, vnode);
        afs_vnode_commit_status(op, &op->file[0]);
 }
 
@@ -186,6 +183,7 @@ static const struct afs_operation_ops afs_set_lock_operation = {
        .issue_afs_rpc  = afs_fs_set_lock,
        .issue_yfs_rpc  = yfs_fs_set_lock,
        .success        = afs_lock_success,
+       .aborted        = afs_check_for_remote_deletion,
 };
 
 /*
index 2d2dff5..c264839 100644 (file)
@@ -187,9 +187,17 @@ void afs_wait_for_operation(struct afs_operation *op)
                op->error = afs_wait_for_call_to_complete(op->call, &op->ac);
        }
 
-       if (op->error == 0) {
+       switch (op->error) {
+       case 0:
                _debug("success");
                op->ops->success(op);
+               break;
+       case -ECONNABORTED:
+               if (op->ops->aborted)
+                       op->ops->aborted(op);
+               break;
+       default:
+               break;
        }
 
        afs_end_vnode_operation(op);
index b34f74b..5d9ef51 100644 (file)
@@ -314,7 +314,7 @@ void afs_fs_probe_timer(struct timer_list *timer)
 {
        struct afs_net *net = container_of(timer, struct afs_net, fs_probe_timer);
 
-       if (!queue_work(afs_wq, &net->fs_prober))
+       if (!net->live || !queue_work(afs_wq, &net->fs_prober))
                afs_dec_servers_outstanding(net);
 }
 
@@ -458,3 +458,12 @@ dont_wait:
                return -ETIME;
        return -EDESTADDRREQ;
 }
+
+/*
+ * Clean up the probing when the namespace is killed off.
+ */
+void afs_fs_probe_cleanup(struct afs_net *net)
+{
+       if (del_timer_sync(&net->fs_probe_timer))
+               afs_dec_servers_outstanding(net);
+}
index cd0a006..1d13d2e 100644 (file)
@@ -165,9 +165,11 @@ static void afs_apply_status(struct afs_operation *op,
 {
        struct afs_file_status *status = &vp->scb.status;
        struct afs_vnode *vnode = vp->vnode;
+       struct inode *inode = &vnode->vfs_inode;
        struct timespec64 t;
        umode_t mode;
        bool data_changed = false;
+       bool change_size = vp->set_size;
 
        _enter("{%llx:%llu.%u} %s",
               vp->fid.vid, vp->fid.vnode, vp->fid.unique,
@@ -186,25 +188,25 @@ static void afs_apply_status(struct afs_operation *op,
        }
 
        if (status->nlink != vnode->status.nlink)
-               set_nlink(&vnode->vfs_inode, status->nlink);
+               set_nlink(inode, status->nlink);
 
        if (status->owner != vnode->status.owner)
-               vnode->vfs_inode.i_uid = make_kuid(&init_user_ns, status->owner);
+               inode->i_uid = make_kuid(&init_user_ns, status->owner);
 
        if (status->group != vnode->status.group)
-               vnode->vfs_inode.i_gid = make_kgid(&init_user_ns, status->group);
+               inode->i_gid = make_kgid(&init_user_ns, status->group);
 
        if (status->mode != vnode->status.mode) {
-               mode = vnode->vfs_inode.i_mode;
+               mode = inode->i_mode;
                mode &= ~S_IALLUGO;
                mode |= status->mode;
-               WRITE_ONCE(vnode->vfs_inode.i_mode, mode);
+               WRITE_ONCE(inode->i_mode, mode);
        }
 
        t = status->mtime_client;
-       vnode->vfs_inode.i_ctime = t;
-       vnode->vfs_inode.i_mtime = t;
-       vnode->vfs_inode.i_atime = t;
+       inode->i_mtime = t;
+       if (vp->update_ctime)
+               inode->i_ctime = op->ctime;
 
        if (vnode->status.data_version != status->data_version)
                data_changed = true;
@@ -226,6 +228,7 @@ static void afs_apply_status(struct afs_operation *op,
                } else {
                        set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);
                }
+               change_size = true;
        } else if (vnode->status.type == AFS_FTYPE_DIR) {
                /* Expected directory change is handled elsewhere so
                 * that we can locally edit the directory and save on a
@@ -233,11 +236,22 @@ static void afs_apply_status(struct afs_operation *op,
                 */
                if (test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
                        data_changed = false;
+               change_size = true;
        }
 
        if (data_changed) {
-               inode_set_iversion_raw(&vnode->vfs_inode, status->data_version);
-               afs_set_i_size(vnode, status->size);
+               inode_set_iversion_raw(inode, status->data_version);
+
+               /* Only update the size if the data version jumped.  If the
+                * file is being modified locally, then we might have our own
+                * idea of what the size should be that's not the same as
+                * what's on the server.
+                */
+               if (change_size) {
+                       afs_set_i_size(vnode, status->size);
+                       inode->i_ctime = t;
+                       inode->i_atime = t;
+               }
        }
 }
 
@@ -267,32 +281,39 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v
 
        _enter("");
 
-       ASSERTCMP(op->error, ==, 0);
-
        write_seqlock(&vnode->cb_lock);
 
        if (vp->scb.have_error) {
+               /* A YFS server will return this from RemoveFile2 and AFS and
+                * YFS will return this from InlineBulkStatus.
+                */
                if (vp->scb.status.abort_code == VNOVNODE) {
                        set_bit(AFS_VNODE_DELETED, &vnode->flags);
                        clear_nlink(&vnode->vfs_inode);
                        __afs_break_callback(vnode, afs_cb_break_for_deleted);
+                       op->flags &= ~AFS_OPERATION_DIR_CONFLICT;
                }
-       } else {
-               if (vp->scb.have_status)
-                       afs_apply_status(op, vp);
+       } else if (vp->scb.have_status) {
+               afs_apply_status(op, vp);
                if (vp->scb.have_cb)
                        afs_apply_callback(op, vp);
+       } else if (vp->op_unlinked && !(op->flags & AFS_OPERATION_DIR_CONFLICT)) {
+               drop_nlink(&vnode->vfs_inode);
+               if (vnode->vfs_inode.i_nlink == 0) {
+                       set_bit(AFS_VNODE_DELETED, &vnode->flags);
+                       __afs_break_callback(vnode, afs_cb_break_for_deleted);
+               }
        }
 
        write_sequnlock(&vnode->cb_lock);
 
-       if (op->error == 0 && vp->scb.have_status)
+       if (vp->scb.have_status)
                afs_cache_permit(vnode, op->key, vp->cb_break_before, &vp->scb);
 }
 
 static void afs_fetch_status_success(struct afs_operation *op)
 {
-       struct afs_vnode_param *vp = &op->file[0];
+       struct afs_vnode_param *vp = &op->file[op->fetch_status.which];
        struct afs_vnode *vnode = vp->vnode;
        int ret;
 
@@ -306,10 +327,11 @@ static void afs_fetch_status_success(struct afs_operation *op)
        }
 }
 
-static const struct afs_operation_ops afs_fetch_status_operation = {
+const struct afs_operation_ops afs_fetch_status_operation = {
        .issue_afs_rpc  = afs_fs_fetch_status,
        .issue_yfs_rpc  = yfs_fs_fetch_status,
        .success        = afs_fetch_status_success,
+       .aborted        = afs_check_for_remote_deletion,
 };
 
 /*
@@ -716,6 +738,9 @@ int afs_getattr(const struct path *path, struct kstat *stat,
        do {
                read_seqbegin_or_lock(&vnode->cb_lock, &seq);
                generic_fillattr(inode, stat);
+               if (test_bit(AFS_VNODE_SILLY_DELETED, &vnode->flags) &&
+                   stat->nlink > 0)
+                       stat->nlink -= 1;
        } while (need_seqretry(&vnode->cb_lock, seq));
 
        done_seqretry(&vnode->cb_lock, seq);
@@ -785,7 +810,15 @@ void afs_evict_inode(struct inode *inode)
 
 static void afs_setattr_success(struct afs_operation *op)
 {
+       struct inode *inode = &op->file[0].vnode->vfs_inode;
+
        afs_vnode_commit_status(op, &op->file[0]);
+       if (op->setattr.attr->ia_valid & ATTR_SIZE) {
+               loff_t i_size = inode->i_size, size = op->setattr.attr->ia_size;
+               if (size > i_size)
+                       pagecache_isize_extended(inode, i_size, size);
+               truncate_pagecache(inode, size);
+       }
 }
 
 static const struct afs_operation_ops afs_setattr_operation = {
@@ -801,17 +834,31 @@ int afs_setattr(struct dentry *dentry, struct iattr *attr)
 {
        struct afs_operation *op;
        struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
+       int ret;
 
        _enter("{%llx:%llu},{n=%pd},%x",
               vnode->fid.vid, vnode->fid.vnode, dentry,
               attr->ia_valid);
 
        if (!(attr->ia_valid & (ATTR_SIZE | ATTR_MODE | ATTR_UID | ATTR_GID |
-                               ATTR_MTIME))) {
+                               ATTR_MTIME | ATTR_MTIME_SET | ATTR_TIMES_SET |
+                               ATTR_TOUCH))) {
                _leave(" = 0 [unsupported]");
                return 0;
        }
 
+       if (attr->ia_valid & ATTR_SIZE) {
+               if (!S_ISREG(vnode->vfs_inode.i_mode))
+                       return -EISDIR;
+
+               ret = inode_newsize_ok(&vnode->vfs_inode, attr->ia_size);
+               if (ret)
+                       return ret;
+
+               if (attr->ia_size == i_size_read(&vnode->vfs_inode))
+                       attr->ia_valid &= ~ATTR_SIZE;
+       }
+
        /* flush any dirty data outstanding on a regular file */
        if (S_ISREG(vnode->vfs_inode.i_mode))
                filemap_write_and_wait(vnode->vfs_inode.i_mapping);
@@ -825,8 +872,12 @@ int afs_setattr(struct dentry *dentry, struct iattr *attr)
        afs_op_set_vnode(op, 0, vnode);
        op->setattr.attr = attr;
 
-       if (attr->ia_valid & ATTR_SIZE)
+       if (attr->ia_valid & ATTR_SIZE) {
                op->file[0].dv_delta = 1;
+               op->file[0].set_size = true;
+       }
+       op->ctime = attr->ia_ctime;
+       op->file[0].update_ctime = 1;
 
        op->ops = &afs_setattr_operation;
        return afs_do_sync_operation(op);
index 0c9806e..792ac71 100644 (file)
@@ -388,7 +388,7 @@ struct afs_cell {
        struct afs_vlserver_list __rcu *vl_servers;
 
        u8                      name_len;       /* Length of name */
-       char                    name[64 + 1];   /* Cell name, case-flattened and NUL-padded */
+       char                    *name;          /* Cell name, case-flattened and NUL-padded */
 };
 
 /*
@@ -634,6 +634,7 @@ struct afs_vnode {
 #define AFS_VNODE_AUTOCELL     6               /* set if Vnode is an auto mount point */
 #define AFS_VNODE_PSEUDODIR    7               /* set if Vnode is a pseudo directory */
 #define AFS_VNODE_NEW_CONTENT  8               /* Set if file has new content (create/trunc-0) */
+#define AFS_VNODE_SILLY_DELETED        9               /* Set if file has been silly-deleted */
 
        struct list_head        wb_keys;        /* List of keys available for writeback */
        struct list_head        pending_locks;  /* locks waiting to be granted */
@@ -744,8 +745,11 @@ struct afs_vnode_param {
        afs_dataversion_t       dv_before;      /* Data version before the call */
        unsigned int            cb_break_before; /* cb_break + cb_s_break before the call */
        u8                      dv_delta;       /* Expected change in data version */
-       bool                    put_vnode;      /* T if we have a ref on the vnode */
-       bool                    need_io_lock;   /* T if we need the I/O lock on this */
+       bool                    put_vnode:1;    /* T if we have a ref on the vnode */
+       bool                    need_io_lock:1; /* T if we need the I/O lock on this */
+       bool                    update_ctime:1; /* Need to update the ctime */
+       bool                    set_size:1;     /* Must update i_size */
+       bool                    op_unlinked:1;  /* True if file was unlinked by op */
 };
 
 /*
@@ -766,9 +770,9 @@ struct afs_operation {
        struct dentry           *dentry;        /* Dentry to be altered */
        struct dentry           *dentry_2;      /* Second dentry to be altered */
        struct timespec64       mtime;          /* Modification time to record */
+       struct timespec64       ctime;          /* Change time to set */
        short                   nr_files;       /* Number of entries in file[], more_files */
        short                   error;
-       unsigned int            abort_code;
        unsigned int            debug_id;
 
        unsigned int            cb_v_break;     /* Volume break counter before op */
@@ -837,6 +841,7 @@ struct afs_operation {
 #define AFS_OPERATION_LOCK_1           0x0200  /* Set if have io_lock on file[1] */
 #define AFS_OPERATION_TRIED_ALL                0x0400  /* Set if we've tried all the fileservers */
 #define AFS_OPERATION_RETRY_SERVER     0x0800  /* Set if we should retry the current server */
+#define AFS_OPERATION_DIR_CONFLICT     0x1000  /* Set if we detected a 3rd-party dir change */
 };
 
 /*
@@ -932,6 +937,7 @@ extern const struct address_space_operations afs_dir_aops;
 extern const struct dentry_operations afs_fs_dentry_operations;
 
 extern void afs_d_release(struct dentry *);
+extern void afs_check_for_remote_deletion(struct afs_operation *);
 
 /*
  * dir_edit.c
@@ -1059,10 +1065,13 @@ extern int afs_wait_for_fs_probes(struct afs_server_list *, unsigned long);
 extern void afs_probe_fileserver(struct afs_net *, struct afs_server *);
 extern void afs_fs_probe_dispatcher(struct work_struct *);
 extern int afs_wait_for_one_fs_probe(struct afs_server *, bool);
+extern void afs_fs_probe_cleanup(struct afs_net *);
 
 /*
  * inode.c
  */
+extern const struct afs_operation_ops afs_fetch_status_operation;
+
 extern void afs_vnode_commit_status(struct afs_operation *, struct afs_vnode_param *);
 extern int afs_fetch_status(struct afs_vnode *, struct key *, bool, afs_access_t *);
 extern int afs_ilookup5_test_by_fid(struct inode *, void *);
@@ -1435,7 +1444,6 @@ extern ssize_t afs_listxattr(struct dentry *, char *, size_t);
 /*
  * yfsclient.c
  */
-extern void yfs_fs_fetch_file_status(struct afs_operation *);
 extern void yfs_fs_fetch_data(struct afs_operation *);
 extern void yfs_fs_create_file(struct afs_operation *);
 extern void yfs_fs_make_dir(struct afs_operation *);
@@ -1481,15 +1489,6 @@ static inline struct inode *AFS_VNODE_TO_I(struct afs_vnode *vnode)
        return &vnode->vfs_inode;
 }
 
-static inline void afs_check_for_remote_deletion(struct afs_operation *op,
-                                                struct afs_vnode *vnode)
-{
-       if (op->error == -ENOENT) {
-               set_bit(AFS_VNODE_DELETED, &vnode->flags);
-               afs_break_callback(vnode, afs_cb_break_for_deleted);
-       }
-}
-
 /*
  * Note that a dentry got changed.  We need to set d_fsdata to the data version
  * number derived from the result of the operation.  It doesn't matter if
@@ -1504,6 +1503,18 @@ static inline void afs_update_dentry_version(struct afs_operation *op,
                        (void *)(unsigned long)dir_vp->scb.status.data_version;
 }
 
+/*
+ * Check for a conflicting operation on a directory that we just unlinked from.
+ * If someone managed to sneak a link or an unlink in on the file we just
+ * unlinked, we won't be able to trust nlink on an AFS file (but not YFS).
+ */
+static inline void afs_check_dir_conflict(struct afs_operation *op,
+                                         struct afs_vnode_param *dvp)
+{
+       if (dvp->dv_before + dvp->dv_delta != dvp->scb.status.data_version)
+               op->flags |= AFS_OPERATION_DIR_CONFLICT;
+}
+
 static inline int afs_io_error(struct afs_call *call, enum afs_io_error where)
 {
        trace_afs_io_error(call->debug_id, -EIO, where);
index 9c79c91..31b472f 100644 (file)
@@ -100,6 +100,7 @@ static int __net_init afs_net_init(struct net *net_ns)
        timer_setup(&net->fs_timer, afs_servers_timer, 0);
        INIT_WORK(&net->fs_prober, afs_fs_probe_dispatcher);
        timer_setup(&net->fs_probe_timer, afs_fs_probe_timer, 0);
+       atomic_set(&net->servers_outstanding, 1);
 
        ret = -ENOMEM;
        sysnames = kzalloc(sizeof(*sysnames), GFP_KERNEL);
@@ -130,6 +131,7 @@ static int __net_init afs_net_init(struct net *net_ns)
 
 error_open_socket:
        net->live = false;
+       afs_fs_probe_cleanup(net);
        afs_cell_purge(net);
        afs_purge_servers(net);
 error_cell_init:
@@ -150,6 +152,7 @@ static void __net_exit afs_net_exit(struct net *net_ns)
        struct afs_net *net = afs_net(net_ns);
 
        net->live = false;
+       afs_fs_probe_cleanup(net);
        afs_cell_purge(net);
        afs_purge_servers(net);
        afs_close_socket(net);
index 52b19e9..5334f1b 100644 (file)
@@ -83,6 +83,7 @@ int afs_abort_to_error(u32 abort_code)
        case UAENOLCK:                  return -ENOLCK;
        case UAENOTEMPTY:               return -ENOTEMPTY;
        case UAELOOP:                   return -ELOOP;
+       case UAEOVERFLOW:               return -EOVERFLOW;
        case UAENOMEDIUM:               return -ENOMEDIUM;
        case UAEDQUOT:                  return -EDQUOT;
 
index 039e348..e82e452 100644 (file)
@@ -605,11 +605,12 @@ void afs_purge_servers(struct afs_net *net)
        _enter("");
 
        if (del_timer_sync(&net->fs_timer))
-               atomic_dec(&net->servers_outstanding);
+               afs_dec_servers_outstanding(net);
 
        afs_queue_server_manager(net);
 
        _debug("wait");
+       atomic_dec(&net->servers_outstanding);
        wait_var_event(&net->servers_outstanding,
                       !atomic_read(&net->servers_outstanding));
        _leave("");
index 768497f..7437806 100644 (file)
@@ -194,11 +194,11 @@ int afs_write_end(struct file *file, struct address_space *mapping,
 
        i_size = i_size_read(&vnode->vfs_inode);
        if (maybe_i_size > i_size) {
-               spin_lock(&vnode->wb_lock);
+               write_seqlock(&vnode->cb_lock);
                i_size = i_size_read(&vnode->vfs_inode);
                if (maybe_i_size > i_size)
                        i_size_write(&vnode->vfs_inode, maybe_i_size);
-               spin_unlock(&vnode->wb_lock);
+               write_sequnlock(&vnode->cb_lock);
        }
 
        if (!PageUptodate(page)) {
@@ -393,6 +393,7 @@ static void afs_store_data_success(struct afs_operation *op)
 {
        struct afs_vnode *vnode = op->file[0].vnode;
 
+       op->ctime = op->file[0].scb.status.mtime_client;
        afs_vnode_commit_status(op, &op->file[0]);
        if (op->error == 0) {
                afs_pages_written_back(vnode, op->store.first, op->store.last);
@@ -491,6 +492,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
        unsigned long count, priv;
        unsigned n, offset, to, f, t;
        pgoff_t start, first, last;
+       loff_t i_size, end;
        int loop, ret;
 
        _enter(",%lx", primary_page->index);
@@ -591,7 +593,12 @@ no_more:
        first = primary_page->index;
        last = first + count - 1;
 
+       end = (loff_t)last * PAGE_SIZE + to;
+       i_size = i_size_read(&vnode->vfs_inode);
+
        _debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);
+       if (end > i_size)
+               to = i_size & ~PAGE_MASK;
 
        ret = afs_store_data(mapping, first, last, offset, to);
        switch (ret) {
@@ -844,6 +851,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
                             vmf->page->index, priv);
        SetPagePrivate(vmf->page);
        set_page_private(vmf->page, priv);
+       file_update_time(file);
 
        sb_end_pagefault(inode->i_sb);
        return VM_FAULT_LOCKED;
index 52d5af5..8c24fdc 100644 (file)
@@ -330,29 +330,6 @@ static void xdr_decode_YFSFetchVolumeStatus(const __be32 **_bp,
 }
 
 /*
- * Deliver a reply that's a status, callback and volsync.
- */
-static int yfs_deliver_fs_status_cb_and_volsync(struct afs_call *call)
-{
-       struct afs_operation *op = call->op;
-       const __be32 *bp;
-       int ret;
-
-       ret = afs_transfer_reply(call);
-       if (ret < 0)
-               return ret;
-
-       /* unmarshall the reply once we've received all of it */
-       bp = call->buffer;
-       xdr_decode_YFSFetchStatus(&bp, call, &op->file[0].scb);
-       xdr_decode_YFSCallBack(&bp, call, &op->file[0].scb);
-       xdr_decode_YFSVolSync(&bp, &op->volsync);
-
-       _leave(" = 0 [done]");
-       return 0;
-}
-
-/*
  * Deliver reply data to operations that just return a file status and a volume
  * sync record.
  */
@@ -375,48 +352,6 @@ static int yfs_deliver_status_and_volsync(struct afs_call *call)
 }
 
 /*
- * YFS.FetchStatus operation type
- */
-static const struct afs_call_type yfs_RXYFSFetchStatus_vnode = {
-       .name           = "YFS.FetchStatus(vnode)",
-       .op             = yfs_FS_FetchStatus,
-       .deliver        = yfs_deliver_fs_status_cb_and_volsync,
-       .destructor     = afs_flat_call_destructor,
-};
-
-/*
- * Fetch the status information for a file.
- */
-void yfs_fs_fetch_file_status(struct afs_operation *op)
-{
-       struct afs_vnode_param *vp = &op->file[0];
-       struct afs_call *call;
-       __be32 *bp;
-
-       _enter(",%x,{%llx:%llu},,",
-              key_serial(op->key), vp->fid.vid, vp->fid.vnode);
-
-       call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchStatus_vnode,
-                                  sizeof(__be32) * 2 +
-                                  sizeof(struct yfs_xdr_YFSFid),
-                                  sizeof(struct yfs_xdr_YFSFetchStatus) +
-                                  sizeof(struct yfs_xdr_YFSCallBack) +
-                                  sizeof(struct yfs_xdr_YFSVolSync));
-       if (!call)
-               return afs_op_nomem(op);
-
-       /* marshall the parameters */
-       bp = call->request;
-       bp = xdr_encode_u32(bp, YFSFETCHSTATUS);
-       bp = xdr_encode_u32(bp, 0); /* RPC flags */
-       bp = xdr_encode_YFSFid(bp, &vp->fid);
-       yfs_check_req(call, bp);
-
-       trace_afs_make_fs_call(call, &vp->fid);
-       afs_make_op_call(op, call, GFP_NOFS);
-}
-
-/*
  * Deliver reply data to an YFS.FetchData64.
  */
 static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
@@ -1605,12 +1540,36 @@ void yfs_fs_release_lock(struct afs_operation *op)
 }
 
 /*
+ * Deliver a reply to YFS.FetchStatus
+ */
+static int yfs_deliver_fs_fetch_status(struct afs_call *call)
+{
+       struct afs_operation *op = call->op;
+       struct afs_vnode_param *vp = &op->file[op->fetch_status.which];
+       const __be32 *bp;
+       int ret;
+
+       ret = afs_transfer_reply(call);
+       if (ret < 0)
+               return ret;
+
+       /* unmarshall the reply once we've received all of it */
+       bp = call->buffer;
+       xdr_decode_YFSFetchStatus(&bp, call, &vp->scb);
+       xdr_decode_YFSCallBack(&bp, call, &vp->scb);
+       xdr_decode_YFSVolSync(&bp, &op->volsync);
+
+       _leave(" = 0 [done]");
+       return 0;
+}
+
+/*
  * YFS.FetchStatus operation type
  */
 static const struct afs_call_type yfs_RXYFSFetchStatus = {
        .name           = "YFS.FetchStatus",
        .op             = yfs_FS_FetchStatus,
-       .deliver        = yfs_deliver_fs_status_cb_and_volsync,
+       .deliver        = yfs_deliver_fs_fetch_status,
        .destructor     = afs_flat_call_destructor,
 };
 
@@ -1619,7 +1578,7 @@ static const struct afs_call_type yfs_RXYFSFetchStatus = {
  */
 void yfs_fs_fetch_status(struct afs_operation *op)
 {
-       struct afs_vnode_param *vp = &op->file[0];
+       struct afs_vnode_param *vp = &op->file[op->fetch_status.which];
        struct afs_call *call;
        __be32 *bp;
 
index 7ecddc2..91e7cc4 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -67,7 +67,7 @@ struct aio_ring {
        unsigned        header_length;  /* size of aio_ring */
 
 
-       struct io_event         io_events[0];
+       struct io_event         io_events[];
 }; /* 128 bytes + ring size */
 
 /*
index b04c528..74c886f 100644 (file)
@@ -53,7 +53,7 @@ static int autofs_write(struct autofs_sb_info *sbi,
 
        mutex_lock(&sbi->pipe_mutex);
        while (bytes) {
-               wr = __kernel_write(file, data, bytes, &file->f_pos);
+               wr = kernel_write(file, data, bytes, &file->f_pos);
                if (wr <= 0)
                        break;
                data += wr;
index 47860e5..0ae656e 100644 (file)
@@ -75,7 +75,7 @@ static void bdev_write_inode(struct block_device *bdev)
 }
 
 /* Kill _all_ buffers and pagecache , dirty or not.. */
-void kill_bdev(struct block_device *bdev)
+static void kill_bdev(struct block_device *bdev)
 {
        struct address_space *mapping = bdev->bd_inode->i_mapping;
 
@@ -84,8 +84,7 @@ void kill_bdev(struct block_device *bdev)
 
        invalidate_bh_lrus();
        truncate_inode_pages(mapping, 0);
-}      
-EXPORT_SYMBOL(kill_bdev);
+}
 
 /* Invalidate clean unused buffers and pagecache. */
 void invalidate_bdev(struct block_device *bdev)
@@ -1565,10 +1564,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
         */
        if (!for_part) {
                ret = devcgroup_inode_permission(bdev->bd_inode, perm);
-               if (ret != 0) {
-                       bdput(bdev);
+               if (ret != 0)
                        return ret;
-               }
        }
 
  restart:
@@ -1637,8 +1634,10 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                                goto out_clear;
                        BUG_ON(for_part);
                        ret = __blkdev_get(whole, mode, 1);
-                       if (ret)
+                       if (ret) {
+                               bdput(whole);
                                goto out_clear;
+                       }
                        bdev->bd_contains = whole;
                        bdev->bd_part = disk_get_part(disk, partno);
                        if (!(disk->flags & GENHD_FL_UP) ||
@@ -1688,7 +1687,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
        disk_unblock_events(disk);
        put_disk_and_module(disk);
  out:
-       bdput(bdev);
 
        return ret;
 }
@@ -1755,6 +1753,9 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
                bdput(whole);
        }
 
+       if (res)
+               bdput(bdev);
+
        return res;
 }
 EXPORT_SYMBOL(blkdev_get);
index 176e8a2..c037ef5 100644 (file)
@@ -940,7 +940,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
-               goto out_put_group;
+               goto out;
        }
 
        /*
@@ -978,7 +978,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                ret = btrfs_orphan_add(trans, BTRFS_I(inode));
                if (ret) {
                        btrfs_add_delayed_iput(inode);
-                       goto out_put_group;
+                       goto out;
                }
                clear_nlink(inode);
                /* One for the block groups ref */
@@ -1001,13 +1001,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
        ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
        if (ret < 0)
-               goto out_put_group;
+               goto out;
        if (ret > 0)
                btrfs_release_path(path);
        if (ret == 0) {
                ret = btrfs_del_item(trans, tree_root, path);
                if (ret)
-                       goto out_put_group;
+                       goto out;
                btrfs_release_path(path);
        }
 
@@ -1016,6 +1016,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                 &fs_info->block_group_cache_tree);
        RB_CLEAR_NODE(&block_group->cache_node);
 
+       /* Once for the block groups rbtree */
+       btrfs_put_block_group(block_group);
+
        if (fs_info->first_logical_byte == block_group->start)
                fs_info->first_logical_byte = (u64)-1;
        spin_unlock(&fs_info->block_group_cache_lock);
@@ -1089,6 +1092,25 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
        spin_unlock(&block_group->space_info->lock);
 
+       /*
+        * Remove the free space for the block group from the free space tree
+        * and the block group's item from the extent tree before marking the
+        * block group as removed. This is to prevent races with tasks that
+        * freeze and unfreeze a block group, this task and another task
+        * allocating a new block group - the unfreeze task ends up removing
+        * the block group's extent map before the task calling this function
+        * deletes the block group item from the extent tree, allowing for
+        * another task to attempt to create another block group with the same
+        * item key (and failing with -EEXIST and a transaction abort).
+        */
+       ret = remove_block_group_free_space(trans, block_group);
+       if (ret)
+               goto out;
+
+       ret = remove_block_group_item(trans, path, block_group);
+       if (ret < 0)
+               goto out;
+
        mutex_lock(&fs_info->chunk_mutex);
        spin_lock(&block_group->lock);
        block_group->removed = 1;
@@ -1123,17 +1145,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 
        mutex_unlock(&fs_info->chunk_mutex);
 
-       ret = remove_block_group_free_space(trans, block_group);
-       if (ret)
-               goto out_put_group;
-
-       /* Once for the block groups rbtree */
-       btrfs_put_block_group(block_group);
-
-       ret = remove_block_group_item(trans, path, block_group);
-       if (ret < 0)
-               goto out;
-
        if (remove_em) {
                struct extent_map_tree *em_tree;
 
@@ -1145,10 +1156,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                free_extent_map(em);
        }
 
-out_put_group:
+out:
        /* Once for the lookup reference */
        btrfs_put_block_group(block_group);
-out:
        if (remove_rsv)
                btrfs_delayed_refs_rsv_release(fs_info, 1);
        btrfs_free_path(path);
index 3a7648b..82ab6e5 100644 (file)
@@ -1196,7 +1196,7 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
                switch (tm->op) {
                case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
                        BUG_ON(tm->slot < n);
-                       /* Fallthrough */
+                       fallthrough;
                case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
                case MOD_LOG_KEY_REMOVE:
                        btrfs_set_node_key(eb, &tm->key, tm->slot);
index 30ce703..d404cce 100644 (file)
@@ -1009,6 +1009,8 @@ enum {
        BTRFS_ROOT_DEAD_RELOC_TREE,
        /* Mark dead root stored on device whose cleanup needs to be resumed */
        BTRFS_ROOT_DEAD_TREE,
+       /* The root has a log tree. Used only for subvolume roots. */
+       BTRFS_ROOT_HAS_LOG_TREE,
 };
 
 /*
index 7c6f0bb..b1a1480 100644 (file)
@@ -2593,10 +2593,12 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
                    !extent_buffer_uptodate(tree_root->node)) {
                        handle_error = true;
 
-                       if (IS_ERR(tree_root->node))
+                       if (IS_ERR(tree_root->node)) {
                                ret = PTR_ERR(tree_root->node);
-                       else if (!extent_buffer_uptodate(tree_root->node))
+                               tree_root->node = NULL;
+                       } else if (!extent_buffer_uptodate(tree_root->node)) {
                                ret = -EUCLEAN;
+                       }
 
                        btrfs_warn(fs_info, "failed to read tree root");
                        continue;
index 68c9605..608f934 100644 (file)
@@ -5058,25 +5058,28 @@ struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
 static void check_buffer_tree_ref(struct extent_buffer *eb)
 {
        int refs;
-       /* the ref bit is tricky.  We have to make sure it is set
-        * if we have the buffer dirty.   Otherwise the
-        * code to free a buffer can end up dropping a dirty
-        * page
+       /*
+        * The TREE_REF bit is first set when the extent_buffer is added
+        * to the radix tree. It is also reset, if unset, when a new reference
+        * is created by find_extent_buffer.
         *
-        * Once the ref bit is set, it won't go away while the
-        * buffer is dirty or in writeback, and it also won't
-        * go away while we have the reference count on the
-        * eb bumped.
+        * It is only cleared in two cases: freeing the last non-tree
+        * reference to the extent_buffer when its STALE bit is set or
+        * calling releasepage when the tree reference is the only reference.
         *
-        * We can't just set the ref bit without bumping the
-        * ref on the eb because free_extent_buffer might
-        * see the ref bit and try to clear it.  If this happens
-        * free_extent_buffer might end up dropping our original
-        * ref by mistake and freeing the page before we are able
-        * to add one more ref.
+        * In both cases, care is taken to ensure that the extent_buffer's
+        * pages are not under io. However, releasepage can be concurrently
+        * called with creating new references, which is prone to race
+        * conditions between the calls to check_buffer_tree_ref in those
+        * codepaths and clearing TREE_REF in try_release_extent_buffer.
         *
-        * So bump the ref count first, then set the bit.  If someone
-        * beat us to it, drop the ref we added.
+        * The actual lifetime of the extent_buffer in the radix tree is
+        * adequately protected by the refcount, but the TREE_REF bit and
+        * its corresponding reference are not. To protect against this
+        * class of races, we call check_buffer_tree_ref from the codepaths
+        * which trigger io after they set eb->io_pages. Note that once io is
+        * initiated, TREE_REF can no longer be cleared, so that is the
+        * moment at which any such race is best fixed.
         */
        refs = atomic_read(&eb->refs);
        if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
@@ -5527,6 +5530,11 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num)
        clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
        eb->read_mirror = 0;
        atomic_set(&eb->io_pages, num_reads);
+       /*
+        * It is possible for releasepage to clear the TREE_REF bit before we
+        * set io_pages. See check_buffer_tree_ref for a more detailed comment.
+        */
+       check_buffer_tree_ref(eb);
        for (i = 0; i < num_pages; i++) {
                page = eb->pages[i];
 
index 2c14312..2520605 100644 (file)
@@ -1533,7 +1533,7 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
 }
 
 static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
-                                   size_t *write_bytes)
+                                   size_t *write_bytes, bool nowait)
 {
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_root *root = inode->root;
@@ -1541,27 +1541,43 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
        u64 num_bytes;
        int ret;
 
-       if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
+       if (!nowait && !btrfs_drew_try_write_lock(&root->snapshot_lock))
                return -EAGAIN;
 
        lockstart = round_down(pos, fs_info->sectorsize);
        lockend = round_up(pos + *write_bytes,
                           fs_info->sectorsize) - 1;
+       num_bytes = lockend - lockstart + 1;
 
-       btrfs_lock_and_flush_ordered_range(inode, lockstart,
-                                          lockend, NULL);
+       if (nowait) {
+               struct btrfs_ordered_extent *ordered;
+
+               if (!try_lock_extent(&inode->io_tree, lockstart, lockend))
+                       return -EAGAIN;
+
+               ordered = btrfs_lookup_ordered_range(inode, lockstart,
+                                                    num_bytes);
+               if (ordered) {
+                       btrfs_put_ordered_extent(ordered);
+                       ret = -EAGAIN;
+                       goto out_unlock;
+               }
+       } else {
+               btrfs_lock_and_flush_ordered_range(inode, lockstart,
+                                                  lockend, NULL);
+       }
 
-       num_bytes = lockend - lockstart + 1;
        ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
                        NULL, NULL, NULL);
        if (ret <= 0) {
                ret = 0;
-               btrfs_drew_write_unlock(&root->snapshot_lock);
+               if (!nowait)
+                       btrfs_drew_write_unlock(&root->snapshot_lock);
        } else {
                *write_bytes = min_t(size_t, *write_bytes ,
                                     num_bytes - pos + lockstart);
        }
-
+out_unlock:
        unlock_extent(&inode->io_tree, lockstart, lockend);
 
        return ret;
@@ -1633,7 +1649,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
                        if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
                                                      BTRFS_INODE_PREALLOC)) &&
                            check_can_nocow(BTRFS_I(inode), pos,
-                                       &write_bytes) > 0) {
+                                           &write_bytes, false) > 0) {
                                /*
                                 * For nodata cow case, no need to reserve
                                 * data space.
@@ -1904,13 +1920,25 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
        pos = iocb->ki_pos;
        count = iov_iter_count(from);
        if (iocb->ki_flags & IOCB_NOWAIT) {
+               size_t nocow_bytes = count;
+
                /*
                 * We will allocate space in case nodatacow is not set,
                 * so bail
                 */
                if (!(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
                                              BTRFS_INODE_PREALLOC)) ||
-                   check_can_nocow(BTRFS_I(inode), pos, &count) <= 0) {
+                   check_can_nocow(BTRFS_I(inode), pos, &nocow_bytes,
+                                   true) <= 0) {
+                       inode_unlock(inode);
+                       return -EAGAIN;
+               }
+               /*
+                * There are holes in the range or parts of the range that must
+                * be COWed (shared extents, RO block groups, etc), so just bail
+                * out.
+                */
+               if (nocow_bytes < count) {
                        inode_unlock(inode);
                        return -EAGAIN;
                }
index d04c82c..18d384f 100644 (file)
@@ -985,6 +985,7 @@ static noinline int cow_file_range(struct inode *inode,
        u64 num_bytes;
        unsigned long ram_size;
        u64 cur_alloc_size = 0;
+       u64 min_alloc_size;
        u64 blocksize = fs_info->sectorsize;
        struct btrfs_key ins;
        struct extent_map *em;
@@ -1035,10 +1036,26 @@ static noinline int cow_file_range(struct inode *inode,
        btrfs_drop_extent_cache(BTRFS_I(inode), start,
                        start + num_bytes - 1, 0);
 
+       /*
+        * Relocation relies on the relocated extents to have exactly the same
+        * size as the original extents. Normally writeback for relocation data
+        * extents follows a NOCOW path because relocation preallocates the
+        * extents. However, due to an operation such as scrub turning a block
+        * group to RO mode, it may fallback to COW mode, so we must make sure
+        * an extent allocated during COW has exactly the requested size and can
+        * not be split into smaller extents, otherwise relocation breaks and
+        * fails during the stage where it updates the bytenr of file extent
+        * items.
+        */
+       if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
+               min_alloc_size = num_bytes;
+       else
+               min_alloc_size = fs_info->sectorsize;
+
        while (num_bytes > 0) {
                cur_alloc_size = num_bytes;
                ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
-                                          fs_info->sectorsize, 0, alloc_hint,
+                                          min_alloc_size, 0, alloc_hint,
                                           &ins, 1, 1);
                if (ret < 0)
                        goto out_unlock;
@@ -1361,6 +1378,8 @@ static int fallback_to_cow(struct inode *inode, struct page *locked_page,
                           int *page_started, unsigned long *nr_written)
 {
        const bool is_space_ino = btrfs_is_free_space_inode(BTRFS_I(inode));
+       const bool is_reloc_ino = (BTRFS_I(inode)->root->root_key.objectid ==
+                                  BTRFS_DATA_RELOC_TREE_OBJECTID);
        const u64 range_bytes = end + 1 - start;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        u64 range_start = start;
@@ -1391,18 +1410,23 @@ static int fallback_to_cow(struct inode *inode, struct page *locked_page,
         *    data space info, which we incremented in the step above.
         *
         * If we need to fallback to cow and the inode corresponds to a free
-        * space cache inode, we must also increment bytes_may_use of the data
-        * space_info for the same reason. Space caches always get a prealloc
+        * space cache inode or an inode of the data relocation tree, we must
+        * also increment bytes_may_use of the data space_info for the same
+        * reason. Space caches and relocated data extents always get a prealloc
         * extent for them, however scrub or balance may have set the block
-        * group that contains that extent to RO mode.
+        * group that contains that extent to RO mode and therefore force COW
+        * when starting writeback.
         */
        count = count_range_bits(io_tree, &range_start, end, range_bytes,
                                 EXTENT_NORESERVE, 0);
-       if (count > 0 || is_space_ino) {
-               const u64 bytes = is_space_ino ? range_bytes : count;
+       if (count > 0 || is_space_ino || is_reloc_ino) {
+               u64 bytes = count;
                struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
                struct btrfs_space_info *sinfo = fs_info->data_sinfo;
 
+               if (is_space_ino || is_reloc_ino)
+                       bytes = range_bytes;
+
                spin_lock(&sinfo->lock);
                btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
                spin_unlock(&sinfo->lock);
@@ -7865,9 +7889,6 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
                        dio_data.overwrite = 1;
                        inode_unlock(inode);
                        relock = true;
-               } else if (iocb->ki_flags & IOCB_NOWAIT) {
-                       ret = -EAGAIN;
-                       goto out;
                }
                ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
                                                   offset, count);
index 168deb8..e8f7c5f 100644 (file)
@@ -2692,7 +2692,7 @@ out:
        btrfs_put_root(root);
 out_free:
        btrfs_free_path(path);
-       kzfree(subvol_info);
+       kfree(subvol_info);
        return ret;
 }
 
index 7887317..af92525 100644 (file)
@@ -509,7 +509,7 @@ static int process_leaf(struct btrfs_root *root,
                switch (key.type) {
                case BTRFS_EXTENT_ITEM_KEY:
                        *num_bytes = key.offset;
-                       /* fall through */
+                       fallthrough;
                case BTRFS_METADATA_ITEM_KEY:
                        *bytenr = key.objectid;
                        ret = process_extent_item(fs_info, path, &key, i,
index 41ee886..c7bd3fd 100644 (file)
@@ -879,8 +879,8 @@ static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
                return false;
        }
        global_rsv->reserved -= ticket->bytes;
+       remove_ticket(space_info, ticket);
        ticket->bytes = 0;
-       list_del_init(&ticket->list);
        wake_up(&ticket->wait);
        space_info->tickets_id++;
        if (global_rsv->reserved < global_rsv->size)
index bc73fd6..c3826ae 100644 (file)
@@ -523,7 +523,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                case Opt_compress_force:
                case Opt_compress_force_type:
                        compress_force = true;
-                       /* Fallthrough */
+                       fallthrough;
                case Opt_compress:
                case Opt_compress_type:
                        saved_compress_type = btrfs_test_opt(info,
@@ -622,7 +622,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                        btrfs_set_opt(info->mount_opt, NOSSD);
                        btrfs_clear_and_info(info, SSD,
                                             "not using ssd optimizations");
-                       /* Fallthrough */
+                       fallthrough;
                case Opt_nossd_spread:
                        btrfs_clear_and_info(info, SSD_SPREAD,
                                             "not using spread ssd allocation scheme");
@@ -793,7 +793,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
                case Opt_recovery:
                        btrfs_warn(info,
                                   "'recovery' is deprecated, use 'usebackuproot' instead");
-                       /* fall through */
+                       fallthrough;
                case Opt_usebackuproot:
                        btrfs_info(info,
                                   "trying to use backup root at mount time");
index 920cee3..cd5348f 100644 (file)
@@ -169,6 +169,7 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
                if (ret)
                        goto out;
 
+               set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
                clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
                root->log_start_pid = current->pid;
        }
@@ -195,6 +196,9 @@ static int join_running_log_trans(struct btrfs_root *root)
 {
        int ret = -ENOENT;
 
+       if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state))
+               return ret;
+
        mutex_lock(&root->log_mutex);
        if (root->log_root) {
                ret = 0;
@@ -3303,6 +3307,7 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
        if (root->log_root) {
                free_log_tree(trans, root->log_root);
                root->log_root = NULL;
+               clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
        }
        return 0;
 }
index f067b59..75af233 100644 (file)
@@ -408,7 +408,7 @@ static inline enum btrfs_map_op btrfs_op(struct bio *bio)
                return BTRFS_MAP_WRITE;
        default:
                WARN_ON_ONCE(1);
-               /* fall through */
+               fallthrough;
        case REQ_OP_READ:
                return BTRFS_MAP_READ;
        }
index e7726f5..3080cda 100644 (file)
@@ -937,7 +937,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
        }
 
        data = kmap(page);
-       ret = __kernel_write(file, data, len, &pos);
+       ret = kernel_write(file, data, len, &pos);
        kunmap(page);
        fput(file);
        if (ret != len)
index fc98b97..53588d7 100644 (file)
@@ -399,6 +399,10 @@ skip_rdma:
                        if (ses->sign)
                                seq_puts(m, " signed");
 
+                       seq_printf(m, "\n\tUser: %d Cred User: %d",
+                                  from_kuid(&init_user_ns, ses->linux_uid),
+                                  from_kuid(&init_user_ns, ses->cred_uid));
+
                        if (ses->chan_count > 1) {
                                seq_printf(m, "\n\n\tExtra Channels: %zu\n",
                                           ses->chan_count-1);
@@ -406,7 +410,7 @@ skip_rdma:
                                        cifs_dump_channel(m, j, &ses->chans[j]);
                        }
 
-                       seq_puts(m, "\n\tShares:");
+                       seq_puts(m, "\n\n\tShares:");
                        j = 0;
 
                        seq_printf(m, "\n\t%d) IPC: ", j);
index c7a311d..99b3180 100644 (file)
@@ -156,5 +156,5 @@ extern int cifs_truncate_page(struct address_space *mapping, loff_t from);
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.27"
+#define CIFS_VERSION   "2.28"
 #endif                         /* _CIFSFS_H */
index 5fac34f..a61abde 100644 (file)
@@ -5306,9 +5306,15 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
        vol_info->nocase = master_tcon->nocase;
        vol_info->nohandlecache = master_tcon->nohandlecache;
        vol_info->local_lease = master_tcon->local_lease;
+       vol_info->no_lease = master_tcon->no_lease;
+       vol_info->resilient = master_tcon->use_resilient;
+       vol_info->persistent = master_tcon->use_persistent;
+       vol_info->handle_timeout = master_tcon->handle_timeout;
        vol_info->no_linux_ext = !master_tcon->unix_ext;
+       vol_info->linux_ext = master_tcon->posix_extensions;
        vol_info->sectype = master_tcon->ses->sectype;
        vol_info->sign = master_tcon->ses->sign;
+       vol_info->seal = master_tcon->seal;
 
        rc = cifs_set_vol_auth(vol_info, master_tcon->ses);
        if (rc) {
@@ -5334,10 +5340,6 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
                goto out;
        }
 
-       /* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
-       if (tcon->posix_extensions)
-               cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
-
        if (cap_unix(ses))
                reset_cifs_unix_caps(0, tcon, NULL, vol_info);
 
index 4fe757c..be46fab 100644 (file)
@@ -1149,20 +1149,20 @@ cifs_posix_lock_test(struct file *file, struct file_lock *flock)
 
 /*
  * Set the byte-range lock (posix style). Returns:
- * 1) 0, if we set the lock and don't need to request to the server;
- * 2) 1, if we need to request to the server;
- * 3) <0, if the error occurs while setting the lock.
+ * 1) <0, if the error occurs while setting the lock;
+ * 2) 0, if we set the lock and don't need to request to the server;
+ * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
+ * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
  */
 static int
 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
 {
        struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
-       int rc = 1;
+       int rc = FILE_LOCK_DEFERRED + 1;
 
        if ((flock->fl_flags & FL_POSIX) == 0)
                return rc;
 
-try_again:
        cifs_down_write(&cinode->lock_sem);
        if (!cinode->can_cache_brlcks) {
                up_write(&cinode->lock_sem);
@@ -1171,13 +1171,6 @@ try_again:
 
        rc = posix_lock_file(file, flock, NULL);
        up_write(&cinode->lock_sem);
-       if (rc == FILE_LOCK_DEFERRED) {
-               rc = wait_event_interruptible(flock->fl_wait,
-                                       list_empty(&flock->fl_blocked_member));
-               if (!rc)
-                       goto try_again;
-               locks_delete_block(flock);
-       }
        return rc;
 }
 
@@ -1652,7 +1645,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
                int posix_lock_type;
 
                rc = cifs_posix_lock_set(file, flock);
-               if (!rc || rc < 0)
+               if (rc <= FILE_LOCK_DEFERRED)
                        return rc;
 
                if (type & server->vals->shared_lock_type)
@@ -4336,7 +4329,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
                        break;
 
                __SetPageLocked(page);
-               if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
+               rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
+               if (rc) {
                        __ClearPageLocked(page);
                        break;
                }
@@ -4352,6 +4346,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
        struct list_head *page_list, unsigned num_pages)
 {
        int rc;
+       int err = 0;
        struct list_head tmplist;
        struct cifsFileInfo *open_file = file->private_data;
        struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
@@ -4396,7 +4391,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
         * the order of declining indexes. When we put the pages in
         * the rdata->pages, then we want them in increasing order.
         */
-       while (!list_empty(page_list)) {
+       while (!list_empty(page_list) && !err) {
                unsigned int i, nr_pages, bytes, rsize;
                loff_t offset;
                struct page *page, *tpage;
@@ -4429,9 +4424,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
                        return 0;
                }
 
-               rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
+               nr_pages = 0;
+               err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
                                         &nr_pages, &offset, &bytes);
-               if (rc) {
+               if (!nr_pages) {
                        add_credits_and_wake_if(server, credits, 0);
                        break;
                }
index 583f5e4..49c3ea8 100644 (file)
@@ -2044,6 +2044,7 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
        FILE_UNIX_BASIC_INFO *info_buf_target;
        unsigned int xid;
        int rc, tmprc;
+       bool new_target = d_really_is_negative(target_dentry);
 
        if (flags & ~RENAME_NOREPLACE)
                return -EINVAL;
@@ -2120,8 +2121,13 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry,
         */
 
 unlink_target:
-       /* Try unlinking the target dentry if it's not negative */
-       if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) {
+       /*
+        * If the target dentry was created during the rename, try
+        * unlinking it if it's not negative
+        */
+       if (new_target &&
+           d_really_is_positive(target_dentry) &&
+           (rc == -EACCES || rc == -EEXIST)) {
                if (d_is_dir(target_dentry))
                        tmprc = cifs_rmdir(target_dir, target_dentry);
                else
@@ -2535,6 +2541,15 @@ set_size_out:
        if (rc == 0) {
                cifsInode->server_eof = attrs->ia_size;
                cifs_setsize(inode, attrs->ia_size);
+
+               /*
+                * The man page of truncate says if the size changed,
+                * then the st_ctime and st_mtime fields for the file
+                * are updated.
+                */
+               attrs->ia_ctime = attrs->ia_mtime = current_time(inode);
+               attrs->ia_valid |= ATTR_CTIME | ATTR_MTIME;
+
                cifs_truncate_page(inode->i_mapping, inode->i_size);
        }
 
index 4a73e63..dcde44f 100644 (file)
@@ -169,6 +169,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
        unsigned int xid;
        struct cifsFileInfo *pSMBFile = filep->private_data;
        struct cifs_tcon *tcon;
+       struct tcon_link *tlink;
        struct cifs_sb_info *cifs_sb;
        __u64   ExtAttrBits = 0;
        __u64   caps;
@@ -307,13 +308,19 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                                break;
                        }
                        cifs_sb = CIFS_SB(inode->i_sb);
-                       tcon = tlink_tcon(cifs_sb_tlink(cifs_sb));
+                       tlink = cifs_sb_tlink(cifs_sb);
+                       if (IS_ERR(tlink)) {
+                               rc = PTR_ERR(tlink);
+                               break;
+                       }
+                       tcon = tlink_tcon(tlink);
                        if (tcon && tcon->ses->server->ops->notify) {
                                rc = tcon->ses->server->ops->notify(xid,
                                                filep, (void __user *)arg);
                                cifs_dbg(FYI, "ioctl notify rc %d\n", rc);
                        } else
                                rc = -EOPNOTSUPP;
+                       cifs_put_tlink(tlink);
                        break;
                default:
                        cifs_dbg(FYI, "unsupported ioctl\n");
index 56791a6..e44d049 100644 (file)
@@ -844,28 +844,26 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
        struct bio_vec *bv = NULL;
 
        if (iov_iter_is_kvec(iter)) {
-               memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
+               memcpy(&ctx->iter, iter, sizeof(*iter));
                ctx->len = count;
                iov_iter_advance(iter, count);
                return 0;
        }
 
-       if (max_pages * sizeof(struct bio_vec) <= CIFS_AIO_KMALLOC_LIMIT)
-               bv = kmalloc_array(max_pages, sizeof(struct bio_vec),
-                                  GFP_KERNEL);
+       if (array_size(max_pages, sizeof(*bv)) <= CIFS_AIO_KMALLOC_LIMIT)
+               bv = kmalloc_array(max_pages, sizeof(*bv), GFP_KERNEL);
 
        if (!bv) {
-               bv = vmalloc(array_size(max_pages, sizeof(struct bio_vec)));
+               bv = vmalloc(array_size(max_pages, sizeof(*bv)));
                if (!bv)
                        return -ENOMEM;
        }
 
-       if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT)
-               pages = kmalloc_array(max_pages, sizeof(struct page *),
-                                     GFP_KERNEL);
+       if (array_size(max_pages, sizeof(*pages)) <= CIFS_AIO_KMALLOC_LIMIT)
+               pages = kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL);
 
        if (!pages) {
-               pages = vmalloc(array_size(max_pages, sizeof(struct page *)));
+               pages = vmalloc(array_size(max_pages, sizeof(*pages)));
                if (!pages) {
                        kvfree(bv);
                        return -ENOMEM;
index 6a39451..1579928 100644 (file)
@@ -354,9 +354,13 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_sync_hdr *shdr)
                  ((struct smb2_ioctl_rsp *)shdr)->OutputCount);
                break;
        case SMB2_CHANGE_NOTIFY:
+               *off = le16_to_cpu(
+                 ((struct smb2_change_notify_rsp *)shdr)->OutputBufferOffset);
+               *len = le32_to_cpu(
+                 ((struct smb2_change_notify_rsp *)shdr)->OutputBufferLength);
+               break;
        default:
-               /* BB FIXME for unimplemented cases above */
-               cifs_dbg(VFS, "no length check for command\n");
+               cifs_dbg(VFS, "no length check for command %d\n", le16_to_cpu(shdr->Command));
                break;
        }
 
index 736d86b..32f90dc 100644 (file)
@@ -763,6 +763,7 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
                        /* close extra handle outside of crit sec */
                        SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
                }
+               rc = 0;
                goto oshr_free;
        }
 
@@ -2147,7 +2148,7 @@ smb3_notify(const unsigned int xid, struct file *pfile,
 
        tcon = cifs_sb_master_tcon(cifs_sb);
        oparms.tcon = tcon;
-       oparms.desired_access = FILE_READ_ATTRIBUTES;
+       oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
        oparms.disposition = FILE_OPEN;
        oparms.create_options = cifs_create_options(cifs_sb, 0);
        oparms.fid = &fid;
@@ -3187,6 +3188,11 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
        trace_smb3_zero_enter(xid, cfile->fid.persistent_fid, tcon->tid,
                              ses->Suid, offset, len);
 
+       /*
+        * We zero the range through ioctl, so we need remove the page caches
+        * first, otherwise the data may be inconsistent with the server.
+        */
+       truncate_pagecache_range(inode, offset, offset + len - 1);
 
        /* if file not oplocked can't be sure whether asking to extend size */
        if (!CIFS_CACHE_READ(cifsi))
@@ -3253,6 +3259,12 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
                return rc;
        }
 
+       /*
+        * We implement the punch hole through ioctl, so we need remove the page
+        * caches first, otherwise the data may be inconsistent with the server.
+        */
+       truncate_pagecache_range(inode, offset, offset + len - 1);
+
        cifs_dbg(FYI, "Offset %lld len %lld\n", offset, len);
 
        fsctl_buf.FileOffset = cpu_to_le64(offset);
index d11e310..84433d0 100644 (file)
@@ -523,7 +523,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int timeout, const int flags,
                      unsigned int *instance)
 {
-       int rc;
+       long rc;
        int *credits;
        int optype;
        long int t;
index ae49a55..d0ed71f 100644 (file)
@@ -918,11 +918,6 @@ struct dentry *debugfs_create_blob(const char *name, umode_t mode,
 }
 EXPORT_SYMBOL_GPL(debugfs_create_blob);
 
-struct array_data {
-       void *array;
-       u32 elements;
-};
-
 static size_t u32_format_array(char *buf, size_t bufsize,
                               u32 *array, int array_size)
 {
@@ -943,8 +938,8 @@ static size_t u32_format_array(char *buf, size_t bufsize,
 
 static int u32_array_open(struct inode *inode, struct file *file)
 {
-       struct array_data *data = inode->i_private;
-       int size, elements = data->elements;
+       struct debugfs_u32_array *data = inode->i_private;
+       int size, elements = data->n_elements;
        char *buf;
 
        /*
@@ -959,7 +954,7 @@ static int u32_array_open(struct inode *inode, struct file *file)
        buf[size] = 0;
 
        file->private_data = buf;
-       u32_format_array(buf, size, data->array, data->elements);
+       u32_format_array(buf, size, data->array, data->n_elements);
 
        return nonseekable_open(inode, file);
 }
@@ -996,8 +991,7 @@ static const struct file_operations u32_array_fops = {
  * @parent: a pointer to the parent dentry for this file.  This should be a
  *          directory dentry if set.  If this parameter is %NULL, then the
  *          file will be created in the root of the debugfs filesystem.
- * @array: u32 array that provides data.
- * @elements: total number of elements in the array.
+ * @array: wrapper struct containing data pointer and size of the array.
  *
  * This function creates a file in debugfs with the given name that exports
  * @array as data. If the @mode variable is so set it can be read from.
@@ -1005,17 +999,10 @@ static const struct file_operations u32_array_fops = {
  * Once array is created its size can not be changed.
  */
 void debugfs_create_u32_array(const char *name, umode_t mode,
-                             struct dentry *parent, u32 *array, u32 elements)
+                             struct dentry *parent,
+                             struct debugfs_u32_array *array)
 {
-       struct array_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
-
-       if (data == NULL)
-               return;
-
-       data->array = array;
-       data->elements = elements;
-
-       debugfs_create_file_unsafe(name, mode, parent, data, &u32_array_fops);
+       debugfs_create_file_unsafe(name, mode, parent, array, &u32_array_fops);
 }
 EXPORT_SYMBOL_GPL(debugfs_create_u32_array);
 
index e9e27a2..feaa5e1 100644 (file)
@@ -51,6 +51,7 @@ static ssize_t efivarfs_file_write(struct file *file,
        } else {
                inode_lock(inode);
                i_size_write(inode, datasize + sizeof(attributes));
+               inode->i_mtime = current_time(inode);
                inode_unlock(inode);
        }
 
@@ -72,10 +73,8 @@ static ssize_t efivarfs_file_read(struct file *file, char __user *userbuf,
        ssize_t size = 0;
        int err;
 
-       while (!__ratelimit(&file->f_cred->user->ratelimit)) {
-               if (!msleep_interruptible(50))
-                       return -EINTR;
-       }
+       while (!__ratelimit(&file->f_cred->user->ratelimit))
+               msleep(50);
 
        err = efivar_entry_size(var, &datasize);
 
index 7824f55..9b66c28 100644 (file)
@@ -144,22 +144,22 @@ static inline void z_erofs_onlinepage_init(struct page *page)
 static inline void z_erofs_onlinepage_fixup(struct page *page,
        uintptr_t index, bool down)
 {
-       unsigned long *p, o, v, id;
-repeat:
-       p = &page_private(page);
-       o = READ_ONCE(*p);
+       union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
+       int orig, orig_index, val;
 
-       id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
-       if (id) {
+repeat:
+       orig = atomic_read(u.o);
+       orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
+       if (orig_index) {
                if (!index)
                        return;
 
-               DBG_BUGON(id != index);
+               DBG_BUGON(orig_index != index);
        }
 
-       v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
-               ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
-       if (cmpxchg(p, o, v) != o)
+       val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
+               ((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
+       if (atomic_cmpxchg(u.o, orig, val) != orig)
                goto repeat;
 }
 
index e6e8a9a..23dfbb8 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1818,13 +1818,14 @@ static int exec_binprm(struct linux_binprm *bprm)
 /*
  * sys_execve() executes a new program.
  */
-static int __do_execve_file(int fd, struct filename *filename,
-                           struct user_arg_ptr argv,
-                           struct user_arg_ptr envp,
-                           int flags, struct file *file)
+static int do_execveat_common(int fd, struct filename *filename,
+                             struct user_arg_ptr argv,
+                             struct user_arg_ptr envp,
+                             int flags)
 {
        char *pathbuf = NULL;
        struct linux_binprm *bprm;
+       struct file *file;
        struct files_struct *displaced;
        int retval;
 
@@ -1863,8 +1864,7 @@ static int __do_execve_file(int fd, struct filename *filename,
        check_unsafe_exec(bprm);
        current->in_execve = 1;
 
-       if (!file)
-               file = do_open_execat(fd, filename, flags);
+       file = do_open_execat(fd, filename, flags);
        retval = PTR_ERR(file);
        if (IS_ERR(file))
                goto out_unmark;
@@ -1872,9 +1872,7 @@ static int __do_execve_file(int fd, struct filename *filename,
        sched_exec();
 
        bprm->file = file;
-       if (!filename) {
-               bprm->filename = "none";
-       } else if (fd == AT_FDCWD || filename->name[0] == '/') {
+       if (fd == AT_FDCWD || filename->name[0] == '/') {
                bprm->filename = filename->name;
        } else {
                if (filename->name[0] == '\0')
@@ -1935,8 +1933,7 @@ static int __do_execve_file(int fd, struct filename *filename,
        task_numa_free(current, false);
        free_bprm(bprm);
        kfree(pathbuf);
-       if (filename)
-               putname(filename);
+       putname(filename);
        if (displaced)
                put_files_struct(displaced);
        return retval;
@@ -1967,27 +1964,10 @@ out_files:
        if (displaced)
                reset_files_struct(displaced);
 out_ret:
-       if (filename)
-               putname(filename);
+       putname(filename);
        return retval;
 }
 
-static int do_execveat_common(int fd, struct filename *filename,
-                             struct user_arg_ptr argv,
-                             struct user_arg_ptr envp,
-                             int flags)
-{
-       return __do_execve_file(fd, filename, argv, envp, flags, NULL);
-}
-
-int do_execve_file(struct file *file, void *__argv, void *__envp)
-{
-       struct user_arg_ptr argv = { .ptr.native = __argv };
-       struct user_arg_ptr envp = { .ptr.native = __envp };
-
-       return __do_execve_file(AT_FDCWD, NULL, argv, envp, 0, file);
-}
-
 int do_execve(struct filename *filename,
        const char __user *const __user *__argv,
        const char __user *const __user *__envp)
index de43534..91ece64 100644 (file)
@@ -309,7 +309,7 @@ const struct file_operations exfat_dir_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
        .iterate        = exfat_iterate,
-       .fsync          = generic_file_fsync,
+       .fsync          = exfat_file_fsync,
 };
 
 int exfat_alloc_new_dir(struct inode *inode, struct exfat_chain *clu)
@@ -425,10 +425,12 @@ static void exfat_init_name_entry(struct exfat_dentry *ep,
        ep->dentry.name.flags = 0x0;
 
        for (i = 0; i < EXFAT_FILE_NAME_LEN; i++) {
-               ep->dentry.name.unicode_0_14[i] = cpu_to_le16(*uniname);
-               if (*uniname == 0x0)
-                       break;
-               uniname++;
+               if (*uniname != 0x0) {
+                       ep->dentry.name.unicode_0_14[i] = cpu_to_le16(*uniname);
+                       uniname++;
+               } else {
+                       ep->dentry.name.unicode_0_14[i] = 0x0;
+               }
        }
 }
 
index 595f311..7579cd3 100644 (file)
@@ -420,6 +420,7 @@ void exfat_truncate(struct inode *inode, loff_t size);
 int exfat_setattr(struct dentry *dentry, struct iattr *attr);
 int exfat_getattr(const struct path *path, struct kstat *stat,
                unsigned int request_mask, unsigned int query_flags);
+int exfat_file_fsync(struct file *file, loff_t start, loff_t end, int datasync);
 
 /* namei.c */
 extern const struct dentry_operations exfat_dentry_ops;
index fce03f3..3b7fea4 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/slab.h>
 #include <linux/cred.h>
 #include <linux/buffer_head.h>
+#include <linux/blkdev.h>
 
 #include "exfat_raw.h"
 #include "exfat_fs.h"
@@ -346,12 +347,28 @@ out:
        return error;
 }
 
+int exfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
+{
+       struct inode *inode = filp->f_mapping->host;
+       int err;
+
+       err = __generic_file_fsync(filp, start, end, datasync);
+       if (err)
+               return err;
+
+       err = sync_blockdev(inode->i_sb->s_bdev);
+       if (err)
+               return err;
+
+       return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+}
+
 const struct file_operations exfat_file_operations = {
        .llseek         = generic_file_llseek,
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
-       .fsync          = generic_file_fsync,
+       .fsync          = exfat_file_fsync,
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
 };
index 5b0f353..2b9e210 100644 (file)
@@ -975,7 +975,6 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
                goto unlock;
        }
 
-       exfat_set_vol_flags(sb, VOL_DIRTY);
        exfat_chain_set(&clu_to_free, ei->start_clu,
                EXFAT_B_TO_CLU_ROUND_UP(i_size_read(inode), sbi), ei->flags);
 
@@ -1002,6 +1001,7 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
        num_entries++;
        brelse(bh);
 
+       exfat_set_vol_flags(sb, VOL_DIRTY);
        err = exfat_remove_entries(dir, &cdir, entry, 0, num_entries);
        if (err) {
                exfat_err(sb, "failed to exfat_remove_entries : err(%d)", err);
@@ -1077,10 +1077,14 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
 
                epold = exfat_get_dentry(sb, p_dir, oldentry + 1, &old_bh,
                        &sector_old);
+               if (!epold)
+                       return -EIO;
                epnew = exfat_get_dentry(sb, p_dir, newentry + 1, &new_bh,
                        &sector_new);
-               if (!epold || !epnew)
+               if (!epnew) {
+                       brelse(old_bh);
                        return -EIO;
+               }
 
                memcpy(epnew, epold, DENTRY_SIZE);
                exfat_update_bh(sb, new_bh, sync);
@@ -1161,10 +1165,14 @@ static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir,
 
        epmov = exfat_get_dentry(sb, p_olddir, oldentry + 1, &mov_bh,
                &sector_mov);
+       if (!epmov)
+               return -EIO;
        epnew = exfat_get_dentry(sb, p_newdir, newentry + 1, &new_bh,
                &sector_new);
-       if (!epmov || !epnew)
+       if (!epnew) {
+               brelse(mov_bh);
                return -EIO;
+       }
 
        memcpy(epnew, epmov, DENTRY_SIZE);
        exfat_update_bh(sb, new_bh, IS_DIRSYNC(inode));
index e650e65..253a924 100644 (file)
@@ -693,10 +693,20 @@ static void exfat_free(struct fs_context *fc)
        }
 }
 
+static int exfat_reconfigure(struct fs_context *fc)
+{
+       fc->sb_flags |= SB_NODIRATIME;
+
+       /* volume flag will be updated in exfat_sync_fs */
+       sync_filesystem(fc->root->d_sb);
+       return 0;
+}
+
 static const struct fs_context_operations exfat_context_ops = {
        .parse_param    = exfat_parse_param,
        .get_tree       = exfat_get_tree,
        .free           = exfat_free,
+       .reconfigure    = exfat_reconfigure,
 };
 
 static int exfat_init_fs_context(struct fs_context *fc)
index 4ccb3c9..2e42f47 100644 (file)
@@ -9,7 +9,8 @@ ext4-y  := balloc.o bitmap.o block_validity.o dir.o ext4_jbd2.o extents.o \
                extents_status.o file.o fsmap.o fsync.o hash.o ialloc.o \
                indirect.o inline.o inode.o ioctl.o mballoc.o migrate.o \
                mmp.o move_extent.o namei.o page-io.o readpage.o resize.o \
-               super.o symlink.o sysfs.o xattr.o xattr_trusted.o xattr_user.o
+               super.o symlink.o sysfs.o xattr.o xattr_hurd.o xattr_trusted.o \
+               xattr_user.o
 
 ext4-$(CONFIG_EXT4_FS_POSIX_ACL)       += acl.o
 ext4-$(CONFIG_EXT4_FS_SECURITY)                += xattr_security.o
index c654205..1d82336 100644 (file)
@@ -675,6 +675,7 @@ static int ext4_d_compare(const struct dentry *dentry, unsigned int len,
        struct qstr qstr = {.name = str, .len = len };
        const struct dentry *parent = READ_ONCE(dentry->d_parent);
        const struct inode *inode = READ_ONCE(parent->d_inode);
+       char strbuf[DNAME_INLINE_LEN];
 
        if (!inode || !IS_CASEFOLDED(inode) ||
            !EXT4_SB(inode->i_sb)->s_encoding) {
@@ -683,6 +684,21 @@ static int ext4_d_compare(const struct dentry *dentry, unsigned int len,
                return memcmp(str, name->name, len);
        }
 
+       /*
+        * If the dentry name is stored in-line, then it may be concurrently
+        * modified by a rename.  If this happens, the VFS will eventually retry
+        * the lookup, so it doesn't matter what ->d_compare() returns.
+        * However, it's unsafe to call utf8_strncasecmp() with an unstable
+        * string.  Therefore, we have to copy the name into a temporary buffer.
+        */
+       if (len <= DNAME_INLINE_LEN - 1) {
+               memcpy(strbuf, str, len);
+               strbuf[len] = 0;
+               qstr.name = strbuf;
+               /* prevent compiler from optimizing out the temporary buffer */
+               barrier();
+       }
+
        return ext4_ci_compare(inode, name, &qstr, false);
 }
 
index b08841f..42f5060 100644 (file)
@@ -426,13 +426,16 @@ struct flex_groups {
 #define EXT4_VERITY_FL                 0x00100000 /* Verity protected inode */
 #define EXT4_EA_INODE_FL               0x00200000 /* Inode used for large EA */
 /* 0x00400000 was formerly EXT4_EOFBLOCKS_FL */
+
+#define EXT4_DAX_FL                    0x02000000 /* Inode is DAX */
+
 #define EXT4_INLINE_DATA_FL            0x10000000 /* Inode has inline data. */
 #define EXT4_PROJINHERIT_FL            0x20000000 /* Create with parents projid */
 #define EXT4_CASEFOLD_FL               0x40000000 /* Casefolded directory */
 #define EXT4_RESERVED_FL               0x80000000 /* reserved for ext4 lib */
 
-#define EXT4_FL_USER_VISIBLE           0x705BDFFF /* User visible flags */
-#define EXT4_FL_USER_MODIFIABLE                0x604BC0FF /* User modifiable flags */
+#define EXT4_FL_USER_VISIBLE           0x725BDFFF /* User visible flags */
+#define EXT4_FL_USER_MODIFIABLE                0x624BC0FF /* User modifiable flags */
 
 /* Flags we can manipulate with through EXT4_IOC_FSSETXATTR */
 #define EXT4_FL_XFLAG_VISIBLE          (EXT4_SYNC_FL | \
@@ -440,14 +443,16 @@ struct flex_groups {
                                         EXT4_APPEND_FL | \
                                         EXT4_NODUMP_FL | \
                                         EXT4_NOATIME_FL | \
-                                        EXT4_PROJINHERIT_FL)
+                                        EXT4_PROJINHERIT_FL | \
+                                        EXT4_DAX_FL)
 
 /* Flags that should be inherited by new inodes from their parent. */
 #define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\
                           EXT4_SYNC_FL | EXT4_NODUMP_FL | EXT4_NOATIME_FL |\
                           EXT4_NOCOMPR_FL | EXT4_JOURNAL_DATA_FL |\
                           EXT4_NOTAIL_FL | EXT4_DIRSYNC_FL |\
-                          EXT4_PROJINHERIT_FL | EXT4_CASEFOLD_FL)
+                          EXT4_PROJINHERIT_FL | EXT4_CASEFOLD_FL |\
+                          EXT4_DAX_FL)
 
 /* Flags that are appropriate for regular files (all but dir-specific ones). */
 #define EXT4_REG_FLMASK (~(EXT4_DIRSYNC_FL | EXT4_TOPDIR_FL | EXT4_CASEFOLD_FL |\
@@ -459,6 +464,10 @@ struct flex_groups {
 /* The only flags that should be swapped */
 #define EXT4_FL_SHOULD_SWAP (EXT4_HUGE_FILE_FL | EXT4_EXTENTS_FL)
 
+/* Flags which are mutually exclusive to DAX */
+#define EXT4_DAX_MUT_EXCL (EXT4_VERITY_FL | EXT4_ENCRYPT_FL |\
+                          EXT4_JOURNAL_DATA_FL)
+
 /* Mask out flags that are inappropriate for the given type of inode. */
 static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
 {
@@ -499,6 +508,7 @@ enum {
        EXT4_INODE_VERITY       = 20,   /* Verity protected inode */
        EXT4_INODE_EA_INODE     = 21,   /* Inode used for large EA */
 /* 22 was formerly EXT4_INODE_EOFBLOCKS */
+       EXT4_INODE_DAX          = 25,   /* Inode is DAX */
        EXT4_INODE_INLINE_DATA  = 28,   /* Data in inode. */
        EXT4_INODE_PROJINHERIT  = 29,   /* Create with parents projid */
        EXT4_INODE_CASEFOLD     = 30,   /* Casefolded directory */
@@ -1135,9 +1145,9 @@ struct ext4_inode_info {
 #define EXT4_MOUNT_MINIX_DF            0x00080 /* Mimics the Minix statfs */
 #define EXT4_MOUNT_NOLOAD              0x00100 /* Don't use existing journal*/
 #ifdef CONFIG_FS_DAX
-#define EXT4_MOUNT_DAX                 0x00200 /* Direct Access */
+#define EXT4_MOUNT_DAX_ALWAYS          0x00200 /* Direct Access */
 #else
-#define EXT4_MOUNT_DAX                 0
+#define EXT4_MOUNT_DAX_ALWAYS          0
 #endif
 #define EXT4_MOUNT_DATA_FLAGS          0x00C00 /* Mode for data writes: */
 #define EXT4_MOUNT_JOURNAL_DATA                0x00400 /* Write data to journal */
@@ -1180,6 +1190,8 @@ struct ext4_inode_info {
                                                      blocks */
 #define EXT4_MOUNT2_HURD_COMPAT                0x00000004 /* Support HURD-castrated
                                                      file systems */
+#define EXT4_MOUNT2_DAX_NEVER          0x00000008 /* Do not allow Direct Access */
+#define EXT4_MOUNT2_DAX_INODE          0x00000010 /* For printing options only */
 
 #define EXT4_MOUNT2_EXPLICIT_JOURNAL_CHECKSUM  0x00000008 /* User explicitly
                                                specified journal checksum */
@@ -1992,6 +2004,7 @@ static inline bool ext4_has_incompat_features(struct super_block *sb)
  */
 #define EXT4_FLAGS_RESIZING    0
 #define EXT4_FLAGS_SHUTDOWN    1
+#define EXT4_FLAGS_BDEV_IS_DAX 2
 
 static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi)
 {
@@ -2705,7 +2718,7 @@ extern int ext4_can_truncate(struct inode *inode);
 extern int ext4_truncate(struct inode *);
 extern int ext4_break_layouts(struct inode *);
 extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
-extern void ext4_set_inode_flags(struct inode *);
+extern void ext4_set_inode_flags(struct inode *, bool init);
 extern int ext4_alloc_da_blocks(struct inode *inode);
 extern void ext4_set_aops(struct inode *inode);
 extern int ext4_writepage_trans_blocks(struct inode *);
index 7d088ff..221f240 100644 (file)
@@ -2844,7 +2844,7 @@ again:
                         * in use to avoid freeing it when removing blocks.
                         */
                        if (sbi->s_cluster_ratio > 1) {
-                               pblk = ext4_ext_pblock(ex) + end - ee_block + 2;
+                               pblk = ext4_ext_pblock(ex) + end - ee_block + 1;
                                partial.pclu = EXT4_B2C(sbi, pblk);
                                partial.state = nofree;
                        }
index 54d324e..df25d38 100644 (file)
@@ -1116,7 +1116,7 @@ got:
        ei->i_block_group = group;
        ei->i_last_alloc_group = ~0;
 
-       ext4_set_inode_flags(inode);
+       ext4_set_inode_flags(inode, true);
        if (IS_DIRSYNC(inode))
                ext4_handle_sync(handle);
        if (insert_inode_locked(inode) < 0) {
index 40ec5c7..10dd470 100644 (file)
@@ -4403,9 +4403,11 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
                !ext4_test_inode_state(inode, EXT4_STATE_XATTR));
 }
 
-static bool ext4_should_use_dax(struct inode *inode)
+static bool ext4_should_enable_dax(struct inode *inode)
 {
-       if (!test_opt(inode->i_sb, DAX))
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+
+       if (test_opt2(inode->i_sb, DAX_NEVER))
                return false;
        if (!S_ISREG(inode->i_mode))
                return false;
@@ -4417,14 +4419,21 @@ static bool ext4_should_use_dax(struct inode *inode)
                return false;
        if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY))
                return false;
-       return true;
+       if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags))
+               return false;
+       if (test_opt(inode->i_sb, DAX_ALWAYS))
+               return true;
+
+       return ext4_test_inode_flag(inode, EXT4_INODE_DAX);
 }
 
-void ext4_set_inode_flags(struct inode *inode)
+void ext4_set_inode_flags(struct inode *inode, bool init)
 {
        unsigned int flags = EXT4_I(inode)->i_flags;
        unsigned int new_fl = 0;
 
+       WARN_ON_ONCE(IS_DAX(inode) && init);
+
        if (flags & EXT4_SYNC_FL)
                new_fl |= S_SYNC;
        if (flags & EXT4_APPEND_FL)
@@ -4435,8 +4444,13 @@ void ext4_set_inode_flags(struct inode *inode)
                new_fl |= S_NOATIME;
        if (flags & EXT4_DIRSYNC_FL)
                new_fl |= S_DIRSYNC;
-       if (ext4_should_use_dax(inode))
+
+       /* Because of the way inode_set_flags() works we must preserve S_DAX
+        * here if already set. */
+       new_fl |= (inode->i_flags & S_DAX);
+       if (init && ext4_should_enable_dax(inode))
                new_fl |= S_DAX;
+
        if (flags & EXT4_ENCRYPT_FL)
                new_fl |= S_ENCRYPTED;
        if (flags & EXT4_CASEFOLD_FL)
@@ -4650,7 +4664,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
                 * not initialized on a new filesystem. */
        }
        ei->i_flags = le32_to_cpu(raw_inode->i_flags);
-       ext4_set_inode_flags(inode);
+       ext4_set_inode_flags(inode, true);
        inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
        ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
        if (ext4_has_feature_64bit(sb))
index 2162db0..999cf6a 100644 (file)
@@ -292,6 +292,38 @@ static int ext4_ioctl_check_immutable(struct inode *inode, __u32 new_projid,
        return 0;
 }
 
+static void ext4_dax_dontcache(struct inode *inode, unsigned int flags)
+{
+       struct ext4_inode_info *ei = EXT4_I(inode);
+
+       if (S_ISDIR(inode->i_mode))
+               return;
+
+       if (test_opt2(inode->i_sb, DAX_NEVER) ||
+           test_opt(inode->i_sb, DAX_ALWAYS))
+               return;
+
+       if ((ei->i_flags ^ flags) & EXT4_DAX_FL)
+               d_mark_dontcache(inode);
+}
+
+static bool dax_compatible(struct inode *inode, unsigned int oldflags,
+                          unsigned int flags)
+{
+       if (flags & EXT4_DAX_FL) {
+               if ((oldflags & EXT4_DAX_MUT_EXCL) ||
+                    ext4_test_inode_state(inode,
+                                         EXT4_STATE_VERITY_IN_PROGRESS)) {
+                       return false;
+               }
+       }
+
+       if ((flags & EXT4_DAX_MUT_EXCL) && (oldflags & EXT4_DAX_FL))
+                       return false;
+
+       return true;
+}
+
 static int ext4_ioctl_setflags(struct inode *inode,
                               unsigned int flags)
 {
@@ -300,7 +332,6 @@ static int ext4_ioctl_setflags(struct inode *inode,
        int err = -EPERM, migrate = 0;
        struct ext4_iloc iloc;
        unsigned int oldflags, mask, i;
-       unsigned int jflag;
        struct super_block *sb = inode->i_sb;
 
        /* Is it quota file? Do not allow user to mess with it */
@@ -309,9 +340,6 @@ static int ext4_ioctl_setflags(struct inode *inode,
 
        oldflags = ei->i_flags;
 
-       /* The JOURNAL_DATA flag is modifiable only by root */
-       jflag = flags & EXT4_JOURNAL_DATA_FL;
-
        err = vfs_ioc_setflags_prepare(inode, oldflags, flags);
        if (err)
                goto flags_out;
@@ -320,10 +348,16 @@ static int ext4_ioctl_setflags(struct inode *inode,
         * The JOURNAL_DATA flag can only be changed by
         * the relevant capability.
         */
-       if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
+       if ((flags ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
                if (!capable(CAP_SYS_RESOURCE))
                        goto flags_out;
        }
+
+       if (!dax_compatible(inode, oldflags, flags)) {
+               err = -EOPNOTSUPP;
+               goto flags_out;
+       }
+
        if ((flags ^ oldflags) & EXT4_EXTENTS_FL)
                migrate = 1;
 
@@ -369,6 +403,8 @@ static int ext4_ioctl_setflags(struct inode *inode,
        if (err)
                goto flags_err;
 
+       ext4_dax_dontcache(inode, flags);
+
        for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
                if (!(mask & EXT4_FL_USER_MODIFIABLE))
                        continue;
@@ -381,7 +417,8 @@ static int ext4_ioctl_setflags(struct inode *inode,
                        ext4_clear_inode_flag(inode, i);
        }
 
-       ext4_set_inode_flags(inode);
+       ext4_set_inode_flags(inode, false);
+
        inode->i_ctime = current_time(inode);
 
        err = ext4_mark_iloc_dirty(handle, inode, &iloc);
@@ -390,17 +427,18 @@ flags_err:
        if (err)
                goto flags_out;
 
-       if ((jflag ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
+       if ((flags ^ oldflags) & (EXT4_JOURNAL_DATA_FL)) {
                /*
                 * Changes to the journaling mode can cause unsafe changes to
-                * S_DAX if we are using the DAX mount option.
+                * S_DAX if the inode is DAX
                 */
-               if (test_opt(inode->i_sb, DAX)) {
+               if (IS_DAX(inode)) {
                        err = -EBUSY;
                        goto flags_out;
                }
 
-               err = ext4_change_inode_journal_flag(inode, jflag);
+               err = ext4_change_inode_journal_flag(inode,
+                                                    flags & EXT4_JOURNAL_DATA_FL);
                if (err)
                        goto flags_out;
        }
@@ -527,12 +565,15 @@ static inline __u32 ext4_iflags_to_xflags(unsigned long iflags)
                xflags |= FS_XFLAG_NOATIME;
        if (iflags & EXT4_PROJINHERIT_FL)
                xflags |= FS_XFLAG_PROJINHERIT;
+       if (iflags & EXT4_DAX_FL)
+               xflags |= FS_XFLAG_DAX;
        return xflags;
 }
 
 #define EXT4_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \
                                  FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \
-                                 FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)
+                                 FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT | \
+                                 FS_XFLAG_DAX)
 
 /* Transfer xflags flags to internal */
 static inline unsigned long ext4_xflags_to_iflags(__u32 xflags)
@@ -551,6 +592,8 @@ static inline unsigned long ext4_xflags_to_iflags(__u32 xflags)
                iflags |= EXT4_NOATIME_FL;
        if (xflags & FS_XFLAG_PROJINHERIT)
                iflags |= EXT4_PROJINHERIT_FL;
+       if (xflags & FS_XFLAG_DAX)
+               iflags |= EXT4_DAX_FL;
 
        return iflags;
 }
index a908311..c0a331e 100644 (file)
@@ -4708,7 +4708,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
        }
 
        ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
-       seq = *this_cpu_ptr(&discard_pa_seq);
+       seq = this_cpu_read(discard_pa_seq);
        if (!ext4_mb_use_preallocated(ac)) {
                ac->ac_op = EXT4_MB_HISTORY_ALLOC;
                ext4_mb_normalize_request(ac, ar);
index c668f6b..330957e 100644 (file)
@@ -522,9 +522,6 @@ static void ext4_handle_error(struct super_block *sb)
                smp_wmb();
                sb->s_flags |= SB_RDONLY;
        } else if (test_opt(sb, ERRORS_PANIC)) {
-               if (EXT4_SB(sb)->s_journal &&
-                 !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
-                       return;
                panic("EXT4-fs (device %s): panic forced after error\n",
                        sb->s_id);
        }
@@ -725,23 +722,20 @@ void __ext4_abort(struct super_block *sb, const char *function,
        va_end(args);
 
        if (sb_rdonly(sb) == 0) {
-               ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
                EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
+               if (EXT4_SB(sb)->s_journal)
+                       jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
+
+               ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
                /*
                 * Make sure updated value of ->s_mount_flags will be visible
                 * before ->s_flags update
                 */
                smp_wmb();
                sb->s_flags |= SB_RDONLY;
-               if (EXT4_SB(sb)->s_journal)
-                       jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
        }
-       if (test_opt(sb, ERRORS_PANIC) && !system_going_down()) {
-               if (EXT4_SB(sb)->s_journal &&
-                 !(EXT4_SB(sb)->s_journal->j_flags & JBD2_REC_ERR))
-                       return;
+       if (test_opt(sb, ERRORS_PANIC) && !system_going_down())
                panic("EXT4-fs panic from previous error\n");
-       }
 }
 
 void __ext4_msg(struct super_block *sb,
@@ -1324,6 +1318,9 @@ static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
        if (WARN_ON_ONCE(IS_DAX(inode) && i_size_read(inode)))
                return -EINVAL;
 
+       if (ext4_test_inode_flag(inode, EXT4_INODE_DAX))
+               return -EOPNOTSUPP;
+
        res = ext4_convert_inline_data(inode);
        if (res)
                return res;
@@ -1349,7 +1346,7 @@ static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
                         * Update inode->i_flags - S_ENCRYPTED will be enabled,
                         * S_DAX may be disabled
                         */
-                       ext4_set_inode_flags(inode);
+                       ext4_set_inode_flags(inode, false);
                }
                return res;
        }
@@ -1376,7 +1373,7 @@ retry:
                 * Update inode->i_flags - S_ENCRYPTED will be enabled,
                 * S_DAX may be disabled
                 */
-               ext4_set_inode_flags(inode);
+               ext4_set_inode_flags(inode, false);
                res = ext4_mark_inode_dirty(handle, inode);
                if (res)
                        EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
@@ -1514,7 +1511,8 @@ enum {
        Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
        Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
        Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
-       Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
+       Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version,
+       Opt_dax, Opt_dax_always, Opt_dax_inode, Opt_dax_never,
        Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
        Opt_nowarn_on_error, Opt_mblk_io_submit,
        Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
@@ -1581,6 +1579,9 @@ static const match_table_t tokens = {
        {Opt_nobarrier, "nobarrier"},
        {Opt_i_version, "i_version"},
        {Opt_dax, "dax"},
+       {Opt_dax_always, "dax=always"},
+       {Opt_dax_inode, "dax=inode"},
+       {Opt_dax_never, "dax=never"},
        {Opt_stripe, "stripe=%u"},
        {Opt_delalloc, "delalloc"},
        {Opt_warn_on_error, "warn_on_error"},
@@ -1729,6 +1730,7 @@ static int clear_qf_name(struct super_block *sb, int qtype)
 #define MOPT_NO_EXT3   0x0200
 #define MOPT_EXT4_ONLY (MOPT_NO_EXT2 | MOPT_NO_EXT3)
 #define MOPT_STRING    0x0400
+#define MOPT_SKIP      0x0800
 
 static const struct mount_opts {
        int     token;
@@ -1778,7 +1780,13 @@ static const struct mount_opts {
        {Opt_min_batch_time, 0, MOPT_GTE0},
        {Opt_inode_readahead_blks, 0, MOPT_GTE0},
        {Opt_init_itable, 0, MOPT_GTE0},
-       {Opt_dax, EXT4_MOUNT_DAX, MOPT_SET},
+       {Opt_dax, EXT4_MOUNT_DAX_ALWAYS, MOPT_SET | MOPT_SKIP},
+       {Opt_dax_always, EXT4_MOUNT_DAX_ALWAYS,
+               MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP},
+       {Opt_dax_inode, EXT4_MOUNT2_DAX_INODE,
+               MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP},
+       {Opt_dax_never, EXT4_MOUNT2_DAX_NEVER,
+               MOPT_EXT4_ONLY | MOPT_SET | MOPT_SKIP},
        {Opt_stripe, 0, MOPT_GTE0},
        {Opt_resuid, 0, MOPT_GTE0},
        {Opt_resgid, 0, MOPT_GTE0},
@@ -2123,13 +2131,56 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
                }
                sbi->s_jquota_fmt = m->mount_opt;
 #endif
-       } else if (token == Opt_dax) {
+       } else if (token == Opt_dax || token == Opt_dax_always ||
+                  token == Opt_dax_inode || token == Opt_dax_never) {
 #ifdef CONFIG_FS_DAX
-               ext4_msg(sb, KERN_WARNING,
-               "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
-               sbi->s_mount_opt |= m->mount_opt;
+               switch (token) {
+               case Opt_dax:
+               case Opt_dax_always:
+                       if (is_remount &&
+                           (!(sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
+                            (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER))) {
+                       fail_dax_change_remount:
+                               ext4_msg(sb, KERN_ERR, "can't change "
+                                        "dax mount option while remounting");
+                               return -1;
+                       }
+                       if (is_remount &&
+                           (test_opt(sb, DATA_FLAGS) ==
+                            EXT4_MOUNT_JOURNAL_DATA)) {
+                                   ext4_msg(sb, KERN_ERR, "can't mount with "
+                                            "both data=journal and dax");
+                                   return -1;
+                       }
+                       ext4_msg(sb, KERN_WARNING,
+                               "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
+                       sbi->s_mount_opt |= EXT4_MOUNT_DAX_ALWAYS;
+                       sbi->s_mount_opt2 &= ~EXT4_MOUNT2_DAX_NEVER;
+                       break;
+               case Opt_dax_never:
+                       if (is_remount &&
+                           (!(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
+                            (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS)))
+                               goto fail_dax_change_remount;
+                       sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_NEVER;
+                       sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS;
+                       break;
+               case Opt_dax_inode:
+                       if (is_remount &&
+                           ((sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) ||
+                            (sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_NEVER) ||
+                            !(sbi->s_mount_opt2 & EXT4_MOUNT2_DAX_INODE)))
+                               goto fail_dax_change_remount;
+                       sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS;
+                       sbi->s_mount_opt2 &= ~EXT4_MOUNT2_DAX_NEVER;
+                       /* Strictly for printing options */
+                       sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_INODE;
+                       break;
+               }
 #else
                ext4_msg(sb, KERN_INFO, "dax option not supported");
+               sbi->s_mount_opt2 |= EXT4_MOUNT2_DAX_NEVER;
+               sbi->s_mount_opt &= ~EXT4_MOUNT_DAX_ALWAYS;
                return -1;
 #endif
        } else if (token == Opt_data_err_abort) {
@@ -2293,7 +2344,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
        for (m = ext4_mount_opts; m->token != Opt_err; m++) {
                int want_set = m->flags & MOPT_SET;
                if (((m->flags & (MOPT_SET|MOPT_CLEAR)) == 0) ||
-                   (m->flags & MOPT_CLEAR_ERR))
+                   (m->flags & MOPT_CLEAR_ERR) || m->flags & MOPT_SKIP)
                        continue;
                if (!nodefs && !(m->mount_opt & (sbi->s_mount_opt ^ def_mount_opt)))
                        continue; /* skip if same as the default */
@@ -2353,6 +2404,17 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
 
        fscrypt_show_test_dummy_encryption(seq, sep, sb);
 
+       if (test_opt(sb, DAX_ALWAYS)) {
+               if (IS_EXT2_SB(sb))
+                       SEQ_OPTS_PUTS("dax");
+               else
+                       SEQ_OPTS_PUTS("dax=always");
+       } else if (test_opt2(sb, DAX_NEVER)) {
+               SEQ_OPTS_PUTS("dax=never");
+       } else if (test_opt2(sb, DAX_INODE)) {
+               SEQ_OPTS_PUTS("dax=inode");
+       }
+
        ext4_show_quota_options(seq, sb);
        return 0;
 }
@@ -2383,6 +2445,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
                ext4_msg(sb, KERN_ERR, "revision level too high, "
                         "forcing read-only mode");
                err = -EROFS;
+               goto done;
        }
        if (read_only)
                goto done;
@@ -4017,7 +4080,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                                 "both data=journal and delalloc");
                        goto failed_mount;
                }
-               if (test_opt(sb, DAX)) {
+               if (test_opt(sb, DAX_ALWAYS)) {
                        ext4_msg(sb, KERN_ERR, "can't mount with "
                                 "both data=journal and dax");
                        goto failed_mount;
@@ -4127,13 +4190,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                goto failed_mount;
        }
 
-       if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
+       if (bdev_dax_supported(sb->s_bdev, blocksize))
+               set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags);
+
+       if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) {
                if (ext4_has_feature_inline_data(sb)) {
                        ext4_msg(sb, KERN_ERR, "Cannot use DAX on a filesystem"
                                        " that may contain inline data");
                        goto failed_mount;
                }
-               if (!bdev_dax_supported(sb->s_bdev, blocksize)) {
+               if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags)) {
                        ext4_msg(sb, KERN_ERR,
                                "DAX unsupported by block device.");
                        goto failed_mount;
@@ -5447,12 +5513,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                        err = -EINVAL;
                        goto restore_opts;
                }
-               if (test_opt(sb, DAX)) {
-                       ext4_msg(sb, KERN_ERR, "can't mount with "
-                                "both data=journal and dax");
-                       err = -EINVAL;
-                       goto restore_opts;
-               }
        } else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) {
                if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
                        ext4_msg(sb, KERN_ERR, "can't mount with "
@@ -5468,12 +5528,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                goto restore_opts;
        }
 
-       if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
-               ext4_msg(sb, KERN_WARNING, "warning: refusing change of "
-                       "dax flag with busy inodes while remounting");
-               sbi->s_mount_opt ^= EXT4_MOUNT_DAX;
-       }
-
        if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
                ext4_abort(sb, EXT4_ERR_ESHUTDOWN, "Abort forced by user");
 
index dec1244..bbd5e7e 100644 (file)
@@ -113,6 +113,9 @@ static int ext4_begin_enable_verity(struct file *filp)
        handle_t *handle;
        int err;
 
+       if (IS_DAX(inode) || ext4_test_inode_flag(inode, EXT4_INODE_DAX))
+               return -EINVAL;
+
        if (ext4_verity_in_progress(inode))
                return -EBUSY;
 
@@ -241,7 +244,7 @@ static int ext4_end_enable_verity(struct file *filp, const void *desc,
                if (err)
                        goto out_stop;
                ext4_set_inode_flag(inode, EXT4_INODE_VERITY);
-               ext4_set_inode_flags(inode);
+               ext4_set_inode_flags(inode, false);
                err = ext4_mark_iloc_dirty(handle, inode, &iloc);
        }
 out_stop:
index 9b29a40..7d2f657 100644 (file)
@@ -93,6 +93,7 @@ static const struct xattr_handler * const ext4_xattr_handler_map[] = {
 #ifdef CONFIG_EXT4_FS_SECURITY
        [EXT4_XATTR_INDEX_SECURITY]          = &ext4_xattr_security_handler,
 #endif
+       [EXT4_XATTR_INDEX_HURD]              = &ext4_xattr_hurd_handler,
 };
 
 const struct xattr_handler *ext4_xattr_handlers[] = {
@@ -105,6 +106,7 @@ const struct xattr_handler *ext4_xattr_handlers[] = {
 #ifdef CONFIG_EXT4_FS_SECURITY
        &ext4_xattr_security_handler,
 #endif
+       &ext4_xattr_hurd_handler,
        NULL
 };
 
index ffe21ac..730b91f 100644 (file)
@@ -124,6 +124,7 @@ struct ext4_xattr_inode_array {
 extern const struct xattr_handler ext4_xattr_user_handler;
 extern const struct xattr_handler ext4_xattr_trusted_handler;
 extern const struct xattr_handler ext4_xattr_security_handler;
+extern const struct xattr_handler ext4_xattr_hurd_handler;
 
 #define EXT4_XATTR_NAME_ENCRYPTION_CONTEXT "c"
 
diff --git a/fs/ext4/xattr_hurd.c b/fs/ext4/xattr_hurd.c
new file mode 100644 (file)
index 0000000..8cfa74a
--- /dev/null
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/fs/ext4/xattr_hurd.c
+ * Handler for extended gnu attributes for the Hurd.
+ *
+ * Copyright (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
+ * Copyright (C) 2020 by Jan (janneke) Nieuwenhuizen, <janneke@gnu.org>
+ */
+
+#include <linux/init.h>
+#include <linux/string.h>
+#include "ext4.h"
+#include "xattr.h"
+
+static bool
+ext4_xattr_hurd_list(struct dentry *dentry)
+{
+       return test_opt(dentry->d_sb, XATTR_USER);
+}
+
+static int
+ext4_xattr_hurd_get(const struct xattr_handler *handler,
+                   struct dentry *unused, struct inode *inode,
+                   const char *name, void *buffer, size_t size)
+{
+       if (!test_opt(inode->i_sb, XATTR_USER))
+               return -EOPNOTSUPP;
+
+       return ext4_xattr_get(inode, EXT4_XATTR_INDEX_HURD,
+                             name, buffer, size);
+}
+
+static int
+ext4_xattr_hurd_set(const struct xattr_handler *handler,
+                   struct dentry *unused, struct inode *inode,
+                   const char *name, const void *value,
+                   size_t size, int flags)
+{
+       if (!test_opt(inode->i_sb, XATTR_USER))
+               return -EOPNOTSUPP;
+
+       return ext4_xattr_set(inode, EXT4_XATTR_INDEX_HURD,
+                             name, value, size, flags);
+}
+
+const struct xattr_handler ext4_xattr_hurd_handler = {
+       .prefix = XATTR_HURD_PREFIX,
+       .list   = ext4_xattr_hurd_list,
+       .get    = ext4_xattr_hurd_get,
+       .set    = ext4_xattr_hurd_set,
+};
index 72c9560..68cd700 100644 (file)
@@ -468,21 +468,10 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
 }
 
 
-/**
- * __gfs2_readpage - readpage
- * @file: The file to read a page for
- * @page: The page to read
- *
- * This is the core of gfs2's readpage. It's used by the internal file
- * reading code as in that case we already hold the glock. Also it's
- * called by gfs2_readpage() once the required lock has been granted.
- */
-
 static int __gfs2_readpage(void *file, struct page *page)
 {
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
-
        int error;
 
        if (i_blocksize(page->mapping->host) == PAGE_SIZE &&
@@ -505,36 +494,11 @@ static int __gfs2_readpage(void *file, struct page *page)
  * gfs2_readpage - read a page of a file
  * @file: The file to read
  * @page: The page of the file
- *
- * This deals with the locking required. We have to unlock and
- * relock the page in order to get the locking in the right
- * order.
  */
 
 static int gfs2_readpage(struct file *file, struct page *page)
 {
-       struct address_space *mapping = page->mapping;
-       struct gfs2_inode *ip = GFS2_I(mapping->host);
-       struct gfs2_holder gh;
-       int error;
-
-       unlock_page(page);
-       gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
-       error = gfs2_glock_nq(&gh);
-       if (unlikely(error))
-               goto out;
-       error = AOP_TRUNCATED_PAGE;
-       lock_page(page);
-       if (page->mapping == mapping && !PageUptodate(page))
-               error = __gfs2_readpage(file, page);
-       else
-               unlock_page(page);
-       gfs2_glock_dq(&gh);
-out:
-       gfs2_holder_uninit(&gh);
-       if (error && error != AOP_TRUNCATED_PAGE)
-               lock_page(page);
-       return error;
+       return __gfs2_readpage(file, page);
 }
 
 /**
@@ -598,16 +562,9 @@ static void gfs2_readahead(struct readahead_control *rac)
 {
        struct inode *inode = rac->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
-       struct gfs2_holder gh;
 
-       gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
-       if (gfs2_glock_nq(&gh))
-               goto out_uninit;
        if (!gfs2_is_stuffed(ip))
                mpage_readahead(rac, gfs2_block_map);
-       gfs2_glock_dq(&gh);
-out_uninit:
-       gfs2_holder_uninit(&gh);
 }
 
 /**
index fe305e4..bebde53 100644 (file)
@@ -558,8 +558,29 @@ out_uninit:
        return block_page_mkwrite_return(ret);
 }
 
+static vm_fault_t gfs2_fault(struct vm_fault *vmf)
+{
+       struct inode *inode = file_inode(vmf->vma->vm_file);
+       struct gfs2_inode *ip = GFS2_I(inode);
+       struct gfs2_holder gh;
+       vm_fault_t ret;
+       int err;
+
+       gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
+       err = gfs2_glock_nq(&gh);
+       if (err) {
+               ret = block_page_mkwrite_return(err);
+               goto out_uninit;
+       }
+       ret = filemap_fault(vmf);
+       gfs2_glock_dq(&gh);
+out_uninit:
+       gfs2_holder_uninit(&gh);
+       return ret;
+}
+
 static const struct vm_operations_struct gfs2_vm_ops = {
-       .fault = filemap_fault,
+       .fault = gfs2_fault,
        .map_pages = filemap_map_pages,
        .page_mkwrite = gfs2_page_mkwrite,
 };
@@ -824,6 +845,9 @@ out_uninit:
 
 static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
+       struct gfs2_inode *ip;
+       struct gfs2_holder gh;
+       size_t written = 0;
        ssize_t ret;
 
        if (iocb->ki_flags & IOCB_DIRECT) {
@@ -832,7 +856,31 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
                        return ret;
                iocb->ki_flags &= ~IOCB_DIRECT;
        }
-       return generic_file_read_iter(iocb, to);
+       iocb->ki_flags |= IOCB_NOIO;
+       ret = generic_file_read_iter(iocb, to);
+       iocb->ki_flags &= ~IOCB_NOIO;
+       if (ret >= 0) {
+               if (!iov_iter_count(to))
+                       return ret;
+               written = ret;
+       } else {
+               if (ret != -EAGAIN)
+                       return ret;
+               if (iocb->ki_flags & IOCB_NOWAIT)
+                       return ret;
+       }
+       ip = GFS2_I(iocb->ki_filp->f_mapping->host);
+       gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
+       ret = gfs2_glock_nq(&gh);
+       if (ret)
+               goto out_uninit;
+       ret = generic_file_read_iter(iocb, to);
+       if (ret > 0)
+               written += ret;
+       gfs2_glock_dq(&gh);
+out_uninit:
+       gfs2_holder_uninit(&gh);
+       return written ? written : ret;
 }
 
 /**
index 2299dcc..8545024 100644 (file)
@@ -1899,7 +1899,10 @@ bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
 
 static void flush_delete_work(struct gfs2_glock *gl)
 {
-       flush_delayed_work(&gl->gl_delete);
+       if (cancel_delayed_work(&gl->gl_delete)) {
+               queue_delayed_work(gfs2_delete_workqueue,
+                                  &gl->gl_delete, 0);
+       }
        gfs2_glock_queue_work(gl, 0);
 }
 
index c848877..de1d5f1 100644 (file)
@@ -531,8 +531,7 @@ static int freeze_go_sync(struct gfs2_glock *gl)
        int error = 0;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 
-       if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
-           test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+       if (gl->gl_req == LM_ST_EXCLUSIVE && !gfs2_withdrawn(sdp)) {
                atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
                error = freeze_super(sdp->sd_vfs);
                if (error) {
@@ -545,8 +544,11 @@ static int freeze_go_sync(struct gfs2_glock *gl)
                        gfs2_assert_withdraw(sdp, 0);
                }
                queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
-               gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
-                              GFS2_LFC_FREEZE_GO_SYNC);
+               if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
+                       gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
+                                      GFS2_LFC_FREEZE_GO_SYNC);
+               else /* read-only mounts */
+                       atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
        }
        return 0;
 }
index 03ab11f..ca2ec02 100644 (file)
@@ -399,7 +399,6 @@ enum {
        GIF_QD_LOCKED           = 1,
        GIF_ALLOC_FAILED        = 2,
        GIF_SW_PAGED            = 3,
-       GIF_ORDERED             = 4,
        GIF_FREE_VFS_INODE      = 5,
        GIF_GLOP_PENDING        = 6,
        GIF_DEFERRED_DELETE     = 7,
index 370c3a4..6774865 100644 (file)
@@ -207,10 +207,11 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 
        if (no_formal_ino && ip->i_no_formal_ino &&
            no_formal_ino != ip->i_no_formal_ino) {
+               error = -ESTALE;
                if (inode->i_state & I_NEW)
                        goto fail;
                iput(inode);
-               return ERR_PTR(-ESTALE);
+               return ERR_PTR(error);
        }
 
        if (inode->i_state & I_NEW)
index 3e47344..a76e55b 100644 (file)
@@ -613,6 +613,12 @@ static int ip_cmp(void *priv, struct list_head *a, struct list_head *b)
        return 0;
 }
 
+static void __ordered_del_inode(struct gfs2_inode *ip)
+{
+       if (!list_empty(&ip->i_ordered))
+               list_del_init(&ip->i_ordered);
+}
+
 static void gfs2_ordered_write(struct gfs2_sbd *sdp)
 {
        struct gfs2_inode *ip;
@@ -623,8 +629,7 @@ static void gfs2_ordered_write(struct gfs2_sbd *sdp)
        while (!list_empty(&sdp->sd_log_ordered)) {
                ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
                if (ip->i_inode.i_mapping->nrpages == 0) {
-                       test_and_clear_bit(GIF_ORDERED, &ip->i_flags);
-                       list_del(&ip->i_ordered);
+                       __ordered_del_inode(ip);
                        continue;
                }
                list_move(&ip->i_ordered, &written);
@@ -643,8 +648,7 @@ static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
        spin_lock(&sdp->sd_ordered_lock);
        while (!list_empty(&sdp->sd_log_ordered)) {
                ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
-               list_del(&ip->i_ordered);
-               WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
+               __ordered_del_inode(ip);
                if (ip->i_inode.i_mapping->nrpages == 0)
                        continue;
                spin_unlock(&sdp->sd_ordered_lock);
@@ -659,8 +663,7 @@ void gfs2_ordered_del_inode(struct gfs2_inode *ip)
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 
        spin_lock(&sdp->sd_ordered_lock);
-       if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
-               list_del(&ip->i_ordered);
+       __ordered_del_inode(ip);
        spin_unlock(&sdp->sd_ordered_lock);
 }
 
@@ -1002,6 +1005,16 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
 
 out:
        if (gfs2_withdrawn(sdp)) {
+               /*
+                * If the tr_list is empty, we're withdrawing during a log
+                * flush that targets a transaction, but the transaction was
+                * never queued onto any of the ail lists. Here we add it to
+                * ail1 just so that ail_drain() will find and free it.
+                */
+               spin_lock(&sdp->sd_ail_lock);
+               if (tr && list_empty(&tr->tr_list))
+                       list_add(&tr->tr_list, &sdp->sd_ail1_list);
+               spin_unlock(&sdp->sd_ail_lock);
                ail_drain(sdp); /* frees all transactions */
                tr = NULL;
        }
index c1cd6ae..8965c75 100644 (file)
@@ -53,9 +53,9 @@ static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
        if (gfs2_is_jdata(ip) || !gfs2_is_ordered(sdp))
                return;
 
-       if (!test_bit(GIF_ORDERED, &ip->i_flags)) {
+       if (list_empty(&ip->i_ordered)) {
                spin_lock(&sdp->sd_ordered_lock);
-               if (!test_and_set_bit(GIF_ORDERED, &ip->i_flags))
+               if (list_empty(&ip->i_ordered))
                        list_add(&ip->i_ordered, &sdp->sd_log_ordered);
                spin_unlock(&sdp->sd_ordered_lock);
        }
index 733470c..c7393ee 100644 (file)
@@ -39,6 +39,7 @@ static void gfs2_init_inode_once(void *foo)
        atomic_set(&ip->i_sizehint, 0);
        init_rwsem(&ip->i_rw_mutex);
        INIT_LIST_HEAD(&ip->i_trunc_list);
+       INIT_LIST_HEAD(&ip->i_ordered);
        ip->i_qadata = NULL;
        gfs2_holder_mark_uninitialized(&ip->i_rgd_gh);
        memset(&ip->i_res, 0, sizeof(ip->i_res));
index 094f5fe..6d18d2c 100644 (file)
@@ -1136,7 +1136,18 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
                goto fail_per_node;
        }
 
-       if (!sb_rdonly(sb)) {
+       if (sb_rdonly(sb)) {
+               struct gfs2_holder freeze_gh;
+
+               error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+                                          LM_FLAG_NOEXP | GL_EXACT,
+                                          &freeze_gh);
+               if (error) {
+                       fs_err(sdp, "can't make FS RO: %d\n", error);
+                       goto fail_per_node;
+               }
+               gfs2_glock_dq_uninit(&freeze_gh);
+       } else {
                error = gfs2_make_fs_rw(sdp);
                if (error) {
                        fs_err(sdp, "can't make FS RW: %d\n", error);
index 96c345f..390ea79 100644 (file)
@@ -364,8 +364,8 @@ void gfs2_recover_func(struct work_struct *work)
                /* Acquire a shared hold on the freeze lock */
 
                error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
-                                          LM_FLAG_NOEXP | LM_FLAG_PRIORITY,
-                                          &thaw_gh);
+                                          LM_FLAG_NOEXP | LM_FLAG_PRIORITY |
+                                          GL_EXACT, &thaw_gh);
                if (error)
                        goto fail_gunlock_ji;
 
index 32d8d26..47d0ae1 100644 (file)
@@ -167,7 +167,8 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
        if (error)
                return error;
 
-       error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
+       error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+                                  LM_FLAG_NOEXP | GL_EXACT,
                                   &freeze_gh);
        if (error)
                goto fail_threads;
@@ -203,7 +204,6 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
        return 0;
 
 fail:
-       freeze_gh.gh_flags |= GL_NOCACHE;
        gfs2_glock_dq_uninit(&freeze_gh);
 fail_threads:
        if (sdp->sd_quotad_process)
@@ -430,7 +430,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
        }
 
        error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
-                                  GL_NOCACHE, &sdp->sd_freeze_gh);
+                                  LM_FLAG_NOEXP, &sdp->sd_freeze_gh);
        if (error)
                goto out;
 
@@ -613,13 +613,15 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
            !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
                if (!log_write_allowed) {
                        error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
-                                                  LM_ST_SHARED, GL_NOCACHE |
-                                                  LM_FLAG_TRY, &freeze_gh);
+                                                  LM_ST_SHARED, LM_FLAG_TRY |
+                                                  LM_FLAG_NOEXP | GL_EXACT,
+                                                  &freeze_gh);
                        if (error == GLR_TRYFAILED)
                                error = 0;
                } else {
                        error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
-                                                  LM_ST_SHARED, GL_NOCACHE,
+                                                  LM_ST_SHARED,
+                                                  LM_FLAG_NOEXP | GL_EXACT,
                                                   &freeze_gh);
                        if (error && !gfs2_withdrawn(sdp))
                                return error;
@@ -761,8 +763,8 @@ void gfs2_freeze_func(struct work_struct *work)
        struct super_block *sb = sdp->sd_vfs;
 
        atomic_inc(&sb->s_active);
-       error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
-                                  &freeze_gh);
+       error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+                                  LM_FLAG_NOEXP | GL_EXACT, &freeze_gh);
        if (error) {
                fs_info(sdp, "GFS2: couldn't get freeze lock : %d\n", error);
                gfs2_assert_withdraw(sdp, 0);
@@ -774,8 +776,6 @@ void gfs2_freeze_func(struct work_struct *work)
                                error);
                        gfs2_assert_withdraw(sdp, 0);
                }
-               if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
-                       freeze_gh.gh_flags |= GL_NOCACHE;
                gfs2_glock_dq_uninit(&freeze_gh);
        }
        deactivate_super(sb);
index 0b65a91..47c5f3a 100644 (file)
@@ -903,13 +903,15 @@ void io_wq_cancel_all(struct io_wq *wq)
 struct io_cb_cancel_data {
        work_cancel_fn *fn;
        void *data;
+       int nr_running;
+       int nr_pending;
+       bool cancel_all;
 };
 
 static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
 {
        struct io_cb_cancel_data *match = data;
        unsigned long flags;
-       bool ret = false;
 
        /*
         * Hold the lock to avoid ->cur_work going out of scope, caller
@@ -920,74 +922,90 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
            !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
            match->fn(worker->cur_work, match->data)) {
                send_sig(SIGINT, worker->task, 1);
-               ret = true;
+               match->nr_running++;
        }
        spin_unlock_irqrestore(&worker->lock, flags);
 
-       return ret;
+       return match->nr_running && !match->cancel_all;
 }
 
-static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
-                                           struct io_cb_cancel_data *match)
+static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
+                                      struct io_cb_cancel_data *match)
 {
        struct io_wq_work_node *node, *prev;
        struct io_wq_work *work;
        unsigned long flags;
-       bool found = false;
 
-       /*
-        * First check pending list, if we're lucky we can just remove it
-        * from there. CANCEL_OK means that the work is returned as-new,
-        * no completion will be posted for it.
-        */
+retry:
        spin_lock_irqsave(&wqe->lock, flags);
        wq_list_for_each(node, prev, &wqe->work_list) {
                work = container_of(node, struct io_wq_work, list);
+               if (!match->fn(work, match->data))
+                       continue;
 
-               if (match->fn(work, match->data)) {
-                       wq_list_del(&wqe->work_list, node, prev);
-                       found = true;
-                       break;
-               }
-       }
-       spin_unlock_irqrestore(&wqe->lock, flags);
-
-       if (found) {
+               wq_list_del(&wqe->work_list, node, prev);
+               spin_unlock_irqrestore(&wqe->lock, flags);
                io_run_cancel(work, wqe);
-               return IO_WQ_CANCEL_OK;
+               match->nr_pending++;
+               if (!match->cancel_all)
+                       return;
+
+               /* not safe to continue after unlock */
+               goto retry;
        }
+       spin_unlock_irqrestore(&wqe->lock, flags);
+}
 
-       /*
-        * Now check if a free (going busy) or busy worker has the work
-        * currently running. If we find it there, we'll return CANCEL_RUNNING
-        * as an indication that we attempt to signal cancellation. The
-        * completion will run normally in this case.
-        */
+static void io_wqe_cancel_running_work(struct io_wqe *wqe,
+                                      struct io_cb_cancel_data *match)
+{
        rcu_read_lock();
-       found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
+       io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
        rcu_read_unlock();
-       return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
 }
 
 enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
-                                 void *data)
+                                 void *data, bool cancel_all)
 {
        struct io_cb_cancel_data match = {
-               .fn     = cancel,
-               .data   = data,
+               .fn             = cancel,
+               .data           = data,
+               .cancel_all     = cancel_all,
        };
-       enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
        int node;
 
+       /*
+        * First check pending list, if we're lucky we can just remove it
+        * from there. CANCEL_OK means that the work is returned as-new,
+        * no completion will be posted for it.
+        */
        for_each_node(node) {
                struct io_wqe *wqe = wq->wqes[node];
 
-               ret = io_wqe_cancel_work(wqe, &match);
-               if (ret != IO_WQ_CANCEL_NOTFOUND)
-                       break;
+               io_wqe_cancel_pending_work(wqe, &match);
+               if (match.nr_pending && !match.cancel_all)
+                       return IO_WQ_CANCEL_OK;
        }
 
-       return ret;
+       /*
+        * Now check if a free (going busy) or busy worker has the work
+        * currently running. If we find it there, we'll return CANCEL_RUNNING
+        * as an indication that we attempt to signal cancellation. The
+        * completion will run normally in this case.
+        */
+       for_each_node(node) {
+               struct io_wqe *wqe = wq->wqes[node];
+
+               io_wqe_cancel_running_work(wqe, &match);
+               if (match.nr_running && !match.cancel_all)
+                       return IO_WQ_CANCEL_RUNNING;
+       }
+
+       if (match.nr_running)
+               return IO_WQ_CANCEL_RUNNING;
+       if (match.nr_pending)
+               return IO_WQ_CANCEL_OK;
+       return IO_WQ_CANCEL_NOTFOUND;
 }
 
 static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)
@@ -997,21 +1015,7 @@ static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)
 
 enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
 {
-       return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork);
-}
-
-static bool io_wq_pid_match(struct io_wq_work *work, void *data)
-{
-       pid_t pid = (pid_t) (unsigned long) data;
-
-       return work->task_pid == pid;
-}
-
-enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid)
-{
-       void *data = (void *) (unsigned long) pid;
-
-       return io_wq_cancel_cb(wq, io_wq_pid_match, data);
+       return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork, false);
 }
 
 struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
index 8e138fa..071f1a9 100644 (file)
@@ -90,7 +90,6 @@ struct io_wq_work {
        const struct cred *creds;
        struct fs_struct *fs;
        unsigned flags;
-       pid_t task_pid;
 };
 
 static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
@@ -125,12 +124,11 @@ static inline bool io_wq_is_hashed(struct io_wq_work *work)
 
 void io_wq_cancel_all(struct io_wq *wq);
 enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);
-enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid);
 
 typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
 
 enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
-                                       void *data);
+                                       void *data, bool cancel_all);
 
 struct task_struct *io_wq_get_task(struct io_wq *wq);
 
index 155f3d8..ca8abde 100644 (file)
@@ -541,6 +541,7 @@ enum {
        REQ_F_NO_FILE_TABLE_BIT,
        REQ_F_QUEUE_TIMEOUT_BIT,
        REQ_F_WORK_INITIALIZED_BIT,
+       REQ_F_TASK_PINNED_BIT,
 
        /* not a real bit, just to check we're not overflowing the space */
        __REQ_F_LAST_BIT,
@@ -598,6 +599,8 @@ enum {
        REQ_F_QUEUE_TIMEOUT     = BIT(REQ_F_QUEUE_TIMEOUT_BIT),
        /* io_wq_work is initialized */
        REQ_F_WORK_INITIALIZED  = BIT(REQ_F_WORK_INITIALIZED_BIT),
+       /* req->task is refcounted */
+       REQ_F_TASK_PINNED       = BIT(REQ_F_TASK_PINNED_BIT),
 };
 
 struct async_poll {
@@ -887,6 +890,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                                 struct io_uring_files_update *ip,
                                 unsigned nr_args);
 static int io_grab_files(struct io_kiocb *req);
+static void io_complete_rw_common(struct kiocb *kiocb, long res);
 static void io_cleanup_req(struct io_kiocb *req);
 static int io_file_get(struct io_submit_state *state, struct io_kiocb *req,
                       int fd, struct file **out_file, bool fixed);
@@ -910,6 +914,21 @@ struct sock *io_uring_get_socket(struct file *file)
 }
 EXPORT_SYMBOL(io_uring_get_socket);
 
+static void io_get_req_task(struct io_kiocb *req)
+{
+       if (req->flags & REQ_F_TASK_PINNED)
+               return;
+       get_task_struct(req->task);
+       req->flags |= REQ_F_TASK_PINNED;
+}
+
+/* not idempotent -- it doesn't clear REQ_F_TASK_PINNED */
+static void __io_put_req_task(struct io_kiocb *req)
+{
+       if (req->flags & REQ_F_TASK_PINNED)
+               put_task_struct(req->task);
+}
+
 static void io_file_put_work(struct work_struct *work);
 
 /*
@@ -1045,8 +1064,6 @@ static inline void io_req_work_grab_env(struct io_kiocb *req,
                }
                spin_unlock(&current->fs->lock);
        }
-       if (!req->work.task_pid)
-               req->work.task_pid = task_pid_vnr(current);
 }
 
 static inline void io_req_work_drop_env(struct io_kiocb *req)
@@ -1087,6 +1104,7 @@ static inline void io_prep_async_work(struct io_kiocb *req,
                        req->work.flags |= IO_WQ_WORK_UNBOUND;
        }
 
+       io_req_init_async(req);
        io_req_work_grab_env(req, def);
 
        *link = io_prep_linked_timeout(req);
@@ -1256,6 +1274,7 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
        if (cqe) {
                clear_bit(0, &ctx->sq_check_overflow);
                clear_bit(0, &ctx->cq_check_overflow);
+               ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
        }
        spin_unlock_irqrestore(&ctx->completion_lock, flags);
        io_cqring_ev_posted(ctx);
@@ -1293,6 +1312,7 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
                if (list_empty(&ctx->cq_overflow_list)) {
                        set_bit(0, &ctx->sq_check_overflow);
                        set_bit(0, &ctx->cq_check_overflow);
+                       ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
                }
                req->flags |= REQ_F_OVERFLOW;
                refcount_inc(&req->refs);
@@ -1398,9 +1418,7 @@ static void __io_req_aux_free(struct io_kiocb *req)
        kfree(req->io);
        if (req->file)
                io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
-       if (req->task)
-               put_task_struct(req->task);
-
+       __io_put_req_task(req);
        io_req_work_drop_env(req);
 }
 
@@ -1727,6 +1745,26 @@ static int io_put_kbuf(struct io_kiocb *req)
        return cflags;
 }
 
+static void io_iopoll_queue(struct list_head *again)
+{
+       struct io_kiocb *req;
+
+       do {
+               req = list_first_entry(again, struct io_kiocb, list);
+               list_del(&req->list);
+
+               /* shouldn't happen unless io_uring is dying, cancel reqs */
+               if (unlikely(!current->mm)) {
+                       io_complete_rw_common(&req->rw.kiocb, -EAGAIN);
+                       io_put_req(req);
+                       continue;
+               }
+
+               refcount_inc(&req->refs);
+               io_queue_async_work(req);
+       } while (!list_empty(again));
+}
+
 /*
  * Find and free completed poll iocbs
  */
@@ -1735,12 +1773,21 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 {
        struct req_batch rb;
        struct io_kiocb *req;
+       LIST_HEAD(again);
+
+       /* order with ->result store in io_complete_rw_iopoll() */
+       smp_rmb();
 
        rb.to_free = rb.need_iter = 0;
        while (!list_empty(done)) {
                int cflags = 0;
 
                req = list_first_entry(done, struct io_kiocb, list);
+               if (READ_ONCE(req->result) == -EAGAIN) {
+                       req->iopoll_completed = 0;
+                       list_move_tail(&req->list, &again);
+                       continue;
+               }
                list_del(&req->list);
 
                if (req->flags & REQ_F_BUFFER_SELECTED)
@@ -1758,18 +1805,9 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
        if (ctx->flags & IORING_SETUP_SQPOLL)
                io_cqring_ev_posted(ctx);
        io_free_req_many(ctx, &rb);
-}
-
-static void io_iopoll_queue(struct list_head *again)
-{
-       struct io_kiocb *req;
 
-       do {
-               req = list_first_entry(again, struct io_kiocb, list);
-               list_del(&req->list);
-               refcount_inc(&req->refs);
-               io_queue_async_work(req);
-       } while (!list_empty(again));
+       if (!list_empty(&again))
+               io_iopoll_queue(&again);
 }
 
 static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
@@ -1777,7 +1815,6 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 {
        struct io_kiocb *req, *tmp;
        LIST_HEAD(done);
-       LIST_HEAD(again);
        bool spin;
        int ret;
 
@@ -1803,13 +1840,6 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
                if (!list_empty(&done))
                        break;
 
-               if (req->result == -EAGAIN) {
-                       list_move_tail(&req->list, &again);
-                       continue;
-               }
-               if (!list_empty(&again))
-                       break;
-
                ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
                if (ret < 0)
                        break;
@@ -1822,9 +1852,6 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
        if (!list_empty(&done))
                io_iopoll_complete(ctx, nr_events, &done);
 
-       if (!list_empty(&again))
-               io_iopoll_queue(&again);
-
        return ret;
 }
 
@@ -1973,11 +2000,13 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
        if (kiocb->ki_flags & IOCB_WRITE)
                kiocb_end_write(req);
 
-       if (res != req->result)
+       if (res != -EAGAIN && res != req->result)
                req_set_fail_links(req);
-       req->result = res;
-       if (res != -EAGAIN)
-               WRITE_ONCE(req->iopoll_completed, 1);
+
+       WRITE_ONCE(req->result, res);
+       /* order with io_poll_complete() checking ->result */
+       smp_wmb();
+       WRITE_ONCE(req->iopoll_completed, 1);
 }
 
 /*
@@ -2650,8 +2679,8 @@ copy_iov:
                }
        }
 out_free:
-       kfree(iovec);
-       req->flags &= ~REQ_F_NEED_CLEANUP;
+       if (!(req->flags & REQ_F_NEED_CLEANUP))
+               kfree(iovec);
        return ret;
 }
 
@@ -2773,8 +2802,8 @@ copy_iov:
                }
        }
 out_free:
-       req->flags &= ~REQ_F_NEED_CLEANUP;
-       kfree(iovec);
+       if (!(req->flags & REQ_F_NEED_CLEANUP))
+               kfree(iovec);
        return ret;
 }
 
@@ -4045,6 +4074,29 @@ struct io_poll_table {
        int error;
 };
 
+static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
+{
+       struct task_struct *tsk = req->task;
+       struct io_ring_ctx *ctx = req->ctx;
+       int ret, notify = TWA_RESUME;
+
+       /*
+        * SQPOLL kernel thread doesn't need notification, just a wakeup.
+        * If we're not using an eventfd, then TWA_RESUME is always fine,
+        * as we won't have dependencies between request completions for
+        * other kernel wait conditions.
+        */
+       if (ctx->flags & IORING_SETUP_SQPOLL)
+               notify = 0;
+       else if (ctx->cq_ev_fd)
+               notify = TWA_SIGNAL;
+
+       ret = task_work_add(tsk, cb, notify);
+       if (!ret)
+               wake_up_process(tsk);
+       return ret;
+}
+
 static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
                           __poll_t mask, task_work_func_t func)
 {
@@ -4068,13 +4120,13 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
         * of executing it. We can't safely execute it anyway, as we may not
         * have the needed state needed for it anyway.
         */
-       ret = task_work_add(tsk, &req->task_work, true);
+       ret = io_req_task_work_add(req, &req->task_work);
        if (unlikely(ret)) {
                WRITE_ONCE(poll->canceled, true);
                tsk = io_wq_get_task(req->ctx->io_wq);
-               task_work_add(tsk, &req->task_work, true);
+               task_work_add(tsk, &req->task_work, 0);
+               wake_up_process(tsk);
        }
-       wake_up_process(tsk);
        return 1;
 }
 
@@ -4236,6 +4288,28 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
        __io_queue_proc(&pt->req->apoll->poll, pt, head);
 }
 
+static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
+{
+       struct mm_struct *mm = current->mm;
+
+       if (mm) {
+               kthread_unuse_mm(mm);
+               mmput(mm);
+       }
+}
+
+static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
+                                  struct io_kiocb *req)
+{
+       if (io_op_defs[req->opcode].needs_mm && !current->mm) {
+               if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
+                       return -EFAULT;
+               kthread_use_mm(ctx->sqo_mm);
+       }
+
+       return 0;
+}
+
 static void io_async_task_func(struct callback_head *cb)
 {
        struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
@@ -4270,11 +4344,16 @@ static void io_async_task_func(struct callback_head *cb)
 
        if (!canceled) {
                __set_current_state(TASK_RUNNING);
+               if (io_sq_thread_acquire_mm(ctx, req)) {
+                       io_cqring_add_event(req, -EFAULT);
+                       goto end_req;
+               }
                mutex_lock(&ctx->uring_lock);
                __io_queue_sqe(req, NULL);
                mutex_unlock(&ctx->uring_lock);
        } else {
                io_cqring_ev_posted(ctx);
+end_req:
                req_set_fail_links(req);
                io_double_put_req(req);
        }
@@ -4366,8 +4445,7 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
                memcpy(&apoll->work, &req->work, sizeof(req->work));
        had_io = req->io != NULL;
 
-       get_task_struct(current);
-       req->task = current;
+       io_get_req_task(req);
        req->apoll = apoll;
        INIT_HLIST_NODE(&req->hash_node);
 
@@ -4555,8 +4633,7 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
        events = READ_ONCE(sqe->poll_events);
        poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
 
-       get_task_struct(current);
-       req->task = current;
+       io_get_req_task(req);
        return 0;
 }
 
@@ -4772,7 +4849,7 @@ static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
        enum io_wq_cancel cancel_ret;
        int ret = 0;
 
-       cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
+       cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
        switch (cancel_ret) {
        case IO_WQ_CANCEL_OK:
                ret = 0;
@@ -5308,9 +5385,6 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
        if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
                const bool in_async = io_wq_current_is_worker();
 
-               if (req->result == -EAGAIN)
-                       return -EAGAIN;
-
                /* workqueue context doesn't hold uring_lock, grab it now */
                if (in_async)
                        mutex_lock(&ctx->uring_lock);
@@ -5817,17 +5891,14 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
        req->flags = 0;
        /* one is dropped after submission, the other at completion */
        refcount_set(&req->refs, 2);
-       req->task = NULL;
+       req->task = current;
        req->result = 0;
 
        if (unlikely(req->opcode >= IORING_OP_LAST))
                return -EINVAL;
 
-       if (io_op_defs[req->opcode].needs_mm && !current->mm) {
-               if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
-                       return -EFAULT;
-               kthread_use_mm(ctx->sqo_mm);
-       }
+       if (unlikely(io_sq_thread_acquire_mm(ctx, req)))
+               return -EFAULT;
 
        sqe_flags = READ_ONCE(sqe->flags);
        /* enforce forwards compatibility on users */
@@ -5936,16 +6007,6 @@ fail_req:
        return submitted;
 }
 
-static inline void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
-{
-       struct mm_struct *mm = current->mm;
-
-       if (mm) {
-               kthread_unuse_mm(mm);
-               mmput(mm);
-       }
-}
-
 static int io_sq_thread(void *data)
 {
        struct io_ring_ctx *ctx = data;
@@ -5979,7 +6040,7 @@ static int io_sq_thread(void *data)
                 * If submit got -EBUSY, flag us as needing the application
                 * to enter the kernel to reap and flush events.
                 */
-               if (!to_submit || ret == -EBUSY) {
+               if (!to_submit || ret == -EBUSY || need_resched()) {
                        /*
                         * Drop cur_mm before scheduling, we can't hold it for
                         * long periods (or over schedule()). Do this before
@@ -5995,7 +6056,7 @@ static int io_sq_thread(void *data)
                         * more IO, we should wait for the application to
                         * reap events and wake us up.
                         */
-                       if (!list_empty(&ctx->poll_list) ||
+                       if (!list_empty(&ctx->poll_list) || need_resched() ||
                            (!time_after(jiffies, timeout) && ret != -EBUSY &&
                            !percpu_ref_is_dying(&ctx->refs))) {
                                if (current->task_works)
@@ -6021,9 +6082,9 @@ static int io_sq_thread(void *data)
                        }
 
                        /* Tell userspace we may need a wakeup call */
+                       spin_lock_irq(&ctx->completion_lock);
                        ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
-                       /* make sure to read SQ tail after writing flags */
-                       smp_mb();
+                       spin_unlock_irq(&ctx->completion_lock);
 
                        to_submit = io_sqring_entries(ctx);
                        if (!to_submit || ret == -EBUSY) {
@@ -6041,13 +6102,17 @@ static int io_sq_thread(void *data)
                                schedule();
                                finish_wait(&ctx->sqo_wait, &wait);
 
+                               spin_lock_irq(&ctx->completion_lock);
                                ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+                               spin_unlock_irq(&ctx->completion_lock);
                                ret = 0;
                                continue;
                        }
                        finish_wait(&ctx->sqo_wait, &wait);
 
+                       spin_lock_irq(&ctx->completion_lock);
                        ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+                       spin_unlock_irq(&ctx->completion_lock);
                }
 
                mutex_lock(&ctx->uring_lock);
@@ -6146,15 +6211,23 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
        do {
                prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
                                                TASK_INTERRUPTIBLE);
+               /* make sure we run task_work before checking for signals */
                if (current->task_works)
                        task_work_run();
-               if (io_should_wake(&iowq, false))
-                       break;
-               schedule();
                if (signal_pending(current)) {
+                       if (current->jobctl & JOBCTL_TASK_WORK) {
+                               spin_lock_irq(&current->sighand->siglock);
+                               current->jobctl &= ~JOBCTL_TASK_WORK;
+                               recalc_sigpending();
+                               spin_unlock_irq(&current->sighand->siglock);
+                               continue;
+                       }
                        ret = -EINTR;
                        break;
                }
+               if (io_should_wake(&iowq, false))
+                       break;
+               schedule();
        } while (1);
        finish_wait(&ctx->wait, &iowq.wq);
 
@@ -6626,6 +6699,7 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
                for (i = 0; i < nr_tables; i++)
                        kfree(ctx->file_data->table[i].files);
 
+               percpu_ref_exit(&ctx->file_data->refs);
                kfree(ctx->file_data->table);
                kfree(ctx->file_data);
                ctx->file_data = NULL;
@@ -6778,8 +6852,10 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
                        }
                        table->files[index] = file;
                        err = io_sqe_file_register(ctx, file, i);
-                       if (err)
+                       if (err) {
+                               fput(file);
                                break;
+                       }
                }
                nr_args--;
                done++;
@@ -7275,9 +7351,6 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
        io_mem_free(ctx->sq_sqes);
 
        percpu_ref_exit(&ctx->refs);
-       if (ctx->account_mem)
-               io_unaccount_mem(ctx->user,
-                               ring_pages(ctx->sq_entries, ctx->cq_entries));
        free_uid(ctx->user);
        put_cred(ctx->creds);
        kfree(ctx->cancel_hash);
@@ -7331,7 +7404,17 @@ static void io_ring_exit_work(struct work_struct *work)
        if (ctx->rings)
                io_cqring_overflow_flush(ctx, true);
 
-       wait_for_completion(&ctx->ref_comp);
+       /*
+        * If we're doing polled IO and end up having requests being
+        * submitted async (out-of-line), then completions can come in while
+        * we're waiting for refs to drop. We need to reap these manually,
+        * as nobody else will be looking for them.
+        */
+       while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20)) {
+               io_iopoll_reap_events(ctx);
+               if (ctx->rings)
+                       io_cqring_overflow_flush(ctx, true);
+       }
        io_ring_ctx_free(ctx);
 }
 
@@ -7352,6 +7435,16 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
        if (ctx->rings)
                io_cqring_overflow_flush(ctx, true);
        idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx);
+
+       /*
+        * Do this upfront, so we won't have a grace period where the ring
+        * is closed but resources aren't reaped yet. This can cause
+        * spurious failure in setting up a new ring.
+        */
+       if (ctx->account_mem)
+               io_unaccount_mem(ctx->user,
+                               ring_pages(ctx->sq_entries, ctx->cq_entries));
+
        INIT_WORK(&ctx->exit_work, io_ring_exit_work);
        queue_work(system_wq, &ctx->exit_work);
 }
@@ -7365,9 +7458,22 @@ static int io_uring_release(struct inode *inode, struct file *file)
        return 0;
 }
 
+static bool io_wq_files_match(struct io_wq_work *work, void *data)
+{
+       struct files_struct *files = data;
+
+       return work->files == files;
+}
+
 static void io_uring_cancel_files(struct io_ring_ctx *ctx,
                                  struct files_struct *files)
 {
+       if (list_empty_careful(&ctx->inflight_list))
+               return;
+
+       /* cancel all at once, should be faster than doing it one by one*/
+       io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
+
        while (!list_empty_careful(&ctx->inflight_list)) {
                struct io_kiocb *cancel_req = NULL, *req;
                DEFINE_WAIT(wait);
@@ -7398,6 +7504,7 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
                        if (list_empty(&ctx->cq_overflow_list)) {
                                clear_bit(0, &ctx->sq_check_overflow);
                                clear_bit(0, &ctx->cq_check_overflow);
+                               ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
                        }
                        spin_unlock_irq(&ctx->completion_lock);
 
@@ -7423,6 +7530,14 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
        }
 }
 
+static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
+{
+       struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+       struct task_struct *task = data;
+
+       return req->task == task;
+}
+
 static int io_uring_flush(struct file *file, void *data)
 {
        struct io_ring_ctx *ctx = file->private_data;
@@ -7433,7 +7548,7 @@ static int io_uring_flush(struct file *file, void *data)
         * If the task is going away, cancel work it may have pending
         */
        if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
-               io_wq_cancel_pid(ctx->io_wq, task_pid_vnr(current));
+               io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, current, true);
 
        return 0;
 }
index a49d0e6..e494443 100644 (file)
@@ -1140,6 +1140,7 @@ static journal_t *journal_init_common(struct block_device *bdev,
        init_waitqueue_head(&journal->j_wait_commit);
        init_waitqueue_head(&journal->j_wait_updates);
        init_waitqueue_head(&journal->j_wait_reserved);
+       mutex_init(&journal->j_abort_mutex);
        mutex_init(&journal->j_barrier);
        mutex_init(&journal->j_checkpoint_mutex);
        spin_lock_init(&journal->j_revoke_lock);
@@ -1402,7 +1403,8 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
                printk(KERN_ERR "JBD2: Error %d detected when updating "
                       "journal superblock for %s.\n", ret,
                       journal->j_devname);
-               jbd2_journal_abort(journal, ret);
+               if (!is_journal_aborted(journal))
+                       jbd2_journal_abort(journal, ret);
        }
 
        return ret;
@@ -2154,6 +2156,13 @@ void jbd2_journal_abort(journal_t *journal, int errno)
        transaction_t *transaction;
 
        /*
+        * Lock the aborting procedure until everything is done, this avoid
+        * races between filesystem's error handling flow (e.g. ext4_abort()),
+        * ensure panic after the error info is written into journal's
+        * superblock.
+        */
+       mutex_lock(&journal->j_abort_mutex);
+       /*
         * ESHUTDOWN always takes precedence because a file system check
         * caused by any other journal abort error is not required after
         * a shutdown triggered.
@@ -2167,6 +2176,7 @@ void jbd2_journal_abort(journal_t *journal, int errno)
                        journal->j_errno = errno;
                        jbd2_journal_update_sb_errno(journal);
                }
+               mutex_unlock(&journal->j_abort_mutex);
                return;
        }
 
@@ -2188,10 +2198,7 @@ void jbd2_journal_abort(journal_t *journal, int errno)
         * layer could realise that a filesystem check is needed.
         */
        jbd2_journal_update_sb_errno(journal);
-
-       write_lock(&journal->j_state_lock);
-       journal->j_flags |= JBD2_REC_ERR;
-       write_unlock(&journal->j_state_lock);
+       mutex_unlock(&journal->j_abort_mutex);
 }
 
 /**
index 0637271..8ff4d1a 100644 (file)
@@ -259,7 +259,7 @@ struct jffs2_full_dirent
        uint32_t ino; /* == zero for unlink */
        unsigned int nhash;
        unsigned char type;
-       unsigned char name[0];
+       unsigned char name[];
 };
 
 /*
index 60207a2..e4131cb 100644 (file)
@@ -61,7 +61,7 @@ struct jffs2_sum_dirent_flash
        jint32_t ino;           /* == zero for unlink */
        uint8_t nsize;          /* dirent name size */
        uint8_t type;           /* dirent type */
-       uint8_t name[0];        /* dirent name */
+       uint8_t name[]; /* dirent name */
 } __attribute__((packed));
 
 struct jffs2_sum_xattr_flash
@@ -117,7 +117,7 @@ struct jffs2_sum_dirent_mem
        jint32_t ino;           /* == zero for unlink */
        uint8_t nsize;          /* dirent name size */
        uint8_t type;           /* dirent type */
-       uint8_t name[0];        /* dirent name */
+       uint8_t name[]; /* dirent name */
 } __attribute__((packed));
 
 struct jffs2_sum_xattr_mem
index 1b79dd5..3d113cf 100644 (file)
@@ -267,8 +267,6 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
 {
        struct inode *inode = dreq->inode;
 
-       inode_dio_end(inode);
-
        if (dreq->iocb) {
                long res = (long) dreq->error;
                if (dreq->count != 0) {
@@ -280,7 +278,10 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
 
        complete(&dreq->completion);
 
+       igrab(inode);
        nfs_direct_req_release(dreq);
+       inode_dio_end(inode);
+       iput(inode);
 }
 
 static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
@@ -410,8 +411,10 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
         * generic layer handle the completion.
         */
        if (requested_bytes == 0) {
-               inode_dio_end(inode);
+               igrab(inode);
                nfs_direct_req_release(dreq);
+               inode_dio_end(inode);
+               iput(inode);
                return result < 0 ? result : -EIO;
        }
 
@@ -864,8 +867,10 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
         * generic layer handle the completion.
         */
        if (requested_bytes == 0) {
-               inode_dio_end(inode);
+               igrab(inode);
                nfs_direct_req_release(dreq);
+               inode_dio_end(inode);
+               iput(inode);
                return result < 0 ? result : -EIO;
        }
 
index f96367a..ccd6c16 100644 (file)
@@ -83,6 +83,7 @@ nfs_file_release(struct inode *inode, struct file *filp)
        dprintk("NFS: release(%pD2)\n", filp);
 
        nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
+       inode_dio_wait(inode);
        nfs_file_clear_open_context(filp);
        return 0;
 }
index 7d399f7..de03e44 100644 (file)
@@ -907,9 +907,8 @@ retry:
                goto out_mds;
 
        /* Use a direct mapping of ds_idx to pgio mirror_idx */
-       if (WARN_ON_ONCE(pgio->pg_mirror_count !=
-           FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg)))
-               goto out_mds;
+       if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
+               goto out_eagain;
 
        for (i = 0; i < pgio->pg_mirror_count; i++) {
                mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
@@ -931,7 +930,10 @@ retry:
                        (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
                pgio->pg_maxretrans = io_maxretrans;
        return;
-
+out_eagain:
+       pnfs_generic_pg_cleanup(pgio);
+       pgio->pg_error = -EAGAIN;
+       return;
 out_mds:
        trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
                        0, NFS4_MAX_UINT64, IOMODE_RW,
@@ -941,6 +943,7 @@ out_mds:
        pgio->pg_lseg = NULL;
        pgio->pg_maxretrans = 0;
        nfs_pageio_reset_write_mds(pgio);
+       pgio->pg_error = -EAGAIN;
 }
 
 static unsigned int
index a3ab6e2..8733423 100644 (file)
@@ -308,6 +308,7 @@ static int try_location(struct fs_context *fc,
        if (IS_ERR(export_path))
                return PTR_ERR(export_path);
 
+       kfree(ctx->nfs_server.export_path);
        ctx->nfs_server.export_path = export_path;
 
        source = kmalloc(len + 1 + ctx->nfs_server.export_path_len + 1,
index bb3d2c3..cce2510 100644 (file)
@@ -7912,9 +7912,14 @@ nfs4_state_start_net(struct net *net)
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
        int ret;
 
-       ret = nfs4_state_create_net(net);
+       ret = get_nfsdfs(net);
        if (ret)
                return ret;
+       ret = nfs4_state_create_net(net);
+       if (ret) {
+               mntput(nn->nfsd_mnt);
+               return ret;
+       }
        locks_start_grace(net, &nn->nfsd4_manager);
        nfsd4_client_tracking_init(net);
        if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
@@ -7984,6 +7989,7 @@ nfs4_state_shutdown_net(struct net *net)
 
        nfsd4_client_tracking_exit(net);
        nfs4_state_destroy_net(net);
+       mntput(nn->nfsd_mnt);
 }
 
 void
index b68e966..cd05732 100644 (file)
@@ -1335,6 +1335,7 @@ void nfsd_client_rmdir(struct dentry *dentry)
        WARN_ON_ONCE(ret);
        fsnotify_rmdir(dir, dentry);
        d_delete(dentry);
+       dput(dentry);
        inode_unlock(dir);
 }
 
@@ -1424,6 +1425,18 @@ static struct file_system_type nfsd_fs_type = {
 };
 MODULE_ALIAS_FS("nfsd");
 
+int get_nfsdfs(struct net *net)
+{
+       struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+       struct vfsmount *mnt;
+
+       mnt =  vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL);
+       if (IS_ERR(mnt))
+               return PTR_ERR(mnt);
+       nn->nfsd_mnt = mnt;
+       return 0;
+}
+
 #ifdef CONFIG_PROC_FS
 static int create_proc_exports_entry(void)
 {
@@ -1451,7 +1464,6 @@ unsigned int nfsd_net_id;
 static __net_init int nfsd_init_net(struct net *net)
 {
        int retval;
-       struct vfsmount *mnt;
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
        retval = nfsd_export_init(net);
@@ -1478,16 +1490,8 @@ static __net_init int nfsd_init_net(struct net *net)
        init_waitqueue_head(&nn->ntf_wq);
        seqlock_init(&nn->boot_lock);
 
-       mnt =  vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL);
-       if (IS_ERR(mnt)) {
-               retval = PTR_ERR(mnt);
-               goto out_mount_err;
-       }
-       nn->nfsd_mnt = mnt;
        return 0;
 
-out_mount_err:
-       nfsd_reply_cache_shutdown(nn);
 out_drc_error:
        nfsd_idmap_shutdown(net);
 out_idmap_error:
@@ -1500,7 +1504,6 @@ static __net_exit void nfsd_exit_net(struct net *net)
 {
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-       mntput(nn->nfsd_mnt);
        nfsd_reply_cache_shutdown(nn);
        nfsd_idmap_shutdown(net);
        nfsd_export_shutdown(net);
index 36cdd81..57c832d 100644 (file)
@@ -90,6 +90,8 @@ void          nfsd_destroy(struct net *net);
 
 bool           i_am_nfsd(void);
 
+int get_nfsdfs(struct net *);
+
 struct nfsdfs_client {
        struct kref cl_ref;
        void (*cl_release)(struct kref *kref);
@@ -100,6 +102,7 @@ struct dentry *nfsd_client_mkdir(struct nfsd_net *nn,
                struct nfsdfs_client *ncl, u32 id, const struct tree_descr *);
 void nfsd_client_rmdir(struct dentry *dentry);
 
+
 #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
 #ifdef CONFIG_NFSD_V2_ACL
 extern const struct svc_version nfsd_acl_version2;
index c3fbab1..d22a056 100644 (file)
@@ -1226,6 +1226,9 @@ nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
                iap->ia_mode = 0;
        iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type;
 
+       if (!IS_POSIXACL(dirp))
+               iap->ia_mode &= ~current_umask();
+
        err = 0;
        host_err = 0;
        switch (type) {
@@ -1458,6 +1461,9 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
                goto out;
        }
 
+       if (!IS_POSIXACL(dirp))
+               iap->ia_mode &= ~current_umask();
+
        host_err = vfs_create(dirp, dchild, iap->ia_mode, true);
        if (host_err < 0) {
                fh_drop_write(fhp);
index 152a0fc..751bc4d 100644 (file)
@@ -689,6 +689,12 @@ static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
                                   &ocfs2_nfs_sync_lops, osb);
 }
 
+static void ocfs2_nfs_sync_lock_init(struct ocfs2_super *osb)
+{
+       ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
+       init_rwsem(&osb->nfs_sync_rwlock);
+}
+
 void ocfs2_trim_fs_lock_res_init(struct ocfs2_super *osb)
 {
        struct ocfs2_lock_res *lockres = &osb->osb_trim_fs_lockres;
@@ -2855,6 +2861,11 @@ int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
        if (ocfs2_is_hard_readonly(osb))
                return -EROFS;
 
+       if (ex)
+               down_write(&osb->nfs_sync_rwlock);
+       else
+               down_read(&osb->nfs_sync_rwlock);
+
        if (ocfs2_mount_local(osb))
                return 0;
 
@@ -2873,6 +2884,10 @@ void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
        if (!ocfs2_mount_local(osb))
                ocfs2_cluster_unlock(osb, lockres,
                                     ex ? LKM_EXMODE : LKM_PRMODE);
+       if (ex)
+               up_write(&osb->nfs_sync_rwlock);
+       else
+               up_read(&osb->nfs_sync_rwlock);
 }
 
 int ocfs2_trim_fs_lock(struct ocfs2_super *osb,
@@ -3340,7 +3355,7 @@ int ocfs2_dlm_init(struct ocfs2_super *osb)
 local:
        ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
        ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
-       ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
+       ocfs2_nfs_sync_lock_init(osb);
        ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
 
        osb->cconn = conn;
index ee5d985..2dd71d6 100644 (file)
@@ -395,6 +395,7 @@ struct ocfs2_super
        struct ocfs2_lock_res osb_super_lockres;
        struct ocfs2_lock_res osb_rename_lockres;
        struct ocfs2_lock_res osb_nfs_sync_lockres;
+       struct rw_semaphore nfs_sync_rwlock;
        struct ocfs2_lock_res osb_trim_fs_lockres;
        struct mutex obs_trim_fs_mutex;
        struct ocfs2_dlm_debug *osb_dlm_debug;
index 0dd8c41..19137c6 100644 (file)
 #define OCFS2_MAX_SLOTS                        255
 
 /* Slot map indicator for an empty slot */
-#define OCFS2_INVALID_SLOT             -1
+#define OCFS2_INVALID_SLOT             ((u16)-1)
 
 #define OCFS2_VOL_UUID_LEN             16
 #define OCFS2_MAX_VOL_LABEL_LEN                64
@@ -326,8 +326,8 @@ struct ocfs2_system_inode_info {
 enum {
        BAD_BLOCK_SYSTEM_INODE = 0,
        GLOBAL_INODE_ALLOC_SYSTEM_INODE,
+#define OCFS2_FIRST_ONLINE_SYSTEM_INODE GLOBAL_INODE_ALLOC_SYSTEM_INODE
        SLOT_MAP_SYSTEM_INODE,
-#define OCFS2_FIRST_ONLINE_SYSTEM_INODE SLOT_MAP_SYSTEM_INODE
        HEARTBEAT_SYSTEM_INODE,
        GLOBAL_BITMAP_SYSTEM_INODE,
        USER_QUOTA_SYSTEM_INODE,
index 4836bec..45745cc 100644 (file)
@@ -2825,9 +2825,12 @@ int ocfs2_test_inode_bit(struct ocfs2_super *osb, u64 blkno, int *res)
                goto bail;
        }
 
-       inode_alloc_inode =
-               ocfs2_get_system_file_inode(osb, INODE_ALLOC_SYSTEM_INODE,
-                                           suballoc_slot);
+       if (suballoc_slot == (u16)OCFS2_INVALID_SLOT)
+               inode_alloc_inode = ocfs2_get_system_file_inode(osb,
+                       GLOBAL_INODE_ALLOC_SYSTEM_INODE, suballoc_slot);
+       else
+               inode_alloc_inode = ocfs2_get_system_file_inode(osb,
+                       INODE_ALLOC_SYSTEM_INODE, suballoc_slot);
        if (!inode_alloc_inode) {
                /* the error code could be inaccurate, but we are not able to
                 * get the correct one. */
index 9955d75..ad31ec4 100644 (file)
@@ -26,8 +26,9 @@ static int boot_config_proc_show(struct seq_file *m, void *v)
 static int __init copy_xbc_key_value_list(char *dst, size_t size)
 {
        struct xbc_node *leaf, *vnode;
-       const char *val;
        char *key, *end = dst + size;
+       const char *val;
+       char q;
        int ret = 0;
 
        key = kzalloc(XBC_KEYLEN_MAX, GFP_KERNEL);
@@ -41,16 +42,20 @@ static int __init copy_xbc_key_value_list(char *dst, size_t size)
                        break;
                dst += ret;
                vnode = xbc_node_get_child(leaf);
-               if (vnode && xbc_node_is_array(vnode)) {
+               if (vnode) {
                        xbc_array_for_each_value(vnode, val) {
-                               ret = snprintf(dst, rest(dst, end), "\"%s\"%s",
-                                       val, vnode->next ? ", " : "\n");
+                               if (strchr(val, '"'))
+                                       q = '\'';
+                               else
+                                       q = '"';
+                               ret = snprintf(dst, rest(dst, end), "%c%s%c%s",
+                                       q, val, q, vnode->next ? ", " : "\n");
                                if (ret < 0)
                                        goto out;
                                dst += ret;
                        }
                } else {
-                       ret = snprintf(dst, rest(dst, end), "\"%s\"\n", val);
+                       ret = snprintf(dst, rest(dst, end), "\"\"\n");
                        if (ret < 0)
                                break;
                        dst += ret;
index 8ba492d..e502414 100644 (file)
@@ -512,7 +512,8 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
                                 * Using bounce buffer to bypass the
                                 * hardened user copy kernel text checks.
                                 */
-                               if (probe_kernel_read(buf, (void *) start, tsz)) {
+                               if (copy_from_kernel_nofault(buf, (void *)start,
+                                               tsz)) {
                                        if (clear_user(buffer, tsz)) {
                                                ret = -EFAULT;
                                                goto out;
index 42c5128..6c1166c 100644 (file)
@@ -566,8 +566,9 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *ubuf,
                goto out;
 
        /* don't even try if the size is too large */
-       if (count > KMALLOC_MAX_SIZE)
-               return -ENOMEM;
+       error = -ENOMEM;
+       if (count >= KMALLOC_MAX_SIZE)
+               goto out;
 
        if (write) {
                kbuf = memdup_user_nul(ubuf, count);
@@ -576,7 +577,6 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *ubuf,
                        goto out;
                }
        } else {
-               error = -ENOMEM;
                kbuf = kzalloc(count, GFP_KERNEL);
                if (!kbuf)
                        goto out;
index bbfa9b1..4fb7978 100644 (file)
@@ -419,28 +419,42 @@ static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, lo
        return ret;
 }
 
-ssize_t __vfs_read(struct file *file, char __user *buf, size_t count,
-                  loff_t *pos)
+ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
 {
+       mm_segment_t old_fs = get_fs();
+       ssize_t ret;
+
+       if (WARN_ON_ONCE(!(file->f_mode & FMODE_READ)))
+               return -EINVAL;
+       if (!(file->f_mode & FMODE_CAN_READ))
+               return -EINVAL;
+
+       if (count > MAX_RW_COUNT)
+               count =  MAX_RW_COUNT;
+       set_fs(KERNEL_DS);
        if (file->f_op->read)
-               return file->f_op->read(file, buf, count, pos);
+               ret = file->f_op->read(file, (void __user *)buf, count, pos);
        else if (file->f_op->read_iter)
-               return new_sync_read(file, buf, count, pos);
+               ret = new_sync_read(file, (void __user *)buf, count, pos);
        else
-               return -EINVAL;
+               ret = -EINVAL;
+       set_fs(old_fs);
+       if (ret > 0) {
+               fsnotify_access(file);
+               add_rchar(current, ret);
+       }
+       inc_syscr(current);
+       return ret;
 }
 
 ssize_t kernel_read(struct file *file, void *buf, size_t count, loff_t *pos)
 {
-       mm_segment_t old_fs;
-       ssize_t result;
+       ssize_t ret;
 
-       old_fs = get_fs();
-       set_fs(KERNEL_DS);
-       /* The cast to a user pointer is valid due to the set_fs() */
-       result = vfs_read(file, (void __user *)buf, count, pos);
-       set_fs(old_fs);
-       return result;
+       ret = rw_verify_area(READ, file, pos, count);
+       if (ret)
+               return ret;
+       return __kernel_read(file, buf, count, pos);
 }
 EXPORT_SYMBOL(kernel_read);
 
@@ -456,17 +470,22 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
                return -EFAULT;
 
        ret = rw_verify_area(READ, file, pos, count);
-       if (!ret) {
-               if (count > MAX_RW_COUNT)
-                       count =  MAX_RW_COUNT;
-               ret = __vfs_read(file, buf, count, pos);
-               if (ret > 0) {
-                       fsnotify_access(file);
-                       add_rchar(current, ret);
-               }
-               inc_syscr(current);
-       }
+       if (ret)
+               return ret;
+       if (count > MAX_RW_COUNT)
+               count =  MAX_RW_COUNT;
 
+       if (file->f_op->read)
+               ret = file->f_op->read(file, buf, count, pos);
+       else if (file->f_op->read_iter)
+               ret = new_sync_read(file, buf, count, pos);
+       else
+               ret = -EINVAL;
+       if (ret > 0) {
+               fsnotify_access(file);
+               add_rchar(current, ret);
+       }
+       inc_syscr(current);
        return ret;
 }
 
@@ -488,23 +507,15 @@ static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t
        return ret;
 }
 
-static ssize_t __vfs_write(struct file *file, const char __user *p,
-                          size_t count, loff_t *pos)
-{
-       if (file->f_op->write)
-               return file->f_op->write(file, p, count, pos);
-       else if (file->f_op->write_iter)
-               return new_sync_write(file, p, count, pos);
-       else
-               return -EINVAL;
-}
-
+/* caller is responsible for file_start_write/file_end_write */
 ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos)
 {
        mm_segment_t old_fs;
        const char __user *p;
        ssize_t ret;
 
+       if (WARN_ON_ONCE(!(file->f_mode & FMODE_WRITE)))
+               return -EBADF;
        if (!(file->f_mode & FMODE_CAN_WRITE))
                return -EINVAL;
 
@@ -513,7 +524,12 @@ ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t
        p = (__force const char __user *)buf;
        if (count > MAX_RW_COUNT)
                count =  MAX_RW_COUNT;
-       ret = __vfs_write(file, p, count, pos);
+       if (file->f_op->write)
+               ret = file->f_op->write(file, p, count, pos);
+       else if (file->f_op->write_iter)
+               ret = new_sync_write(file, p, count, pos);
+       else
+               ret = -EINVAL;
        set_fs(old_fs);
        if (ret > 0) {
                fsnotify_modify(file);
@@ -522,21 +538,20 @@ ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t
        inc_syscw(current);
        return ret;
 }
-EXPORT_SYMBOL(__kernel_write);
 
 ssize_t kernel_write(struct file *file, const void *buf, size_t count,
                            loff_t *pos)
 {
-       mm_segment_t old_fs;
-       ssize_t res;
+       ssize_t ret;
 
-       old_fs = get_fs();
-       set_fs(KERNEL_DS);
-       /* The cast to a user pointer is valid due to the set_fs() */
-       res = vfs_write(file, (__force const char __user *)buf, count, pos);
-       set_fs(old_fs);
+       ret = rw_verify_area(WRITE, file, pos, count);
+       if (ret)
+               return ret;
 
-       return res;
+       file_start_write(file);
+       ret =  __kernel_write(file, buf, count, pos);
+       file_end_write(file);
+       return ret;
 }
 EXPORT_SYMBOL(kernel_write);
 
@@ -552,19 +567,23 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
                return -EFAULT;
 
        ret = rw_verify_area(WRITE, file, pos, count);
-       if (!ret) {
-               if (count > MAX_RW_COUNT)
-                       count =  MAX_RW_COUNT;
-               file_start_write(file);
-               ret = __vfs_write(file, buf, count, pos);
-               if (ret > 0) {
-                       fsnotify_modify(file);
-                       add_wchar(current, ret);
-               }
-               inc_syscw(current);
-               file_end_write(file);
+       if (ret)
+               return ret;
+       if (count > MAX_RW_COUNT)
+               count =  MAX_RW_COUNT;
+       file_start_write(file);
+       if (file->f_op->write)
+               ret = file->f_op->write(file, buf, count, pos);
+       else if (file->f_op->write_iter)
+               ret = new_sync_write(file, buf, count, pos);
+       else
+               ret = -EINVAL;
+       if (ret > 0) {
+               fsnotify_modify(file);
+               add_wchar(current, ret);
        }
-
+       inc_syscw(current);
+       file_end_write(file);
        return ret;
 }
 
index 7187bd1..8d64edb 100644 (file)
@@ -262,7 +262,7 @@ struct squashfs_dir_index {
        __le32                  index;
        __le32                  start_block;
        __le32                  size;
-       unsigned char           name[0];
+       unsigned char           name[];
 };
 
 struct squashfs_base_inode {
@@ -327,7 +327,7 @@ struct squashfs_symlink_inode {
        __le32                  inode_number;
        __le32                  nlink;
        __le32                  symlink_size;
-       char                    symlink[0];
+       char                    symlink[];
 };
 
 struct squashfs_reg_inode {
@@ -341,7 +341,7 @@ struct squashfs_reg_inode {
        __le32                  fragment;
        __le32                  offset;
        __le32                  file_size;
-       __le16                  block_list[0];
+       __le16                  block_list[];
 };
 
 struct squashfs_lreg_inode {
@@ -358,7 +358,7 @@ struct squashfs_lreg_inode {
        __le32                  fragment;
        __le32                  offset;
        __le32                  xattr;
-       __le16                  block_list[0];
+       __le16                  block_list[];
 };
 
 struct squashfs_dir_inode {
@@ -389,7 +389,7 @@ struct squashfs_ldir_inode {
        __le16                  i_count;
        __le16                  offset;
        __le32                  xattr;
-       struct squashfs_dir_index       index[0];
+       struct squashfs_dir_index       index[];
 };
 
 union squashfs_inode {
@@ -410,7 +410,7 @@ struct squashfs_dir_entry {
        __le16                  inode_number;
        __le16                  type;
        __le16                  size;
-       char                    name[0];
+       char                    name[];
 };
 
 struct squashfs_dir_header {
@@ -428,12 +428,12 @@ struct squashfs_fragment_entry {
 struct squashfs_xattr_entry {
        __le16                  type;
        __le16                  size;
-       char                    data[0];
+       char                    data[];
 };
 
 struct squashfs_xattr_val {
        __le32                  vsize;
-       char                    value[0];
+       char                    value[];
 };
 
 struct squashfs_xattr_id {
index b43f0e8..9ed9036 100644 (file)
@@ -671,7 +671,8 @@ xlog_cil_push_work(
        /*
         * Wake up any background push waiters now this context is being pushed.
         */
-       wake_up_all(&ctx->push_wait);
+       if (ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
+               wake_up_all(&cil->xc_push_wait);
 
        /*
         * Check if we've anything to push. If there is nothing, then we don't
@@ -743,13 +744,12 @@ xlog_cil_push_work(
 
        /*
         * initialise the new context and attach it to the CIL. Then attach
-        * the current context to the CIL committing lsit so it can be found
+        * the current context to the CIL committing list so it can be found
         * during log forces to extract the commit lsn of the sequence that
         * needs to be forced.
         */
        INIT_LIST_HEAD(&new_ctx->committing);
        INIT_LIST_HEAD(&new_ctx->busy_extents);
-       init_waitqueue_head(&new_ctx->push_wait);
        new_ctx->sequence = ctx->sequence + 1;
        new_ctx->cil = cil;
        cil->xc_ctx = new_ctx;
@@ -937,7 +937,7 @@ xlog_cil_push_background(
        if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log)) {
                trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
                ASSERT(cil->xc_ctx->space_used < log->l_logsize);
-               xlog_wait(&cil->xc_ctx->push_wait, &cil->xc_push_lock);
+               xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
                return;
        }
 
@@ -1216,12 +1216,12 @@ xlog_cil_init(
        INIT_LIST_HEAD(&cil->xc_committing);
        spin_lock_init(&cil->xc_cil_lock);
        spin_lock_init(&cil->xc_push_lock);
+       init_waitqueue_head(&cil->xc_push_wait);
        init_rwsem(&cil->xc_ctx_lock);
        init_waitqueue_head(&cil->xc_commit_wait);
 
        INIT_LIST_HEAD(&ctx->committing);
        INIT_LIST_HEAD(&ctx->busy_extents);
-       init_waitqueue_head(&ctx->push_wait);
        ctx->sequence = 1;
        ctx->cil = cil;
        cil->xc_ctx = ctx;
index ec22c7a..75a6287 100644 (file)
@@ -240,7 +240,6 @@ struct xfs_cil_ctx {
        struct xfs_log_vec      *lv_chain;      /* logvecs being pushed */
        struct list_head        iclog_entry;
        struct list_head        committing;     /* ctx committing list */
-       wait_queue_head_t       push_wait;      /* background push throttle */
        struct work_struct      discard_endio_work;
 };
 
@@ -274,6 +273,7 @@ struct xfs_cil {
        wait_queue_head_t       xc_commit_wait;
        xfs_lsn_t               xc_current_sequence;
        struct work_struct      xc_push_work;
+       wait_queue_head_t       xc_push_wait;   /* background push throttle */
 } ____cacheline_aligned_in_smp;
 
 /*
index 907fa5d..4a674db 100644 (file)
@@ -2,6 +2,11 @@
 #ifndef _ASM_GENERIC_CACHEFLUSH_H
 #define _ASM_GENERIC_CACHEFLUSH_H
 
+struct mm_struct;
+struct vm_area_struct;
+struct page;
+struct address_space;
+
 /*
  * The cache doesn't need to be flushed when TLB entries change when
  * the cache is mapped to physical memory, not virtual memory
index 5a80f8e..cd8b75a 100644 (file)
@@ -23,11 +23,9 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum);
-
 #ifndef csum_partial_copy_nocheck
-#define csum_partial_copy_nocheck(src, dst, len, sum)  \
-       csum_partial_copy((src), (dst), (len), (sum))
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len,
+               __wsum sum);
 #endif
 
 #ifndef ip_fast_csum
index 40f85de..8e1e624 100644 (file)
@@ -122,7 +122,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 #ifndef __HAVE_ARCH_HUGE_PTEP_GET
 static inline pte_t huge_ptep_get(pte_t *ptep)
 {
-       return READ_ONCE(*ptep);
+       return ptep_get(ptep);
 }
 #endif
 
index db600ef..0be2ee2 100644 (file)
                __start_BTF = .;                                        \
                *(.BTF)                                                 \
                __stop_BTF = .;                                         \
+       }                                                               \
+       . = ALIGN(4);                                                   \
+       .BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) {                   \
+               *(.BTF_ids)                                             \
        }
 #else
 #define BTF
index 56527c8..088c1de 100644 (file)
@@ -29,8 +29,8 @@ struct alg_sock {
 
        struct sock *parent;
 
-       unsigned int refcnt;
-       unsigned int nokey_refcnt;
+       atomic_t refcnt;
+       atomic_t nokey_refcnt;
 
        const struct af_alg_type *type;
        void *private;
index 27bdd27..77941ef 100644 (file)
@@ -89,7 +89,7 @@ struct displayid_detailed_timings_1 {
 
 struct displayid_detailed_timing_block {
        struct displayid_block base;
-       struct displayid_detailed_timings_1 timings[0];
+       struct displayid_detailed_timings_1 timings[];
 };
 
 #define for_each_displayid_db(displayid, block, idx, length) \
index 9e9ccb2..38afb34 100644 (file)
@@ -27,7 +27,7 @@ struct encrypted_key_payload {
        unsigned short payload_datalen;         /* payload data length */
        unsigned short encrypted_key_format;    /* encrypted key format */
        u8 *decrypted_data;     /* decrypted data */
-       u8 payload_data[0];     /* payload data + datablob + hmac */
+       u8 payload_data[];      /* payload data + datablob + hmac */
 };
 
 extern struct key_type key_type_encrypted;
index a183278..2b0b15a 100644 (file)
@@ -28,7 +28,7 @@ struct rxkad_key {
        u8      primary_flag;           /* T if key for primary cell for this user */
        u16     ticket_len;             /* length of ticket[] */
        u8      session_key[8];         /* DES session key */
-       u8      ticket[0];              /* the encrypted ticket */
+       u8      ticket[];               /* the encrypted ticket */
 };
 
 /*
@@ -100,7 +100,7 @@ struct rxrpc_key_data_v1 {
        u32             expiry;                 /* time_t */
        u32             kvno;
        u8              session_key[8];
-       u8              ticket[0];
+       u8              ticket[];
 };
 
 /*
index 8124815..5d5ff22 100644 (file)
@@ -176,11 +176,6 @@ struct atm_dev {
 #define ATM_OF_IMMED  1                /* Attempt immediate delivery */
 #define ATM_OF_INRATE 2                /* Attempt in-rate delivery */
 
-
-/*
- * ioctl, getsockopt, and setsockopt are optional and can be set to NULL.
- */
-
 struct atmdev_ops { /* only send is required */
        void (*dev_close)(struct atm_dev *dev);
        int (*open)(struct atm_vcc *vcc);
@@ -190,10 +185,6 @@ struct atmdev_ops { /* only send is required */
        int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd,
                            void __user *arg);
 #endif
-       int (*getsockopt)(struct atm_vcc *vcc,int level,int optname,
-           void __user *optval,int optlen);
-       int (*setsockopt)(struct atm_vcc *vcc,int level,int optname,
-           void __user *optval,unsigned int optlen);
        int (*send)(struct atm_vcc *vcc,struct sk_buff *skb);
        int (*send_oam)(struct atm_vcc *vcc,void *cell,int flags);
        void (*phy_put)(struct atm_dev *dev,unsigned char value,
index 2c4927b..fd525c7 100644 (file)
@@ -77,6 +77,9 @@
 
 #endif /* cmpxchg64_relaxed */
 
+#define arch_atomic_read atomic_read
+#define arch_atomic_read_acquire atomic_read_acquire
+
 #ifndef atomic_read_acquire
 static __always_inline int
 atomic_read_acquire(const atomic_t *v)
@@ -86,6 +89,9 @@ atomic_read_acquire(const atomic_t *v)
 #define atomic_read_acquire atomic_read_acquire
 #endif
 
+#define arch_atomic_set atomic_set
+#define arch_atomic_set_release atomic_set_release
+
 #ifndef atomic_set_release
 static __always_inline void
 atomic_set_release(atomic_t *v, int i)
@@ -95,6 +101,13 @@ atomic_set_release(atomic_t *v, int i)
 #define atomic_set_release atomic_set_release
 #endif
 
+#define arch_atomic_add atomic_add
+
+#define arch_atomic_add_return atomic_add_return
+#define arch_atomic_add_return_acquire atomic_add_return_acquire
+#define arch_atomic_add_return_release atomic_add_return_release
+#define arch_atomic_add_return_relaxed atomic_add_return_relaxed
+
 #ifndef atomic_add_return_relaxed
 #define atomic_add_return_acquire atomic_add_return
 #define atomic_add_return_release atomic_add_return
@@ -137,6 +150,11 @@ atomic_add_return(int i, atomic_t *v)
 
 #endif /* atomic_add_return_relaxed */
 
+#define arch_atomic_fetch_add atomic_fetch_add
+#define arch_atomic_fetch_add_acquire atomic_fetch_add_acquire
+#define arch_atomic_fetch_add_release atomic_fetch_add_release
+#define arch_atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+
 #ifndef atomic_fetch_add_relaxed
 #define atomic_fetch_add_acquire atomic_fetch_add
 #define atomic_fetch_add_release atomic_fetch_add
@@ -179,6 +197,13 @@ atomic_fetch_add(int i, atomic_t *v)
 
 #endif /* atomic_fetch_add_relaxed */
 
+#define arch_atomic_sub atomic_sub
+
+#define arch_atomic_sub_return atomic_sub_return
+#define arch_atomic_sub_return_acquire atomic_sub_return_acquire
+#define arch_atomic_sub_return_release atomic_sub_return_release
+#define arch_atomic_sub_return_relaxed atomic_sub_return_relaxed
+
 #ifndef atomic_sub_return_relaxed
 #define atomic_sub_return_acquire atomic_sub_return
 #define atomic_sub_return_release atomic_sub_return
@@ -221,6 +246,11 @@ atomic_sub_return(int i, atomic_t *v)
 
 #endif /* atomic_sub_return_relaxed */
 
+#define arch_atomic_fetch_sub atomic_fetch_sub
+#define arch_atomic_fetch_sub_acquire atomic_fetch_sub_acquire
+#define arch_atomic_fetch_sub_release atomic_fetch_sub_release
+#define arch_atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+
 #ifndef atomic_fetch_sub_relaxed
 #define atomic_fetch_sub_acquire atomic_fetch_sub
 #define atomic_fetch_sub_release atomic_fetch_sub
@@ -263,6 +293,8 @@ atomic_fetch_sub(int i, atomic_t *v)
 
 #endif /* atomic_fetch_sub_relaxed */
 
+#define arch_atomic_inc atomic_inc
+
 #ifndef atomic_inc
 static __always_inline void
 atomic_inc(atomic_t *v)
@@ -272,6 +304,11 @@ atomic_inc(atomic_t *v)
 #define atomic_inc atomic_inc
 #endif
 
+#define arch_atomic_inc_return atomic_inc_return
+#define arch_atomic_inc_return_acquire atomic_inc_return_acquire
+#define arch_atomic_inc_return_release atomic_inc_return_release
+#define arch_atomic_inc_return_relaxed atomic_inc_return_relaxed
+
 #ifndef atomic_inc_return_relaxed
 #ifdef atomic_inc_return
 #define atomic_inc_return_acquire atomic_inc_return
@@ -353,6 +390,11 @@ atomic_inc_return(atomic_t *v)
 
 #endif /* atomic_inc_return_relaxed */
 
+#define arch_atomic_fetch_inc atomic_fetch_inc
+#define arch_atomic_fetch_inc_acquire atomic_fetch_inc_acquire
+#define arch_atomic_fetch_inc_release atomic_fetch_inc_release
+#define arch_atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
+
 #ifndef atomic_fetch_inc_relaxed
 #ifdef atomic_fetch_inc
 #define atomic_fetch_inc_acquire atomic_fetch_inc
@@ -434,6 +476,8 @@ atomic_fetch_inc(atomic_t *v)
 
 #endif /* atomic_fetch_inc_relaxed */
 
+#define arch_atomic_dec atomic_dec
+
 #ifndef atomic_dec
 static __always_inline void
 atomic_dec(atomic_t *v)
@@ -443,6 +487,11 @@ atomic_dec(atomic_t *v)
 #define atomic_dec atomic_dec
 #endif
 
+#define arch_atomic_dec_return atomic_dec_return
+#define arch_atomic_dec_return_acquire atomic_dec_return_acquire
+#define arch_atomic_dec_return_release atomic_dec_return_release
+#define arch_atomic_dec_return_relaxed atomic_dec_return_relaxed
+
 #ifndef atomic_dec_return_relaxed
 #ifdef atomic_dec_return
 #define atomic_dec_return_acquire atomic_dec_return
@@ -524,6 +573,11 @@ atomic_dec_return(atomic_t *v)
 
 #endif /* atomic_dec_return_relaxed */
 
+#define arch_atomic_fetch_dec atomic_fetch_dec
+#define arch_atomic_fetch_dec_acquire atomic_fetch_dec_acquire
+#define arch_atomic_fetch_dec_release atomic_fetch_dec_release
+#define arch_atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
+
 #ifndef atomic_fetch_dec_relaxed
 #ifdef atomic_fetch_dec
 #define atomic_fetch_dec_acquire atomic_fetch_dec
@@ -605,6 +659,13 @@ atomic_fetch_dec(atomic_t *v)
 
 #endif /* atomic_fetch_dec_relaxed */
 
+#define arch_atomic_and atomic_and
+
+#define arch_atomic_fetch_and atomic_fetch_and
+#define arch_atomic_fetch_and_acquire atomic_fetch_and_acquire
+#define arch_atomic_fetch_and_release atomic_fetch_and_release
+#define arch_atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+
 #ifndef atomic_fetch_and_relaxed
 #define atomic_fetch_and_acquire atomic_fetch_and
 #define atomic_fetch_and_release atomic_fetch_and
@@ -647,6 +708,8 @@ atomic_fetch_and(int i, atomic_t *v)
 
 #endif /* atomic_fetch_and_relaxed */
 
+#define arch_atomic_andnot atomic_andnot
+
 #ifndef atomic_andnot
 static __always_inline void
 atomic_andnot(int i, atomic_t *v)
@@ -656,6 +719,11 @@ atomic_andnot(int i, atomic_t *v)
 #define atomic_andnot atomic_andnot
 #endif
 
+#define arch_atomic_fetch_andnot atomic_fetch_andnot
+#define arch_atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
+#define arch_atomic_fetch_andnot_release atomic_fetch_andnot_release
+#define arch_atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
+
 #ifndef atomic_fetch_andnot_relaxed
 #ifdef atomic_fetch_andnot
 #define atomic_fetch_andnot_acquire atomic_fetch_andnot
@@ -737,6 +805,13 @@ atomic_fetch_andnot(int i, atomic_t *v)
 
 #endif /* atomic_fetch_andnot_relaxed */
 
+#define arch_atomic_or atomic_or
+
+#define arch_atomic_fetch_or atomic_fetch_or
+#define arch_atomic_fetch_or_acquire atomic_fetch_or_acquire
+#define arch_atomic_fetch_or_release atomic_fetch_or_release
+#define arch_atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+
 #ifndef atomic_fetch_or_relaxed
 #define atomic_fetch_or_acquire atomic_fetch_or
 #define atomic_fetch_or_release atomic_fetch_or
@@ -779,6 +854,13 @@ atomic_fetch_or(int i, atomic_t *v)
 
 #endif /* atomic_fetch_or_relaxed */
 
+#define arch_atomic_xor atomic_xor
+
+#define arch_atomic_fetch_xor atomic_fetch_xor
+#define arch_atomic_fetch_xor_acquire atomic_fetch_xor_acquire
+#define arch_atomic_fetch_xor_release atomic_fetch_xor_release
+#define arch_atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+
 #ifndef atomic_fetch_xor_relaxed
 #define atomic_fetch_xor_acquire atomic_fetch_xor
 #define atomic_fetch_xor_release atomic_fetch_xor
@@ -821,6 +903,11 @@ atomic_fetch_xor(int i, atomic_t *v)
 
 #endif /* atomic_fetch_xor_relaxed */
 
+#define arch_atomic_xchg atomic_xchg
+#define arch_atomic_xchg_acquire atomic_xchg_acquire
+#define arch_atomic_xchg_release atomic_xchg_release
+#define arch_atomic_xchg_relaxed atomic_xchg_relaxed
+
 #ifndef atomic_xchg_relaxed
 #define atomic_xchg_acquire atomic_xchg
 #define atomic_xchg_release atomic_xchg
@@ -863,6 +950,11 @@ atomic_xchg(atomic_t *v, int i)
 
 #endif /* atomic_xchg_relaxed */
 
+#define arch_atomic_cmpxchg atomic_cmpxchg
+#define arch_atomic_cmpxchg_acquire atomic_cmpxchg_acquire
+#define arch_atomic_cmpxchg_release atomic_cmpxchg_release
+#define arch_atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
+
 #ifndef atomic_cmpxchg_relaxed
 #define atomic_cmpxchg_acquire atomic_cmpxchg
 #define atomic_cmpxchg_release atomic_cmpxchg
@@ -905,6 +997,11 @@ atomic_cmpxchg(atomic_t *v, int old, int new)
 
 #endif /* atomic_cmpxchg_relaxed */
 
+#define arch_atomic_try_cmpxchg atomic_try_cmpxchg
+#define arch_atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
+#define arch_atomic_try_cmpxchg_release atomic_try_cmpxchg_release
+#define arch_atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
+
 #ifndef atomic_try_cmpxchg_relaxed
 #ifdef atomic_try_cmpxchg
 #define atomic_try_cmpxchg_acquire atomic_try_cmpxchg
@@ -1002,6 +1099,8 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
 
 #endif /* atomic_try_cmpxchg_relaxed */
 
+#define arch_atomic_sub_and_test atomic_sub_and_test
+
 #ifndef atomic_sub_and_test
 /**
  * atomic_sub_and_test - subtract value from variable and test result
@@ -1020,6 +1119,8 @@ atomic_sub_and_test(int i, atomic_t *v)
 #define atomic_sub_and_test atomic_sub_and_test
 #endif
 
+#define arch_atomic_dec_and_test atomic_dec_and_test
+
 #ifndef atomic_dec_and_test
 /**
  * atomic_dec_and_test - decrement and test
@@ -1037,6 +1138,8 @@ atomic_dec_and_test(atomic_t *v)
 #define atomic_dec_and_test atomic_dec_and_test
 #endif
 
+#define arch_atomic_inc_and_test atomic_inc_and_test
+
 #ifndef atomic_inc_and_test
 /**
  * atomic_inc_and_test - increment and test
@@ -1054,6 +1157,8 @@ atomic_inc_and_test(atomic_t *v)
 #define atomic_inc_and_test atomic_inc_and_test
 #endif
 
+#define arch_atomic_add_negative atomic_add_negative
+
 #ifndef atomic_add_negative
 /**
  * atomic_add_negative - add and test if negative
@@ -1072,6 +1177,8 @@ atomic_add_negative(int i, atomic_t *v)
 #define atomic_add_negative atomic_add_negative
 #endif
 
+#define arch_atomic_fetch_add_unless atomic_fetch_add_unless
+
 #ifndef atomic_fetch_add_unless
 /**
  * atomic_fetch_add_unless - add unless the number is already a given value
@@ -1097,6 +1204,8 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u)
 #define atomic_fetch_add_unless atomic_fetch_add_unless
 #endif
 
+#define arch_atomic_add_unless atomic_add_unless
+
 #ifndef atomic_add_unless
 /**
  * atomic_add_unless - add unless the number is already a given value
@@ -1115,6 +1224,8 @@ atomic_add_unless(atomic_t *v, int a, int u)
 #define atomic_add_unless atomic_add_unless
 #endif
 
+#define arch_atomic_inc_not_zero atomic_inc_not_zero
+
 #ifndef atomic_inc_not_zero
 /**
  * atomic_inc_not_zero - increment unless the number is zero
@@ -1131,6 +1242,8 @@ atomic_inc_not_zero(atomic_t *v)
 #define atomic_inc_not_zero atomic_inc_not_zero
 #endif
 
+#define arch_atomic_inc_unless_negative atomic_inc_unless_negative
+
 #ifndef atomic_inc_unless_negative
 static __always_inline bool
 atomic_inc_unless_negative(atomic_t *v)
@@ -1147,6 +1260,8 @@ atomic_inc_unless_negative(atomic_t *v)
 #define atomic_inc_unless_negative atomic_inc_unless_negative
 #endif
 
+#define arch_atomic_dec_unless_positive atomic_dec_unless_positive
+
 #ifndef atomic_dec_unless_positive
 static __always_inline bool
 atomic_dec_unless_positive(atomic_t *v)
@@ -1163,6 +1278,8 @@ atomic_dec_unless_positive(atomic_t *v)
 #define atomic_dec_unless_positive atomic_dec_unless_positive
 #endif
 
+#define arch_atomic_dec_if_positive atomic_dec_if_positive
+
 #ifndef atomic_dec_if_positive
 static __always_inline int
 atomic_dec_if_positive(atomic_t *v)
@@ -1184,6 +1301,9 @@ atomic_dec_if_positive(atomic_t *v)
 #include <asm-generic/atomic64.h>
 #endif
 
+#define arch_atomic64_read atomic64_read
+#define arch_atomic64_read_acquire atomic64_read_acquire
+
 #ifndef atomic64_read_acquire
 static __always_inline s64
 atomic64_read_acquire(const atomic64_t *v)
@@ -1193,6 +1313,9 @@ atomic64_read_acquire(const atomic64_t *v)
 #define atomic64_read_acquire atomic64_read_acquire
 #endif
 
+#define arch_atomic64_set atomic64_set
+#define arch_atomic64_set_release atomic64_set_release
+
 #ifndef atomic64_set_release
 static __always_inline void
 atomic64_set_release(atomic64_t *v, s64 i)
@@ -1202,6 +1325,13 @@ atomic64_set_release(atomic64_t *v, s64 i)
 #define atomic64_set_release atomic64_set_release
 #endif
 
+#define arch_atomic64_add atomic64_add
+
+#define arch_atomic64_add_return atomic64_add_return
+#define arch_atomic64_add_return_acquire atomic64_add_return_acquire
+#define arch_atomic64_add_return_release atomic64_add_return_release
+#define arch_atomic64_add_return_relaxed atomic64_add_return_relaxed
+
 #ifndef atomic64_add_return_relaxed
 #define atomic64_add_return_acquire atomic64_add_return
 #define atomic64_add_return_release atomic64_add_return
@@ -1244,6 +1374,11 @@ atomic64_add_return(s64 i, atomic64_t *v)
 
 #endif /* atomic64_add_return_relaxed */
 
+#define arch_atomic64_fetch_add atomic64_fetch_add
+#define arch_atomic64_fetch_add_acquire atomic64_fetch_add_acquire
+#define arch_atomic64_fetch_add_release atomic64_fetch_add_release
+#define arch_atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+
 #ifndef atomic64_fetch_add_relaxed
 #define atomic64_fetch_add_acquire atomic64_fetch_add
 #define atomic64_fetch_add_release atomic64_fetch_add
@@ -1286,6 +1421,13 @@ atomic64_fetch_add(s64 i, atomic64_t *v)
 
 #endif /* atomic64_fetch_add_relaxed */
 
+#define arch_atomic64_sub atomic64_sub
+
+#define arch_atomic64_sub_return atomic64_sub_return
+#define arch_atomic64_sub_return_acquire atomic64_sub_return_acquire
+#define arch_atomic64_sub_return_release atomic64_sub_return_release
+#define arch_atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+
 #ifndef atomic64_sub_return_relaxed
 #define atomic64_sub_return_acquire atomic64_sub_return
 #define atomic64_sub_return_release atomic64_sub_return
@@ -1328,6 +1470,11 @@ atomic64_sub_return(s64 i, atomic64_t *v)
 
 #endif /* atomic64_sub_return_relaxed */
 
+#define arch_atomic64_fetch_sub atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
+#define arch_atomic64_fetch_sub_release atomic64_fetch_sub_release
+#define arch_atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+
 #ifndef atomic64_fetch_sub_relaxed
 #define atomic64_fetch_sub_acquire atomic64_fetch_sub
 #define atomic64_fetch_sub_release atomic64_fetch_sub
@@ -1370,6 +1517,8 @@ atomic64_fetch_sub(s64 i, atomic64_t *v)
 
 #endif /* atomic64_fetch_sub_relaxed */
 
+#define arch_atomic64_inc atomic64_inc
+
 #ifndef atomic64_inc
 static __always_inline void
 atomic64_inc(atomic64_t *v)
@@ -1379,6 +1528,11 @@ atomic64_inc(atomic64_t *v)
 #define atomic64_inc atomic64_inc
 #endif
 
+#define arch_atomic64_inc_return atomic64_inc_return
+#define arch_atomic64_inc_return_acquire atomic64_inc_return_acquire
+#define arch_atomic64_inc_return_release atomic64_inc_return_release
+#define arch_atomic64_inc_return_relaxed atomic64_inc_return_relaxed
+
 #ifndef atomic64_inc_return_relaxed
 #ifdef atomic64_inc_return
 #define atomic64_inc_return_acquire atomic64_inc_return
@@ -1460,6 +1614,11 @@ atomic64_inc_return(atomic64_t *v)
 
 #endif /* atomic64_inc_return_relaxed */
 
+#define arch_atomic64_fetch_inc atomic64_fetch_inc
+#define arch_atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
+#define arch_atomic64_fetch_inc_release atomic64_fetch_inc_release
+#define arch_atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
+
 #ifndef atomic64_fetch_inc_relaxed
 #ifdef atomic64_fetch_inc
 #define atomic64_fetch_inc_acquire atomic64_fetch_inc
@@ -1541,6 +1700,8 @@ atomic64_fetch_inc(atomic64_t *v)
 
 #endif /* atomic64_fetch_inc_relaxed */
 
+#define arch_atomic64_dec atomic64_dec
+
 #ifndef atomic64_dec
 static __always_inline void
 atomic64_dec(atomic64_t *v)
@@ -1550,6 +1711,11 @@ atomic64_dec(atomic64_t *v)
 #define atomic64_dec atomic64_dec
 #endif
 
+#define arch_atomic64_dec_return atomic64_dec_return
+#define arch_atomic64_dec_return_acquire atomic64_dec_return_acquire
+#define arch_atomic64_dec_return_release atomic64_dec_return_release
+#define arch_atomic64_dec_return_relaxed atomic64_dec_return_relaxed
+
 #ifndef atomic64_dec_return_relaxed
 #ifdef atomic64_dec_return
 #define atomic64_dec_return_acquire atomic64_dec_return
@@ -1631,6 +1797,11 @@ atomic64_dec_return(atomic64_t *v)
 
 #endif /* atomic64_dec_return_relaxed */
 
+#define arch_atomic64_fetch_dec atomic64_fetch_dec
+#define arch_atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
+#define arch_atomic64_fetch_dec_release atomic64_fetch_dec_release
+#define arch_atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
+
 #ifndef atomic64_fetch_dec_relaxed
 #ifdef atomic64_fetch_dec
 #define atomic64_fetch_dec_acquire atomic64_fetch_dec
@@ -1712,6 +1883,13 @@ atomic64_fetch_dec(atomic64_t *v)
 
 #endif /* atomic64_fetch_dec_relaxed */
 
+#define arch_atomic64_and atomic64_and
+
+#define arch_atomic64_fetch_and atomic64_fetch_and
+#define arch_atomic64_fetch_and_acquire atomic64_fetch_and_acquire
+#define arch_atomic64_fetch_and_release atomic64_fetch_and_release
+#define arch_atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+
 #ifndef atomic64_fetch_and_relaxed
 #define atomic64_fetch_and_acquire atomic64_fetch_and
 #define atomic64_fetch_and_release atomic64_fetch_and
@@ -1754,6 +1932,8 @@ atomic64_fetch_and(s64 i, atomic64_t *v)
 
 #endif /* atomic64_fetch_and_relaxed */
 
+#define arch_atomic64_andnot atomic64_andnot
+
 #ifndef atomic64_andnot
 static __always_inline void
 atomic64_andnot(s64 i, atomic64_t *v)
@@ -1763,6 +1943,11 @@ atomic64_andnot(s64 i, atomic64_t *v)
 #define atomic64_andnot atomic64_andnot
 #endif
 
+#define arch_atomic64_fetch_andnot atomic64_fetch_andnot
+#define arch_atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
+#define arch_atomic64_fetch_andnot_release atomic64_fetch_andnot_release
+#define arch_atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
+
 #ifndef atomic64_fetch_andnot_relaxed
 #ifdef atomic64_fetch_andnot
 #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
@@ -1844,6 +2029,13 @@ atomic64_fetch_andnot(s64 i, atomic64_t *v)
 
 #endif /* atomic64_fetch_andnot_relaxed */
 
+#define arch_atomic64_or atomic64_or
+
+#define arch_atomic64_fetch_or atomic64_fetch_or
+#define arch_atomic64_fetch_or_acquire atomic64_fetch_or_acquire
+#define arch_atomic64_fetch_or_release atomic64_fetch_or_release
+#define arch_atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+
 #ifndef atomic64_fetch_or_relaxed
 #define atomic64_fetch_or_acquire atomic64_fetch_or
 #define atomic64_fetch_or_release atomic64_fetch_or
@@ -1886,6 +2078,13 @@ atomic64_fetch_or(s64 i, atomic64_t *v)
 
 #endif /* atomic64_fetch_or_relaxed */
 
+#define arch_atomic64_xor atomic64_xor
+
+#define arch_atomic64_fetch_xor atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
+#define arch_atomic64_fetch_xor_release atomic64_fetch_xor_release
+#define arch_atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+
 #ifndef atomic64_fetch_xor_relaxed
 #define atomic64_fetch_xor_acquire atomic64_fetch_xor
 #define atomic64_fetch_xor_release atomic64_fetch_xor
@@ -1928,6 +2127,11 @@ atomic64_fetch_xor(s64 i, atomic64_t *v)
 
 #endif /* atomic64_fetch_xor_relaxed */
 
+#define arch_atomic64_xchg atomic64_xchg
+#define arch_atomic64_xchg_acquire atomic64_xchg_acquire
+#define arch_atomic64_xchg_release atomic64_xchg_release
+#define arch_atomic64_xchg_relaxed atomic64_xchg_relaxed
+
 #ifndef atomic64_xchg_relaxed
 #define atomic64_xchg_acquire atomic64_xchg
 #define atomic64_xchg_release atomic64_xchg
@@ -1970,6 +2174,11 @@ atomic64_xchg(atomic64_t *v, s64 i)
 
 #endif /* atomic64_xchg_relaxed */
 
+#define arch_atomic64_cmpxchg atomic64_cmpxchg
+#define arch_atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
+#define arch_atomic64_cmpxchg_release atomic64_cmpxchg_release
+#define arch_atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
+
 #ifndef atomic64_cmpxchg_relaxed
 #define atomic64_cmpxchg_acquire atomic64_cmpxchg
 #define atomic64_cmpxchg_release atomic64_cmpxchg
@@ -2012,6 +2221,11 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 
 #endif /* atomic64_cmpxchg_relaxed */
 
+#define arch_atomic64_try_cmpxchg atomic64_try_cmpxchg
+#define arch_atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
+#define arch_atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
+#define arch_atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
+
 #ifndef atomic64_try_cmpxchg_relaxed
 #ifdef atomic64_try_cmpxchg
 #define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg
@@ -2109,6 +2323,8 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
 
 #endif /* atomic64_try_cmpxchg_relaxed */
 
+#define arch_atomic64_sub_and_test atomic64_sub_and_test
+
 #ifndef atomic64_sub_and_test
 /**
  * atomic64_sub_and_test - subtract value from variable and test result
@@ -2127,6 +2343,8 @@ atomic64_sub_and_test(s64 i, atomic64_t *v)
 #define atomic64_sub_and_test atomic64_sub_and_test
 #endif
 
+#define arch_atomic64_dec_and_test atomic64_dec_and_test
+
 #ifndef atomic64_dec_and_test
 /**
  * atomic64_dec_and_test - decrement and test
@@ -2144,6 +2362,8 @@ atomic64_dec_and_test(atomic64_t *v)
 #define atomic64_dec_and_test atomic64_dec_and_test
 #endif
 
+#define arch_atomic64_inc_and_test atomic64_inc_and_test
+
 #ifndef atomic64_inc_and_test
 /**
  * atomic64_inc_and_test - increment and test
@@ -2161,6 +2381,8 @@ atomic64_inc_and_test(atomic64_t *v)
 #define atomic64_inc_and_test atomic64_inc_and_test
 #endif
 
+#define arch_atomic64_add_negative atomic64_add_negative
+
 #ifndef atomic64_add_negative
 /**
  * atomic64_add_negative - add and test if negative
@@ -2179,6 +2401,8 @@ atomic64_add_negative(s64 i, atomic64_t *v)
 #define atomic64_add_negative atomic64_add_negative
 #endif
 
+#define arch_atomic64_fetch_add_unless atomic64_fetch_add_unless
+
 #ifndef atomic64_fetch_add_unless
 /**
  * atomic64_fetch_add_unless - add unless the number is already a given value
@@ -2204,6 +2428,8 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 #define atomic64_fetch_add_unless atomic64_fetch_add_unless
 #endif
 
+#define arch_atomic64_add_unless atomic64_add_unless
+
 #ifndef atomic64_add_unless
 /**
  * atomic64_add_unless - add unless the number is already a given value
@@ -2222,6 +2448,8 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
 #define atomic64_add_unless atomic64_add_unless
 #endif
 
+#define arch_atomic64_inc_not_zero atomic64_inc_not_zero
+
 #ifndef atomic64_inc_not_zero
 /**
  * atomic64_inc_not_zero - increment unless the number is zero
@@ -2238,6 +2466,8 @@ atomic64_inc_not_zero(atomic64_t *v)
 #define atomic64_inc_not_zero atomic64_inc_not_zero
 #endif
 
+#define arch_atomic64_inc_unless_negative atomic64_inc_unless_negative
+
 #ifndef atomic64_inc_unless_negative
 static __always_inline bool
 atomic64_inc_unless_negative(atomic64_t *v)
@@ -2254,6 +2484,8 @@ atomic64_inc_unless_negative(atomic64_t *v)
 #define atomic64_inc_unless_negative atomic64_inc_unless_negative
 #endif
 
+#define arch_atomic64_dec_unless_positive atomic64_dec_unless_positive
+
 #ifndef atomic64_dec_unless_positive
 static __always_inline bool
 atomic64_dec_unless_positive(atomic64_t *v)
@@ -2270,6 +2502,8 @@ atomic64_dec_unless_positive(atomic64_t *v)
 #define atomic64_dec_unless_positive atomic64_dec_unless_positive
 #endif
 
+#define arch_atomic64_dec_if_positive atomic64_dec_if_positive
+
 #ifndef atomic64_dec_if_positive
 static __always_inline s64
 atomic64_dec_if_positive(atomic64_t *v)
@@ -2288,4 +2522,4 @@ atomic64_dec_if_positive(atomic64_t *v)
 #endif
 
 #endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 1fac0941c79bf0ae100723cc2ac9b94061f0b67a
+// 9d95b56f98d82a2a26c7b79ccdd0c47572d50a6f
index 4a20b75..7c27d7b 100644 (file)
@@ -141,6 +141,5 @@ extern int do_execveat(int, struct filename *,
                       const char __user * const __user *,
                       const char __user * const __user *,
                       int);
-int do_execve_file(struct file *file, void *__argv, void *__envp);
 
 #endif /* _LINUX_BINFMTS_H */
index 4671fbf..7f475d5 100644 (file)
@@ -18,8 +18,7 @@
  * position @h. For example
  * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
  */
-#if !defined(__ASSEMBLY__) && \
-       (!defined(CONFIG_CC_IS_GCC) || CONFIG_GCC_VERSION >= 49000)
+#if !defined(__ASSEMBLY__)
 #include <linux/build_bug.h>
 #define GENMASK_INPUT_CHECK(h, l) \
        (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
index 8fd9009..5724141 100644 (file)
@@ -590,6 +590,7 @@ struct request_queue {
        u64                     write_hints[BLK_MAX_WRITE_HINTS];
 };
 
+/* Keep blk_queue_flag_name[] in sync with the definitions below */
 #define QUEUE_FLAG_STOPPED     0       /* queue is stopped */
 #define QUEUE_FLAG_DYING       1       /* queue being torn down */
 #define QUEUE_FLAG_NOMERGES     3      /* disable merge attempts */
index c66c545..2c6f266 100644 (file)
@@ -210,6 +210,9 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)                                     \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)
 
+#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk)                             \
+       BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE)
+
 #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)                                       \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)
 
@@ -401,6 +404,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
index 4052d64..722f799 100644 (file)
@@ -8,6 +8,7 @@
 enum netns_bpf_attach_type {
        NETNS_BPF_INVALID = -1,
        NETNS_BPF_FLOW_DISSECTOR = 0,
+       NETNS_BPF_SK_LOOKUP,
        MAX_NETNS_BPF_ATTACH_TYPE
 };
 
@@ -17,6 +18,8 @@ to_netns_bpf_attach_type(enum bpf_attach_type attach_type)
        switch (attach_type) {
        case BPF_FLOW_DISSECTOR:
                return NETNS_BPF_FLOW_DISSECTOR;
+       case BPF_SK_LOOKUP:
+               return NETNS_BPF_SK_LOOKUP;
        default:
                return NETNS_BPF_INVALID;
        }
@@ -33,7 +36,7 @@ int netns_bpf_prog_query(const union bpf_attr *attr,
                         union bpf_attr __user *uattr);
 int netns_bpf_prog_attach(const union bpf_attr *attr,
                          struct bpf_prog *prog);
-int netns_bpf_prog_detach(const union bpf_attr *attr);
+int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
 int netns_bpf_link_create(const union bpf_attr *attr,
                          struct bpf_prog *prog);
 #else
@@ -49,7 +52,8 @@ static inline int netns_bpf_prog_attach(const union bpf_attr *attr,
        return -EOPNOTSUPP;
 }
 
-static inline int netns_bpf_prog_detach(const union bpf_attr *attr)
+static inline int netns_bpf_prog_detach(const union bpf_attr *attr,
+                                       enum bpf_prog_type ptype)
 {
        return -EOPNOTSUPP;
 }
index 07052d4..bae557f 100644 (file)
@@ -92,6 +92,10 @@ struct bpf_map_ops {
        int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
        __poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
                             struct poll_table_struct *pts);
+
+       /* BTF name and id of struct allocated by map_alloc */
+       const char * const map_btf_name;
+       int *map_btf_id;
 };
 
 struct bpf_map_memory {
@@ -245,6 +249,7 @@ enum bpf_arg_type {
        ARG_PTR_TO_INT,         /* pointer to int */
        ARG_PTR_TO_LONG,        /* pointer to long */
        ARG_PTR_TO_SOCKET,      /* pointer to bpf_sock (fullsock) */
+       ARG_PTR_TO_SOCKET_OR_NULL,      /* pointer to bpf_sock (fullsock) or NULL */
        ARG_PTR_TO_BTF_ID,      /* pointer to in-kernel struct */
        ARG_PTR_TO_ALLOC_MEM,   /* pointer to dynamically allocated memory */
        ARG_PTR_TO_ALLOC_MEM_OR_NULL,   /* pointer to dynamically allocated memory or NULL */
@@ -261,6 +266,7 @@ enum bpf_return_type {
        RET_PTR_TO_TCP_SOCK_OR_NULL,    /* returns a pointer to a tcp_sock or NULL */
        RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */
        RET_PTR_TO_ALLOC_MEM_OR_NULL,   /* returns a pointer to dynamically allocated memory or NULL */
+       RET_PTR_TO_BTF_ID_OR_NULL,      /* returns a pointer to a btf_id or NULL */
 };
 
 /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
@@ -283,6 +289,12 @@ struct bpf_func_proto {
                enum bpf_arg_type arg_type[5];
        };
        int *btf_id; /* BTF ids of arguments */
+       bool (*check_btf_id)(u32 btf_id, u32 arg); /* if the argument btf_id is
+                                                   * valid. Often used if more
+                                                   * than one btf id is permitted
+                                                   * for this argument.
+                                                   */
+       int *ret_btf_id; /* return value btf_id */
 };
 
 /* bpf_context is intentionally undefined structure. Pointer to bpf_context is
@@ -656,6 +668,7 @@ struct bpf_jit_poke_descriptor {
 struct bpf_ctx_arg_aux {
        u32 offset;
        enum bpf_reg_type reg_type;
+       u32 btf_id;
 };
 
 struct bpf_prog_aux {
@@ -917,6 +930,9 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs,
 
 void bpf_prog_array_delete_safe(struct bpf_prog_array *progs,
                                struct bpf_prog *old_prog);
+int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index);
+int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
+                            struct bpf_prog *prog);
 int bpf_prog_array_copy_info(struct bpf_prog_array *array,
                             u32 *prog_ids, u32 request_cnt,
                             u32 *prog_cnt);
@@ -1109,6 +1125,11 @@ static inline bool bpf_allow_ptr_leaks(void)
        return perfmon_capable();
 }
 
+static inline bool bpf_allow_ptr_to_map_access(void)
+{
+       return perfmon_capable();
+}
+
 static inline bool bpf_bypass_spec_v1(void)
 {
        return perfmon_capable();
@@ -1256,6 +1277,7 @@ struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
 void __cpu_map_flush(void);
 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
                    struct net_device *dev_rx);
+bool cpu_map_prog_allowed(struct bpf_map *map);
 
 /* Return map's numa specified by userspace */
 static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
@@ -1416,6 +1438,11 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu,
        return 0;
 }
 
+static inline bool cpu_map_prog_allowed(struct bpf_map *map)
+{
+       return false;
+}
+
 static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
                                enum bpf_prog_type type)
 {
@@ -1543,13 +1570,16 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
 
 #if defined(CONFIG_BPF_STREAM_PARSER)
-int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which);
+int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
+                        struct bpf_prog *old, u32 which);
 int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
+int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
 void sock_map_unhash(struct sock *sk);
 void sock_map_close(struct sock *sk, long timeout);
 #else
 static inline int sock_map_prog_update(struct bpf_map *map,
-                                      struct bpf_prog *prog, u32 which)
+                                      struct bpf_prog *prog,
+                                      struct bpf_prog *old, u32 which)
 {
        return -EOPNOTSUPP;
 }
@@ -1559,6 +1589,12 @@ static inline int sock_map_get_from_fd(const union bpf_attr *attr,
 {
        return -EINVAL;
 }
+
+static inline int sock_map_prog_detach(const union bpf_attr *attr,
+                                      enum bpf_prog_type ptype)
+{
+       return -EOPNOTSUPP;
+}
 #endif /* CONFIG_BPF_STREAM_PARSER */
 
 #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
@@ -1607,6 +1643,7 @@ extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
 extern const struct bpf_func_proto bpf_get_current_comm_proto;
 extern const struct bpf_func_proto bpf_get_stackid_proto;
 extern const struct bpf_func_proto bpf_get_stack_proto;
+extern const struct bpf_func_proto bpf_get_task_stack_proto;
 extern const struct bpf_func_proto bpf_sock_map_update_proto;
 extern const struct bpf_func_proto bpf_sock_hash_update_proto;
 extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
@@ -1629,6 +1666,11 @@ extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
 extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
 extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
 extern const struct bpf_func_proto bpf_ringbuf_query_proto;
+extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
+extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
 
 const struct bpf_func_proto *bpf_tracing_func_proto(
        enum bpf_func_id func_id, const struct bpf_prog *prog);
index a18ae82..a52a568 100644 (file)
@@ -64,6 +64,8 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2,
 #ifdef CONFIG_INET
 BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport,
              struct sk_reuseport_md, struct sk_reuseport_kern)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_LOOKUP, sk_lookup,
+             struct bpf_sk_lookup, struct bpf_sk_lookup_kern)
 #endif
 #if defined(CONFIG_BPF_JIT)
 BPF_PROG_TYPE(BPF_PROG_TYPE_STRUCT_OPS, bpf_struct_ops,
index ca08db4..53c7bd5 100644 (file)
@@ -379,6 +379,7 @@ struct bpf_verifier_env {
        u32 used_map_cnt;               /* number of used maps */
        u32 id_gen;                     /* used to generate unique reg IDs */
        bool allow_ptr_leaks;
+       bool allow_ptr_to_map_access;
        bool bpf_capable;
        bool bypass_spec_v1;
        bool bypass_spec_v4;
index d815622..2ae3c8e 100644 (file)
@@ -3,22 +3,23 @@
 #define _LINUX_BPFILTER_H
 
 #include <uapi/linux/bpfilter.h>
-#include <linux/umh.h>
+#include <linux/usermode_driver.h>
+#include <linux/sockptr.h>
 
 struct sock;
-int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
+int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval,
                            unsigned int optlen);
 int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
                            int __user *optlen);
+void bpfilter_umh_cleanup(struct umd_info *info);
+
 struct bpfilter_umh_ops {
-       struct umh_info info;
+       struct umd_info info;
        /* since ip_getsockopt() can run in parallel, serialize access to umh */
        struct mutex lock;
-       int (*sockopt)(struct sock *sk, int optname,
-                      char __user *optval,
+       int (*sockopt)(struct sock *sk, int optname, sockptr_t optval,
                       unsigned int optlen, bool is_set);
        int (*start)(void);
-       bool stop;
 };
 extern struct bpfilter_umh_ops bpfilter_ops;
 #endif
index 5c1ea99..8b81fbb 100644 (file)
@@ -82,6 +82,11 @@ static inline bool btf_type_is_int(const struct btf_type *t)
        return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
 }
 
+static inline bool btf_type_is_small_int(const struct btf_type *t)
+{
+       return btf_type_is_int(t) && t->size <= sizeof(u64);
+}
+
 static inline bool btf_type_is_enum(const struct btf_type *t)
 {
        return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM;
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
new file mode 100644 (file)
index 0000000..4867d54
--- /dev/null
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_BTF_IDS_H
+#define _LINUX_BTF_IDS_H
+
+#ifdef CONFIG_DEBUG_INFO_BTF
+
+#include <linux/compiler.h> /* for __PASTE */
+
+/*
+ * Following macros help to define lists of BTF IDs placed
+ * in .BTF_ids section. They are initially filled with zeros
+ * (during compilation) and resolved later during the
+ * linking phase by resolve_btfids tool.
+ *
+ * Any change in list layout must be reflected in resolve_btfids
+ * tool logic.
+ */
+
+#define BTF_IDS_SECTION ".BTF_ids"
+
+#define ____BTF_ID(symbol)                             \
+asm(                                                   \
+".pushsection " BTF_IDS_SECTION ",\"a\";       \n"     \
+".local " #symbol " ;                          \n"     \
+".type  " #symbol ", STT_OBJECT;               \n"     \
+".size  " #symbol ", 4;                        \n"     \
+#symbol ":                                     \n"     \
+".zero 4                                       \n"     \
+".popsection;                                  \n");
+
+#define __BTF_ID(symbol) \
+       ____BTF_ID(symbol)
+
+#define __ID(prefix) \
+       __PASTE(prefix, __COUNTER__)
+
+/*
+ * The BTF_ID defines unique symbol for each ID pointing
+ * to 4 zero bytes.
+ */
+#define BTF_ID(prefix, name) \
+       __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__))
+
+/*
+ * The BTF_ID_LIST macro defines pure (unsorted) list
+ * of BTF IDs, with following layout:
+ *
+ * BTF_ID_LIST(list1)
+ * BTF_ID(type1, name1)
+ * BTF_ID(type2, name2)
+ *
+ * list1:
+ * __BTF_ID__type1__name1__1:
+ * .zero 4
+ * __BTF_ID__type2__name2__2:
+ * .zero 4
+ *
+ */
+#define __BTF_ID_LIST(name, scope)                     \
+asm(                                                   \
+".pushsection " BTF_IDS_SECTION ",\"a\";       \n"     \
+"." #scope " " #name ";                        \n"     \
+#name ":;                                      \n"     \
+".popsection;                                  \n");   \
+
+#define BTF_ID_LIST(name)                              \
+__BTF_ID_LIST(name, local)                             \
+extern u32 name[];
+
+#define BTF_ID_LIST_GLOBAL(name)                       \
+__BTF_ID_LIST(name, globl)
+
+/*
+ * The BTF_ID_UNUSED macro defines 4 zero bytes.
+ * It's used when we want to define 'unused' entry
+ * in BTF_ID_LIST, like:
+ *
+ *   BTF_ID_LIST(bpf_skb_output_btf_ids)
+ *   BTF_ID(struct, sk_buff)
+ *   BTF_ID_UNUSED
+ *   BTF_ID(struct, task_struct)
+ */
+
+#define BTF_ID_UNUSED                                  \
+asm(                                                   \
+".pushsection " BTF_IDS_SECTION ",\"a\";       \n"     \
+".zero 4                                       \n"     \
+".popsection;                                  \n");
+
+#else
+
+#define BTF_ID_LIST(name) static u32 name[5];
+#define BTF_ID(prefix, name)
+#define BTF_ID_UNUSED
+#define BTF_ID_LIST_GLOBAL(name) u32 name[1];
+
+#endif /* CONFIG_DEBUG_INFO_BTF */
+
+#ifdef CONFIG_NET
+/* Define a list of socket types which can be the argument for
+ * skc_to_*_sock() helpers. All these sockets should have
+ * sock_common as the first argument in its memory layout.
+ */
+#define BTF_SOCK_TYPE_xxx \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, inet_sock)                    \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, inet_connection_sock)    \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, inet_request_sock)        \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, inet_timewait_sock)        \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, request_sock)                  \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, sock)                         \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, sock_common)           \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, tcp_sock)                      \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, tcp_request_sock)          \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock)          \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock)                    \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock)                      \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock)
+
+enum {
+#define BTF_SOCK_TYPE(name, str) name,
+BTF_SOCK_TYPE_xxx
+#undef BTF_SOCK_TYPE
+MAX_BTF_SOCK_TYPE,
+};
+
+extern u32 btf_sock_ids[];
+#endif
+
+#endif
index a954def..900b9f4 100644 (file)
@@ -34,7 +34,7 @@
 struct can_skb_priv {
        int ifindex;
        int skbcnt;
-       struct can_frame cf[0];
+       struct can_frame cf[];
 };
 
 static inline struct can_skb_priv *can_skb_prv(struct sk_buff *skb)
index 60de3fe..405657a 100644 (file)
@@ -36,7 +36,7 @@ struct cb710_chip {
        unsigned                slot_mask;
        unsigned                slots;
        spinlock_t              irq_lock;
-       struct cb710_slot       slot[0];
+       struct cb710_slot       slot[];
 };
 
 /* NOTE: cb710_chip.slots is modified only during device init/exit and
index 2247e71..e5ed1c5 100644 (file)
@@ -52,8 +52,7 @@ struct ceph_options {
        unsigned long osd_idle_ttl;             /* jiffies */
        unsigned long osd_keepalive_timeout;    /* jiffies */
        unsigned long osd_request_timeout;      /* jiffies */
-
-       u32 osd_req_flags;  /* CEPH_OSD_FLAG_*, applied to each OSD request */
+       u32 read_from_replica;  /* CEPH_OSD_FLAG_BALANCE/LOCALIZE_READS */
 
        /*
         * any type that can't be simply compared or doesn't need
@@ -76,6 +75,7 @@ struct ceph_options {
 #define CEPH_OSD_KEEPALIVE_DEFAULT     msecs_to_jiffies(5 * 1000)
 #define CEPH_OSD_IDLE_TTL_DEFAULT      msecs_to_jiffies(60 * 1000)
 #define CEPH_OSD_REQUEST_TIMEOUT_DEFAULT 0  /* no timeout */
+#define CEPH_READ_FROM_REPLICA_DEFAULT 0  /* read from primary */
 
 #define CEPH_MONC_HUNT_INTERVAL                msecs_to_jiffies(3 * 1000)
 #define CEPH_MONC_PING_INTERVAL                msecs_to_jiffies(10 * 1000)
index 5266115..fee0b55 100644 (file)
@@ -790,7 +790,9 @@ struct sock_cgroup_data {
        union {
 #ifdef __LITTLE_ENDIAN
                struct {
-                       u8      is_data;
+                       u8      is_data : 1;
+                       u8      no_refcnt : 1;
+                       u8      unused : 6;
                        u8      padding;
                        u16     prioidx;
                        u32     classid;
@@ -800,7 +802,9 @@ struct sock_cgroup_data {
                        u32     classid;
                        u16     prioidx;
                        u8      padding;
-                       u8      is_data;
+                       u8      unused : 6;
+                       u8      no_refcnt : 1;
+                       u8      is_data : 1;
                } __packed;
 #endif
                u64             val;
index 4598e4d..618838c 100644 (file)
@@ -822,6 +822,7 @@ extern spinlock_t cgroup_sk_update_lock;
 
 void cgroup_sk_alloc_disable(void);
 void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
+void cgroup_sk_clone(struct sock_cgroup_data *skcd);
 void cgroup_sk_free(struct sock_cgroup_data *skcd);
 
 static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
@@ -835,7 +836,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
         */
        v = READ_ONCE(skcd->val);
 
-       if (v & 1)
+       if (v & 3)
                return &cgrp_dfl_root.cgrp;
 
        return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
@@ -847,6 +848,7 @@ static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
 #else  /* CONFIG_CGROUP_DATA */
 
 static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
+static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
 static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
 
 #endif /* CONFIG_CGROUP_DATA */
index e90100c..c4255d8 100644 (file)
@@ -737,10 +737,6 @@ asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
 asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, compat_size_t len,
                            unsigned flags, struct sockaddr __user *addr,
                            int __user *addrlen);
-asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
-                                     char __user *optval, unsigned int optlen);
-asmlinkage long compat_sys_getsockopt(int fd, int level, int optname,
-                                     char __user *optval, int __user *optlen);
 asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg,
                                   unsigned flags);
 asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg,
index ee37256..5e55302 100644 (file)
 #define __no_sanitize_thread
 #endif
 
+#if __has_feature(undefined_behavior_sanitizer)
+/* GCC does not have __SANITIZE_UNDEFINED__ */
+#define __no_sanitize_undefined \
+               __attribute__((no_sanitize("undefined")))
+#else
+#define __no_sanitize_undefined
+#endif
+
 /*
  * Not all versions of clang implement the the type-generic versions
  * of the builtin overflow checkers. Fortunately, clang implements
index 7dd4e03..0b1dc61 100644 (file)
@@ -11,7 +11,7 @@
                     + __GNUC_PATCHLEVEL__)
 
 /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 */
-#if GCC_VERSION < 40800
+#if GCC_VERSION < 40900
 # error Sorry, your compiler is too old - please upgrade it.
 #endif
 
 #define __no_sanitize_thread
 #endif
 
+#if __has_attribute(__no_sanitize_undefined__)
+#define __no_sanitize_undefined __attribute__((no_sanitize_undefined))
+#else
+#define __no_sanitize_undefined
+#endif
+
 #if GCC_VERSION >= 50100
 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
 #endif
index 30827f8..204e768 100644 (file)
@@ -123,7 +123,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 #ifdef CONFIG_DEBUG_ENTRY
 /* Begin/end of an instrumentation safe region */
 #define instrumentation_begin() ({                                     \
-       asm volatile("%c0:\n\t"                                         \
+       asm volatile("%c0: nop\n\t"                                             \
                     ".pushsection .discard.instr_begin\n\t"            \
                     ".long %c0b - .\n\t"                               \
                     ".popsection\n\t" : : "i" (__COUNTER__));          \
index cdf0165..c8f03d2 100644 (file)
@@ -40,6 +40,7 @@
 # define __GCC4_has_attribute___noclone__             1
 # define __GCC4_has_attribute___nonstring__           0
 # define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
+# define __GCC4_has_attribute___no_sanitize_undefined__ (__GNUC_MINOR__ >= 9)
 # define __GCC4_has_attribute___fallthrough__         0
 #endif
 
index 21aed09..01dd58c 100644 (file)
@@ -5,20 +5,20 @@
 #ifndef __ASSEMBLY__
 
 #ifdef __CHECKER__
-# define __user                __attribute__((noderef, address_space(1)))
 # define __kernel      __attribute__((address_space(0)))
+# define __user                __attribute__((noderef, address_space(__user)))
 # define __safe                __attribute__((safe))
 # define __force       __attribute__((force))
 # define __nocast      __attribute__((nocast))
-# define __iomem       __attribute__((noderef, address_space(2)))
+# define __iomem       __attribute__((noderef, address_space(__iomem)))
 # define __must_hold(x)        __attribute__((context(x,1,1)))
 # define __acquires(x) __attribute__((context(x,0,1)))
 # define __releases(x) __attribute__((context(x,1,0)))
 # define __acquire(x)  __context__(x,1)
 # define __release(x)  __context__(x,-1)
 # define __cond_lock(x,c)      ((c) ? ({ __acquire(x); 1; }) : 0)
-# define __percpu      __attribute__((noderef, address_space(3)))
-# define __rcu         __attribute__((noderef, address_space(4)))
+# define __percpu      __attribute__((noderef, address_space(__percpu)))
+# define __rcu         __attribute__((noderef, address_space(__rcu)))
 # define __private     __attribute__((noderef))
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
@@ -118,10 +118,6 @@ struct ftrace_likely_data {
 #define notrace                        __attribute__((__no_instrument_function__))
 #endif
 
-/* Section for code which can't be instrumented at all */
-#define noinstr                                                                \
-       noinline notrace __attribute((__section__(".noinstr.text")))
-
 /*
  * it doesn't make sense on ARM (currently the only user of __naked)
  * to trace naked functions because then mcount is called without
@@ -193,16 +189,18 @@ struct ftrace_likely_data {
 
 #define __no_kcsan __no_sanitize_thread
 #ifdef __SANITIZE_THREAD__
-# define __no_kcsan_or_inline __no_kcsan notrace __maybe_unused
-# define __no_sanitize_or_inline __no_kcsan_or_inline
-#else
-# define __no_kcsan_or_inline __always_inline
+# define __no_sanitize_or_inline __no_kcsan notrace __maybe_unused
 #endif
 
 #ifndef __no_sanitize_or_inline
 #define __no_sanitize_or_inline __always_inline
 #endif
 
+/* Section for code which can't be instrumented at all */
+#define noinstr                                                                \
+       noinline notrace __attribute((__section__(".noinstr.text")))    \
+       __no_kcsan __no_sanitize_address
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
@@ -254,32 +252,8 @@ struct ftrace_likely_data {
  * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
  *                            non-scalar types unchanged.
  */
-#if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 40900) || defined(__CHECKER__)
 /*
- * We build this out of a couple of helper macros in a vain attempt to
- * help you keep your lunch down while reading it.
- */
-#define __pick_scalar_type(x, type, otherwise)                                 \
-       __builtin_choose_expr(__same_type(x, type), (type)0, otherwise)
-
-/*
- * 'char' is not type-compatible with either 'signed char' or 'unsigned char',
- * so we include the naked type here as well as the signed/unsigned variants.
- */
-#define __pick_integer_type(x, type, otherwise)                                        \
-       __pick_scalar_type(x, type,                                             \
-               __pick_scalar_type(x, unsigned type,                            \
-                       __pick_scalar_type(x, signed type, otherwise)))
-
-#define __unqual_scalar_typeof(x) typeof(                                      \
-       __pick_integer_type(x, char,                                            \
-               __pick_integer_type(x, short,                                   \
-                       __pick_integer_type(x, int,                             \
-                               __pick_integer_type(x, long,                    \
-                                       __pick_integer_type(x, long long, x))))))
-#else
-/*
- * If supported, prefer C11 _Generic for better compile-times. As above, 'char'
+ * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
  * is not type-compatible with 'signed char', and we define a separate case.
  */
 #define __scalar_type_to_expr_cases(type)                              \
@@ -295,7 +269,6 @@ struct ftrace_likely_data {
                         __scalar_type_to_expr_cases(long),             \
                         __scalar_type_to_expr_cases(long long),        \
                         default: (x)))
-#endif
 
 /* Is this type a native word size -- useful for atomic operations */
 #define __native_word(t) \
index 63cb360..851dd1f 100644 (file)
@@ -38,6 +38,11 @@ struct debugfs_regset32 {
        struct device *dev;     /* Optional device for Runtime PM */
 };
 
+struct debugfs_u32_array {
+       u32 *array;
+       u32 n_elements;
+};
+
 extern struct dentry *arch_debugfs_dir;
 
 #define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt)          \
@@ -136,7 +141,8 @@ void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
                          int nregs, void __iomem *base, char *prefix);
 
 void debugfs_create_u32_array(const char *name, umode_t mode,
-                             struct dentry *parent, u32 *array, u32 elements);
+                             struct dentry *parent,
+                             struct debugfs_u32_array *array);
 
 struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name,
                                           struct dentry *parent,
@@ -316,8 +322,8 @@ static inline bool debugfs_initialized(void)
 }
 
 static inline void debugfs_create_u32_array(const char *name, umode_t mode,
-                                           struct dentry *parent, u32 *array,
-                                           u32 elements)
+                                           struct dentry *parent,
+                                           struct debugfs_u32_array *array)
 {
 }
 
index 136f984..5184735 100644 (file)
@@ -77,8 +77,6 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
 void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs);
-struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
-               gfp_t gfp, unsigned long attrs);
 int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
@@ -87,4 +85,5 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
 int dma_direct_supported(struct device *dev, u64 mask);
+bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
 #endif /* _LINUX_DMA_DIRECT_H */
index 78f677c..a33ed39 100644 (file)
@@ -461,6 +461,7 @@ int dma_set_mask(struct device *dev, u64 mask);
 int dma_set_coherent_mask(struct device *dev, u64 mask);
 u64 dma_get_required_mask(struct device *dev);
 size_t dma_max_mapping_size(struct device *dev);
+bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
 unsigned long dma_get_merge_boundary(struct device *dev);
 #else /* CONFIG_HAS_DMA */
 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
@@ -571,6 +572,10 @@ static inline size_t dma_max_mapping_size(struct device *dev)
 {
        return 0;
 }
+static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+       return false;
+}
 static inline unsigned long dma_get_merge_boundary(struct device *dev)
 {
        return 0;
index e1c0333..6283917 100644 (file)
@@ -153,7 +153,7 @@ struct dma_interleaved_template {
        bool dst_sgl;
        size_t numf;
        size_t frame_size;
-       struct data_chunk sgl[0];
+       struct data_chunk sgl[];
 };
 
 /**
@@ -535,7 +535,7 @@ struct dmaengine_unmap_data {
        struct device *dev;
        struct kref kref;
        size_t len;
-       dma_addr_t addr[0];
+       dma_addr_t addr[];
 };
 
 struct dma_async_tx_descriptor;
index 2c6495f..bb35f33 100644 (file)
@@ -350,6 +350,7 @@ void efi_native_runtime_setup(void);
  * associated with ConOut
  */
 #define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID   EFI_GUID(0xe03fc20a, 0x85dc, 0x406e,  0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+#define LINUX_EFI_ARM_CPU_STATE_TABLE_GUID     EFI_GUID(0xef79e4aa, 0x3c3d, 0x4989,  0xb9, 0x02, 0x07, 0xa9, 0x43, 0xe5, 0x50, 0xd2)
 #define LINUX_EFI_LOADER_ENTRY_GUID            EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf,  0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
 #define LINUX_EFI_RANDOM_SEED_TABLE_GUID       EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2,  0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b)
 #define LINUX_EFI_TPM_EVENT_LOG_GUID           EFI_GUID(0xb7799cb0, 0xeca2, 0x4943,  0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa)
@@ -1236,14 +1237,11 @@ struct linux_efi_memreserve {
        struct {
                phys_addr_t     base;
                phys_addr_t     size;
-       } entry[0];
+       } entry[];
 };
 
-#define EFI_MEMRESERVE_SIZE(count) (sizeof(struct linux_efi_memreserve) + \
-       (count) * sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
-
 #define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \
-       / sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
+       / sizeof_field(struct linux_efi_memreserve, entry[0]))
 
 void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size);
 
index a23b26e..969a802 100644 (file)
@@ -86,6 +86,22 @@ struct net_device;
 u32 ethtool_op_get_link(struct net_device *dev);
 int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
 
+
+/**
+ * struct ethtool_link_ext_state_info - link extended state and substate.
+ */
+struct ethtool_link_ext_state_info {
+       enum ethtool_link_ext_state link_ext_state;
+       union {
+               enum ethtool_link_ext_substate_autoneg autoneg;
+               enum ethtool_link_ext_substate_link_training link_training;
+               enum ethtool_link_ext_substate_link_logical_mismatch link_logical_mismatch;
+               enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity;
+               enum ethtool_link_ext_substate_cable_issue cable_issue;
+               u8 __link_ext_substate;
+       };
+};
+
 /**
  * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
  * @index: Index in RX flow hash indirection table
@@ -245,6 +261,11 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
  * @get_link: Report whether physical link is up.  Will only be called if
  *     the netdev is up.  Should usually be set to ethtool_op_get_link(),
  *     which uses netif_carrier_ok().
+ * @get_link_ext_state: Report link extended state. Should set link_ext_state and
+ *     link_ext_substate (link_ext_substate of 0 means link_ext_substate is unknown,
+ *     do not attach ext_substate attribute to netlink message). If link_ext_state
+ *     and link_ext_substate are unknown, return -ENODATA. If not implemented,
+ *     link_ext_state and link_ext_substate will not be sent to userspace.
  * @get_eeprom: Read data from the device EEPROM.
  *     Should fill in the magic field.  Don't need to check len for zero
  *     or wraparound.  Fill in the data argument with the eeprom values
@@ -384,6 +405,8 @@ struct ethtool_ops {
        void    (*set_msglevel)(struct net_device *, u32);
        int     (*nway_reset)(struct net_device *);
        u32     (*get_link)(struct net_device *);
+       int     (*get_link_ext_state)(struct net_device *,
+                                     struct ethtool_link_ext_state_info *);
        int     (*get_eeprom_len)(struct net_device *);
        int     (*get_eeprom)(struct net_device *,
                              struct ethtool_eeprom *, u8 *);
@@ -479,5 +502,37 @@ int ethtool_virtdev_set_link_ksettings(struct net_device *dev,
                                       const struct ethtool_link_ksettings *cmd,
                                       u32 *dev_speed, u8 *dev_duplex);
 
+struct netlink_ext_ack;
+struct phy_device;
+struct phy_tdr_config;
+
+/**
+ * struct ethtool_phy_ops - Optional PHY device options
+ * @get_sset_count: Get number of strings that @get_strings will write.
+ * @get_strings: Return a set of strings that describe the requested objects
+ * @get_stats: Return extended statistics about the PHY device.
+ * @start_cable_test - Start a cable test
+ * @start_cable_test_tdr - Start a Time Domain Reflectometry cable test
+ *
+ * All operations are optional (i.e. the function pointer may be set to %NULL)
+ * and callers must take this into account. Callers must hold the RTNL lock.
+ */
+struct ethtool_phy_ops {
+       int (*get_sset_count)(struct phy_device *dev);
+       int (*get_strings)(struct phy_device *dev, u8 *data);
+       int (*get_stats)(struct phy_device *dev,
+                        struct ethtool_stats *stats, u64 *data);
+       int (*start_cable_test)(struct phy_device *phydev,
+                               struct netlink_ext_ack *extack);
+       int (*start_cable_test_tdr)(struct phy_device *phydev,
+                                   struct netlink_ext_ack *extack,
+                                   const struct phy_tdr_config *config);
+};
+
+/**
+ * ethtool_set_ethtool_phy_ops - Set the ethtool_phy_ops singleton
+ * @ops: Ethtool PHY operations to set
+ */
+void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops);
 
 #endif /* _LINUX_ETHTOOL_H */
index 2593777..d07a6e9 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/kallsyms.h>
 #include <linux/if_vlan.h>
 #include <linux/vmalloc.h>
+#include <linux/sockptr.h>
 #include <crypto/sha.h>
 
 #include <net/sch_generic.h>
@@ -502,13 +503,11 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
                offsetof(TYPE, MEMBER);                                         \
        })
 
-#ifdef CONFIG_COMPAT
 /* A struct sock_filter is architecture independent. */
 struct compat_sock_fprog {
        u16             len;
        compat_uptr_t   filter; /* struct sock_filter * */
 };
-#endif
 
 struct sock_fprog_kern {
        u16                     len;
@@ -884,12 +883,12 @@ void bpf_jit_compile(struct bpf_prog *prog);
 bool bpf_jit_needs_zext(void);
 bool bpf_helper_changes_pkt_data(void *func);
 
-static inline bool bpf_dump_raw_ok(void)
+static inline bool bpf_dump_raw_ok(const struct cred *cred)
 {
        /* Reconstruction of call-sites is dependent on kallsyms,
         * thus make dump the same restriction.
         */
-       return kallsyms_show_value() == 1;
+       return kallsyms_show_value(cred);
 }
 
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
@@ -1278,4 +1277,153 @@ struct bpf_sockopt_kern {
        s32             retval;
 };
 
+int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);
+
+struct bpf_sk_lookup_kern {
+       u16             family;
+       u16             protocol;
+       struct {
+               __be32 saddr;
+               __be32 daddr;
+       } v4;
+       struct {
+               const struct in6_addr *saddr;
+               const struct in6_addr *daddr;
+       } v6;
+       __be16          sport;
+       u16             dport;
+       struct sock     *selected_sk;
+       bool            no_reuseport;
+};
+
+extern struct static_key_false bpf_sk_lookup_enabled;
+
+/* Runners for BPF_SK_LOOKUP programs to invoke on socket lookup.
+ *
+ * Allowed return values for a BPF SK_LOOKUP program are SK_PASS and
+ * SK_DROP. Their meaning is as follows:
+ *
+ *  SK_PASS && ctx.selected_sk != NULL: use selected_sk as lookup result
+ *  SK_PASS && ctx.selected_sk == NULL: continue to htable-based socket lookup
+ *  SK_DROP                           : terminate lookup with -ECONNREFUSED
+ *
+ * This macro aggregates return values and selected sockets from
+ * multiple BPF programs according to following rules in order:
+ *
+ *  1. If any program returned SK_PASS and a non-NULL ctx.selected_sk,
+ *     macro result is SK_PASS and last ctx.selected_sk is used.
+ *  2. If any program returned SK_DROP return value,
+ *     macro result is SK_DROP.
+ *  3. Otherwise result is SK_PASS and ctx.selected_sk is NULL.
+ *
+ * Caller must ensure that the prog array is non-NULL, and that the
+ * array as well as the programs it contains remain valid.
+ */
+#define BPF_PROG_SK_LOOKUP_RUN_ARRAY(array, ctx, func)                 \
+       ({                                                              \
+               struct bpf_sk_lookup_kern *_ctx = &(ctx);               \
+               struct bpf_prog_array_item *_item;                      \
+               struct sock *_selected_sk = NULL;                       \
+               bool _no_reuseport = false;                             \
+               struct bpf_prog *_prog;                                 \
+               bool _all_pass = true;                                  \
+               u32 _ret;                                               \
+                                                                       \
+               migrate_disable();                                      \
+               _item = &(array)->items[0];                             \
+               while ((_prog = READ_ONCE(_item->prog))) {              \
+                       /* restore most recent selection */             \
+                       _ctx->selected_sk = _selected_sk;               \
+                       _ctx->no_reuseport = _no_reuseport;             \
+                                                                       \
+                       _ret = func(_prog, _ctx);                       \
+                       if (_ret == SK_PASS && _ctx->selected_sk) {     \
+                               /* remember last non-NULL socket */     \
+                               _selected_sk = _ctx->selected_sk;       \
+                               _no_reuseport = _ctx->no_reuseport;     \
+                       } else if (_ret == SK_DROP && _all_pass) {      \
+                               _all_pass = false;                      \
+                       }                                               \
+                       _item++;                                        \
+               }                                                       \
+               _ctx->selected_sk = _selected_sk;                       \
+               _ctx->no_reuseport = _no_reuseport;                     \
+               migrate_enable();                                       \
+               _all_pass || _selected_sk ? SK_PASS : SK_DROP;          \
+        })
+
+static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
+                                       const __be32 saddr, const __be16 sport,
+                                       const __be32 daddr, const u16 dport,
+                                       struct sock **psk)
+{
+       struct bpf_prog_array *run_array;
+       struct sock *selected_sk = NULL;
+       bool no_reuseport = false;
+
+       rcu_read_lock();
+       run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
+       if (run_array) {
+               struct bpf_sk_lookup_kern ctx = {
+                       .family         = AF_INET,
+                       .protocol       = protocol,
+                       .v4.saddr       = saddr,
+                       .v4.daddr       = daddr,
+                       .sport          = sport,
+                       .dport          = dport,
+               };
+               u32 act;
+
+               act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN);
+               if (act == SK_PASS) {
+                       selected_sk = ctx.selected_sk;
+                       no_reuseport = ctx.no_reuseport;
+               } else {
+                       selected_sk = ERR_PTR(-ECONNREFUSED);
+               }
+       }
+       rcu_read_unlock();
+       *psk = selected_sk;
+       return no_reuseport;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
+                                       const struct in6_addr *saddr,
+                                       const __be16 sport,
+                                       const struct in6_addr *daddr,
+                                       const u16 dport,
+                                       struct sock **psk)
+{
+       struct bpf_prog_array *run_array;
+       struct sock *selected_sk = NULL;
+       bool no_reuseport = false;
+
+       rcu_read_lock();
+       run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
+       if (run_array) {
+               struct bpf_sk_lookup_kern ctx = {
+                       .family         = AF_INET6,
+                       .protocol       = protocol,
+                       .v6.saddr       = saddr,
+                       .v6.daddr       = daddr,
+                       .sport          = sport,
+                       .dport          = dport,
+               };
+               u32 act;
+
+               act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN);
+               if (act == SK_PASS) {
+                       selected_sk = ctx.selected_sk;
+                       no_reuseport = ctx.no_reuseport;
+               } else {
+                       selected_sk = ERR_PTR(-ECONNREFUSED);
+               }
+       }
+       rcu_read_unlock();
+       *psk = selected_sk;
+       return no_reuseport;
+}
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+
 #endif /* __LINUX_FILTER_H__ */
index 6c4ab4d..f5abba8 100644 (file)
@@ -315,6 +315,7 @@ enum rw_hint {
 #define IOCB_SYNC              (1 << 5)
 #define IOCB_WRITE             (1 << 6)
 #define IOCB_NOWAIT            (1 << 7)
+#define IOCB_NOIO              (1 << 9)
 
 struct kiocb {
        struct file             *ki_filp;
@@ -1917,7 +1918,6 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
                              struct iovec *fast_pointer,
                              struct iovec **ret_pointer);
 
-extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
 extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
 extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
 extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
@@ -2592,7 +2592,6 @@ extern void bdput(struct block_device *);
 extern void invalidate_bdev(struct block_device *);
 extern void iterate_bdevs(void (*)(struct block_device *, void *), void *);
 extern int sync_blockdev(struct block_device *bdev);
-extern void kill_bdev(struct block_device *);
 extern struct super_block *freeze_bdev(struct block_device *);
 extern void emergency_thaw_all(void);
 extern void emergency_thaw_bdev(struct super_block *sb);
@@ -2608,7 +2607,6 @@ static inline bool sb_is_blkdev_sb(struct super_block *sb)
 #else
 static inline void bd_forget(struct inode *inode) {}
 static inline int sync_blockdev(struct block_device *bdev) { return 0; }
-static inline void kill_bdev(struct block_device *bdev) {}
 static inline void invalidate_bdev(struct block_device *bdev) {}
 
 static inline struct super_block *freeze_bdev(struct block_device *sb)
@@ -3035,6 +3033,7 @@ extern int kernel_read_file_from_path_initns(const char *, void **, loff_t *, lo
 extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t,
                                    enum kernel_read_file_id);
 extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *);
+ssize_t __kernel_read(struct file *file, void *buf, size_t count, loff_t *pos);
 extern ssize_t kernel_write(struct file *, const void *, size_t, loff_t *);
 extern ssize_t __kernel_write(struct file *, const void *, size_t, loff_t *);
 extern struct file * open_exec(const char *);
index ce0b5fb..3f0b19d 100644 (file)
@@ -46,7 +46,7 @@ struct fscache_cache_tag {
        unsigned long           flags;
 #define FSCACHE_TAG_RESERVED   0               /* T if tag is reserved for a cache */
        atomic_t                usage;
-       char                    name[0];        /* tag name */
+       char                    name[]; /* tag name */
 };
 
 /*
index 4875dd3..2d92033 100644 (file)
@@ -15,6 +15,7 @@
 #define ENETC_PCS_IF_MODE_SGMII_EN             BIT(0)
 #define ENETC_PCS_IF_MODE_USE_SGMII_AN         BIT(1)
 #define ENETC_PCS_IF_MODE_SGMII_SPEED(x)       (((x) << 2) & GENMASK(3, 2))
+#define ENETC_PCS_IF_MODE_DUPLEX_HALF          BIT(3)
 
 /* Not a mistake, the SerDes PLL needs to be set at 3.125 GHz by Reset
  * Configuration Word (RCW, outside Linux control) for 2.5G SGMII mode. The PCS
index 78b6ea5..f6c6667 100644 (file)
@@ -173,9 +173,9 @@ static inline void hash_del_rcu(struct hlist_node *node)
  * @member: the name of the hlist_node within the struct
  * @key: the key of the objects to iterate over
  */
-#define hash_for_each_possible_rcu(name, obj, member, key)             \
+#define hash_for_each_possible_rcu(name, obj, member, key, cond...)    \
        hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\
-               member)
+               member, ## cond)
 
 /**
  * hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing
index c230b4e..a3a568b 100644 (file)
@@ -48,6 +48,9 @@ struct host1x_client_ops {
  * @channel: host1x channel associated with this client
  * @syncpts: array of syncpoints requested for this client
  * @num_syncpts: number of syncpoints requested for this client
+ * @parent: pointer to parent structure
+ * @usecount: reference count for this structure
+ * @lock: mutex for mutually exclusive concurrency
  */
 struct host1x_client {
        struct list_head list;
index c10617b..b8b8963 100644 (file)
@@ -408,7 +408,7 @@ static inline bool i2c_detect_slave_mode(struct device *dev) { return false; }
  * that are present.  This information is used to grow the driver model tree.
  * For mainboards this is done statically using i2c_register_board_info();
  * bus numbers identify adapters that aren't yet available.  For add-on boards,
- * i2c_new_device() does this dynamically with the adapter already known.
+ * i2c_new_client_device() does this dynamically with the adapter already known.
  */
 struct i2c_board_info {
        char            type[I2C_NAME_SIZE];
@@ -439,14 +439,12 @@ struct i2c_board_info {
 
 
 #if IS_ENABLED(CONFIG_I2C)
-/* Add-on boards should register/unregister their devices; e.g. a board
+/*
+ * Add-on boards should register/unregister their devices; e.g. a board
  * with integrated I2C, a config eeprom, sensors, and a codec that's
  * used in conjunction with the primary hardware.
  */
 struct i2c_client *
-i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
-
-struct i2c_client *
 i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
 
 /* If you don't know the exact address of an I2C device, use this variant
index 81ca84c..0af4d21 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/skbuff.h>
 #include <uapi/linux/icmp.h>
+#include <uapi/linux/errqueue.h>
 
 static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
 {
@@ -35,4 +36,8 @@ static inline bool icmp_is_err(int type)
        return false;
 }
 
+void ip_icmp_error_rfc4884(const struct sk_buff *skb,
+                          struct sock_ee_data_rfc4884 *out,
+                          int thlen, int off);
+
 #endif /* _LINUX_ICMP_H */
index 33d3796..1b3371a 100644 (file)
@@ -13,12 +13,32 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
 #include <linux/netdevice.h>
 
 #if IS_ENABLED(CONFIG_IPV6)
-extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
 
 typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
                             const struct in6_addr *force_saddr);
+#if IS_BUILTIN(CONFIG_IPV6)
+void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+               const struct in6_addr *force_saddr);
+static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
+{
+       icmp6_send(skb, type, code, info, NULL);
+}
+static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
+{
+       BUILD_BUG_ON(fn != icmp6_send);
+       return 0;
+}
+static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
+{
+       BUILD_BUG_ON(fn != icmp6_send);
+       return 0;
+}
+#else
+extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
 extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn);
 extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
+#endif
+
 int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
                               unsigned int data_len);
 
index fe15f83..9f73249 100644 (file)
@@ -3333,13 +3333,17 @@ struct ieee80211_multiple_bssid_configuration {
 #define WLAN_AKM_SUITE_TDLS                    SUITE(0x000FAC, 7)
 #define WLAN_AKM_SUITE_SAE                     SUITE(0x000FAC, 8)
 #define WLAN_AKM_SUITE_FT_OVER_SAE             SUITE(0x000FAC, 9)
+#define WLAN_AKM_SUITE_AP_PEER_KEY             SUITE(0x000FAC, 10)
 #define WLAN_AKM_SUITE_8021X_SUITE_B           SUITE(0x000FAC, 11)
 #define WLAN_AKM_SUITE_8021X_SUITE_B_192       SUITE(0x000FAC, 12)
+#define WLAN_AKM_SUITE_FT_8021X_SHA384         SUITE(0x000FAC, 13)
 #define WLAN_AKM_SUITE_FILS_SHA256             SUITE(0x000FAC, 14)
 #define WLAN_AKM_SUITE_FILS_SHA384             SUITE(0x000FAC, 15)
 #define WLAN_AKM_SUITE_FT_FILS_SHA256          SUITE(0x000FAC, 16)
 #define WLAN_AKM_SUITE_FT_FILS_SHA384          SUITE(0x000FAC, 17)
 #define WLAN_AKM_SUITE_OWE                     SUITE(0x000FAC, 18)
+#define WLAN_AKM_SUITE_FT_PSK_SHA384           SUITE(0x000FAC, 19)
+#define WLAN_AKM_SUITE_PSK_SHA384              SUITE(0x000FAC, 20)
 
 #define WLAN_MAX_KEY_LEN               32
 
index b3a8d30..6479a38 100644 (file)
@@ -49,6 +49,7 @@ struct br_ip_list {
 #define BR_ISOLATED            BIT(16)
 #define BR_MRP_AWARE           BIT(17)
 #define BR_MRP_LOST_CONT       BIT(18)
+#define BR_MRP_LOST_IN_CONT    BIT(19)
 
 #define BR_DEFAULT_AGEING_TIME (300 * HZ)
 
index b05e855..41a5183 100644 (file)
@@ -25,6 +25,8 @@
 #define VLAN_ETH_DATA_LEN      1500    /* Max. octets in payload        */
 #define VLAN_ETH_FRAME_LEN     1518    /* Max. octets in frame sans FCS */
 
+#define VLAN_MAX_DEPTH 8               /* Max. number of nested VLAN tags parsed */
+
 /*
  *     struct vlan_hdr - vlan header
  *     @h_vlan_TCI: priority and VLAN ID
@@ -577,10 +579,10 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
  * Returns the EtherType of the packet, regardless of whether it is
  * vlan encapsulated (normal or hardware accelerated) or not.
  */
-static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
+static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
                                         int *depth)
 {
-       unsigned int vlan_depth = skb->mac_len;
+       unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
 
        /* if type is 802.1Q/AD then the header should already be
         * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
@@ -595,13 +597,12 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
                        vlan_depth = ETH_HLEN;
                }
                do {
-                       struct vlan_hdr *vh;
+                       struct vlan_hdr vhdr, *vh;
 
-                       if (unlikely(!pskb_may_pull(skb,
-                                                   vlan_depth + VLAN_HLEN)))
+                       vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr);
+                       if (unlikely(!vh || !--parse_depth))
                                return 0;
 
-                       vh = (struct vlan_hdr *)(skb->data + vlan_depth);
                        type = vh->h_vlan_encapsulated_proto;
                        vlan_depth += VLAN_HLEN;
                } while (eth_type_vlan(type));
@@ -620,11 +621,25 @@ static inline __be16 __vlan_get_protocol(struct sk_buff *skb, __be16 type,
  * Returns the EtherType of the packet, regardless of whether it is
  * vlan encapsulated (normal or hardware accelerated) or not.
  */
-static inline __be16 vlan_get_protocol(struct sk_buff *skb)
+static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
 {
        return __vlan_get_protocol(skb, skb->protocol, NULL);
 }
 
+/* A getter for the SKB protocol field which will handle VLAN tags consistently
+ * whether VLAN acceleration is enabled or not.
+ */
+static inline __be16 skb_protocol(const struct sk_buff *skb, bool skip_vlan)
+{
+       if (!skip_vlan)
+               /* VLAN acceleration strips the VLAN header from the skb and
+                * moves it to skb->vlan_proto
+                */
+               return skb_vlan_tag_present(skb) ? skb->vlan_proto : skb->protocol;
+
+       return vlan_get_protocol(skb);
+}
+
 static inline void vlan_set_encap_proto(struct sk_buff *skb,
                                        struct vlan_hdr *vhdr)
 {
index 00d7e8e..54c02c8 100644 (file)
                likely(f == f2) ? f2(__VA_ARGS__) :                     \
                                  INDIRECT_CALL_1(f, f1, __VA_ARGS__);  \
        })
+#define INDIRECT_CALL_3(f, f3, f2, f1, ...)                                    \
+       ({                                                                      \
+               likely(f == f3) ? f3(__VA_ARGS__) :                             \
+                                 INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__);      \
+       })
+#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...)                                        \
+       ({                                                                      \
+               likely(f == f4) ? f4(__VA_ARGS__) :                             \
+                                 INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__);  \
+       })
 
 #define INDIRECT_CALLABLE_DECLARE(f)   f
 #define INDIRECT_CALLABLE_SCOPE
@@ -30,6 +40,8 @@
 #else
 #define INDIRECT_CALL_1(f, f1, ...) f(__VA_ARGS__)
 #define INDIRECT_CALL_2(f, f2, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALL_3(f, f3, f2, f1, ...) f(__VA_ARGS__)
+#define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) f(__VA_ARGS__)
 #define INDIRECT_CALLABLE_DECLARE(f)
 #define INDIRECT_CALLABLE_SCOPE                static
 #endif
index 4100bd2..3e8fa1c 100644 (file)
@@ -41,6 +41,7 @@
 #define DMA_PTE_SNP            BIT_ULL(11)
 
 #define DMA_FL_PTE_PRESENT     BIT_ULL(0)
+#define DMA_FL_PTE_US          BIT_ULL(2)
 #define DMA_FL_PTE_XD          BIT_ULL(63)
 
 #define ADDR_WIDTH_5LEVEL      (57)
index 2cb445a..a44789d 100644 (file)
@@ -223,7 +223,7 @@ struct ipv6_pinfo {
 
        /*
         * Packed in 16bits.
-        * Omit one shift by by putting the signed field at MSB.
+        * Omit one shift by putting the signed field at MSB.
         */
 #if defined(__BIG_ENDIAN_BITFIELD)
        __s16                   hop_limit:9;
@@ -283,6 +283,7 @@ struct ipv6_pinfo {
                                autoflowlabel:1,
                                autoflowlabel_set:1,
                                mc_all:1,
+                               recverr_rfc4884:1,
                                rtalert_isolate:1;
        __u8                    min_hopcount;
        __u8                    tclass;
index 2735da5..3082378 100644 (file)
@@ -2,7 +2,7 @@
 #ifndef _LINUX_IRQ_WORK_H
 #define _LINUX_IRQ_WORK_H
 
-#include <linux/llist.h>
+#include <linux/smp_types.h>
 
 /*
  * An entry can be in one of four states:
  * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
  */
 
-/* flags share CSD_FLAG_ space */
-
-#define IRQ_WORK_PENDING       BIT(0)
-#define IRQ_WORK_BUSY          BIT(1)
-
-/* Doesn't want IPI, wait for tick: */
-#define IRQ_WORK_LAZY          BIT(2)
-/* Run hard IRQ context, even on RT */
-#define IRQ_WORK_HARD_IRQ      BIT(3)
-
-#define IRQ_WORK_CLAIMED       (IRQ_WORK_PENDING | IRQ_WORK_BUSY)
-
-/*
- * structure shares layout with single_call_data_t.
- */
 struct irq_work {
-       struct llist_node llnode;
-       atomic_t flags;
+       union {
+               struct __call_single_node node;
+               struct {
+                       struct llist_node llnode;
+                       atomic_t flags;
+               };
+       };
        void (*func)(struct irq_work *);
 };
 
index f613d85..d56128d 100644 (file)
@@ -766,6 +766,11 @@ struct journal_s
        int                     j_errno;
 
        /**
+        * @j_abort_mutex: Lock the whole aborting procedure.
+        */
+       struct mutex            j_abort_mutex;
+
+       /**
         * @j_sb_buffer: The first part of the superblock buffer.
         */
        struct buffer_head      *j_sb_buffer;
@@ -1247,7 +1252,6 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum3,                CSUM_V3)
 #define JBD2_ABORT_ON_SYNCDATA_ERR     0x040   /* Abort the journal on file
                                                 * data write error in ordered
                                                 * mode */
-#define JBD2_REC_ERR   0x080   /* The errno in the sb has been recorded */
 
 /*
  * Function declarations for the journaling transaction and buffer
index 98338dc..481273f 100644 (file)
@@ -18,6 +18,7 @@
 #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
                         2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
 
+struct cred;
 struct module;
 
 static inline int is_kernel_inittext(unsigned long addr)
@@ -98,7 +99,7 @@ int lookup_symbol_name(unsigned long addr, char *symname);
 int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
 
 /* How and when do we show kallsyms values? */
-extern int kallsyms_show_value(void);
+extern bool kallsyms_show_value(const struct cred *cred);
 
 #else /* !CONFIG_KALLSYMS */
 
@@ -158,7 +159,7 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
        return -ERANGE;
 }
 
-static inline int kallsyms_show_value(void)
+static inline bool kallsyms_show_value(const struct cred *cred)
 {
        return false;
 }
index 1776eb2..ea67910 100644 (file)
@@ -208,7 +208,7 @@ struct crash_mem_range {
 struct crash_mem {
        unsigned int max_nr_ranges;
        unsigned int nr_ranges;
-       struct crash_mem_range ranges[0];
+       struct crash_mem_range ranges[];
 };
 
 extern int crash_exclude_mem_range(struct crash_mem *mem,
index c62d764..529116b 100644 (file)
@@ -276,8 +276,7 @@ struct kgdb_arch {
  * the I/O driver.
  * @post_exception: Pointer to a function that will do any cleanup work
  * for the I/O driver.
- * @is_console: 1 if the end device is a console 0 if the I/O device is
- * not a console
+ * @cons: valid if the I/O device is a console; else NULL.
  */
 struct kgdb_io {
        const char              *name;
@@ -288,7 +287,7 @@ struct kgdb_io {
        void                    (*deinit) (void);
        void                    (*pre_exception) (void);
        void                    (*post_exception) (void);
-       int                     is_console;
+       struct console          *cons;
 };
 
 extern const struct kgdb_arch          arch_kgdb_ops;
index 594265b..6adf90f 100644 (file)
@@ -161,7 +161,7 @@ struct kretprobe_instance {
        kprobe_opcode_t *ret_addr;
        struct task_struct *task;
        void *fp;
-       char data[0];
+       char data[];
 };
 
 struct kretprobe_blackpoint {
@@ -350,6 +350,10 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
        return this_cpu_ptr(&kprobe_ctlblk);
 }
 
+extern struct kprobe kprobe_busy;
+void kprobe_busy_begin(void);
+void kprobe_busy_end(void);
+
 kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset);
 int register_kprobe(struct kprobe *p);
 void unregister_kprobe(struct kprobe *p);
index 62ec926..d564855 100644 (file)
@@ -409,7 +409,7 @@ struct kvm_irq_routing_table {
         * Array indexed by gsi. Each entry contains list of irq chips
         * the gsi is connected to.
         */
-       struct hlist_head map[0];
+       struct hlist_head map[];
 };
 #endif
 
index af83285..77ccf04 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/acpi.h>
 #include <linux/cdrom.h>
 #include <linux/sched.h>
+#include <linux/async.h>
 
 /*
  * Define if arch has non-standard setup.  This is a _PCI_ standard
@@ -609,7 +610,7 @@ struct ata_host {
        struct task_struct      *eh_owner;
 
        struct ata_port         *simplex_claimed;       /* channel owning the DMA */
-       struct ata_port         *ports[0];
+       struct ata_port         *ports[];
 };
 
 struct ata_queued_cmd {
@@ -872,6 +873,8 @@ struct ata_port {
        struct timer_list       fastdrain_timer;
        unsigned long           fastdrain_cnt;
 
+       async_cookie_t          cookie;
+
        int                     em_message_type;
        void                    *private_data;
 
@@ -1092,7 +1095,11 @@ extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd,
 #define ATA_SCSI_COMPAT_IOCTL /* empty */
 #endif
 extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd);
+#if IS_REACHABLE(CONFIG_ATA)
 bool ata_scsi_dma_need_drain(struct request *rq);
+#else
+#define ata_scsi_dma_need_drain NULL
+#endif
 extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev,
                            unsigned int cmd, void __user *arg);
 extern bool ata_link_online(struct ata_link *link);
index c664c27..f8397f3 100644 (file)
@@ -82,6 +82,12 @@ static inline int linkmode_equal(const unsigned long *src1,
        return bitmap_equal(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
 }
 
+static inline int linkmode_intersects(const unsigned long *src1,
+                                     const unsigned long *src2)
+{
+       return bitmap_intersects(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
 static inline int linkmode_subset(const unsigned long *src1,
                                  const unsigned long *src2)
 {
index 6791813..af998f9 100644 (file)
@@ -150,7 +150,7 @@ LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer,
         size_t buffer_size)
 LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid)
 LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new)
-LSM_HOOK(int, 0, inode_copy_up_xattr, const char *name)
+LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, const char *name)
 LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir,
         struct kernfs_node *kn)
 LSM_HOOK(int, 0, file_permission, struct file *file, int mask)
@@ -360,7 +360,7 @@ LSM_HOOK(int, 0, key_alloc, struct key *key, const struct cred *cred,
         unsigned long flags)
 LSM_HOOK(void, LSM_RET_VOID, key_free, struct key *key)
 LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred,
-        unsigned perm)
+        enum key_need_perm need_perm)
 LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **_buffer)
 #endif /* CONFIG_KEYS */
 
index af6b11d..ff7b760 100644 (file)
 #define MARVELL_PHY_ID_88E1149R                0x01410e50
 #define MARVELL_PHY_ID_88E1240         0x01410e30
 #define MARVELL_PHY_ID_88E1318S                0x01410e90
+#define MARVELL_PHY_ID_88E1340S                0x01410dc0
 #define MARVELL_PHY_ID_88E1116R                0x01410e40
 #define MARVELL_PHY_ID_88E1510         0x01410dd0
 #define MARVELL_PHY_ID_88E1540         0x01410eb0
 #define MARVELL_PHY_ID_88E1545         0x01410ea0
+#define MARVELL_PHY_ID_88E1548P                0x01410ec0
 #define MARVELL_PHY_ID_88E3016         0x01410e60
 #define MARVELL_PHY_ID_88X3310         0x002b09a0
 #define MARVELL_PHY_ID_88E2110         0x002b09b0
index 36d2e06..898cbf0 100644 (file)
@@ -18,6 +18,7 @@
 
 struct gpio_desc;
 struct mii_bus;
+struct reset_control;
 
 /* Multiple levels of nesting are possible. However typically this is
  * limited to nested DSA like layer, a MUX layer, and the normal
index 96ebaa9..dacf695 100644 (file)
@@ -126,7 +126,7 @@ enum mlx5_accel_ipsec_cap {
        MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN       = 1 << 7,
 };
 
-#ifdef CONFIG_MLX5_FPGA_IPSEC
+#ifdef CONFIG_MLX5_ACCEL
 
 u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
 
@@ -152,5 +152,5 @@ static inline int
 mlx5_accel_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
                           const struct mlx5_accel_esp_xfrm_attrs *attrs) { return -EOPNOTSUPP; }
 
-#endif
-#endif
+#endif /* CONFIG_MLX5_ACCEL */
+#endif /* __MLX5_ACCEL_H__ */
index b5a9399..7bfb673 100644 (file)
@@ -33,7 +33,6 @@
 #ifndef MLX5_CORE_CQ_H
 #define MLX5_CORE_CQ_H
 
-#include <rdma/ib_verbs.h>
 #include <linux/mlx5/driver.h>
 #include <linux/refcount.h>
 
index 1bc27ac..57db125 100644 (file)
@@ -458,6 +458,15 @@ enum {
        MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2,
 };
 
+struct mlx5_wqe_tls_static_params_seg {
+       u8     ctx[MLX5_ST_SZ_BYTES(tls_static_params)];
+};
+
+struct mlx5_wqe_tls_progress_params_seg {
+       __be32 tis_tir_num;
+       u8     ctx[MLX5_ST_SZ_BYTES(tls_progress_params)];
+};
+
 enum {
        MLX5_SET_PORT_RESET_QKEY        = 0,
        MLX5_SET_PORT_GUID0             = 16,
index 13c0e45..6a97ad6 100644 (file)
@@ -147,6 +147,7 @@ enum {
        MLX5_REG_MCDA            = 0x9063,
        MLX5_REG_MCAM            = 0x907f,
        MLX5_REG_MIRC            = 0x9162,
+       MLX5_REG_SBCAM           = 0xB01F,
        MLX5_REG_RESOURCE_DUMP   = 0xC000,
 };
 
@@ -707,6 +708,9 @@ struct mlx5_core_dev {
 #ifdef CONFIG_MLX5_FPGA
        struct mlx5_fpga_device *fpga;
 #endif
+#ifdef CONFIG_MLX5_ACCEL
+       const struct mlx5_accel_ipsec_ops *ipsec_ops;
+#endif
        struct mlx5_clock        clock;
        struct mlx5_ib_clock_info  *clock_info;
        struct mlx5_fw_tracer   *tracer;
index 6c5aa0a..92d991d 100644 (file)
@@ -207,7 +207,10 @@ struct mlx5_flow_act {
        u32 action;
        struct mlx5_modify_hdr  *modify_hdr;
        struct mlx5_pkt_reformat *pkt_reformat;
-       uintptr_t esp_id;
+       union {
+               u32 ipsec_obj_id;
+               uintptr_t esp_id;
+       };
        u32 flags;
        struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
        struct ib_counters *counters;
index 116bd9b..9e64710 100644 (file)
@@ -416,7 +416,11 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
        u8         table_miss_action_domain[0x1];
        u8         termination_table[0x1];
        u8         reformat_and_fwd_to_table[0x1];
-       u8         reserved_at_1a[0x6];
+       u8         reserved_at_1a[0x2];
+       u8         ipsec_encrypt[0x1];
+       u8         ipsec_decrypt[0x1];
+       u8         reserved_at_1e[0x2];
+
        u8         termination_table_raw_traffic[0x1];
        u8         reserved_at_21[0x1];
        u8         log_max_ft_size[0x6];
@@ -2965,6 +2969,8 @@ enum {
        MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100,
        MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2  = 0x400,
        MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800,
+       MLX5_FLOW_CONTEXT_ACTION_IPSEC_DECRYPT = 0x1000,
+       MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT = 0x2000,
 };
 
 enum {
@@ -3006,7 +3012,8 @@ struct mlx5_ifc_flow_context_bits {
 
        struct mlx5_ifc_vlan_bits push_vlan_2;
 
-       u8         reserved_at_120[0xe0];
+       u8         ipsec_obj_id[0x20];
+       u8         reserved_at_140[0xc0];
 
        struct mlx5_ifc_fte_match_param_bits match_value;
 
@@ -4283,7 +4290,8 @@ struct mlx5_ifc_rst2init_qp_out_bits {
 
        u8         syndrome[0x20];
 
-       u8         reserved_at_40[0x40];
+       u8         reserved_at_40[0x20];
+       u8         ece[0x20];
 };
 
 struct mlx5_ifc_rst2init_qp_in_bits {
@@ -4300,7 +4308,7 @@ struct mlx5_ifc_rst2init_qp_in_bits {
 
        u8         opt_param_mask[0x20];
 
-       u8         reserved_at_a0[0x20];
+       u8         ece[0x20];
 
        struct mlx5_ifc_qpc_bits qpc;
 
@@ -5751,6 +5759,7 @@ enum {
        MLX5_ACTION_IN_FIELD_METADATA_REG_C_7  = 0x58,
        MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM   = 0x59,
        MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM   = 0x5B,
+       MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME    = 0x5D,
 };
 
 struct mlx5_ifc_alloc_modify_header_context_out_bits {
@@ -6619,7 +6628,8 @@ struct mlx5_ifc_init2init_qp_out_bits {
 
        u8         syndrome[0x20];
 
-       u8         reserved_at_40[0x40];
+       u8         reserved_at_40[0x20];
+       u8         ece[0x20];
 };
 
 struct mlx5_ifc_init2init_qp_in_bits {
@@ -6636,7 +6646,7 @@ struct mlx5_ifc_init2init_qp_in_bits {
 
        u8         opt_param_mask[0x20];
 
-       u8         reserved_at_a0[0x20];
+       u8         ece[0x20];
 
        struct mlx5_ifc_qpc_bits qpc;
 
@@ -9958,6 +9968,34 @@ struct mlx5_ifc_pptb_reg_bits {
        u8         untagged_buff[0x4];
 };
 
+struct mlx5_ifc_sbcam_reg_bits {
+       u8         reserved_at_0[0x8];
+       u8         feature_group[0x8];
+       u8         reserved_at_10[0x8];
+       u8         access_reg_group[0x8];
+
+       u8         reserved_at_20[0x20];
+
+       u8         sb_access_reg_cap_mask[4][0x20];
+
+       u8         reserved_at_c0[0x80];
+
+       u8         sb_feature_cap_mask[4][0x20];
+
+       u8         reserved_at_1c0[0x40];
+
+       u8         cap_total_buffer_size[0x20];
+
+       u8         cap_cell_size[0x10];
+       u8         cap_max_pg_buffers[0x8];
+       u8         cap_num_pool_supported[0x8];
+
+       u8         reserved_at_240[0x8];
+       u8         cap_sbsr_stat_size[0x8];
+       u8         cap_max_tclass_data[0x8];
+       u8         cap_max_cpu_ingress_tclass_sb[0x8];
+};
+
 struct mlx5_ifc_pbmc_reg_bits {
        u8         reserved_at_0[0x8];
        u8         local_port[0x8];
@@ -10638,16 +10676,13 @@ struct mlx5_ifc_tls_static_params_bits {
 };
 
 struct mlx5_ifc_tls_progress_params_bits {
-       u8         reserved_at_0[0x8];
-       u8         tisn[0x18];
-
        u8         next_record_tcp_sn[0x20];
 
        u8         hw_resync_tcp_sn[0x20];
 
        u8         record_tracker_state[0x2];
        u8         auth_state[0x2];
-       u8         reserved_at_64[0x4];
+       u8         reserved_at_44[0x4];
        u8         hw_offset_record_number[0x18];
 };
 
index de9a272..2d45a6a 100644 (file)
@@ -104,8 +104,11 @@ enum mlx5e_ext_link_mode {
        MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR     = 8,
        MLX5E_CAUI_4_100GBASE_CR4_KR4           = 9,
        MLX5E_100GAUI_2_100GBASE_CR2_KR2        = 10,
+       MLX5E_100GAUI_1_100GBASE_CR_KR          = 11,
        MLX5E_200GAUI_4_200GBASE_CR4_KR4        = 12,
+       MLX5E_200GAUI_2_200GBASE_CR2_KR2        = 13,
        MLX5E_400GAUI_8                         = 15,
+       MLX5E_400GAUI_4_400GBASE_CR4_KR4        = 16,
        MLX5E_EXT_LINK_MODES_NUMBER,
 };
 
index b8992b8..36492a1 100644 (file)
@@ -209,7 +209,7 @@ struct mlx5_wqe_ctrl_seg {
                __be32          general_id;
                __be32          imm;
                __be32          umr_mkey;
-               __be32          tisn;
+               __be32          tis_tir_num;
        };
 };
 
diff --git a/include/linux/mlx5/rsc_dump.h b/include/linux/mlx5/rsc_dump.h
new file mode 100644 (file)
index 0000000..d11c0b2
--- /dev/null
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies inc. */
+
+#include <linux/mlx5/driver.h>
+
+#ifndef __MLX5_RSC_DUMP
+#define __MLX5_RSC_DUMP
+
+enum mlx5_sgmt_type {
+       MLX5_SGMT_TYPE_HW_CQPC,
+       MLX5_SGMT_TYPE_HW_SQPC,
+       MLX5_SGMT_TYPE_HW_RQPC,
+       MLX5_SGMT_TYPE_FULL_SRQC,
+       MLX5_SGMT_TYPE_FULL_CQC,
+       MLX5_SGMT_TYPE_FULL_EQC,
+       MLX5_SGMT_TYPE_FULL_QPC,
+       MLX5_SGMT_TYPE_SND_BUFF,
+       MLX5_SGMT_TYPE_RCV_BUFF,
+       MLX5_SGMT_TYPE_SRQ_BUFF,
+       MLX5_SGMT_TYPE_CQ_BUFF,
+       MLX5_SGMT_TYPE_EQ_BUFF,
+       MLX5_SGMT_TYPE_SX_SLICE,
+       MLX5_SGMT_TYPE_SX_SLICE_ALL,
+       MLX5_SGMT_TYPE_RDB,
+       MLX5_SGMT_TYPE_RX_SLICE_ALL,
+       MLX5_SGMT_TYPE_PRM_QUERY_QP,
+       MLX5_SGMT_TYPE_PRM_QUERY_CQ,
+       MLX5_SGMT_TYPE_PRM_QUERY_MKEY,
+       MLX5_SGMT_TYPE_MENU,
+       MLX5_SGMT_TYPE_TERMINATE,
+
+       MLX5_SGMT_TYPE_NUM, /* Keep last */
+};
+
+struct mlx5_rsc_key {
+       enum mlx5_sgmt_type rsc;
+       int index1;
+       int index2;
+       int num_of_obj1;
+       int num_of_obj2;
+       int size;
+};
+
+struct mlx5_rsc_dump_cmd;
+
+struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev,
+                                                  struct mlx5_rsc_key *key);
+void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd);
+int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd,
+                      struct page *page, int *size);
+#endif /* __MLX5_RSC_DUMP */
index 8170da1..4db87bc 100644 (file)
@@ -75,7 +75,7 @@ void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline);
 int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
                                     u16 vport, u8 min_inline);
 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
-                                     u16 vport, u8 *addr);
+                                     u16 vport, const u8 *addr);
 int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
 int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
index 15ed8ce..519820d 100644 (file)
 #define SDIO_DEVICE_ID_MEDIATEK_MT7663         0x7663
 #define SDIO_DEVICE_ID_MEDIATEK_MT7668         0x7668
 
+#define SDIO_VENDOR_ID_MICROCHIP_WILC          0x0296
+#define SDIO_DEVICE_ID_MICROCHIP_WILC1000      0x5347
+
 #define SDIO_VENDOR_ID_SIANO                   0x039a
 #define SDIO_DEVICE_ID_SIANO_NOVA_B0           0x0201
 #define SDIO_DEVICE_ID_SIANO_NICE              0x0202
index c4c37fd..f6f8849 100644 (file)
@@ -257,8 +257,8 @@ struct lruvec {
         */
        unsigned long                   anon_cost;
        unsigned long                   file_cost;
-       /* Evictions & activations on the inactive file list */
-       atomic_long_t                   inactive_age;
+       /* Non-resident age, driven by LRU movement */
+       atomic_long_t                   nonresident_age;
        /* Refaults at the time of last reclaim cycle */
        unsigned long                   refaults;
        /* Various lruvec state flags (enum lruvec_flags) */
index 9a36fad..6cbbfe9 100644 (file)
@@ -8,6 +8,7 @@
 #include <net/fib_notifier.h>
 #include <uapi/linux/mroute.h>
 #include <linux/mroute_base.h>
+#include <linux/sockptr.h>
 
 #ifdef CONFIG_IP_MROUTE
 static inline int ip_mroute_opt(int opt)
@@ -15,7 +16,7 @@ static inline int ip_mroute_opt(int opt)
        return opt >= MRT_BASE && opt <= MRT_MAX;
 }
 
-int ip_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
+int ip_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int);
 int ip_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
 int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg);
 int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
@@ -23,7 +24,7 @@ int ip_mr_init(void);
 bool ipmr_rule_default(const struct fib_rule *rule);
 #else
 static inline int ip_mroute_setsockopt(struct sock *sock, int optname,
-                                      char __user *optval, unsigned int optlen)
+                                      sockptr_t optval, unsigned int optlen)
 {
        return -ENOPROTOOPT;
 }
index c4a4585..bc351a8 100644 (file)
@@ -8,6 +8,7 @@
 #include <net/net_namespace.h>
 #include <uapi/linux/mroute6.h>
 #include <linux/mroute_base.h>
+#include <linux/sockptr.h>
 #include <net/fib_rules.h>
 
 #ifdef CONFIG_IPV6_MROUTE
@@ -25,7 +26,7 @@ static inline int ip6_mroute_opt(int opt)
 struct sock;
 
 #ifdef CONFIG_IPV6_MROUTE
-extern int ip6_mroute_setsockopt(struct sock *, int, char __user *, unsigned int);
+extern int ip6_mroute_setsockopt(struct sock *, int, sockptr_t, unsigned int);
 extern int ip6_mroute_getsockopt(struct sock *, int, char __user *, int __user *);
 extern int ip6_mr_input(struct sk_buff *skb);
 extern int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg);
@@ -33,9 +34,8 @@ extern int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *ar
 extern int ip6_mr_init(void);
 extern void ip6_mr_cleanup(void);
 #else
-static inline
-int ip6_mroute_setsockopt(struct sock *sock,
-                         int optname, char __user *optval, unsigned int optlen)
+static inline int ip6_mroute_setsockopt(struct sock *sock, int optname,
+               sockptr_t optval, unsigned int optlen)
 {
        return -ENOPROTOOPT;
 }
index 016a9c5..d48ff11 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/rcupdate.h>
 #include <linux/once.h>
 #include <linux/fs.h>
+#include <linux/sockptr.h>
 
 #include <uapi/linux/net.h>
 
@@ -162,15 +163,10 @@ struct proto_ops {
        int             (*listen)    (struct socket *sock, int len);
        int             (*shutdown)  (struct socket *sock, int flags);
        int             (*setsockopt)(struct socket *sock, int level,
-                                     int optname, char __user *optval, unsigned int optlen);
+                                     int optname, sockptr_t optval,
+                                     unsigned int optlen);
        int             (*getsockopt)(struct socket *sock, int level,
                                      int optname, char __user *optval, int __user *optlen);
-#ifdef CONFIG_COMPAT
-       int             (*compat_setsockopt)(struct socket *sock, int level,
-                                     int optname, char __user *optval, unsigned int optlen);
-       int             (*compat_getsockopt)(struct socket *sock, int level,
-                                     int optname, char __user *optval, int __user *optlen);
-#endif
        void            (*show_fdinfo)(struct seq_file *m, struct socket *sock);
        int             (*sendmsg)   (struct socket *sock, struct msghdr *m,
                                      size_t total_len);
similarity index 93%
rename from drivers/net/ethernet/intel/i40e/i40e_client.h
rename to include/linux/net/intel/i40e_client.h
index 72994ba..f41387a 100644 (file)
@@ -37,11 +37,6 @@ enum i40e_client_instance_state {
 struct i40e_ops;
 struct i40e_client;
 
-/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
- * In order for us to keep the interface simple, SW will define a
- * unique type value for AEQ.
- */
-#define I40E_QUEUE_TYPE_PE_AEQ  0x80
 #define I40E_QUEUE_INVALID_IDX 0xFFFF
 
 struct i40e_qv_info {
@@ -56,7 +51,6 @@ struct i40e_qvlist_info {
        struct i40e_qv_info qv_info[1];
 };
 
-#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF
 
 /* set of LAN parameters useful for clients managed by LAN */
 
@@ -87,7 +81,6 @@ struct i40e_info {
        u8 __iomem *hw_addr;
        u8 fid; /* function id, PF id or VF id */
 #define I40E_CLIENT_FTYPE_PF 0
-#define I40E_CLIENT_FTYPE_VF 1
        u8 ftype; /* function type, PF or VF */
        void *pf;
 
@@ -184,8 +177,6 @@ struct i40e_client {
        unsigned long state;            /* client state */
        atomic_t ref_cnt;  /* Count of all the client devices of this kind */
        u32 flags;
-#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE      BIT(0)
-#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS      BIT(2)
        u8 type;
 #define I40E_CLIENT_IWARP 0
        const struct i40e_client_ops *ops; /* client ops provided by the client */
index 6fc613e..ac2cd3f 100644 (file)
@@ -65,6 +65,8 @@ struct wpan_dev;
 struct mpls_dev;
 /* UDP Tunnel offloads */
 struct udp_tunnel_info;
+struct udp_tunnel_nic_info;
+struct udp_tunnel_nic;
 struct bpf_prog;
 struct xdp_buff;
 
@@ -1836,6 +1838,10 @@ enum netdev_priv_flags {
  *
  *     @macsec_ops:    MACsec offloading ops
  *
+ *     @udp_tunnel_nic_info:   static structure describing the UDP tunnel
+ *                             offload capabilities of the device
+ *     @udp_tunnel_nic:        UDP tunnel offload state
+ *
  *     FIXME: cleanup struct net_device such that network protocol info
  *     moves out.
  */
@@ -2134,6 +2140,8 @@ struct net_device {
        /* MACsec management functions */
        const struct macsec_ops *macsec_ops;
 #endif
+       const struct udp_tunnel_nic_info        *udp_tunnel_nic_info;
+       struct udp_tunnel_nic   *udp_tunnel_nic;
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
 
@@ -3157,7 +3165,7 @@ static inline int dev_recursion_level(void)
        return this_cpu_read(softnet_data.xmit.recursion);
 }
 
-#define XMIT_RECURSION_LIMIT   10
+#define XMIT_RECURSION_LIMIT   8
 static inline bool dev_xmit_recursion(void)
 {
        return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
index eb312e7..0101747 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/static_key.h>
 #include <linux/netfilter_defs.h>
 #include <linux/netdevice.h>
+#include <linux/sockptr.h>
 #include <net/net_namespace.h>
 
 static inline int NF_DROP_GETERR(int verdict)
@@ -163,18 +164,11 @@ struct nf_sockopt_ops {
        /* Non-inclusive ranges: use 0/0/NULL to never get called. */
        int set_optmin;
        int set_optmax;
-       int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len);
-#ifdef CONFIG_COMPAT
-       int (*compat_set)(struct sock *sk, int optval,
-                       void __user *user, unsigned int len);
-#endif
+       int (*set)(struct sock *sk, int optval, sockptr_t arg,
+                  unsigned int len);
        int get_optmin;
        int get_optmax;
        int (*get)(struct sock *sk, int optval, void __user *user, int *len);
-#ifdef CONFIG_COMPAT
-       int (*compat_get)(struct sock *sk, int optval,
-                       void __user *user, int *len);
-#endif
        /* Use the module struct to lock set/get code in place */
        struct module *owner;
 };
@@ -346,16 +340,10 @@ NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
 }
 
 /* Call setsockopt() */
-int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
+int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, sockptr_t opt,
                  unsigned int len);
 int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
                  int *len);
-#ifdef CONFIG_COMPAT
-int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
-               char __user *opt, unsigned int len);
-int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
-               char __user *opt, int *len);
-#endif
 
 struct flowi;
 struct nf_queue_entry;
index 5da8845..5deb099 100644 (file)
@@ -301,8 +301,8 @@ int xt_target_to_user(const struct xt_entry_target *t,
 int xt_data_to_user(void __user *dst, const void *src,
                    int usersize, int size, int aligned_size);
 
-void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
-                                struct xt_counters_info *info, bool compat);
+void *xt_copy_counters(sockptr_t arg, unsigned int len,
+                      struct xt_counters_info *info);
 struct xt_counters *xt_counters_alloc(unsigned int counters);
 
 struct xt_table *xt_register_table(struct net *net,
index b394bd4..c4676d6 100644 (file)
 int ipt_register_table(struct net *net, const struct xt_table *table,
                       const struct ipt_replace *repl,
                       const struct nf_hook_ops *ops, struct xt_table **res);
+
+void ipt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+                      const struct nf_hook_ops *ops);
+
+void ipt_unregister_table_exit(struct net *net, struct xt_table *table);
+
 void ipt_unregister_table(struct net *net, struct xt_table *table,
                          const struct nf_hook_ops *ops);
 
index 8225f78..1547d5f 100644 (file)
@@ -29,6 +29,9 @@ int ip6t_register_table(struct net *net, const struct xt_table *table,
                        const struct nf_hook_ops *ops, struct xt_table **res);
 void ip6t_unregister_table(struct net *net, struct xt_table *table,
                           const struct nf_hook_ops *ops);
+void ip6t_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+                                   const struct nf_hook_ops *ops);
+void ip6t_unregister_table_exit(struct net *net, struct xt_table *table);
 extern unsigned int ip6t_do_table(struct sk_buff *skb,
                                  const struct nf_hook_state *state,
                                  struct xt_table *table);
index f47af13..e6a2d72 100644 (file)
@@ -102,9 +102,6 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi)
 static inline void netpoll_poll_unlock(void *have)
 {
 }
-static inline void netpoll_netdev_init(struct net_device *dev)
-{
-}
 static inline bool netpoll_tx_running(struct net_device *dev)
 {
        return false;
index 0f61a4a..1efb88d 100644 (file)
@@ -8,31 +8,33 @@
 #ifndef __LINUX_OF_MDIO_H
 #define __LINUX_OF_MDIO_H
 
+#include <linux/device.h>
 #include <linux/phy.h>
 #include <linux/of.h>
 
 #if IS_ENABLED(CONFIG_OF_MDIO)
-extern bool of_mdiobus_child_is_phy(struct device_node *child);
-extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
-extern struct phy_device *of_phy_find_device(struct device_node *phy_np);
-extern struct phy_device *of_phy_connect(struct net_device *dev,
-                                        struct device_node *phy_np,
-                                        void (*hndlr)(struct net_device *),
-                                        u32 flags, phy_interface_t iface);
-extern struct phy_device *
+bool of_mdiobus_child_is_phy(struct device_node *child);
+int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
+int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+                            struct device_node *np);
+struct phy_device *of_phy_find_device(struct device_node *phy_np);
+struct phy_device *
+of_phy_connect(struct net_device *dev, struct device_node *phy_np,
+              void (*hndlr)(struct net_device *), u32 flags,
+              phy_interface_t iface);
+struct phy_device *
 of_phy_get_and_connect(struct net_device *dev, struct device_node *np,
                       void (*hndlr)(struct net_device *));
-struct phy_device *of_phy_attach(struct net_device *dev,
-                                struct device_node *phy_np, u32 flags,
-                                phy_interface_t iface);
-
-extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
-extern int of_phy_register_fixed_link(struct device_node *np);
-extern void of_phy_deregister_fixed_link(struct device_node *np);
-extern bool of_phy_is_fixed_link(struct device_node *np);
-extern int of_mdiobus_phy_device_register(struct mii_bus *mdio,
-                                    struct phy_device *phy,
-                                    struct device_node *child, u32 addr);
+struct phy_device *
+of_phy_attach(struct net_device *dev, struct device_node *phy_np,
+             u32 flags, phy_interface_t iface);
+
+struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
+int of_phy_register_fixed_link(struct device_node *np);
+void of_phy_deregister_fixed_link(struct device_node *np);
+bool of_phy_is_fixed_link(struct device_node *np);
+int of_mdiobus_phy_device_register(struct mii_bus *mdio, struct phy_device *phy,
+                                  struct device_node *child, u32 addr);
 
 static inline int of_mdio_parse_addr(struct device *dev,
                                     const struct device_node *np)
index 6590450..93fcef1 100644 (file)
@@ -304,16 +304,33 @@ static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c)
  * struct_size() - Calculate size of structure with trailing array.
  * @p: Pointer to the structure.
  * @member: Name of the array member.
- * @n: Number of elements in the array.
+ * @count: Number of elements in the array.
  *
  * Calculates size of memory needed for structure @p followed by an
- * array of @n @member elements.
+ * array of @count number of @member elements.
  *
  * Return: number of bytes needed or SIZE_MAX on overflow.
  */
-#define struct_size(p, member, n)                                      \
-       __ab_c_size(n,                                                  \
+#define struct_size(p, member, count)                                  \
+       __ab_c_size(count,                                              \
                    sizeof(*(p)->member) + __must_be_array((p)->member),\
                    sizeof(*(p)))
 
+/**
+ * flex_array_size() - Calculate size of a flexible array member
+ *                     within an enclosing structure.
+ *
+ * @p: Pointer to the structure.
+ * @member: Name of the flexible array member.
+ * @count: Number of elements in the array.
+ *
+ * Calculates size of a flexible array of @count number of @member
+ * elements, at the end of structure @p.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define flex_array_size(p, member, count)                              \
+       array_size(count,                                               \
+                   sizeof(*(p)->member) + __must_be_array((p)->member))
+
 #endif /* __LINUX_OVERFLOW_H */
index c79d833..34c1c4f 100644 (file)
@@ -2169,12 +2169,11 @@ static inline int pci_pcie_type(const struct pci_dev *dev)
  */
 static inline struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
 {
-       struct pci_dev *bridge = pci_upstream_bridge(dev);
-
-       while (bridge) {
-               if (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT)
-                       return bridge;
-               bridge = pci_upstream_bridge(bridge);
+       while (dev) {
+               if (pci_is_pcie(dev) &&
+                   pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+                       return dev;
+               dev = pci_upstream_bridge(dev);
        }
 
        return NULL;
index b4bb320..00ab5ef 100644 (file)
@@ -1244,6 +1244,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
 extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
 extern int get_callchain_buffers(int max_stack);
 extern void put_callchain_buffers(void);
+extern struct perf_callchain_entry *get_callchain_entry(int *rctx);
+extern void put_callchain_entry(int rctx);
 
 extern int sysctl_perf_event_max_stack;
 extern int sysctl_perf_event_max_contexts_per_stack;
index 32b6c52..56c1e8e 100644 (file)
@@ -249,6 +249,13 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 }
 #endif
 
+#ifndef __HAVE_ARCH_PTEP_GET
+static inline pte_t ptep_get(pte_t *ptep)
+{
+       return READ_ONCE(*ptep);
+}
+#endif
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
index 8c05d0f..0403eb7 100644 (file)
@@ -244,7 +244,8 @@ struct phy_package_shared {
 };
 
 /* used as bit number in atomic bitops */
-#define PHY_SHARED_F_INIT_DONE 0
+#define PHY_SHARED_F_INIT_DONE  0
+#define PHY_SHARED_F_PROBE_DONE 1
 
 /*
  * The Bus class for PHYs.  Devices which provide access to
@@ -260,9 +261,6 @@ struct mii_bus {
        int (*reset)(struct mii_bus *bus);
        struct mdio_bus_stats stats[PHY_MAX_ADDR];
 
-       unsigned int is_managed:1;      /* is device-managed */
-       unsigned int is_managed_registered:1;
-
        /*
         * A lock to ensure that only one thing can read/write
         * the MDIO bus at a time
@@ -298,6 +296,14 @@ struct mii_bus {
        /* RESET GPIO descriptor pointer */
        struct gpio_desc *reset_gpiod;
 
+       /* bus capabilities, used for probing */
+       enum {
+               MDIOBUS_NO_CAP = 0,
+               MDIOBUS_C22,
+               MDIOBUS_C45,
+               MDIOBUS_C22_C45,
+       } probe_capabilities;
+
        /* protect access to the shared element */
        struct mutex shared_lock;
 
@@ -313,20 +319,11 @@ static inline struct mii_bus *mdiobus_alloc(void)
 }
 
 int __mdiobus_register(struct mii_bus *bus, struct module *owner);
+int __devm_mdiobus_register(struct device *dev, struct mii_bus *bus,
+                           struct module *owner);
 #define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE)
-static inline int devm_mdiobus_register(struct mii_bus *bus)
-{
-       int ret;
-
-       if (!bus->is_managed)
-               return -EPERM;
-
-       ret = mdiobus_register(bus);
-       if (!ret)
-               bus->is_managed_registered = 1;
-
-       return ret;
-}
+#define devm_mdiobus_register(dev, bus) \
+               __devm_mdiobus_register(dev, bus, THIS_MODULE)
 
 void mdiobus_unregister(struct mii_bus *bus);
 void mdiobus_free(struct mii_bus *bus);
@@ -337,7 +334,6 @@ static inline struct mii_bus *devm_mdiobus_alloc(struct device *dev)
 }
 
 struct mii_bus *mdio_find_bus(const char *mdio_name);
-void devm_mdiobus_free(struct device *dev, struct mii_bus *bus);
 struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
 
 #define PHY_INTERRUPT_DISABLED false
@@ -388,14 +384,18 @@ enum phy_state {
        PHY_CABLETEST,
 };
 
+#define MDIO_MMD_NUM 32
+
 /**
  * struct phy_c45_device_ids - 802.3-c45 Device Identifiers
- * @devices_in_package: Bit vector of devices present.
+ * @devices_in_package: IEEE 802.3 devices in package register value.
+ * @mmds_present: bit vector of MMDs present.
  * @device_ids: The device identifer for each present device.
  */
 struct phy_c45_device_ids {
        u32 devices_in_package;
-       u32 device_ids[8];
+       u32 mmds_present;
+       u32 device_ids[MDIO_MMD_NUM];
 };
 
 struct macsec_context;
@@ -1385,6 +1385,9 @@ int genphy_c45_pma_read_abilities(struct phy_device *phydev);
 int genphy_c45_read_status(struct phy_device *phydev);
 int genphy_c45_config_aneg(struct phy_device *phydev);
 
+/* Generic C45 PHY driver */
+extern struct phy_driver genphy_c45_driver;
+
 /* The gen10g_* functions are the old Clause 45 stub */
 int gen10g_config_aneg(struct phy_device *phydev);
 
@@ -1416,6 +1419,7 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
 int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
 int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
 int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd);
+int phy_disable_interrupts(struct phy_device *phydev);
 void phy_request_interrupt(struct phy_device *phydev);
 void phy_free_interrupt(struct phy_device *phydev);
 void phy_print_status(struct phy_device *phydev);
@@ -1430,6 +1434,10 @@ void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx);
 bool phy_validate_pause(struct phy_device *phydev,
                        struct ethtool_pauseparam *pp);
 void phy_get_pause(struct phy_device *phydev, bool *tx_pause, bool *rx_pause);
+
+s32 phy_get_internal_delay(struct phy_device *phydev, struct device *dev,
+                          const int *delay_values, int size, bool is_rx);
+
 void phy_resolve_pause(unsigned long *local_adv, unsigned long *partner_adv,
                       bool *tx_pause, bool *rx_pause);
 
@@ -1466,51 +1474,10 @@ int __init mdio_bus_init(void);
 void mdio_bus_exit(void);
 #endif
 
-/* Inline function for use within net/core/ethtool.c (built-in) */
-static inline int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data)
-{
-       if (!phydev->drv)
-               return -EIO;
-
-       mutex_lock(&phydev->lock);
-       phydev->drv->get_strings(phydev, data);
-       mutex_unlock(&phydev->lock);
-
-       return 0;
-}
-
-static inline int phy_ethtool_get_sset_count(struct phy_device *phydev)
-{
-       int ret;
-
-       if (!phydev->drv)
-               return -EIO;
-
-       if (phydev->drv->get_sset_count &&
-           phydev->drv->get_strings &&
-           phydev->drv->get_stats) {
-               mutex_lock(&phydev->lock);
-               ret = phydev->drv->get_sset_count(phydev);
-               mutex_unlock(&phydev->lock);
-
-               return ret;
-       }
-
-       return -EOPNOTSUPP;
-}
-
-static inline int phy_ethtool_get_stats(struct phy_device *phydev,
-                                       struct ethtool_stats *stats, u64 *data)
-{
-       if (!phydev->drv)
-               return -EIO;
-
-       mutex_lock(&phydev->lock);
-       phydev->drv->get_stats(phydev, stats, data);
-       mutex_unlock(&phydev->lock);
-
-       return 0;
-}
+int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data);
+int phy_ethtool_get_sset_count(struct phy_device *phydev);
+int phy_ethtool_get_stats(struct phy_device *phydev,
+                         struct ethtool_stats *stats, u64 *data);
 
 static inline int phy_package_read(struct phy_device *phydev, u32 regnum)
 {
@@ -1554,14 +1521,25 @@ static inline int __phy_package_write(struct phy_device *phydev,
        return __mdiobus_write(phydev->mdio.bus, shared->addr, regnum, val);
 }
 
-static inline bool phy_package_init_once(struct phy_device *phydev)
+static inline bool __phy_package_set_once(struct phy_device *phydev,
+                                         unsigned int b)
 {
        struct phy_package_shared *shared = phydev->shared;
 
        if (!shared)
                return false;
 
-       return !test_and_set_bit(PHY_SHARED_F_INIT_DONE, &shared->flags);
+       return !test_and_set_bit(b, &shared->flags);
+}
+
+static inline bool phy_package_init_once(struct phy_device *phydev)
+{
+       return __phy_package_set_once(phydev, PHY_SHARED_F_INIT_DONE);
+}
+
+static inline bool phy_package_probe_once(struct phy_device *phydev)
+{
+       return __phy_package_set_once(phydev, PHY_SHARED_F_PROBE_DONE);
 }
 
 extern struct bus_type mdio_bus_type;
index cc5b452..1aad2ae 100644 (file)
@@ -76,7 +76,9 @@ struct phylink_config {
  * struct phylink_mac_ops - MAC operations structure.
  * @validate: Validate and update the link configuration.
  * @mac_pcs_get_state: Read the current link state from the hardware.
+ * @mac_prepare: prepare for a major reconfiguration of the interface.
  * @mac_config: configure the MAC for the selected mode and state.
+ * @mac_finish: finish a major reconfiguration of the interface.
  * @mac_an_restart: restart 802.3z BaseX autonegotiation.
  * @mac_link_down: take the link down.
  * @mac_link_up: allow the link to come up.
@@ -89,8 +91,12 @@ struct phylink_mac_ops {
                         struct phylink_link_state *state);
        void (*mac_pcs_get_state)(struct phylink_config *config,
                                  struct phylink_link_state *state);
+       int (*mac_prepare)(struct phylink_config *config, unsigned int mode,
+                          phy_interface_t iface);
        void (*mac_config)(struct phylink_config *config, unsigned int mode,
                           const struct phylink_link_state *state);
+       int (*mac_finish)(struct phylink_config *config, unsigned int mode,
+                         phy_interface_t iface);
        void (*mac_an_restart)(struct phylink_config *config);
        void (*mac_link_down)(struct phylink_config *config, unsigned int mode,
                              phy_interface_t interface);
@@ -146,6 +152,31 @@ void mac_pcs_get_state(struct phylink_config *config,
                       struct phylink_link_state *state);
 
 /**
+ * mac_prepare() - prepare to change the PHY interface mode
+ * @config: a pointer to a &struct phylink_config.
+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @iface: interface mode to switch to
+ *
+ * phylink will call this method at the beginning of a full initialisation
+ * of the link, which includes changing the interface mode or at initial
+ * startup time. It may be called for the current mode. The MAC driver
+ * should perform whatever actions are required, e.g. disabling the
+ * Serdes PHY.
+ *
+ * This will be the first call in the sequence:
+ * - mac_prepare()
+ * - mac_config()
+ * - pcs_config()
+ * - possible pcs_an_restart()
+ * - mac_finish()
+ *
+ * Returns zero on success, or negative errno on failure which will be
+ * reported to the kernel log.
+ */
+int mac_prepare(struct phylink_config *config, unsigned int mode,
+               phy_interface_t iface);
+
+/**
  * mac_config() - configure the MAC for the selected mode and state
  * @config: a pointer to a &struct phylink_config.
  * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
@@ -221,6 +252,23 @@ void mac_config(struct phylink_config *config, unsigned int mode,
                const struct phylink_link_state *state);
 
 /**
+ * mac_finish() - finish a change of the PHY interface mode
+ * @config: a pointer to a &struct phylink_config.
+ * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
+ * @iface: interface mode to switch to
+ *
+ * phylink will call this if it called mac_prepare() to allow the MAC to
+ * complete any necessary steps after the MAC and PCS have been configured
+ * for the @mode and @iface. E.g. a MAC driver may wish to re-enable the
+ * Serdes PHY here if it was previously disabled by mac_prepare().
+ *
+ * Returns zero on success, or negative errno on failure which will be
+ * reported to the kernel log.
+ */
+int mac_finish(struct phylink_config *config, unsigned int mode,
+               phy_interface_t iface);
+
+/**
  * mac_an_restart() - restart 802.3z BaseX autonegotiation
  * @config: a pointer to a &struct phylink_config.
  */
@@ -273,6 +321,21 @@ void mac_link_up(struct phylink_config *config, struct phy_device *phy,
                 int speed, int duplex, bool tx_pause, bool rx_pause);
 #endif
 
+struct phylink_pcs_ops;
+
+/**
+ * struct phylink_pcs - PHYLINK PCS instance
+ * @ops: a pointer to the &struct phylink_pcs_ops structure
+ * @poll: poll the PCS for link changes
+ *
+ * This structure is designed to be embedded within the PCS private data,
+ * and will be passed between phylink and the PCS.
+ */
+struct phylink_pcs {
+       const struct phylink_pcs_ops *ops;
+       bool poll;
+};
+
 /**
  * struct phylink_pcs_ops - MAC PCS operations structure.
  * @pcs_get_state: read the current MAC PCS link state from the hardware.
@@ -282,20 +345,21 @@ void mac_link_up(struct phylink_config *config, struct phy_device *phy,
  *               (where necessary).
  */
 struct phylink_pcs_ops {
-       void (*pcs_get_state)(struct phylink_config *config,
+       void (*pcs_get_state)(struct phylink_pcs *pcs,
                              struct phylink_link_state *state);
-       int (*pcs_config)(struct phylink_config *config, unsigned int mode,
+       int (*pcs_config)(struct phylink_pcs *pcs, unsigned int mode,
                          phy_interface_t interface,
-                         const unsigned long *advertising);
-       void (*pcs_an_restart)(struct phylink_config *config);
-       void (*pcs_link_up)(struct phylink_config *config, unsigned int mode,
+                         const unsigned long *advertising,
+                         bool permit_pause_to_mac);
+       void (*pcs_an_restart)(struct phylink_pcs *pcs);
+       void (*pcs_link_up)(struct phylink_pcs *pcs, unsigned int mode,
                            phy_interface_t interface, int speed, int duplex);
 };
 
 #if 0 /* For kernel-doc purposes only. */
 /**
  * pcs_get_state() - Read the current inband link state from the hardware
- * @config: a pointer to a &struct phylink_config.
+ * @pcs: a pointer to a &struct phylink_pcs.
  * @state: a pointer to a &struct phylink_link_state.
  *
  * Read the current inband link state from the MAC PCS, reporting the
@@ -308,18 +372,20 @@ struct phylink_pcs_ops {
  * When present, this overrides mac_pcs_get_state() in &struct
  * phylink_mac_ops.
  */
-void pcs_get_state(struct phylink_config *config,
+void pcs_get_state(struct phylink_pcs *pcs,
                   struct phylink_link_state *state);
 
 /**
  * pcs_config() - Configure the PCS mode and advertisement
- * @config: a pointer to a &struct phylink_config.
+ * @pcs: a pointer to a &struct phylink_pcs.
  * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND.
  * @interface: interface mode to be used
  * @advertising: advertisement ethtool link mode mask
+ * @permit_pause_to_mac: permit forwarding pause resolution to MAC
  *
  * Configure the PCS for the operating mode, the interface mode, and set
- * the advertisement mask.
+ * the advertisement mask. @permit_pause_to_mac indicates whether the
+ * hardware may forward the pause mode resolution to the MAC.
  *
  * When operating in %MLO_AN_INBAND, inband should always be enabled,
  * otherwise inband should be disabled.
@@ -331,21 +397,21 @@ void pcs_get_state(struct phylink_config *config,
  *
  * For most 10GBASE-R, there is no advertisement.
  */
-int (*pcs_config)(struct phylink_config *config, unsigned int mode,
-                 phy_interface_t interface, const unsigned long *advertising);
+int pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+              phy_interface_t interface, const unsigned long *advertising);
 
 /**
  * pcs_an_restart() - restart 802.3z BaseX autonegotiation
- * @config: a pointer to a &struct phylink_config.
+ * @pcs: a pointer to a &struct phylink_pcs.
  *
  * When PCS ops are present, this overrides mac_an_restart() in &struct
  * phylink_mac_ops.
  */
-void (*pcs_an_restart)(struct phylink_config *config);
+void pcs_an_restart(struct phylink_pcs *pcs);
 
 /**
  * pcs_link_up() - program the PCS for the resolved link configuration
- * @config: a pointer to a &struct phylink_config.
+ * @pcs: a pointer to a &struct phylink_pcs.
  * @mode: link autonegotiation mode
  * @interface: link &typedef phy_interface_t mode
  * @speed: link speed
@@ -356,14 +422,14 @@ void (*pcs_an_restart)(struct phylink_config *config);
  * mode without in-band AN needs to be manually configured for the link
  * and duplex setting. Otherwise, this should be a no-op.
  */
-void (*pcs_link_up)(struct phylink_config *config, unsigned int mode,
-                   phy_interface_t interface, int speed, int duplex);
+void pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+                phy_interface_t interface, int speed, int duplex);
 #endif
 
 struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *,
                               phy_interface_t iface,
                               const struct phylink_mac_ops *mac_ops);
-void phylink_add_pcs(struct phylink *, const struct phylink_pcs_ops *ops);
+void phylink_set_pcs(struct phylink *, struct phylink_pcs *pcs);
 void phylink_destroy(struct phylink *);
 
 int phylink_connect_phy(struct phylink *, struct phy_device *);
@@ -392,6 +458,8 @@ int phylink_init_eee(struct phylink *, bool);
 int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *);
 int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *);
 int phylink_mii_ioctl(struct phylink *, struct ifreq *, int);
+int phylink_speed_down(struct phylink *pl, bool sync);
+int phylink_speed_up(struct phylink *pl);
 
 #define phylink_zero(bm) \
        bitmap_zero(bm, __ETHTOOL_LINK_MODE_MASK_NBITS)
@@ -410,6 +478,9 @@ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
 int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs,
                                          phy_interface_t interface,
                                          const unsigned long *advertising);
+int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
+                              phy_interface_t interface,
+                              const unsigned long *advertising);
 void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs);
 
 void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs,
index 7fbc867..49d155c 100644 (file)
@@ -597,7 +597,7 @@ int sev_guest_df_flush(int *error);
  */
 int sev_guest_decommission(struct sev_data_decommission *data, int *error);
 
-void *psp_copy_user_blob(u64 __user uaddr, u32 len);
+void *psp_copy_user_blob(u64 uaddr, u32 len);
 
 #else  /* !CONFIG_CRYPTO_DEV_SP_PSP */
 
index 2c4737e..977807e 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2016  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _COMMON_HSI_H
index 95f5fd6..cd1207a 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef __ETH_COMMON__
index 98cfc19..68eda1c 100644 (file)
@@ -1,6 +1,7 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef __FCOE_COMMON__
index 2f0a771..157019f 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef __ISCSI_COMMON__
index c6cfd39..14f9e4c 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef __IWARP_COMMON__
index 733fad7..4d58dc8 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_CHAIN_H
@@ -37,6 +11,7 @@
 #include <asm/byteorder.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/qed/common_hsi.h>
 
@@ -52,9 +27,9 @@ enum qed_chain_mode {
 };
 
 enum qed_chain_use_mode {
-       QED_CHAIN_USE_TO_PRODUCE,               /* Chain starts empty */
-       QED_CHAIN_USE_TO_CONSUME,               /* Chain starts full */
-       QED_CHAIN_USE_TO_CONSUME_PRODUCE,       /* Chain starts empty */
+       QED_CHAIN_USE_TO_PRODUCE,                       /* Chain starts empty */
+       QED_CHAIN_USE_TO_CONSUME,                       /* Chain starts full */
+       QED_CHAIN_USE_TO_CONSUME_PRODUCE,               /* Chain starts empty */
 };
 
 enum qed_chain_cnt_type {
@@ -66,191 +41,230 @@ enum qed_chain_cnt_type {
 };
 
 struct qed_chain_next {
-       struct regpair  next_phys;
-       void            *next_virt;
+       struct regpair                                  next_phys;
+       void                                            *next_virt;
 };
 
 struct qed_chain_pbl_u16 {
-       u16 prod_page_idx;
-       u16 cons_page_idx;
+       u16                                             prod_page_idx;
+       u16                                             cons_page_idx;
 };
 
 struct qed_chain_pbl_u32 {
-       u32 prod_page_idx;
-       u32 cons_page_idx;
-};
-
-struct qed_chain_ext_pbl {
-       dma_addr_t p_pbl_phys;
-       void *p_pbl_virt;
+       u32                                             prod_page_idx;
+       u32                                             cons_page_idx;
 };
 
 struct qed_chain_u16 {
        /* Cyclic index of next element to produce/consume */
-       u16 prod_idx;
-       u16 cons_idx;
+       u16                                             prod_idx;
+       u16                                             cons_idx;
 };
 
 struct qed_chain_u32 {
        /* Cyclic index of next element to produce/consume */
-       u32 prod_idx;
-       u32 cons_idx;
+       u32                                             prod_idx;
+       u32                                             cons_idx;
 };
 
 struct addr_tbl_entry {
-       void *virt_addr;
-       dma_addr_t dma_map;
+       void                                            *virt_addr;
+       dma_addr_t                                      dma_map;
 };
 
 struct qed_chain {
-       /* fastpath portion of the chain - required for commands such
+       /* Fastpath portion of the chain - required for commands such
         * as produce / consume.
         */
+
        /* Point to next element to produce/consume */
-       void *p_prod_elem;
-       void *p_cons_elem;
+       void                                            *p_prod_elem;
+       void                                            *p_cons_elem;
 
        /* Fastpath portions of the PBL [if exists] */
+
        struct {
                /* Table for keeping the virtual and physical addresses of the
                 * chain pages, respectively to the physical addresses
                 * in the pbl table.
                 */
-               struct addr_tbl_entry *pp_addr_tbl;
+               struct addr_tbl_entry                   *pp_addr_tbl;
 
                union {
-                       struct qed_chain_pbl_u16 u16;
-                       struct qed_chain_pbl_u32 u32;
-               } c;
-       } pbl;
+                       struct qed_chain_pbl_u16        u16;
+                       struct qed_chain_pbl_u32        u32;
+               }                                       c;
+       }                                               pbl;
 
        union {
-               struct qed_chain_u16 chain16;
-               struct qed_chain_u32 chain32;
-       } u;
+               struct qed_chain_u16                    chain16;
+               struct qed_chain_u32                    chain32;
+       }                                               u;
 
        /* Capacity counts only usable elements */
-       u32 capacity;
-       u32 page_cnt;
+       u32                                             capacity;
+       u32                                             page_cnt;
 
-       enum qed_chain_mode mode;
+       enum qed_chain_mode                             mode;
 
        /* Elements information for fast calculations */
-       u16 elem_per_page;
-       u16 elem_per_page_mask;
-       u16 elem_size;
-       u16 next_page_mask;
-       u16 usable_per_page;
-       u8 elem_unusable;
+       u16                                             elem_per_page;
+       u16                                             elem_per_page_mask;
+       u16                                             elem_size;
+       u16                                             next_page_mask;
+       u16                                             usable_per_page;
+       u8                                              elem_unusable;
 
-       u8 cnt_type;
+       enum qed_chain_cnt_type                         cnt_type;
 
        /* Slowpath of the chain - required for initialization and destruction,
         * but isn't involved in regular functionality.
         */
 
+       u32                                             page_size;
+
        /* Base address of a pre-allocated buffer for pbl */
        struct {
-               dma_addr_t p_phys_table;
-               void *p_virt_table;
-       } pbl_sp;
+               __le64                                  *table_virt;
+               dma_addr_t                              table_phys;
+               size_t                                  table_size;
+       }                                               pbl_sp;
 
        /* Address of first page of the chain - the address is required
-        * for fastpath operation [consume/produce] but only for the the SINGLE
+        * for fastpath operation [consume/produce] but only for the SINGLE
         * flavour which isn't considered fastpath [== SPQ].
         */
-       void *p_virt_addr;
-       dma_addr_t p_phys_addr;
+       void                                            *p_virt_addr;
+       dma_addr_t                                      p_phys_addr;
 
        /* Total number of elements [for entire chain] */
-       u32 size;
+       u32                                             size;
+
+       enum qed_chain_use_mode                         intended_use;
+
+       bool                                            b_external_pbl;
+};
 
-       u8 intended_use;
+struct qed_chain_init_params {
+       enum qed_chain_mode                             mode;
+       enum qed_chain_use_mode                         intended_use;
+       enum qed_chain_cnt_type                         cnt_type;
 
-       bool b_external_pbl;
+       u32                                             page_size;
+       u32                                             num_elems;
+       size_t                                          elem_size;
+
+       void                                            *ext_pbl_virt;
+       dma_addr_t                                      ext_pbl_phys;
 };
 
-#define QED_CHAIN_PBL_ENTRY_SIZE        (8)
-#define QED_CHAIN_PAGE_SIZE             (0x1000)
-#define ELEMS_PER_PAGE(elem_size)       (QED_CHAIN_PAGE_SIZE / (elem_size))
+#define QED_CHAIN_PAGE_SIZE                            SZ_4K
+
+#define ELEMS_PER_PAGE(elem_size, page_size)                                \
+       ((page_size) / (elem_size))
 
-#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)        \
-       (((mode) == QED_CHAIN_MODE_NEXT_PTR) ?           \
-        (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \
-                  (elem_size))) : 0)
+#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)                            \
+       (((mode) == QED_CHAIN_MODE_NEXT_PTR) ?                               \
+        (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / (elem_size))) :     \
+        0)
 
-#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
-       ((u32)(ELEMS_PER_PAGE(elem_size) -     \
-              UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
+#define USABLE_ELEMS_PER_PAGE(elem_size, page_size, mode)                   \
+       ((u32)(ELEMS_PER_PAGE((elem_size), (page_size)) -                    \
+              UNUSABLE_ELEMS_PER_PAGE((elem_size), (mode))))
 
-#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
-       DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
+#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, page_size, mode)            \
+       DIV_ROUND_UP((elem_cnt),                                             \
+                    USABLE_ELEMS_PER_PAGE((elem_size), (page_size), (mode)))
 
-#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
-#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
+#define is_chain_u16(p)                                                             \
+       ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
+#define is_chain_u32(p)                                                             \
+       ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
 
 /* Accessors */
-static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
+
+static inline u16 qed_chain_get_prod_idx(const struct qed_chain *chain)
 {
-       return p_chain->u.chain16.prod_idx;
+       return chain->u.chain16.prod_idx;
 }
 
-static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
+static inline u16 qed_chain_get_cons_idx(const struct qed_chain *chain)
 {
-       return p_chain->u.chain16.cons_idx;
+       return chain->u.chain16.cons_idx;
 }
 
-static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_prod_idx_u32(const struct qed_chain *chain)
 {
-       return p_chain->u.chain32.cons_idx;
+       return chain->u.chain32.prod_idx;
 }
 
-static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_cons_idx_u32(const struct qed_chain *chain)
 {
+       return chain->u.chain32.cons_idx;
+}
+
+static inline u16 qed_chain_get_elem_used(const struct qed_chain *chain)
+{
+       u32 prod = qed_chain_get_prod_idx(chain);
+       u32 cons = qed_chain_get_cons_idx(chain);
+       u16 elem_per_page = chain->elem_per_page;
        u16 used;
 
-       used = (u16) (((u32)0x10000 +
-                      (u32)p_chain->u.chain16.prod_idx) -
-                     (u32)p_chain->u.chain16.cons_idx);
-       if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
-               used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
-                   p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
+       if (prod < cons)
+               prod += (u32)U16_MAX + 1;
+
+       used = (u16)(prod - cons);
+       if (chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+               used -= (u16)(prod / elem_per_page - cons / elem_per_page);
+
+       return used;
+}
 
-       return (u16)(p_chain->capacity - used);
+static inline u16 qed_chain_get_elem_left(const struct qed_chain *chain)
+{
+       return (u16)(chain->capacity - qed_chain_get_elem_used(chain));
 }
 
-static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_elem_used_u32(const struct qed_chain *chain)
 {
+       u64 prod = qed_chain_get_prod_idx_u32(chain);
+       u64 cons = qed_chain_get_cons_idx_u32(chain);
+       u16 elem_per_page = chain->elem_per_page;
        u32 used;
 
-       used = (u32) (((u64)0x100000000ULL +
-                      (u64)p_chain->u.chain32.prod_idx) -
-                     (u64)p_chain->u.chain32.cons_idx);
-       if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
-               used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
-                   p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
+       if (prod < cons)
+               prod += (u64)U32_MAX + 1;
+
+       used = (u32)(prod - cons);
+       if (chain->mode == QED_CHAIN_MODE_NEXT_PTR)
+               used -= (u32)(prod / elem_per_page - cons / elem_per_page);
+
+       return used;
+}
 
-       return p_chain->capacity - used;
+static inline u32 qed_chain_get_elem_left_u32(const struct qed_chain *chain)
+{
+       return chain->capacity - qed_chain_get_elem_used_u32(chain);
 }
 
-static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
+static inline u16 qed_chain_get_usable_per_page(const struct qed_chain *chain)
 {
-       return p_chain->usable_per_page;
+       return chain->usable_per_page;
 }
 
-static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
+static inline u8 qed_chain_get_unusable_per_page(const struct qed_chain *chain)
 {
-       return p_chain->elem_unusable;
+       return chain->elem_unusable;
 }
 
-static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
+static inline u32 qed_chain_get_page_cnt(const struct qed_chain *chain)
 {
-       return p_chain->page_cnt;
+       return chain->page_cnt;
 }
 
-static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
+static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain)
 {
-       return p_chain->pbl_sp.p_phys_table;
+       return chain->pbl_sp.table_phys;
 }
 
 /**
@@ -505,118 +519,6 @@ static inline void qed_chain_reset(struct qed_chain *p_chain)
 }
 
 /**
- * @brief qed_chain_init - Initalizes a basic chain struct
- *
- * @param p_chain
- * @param p_virt_addr
- * @param p_phys_addr  physical address of allocated buffer's beginning
- * @param page_cnt     number of pages in the allocated buffer
- * @param elem_size    size of each element in the chain
- * @param intended_use
- * @param mode
- */
-static inline void qed_chain_init_params(struct qed_chain *p_chain,
-                                        u32 page_cnt,
-                                        u8 elem_size,
-                                        enum qed_chain_use_mode intended_use,
-                                        enum qed_chain_mode mode,
-                                        enum qed_chain_cnt_type cnt_type)
-{
-       /* chain fixed parameters */
-       p_chain->p_virt_addr = NULL;
-       p_chain->p_phys_addr = 0;
-       p_chain->elem_size      = elem_size;
-       p_chain->intended_use = (u8)intended_use;
-       p_chain->mode           = mode;
-       p_chain->cnt_type = (u8)cnt_type;
-
-       p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
-       p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
-       p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
-       p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
-       p_chain->next_page_mask = (p_chain->usable_per_page &
-                                  p_chain->elem_per_page_mask);
-
-       p_chain->page_cnt = page_cnt;
-       p_chain->capacity = p_chain->usable_per_page * page_cnt;
-       p_chain->size = p_chain->elem_per_page * page_cnt;
-
-       p_chain->pbl_sp.p_phys_table = 0;
-       p_chain->pbl_sp.p_virt_table = NULL;
-       p_chain->pbl.pp_addr_tbl = NULL;
-}
-
-/**
- * @brief qed_chain_init_mem -
- *
- * Initalizes a basic chain struct with its chain buffers
- *
- * @param p_chain
- * @param p_virt_addr  virtual address of allocated buffer's beginning
- * @param p_phys_addr  physical address of allocated buffer's beginning
- *
- */
-static inline void qed_chain_init_mem(struct qed_chain *p_chain,
-                                     void *p_virt_addr, dma_addr_t p_phys_addr)
-{
-       p_chain->p_virt_addr = p_virt_addr;
-       p_chain->p_phys_addr = p_phys_addr;
-}
-
-/**
- * @brief qed_chain_init_pbl_mem -
- *
- * Initalizes a basic chain struct with its pbl buffers
- *
- * @param p_chain
- * @param p_virt_pbl   pointer to a pre allocated side table which will hold
- *                      virtual page addresses.
- * @param p_phys_pbl   pointer to a pre-allocated side table which will hold
- *                      physical page addresses.
- * @param pp_virt_addr_tbl
- *                      pointer to a pre-allocated side table which will hold
- *                      the virtual addresses of the chain pages.
- *
- */
-static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
-                                         void *p_virt_pbl,
-                                         dma_addr_t p_phys_pbl,
-                                         struct addr_tbl_entry *pp_addr_tbl)
-{
-       p_chain->pbl_sp.p_phys_table = p_phys_pbl;
-       p_chain->pbl_sp.p_virt_table = p_virt_pbl;
-       p_chain->pbl.pp_addr_tbl = pp_addr_tbl;
-}
-
-/**
- * @brief qed_chain_init_next_ptr_elem -
- *
- * Initalizes a next pointer element
- *
- * @param p_chain
- * @param p_virt_curr  virtual address of a chain page of which the next
- *                      pointer element is initialized
- * @param p_virt_next  virtual address of the next chain page
- * @param p_phys_next  physical address of the next chain page
- *
- */
-static inline void
-qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
-                            void *p_virt_curr,
-                            void *p_virt_next, dma_addr_t p_phys_next)
-{
-       struct qed_chain_next *p_next;
-       u32 size;
-
-       size = p_chain->elem_size * p_chain->usable_per_page;
-       p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);
-
-       DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);
-
-       p_next->next_virt = p_virt_next;
-}
-
-/**
  * @brief qed_chain_get_last_elem -
  *
  * Returns a pointer to the last element of the chain
@@ -723,7 +625,7 @@ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
 
        for (i = 0; i < page_cnt; i++)
                memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0,
-                      QED_CHAIN_PAGE_SIZE);
+                      p_chain->page_size);
 }
 
 #endif
index a131048..812a4d7 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_ETH_IF_H
index 4608248..16752ec 100644 (file)
@@ -1,4 +1,6 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright (c) 2019-2020 Marvell International Ltd. */
+
 #ifndef _QED_FCOE_IF_H
 #define _QED_FCOE_IF_H
 #include <linux/types.h>
index 8cb7640..cd6a5c7 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_IF_H
@@ -524,7 +498,7 @@ struct qed_fcoe_pf_params {
        u8 bdq_pbl_num_entries[2];
 };
 
-/* Most of the the parameters below are described in the FW iSCSI / TCP HSI */
+/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
 struct qed_iscsi_pf_params {
        u64 glbl_q_params_addr;
        u64 bdq_pbl_base_addr[3];
@@ -620,6 +594,7 @@ enum qed_hw_err_type {
 enum qed_dev_type {
        QED_DEV_TYPE_BB,
        QED_DEV_TYPE_AH,
+       QED_DEV_TYPE_E5,
 };
 
 struct qed_dev_info {
@@ -687,87 +662,72 @@ enum qed_protocol {
        QED_PROTOCOL_FCOE,
 };
 
-enum qed_link_mode_bits {
-       QED_LM_FIBRE_BIT = BIT(0),
-       QED_LM_Autoneg_BIT = BIT(1),
-       QED_LM_Asym_Pause_BIT = BIT(2),
-       QED_LM_Pause_BIT = BIT(3),
-       QED_LM_1000baseT_Full_BIT = BIT(4),
-       QED_LM_10000baseT_Full_BIT = BIT(5),
-       QED_LM_10000baseKR_Full_BIT = BIT(6),
-       QED_LM_20000baseKR2_Full_BIT = BIT(7),
-       QED_LM_25000baseKR_Full_BIT = BIT(8),
-       QED_LM_40000baseLR4_Full_BIT = BIT(9),
-       QED_LM_50000baseKR2_Full_BIT = BIT(10),
-       QED_LM_100000baseKR4_Full_BIT = BIT(11),
-       QED_LM_TP_BIT = BIT(12),
-       QED_LM_Backplane_BIT = BIT(13),
-       QED_LM_1000baseKX_Full_BIT = BIT(14),
-       QED_LM_10000baseKX4_Full_BIT = BIT(15),
-       QED_LM_10000baseR_FEC_BIT = BIT(16),
-       QED_LM_40000baseKR4_Full_BIT = BIT(17),
-       QED_LM_40000baseCR4_Full_BIT = BIT(18),
-       QED_LM_40000baseSR4_Full_BIT = BIT(19),
-       QED_LM_25000baseCR_Full_BIT = BIT(20),
-       QED_LM_25000baseSR_Full_BIT = BIT(21),
-       QED_LM_50000baseCR2_Full_BIT = BIT(22),
-       QED_LM_100000baseSR4_Full_BIT = BIT(23),
-       QED_LM_100000baseCR4_Full_BIT = BIT(24),
-       QED_LM_100000baseLR4_ER4_Full_BIT = BIT(25),
-       QED_LM_50000baseSR2_Full_BIT = BIT(26),
-       QED_LM_1000baseX_Full_BIT = BIT(27),
-       QED_LM_10000baseCR_Full_BIT = BIT(28),
-       QED_LM_10000baseSR_Full_BIT = BIT(29),
-       QED_LM_10000baseLR_Full_BIT = BIT(30),
-       QED_LM_10000baseLRM_Full_BIT = BIT(31),
-       QED_LM_COUNT = 32
+enum qed_fec_mode {
+       QED_FEC_MODE_NONE                       = BIT(0),
+       QED_FEC_MODE_FIRECODE                   = BIT(1),
+       QED_FEC_MODE_RS                         = BIT(2),
+       QED_FEC_MODE_AUTO                       = BIT(3),
+       QED_FEC_MODE_UNSUPPORTED                = BIT(4),
 };
 
 struct qed_link_params {
-       bool    link_up;
-
-#define QED_LINK_OVERRIDE_SPEED_AUTONEG         BIT(0)
-#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS      BIT(1)
-#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED    BIT(2)
-#define QED_LINK_OVERRIDE_PAUSE_CONFIG          BIT(3)
-#define QED_LINK_OVERRIDE_LOOPBACK_MODE         BIT(4)
-#define QED_LINK_OVERRIDE_EEE_CONFIG            BIT(5)
-       u32     override_flags;
-       bool    autoneg;
-       u32     adv_speeds;
-       u32     forced_speed;
-#define QED_LINK_PAUSE_AUTONEG_ENABLE           BIT(0)
-#define QED_LINK_PAUSE_RX_ENABLE                BIT(1)
-#define QED_LINK_PAUSE_TX_ENABLE                BIT(2)
-       u32     pause_config;
-#define QED_LINK_LOOPBACK_NONE                  BIT(0)
-#define QED_LINK_LOOPBACK_INT_PHY               BIT(1)
-#define QED_LINK_LOOPBACK_EXT_PHY               BIT(2)
-#define QED_LINK_LOOPBACK_EXT                   BIT(3)
-#define QED_LINK_LOOPBACK_MAC                   BIT(4)
-       u32     loopback_mode;
-       struct qed_link_eee_params eee;
+       bool                                    link_up;
+
+       u32                                     override_flags;
+#define QED_LINK_OVERRIDE_SPEED_AUTONEG                BIT(0)
+#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS     BIT(1)
+#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED   BIT(2)
+#define QED_LINK_OVERRIDE_PAUSE_CONFIG         BIT(3)
+#define QED_LINK_OVERRIDE_LOOPBACK_MODE                BIT(4)
+#define QED_LINK_OVERRIDE_EEE_CONFIG           BIT(5)
+#define QED_LINK_OVERRIDE_FEC_CONFIG           BIT(6)
+
+       bool                                    autoneg;
+       __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_speeds);
+       u32                                     forced_speed;
+
+       u32                                     pause_config;
+#define QED_LINK_PAUSE_AUTONEG_ENABLE          BIT(0)
+#define QED_LINK_PAUSE_RX_ENABLE               BIT(1)
+#define QED_LINK_PAUSE_TX_ENABLE               BIT(2)
+
+       u32                                     loopback_mode;
+#define QED_LINK_LOOPBACK_NONE                 BIT(0)
+#define QED_LINK_LOOPBACK_INT_PHY              BIT(1)
+#define QED_LINK_LOOPBACK_EXT_PHY              BIT(2)
+#define QED_LINK_LOOPBACK_EXT                  BIT(3)
+#define QED_LINK_LOOPBACK_MAC                  BIT(4)
+#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123    BIT(5)
+#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301    BIT(6)
+#define QED_LINK_LOOPBACK_PCS_AH_ONLY          BIT(7)
+#define QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY  BIT(8)
+#define QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY  BIT(9)
+
+       struct qed_link_eee_params              eee;
+       u32                                     fec;
 };
 
 struct qed_link_output {
-       bool    link_up;
+       bool                                    link_up;
 
-       /* In QED_LM_* defs */
-       u32     supported_caps;
-       u32     advertised_caps;
-       u32     lp_caps;
+       __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_caps);
+       __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised_caps);
+       __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_caps);
 
-       u32     speed;                  /* In Mb/s */
-       u8      duplex;                 /* In DUPLEX defs */
-       u8      port;                   /* In PORT defs */
-       bool    autoneg;
-       u32     pause_config;
+       u32                                     speed;     /* In Mb/s */
+       u8                                      duplex;    /* In DUPLEX defs */
+       u8                                      port;      /* In PORT defs */
+       bool                                    autoneg;
+       u32                                     pause_config;
 
        /* EEE - capability & param */
-       bool eee_supported;
-       bool eee_active;
-       u8 sup_caps;
-       struct qed_link_eee_params eee;
+       bool                                    eee_supported;
+       bool                                    eee_active;
+       u8                                      sup_caps;
+       struct qed_link_eee_params              eee;
+
+       u32                                     sup_fec;
+       u32                                     active_fec;
 };
 
 struct qed_probe_params {
@@ -988,13 +948,8 @@ struct qed_common_ops {
                                         u8 dp_level);
 
        int             (*chain_alloc)(struct qed_dev *cdev,
-                                      enum qed_chain_use_mode intended_use,
-                                      enum qed_chain_mode mode,
-                                      enum qed_chain_cnt_type cnt_type,
-                                      u32 num_elems,
-                                      size_t elem_size,
-                                      struct qed_chain *p_chain,
-                                      struct qed_chain_ext_pbl *ext_pbl);
+                                      struct qed_chain *chain,
+                                      struct qed_chain_init_params *params);
 
        void            (*chain_free)(struct qed_dev *cdev,
                                      struct qed_chain *p_chain);
@@ -1429,16 +1384,15 @@ static inline void qed_sb_ack(struct qed_sb_info *sb_info,
                              enum igu_int_cmd int_cmd,
                              u8 upd_flg)
 {
-       struct igu_prod_cons_update igu_ack = { 0 };
+       u32 igu_ack;
 
-       igu_ack.sb_id_and_flags =
-               ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
-                (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
-                (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
-                (IGU_SEG_ACCESS_REG <<
-                 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+       igu_ack = ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+                  (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+                  (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+                  (IGU_SEG_ACCESS_REG <<
+                   IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
 
-       DIRECT_REG_WR(sb_info->igu_addr, igu_ack.sb_id_and_flags);
+       DIRECT_REG_WR(sb_info->igu_addr, igu_ack);
 
        /* Both segments (interrupts & acks) are written to same place address;
         * Need to guarantee all commands will be received (in-order) by HW.
index ac2e6a3..8e31a28 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_IOV_IF_H
index d0df1be..04180d9 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_ISCSI_IF_H
index 1313c34..2f64ed7 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef _QED_LL2_IF_H
index 2d3ddd2..f464d85 100644 (file)
@@ -1,34 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
+
 #ifndef _QED_RDMA_IF_H
 #define _QED_RDMA_IF_H
 #include <linux/types.h>
index 5a00c7a..072da2f 100644 (file)
@@ -1,34 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qedr NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
+
 #ifndef QEDE_ROCE_H
 #define QEDE_ROCE_H
 
index 480a57e..bab078b 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef __RDMA_COMMON__
index 473fba7..ccddd7a 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef __ROCE_COMMON__
index 9a973ff..91896e8 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef __STORAGE_COMMON__
index 4a48451..2b2c87d 100644 (file)
@@ -1,33 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017  QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and /or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #ifndef __TCP_COMMON__
index 4f922af..45cf7b6 100644 (file)
@@ -155,7 +155,7 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
  * Loop over each sg element in the given sg_table object.
  */
 #define for_each_sgtable_sg(sgt, sg, i)                \
-       for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
+       for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)
 
 /*
  * Loop over each sg element in the given *DMA mapped* sg_table object.
@@ -163,7 +163,7 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
  * of the each element.
  */
 #define for_each_sgtable_dma_sg(sgt, sg, i)    \
-       for_each_sg(sgt->sgl, sg, sgt->nents, i)
+       for_each_sg((sgt)->sgl, sg, (sgt)->nents, i)
 
 /**
  * sg_chain - Chain two sglists together
@@ -451,7 +451,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
  * See also for_each_sg_page(). In each loop it operates on PAGE_SIZE unit.
  */
 #define for_each_sgtable_page(sgt, piter, pgoffset)    \
-       for_each_sg_page(sgt->sgl, piter, sgt->orig_nents, pgoffset)
+       for_each_sg_page((sgt)->sgl, piter, (sgt)->orig_nents, pgoffset)
 
 /**
  * for_each_sgtable_dma_page - iterate over the DMA mapped sg_table object
@@ -465,7 +465,7 @@ sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter)
  * unit.
  */
 #define for_each_sgtable_dma_page(sgt, dma_iter, pgoffset)     \
-       for_each_sg_dma_page(sgt->sgl, dma_iter, sgt->nents, pgoffset)
+       for_each_sg_dma_page((sgt)->sgl, dma_iter, (sgt)->nents, pgoffset)
 
 
 /*
index b62e6aa..c191e44 100644 (file)
@@ -654,9 +654,8 @@ struct task_struct {
        unsigned int                    ptrace;
 
 #ifdef CONFIG_SMP
-       struct llist_node               wake_entry;
-       unsigned int                    wake_entry_type;
        int                             on_cpu;
+       struct __call_single_node       wake_entry;
 #ifdef CONFIG_THREAD_INFO_IN_TASK
        /* Current CPU: */
        unsigned int                    cpu;
@@ -1511,7 +1510,6 @@ extern struct pid *cad_pid;
 #define PF_KTHREAD             0x00200000      /* I am a kernel thread */
 #define PF_RANDOMIZE           0x00400000      /* Randomize virtual address space */
 #define PF_SWAPWRITE           0x00800000      /* Allowed to write to swap */
-#define PF_UMH                 0x02000000      /* I'm an Usermodehelper process */
 #define PF_NO_SETAFFINITY      0x04000000      /* Userland is not allowed to meddle with cpus_mask */
 #define PF_MCE_EARLY           0x08000000      /* Early kill for mce process policy */
 #define PF_MEMALLOC_NOCMA      0x10000000      /* All allocation request will have _GFP_MOVABLE cleared */
@@ -2020,14 +2018,6 @@ static inline void rseq_execve(struct task_struct *t)
 
 #endif
 
-void __exit_umh(struct task_struct *tsk);
-
-static inline void exit_umh(struct task_struct *tsk)
-{
-       if (unlikely(tsk->flags & PF_UMH))
-               __exit_umh(tsk);
-}
-
 #ifdef CONFIG_DEBUG_RSEQ
 
 void rseq_syscall(struct pt_regs *regs);
index fa067de..d2b4204 100644 (file)
@@ -19,6 +19,7 @@ struct task_struct;
 #define JOBCTL_TRAPPING_BIT    21      /* switching to TRACED */
 #define JOBCTL_LISTENING_BIT   22      /* ptracer is listening for events */
 #define JOBCTL_TRAP_FREEZE_BIT 23      /* trap for cgroup freezer */
+#define JOBCTL_TASK_WORK_BIT   24      /* set by TWA_SIGNAL */
 
 #define JOBCTL_STOP_DEQUEUED   (1UL << JOBCTL_STOP_DEQUEUED_BIT)
 #define JOBCTL_STOP_PENDING    (1UL << JOBCTL_STOP_PENDING_BIT)
@@ -28,9 +29,10 @@ struct task_struct;
 #define JOBCTL_TRAPPING                (1UL << JOBCTL_TRAPPING_BIT)
 #define JOBCTL_LISTENING       (1UL << JOBCTL_LISTENING_BIT)
 #define JOBCTL_TRAP_FREEZE     (1UL << JOBCTL_TRAP_FREEZE_BIT)
+#define JOBCTL_TASK_WORK       (1UL << JOBCTL_TASK_WORK_BIT)
 
 #define JOBCTL_TRAP_MASK       (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
-#define JOBCTL_PENDING_MASK    (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
+#define JOBCTL_PENDING_MASK    (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK | JOBCTL_TASK_WORK)
 
 extern bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask);
 extern void task_clear_jobctl_trapping(struct task_struct *task);
index 0ee5e69..1bad18a 100644 (file)
@@ -674,6 +674,8 @@ static inline int thread_group_empty(struct task_struct *p)
 #define delay_group_leader(p) \
                (thread_group_leader(p) && !thread_group_empty(p))
 
+extern bool thread_group_exited(struct pid *pid);
+
 extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
                                                        unsigned long *flags);
 
index 8ccd821..7673123 100644 (file)
@@ -221,7 +221,7 @@ struct sctp_datahdr {
        __be16 stream;
        __be16 ssn;
        __u32 ppid;
-       __u8  payload[0];
+       __u8  payload[];
 };
 
 struct sctp_data_chunk {
@@ -269,7 +269,7 @@ struct sctp_inithdr {
        __be16 num_outbound_streams;
        __be16 num_inbound_streams;
        __be32 initial_tsn;
-       __u8  params[0];
+       __u8  params[];
 };
 
 struct sctp_init_chunk {
@@ -299,13 +299,13 @@ struct sctp_cookie_preserve_param {
 /* Section 3.3.2.1 Host Name Address (11) */
 struct sctp_hostname_param {
        struct sctp_paramhdr param_hdr;
-       uint8_t hostname[0];
+       uint8_t hostname[];
 };
 
 /* Section 3.3.2.1 Supported Address Types (12) */
 struct sctp_supported_addrs_param {
        struct sctp_paramhdr param_hdr;
-       __be16 types[0];
+       __be16 types[];
 };
 
 /* ADDIP Section 3.2.6 Adaptation Layer Indication */
@@ -317,25 +317,25 @@ struct sctp_adaptation_ind_param {
 /* ADDIP Section 4.2.7 Supported Extensions Parameter */
 struct sctp_supported_ext_param {
        struct sctp_paramhdr param_hdr;
-       __u8 chunks[0];
+       __u8 chunks[];
 };
 
 /* AUTH Section 3.1 Random */
 struct sctp_random_param {
        struct sctp_paramhdr param_hdr;
-       __u8 random_val[0];
+       __u8 random_val[];
 };
 
 /* AUTH Section 3.2 Chunk List */
 struct sctp_chunks_param {
        struct sctp_paramhdr param_hdr;
-       __u8 chunks[0];
+       __u8 chunks[];
 };
 
 /* AUTH Section 3.3 HMAC Algorithm */
 struct sctp_hmac_algo_param {
        struct sctp_paramhdr param_hdr;
-       __be16 hmac_ids[0];
+       __be16 hmac_ids[];
 };
 
 /* RFC 2960.  Section 3.3.3 Initiation Acknowledgement (INIT ACK) (2):
@@ -350,7 +350,7 @@ struct sctp_initack_chunk {
 /* Section 3.3.3.1 State Cookie (7) */
 struct sctp_cookie_param {
        struct sctp_paramhdr p;
-       __u8 body[0];
+       __u8 body[];
 };
 
 /* Section 3.3.3.1 Unrecognized Parameters (8) */
@@ -384,7 +384,7 @@ struct sctp_sackhdr {
        __be32 a_rwnd;
        __be16 num_gap_ack_blocks;
        __be16 num_dup_tsns;
-       union sctp_sack_variable variable[0];
+       union sctp_sack_variable variable[];
 };
 
 struct sctp_sack_chunk {
@@ -436,7 +436,7 @@ struct sctp_shutdown_chunk {
 struct sctp_errhdr {
        __be16 cause;
        __be16 length;
-       __u8  variable[0];
+       __u8  variable[];
 };
 
 struct sctp_operr_chunk {
@@ -594,7 +594,7 @@ struct sctp_fwdtsn_skip {
 
 struct sctp_fwdtsn_hdr {
        __be32 new_cum_tsn;
-       struct sctp_fwdtsn_skip skip[0];
+       struct sctp_fwdtsn_skip skip[];
 };
 
 struct sctp_fwdtsn_chunk {
@@ -611,7 +611,7 @@ struct sctp_ifwdtsn_skip {
 
 struct sctp_ifwdtsn_hdr {
        __be32 new_cum_tsn;
-       struct sctp_ifwdtsn_skip skip[0];
+       struct sctp_ifwdtsn_skip skip[];
 };
 
 struct sctp_ifwdtsn_chunk {
@@ -658,7 +658,7 @@ struct sctp_addip_param {
 
 struct sctp_addiphdr {
        __be32  serial;
-       __u8    params[0];
+       __u8    params[];
 };
 
 struct sctp_addip_chunk {
@@ -718,7 +718,7 @@ struct sctp_addip_chunk {
 struct sctp_authhdr {
        __be16 shkey_id;
        __be16 hmac_id;
-       __u8   hmac[0];
+       __u8   hmac[];
 };
 
 struct sctp_auth_chunk {
@@ -733,7 +733,7 @@ struct sctp_infox {
 
 struct sctp_reconf_chunk {
        struct sctp_chunkhdr chunk_hdr;
-       __u8 params[0];
+       __u8 params[];
 };
 
 struct sctp_strreset_outreq {
@@ -741,13 +741,13 @@ struct sctp_strreset_outreq {
        __be32 request_seq;
        __be32 response_seq;
        __be32 send_reset_at_tsn;
-       __be16 list_of_streams[0];
+       __be16 list_of_streams[];
 };
 
 struct sctp_strreset_inreq {
        struct sctp_paramhdr param_hdr;
        __be32 request_seq;
-       __be16 list_of_streams[0];
+       __be16 list_of_streams[];
 };
 
 struct sctp_strreset_tsnreq {
index 0c0377f..fa817a1 100644 (file)
@@ -1328,7 +1328,7 @@ void skb_flow_dissect_meta(const struct sk_buff *skb,
                           void *target_container);
 
 /* Gets a skb connection tracking info, ctinfo map should be a
- * map of mapsize to translate enum ip_conntrack_info states
+ * map of mapsize to translate enum ip_conntrack_info states
  * to user states.
  */
 void
@@ -1342,6 +1342,10 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
                             struct flow_dissector *flow_dissector,
                             void *target_container);
 
+void skb_flow_dissect_hash(const struct sk_buff *skb,
+                          struct flow_dissector *flow_dissector,
+                          void *target_container);
+
 static inline __u32 skb_get_hash(struct sk_buff *skb)
 {
        if (!skb->l4_hash && !skb->sw_hash)
@@ -3812,7 +3816,7 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
  * must call this function to return the skb back to the stack with a
  * timestamp.
  *
- * @skb: clone of the the original outgoing packet
+ * @skb: clone of the original outgoing packet
  * @hwtstamps: hardware time stamps
  *
  */
index 08674cd..1e9ed84 100644 (file)
@@ -430,6 +430,19 @@ static inline void psock_set_prog(struct bpf_prog **pprog,
                bpf_prog_put(prog);
 }
 
+static inline int psock_replace_prog(struct bpf_prog **pprog,
+                                    struct bpf_prog *prog,
+                                    struct bpf_prog *old)
+{
+       if (cmpxchg(pprog, old, prog) != old)
+               return -ENOENT;
+
+       if (old)
+               bpf_prog_put(old);
+
+       return 0;
+}
+
 static inline void psock_progs_drop(struct sk_psock_progs *progs)
 {
        psock_set_prog(&progs->msg_parser, NULL);
index 7ee202a..80d557e 100644 (file)
 #include <linux/list.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
-#include <linux/llist.h>
+#include <linux/smp_types.h>
 
 typedef void (*smp_call_func_t)(void *info);
 typedef bool (*smp_cond_func_t)(int cpu, void *info);
 
-enum {
-       CSD_FLAG_LOCK           = 0x01,
-
-       /* IRQ_WORK_flags */
-
-       CSD_TYPE_ASYNC          = 0x00,
-       CSD_TYPE_SYNC           = 0x10,
-       CSD_TYPE_IRQ_WORK       = 0x20,
-       CSD_TYPE_TTWU           = 0x30,
-       CSD_FLAG_TYPE_MASK      = 0xF0,
-};
-
 /*
  * structure shares (partial) layout with struct irq_work
  */
 struct __call_single_data {
-       struct llist_node llist;
-       unsigned int flags;
+       union {
+               struct __call_single_node node;
+               struct {
+                       struct llist_node llist;
+                       unsigned int flags;
+               };
+       };
        smp_call_func_t func;
        void *info;
 };
diff --git a/include/linux/smp_types.h b/include/linux/smp_types.h
new file mode 100644 (file)
index 0000000..364b3ae
--- /dev/null
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_SMP_TYPES_H
+#define __LINUX_SMP_TYPES_H
+
+#include <linux/llist.h>
+
+enum {
+       CSD_FLAG_LOCK           = 0x01,
+
+       IRQ_WORK_PENDING        = 0x01,
+       IRQ_WORK_BUSY           = 0x02,
+       IRQ_WORK_LAZY           = 0x04, /* No IPI, wait for tick */
+       IRQ_WORK_HARD_IRQ       = 0x08, /* IRQ context on PREEMPT_RT */
+
+       IRQ_WORK_CLAIMED        = (IRQ_WORK_PENDING | IRQ_WORK_BUSY),
+
+       CSD_TYPE_ASYNC          = 0x00,
+       CSD_TYPE_SYNC           = 0x10,
+       CSD_TYPE_IRQ_WORK       = 0x20,
+       CSD_TYPE_TTWU           = 0x30,
+
+       CSD_FLAG_TYPE_MASK      = 0xF0,
+};
+
+/*
+ * struct __call_single_node is the primary type on
+ * smp.c:call_single_queue.
+ *
+ * flush_smp_call_function_queue() only reads the type from
+ * __call_single_node::u_flags as a regular load, the above
+ * (anonymous) enum defines all the bits of this word.
+ *
+ * Other bits are not modified until the type is known.
+ *
+ * CSD_TYPE_SYNC/ASYNC:
+ *     struct {
+ *             struct llist_node node;
+ *             unsigned int flags;
+ *             smp_call_func_t func;
+ *             void *info;
+ *     };
+ *
+ * CSD_TYPE_IRQ_WORK:
+ *     struct {
+ *             struct llist_node node;
+ *             atomic_t flags;
+ *             void (*func)(struct irq_work *);
+ *     };
+ *
+ * CSD_TYPE_TTWU:
+ *     struct {
+ *             struct llist_node node;
+ *             unsigned int flags;
+ *     };
+ *
+ */
+
+struct __call_single_node {
+       struct llist_node       llist;
+       union {
+               unsigned int    u_flags;
+               atomic_t        a_flags;
+       };
+};
+
+#endif /* __LINUX_SMP_TYPES_H */
diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h
new file mode 100644 (file)
index 0000000..7d5cdb2
--- /dev/null
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 Christoph Hellwig.
+ *
+ * Support for "universal" pointers that can point to either kernel or userspace
+ * memory.
+ */
+#ifndef _LINUX_SOCKPTR_H
+#define _LINUX_SOCKPTR_H
+
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+typedef union {
+       void            *kernel;
+       void __user     *user;
+} sockptr_t;
+
+static inline bool sockptr_is_kernel(sockptr_t sockptr)
+{
+       return (unsigned long)sockptr.kernel >= TASK_SIZE;
+}
+
+static inline sockptr_t KERNEL_SOCKPTR(void *p)
+{
+       return (sockptr_t) { .kernel = p };
+}
+
+static inline int __must_check init_user_sockptr(sockptr_t *sp, void __user *p)
+{
+       if ((unsigned long)p >= TASK_SIZE)
+               return -EFAULT;
+       sp->user = p;
+       return 0;
+}
+#else /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
+typedef struct {
+       union {
+               void            *kernel;
+               void __user     *user;
+       };
+       bool            is_kernel : 1;
+} sockptr_t;
+
+static inline bool sockptr_is_kernel(sockptr_t sockptr)
+{
+       return sockptr.is_kernel;
+}
+
+static inline sockptr_t KERNEL_SOCKPTR(void *p)
+{
+       return (sockptr_t) { .kernel = p, .is_kernel = true };
+}
+
+static inline int __must_check init_user_sockptr(sockptr_t *sp, void __user *p)
+{
+       sp->user = p;
+       sp->is_kernel = false;
+       return 0;
+}
+#endif /* CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE */
+
+static inline bool sockptr_is_null(sockptr_t sockptr)
+{
+       return !sockptr.user && !sockptr.kernel;
+}
+
+static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size)
+{
+       if (!sockptr_is_kernel(src))
+               return copy_from_user(dst, src.user, size);
+       memcpy(dst, src.kernel, size);
+       return 0;
+}
+
+static inline int copy_to_sockptr(sockptr_t dst, const void *src, size_t size)
+{
+       if (!sockptr_is_kernel(dst))
+               return copy_to_user(dst.user, src, size);
+       memcpy(dst.kernel, src, size);
+       return 0;
+}
+
+static inline void *memdup_sockptr(sockptr_t src, size_t len)
+{
+       void *p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
+
+       if (!p)
+               return ERR_PTR(-ENOMEM);
+       if (copy_from_sockptr(p, src, len)) {
+               kfree(p);
+               return ERR_PTR(-EFAULT);
+       }
+       return p;
+}
+
+static inline void *memdup_sockptr_nul(sockptr_t src, size_t len)
+{
+       char *p = kmalloc_track_caller(len + 1, GFP_KERNEL);
+
+       if (!p)
+               return ERR_PTR(-ENOMEM);
+       if (copy_from_sockptr(p, src, len)) {
+               kfree(p);
+               return ERR_PTR(-EFAULT);
+       }
+       p[len] = '\0';
+       return p;
+}
+
+static inline void sockptr_advance(sockptr_t sockptr, size_t len)
+{
+       if (sockptr_is_kernel(sockptr))
+               sockptr.kernel += len;
+       else
+               sockptr.user += len;
+}
+
+static inline long strncpy_from_sockptr(char *dst, sockptr_t src, size_t count)
+{
+       if (sockptr_is_kernel(src)) {
+               size_t len = min(strnlen(src.kernel, count - 1) + 1, count);
+
+               memcpy(dst, src.kernel, len);
+               return len;
+       }
+       return strncpy_from_user(dst, src.user, count);
+}
+
+#endif /* _LINUX_SOCKPTR_H */
index 4c5974b..5b3216b 100644 (file)
@@ -313,6 +313,7 @@ struct vma_swap_readahead {
 };
 
 /* linux/mm/workingset.c */
+void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
 void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
 void workingset_refault(struct page *page, void *shadow);
 void workingset_activation(struct page *page);
index 7c354c2..aa46825 100644 (file)
@@ -1360,7 +1360,7 @@ static inline long ksys_lchown(const char __user *filename, uid_t user,
 
 extern long do_sys_ftruncate(unsigned int fd, loff_t length, int small);
 
-static inline long ksys_ftruncate(unsigned int fd, unsigned long length)
+static inline long ksys_ftruncate(unsigned int fd, loff_t length)
 {
        return do_sys_ftruncate(fd, length, 1);
 }
@@ -1424,4 +1424,8 @@ long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
                            unsigned int nsops,
                            const struct old_timespec32 __user *timeout);
 
+int __sys_getsockopt(int fd, int level, int optname, char __user *optval,
+               int __user *optlen);
+int __sys_setsockopt(int fd, int level, int optname, char __user *optval,
+               int optlen);
 #endif
index bd9a6a9..0fb93aa 100644 (file)
@@ -13,7 +13,10 @@ init_task_work(struct callback_head *twork, task_work_func_t func)
        twork->func = func;
 }
 
-int task_work_add(struct task_struct *task, struct callback_head *twork, bool);
+#define TWA_RESUME     1
+#define TWA_SIGNAL     2
+int task_work_add(struct task_struct *task, struct callback_head *twork, int);
+
 struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
 void task_work_run(void);
 
index 9aac824..3bdec31 100644 (file)
@@ -499,6 +499,7 @@ int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount,
 
 void tcp_sock_set_cork(struct sock *sk, bool on);
 int tcp_sock_set_keepcnt(struct sock *sk, int val);
+int tcp_sock_set_keepidle_locked(struct sock *sk, int val);
 int tcp_sock_set_keepidle(struct sock *sk, int val);
 int tcp_sock_set_keepintvl(struct sock *sk, int val);
 void tcp_sock_set_nodelay(struct sock *sk);
index 299cbb8..44073d0 100644 (file)
@@ -124,7 +124,7 @@ struct tifm_adapter {
        int                 (*has_ms_pif)(struct tifm_adapter *fm,
                                          struct tifm_dev *sock);
 
-       struct tifm_dev     *sockets[0];
+       struct tifm_dev     *sockets[];
 };
 
 struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets,
index b27e2ff..d5471d6 100644 (file)
@@ -222,9 +222,9 @@ extern bool timekeeping_rtc_skipresume(void);
 
 extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
 
-/*
+/**
  * struct system_time_snapshot - simultaneous raw/real time capture with
- *     counter value
+ *                              counter value
  * @cycles:    Clocksource counter value to produce the system times
  * @real:      Realtime system time
  * @raw:       Monotonic raw system time
@@ -239,9 +239,9 @@ struct system_time_snapshot {
        u8              cs_was_changed_seq;
 };
 
-/*
+/**
  * struct system_device_crosststamp - system/device cross-timestamp
- *     (syncronized capture)
+ *                                   (synchronized capture)
  * @device:            Device time
  * @sys_realtime:      Realtime simultaneous with device time
  * @sys_monoraw:       Monotonic raw simultaneous with device time
@@ -252,12 +252,12 @@ struct system_device_crosststamp {
        ktime_t sys_monoraw;
 };
 
-/*
+/**
  * struct system_counterval_t - system counter value with the pointer to the
- *     corresponding clocksource
+ *                             corresponding clocksource
  * @cycles:    System counter value
  * @cs:                Clocksource corresponding to system counter value. Used by
- *     timekeeping code to verify comparibility of two cycle values
+ *             timekeeping code to verify comparibility of two cycle values
  */
 struct system_counterval_t {
        u64                     cycles;
index 4f8c90c..64356b1 100644 (file)
@@ -81,6 +81,8 @@ struct tcg_efi_specid_event_algs {
        u16 digest_size;
 } __packed;
 
+#define TCG_SPECID_SIG "Spec ID Event03"
+
 struct tcg_efi_specid_event_head {
        u8 signature[16];
        u32 platform_class;
@@ -171,6 +173,7 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
        int i;
        int j;
        u32 count, event_type;
+       const u8 zero_digest[sizeof(event_header->digest)] = {0};
 
        marker = event;
        marker_start = marker;
@@ -198,10 +201,19 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
        count = READ_ONCE(event->count);
        event_type = READ_ONCE(event->event_type);
 
+       /* Verify that it's the log header */
+       if (event_header->pcr_idx != 0 ||
+           event_header->event_type != NO_ACTION ||
+           memcmp(event_header->digest, zero_digest, sizeof(zero_digest))) {
+               size = 0;
+               goto out;
+       }
+
        efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
 
        /* Check if event is malformed. */
-       if (count > efispecid->num_algs) {
+       if (memcmp(efispecid->signature, TCG_SPECID_SIG,
+                  sizeof(TCG_SPECID_SIG)) || count > efispecid->num_algs) {
                size = 0;
                goto out;
        }
index 7bcadca..0a76ddc 100644 (file)
@@ -301,13 +301,14 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
        return 0;
 }
 
-bool probe_kernel_read_allowed(const void *unsafe_src, size_t size);
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);
 
-extern long probe_kernel_read(void *dst, const void *src, size_t size);
-extern long probe_user_read(void *dst, const void __user *src, size_t size);
+long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
+long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);
 
-extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
-extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);
+long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
+long notrace copy_to_user_nofault(void __user *dst, const void *src,
+               size_t size);
 
 long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
                long count);
@@ -317,14 +318,16 @@ long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
 long strnlen_user_nofault(const void __user *unsafe_addr, long count);
 
 /**
- * probe_kernel_address(): safely attempt to read from a location
- * @addr: address to read from
- * @retval: read into this variable
+ * get_kernel_nofault(): safely attempt to read from a location
+ * @val: read into this variable
+ * @ptr: address to read from
  *
  * Returns 0 on success, or -EFAULT.
  */
-#define probe_kernel_address(addr, retval)             \
-       probe_kernel_read(&retval, addr, sizeof(retval))
+#define get_kernel_nofault(val, ptr) ({                                \
+       const typeof(val) *__gk_ptr = (ptr);                    \
+       copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
+})
 
 #ifndef user_access_begin
 #define user_access_begin(ptr,len) access_ok(ptr, len)
index 0c08de3..244aff6 100644 (file)
@@ -22,10 +22,8 @@ struct subprocess_info {
        const char *path;
        char **argv;
        char **envp;
-       struct file *file;
        int wait;
        int retval;
-       pid_t pid;
        int (*init)(struct subprocess_info *info, struct cred *new);
        void (*cleanup)(struct subprocess_info *info);
        void *data;
@@ -40,19 +38,6 @@ call_usermodehelper_setup(const char *path, char **argv, char **envp,
                          int (*init)(struct subprocess_info *info, struct cred *new),
                          void (*cleanup)(struct subprocess_info *), void *data);
 
-struct subprocess_info *call_usermodehelper_setup_file(struct file *file,
-                         int (*init)(struct subprocess_info *info, struct cred *new),
-                         void (*cleanup)(struct subprocess_info *), void *data);
-struct umh_info {
-       const char *cmdline;
-       struct file *pipe_to_umh;
-       struct file *pipe_from_umh;
-       struct list_head list;
-       void (*cleanup)(struct umh_info *info);
-       pid_t pid;
-};
-int fork_usermode_blob(void *data, size_t len, struct umh_info *info);
-
 extern int
 call_usermodehelper_exec(struct subprocess_info *info, int wait);
 
index b0bff30..2e4f772 100644 (file)
@@ -207,6 +207,7 @@ struct cdc_state {
        struct usb_interface            *data;
 };
 
+extern void usbnet_cdc_update_filter(struct usbnet *dev);
 extern int usbnet_generic_cdc_bind(struct usbnet *, struct usb_interface *);
 extern int usbnet_ether_cdc_bind(struct usbnet *dev, struct usb_interface *intf);
 extern int usbnet_cdc_bind(struct usbnet *, struct usb_interface *);
@@ -273,6 +274,7 @@ extern int usbnet_set_link_ksettings(struct net_device *net,
 extern u32 usbnet_get_link(struct net_device *net);
 extern u32 usbnet_get_msglevel(struct net_device *);
 extern void usbnet_set_msglevel(struct net_device *, u32);
+extern void usbnet_set_rx_mode(struct net_device *net);
 extern void usbnet_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
 extern int usbnet_nway_reset(struct net_device *net);
 
diff --git a/include/linux/usermode_driver.h b/include/linux/usermode_driver.h
new file mode 100644 (file)
index 0000000..073a9e0
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef __LINUX_USERMODE_DRIVER_H__
+#define __LINUX_USERMODE_DRIVER_H__
+
+#include <linux/umh.h>
+#include <linux/path.h>
+
+struct umd_info {
+       const char *driver_name;
+       struct file *pipe_to_umh;
+       struct file *pipe_from_umh;
+       struct path wd;
+       struct pid *tgid;
+};
+int umd_load_blob(struct umd_info *info, const void *data, size_t len);
+int umd_unload_blob(struct umd_info *info);
+int fork_usermode_driver(struct umd_info *info);
+
+#endif /* __LINUX_USERMODE_DRIVER_H__ */
index 48bb681..0221f85 100644 (file)
@@ -106,7 +106,6 @@ extern void *vzalloc(unsigned long size);
 extern void *vmalloc_user(unsigned long size);
 extern void *vmalloc_node(unsigned long size, int node);
 extern void *vzalloc_node(unsigned long size, int node);
-extern void *vmalloc_exec(unsigned long size);
 extern void *vmalloc_32(unsigned long size);
 extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
index 4dd2c1c..cdae052 100644 (file)
@@ -184,8 +184,8 @@ do {                                                                        \
 
 
 /*
- * CPP sintatic sugar to generate A_B like symbol names when one of
- * the arguments is a preprocessor #define.
+ * CPP syntactic sugar to generate A_B like symbol names when one of
+ * the arguments is a preprocessor #define.
  */
 #define __D_PASTE__(varname, modulename) varname##_##modulename
 #define __D_PASTE(varname, modulename) (__D_PASTE__(varname, modulename))
index 98a2be2..3eb4261 100644 (file)
@@ -25,7 +25,7 @@
  * @request: member function to issue a request to the transport
  * @cancel: member function to cancel a request (if it hasn't been sent)
  * @cancelled: member function to notify that a cancelled request will not
- *             not receive a reply
+ *             receive a reply
  *
  * This is the basic API for a transport module which is registered by the
  * transport module with the 9P core network module and used by the client
index 8c39348..cb382a8 100644 (file)
@@ -106,7 +106,7 @@ struct tc_action_ops {
                        struct netlink_callback *, int,
                        const struct tc_action_ops *,
                        struct netlink_ext_ack *);
-       void    (*stats_update)(struct tc_action *, u64, u32, u64, bool);
+       void    (*stats_update)(struct tc_action *, u64, u64, u64, u64, bool);
        size_t  (*get_fill_size)(const struct tc_action *act);
        struct net_device *(*get_dev)(const struct tc_action *a,
                                      tc_action_priv_destructor *destructor);
@@ -232,8 +232,8 @@ static inline void tcf_action_inc_overlimit_qstats(struct tc_action *a)
        spin_unlock(&a->tcfa_lock);
 }
 
-void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets,
-                            bool drop, bool hw);
+void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
+                            u64 drops, bool hw);
 int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
 
 int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
@@ -244,13 +244,14 @@ struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
 #endif /* CONFIG_NET_CLS_ACT */
 
 static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
-                                          u64 packets, u64 lastuse, bool hw)
+                                          u64 packets, u64 drops,
+                                          u64 lastuse, bool hw)
 {
 #ifdef CONFIG_NET_CLS_ACT
        if (!a->ops->stats_update)
                return;
 
-       a->ops->stats_update(a, bytes, packets, lastuse, hw);
+       a->ops->stats_update(a, bytes, packets, drops, lastuse, hw);
 #endif
 }
 
index aa854a9..7d132cc 100644 (file)
 #define bond_for_each_slave_rcu(bond, pos, iter) \
        netdev_for_each_lower_private_rcu((bond)->dev, pos, iter)
 
+#ifdef CONFIG_XFRM_OFFLOAD
+#define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \
+                           NETIF_F_GSO_ESP)
+#endif /* CONFIG_XFRM_OFFLOAD */
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 extern atomic_t netpoll_block_tx;
 
@@ -238,6 +243,9 @@ struct bonding {
        struct   dentry *debug_dir;
 #endif /* CONFIG_DEBUG_FS */
        struct rtnl_link_stats64 bond_stats;
+#ifdef CONFIG_XFRM_OFFLOAD
+       struct xfrm_state *xs;
+#endif /* CONFIG_XFRM_OFFLOAD */
 };
 
 #define bond_slave_get_rcu(dev) \
index 86e0283..b001fa9 100644 (file)
@@ -114,7 +114,11 @@ static inline void skb_mark_napi_id(struct sk_buff *skb,
                                    struct napi_struct *napi)
 {
 #ifdef CONFIG_NET_RX_BUSY_POLL
-       skb->napi_id = napi->napi_id;
+       /* If the skb was already marked with a valid NAPI ID, avoid overwriting
+        * it.
+        */
+       if (skb->napi_id < MIN_NAPI_ID)
+               skb->napi_id = napi->napi_id;
 #endif
 }
 
index 0640941..51f7bb4 100644 (file)
@@ -156,7 +156,7 @@ struct cflayer {
         *  CAIF packets upwards in the stack.
         *      Packet handling rules:
         *            - The CAIF packet (cfpkt) ownership is passed to the
-        *              called receive function. This means that the the
+        *              called receive function. This means that the
         *              packet cannot be accessed after passing it to the
         *              above layer using up->receive().
         *
@@ -184,7 +184,7 @@ struct cflayer {
         *      CAIF packet downwards in the stack.
         *      Packet handling rules:
         *            - The CAIF packet (cfpkt) ownership is passed to the
-        *              transmit function. This means that the the packet
+        *              transmit function. This means that the packet
         *              cannot be accessed after passing it to the below
         *              layer using dn->transmit().
         *
index 428b672..53dd7d9 100644 (file)
@@ -150,18 +150,6 @@ static inline int cipso_v4_doi_walk(u32 *skip_cnt,
 {
        return 0;
 }
-
-static inline int cipso_v4_doi_domhsh_add(struct cipso_v4_doi *doi_def,
-                                         const char *domain)
-{
-       return -ENOSYS;
-}
-
-static inline int cipso_v4_doi_domhsh_remove(struct cipso_v4_doi *doi_def,
-                                            const char *domain)
-{
-       return 0;
-}
 #endif /* CONFIG_NETLABEL */
 
 /*
index f241666..745db0d 100644 (file)
@@ -61,7 +61,6 @@ int __get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg,
                        compat_size_t *len);
 int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
                      struct sockaddr __user **, struct iovec **);
-struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval);
 int put_cmsg_compat(struct msghdr*, int, int, int, void *);
 
 int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *,
index 1df6dfe..19d990c 100644 (file)
@@ -40,7 +40,9 @@ struct devlink {
        struct xarray snapshot_ids;
        struct device *dev;
        possible_net_t _net;
-       struct mutex lock;
+       struct mutex lock; /* Serializes access to devlink instance specific objects such as
+                           * port, sb, dpipe, resource, params, region, traps and more.
+                           */
        u8 reload_failed:1,
           reload_enabled:1,
           registered:1;
@@ -52,7 +54,7 @@ struct devlink_port_phys_attrs {
                          * A physical port which is visible to the user
                          * for a given port flavour.
                          */
-       u32 split_subport_number;
+       u32 split_subport_number; /* If the port is split, this is the number of subport. */
 };
 
 struct devlink_port_pci_pf_attrs {
@@ -64,10 +66,18 @@ struct devlink_port_pci_vf_attrs {
        u16 vf; /* Associated PCI VF for of the PCI PF for this port. */
 };
 
+/**
+ * struct devlink_port_attrs - devlink port object
+ * @flavour: flavour of the port
+ * @split: indicates if this is split port
+ * @splittable: indicates if the port can be split.
+ * @lanes: maximum number of lanes the port supports. 0 value is not passed to netlink.
+ * @switch_id: if the port is part of switch, this is buffer with ID, otherwise this is NULL
+ */
 struct devlink_port_attrs {
-       u8 set:1,
-          split:1,
-          switch_port:1;
+       u8 split:1,
+          splittable:1;
+       u32 lanes;
        enum devlink_port_flavour flavour;
        struct netdev_phys_item_id switch_id;
        union {
@@ -90,7 +100,11 @@ struct devlink_port {
        enum devlink_port_type desired_type;
        void *type_dev;
        struct devlink_port_attrs attrs;
+       u8 attrs_set:1,
+          switch_port:1;
        struct delayed_work type_warn_dw;
+       struct list_head reporter_list;
+       struct mutex reporters_lock; /* Protects reporter_list */
 };
 
 struct devlink_sb_pool_info {
@@ -1107,6 +1121,28 @@ struct devlink_ops {
        int (*trap_policer_counter_get)(struct devlink *devlink,
                                        const struct devlink_trap_policer *policer,
                                        u64 *p_drops);
+       /**
+        * @port_function_hw_addr_get: Port function's hardware address get function.
+        *
+        * Should be used by device drivers to report the hardware address of a function managed
+        * by the devlink port. Driver should return -EOPNOTSUPP if it doesn't support port
+        * function handling for a particular port.
+        *
+        * Note: @extack can be NULL when port notifier queries the port function.
+        */
+       int (*port_function_hw_addr_get)(struct devlink *devlink, struct devlink_port *port,
+                                        u8 *hw_addr, int *hw_addr_len,
+                                        struct netlink_ext_ack *extack);
+       /**
+        * @port_function_hw_addr_set: Port function's hardware address set function.
+        *
+        * Should be used by device drivers to set the hardware address of a function managed
+        * by the devlink port. Driver should return -EOPNOTSUPP if it doesn't support port
+        * function handling for a particular port.
+        */
+       int (*port_function_hw_addr_set)(struct devlink *devlink, struct devlink_port *port,
+                                        const u8 *hw_addr, int hw_addr_len,
+                                        struct netlink_ext_ack *extack);
 };
 
 static inline void *devlink_priv(struct devlink *devlink)
@@ -1158,17 +1194,9 @@ void devlink_port_type_ib_set(struct devlink_port *devlink_port,
                              struct ib_device *ibdev);
 void devlink_port_type_clear(struct devlink_port *devlink_port);
 void devlink_port_attrs_set(struct devlink_port *devlink_port,
-                           enum devlink_port_flavour flavour,
-                           u32 port_number, bool split,
-                           u32 split_subport_number,
-                           const unsigned char *switch_id,
-                           unsigned char switch_id_len);
-void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port,
-                                  const unsigned char *switch_id,
-                                  unsigned char switch_id_len, u16 pf);
+                           struct devlink_port_attrs *devlink_port_attrs);
+void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u16 pf);
 void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port,
-                                  const unsigned char *switch_id,
-                                  unsigned char switch_id_len,
                                   u16 pf, u16 vf);
 int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
                        u32 size, u16 ingress_pools_count,
@@ -1262,6 +1290,8 @@ int devlink_info_serial_number_put(struct devlink_info_req *req,
                                   const char *sn);
 int devlink_info_driver_name_put(struct devlink_info_req *req,
                                 const char *name);
+int devlink_info_board_serial_number_put(struct devlink_info_req *req,
+                                        const char *bsn);
 int devlink_info_version_fixed_put(struct devlink_info_req *req,
                                   const char *version_name,
                                   const char *version_value);
@@ -1310,9 +1340,18 @@ struct devlink_health_reporter *
 devlink_health_reporter_create(struct devlink *devlink,
                               const struct devlink_health_reporter_ops *ops,
                               u64 graceful_period, void *priv);
+
+struct devlink_health_reporter *
+devlink_port_health_reporter_create(struct devlink_port *port,
+                                   const struct devlink_health_reporter_ops *ops,
+                                   u64 graceful_period, void *priv);
+
 void
 devlink_health_reporter_destroy(struct devlink_health_reporter *reporter);
 
+void
+devlink_port_health_reporter_destroy(struct devlink_health_reporter *reporter);
+
 void *
 devlink_health_reporter_priv(struct devlink_health_reporter *reporter);
 int devlink_health_report(struct devlink_health_reporter *reporter,
index 5038977..75c8fac 100644 (file)
@@ -44,6 +44,7 @@ struct phylink_link_state;
 #define DSA_TAG_PROTO_KSZ8795_VALUE            14
 #define DSA_TAG_PROTO_OCELOT_VALUE             15
 #define DSA_TAG_PROTO_AR9331_VALUE             16
+#define DSA_TAG_PROTO_RTL4_A_VALUE             17
 
 enum dsa_tag_protocol {
        DSA_TAG_PROTO_NONE              = DSA_TAG_PROTO_NONE_VALUE,
@@ -63,6 +64,7 @@ enum dsa_tag_protocol {
        DSA_TAG_PROTO_KSZ8795           = DSA_TAG_PROTO_KSZ8795_VALUE,
        DSA_TAG_PROTO_OCELOT            = DSA_TAG_PROTO_OCELOT_VALUE,
        DSA_TAG_PROTO_AR9331            = DSA_TAG_PROTO_AR9331_VALUE,
+       DSA_TAG_PROTO_RTL4_A            = DSA_TAG_PROTO_RTL4_A_VALUE,
 };
 
 struct packet_type;
@@ -84,6 +86,16 @@ struct dsa_device_ops {
        enum dsa_tag_protocol proto;
 };
 
+/* This structure defines the control interfaces that are overlayed by the
+ * DSA layer on top of the DSA CPU/management net_device instance. This is
+ * used by the core net_device layer while calling various net_device_ops
+ * function pointers.
+ */
+struct dsa_netdevice_ops {
+       int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr,
+                           int cmd);
+};
+
 #define DSA_TAG_DRIVER_ALIAS "dsa_tag-"
 #define MODULE_ALIAS_DSA_TAG_DRIVER(__proto)                           \
        MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __stringify(__proto##_VALUE))
@@ -144,7 +156,7 @@ struct dsa_mall_mirror_tc_entry {
 
 /* TC port policer entry */
 struct dsa_mall_policer_tc_entry {
-       s64 burst;
+       u32 burst;
        u64 rate_bytes_per_sec;
 };
 
@@ -215,7 +227,7 @@ struct dsa_port {
        /*
         * Original copy of the master netdev net_device_ops
         */
-       const struct net_device_ops *orig_ndo_ops;
+       const struct dsa_netdevice_ops *netdev_ops;
 
        bool setup;
 };
@@ -610,7 +622,7 @@ struct dsa_switch_ops {
         * MTU change functionality. Switches can also adjust their MRU through
         * this method. By MTU, one understands the SDU (L2 payload) length.
         * If the switch needs to account for the DSA tag on the CPU port, this
-        * method needs to to do so privately.
+        * method needs to do so privately.
         */
        int     (*port_change_mtu)(struct dsa_switch *ds, int port,
                                   int new_mtu);
@@ -677,6 +689,42 @@ static inline bool dsa_can_decode(const struct sk_buff *skb,
        return false;
 }
 
+#if IS_ENABLED(CONFIG_NET_DSA)
+static inline int __dsa_netdevice_ops_check(struct net_device *dev)
+{
+       int err = -EOPNOTSUPP;
+
+       if (!dev->dsa_ptr)
+               return err;
+
+       if (!dev->dsa_ptr->netdev_ops)
+               return err;
+
+       return 0;
+}
+
+static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr,
+                                  int cmd)
+{
+       const struct dsa_netdevice_ops *ops;
+       int err;
+
+       err = __dsa_netdevice_ops_check(dev);
+       if (err)
+               return err;
+
+       ops = dev->dsa_ptr->netdev_ops;
+
+       return ops->ndo_do_ioctl(dev, ifr, cmd);
+}
+#else
+static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr,
+                                  int cmd)
+{
+       return -EOPNOTSUPP;
+}
+#endif
+
 void dsa_unregister_switch(struct dsa_switch *ds);
 int dsa_register_switch(struct dsa_switch *ds);
 struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
index 07adfac..852d8fb 100644 (file)
@@ -400,7 +400,15 @@ static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, co
 static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
                                                     struct sk_buff *skb)
 {
-       struct neighbour *n =  dst->ops->neigh_lookup(dst, skb, NULL);
+       struct neighbour *n = NULL;
+
+       /* The packets from tunnel devices (eg bareudp) may have only
+        * metadata in the dst pointer of skb. Hence a pointer check of
+        * neigh_lookup is needed.
+        */
+       if (dst->ops->neigh_lookup)
+               n = dst->ops->neigh_lookup(dst, skb, NULL);
+
        return IS_ERR(n) ? NULL : n;
 }
 
index a50fb77..929d3ca 100644 (file)
@@ -204,24 +204,6 @@ static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
        return container_of(fldn, struct flowi, u.dn);
 }
 
-typedef unsigned long flow_compare_t;
-
-static inline unsigned int flow_key_size(u16 family)
-{
-       switch (family) {
-       case AF_INET:
-               BUILD_BUG_ON(sizeof(struct flowi4) % sizeof(flow_compare_t));
-               return sizeof(struct flowi4) / sizeof(flow_compare_t);
-       case AF_INET6:
-               BUILD_BUG_ON(sizeof(struct flowi6) % sizeof(flow_compare_t));
-               return sizeof(struct flowi6) / sizeof(flow_compare_t);
-       case AF_DECnet:
-               BUILD_BUG_ON(sizeof(struct flowidn) % sizeof(flow_compare_t));
-               return sizeof(struct flowidn) / sizeof(flow_compare_t);
-       }
-       return 0;
-}
-
 __u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys);
 
 #endif
index a7eba43..cc10b10 100644 (file)
@@ -243,6 +243,14 @@ struct flow_dissector_key_ct {
        u32     ct_labels[4];
 };
 
+/**
+ * struct flow_dissector_key_hash:
+ * @hash: hash value
+ */
+struct flow_dissector_key_hash {
+       u32 hash;
+};
+
 enum flow_dissector_key_id {
        FLOW_DISSECTOR_KEY_CONTROL, /* struct flow_dissector_key_control */
        FLOW_DISSECTOR_KEY_BASIC, /* struct flow_dissector_key_basic */
@@ -271,6 +279,7 @@ enum flow_dissector_key_id {
        FLOW_DISSECTOR_KEY_ENC_OPTS, /* struct flow_dissector_key_enc_opts */
        FLOW_DISSECTOR_KEY_META, /* struct flow_dissector_key_meta */
        FLOW_DISSECTOR_KEY_CT, /* struct flow_dissector_key_ct */
+       FLOW_DISSECTOR_KEY_HASH, /* struct flow_dissector_key_hash */
 
        FLOW_DISSECTOR_KEY_MAX,
 };
@@ -372,7 +381,8 @@ flow_dissector_init_keys(struct flow_dissector_key_control *key_control,
 }
 
 #ifdef CONFIG_BPF_SYSCALL
-int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog);
+int flow_dissector_bpf_prog_attach_check(struct net *net,
+                                        struct bpf_prog *prog);
 #endif /* CONFIG_BPF_SYSCALL */
 
 #endif
index f2c8311..9f88a7b 100644 (file)
@@ -232,8 +232,10 @@ struct flow_action_entry {
                        bool                    truncate;
                } sample;
                struct {                                /* FLOW_ACTION_POLICE */
-                       s64                     burst;
+                       u32                     index;
+                       u32                     burst;
                        u64                     rate_bytes_ps;
+                       u32                     mtu;
                } police;
                struct {                                /* FLOW_ACTION_CT */
                        int action;
@@ -389,17 +391,20 @@ static inline bool flow_rule_match_key(const struct flow_rule *rule,
 struct flow_stats {
        u64     pkts;
        u64     bytes;
+       u64     drops;
        u64     lastused;
        enum flow_action_hw_stats used_hw_stats;
        bool used_hw_stats_valid;
 };
 
 static inline void flow_stats_update(struct flow_stats *flow_stats,
-                                    u64 bytes, u64 pkts, u64 lastused,
+                                    u64 bytes, u64 pkts,
+                                    u64 drops, u64 lastused,
                                     enum flow_action_hw_stats used_hw_stats)
 {
        flow_stats->pkts        += pkts;
        flow_stats->bytes       += bytes;
+       flow_stats->drops       += drops;
        flow_stats->lastused    = max_t(u64, flow_stats->lastused, lastused);
 
        /* The driver should pass value with a maximum of one bit set.
@@ -419,6 +424,8 @@ enum flow_block_binder_type {
        FLOW_BLOCK_BINDER_TYPE_UNSPEC,
        FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
        FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
+       FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
+       FLOW_BLOCK_BINDER_TYPE_RED_MARK,
 };
 
 struct flow_block {
@@ -437,6 +444,7 @@ struct flow_block_offload {
        struct list_head cb_list;
        struct list_head *driver_block_list;
        struct netlink_ext_ack *extack;
+       struct Qdisc *sch;
 };
 
 enum tc_setup_type;
@@ -448,8 +456,10 @@ struct flow_block_cb;
 struct flow_block_indr {
        struct list_head                list;
        struct net_device               *dev;
+       struct Qdisc                    *sch;
        enum flow_block_binder_type     binder_type;
        void                            *data;
+       void                            *cb_priv;
        void                            (*cleanup)(struct flow_block_cb *block_cb);
 };
 
@@ -467,6 +477,14 @@ struct flow_block_cb {
 struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
                                          void *cb_ident, void *cb_priv,
                                          void (*release)(void *cb_priv));
+struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
+                                              void *cb_ident, void *cb_priv,
+                                              void (*release)(void *cb_priv),
+                                              struct flow_block_offload *bo,
+                                              struct net_device *dev,
+                                              struct Qdisc *sch, void *data,
+                                              void *indr_cb_priv,
+                                              void (*cleanup)(struct flow_block_cb *block_cb));
 void flow_block_cb_free(struct flow_block_cb *block_cb);
 
 struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
@@ -488,6 +506,13 @@ static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
        list_move(&block_cb->list, &offload->cb_list);
 }
 
+static inline void flow_indr_block_cb_remove(struct flow_block_cb *block_cb,
+                                            struct flow_block_offload *offload)
+{
+       list_del(&block_cb->indr.list);
+       list_move(&block_cb->list, &offload->cb_list);
+}
+
 bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
                           struct list_head *driver_block_list);
 
@@ -531,13 +556,15 @@ static inline void flow_block_init(struct flow_block *flow_block)
        INIT_LIST_HEAD(&flow_block->cb_list);
 }
 
-typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
-                                     enum tc_setup_type type, void *type_data);
+typedef int flow_indr_block_bind_cb_t(struct net_device *dev, struct Qdisc *sch, void *cb_priv,
+                                     enum tc_setup_type type, void *type_data,
+                                     void *data,
+                                     void (*cleanup)(struct flow_block_cb *block_cb));
 
 int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
 void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
-                             flow_setup_cb_t *setup_cb);
-int flow_indr_dev_setup_offload(struct net_device *dev,
+                             void (*release)(void *cb_priv));
+int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
                                enum tc_setup_type type, void *data,
                                struct flow_block_offload *bo,
                                void (*cleanup)(struct flow_block_cb *block_cb));
index 7495066..6e5f1e1 100644 (file)
@@ -35,13 +35,6 @@ struct genl_info;
  *     do additional, common, filtering and return an error
  * @post_doit: called after an operation's doit callback, it may
  *     undo operations done by pre_doit, for example release locks
- * @mcast_bind: a socket bound to the given multicast group (which
- *     is given as the offset into the groups array)
- * @mcast_unbind: a socket was unbound from the given multicast group.
- *     Note that unbind() will not be called symmetrically if the
- *     generic netlink family is removed while there are still open
- *     sockets.
- * @attrbuf: buffer to store parsed attributes (private)
  * @mcgrps: multicast groups used by this family
  * @n_mcgrps: number of multicast groups
  * @mcgrp_offset: starting number of multicast group IDs in this family
@@ -64,9 +57,6 @@ struct genl_family {
        void                    (*post_doit)(const struct genl_ops *ops,
                                             struct sk_buff *skb,
                                             struct genl_info *info);
-       int                     (*mcast_bind)(struct net *net, int group);
-       void                    (*mcast_unbind)(struct net *net, int group);
-       struct nlattr **        attrbuf;        /* private */
        const struct genl_ops * ops;
        const struct genl_multicast_group *mcgrps;
        unsigned int            n_ops;
index 3a6595b..e42402f 100644 (file)
@@ -21,7 +21,7 @@
  * |                                                               |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  *
- * C bit indicates contol message when set, data message when unset.
+ * C bit indicates control message when set, data message when unset.
  * For a control message, proto/ctype is interpreted as a type of
  * control message. For data messages, proto/ctype is the IP protocol
  * of the next header.
index e5b388f..1e209ce 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/timer.h>
 #include <linux/poll.h>
 #include <linux/kernel.h>
+#include <linux/sockptr.h>
 
 #include <net/inet_sock.h>
 #include <net/request_sock.h>
@@ -45,17 +46,9 @@ struct inet_connection_sock_af_ops {
        u16         net_frag_header_len;
        u16         sockaddr_len;
        int         (*setsockopt)(struct sock *sk, int level, int optname,
-                                 char __user *optval, unsigned int optlen);
+                                 sockptr_t optval, unsigned int optlen);
        int         (*getsockopt)(struct sock *sk, int level, int optname,
                                  char __user *optval, int __user *optlen);
-#ifdef CONFIG_COMPAT
-       int         (*compat_setsockopt)(struct sock *sk,
-                               int level, int optname,
-                               char __user *optval, unsigned int optlen);
-       int         (*compat_getsockopt)(struct sock *sk,
-                               int level, int optname,
-                               char __user *optval, int __user *optlen);
-#endif
        void        (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
        void        (*mtu_reduced)(struct sock *sk);
 };
@@ -311,11 +304,6 @@ void inet_csk_listen_stop(struct sock *sk);
 
 void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
 
-int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
-                              char __user *optval, int __user *optlen);
-int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
-                              char __user *optval, unsigned int optlen);
-
 struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
 
 #define TCP_PINGPONG_THRESH    3
index 0f0d1ef..e1eaf17 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/ip.h>
 #include <linux/skbuff.h>
+#include <linux/if_vlan.h>
 
 #include <net/inet_sock.h>
 #include <net/dsfield.h>
@@ -172,7 +173,7 @@ static inline void ipv6_copy_dscp(unsigned int dscp, struct ipv6hdr *inner)
 
 static inline int INET_ECN_set_ce(struct sk_buff *skb)
 {
-       switch (skb->protocol) {
+       switch (skb_protocol(skb, true)) {
        case cpu_to_be16(ETH_P_IP):
                if (skb_network_header(skb) + sizeof(struct iphdr) <=
                    skb_tail_pointer(skb))
@@ -191,7 +192,7 @@ static inline int INET_ECN_set_ce(struct sk_buff *skb)
 
 static inline int INET_ECN_set_ect1(struct sk_buff *skb)
 {
-       switch (skb->protocol) {
+       switch (skb_protocol(skb, true)) {
        case cpu_to_be16(ETH_P_IP):
                if (skb_network_header(skb) + sizeof(struct iphdr) <=
                    skb_tail_pointer(skb))
@@ -272,12 +273,16 @@ static inline int IP_ECN_decapsulate(const struct iphdr *oiph,
 {
        __u8 inner;
 
-       if (skb->protocol == htons(ETH_P_IP))
+       switch (skb_protocol(skb, true)) {
+       case htons(ETH_P_IP):
                inner = ip_hdr(skb)->tos;
-       else if (skb->protocol == htons(ETH_P_IPV6))
+               break;
+       case htons(ETH_P_IPV6):
                inner = ipv6_get_dsfield(ipv6_hdr(skb));
-       else
+               break;
+       default:
                return 0;
+       }
 
        return INET_ECN_decapsulate(skb, oiph->tos, inner);
 }
@@ -287,12 +292,16 @@ static inline int IP6_ECN_decapsulate(const struct ipv6hdr *oipv6h,
 {
        __u8 inner;
 
-       if (skb->protocol == htons(ETH_P_IP))
+       switch (skb_protocol(skb, true)) {
+       case htons(ETH_P_IP):
                inner = ip_hdr(skb)->tos;
-       else if (skb->protocol == htons(ETH_P_IPV6))
+               break;
+       case htons(ETH_P_IPV6):
                inner = ipv6_get_dsfield(ipv6_hdr(skb));
-       else
+               break;
+       default:
                return 0;
+       }
 
        return INET_ECN_decapsulate(skb, ipv6_get_dsfield(oipv6h), inner);
 }
index a7ce00a..a3702d1 100644 (file)
@@ -225,6 +225,7 @@ struct inet_sock {
                                mc_all:1,
                                nodefrag:1;
        __u8                    bind_address_no_port:1,
+                               recverr_rfc4884:1,
                                defer_connect:1; /* Indicates that fastopen_connect is set
                                                  * and cookie exists so we defer connect
                                                  * until first data frame is written
index 04ebe7b..b09c48d 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/in.h>
 #include <linux/skbuff.h>
 #include <linux/jhash.h>
+#include <linux/sockptr.h>
 
 #include <net/inet_sock.h>
 #include <net/route.h>
@@ -231,11 +232,7 @@ struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
                            struct ipcm_cookie *ipc, struct rtable **rtp,
                            struct inet_cork *cork, unsigned int flags);
 
-static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb,
-                               struct flowi *fl)
-{
-       return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
-}
+int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
 
 static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
 {
@@ -711,9 +708,7 @@ int __ip_options_compile(struct net *net, struct ip_options *opt,
 int ip_options_compile(struct net *net, struct ip_options *opt,
                       struct sk_buff *skb);
 int ip_options_get(struct net *net, struct ip_options_rcu **optp,
-                  unsigned char *data, int optlen);
-int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
-                            unsigned char __user *data, int optlen);
+                  sockptr_t data, int optlen);
 void ip_options_undo(struct ip_options *opt);
 void ip_forward_options(struct sk_buff *skb);
 int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
@@ -727,14 +722,10 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
                         struct sk_buff *skb, int tlen, int offset);
 int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
                 struct ipcm_cookie *ipc, bool allow_ipv6);
-int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
+int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
                  unsigned int optlen);
 int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
                  int __user *optlen);
-int compat_ip_setsockopt(struct sock *sk, int level, int optname,
-                        char __user *optval, unsigned int optlen);
-int compat_ip_getsockopt(struct sock *sk, int level, int optname,
-                        char __user *optval, int __user *optlen);
 int ip_ra_control(struct sock *sk, unsigned char on,
                  void (*destructor)(struct sock *));
 
index 27ec612..b3f4eaa 100644 (file)
@@ -85,15 +85,6 @@ static inline void tcp_v6_gso_csum_prep(struct sk_buff *skb)
        th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
 }
 
-#if IS_ENABLED(CONFIG_IPV6)
-static inline void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
-{
-       struct ipv6_pinfo *np = inet6_sk(sk);
-
-       __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
-}
-#endif
-
 static inline __sum16 udp_v6_check(int len,
                                   const struct in6_addr *saddr,
                                   const struct in6_addr *daddr,
index 3f615a2..ac5ff3c 100644 (file)
@@ -19,6 +19,7 @@
 #include <net/netlink.h>
 #include <net/inetpeer.h>
 #include <net/fib_notifier.h>
+#include <linux/indirect_call_wrapper.h>
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 #define FIB6_TABLE_HASHSZ 256
@@ -165,7 +166,7 @@ struct fib6_info {
        struct fib6_node __rcu          *fib6_node;
 
        /* Multipath routes:
-        * siblings is a list of fib6_info that have the the same metric/weight,
+        * siblings is a list of fib6_info that have the same metric/weight,
         * destination, but not the same gateway. nsiblings is just a cache
         * to speed up lookup.
         */
@@ -552,6 +553,41 @@ struct bpf_iter__ipv6_route {
 };
 #endif
 
+INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_output(struct net *net,
+                                            struct fib6_table *table,
+                                            struct flowi6 *fl6,
+                                            const struct sk_buff *skb,
+                                            int flags));
+INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_input(struct net *net,
+                                            struct fib6_table *table,
+                                            struct flowi6 *fl6,
+                                            const struct sk_buff *skb,
+                                            int flags));
+INDIRECT_CALLABLE_DECLARE(struct rt6_info *__ip6_route_redirect(struct net *net,
+                                            struct fib6_table *table,
+                                            struct flowi6 *fl6,
+                                            const struct sk_buff *skb,
+                                            int flags));
+INDIRECT_CALLABLE_DECLARE(struct rt6_info *ip6_pol_route_lookup(struct net *net,
+                                            struct fib6_table *table,
+                                            struct flowi6 *fl6,
+                                            const struct sk_buff *skb,
+                                            int flags));
+static inline struct rt6_info *pol_lookup_func(pol_lookup_t lookup,
+                                               struct net *net,
+                                               struct fib6_table *table,
+                                               struct flowi6 *fl6,
+                                               const struct sk_buff *skb,
+                                               int flags)
+{
+       return INDIRECT_CALL_4(lookup,
+                              ip6_pol_route_output,
+                              ip6_pol_route_input,
+                              ip6_pol_route_lookup,
+                              __ip6_route_redirect,
+                              net, table, fl6, skb, flags);
+}
+
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 static inline bool fib6_has_custom_rules(const struct net *net)
 {
index 076e5d7..36025de 100644 (file)
@@ -290,6 +290,9 @@ int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
                      struct ip_tunnel_parm *p, __u32 fwmark);
 void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);
 
+extern const struct header_ops ip_tunnel_header_ops;
+__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb);
+
 struct ip_tunnel_encap_ops {
        size_t (*encap_hlen)(struct ip_tunnel_encap *e);
        int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e,
index 83be2d9..011f407 100644 (file)
@@ -874,6 +874,7 @@ struct netns_ipvs {
        struct ip_vs_stats              tot_stats;  /* Statistics & est. */
 
        int                     num_services;    /* no of virtual services */
+       int                     num_services6;   /* IPv6 virtual services */
 
        /* Trash for destinations */
        struct list_head        dest_trash;
@@ -960,6 +961,7 @@ struct netns_ipvs {
         * are not supported when synchronization is enabled.
         */
        unsigned int            mixed_address_family_dests;
+       unsigned int            hooks_afmask;   /* &1=AF_INET, &2=AF_INET6 */
 };
 
 #define DEFAULT_SYNC_THRESHOLD 3
@@ -1624,18 +1626,16 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
 }
 #endif /* CONFIG_IP_VS_NFCT */
 
-/* Really using conntrack? */
-static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
-                                            struct sk_buff *skb)
+/* Using old conntrack that can not be redirected to another real server? */
+static inline bool ip_vs_conn_uses_old_conntrack(struct ip_vs_conn *cp,
+                                                struct sk_buff *skb)
 {
 #ifdef CONFIG_IP_VS_NFCT
        enum ip_conntrack_info ctinfo;
        struct nf_conn *ct;
 
-       if (!(cp->flags & IP_VS_CONN_F_NFCT))
-               return false;
        ct = nf_ct_get(skb, &ctinfo);
-       if (ct)
+       if (ct && nf_ct_is_confirmed(ct))
                return true;
 #endif
        return false;
@@ -1670,6 +1670,9 @@ static inline void ip_vs_unregister_conntrack(struct ip_vs_service *svc)
 #endif
 }
 
+int ip_vs_register_hooks(struct netns_ipvs *ipvs, unsigned int af);
+void ip_vs_unregister_hooks(struct netns_ipvs *ipvs, unsigned int af);
+
 static inline int
 ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
 {
index 5e65bf2..bd1f396 100644 (file)
@@ -406,7 +406,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
                                         struct ip6_flowlabel *fl,
                                         struct ipv6_txoptions *fopt);
 void fl6_free_socklist(struct sock *sk);
-int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen);
+int ipv6_flowlabel_opt(struct sock *sk, sockptr_t optval, int optlen);
 int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
                           int flags);
 int ip6_flowlabel_init(void);
@@ -1084,14 +1084,10 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
  *     socket options (ipv6_sockglue.c)
  */
 
-int ipv6_setsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, unsigned int optlen);
+int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+                   unsigned int optlen);
 int ipv6_getsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int __user *optlen);
-int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
-                          char __user *optval, unsigned int optlen);
-int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
-                          char __user *optval, int __user *optlen);
 
 int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr,
                           int addr_len);
index e942372..031c661 100644 (file)
 #include <net/dst.h>
 #include <net/fib_rules.h>
 
+enum l3mdev_type {
+       L3MDEV_TYPE_UNSPEC,
+       L3MDEV_TYPE_VRF,
+       __L3MDEV_TYPE_MAX
+};
+
+#define L3MDEV_TYPE_MAX (__L3MDEV_TYPE_MAX - 1)
+
+typedef int (*lookup_by_table_id_t)(struct net *net, u32 table_d);
+
 /**
  * struct l3mdev_ops - l3mdev operations
  *
@@ -37,6 +47,15 @@ struct l3mdev_ops {
 
 #ifdef CONFIG_NET_L3_MASTER_DEV
 
+int l3mdev_table_lookup_register(enum l3mdev_type l3type,
+                                lookup_by_table_id_t fn);
+
+void l3mdev_table_lookup_unregister(enum l3mdev_type l3type,
+                                   lookup_by_table_id_t fn);
+
+int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, struct net *net,
+                                     u32 table_id);
+
 int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
                          struct fib_lookup_arg *arg);
 
@@ -281,6 +300,26 @@ struct sk_buff *l3mdev_ip6_out(struct sock *sk, struct sk_buff *skb)
 }
 
 static inline
+int l3mdev_table_lookup_register(enum l3mdev_type l3type,
+                                lookup_by_table_id_t fn)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline
+void l3mdev_table_lookup_unregister(enum l3mdev_type l3type,
+                                   lookup_by_table_id_t fn)
+{
+}
+
+static inline
+int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type, struct net *net,
+                                     u32 table_id)
+{
+       return -ENODEV;
+}
+
+static inline
 int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
                          struct fib_lookup_arg *arg)
 {
index 46d0487..02158c2 100644 (file)
@@ -164,10 +164,6 @@ static inline bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
        return false;
 }
 
-static inline void mptcp_rcv_synsent(struct sock *sk)
-{
-}
-
 static inline bool mptcp_synack_options(const struct request_sock *req,
                                        unsigned int *size,
                                        struct mptcp_out_options *opts)
index d7338bf..16e8b2f 100644 (file)
@@ -161,10 +161,51 @@ struct nf_flow_route {
 struct flow_offload *flow_offload_alloc(struct nf_conn *ct);
 void flow_offload_free(struct flow_offload *flow);
 
-int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
-                                flow_setup_cb_t *cb, void *cb_priv);
-void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
-                                 flow_setup_cb_t *cb, void *cb_priv);
+static inline int
+nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
+                            flow_setup_cb_t *cb, void *cb_priv)
+{
+       struct flow_block *block = &flow_table->flow_block;
+       struct flow_block_cb *block_cb;
+       int err = 0;
+
+       down_write(&flow_table->flow_block_lock);
+       block_cb = flow_block_cb_lookup(block, cb, cb_priv);
+       if (block_cb) {
+               err = -EEXIST;
+               goto unlock;
+       }
+
+       block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
+       if (IS_ERR(block_cb)) {
+               err = PTR_ERR(block_cb);
+               goto unlock;
+       }
+
+       list_add_tail(&block_cb->list, &block->cb_list);
+
+unlock:
+       up_write(&flow_table->flow_block_lock);
+       return err;
+}
+
+static inline void
+nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
+                            flow_setup_cb_t *cb, void *cb_priv)
+{
+       struct flow_block *block = &flow_table->flow_block;
+       struct flow_block_cb *block_cb;
+
+       down_write(&flow_table->flow_block_lock);
+       block_cb = flow_block_cb_lookup(block, cb, cb_priv);
+       if (block_cb) {
+               list_del(&block_cb->list);
+               flow_block_cb_free(block_cb);
+       } else {
+               WARN_ON(true);
+       }
+       up_write(&flow_table->flow_block_lock);
+}
 
 int flow_offload_route_init(struct flow_offload *flow,
                            const struct nf_flow_route *route);
index 6f0f6fc..822c267 100644 (file)
@@ -899,6 +899,8 @@ static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule)
        return (void *)&rule->data[rule->dlen];
 }
 
+void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule);
+
 static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext,
                                            struct nft_regs *regs,
                                            const struct nft_pktinfo *pkt)
@@ -921,11 +923,6 @@ static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext,
             (expr) != (last); \
             (expr) = nft_expr_next(expr))
 
-enum nft_chain_flags {
-       NFT_BASE_CHAIN                  = 0x1,
-       NFT_CHAIN_HW_OFFLOAD            = 0x2,
-};
-
 #define NFT_CHAIN_POLICY_UNSET         U8_MAX
 
 /**
@@ -949,7 +946,8 @@ struct nft_chain {
        struct nft_table                *table;
        u64                             handle;
        u32                             use;
-       u8                              flags:6,
+       u8                              flags:5,
+                                       bound:1,
                                        genmask:2;
        char                            *name;
 
@@ -994,6 +992,14 @@ int nft_chain_validate_dependency(const struct nft_chain *chain,
 int nft_chain_validate_hooks(const struct nft_chain *chain,
                              unsigned int hook_flags);
 
+static inline bool nft_chain_is_bound(struct nft_chain *chain)
+{
+       return (chain->flags & NFT_CHAIN_BINDING) && chain->bound;
+}
+
+void nft_chain_del(struct nft_chain *chain);
+void nf_tables_chain_destroy(struct nft_ctx *ctx);
+
 struct nft_stats {
        u64                     bytes;
        u64                     pkts;
@@ -1036,7 +1042,7 @@ static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chai
 
 static inline bool nft_is_base_chain(const struct nft_chain *chain)
 {
-       return chain->flags & NFT_BASE_CHAIN;
+       return chain->flags & NFT_CHAIN_BASE;
 }
 
 int __nft_release_basechain(struct nft_ctx *ctx);
@@ -1433,6 +1439,7 @@ struct nft_trans_chain {
        char                            *name;
        struct nft_stats __percpu       *stats;
        u8                              policy;
+       u32                             chain_id;
 };
 
 #define nft_trans_chain_update(trans)  \
@@ -1443,6 +1450,8 @@ struct nft_trans_chain {
        (((struct nft_trans_chain *)trans->data)->stats)
 #define nft_trans_chain_policy(trans)  \
        (((struct nft_trans_chain *)trans->data)->policy)
+#define nft_trans_chain_id(trans)      \
+       (((struct nft_trans_chain *)trans->data)->chain_id)
 
 struct nft_trans_table {
        bool                            update;
index a8dce2a..0ca6a1b 100644 (file)
@@ -9,10 +9,13 @@
 #include <linux/bpf-netns.h>
 
 struct bpf_prog;
+struct bpf_prog_array;
 
 struct netns_bpf {
-       struct bpf_prog __rcu *progs[MAX_NETNS_BPF_ATTACH_TYPE];
-       struct bpf_link *links[MAX_NETNS_BPF_ATTACH_TYPE];
+       /* Array of programs to run compiled from progs or links */
+       struct bpf_prog_array __rcu *run_array[MAX_NETNS_BPF_ATTACH_TYPE];
+       struct bpf_prog *progs[MAX_NETNS_BPF_ATTACH_TYPE];
+       struct list_head links[MAX_NETNS_BPF_ATTACH_TYPE];
 };
 
 #endif /* __NETNS_BPF_H__ */
index ed65619..d4d4612 100644 (file)
@@ -32,6 +32,12 @@ struct tcf_block_ext_info {
        u32 block_index;
 };
 
+struct tcf_qevent {
+       struct tcf_block        *block;
+       struct tcf_block_ext_info info;
+       struct tcf_proto __rcu *filter_chain;
+};
+
 struct tcf_block_cb;
 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
 
@@ -262,7 +268,7 @@ static inline void tcf_exts_put_net(struct tcf_exts *exts)
 
 static inline void
 tcf_exts_stats_update(const struct tcf_exts *exts,
-                     u64 bytes, u64 packets, u64 lastuse,
+                     u64 bytes, u64 packets, u64 drops, u64 lastuse,
                      u8 used_hw_stats, bool used_hw_stats_valid)
 {
 #ifdef CONFIG_NET_CLS_ACT
@@ -273,7 +279,8 @@ tcf_exts_stats_update(const struct tcf_exts *exts,
        for (i = 0; i < exts->nr_actions; i++) {
                struct tc_action *a = exts->actions[i];
 
-               tcf_action_stats_update(a, bytes, packets, lastuse, true);
+               tcf_action_stats_update(a, bytes, packets, drops,
+                                       lastuse, true);
                a->used_hw_stats = used_hw_stats;
                a->used_hw_stats_valid = used_hw_stats_valid;
        }
@@ -552,6 +559,49 @@ int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
                          void *cb_priv, u32 *flags, unsigned int *in_hw_count);
 unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
 
+#ifdef CONFIG_NET_CLS_ACT
+int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+                   enum flow_block_binder_type binder_type,
+                   struct nlattr *block_index_attr,
+                   struct netlink_ext_ack *extack);
+void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
+int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
+                              struct netlink_ext_ack *extack);
+struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
+                                 struct sk_buff **to_free, int *ret);
+int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
+#else
+static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+                                 enum flow_block_binder_type binder_type,
+                                 struct nlattr *block_index_attr,
+                                 struct netlink_ext_ack *extack)
+{
+       return 0;
+}
+
+static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
+{
+}
+
+static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
+                                            struct netlink_ext_ack *extack)
+{
+       return 0;
+}
+
+static inline struct sk_buff *
+tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
+                 struct sk_buff **to_free, int *ret)
+{
+       return skb;
+}
+
+static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
+{
+       return 0;
+}
+#endif
+
 struct tc_cls_u32_knode {
        struct tcf_exts *exts;
        struct tcf_result *res;
index 9092e69..ac8c890 100644 (file)
@@ -136,17 +136,6 @@ static inline void qdisc_run(struct Qdisc *q)
        }
 }
 
-static inline __be16 tc_skb_protocol(const struct sk_buff *skb)
-{
-       /* We need to take extra care in case the skb came via
-        * vlan accelerated path. In that case, use skb->vlan_proto
-        * as the original vlan header was already stripped.
-        */
-       if (skb_vlan_tag_present(skb))
-               return skb->vlan_proto;
-       return skb->protocol;
-}
-
 /* Calculate maximal size of packet seen by hard_start_xmit
    routine of this device.
  */
index dceff60..308ef0a 100644 (file)
@@ -26,12 +26,6 @@ static inline void rpl_exit(void) {}
 /* Worst decompression memory usage ipv6 address (16) + pad 7 */
 #define IPV6_RPL_SRH_WORST_SWAP_SIZE (sizeof(struct in6_addr) + 7)
 
-static inline size_t ipv6_rpl_srh_alloc_size(unsigned char n)
-{
-       return sizeof(struct ipv6_rpl_sr_hdr) +
-               ((n + 1) * sizeof(struct in6_addr));
-}
-
 size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri,
                         unsigned char cmpre);
 
index 15b4d9a..122d9e2 100644 (file)
@@ -353,11 +353,13 @@ enum {
         ipv4_is_anycast_6to4(a))
 
 /* Flags used for the bind address copy functions.  */
-#define SCTP_ADDR6_ALLOWED     0x00000001      /* IPv6 address is allowed by
+#define SCTP_ADDR4_ALLOWED     0x00000001      /* IPv4 address is allowed by
                                                   local sock family */
-#define SCTP_ADDR4_PEERSUPP    0x00000002      /* IPv4 address is supported by
+#define SCTP_ADDR6_ALLOWED     0x00000002      /* IPv6 address is allowed by
+                                                  local sock family */
+#define SCTP_ADDR4_PEERSUPP    0x00000004      /* IPv4 address is supported by
                                                   peer */
-#define SCTP_ADDR6_PEERSUPP    0x00000004      /* IPv6 address is supported by
+#define SCTP_ADDR6_PEERSUPP    0x00000008      /* IPv6 address is supported by
                                                   peer */
 
 /* Reasons to retransmit. */
index f8bcb75..4fc747b 100644 (file)
@@ -291,7 +291,7 @@ atomic_dec(&sctp_dbg_objcnt_## name)
 #define SCTP_DBG_OBJCNT(name) \
 atomic_t sctp_dbg_objcnt_## name = ATOMIC_INIT(0)
 
-/* Macro to help create new entries in in the global array of
+/* Macro to help create new entries in the global array of
  * objcnt counters.
  */
 #define SCTP_DBG_OBJCNT_ENTRY(name) \
@@ -412,7 +412,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 /* Tests if the list has one and only one entry. */
 static inline int sctp_list_single_entry(struct list_head *head)
 {
-       return (head->next != head) && (head->next == head->prev);
+       return list_is_singular(head);
 }
 
 static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
index fb42c90..b33f1ae 100644 (file)
@@ -431,23 +431,13 @@ struct sctp_af {
        int             (*setsockopt)   (struct sock *sk,
                                         int level,
                                         int optname,
-                                        char __user *optval,
+                                        sockptr_t optval,
                                         unsigned int optlen);
        int             (*getsockopt)   (struct sock *sk,
                                         int level,
                                         int optname,
                                         char __user *optval,
                                         int __user *optlen);
-       int             (*compat_setsockopt)    (struct sock *sk,
-                                        int level,
-                                        int optname,
-                                        char __user *optval,
-                                        unsigned int optlen);
-       int             (*compat_getsockopt)    (struct sock *sk,
-                                        int level,
-                                        int optname,
-                                        char __user *optval,
-                                        int __user *optlen);
        void            (*get_dst)      (struct sctp_transport *t,
                                         union sctp_addr *saddr,
                                         struct flowi *fl,
@@ -1398,7 +1388,7 @@ struct sctp_stream_priorities {
        struct list_head prio_sched;
        /* List of streams scheduled */
        struct list_head active;
-       /* The next stream stream in line */
+       /* The next stream in line */
        struct sctp_stream_out_ext *next;
        __u16 prio;
 };
@@ -1460,7 +1450,7 @@ struct sctp_stream {
                struct {
                        /* List of streams scheduled */
                        struct list_head rr_list;
-                       /* The next stream stream in line */
+                       /* The next stream in line */
                        struct sctp_stream_out_ext *rr_next;
                };
        };
@@ -1770,7 +1760,7 @@ struct sctp_association {
        int max_burst;
 
        /* This is the max_retrans value for the association.  This value will
-        * be initialized initialized from system defaults, but can be
+        * be initialized from system defaults, but can be
         * modified by the SCTP_ASSOCINFO socket option.
         */
        int max_retrans;
index c53cc42..2cc3ba6 100644 (file)
@@ -59,6 +59,7 @@
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
 #include <linux/poll.h>
+#include <linux/sockptr.h>
 
 #include <linux/atomic.h>
 #include <linux/refcount.h>
@@ -533,7 +534,8 @@ enum sk_pacing {
  * be copied.
  */
 #define SK_USER_DATA_NOCOPY    1UL
-#define SK_USER_DATA_PTRMASK   ~(SK_USER_DATA_NOCOPY)
+#define SK_USER_DATA_BPF       2UL     /* Managed by BPF */
+#define SK_USER_DATA_PTRMASK   ~(SK_USER_DATA_NOCOPY | SK_USER_DATA_BPF)
 
 /**
  * sk_user_data_is_nocopy - Test if sk_user_data pointer must not be copied
@@ -879,6 +881,15 @@ static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
        __clear_bit(flag, &sk->sk_flags);
 }
 
+static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit,
+                                    int valbool)
+{
+       if (valbool)
+               sock_set_flag(sk, bit);
+       else
+               sock_reset_flag(sk, bit);
+}
+
 static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
 {
        return test_bit(flag, &sk->sk_flags);
@@ -1130,21 +1141,13 @@ struct proto {
        void                    (*destroy)(struct sock *sk);
        void                    (*shutdown)(struct sock *sk, int how);
        int                     (*setsockopt)(struct sock *sk, int level,
-                                       int optname, char __user *optval,
+                                       int optname, sockptr_t optval,
                                        unsigned int optlen);
        int                     (*getsockopt)(struct sock *sk, int level,
                                        int optname, char __user *optval,
                                        int __user *option);
        void                    (*keepalive)(struct sock *sk, int valbool);
 #ifdef CONFIG_COMPAT
-       int                     (*compat_setsockopt)(struct sock *sk,
-                                       int level,
-                                       int optname, char __user *optval,
-                                       unsigned int optlen);
-       int                     (*compat_getsockopt)(struct sock *sk,
-                                       int level,
-                                       int optname, char __user *optval,
-                                       int __user *option);
        int                     (*compat_ioctl)(struct sock *sk,
                                        unsigned int cmd, unsigned long arg);
 #endif
@@ -1667,7 +1670,7 @@ void sock_pfree(struct sk_buff *skb);
 #endif
 
 int sock_setsockopt(struct socket *sock, int level, int op,
-                   char __user *optval, unsigned int optlen);
+                   sockptr_t optval, unsigned int optlen);
 
 int sock_getsockopt(struct socket *sock, int level, int op,
                    char __user *optval, int __user *optlen);
@@ -1712,8 +1715,6 @@ int sock_no_getname(struct socket *, struct sockaddr *, int);
 int sock_no_ioctl(struct socket *, unsigned int, unsigned long);
 int sock_no_listen(struct socket *, int);
 int sock_no_shutdown(struct socket *, int);
-int sock_no_getsockopt(struct socket *, int , int, char __user *, int __user *);
-int sock_no_setsockopt(struct socket *, int, int, char __user *, unsigned int);
 int sock_no_sendmsg(struct socket *, struct msghdr *, size_t);
 int sock_no_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t len);
 int sock_no_recvmsg(struct socket *, struct msghdr *, size_t, int);
@@ -1733,11 +1734,7 @@ int sock_common_getsockopt(struct socket *sock, int level, int optname,
 int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                        int flags);
 int sock_common_setsockopt(struct socket *sock, int level, int optname,
-                                 char __user *optval, unsigned int optlen);
-int compat_sock_common_getsockopt(struct socket *sock, int level,
-               int optname, char __user *optval, int __user *optlen);
-int compat_sock_common_setsockopt(struct socket *sock, int level,
-               int optname, char __user *optval, unsigned int optlen);
+                          sockptr_t optval, unsigned int optlen);
 
 void sk_common_release(struct sock *sk);
 
@@ -1848,7 +1845,6 @@ static inline int sk_rx_queue_get(const struct sock *sk)
 
 static inline void sk_set_socket(struct sock *sk, struct socket *sock)
 {
-       sk_tx_queue_clear(sk);
        sk->sk_socket = sock;
 }
 
index b8c059b..ff22469 100644 (file)
@@ -76,6 +76,10 @@ enum switchdev_obj_id {
        SWITCHDEV_OBJ_ID_RING_TEST_MRP,
        SWITCHDEV_OBJ_ID_RING_ROLE_MRP,
        SWITCHDEV_OBJ_ID_RING_STATE_MRP,
+       SWITCHDEV_OBJ_ID_IN_TEST_MRP,
+       SWITCHDEV_OBJ_ID_IN_ROLE_MRP,
+       SWITCHDEV_OBJ_ID_IN_STATE_MRP,
+
 #endif
 };
 
@@ -155,6 +159,40 @@ struct switchdev_obj_ring_state_mrp {
 #define SWITCHDEV_OBJ_RING_STATE_MRP(OBJ) \
        container_of((OBJ), struct switchdev_obj_ring_state_mrp, obj)
 
+/* SWITCHDEV_OBJ_ID_IN_TEST_MRP */
+struct switchdev_obj_in_test_mrp {
+       struct switchdev_obj obj;
+       /* The value is in us and a value of 0 represents to stop */
+       u32 interval;
+       u32 in_id;
+       u32 period;
+       u8 max_miss;
+};
+
+#define SWITCHDEV_OBJ_IN_TEST_MRP(OBJ) \
+       container_of((OBJ), struct switchdev_obj_in_test_mrp, obj)
+
+/* SWICHDEV_OBJ_ID_IN_ROLE_MRP */
+struct switchdev_obj_in_role_mrp {
+       struct switchdev_obj obj;
+       struct net_device *i_port;
+       u32 ring_id;
+       u16 in_id;
+       u8 in_role;
+};
+
+#define SWITCHDEV_OBJ_IN_ROLE_MRP(OBJ) \
+       container_of((OBJ), struct switchdev_obj_in_role_mrp, obj)
+
+struct switchdev_obj_in_state_mrp {
+       struct switchdev_obj obj;
+       u32 in_id;
+       u8 in_state;
+};
+
+#define SWITCHDEV_OBJ_IN_STATE_MRP(OBJ) \
+       container_of((OBJ), struct switchdev_obj_in_state_mrp, obj)
+
 #endif
 
 typedef int switchdev_obj_dump_cb_t(struct switchdev_obj *obj);
index 79654bc..8250d6f 100644 (file)
@@ -66,7 +66,16 @@ static inline struct nf_flowtable *tcf_ct_ft(const struct tc_action *a)
 #endif /* CONFIG_NF_CONNTRACK */
 
 #if IS_ENABLED(CONFIG_NET_ACT_CT)
-void tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie);
+static inline void
+tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie)
+{
+       enum ip_conntrack_info ctinfo = cookie & NFCT_INFOMASK;
+       struct nf_conn *ct;
+
+       ct = (struct nf_conn *)(cookie & NFCT_PTRMASK);
+       nf_conntrack_get(&ct->ct_general);
+       nf_ct_set(skb, ct, ctinfo);
+}
 #else
 static inline void
 tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie) { }
index f098ad4..6d1e26b 100644 (file)
@@ -59,14 +59,52 @@ static inline u64 tcf_police_rate_bytes_ps(const struct tc_action *act)
        return params->rate.rate_bytes_ps;
 }
 
-static inline s64 tcf_police_tcfp_burst(const struct tc_action *act)
+static inline u32 tcf_police_burst(const struct tc_action *act)
 {
        struct tcf_police *police = to_police(act);
        struct tcf_police_params *params;
+       u32 burst;
 
        params = rcu_dereference_protected(police->params,
                                           lockdep_is_held(&police->tcf_lock));
-       return params->tcfp_burst;
+
+       /*
+        *  "rate" bytes   "burst" nanoseconds
+        *  ------------ * -------------------
+        *    1 second          2^6 ticks
+        *
+        * ------------------------------------
+        *        NSEC_PER_SEC nanoseconds
+        *        ------------------------
+        *              2^6 ticks
+        *
+        *    "rate" bytes   "burst" nanoseconds            2^6 ticks
+        *  = ------------ * ------------------- * ------------------------
+        *      1 second          2^6 ticks        NSEC_PER_SEC nanoseconds
+        *
+        *   "rate" * "burst"
+        * = ---------------- bytes/nanosecond
+        *    NSEC_PER_SEC^2
+        *
+        *
+        *   "rate" * "burst"
+        * = ---------------- bytes/second
+        *     NSEC_PER_SEC
+        */
+       burst = div_u64(params->tcfp_burst * params->rate.rate_bytes_ps,
+                       NSEC_PER_SEC);
+
+       return burst;
+}
+
+static inline u32 tcf_police_tcfp_mtu(const struct tc_action *act)
+{
+       struct tcf_police *police = to_police(act);
+       struct tcf_police_params *params;
+
+       params = rcu_dereference_protected(police->params,
+                                          lockdep_is_held(&police->tcf_lock));
+       return params->tcfp_mtu;
 }
 
 #endif /* __NET_TC_POLICE_H */
index 4de9485..e0c35d5 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/skbuff.h>
 #include <linux/kref.h>
 #include <linux/ktime.h>
+#include <linux/indirect_call_wrapper.h>
 
 #include <net/inet_connection_sock.h>
 #include <net/inet_timewait_sock.h>
@@ -398,12 +399,8 @@ __poll_t tcp_poll(struct file *file, struct socket *sock,
                      struct poll_table_struct *wait);
 int tcp_getsockopt(struct sock *sk, int level, int optname,
                   char __user *optval, int __user *optlen);
-int tcp_setsockopt(struct sock *sk, int level, int optname,
-                  char __user *optval, unsigned int optlen);
-int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, int __user *optlen);
-int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, unsigned int optlen);
+int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+                  unsigned int optlen);
 void tcp_set_keepalive(struct sock *sk, int val);
 void tcp_syn_ack_timeout(const struct request_sock *req);
 int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
@@ -906,6 +903,8 @@ static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb)
        TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
 }
 
+extern const struct inet_connection_sock_af_ops ipv4_specific;
+
 #if IS_ENABLED(CONFIG_IPV6)
 /* This is the variant of inet6_iif() that must be used by TCP,
  * as TCP moves IP6CB into a different location in skb->cb[]
@@ -931,6 +930,13 @@ static inline int tcp_v6_sdif(const struct sk_buff *skb)
 #endif
        return 0;
 }
+
+extern const struct inet_connection_sock_af_ops ipv6_specific;
+
+INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb));
+INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb));
+INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb));
+
 #endif
 
 static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
@@ -1935,6 +1941,7 @@ struct tcp_iter_state {
        struct seq_net_private  p;
        enum tcp_seq_states     state;
        struct sock             *syn_wait_sk;
+       struct tcp_seq_afinfo   *bpf_seq_afinfo;
        int                     bucket, offset, sbucket, num;
        loff_t                  last_pos;
 };
@@ -1947,6 +1954,10 @@ void tcp_v4_destroy_sock(struct sock *sk);
 struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                                netdev_features_t features);
 struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
+INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *skb, int thoff));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb));
+INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *skb, int thoff));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb));
 int tcp_gro_complete(struct sk_buff *skb);
 
 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
@@ -1991,7 +2002,7 @@ struct tcp_sock_af_ops {
                                         const struct sk_buff *skb);
        int             (*md5_parse)(struct sock *sk,
                                     int optname,
-                                    char __user *optval,
+                                    sockptr_t optval,
                                     int optlen);
 #endif
 };
index 3212d3c..e5dac7e 100644 (file)
@@ -291,11 +291,19 @@ struct tlsdev_ops {
 enum tls_offload_sync_type {
        TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ = 0,
        TLS_OFFLOAD_SYNC_TYPE_CORE_NEXT_HINT = 1,
+       TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC = 2,
 };
 
 #define TLS_DEVICE_RESYNC_NH_START_IVAL                2
 #define TLS_DEVICE_RESYNC_NH_MAX_IVAL          128
 
+#define TLS_DEVICE_RESYNC_ASYNC_LOGMAX         13
+struct tls_offload_resync_async {
+       atomic64_t req;
+       u32 loglen;
+       u32 log[TLS_DEVICE_RESYNC_ASYNC_LOGMAX];
+};
+
 struct tls_offload_context_rx {
        /* sw must be the first member of tls_offload_context_rx */
        struct tls_sw_context_rx sw;
@@ -314,6 +322,10 @@ struct tls_offload_context_rx {
                        u32 decrypted_failed;
                        u32 decrypted_tgt;
                } resync_nh;
+               /* TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC */
+               struct {
+                       struct tls_offload_resync_async *resync_async;
+               };
        };
        u8 driver_state[] __aligned(8);
        /* The TLS layer reserves room for driver specific state
@@ -606,9 +618,9 @@ tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
 }
 #endif
 
+#define RESYNC_REQ BIT(0)
+#define RESYNC_REQ_ASYNC BIT(1)
 /* The TLS context is valid until sk_destruct is called */
-#define RESYNC_REQ (1 << 0)
-#define RESYNC_REQ_FORCE (1 << 1)
 static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
@@ -617,12 +629,26 @@ static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
        atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
 }
 
-static inline void tls_offload_rx_force_resync_request(struct sock *sk)
+/* Log all TLS record header TCP sequences in [seq, seq+len] */
+static inline void
+tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
+{
+       struct tls_context *tls_ctx = tls_get_ctx(sk);
+       struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+
+       atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
+                    ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
+       rx_ctx->resync_async->loglen = 0;
+}
+
+static inline void
+tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
 
-       atomic64_set(&rx_ctx->resync_req, RESYNC_REQ | RESYNC_REQ_FORCE);
+       atomic64_set(&rx_ctx->resync_async->req,
+                    ((u64)ntohl(seq) << 32) | RESYNC_REQ);
 }
 
 static inline void
index a8f6020..da06613 100644 (file)
@@ -56,9 +56,6 @@ ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp,
 
 #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
 
-/* address family specific functions */
-extern const struct inet_connection_sock_af_ops ipv4_specific;
-
 void inet6_destroy_sock(struct sock *sk);
 
 #define IPV6_SEQ_DGRAM_HEADER                                         \
index 7e166a5..62c98a9 100644 (file)
@@ -4,21 +4,22 @@
 
 #include <net/ip.h>
 
-#define TSO_HEADER_SIZE                128
+#define TSO_HEADER_SIZE                256
 
 struct tso_t {
-       int next_frag_idx;
-       void *data;
-       size_t size;
-       u16 ip_id;
-       bool ipv6;
-       u32 tcp_seq;
+       int     next_frag_idx;
+       int     size;
+       void    *data;
+       u16     ip_id;
+       u8      tlen; /* transport header len */
+       bool    ipv6;
+       u32     tcp_seq;
 };
 
-int tso_count_descs(struct sk_buff *skb);
-void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
+int tso_count_descs(const struct sk_buff *skb);
+void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso,
                   int size, bool is_last);
-void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size);
-void tso_start(struct sk_buff *skb, struct tso_t *tso);
+void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size);
+int tso_start(struct sk_buff *skb, struct tso_t *tso);
 
 #endif /* _TSO_H */
index a8fa6c0..295d52a 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/ipv6.h>
 #include <linux/seq_file.h>
 #include <linux/poll.h>
+#include <linux/indirect_call_wrapper.h>
 
 /**
  *     struct udp_skb_cb  -  UDP(-Lite) private variables
@@ -166,6 +167,12 @@ static inline void udp_csum_pull_header(struct sk_buff *skb)
 typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
                                     __be16 dport);
 
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
+                                                          struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));
+INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
+                                                          struct sk_buff *));
+INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
 struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
                                struct udphdr *uh, struct sock *sk);
 int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
@@ -299,7 +306,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 int udp_lib_getsockopt(struct sock *sk, int level, int optname,
                       char __user *optval, int __user *optlen);
 int udp_lib_setsockopt(struct sock *sk, int level, int optname,
-                      char __user *optval, unsigned int optlen,
+                      sockptr_t optval, unsigned int optlen,
                       int (*push_pending_frames)(struct sock *));
 struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
                             __be32 daddr, __be16 dport, int dif);
@@ -440,6 +447,7 @@ struct udp_seq_afinfo {
 struct udp_iter_state {
        struct seq_net_private  p;
        int                     bucket;
+       struct udp_seq_afinfo   *bpf_seq_afinfo;
 };
 
 void *udp_seq_start(struct seq_file *seq, loff_t *pos);
index e7312ce..dd20ce9 100644 (file)
@@ -106,15 +106,16 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
  * call this function to perform Tx offloads on outgoing traffic.
  */
 enum udp_parsable_tunnel_type {
-       UDP_TUNNEL_TYPE_VXLAN,          /* RFC 7348 */
-       UDP_TUNNEL_TYPE_GENEVE,         /* draft-ietf-nvo3-geneve */
-       UDP_TUNNEL_TYPE_VXLAN_GPE,      /* draft-ietf-nvo3-vxlan-gpe */
+       UDP_TUNNEL_TYPE_VXLAN     = BIT(0), /* RFC 7348 */
+       UDP_TUNNEL_TYPE_GENEVE    = BIT(1), /* draft-ietf-nvo3-geneve */
+       UDP_TUNNEL_TYPE_VXLAN_GPE = BIT(2), /* draft-ietf-nvo3-vxlan-gpe */
 };
 
 struct udp_tunnel_info {
        unsigned short type;
        sa_family_t sa_family;
        __be16 port;
+       u8 hw_priv;
 };
 
 /* Notify network devices of offloadable types */
@@ -181,4 +182,161 @@ static inline void udp_tunnel_encap_enable(struct socket *sock)
                udp_encap_enable();
 }
 
+#define UDP_TUNNEL_NIC_MAX_TABLES      4
+
+enum udp_tunnel_nic_info_flags {
+       /* Device callbacks may sleep */
+       UDP_TUNNEL_NIC_INFO_MAY_SLEEP   = BIT(0),
+       /* Device only supports offloads when it's open, all ports
+        * will be removed before close and re-added after open.
+        */
+       UDP_TUNNEL_NIC_INFO_OPEN_ONLY   = BIT(1),
+       /* Device supports only IPv4 tunnels */
+       UDP_TUNNEL_NIC_INFO_IPV4_ONLY   = BIT(2),
+};
+
+/**
+ * struct udp_tunnel_nic_info - driver UDP tunnel offload information
+ * @set_port:  callback for adding a new port
+ * @unset_port:        callback for removing a port
+ * @sync_table:        callback for syncing the entire port table at once
+ * @flags:     device flags from enum udp_tunnel_nic_info_flags
+ * @tables:    UDP port tables this device has
+ * @tables.n_entries:          number of entries in this table
+ * @tables.tunnel_types:       types of tunnels this table accepts
+ *
+ * Drivers are expected to provide either @set_port and @unset_port callbacks
+ * or the @sync_table callback. Callbacks are invoked with rtnl lock held.
+ *
+ * Known limitations:
+ *  - UDP tunnel port notifications are fundamentally best-effort -
+ *    it is likely the driver will both see skbs which use a UDP tunnel port,
+ *    while not being a tunneled skb, and tunnel skbs from other ports -
+ *    drivers should only use these ports for non-critical RX-side offloads,
+ *    e.g. the checksum offload;
+ *  - none of the devices care about the socket family at present, so we don't
+ *    track it. Please extend this code if you care.
+ */
+struct udp_tunnel_nic_info {
+       /* one-by-one */
+       int (*set_port)(struct net_device *dev,
+                       unsigned int table, unsigned int entry,
+                       struct udp_tunnel_info *ti);
+       int (*unset_port)(struct net_device *dev,
+                         unsigned int table, unsigned int entry,
+                         struct udp_tunnel_info *ti);
+
+       /* all at once */
+       int (*sync_table)(struct net_device *dev, unsigned int table);
+
+       unsigned int flags;
+
+       struct udp_tunnel_nic_table_info {
+               unsigned int n_entries;
+               unsigned int tunnel_types;
+       } tables[UDP_TUNNEL_NIC_MAX_TABLES];
+};
+
+/* UDP tunnel module dependencies
+ *
+ * Tunnel drivers are expected to have a hard dependency on the udp_tunnel
+ * module. NIC drivers are not, they just attach their
+ * struct udp_tunnel_nic_info to the netdev and wait for callbacks to come.
+ * Loading a tunnel driver will cause the udp_tunnel module to be loaded
+ * and only then will all the required state structures be allocated.
+ * Since we want a weak dependency from the drivers and the core to udp_tunnel
+ * we call things through the following stubs.
+ */
+struct udp_tunnel_nic_ops {
+       void (*get_port)(struct net_device *dev, unsigned int table,
+                        unsigned int idx, struct udp_tunnel_info *ti);
+       void (*set_port_priv)(struct net_device *dev, unsigned int table,
+                             unsigned int idx, u8 priv);
+       void (*add_port)(struct net_device *dev, struct udp_tunnel_info *ti);
+       void (*del_port)(struct net_device *dev, struct udp_tunnel_info *ti);
+       void (*reset_ntf)(struct net_device *dev);
+
+       size_t (*dump_size)(struct net_device *dev, unsigned int table);
+       int (*dump_write)(struct net_device *dev, unsigned int table,
+                         struct sk_buff *skb);
+};
+
+#ifdef CONFIG_INET
+extern const struct udp_tunnel_nic_ops *udp_tunnel_nic_ops;
+#else
+#define udp_tunnel_nic_ops     ((struct udp_tunnel_nic_ops *)NULL)
+#endif
+
+static inline void
+udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table,
+                       unsigned int idx, struct udp_tunnel_info *ti)
+{
+       /* This helper is used from .sync_table, we indicate empty entries
+        * by zero'ed @ti. Drivers which need to know the details of a port
+        * when it gets deleted should use the .set_port / .unset_port
+        * callbacks.
+        * Zero out here, otherwise !CONFIG_INET causes uninitilized warnings.
+        */
+       memset(ti, 0, sizeof(*ti));
+
+       if (udp_tunnel_nic_ops)
+               udp_tunnel_nic_ops->get_port(dev, table, idx, ti);
+}
+
+static inline void
+udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
+                            unsigned int idx, u8 priv)
+{
+       if (udp_tunnel_nic_ops)
+               udp_tunnel_nic_ops->set_port_priv(dev, table, idx, priv);
+}
+
+static inline void
+udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
+{
+       if (udp_tunnel_nic_ops)
+               udp_tunnel_nic_ops->add_port(dev, ti);
+}
+
+static inline void
+udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti)
+{
+       if (udp_tunnel_nic_ops)
+               udp_tunnel_nic_ops->del_port(dev, ti);
+}
+
+/**
+ * udp_tunnel_nic_reset_ntf() - device-originating reset notification
+ * @dev: network interface device structure
+ *
+ * Called by the driver to inform the core that the entire UDP tunnel port
+ * state has been lost, usually due to device reset. Core will assume device
+ * forgot all the ports and issue .set_port and .sync_table callbacks as
+ * necessary.
+ *
+ * This function must be called with rtnl lock held, and will issue all
+ * the callbacks before returning.
+ */
+static inline void udp_tunnel_nic_reset_ntf(struct net_device *dev)
+{
+       if (udp_tunnel_nic_ops)
+               udp_tunnel_nic_ops->reset_ntf(dev);
+}
+
+static inline size_t
+udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
+{
+       if (!udp_tunnel_nic_ops)
+               return 0;
+       return udp_tunnel_nic_ops->dump_size(dev, table);
+}
+
+static inline int
+udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
+                         struct sk_buff *skb)
+{
+       if (!udp_tunnel_nic_ops)
+               return 0;
+       return udp_tunnel_nic_ops->dump_write(dev, table, skb);
+}
 #endif
index 24ba7e8..f6e31d2 100644 (file)
@@ -28,7 +28,7 @@
  *
  * USAGE
  *
- * Embed a `struct wimax_dev` at the beginning of the the device's
+ * Embed a `struct wimax_dev` at the beginning of the device's
  * private structure, initialize and register it. For details, see
  * `struct wimax_dev`s documentation.
  *
index 609f819..dbe9c60 100644 (file)
@@ -85,6 +85,12 @@ struct xdp_buff {
        ((xdp)->data_hard_start + (xdp)->frame_sz -     \
         SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
+static inline struct skb_shared_info *
+xdp_get_shared_info_from_buff(struct xdp_buff *xdp)
+{
+       return (struct skb_shared_info *)xdp_data_hard_end(xdp);
+}
+
 struct xdp_frame {
        void *data;
        u16 len;
@@ -98,6 +104,22 @@ struct xdp_frame {
        struct net_device *dev_rx; /* used by cpumap */
 };
 
+
+static inline struct skb_shared_info *
+xdp_get_shared_info_from_frame(struct xdp_frame *frame)
+{
+       void *data_hard_start = frame->data - frame->headroom - sizeof(*frame);
+
+       return (struct skb_shared_info *)(data_hard_start + frame->frame_sz -
+                               SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+}
+
+struct xdp_cpumap_stats {
+       unsigned int redirect;
+       unsigned int pass;
+       unsigned int drop;
+};
+
 /* Clear kernel pointers in xdp_frame */
 static inline void xdp_scrub_frame(struct xdp_frame *frame)
 {
@@ -121,39 +143,48 @@ void xdp_convert_frame_to_buff(struct xdp_frame *frame, struct xdp_buff *xdp)
        xdp->frame_sz = frame->frame_sz;
 }
 
-/* Convert xdp_buff to xdp_frame */
 static inline
-struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
+int xdp_update_frame_from_buff(struct xdp_buff *xdp,
+                              struct xdp_frame *xdp_frame)
 {
-       struct xdp_frame *xdp_frame;
-       int metasize;
-       int headroom;
-
-       if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
-               return xdp_convert_zc_to_xdp_frame(xdp);
+       int metasize, headroom;
 
        /* Assure headroom is available for storing info */
        headroom = xdp->data - xdp->data_hard_start;
        metasize = xdp->data - xdp->data_meta;
        metasize = metasize > 0 ? metasize : 0;
        if (unlikely((headroom - metasize) < sizeof(*xdp_frame)))
-               return NULL;
+               return -ENOSPC;
 
        /* Catch if driver didn't reserve tailroom for skb_shared_info */
        if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
                XDP_WARN("Driver BUG: missing reserved tailroom");
-               return NULL;
+               return -ENOSPC;
        }
 
-       /* Store info in top of packet */
-       xdp_frame = xdp->data_hard_start;
-
        xdp_frame->data = xdp->data;
        xdp_frame->len  = xdp->data_end - xdp->data;
        xdp_frame->headroom = headroom - sizeof(*xdp_frame);
        xdp_frame->metasize = metasize;
        xdp_frame->frame_sz = xdp->frame_sz;
 
+       return 0;
+}
+
+/* Convert xdp_buff to xdp_frame */
+static inline
+struct xdp_frame *xdp_convert_buff_to_frame(struct xdp_buff *xdp)
+{
+       struct xdp_frame *xdp_frame;
+
+       if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
+               return xdp_convert_zc_to_xdp_frame(xdp);
+
+       /* Store info in top of packet */
+       xdp_frame = xdp->data_hard_start;
+       if (unlikely(xdp_update_frame_from_buff(xdp, xdp_frame) < 0))
+               return NULL;
+
        /* rxq only valid until napi_schedule ends, convert to xdp_mem_info */
        xdp_frame->mem = xdp->rxq->mem;
 
index 96bfc5f..c9d87cc 100644 (file)
@@ -69,7 +69,11 @@ struct xdp_sock {
        spinlock_t tx_completion_lock;
        /* Protects generic receive. */
        spinlock_t rx_lock;
+
+       /* Statistics */
        u64 rx_dropped;
+       u64 rx_queue_full;
+
        struct list_head map_list;
        /* Protects map_list */
        spinlock_t map_list_lock;
index 094fe68..5e81868 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/audit.h>
 #include <linux/slab.h>
 #include <linux/refcount.h>
+#include <linux/sockptr.h>
 
 #include <net/sock.h>
 #include <net/dst.h>
@@ -127,6 +128,7 @@ struct xfrm_state_walk {
 
 struct xfrm_state_offload {
        struct net_device       *dev;
+       struct net_device       *real_dev;
        unsigned long           offload_handle;
        unsigned int            num_exthdrs;
        u8                      flags;
@@ -1008,6 +1010,7 @@ struct xfrm_offload {
 #define        XFRM_GRO                32
 #define        XFRM_ESP_NO_TRAILER     64
 #define        XFRM_DEV_RESUME         128
+#define        XFRM_XMIT               256
 
        __u32                   status;
 #define CRYPTO_SUCCESS                         1
@@ -1607,10 +1610,11 @@ int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
 void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu);
 int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
 int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
-int xfrm_user_policy(struct sock *sk, int optname,
-                    u8 __user *optval, int optlen);
+int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval,
+                    int optlen);
 #else
-static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
+static inline int xfrm_user_policy(struct sock *sk, int optname,
+                                  sockptr_t optval, int optlen)
 {
        return -ENOPROTOOPT;
 }
index a4ff226..6842990 100644 (file)
@@ -40,7 +40,7 @@ struct xsk_buff_pool {
        u32 headroom;
        u32 chunk_size;
        u32 frame_len;
-       bool cheap_dma;
+       bool dma_need_sync;
        bool unaligned;
        void *addrs;
        struct device *dev;
@@ -80,7 +80,7 @@ static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
 void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
 static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
 {
-       if (xskb->pool->cheap_dma)
+       if (!xskb->pool->dma_need_sync)
                return;
 
        xp_dma_sync_for_cpu_slow(xskb);
@@ -91,7 +91,7 @@ void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
 static inline void xp_dma_sync_for_device(struct xsk_buff_pool *pool,
                                          dma_addr_t dma, size_t size)
 {
-       if (pool->cheap_dma)
+       if (!pool->dma_need_sync)
                return;
 
        xp_dma_sync_for_device_slow(pool, dma, size);
index 4953e99..da369b1 100644 (file)
 #define PGID_MCIPV4                    62
 #define PGID_MCIPV6                    63
 
+#define for_each_unicast_dest_pgid(ocelot, pgid)               \
+       for ((pgid) = 0;                                        \
+            (pgid) < (ocelot)->num_phys_ports;                 \
+            (pgid)++)
+
+#define for_each_nonreserved_multicast_dest_pgid(ocelot, pgid) \
+       for ((pgid) = (ocelot)->num_phys_ports + 1;             \
+            (pgid) < PGID_CPU;                                 \
+            (pgid)++)
+
+#define for_each_aggr_pgid(ocelot, pgid)                       \
+       for ((pgid) = PGID_AGGR;                                \
+            (pgid) < PGID_SRC;                                 \
+            (pgid)++)
+
 /* Aggregation PGIDs, one per Link Aggregation Code */
 #define PGID_AGGR                      64
 
@@ -111,6 +126,7 @@ enum ocelot_target {
        HSIO,
        PTP,
        GCB,
+       DEV_GMII,
        TARGET_MAX,
 };
 
@@ -393,6 +409,48 @@ enum ocelot_reg {
        PTP_CLK_CFG_ADJ_CFG,
        PTP_CLK_CFG_ADJ_FREQ,
        GCB_SOFT_RST = GCB << TARGET_OFFSET,
+       GCB_MIIM_MII_STATUS,
+       GCB_MIIM_MII_CMD,
+       GCB_MIIM_MII_DATA,
+       DEV_CLOCK_CFG = DEV_GMII << TARGET_OFFSET,
+       DEV_PORT_MISC,
+       DEV_EVENTS,
+       DEV_EEE_CFG,
+       DEV_RX_PATH_DELAY,
+       DEV_TX_PATH_DELAY,
+       DEV_PTP_PREDICT_CFG,
+       DEV_MAC_ENA_CFG,
+       DEV_MAC_MODE_CFG,
+       DEV_MAC_MAXLEN_CFG,
+       DEV_MAC_TAGS_CFG,
+       DEV_MAC_ADV_CHK_CFG,
+       DEV_MAC_IFG_CFG,
+       DEV_MAC_HDX_CFG,
+       DEV_MAC_DBG_CFG,
+       DEV_MAC_FC_MAC_LOW_CFG,
+       DEV_MAC_FC_MAC_HIGH_CFG,
+       DEV_MAC_STICKY,
+       PCS1G_CFG,
+       PCS1G_MODE_CFG,
+       PCS1G_SD_CFG,
+       PCS1G_ANEG_CFG,
+       PCS1G_ANEG_NP_CFG,
+       PCS1G_LB_CFG,
+       PCS1G_DBG_CFG,
+       PCS1G_CDET_CFG,
+       PCS1G_ANEG_STATUS,
+       PCS1G_ANEG_NP_STATUS,
+       PCS1G_LINK_STATUS,
+       PCS1G_LINK_DOWN_CNT,
+       PCS1G_STICKY,
+       PCS1G_DEBUG_STATUS,
+       PCS1G_LPI_CFG,
+       PCS1G_LPI_WAKE_ERROR_CNT,
+       PCS1G_LPI_STATUS,
+       PCS1G_TSTPAT_MODE_CFG,
+       PCS1G_TSTPAT_STATUS,
+       DEV_PCS_FX100_CFG,
+       DEV_PCS_FX100_STATUS,
 };
 
 enum ocelot_regfield {
@@ -432,15 +490,30 @@ enum ocelot_regfield {
        ANA_TABLES_MACACCESS_B_DOM,
        ANA_TABLES_MACTINDX_BUCKET,
        ANA_TABLES_MACTINDX_M_INDEX,
+       QSYS_SWITCH_PORT_MODE_PORT_ENA,
+       QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG,
+       QSYS_SWITCH_PORT_MODE_YEL_RSRVD,
+       QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE,
+       QSYS_SWITCH_PORT_MODE_TX_PFC_ENA,
+       QSYS_SWITCH_PORT_MODE_TX_PFC_MODE,
        QSYS_TIMED_FRAME_ENTRY_TFRM_VLD,
        QSYS_TIMED_FRAME_ENTRY_TFRM_FP,
        QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO,
        QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL,
        QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T,
+       SYS_PORT_MODE_DATA_WO_TS,
+       SYS_PORT_MODE_INCL_INJ_HDR,
+       SYS_PORT_MODE_INCL_XTR_HDR,
+       SYS_PORT_MODE_INCL_HDR_ERR,
        SYS_RESET_CFG_CORE_ENA,
        SYS_RESET_CFG_MEM_ENA,
        SYS_RESET_CFG_MEM_INIT,
        GCB_SOFT_RST_SWC_RST,
+       GCB_MIIM_MII_STATUS_PENDING,
+       GCB_MIIM_MII_STATUS_BUSY,
+       SYS_PAUSE_CFG_PAUSE_START,
+       SYS_PAUSE_CFG_PAUSE_STOP,
+       SYS_PAUSE_CFG_PAUSE_ENA,
        REGFIELD_MAX
 };
 
@@ -468,9 +541,10 @@ struct ocelot;
 
 struct ocelot_ops {
        int (*reset)(struct ocelot *ocelot);
+       u16 (*wm_enc)(u16 value);
 };
 
-struct ocelot_acl_block {
+struct ocelot_vcap_block {
        struct list_head rules;
        int count;
        int pol_lpr;
@@ -479,7 +553,7 @@ struct ocelot_acl_block {
 struct ocelot_port {
        struct ocelot                   *ocelot;
 
-       void __iomem                    *regs;
+       struct regmap                   *target;
 
        bool                            vlan_aware;
 
@@ -494,6 +568,8 @@ struct ocelot_port {
        u8                              ts_id;
 
        phy_interface_t                 phy_mode;
+
+       u8                              *xmit_template;
 };
 
 struct ocelot {
@@ -535,7 +611,7 @@ struct ocelot {
 
        struct list_head                multicast;
 
-       struct ocelot_acl_block         acl_block;
+       struct ocelot_vcap_block        block;
 
        const struct vcap_field         *vcap_is2_keys;
        const struct vcap_field         *vcap_is2_actions;
@@ -578,6 +654,11 @@ struct ocelot_policer {
 #define ocelot_rmw_rix(ocelot, val, m, reg, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri))
 #define ocelot_rmw(ocelot, val, m, reg) __ocelot_rmw_ix(ocelot, val, m, reg, 0)
 
+#define ocelot_field_write(ocelot, reg, val) regmap_field_write((ocelot)->regfields[(reg)], (val))
+#define ocelot_field_read(ocelot, reg, val) regmap_field_read((ocelot)->regfields[(reg)], (val))
+#define ocelot_fields_write(ocelot, id, reg, val) regmap_fields_write((ocelot)->regfields[(reg)], (id), (val))
+#define ocelot_fields_read(ocelot, id, reg, val) regmap_fields_read((ocelot)->regfields[(reg)], (id), (val))
+
 /* I/O */
 u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
 void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
@@ -641,5 +722,9 @@ int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
                              struct flow_cls_offload *f, bool ingress);
 int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
                            struct flow_cls_offload *f, bool ingress);
+int ocelot_port_mdb_add(struct ocelot *ocelot, int port,
+                       const struct switchdev_obj_port_mdb *mdb);
+int ocelot_port_mdb_del(struct ocelot *ocelot, int port,
+                       const struct switchdev_obj_port_mdb *mdb);
 
 #endif
index 7c08437..0c6021f 100644 (file)
@@ -8,8 +8,6 @@
 #ifndef _MSCC_OCELOT_DEV_H_
 #define _MSCC_OCELOT_DEV_H_
 
-#define DEV_CLOCK_CFG                                     0x0
-
 #define DEV_CLOCK_CFG_MAC_TX_RST                          BIT(7)
 #define DEV_CLOCK_CFG_MAC_RX_RST                          BIT(6)
 #define DEV_CLOCK_CFG_PCS_TX_RST                          BIT(5)
 #define DEV_CLOCK_CFG_LINK_SPEED(x)                       ((x) & GENMASK(1, 0))
 #define DEV_CLOCK_CFG_LINK_SPEED_M                        GENMASK(1, 0)
 
-#define DEV_PORT_MISC                                     0x4
-
 #define DEV_PORT_MISC_FWD_ERROR_ENA                       BIT(4)
 #define DEV_PORT_MISC_FWD_PAUSE_ENA                       BIT(3)
 #define DEV_PORT_MISC_FWD_CTRL_ENA                        BIT(2)
 #define DEV_PORT_MISC_DEV_LOOP_ENA                        BIT(1)
 #define DEV_PORT_MISC_HDX_FAST_DIS                        BIT(0)
 
-#define DEV_EVENTS                                        0x8
-
-#define DEV_EEE_CFG                                       0xc
-
 #define DEV_EEE_CFG_EEE_ENA                               BIT(22)
 #define DEV_EEE_CFG_EEE_TIMER_AGE(x)                      (((x) << 15) & GENMASK(21, 15))
 #define DEV_EEE_CFG_EEE_TIMER_AGE_M                       GENMASK(21, 15)
 #define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_X(x)                (((x) & GENMASK(7, 1)) >> 1)
 #define DEV_EEE_CFG_PORT_LPI                              BIT(0)
 
-#define DEV_RX_PATH_DELAY                                 0x10
-
-#define DEV_TX_PATH_DELAY                                 0x14
-
-#define DEV_PTP_PREDICT_CFG                               0x18
-
 #define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG(x)        (((x) << 4) & GENMASK(11, 4))
 #define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_M         GENMASK(11, 4)
 #define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_X(x)      (((x) & GENMASK(11, 4)) >> 4)
 #define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG(x)      ((x) & GENMASK(3, 0))
 #define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG_M       GENMASK(3, 0)
 
-#define DEV_MAC_ENA_CFG                                   0x1c
-
 #define DEV_MAC_ENA_CFG_RX_ENA                            BIT(4)
 #define DEV_MAC_ENA_CFG_TX_ENA                            BIT(0)
 
-#define DEV_MAC_MODE_CFG                                  0x20
-
 #define DEV_MAC_MODE_CFG_FC_WORD_SYNC_ENA                 BIT(8)
 #define DEV_MAC_MODE_CFG_GIGA_MODE_ENA                    BIT(4)
 #define DEV_MAC_MODE_CFG_FDX_ENA                          BIT(0)
 
-#define DEV_MAC_MAXLEN_CFG                                0x24
-
-#define DEV_MAC_TAGS_CFG                                  0x28
-
 #define DEV_MAC_TAGS_CFG_TAG_ID(x)                        (((x) << 16) & GENMASK(31, 16))
 #define DEV_MAC_TAGS_CFG_TAG_ID_M                         GENMASK(31, 16)
 #define DEV_MAC_TAGS_CFG_TAG_ID_X(x)                      (((x) & GENMASK(31, 16)) >> 16)
 #define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA                 BIT(1)
 #define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA                     BIT(0)
 
-#define DEV_MAC_ADV_CHK_CFG                               0x2c
-
 #define DEV_MAC_ADV_CHK_CFG_LEN_DROP_ENA                  BIT(0)
 
-#define DEV_MAC_IFG_CFG                                   0x30
-
 #define DEV_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK             BIT(17)
 #define DEV_MAC_IFG_CFG_REDUCED_TX_IFG                    BIT(16)
 #define DEV_MAC_IFG_CFG_TX_IFG(x)                         (((x) << 8) & GENMASK(12, 8))
@@ -94,8 +68,6 @@
 #define DEV_MAC_IFG_CFG_RX_IFG1(x)                        ((x) & GENMASK(3, 0))
 #define DEV_MAC_IFG_CFG_RX_IFG1_M                         GENMASK(3, 0)
 
-#define DEV_MAC_HDX_CFG                                   0x34
-
 #define DEV_MAC_HDX_CFG_BYPASS_COL_SYNC                   BIT(26)
 #define DEV_MAC_HDX_CFG_OB_ENA                            BIT(25)
 #define DEV_MAC_HDX_CFG_WEXC_DIS                          BIT(24)
 #define DEV_MAC_HDX_CFG_LATE_COL_POS(x)                   ((x) & GENMASK(6, 0))
 #define DEV_MAC_HDX_CFG_LATE_COL_POS_M                    GENMASK(6, 0)
 
-#define DEV_MAC_DBG_CFG                                   0x38
-
 #define DEV_MAC_DBG_CFG_TBI_MODE                          BIT(4)
 #define DEV_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA               BIT(0)
 
-#define DEV_MAC_FC_MAC_LOW_CFG                            0x3c
-
-#define DEV_MAC_FC_MAC_HIGH_CFG                           0x40
-
-#define DEV_MAC_STICKY                                    0x44
-
 #define DEV_MAC_STICKY_RX_IPG_SHRINK_STICKY               BIT(9)
 #define DEV_MAC_STICKY_RX_PREAM_SHRINK_STICKY             BIT(8)
 #define DEV_MAC_STICKY_RX_CARRIER_EXT_STICKY              BIT(7)
 #define DEV_MAC_STICKY_TX_FRM_LEN_OVR_STICKY              BIT(1)
 #define DEV_MAC_STICKY_TX_ABORT_STICKY                    BIT(0)
 
-#define PCS1G_CFG                                         0x48
-
 #define PCS1G_CFG_LINK_STATUS_TYPE                        BIT(4)
 #define PCS1G_CFG_AN_LINK_CTRL_ENA                        BIT(1)
 #define PCS1G_CFG_PCS_ENA                                 BIT(0)
 
-#define PCS1G_MODE_CFG                                    0x4c
-
 #define PCS1G_MODE_CFG_UNIDIR_MODE_ENA                    BIT(4)
 #define PCS1G_MODE_CFG_SGMII_MODE_ENA                     BIT(0)
 
-#define PCS1G_SD_CFG                                      0x50
-
 #define PCS1G_SD_CFG_SD_SEL                               BIT(8)
 #define PCS1G_SD_CFG_SD_POL                               BIT(4)
 #define PCS1G_SD_CFG_SD_ENA                               BIT(0)
 
-#define PCS1G_ANEG_CFG                                    0x54
-
 #define PCS1G_ANEG_CFG_ADV_ABILITY(x)                     (((x) << 16) & GENMASK(31, 16))
 #define PCS1G_ANEG_CFG_ADV_ABILITY_M                      GENMASK(31, 16)
 #define PCS1G_ANEG_CFG_ADV_ABILITY_X(x)                   (((x) & GENMASK(31, 16)) >> 16)
 #define PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT              BIT(1)
 #define PCS1G_ANEG_CFG_ANEG_ENA                           BIT(0)
 
-#define PCS1G_ANEG_NP_CFG                                 0x58
-
 #define PCS1G_ANEG_NP_CFG_NP_TX(x)                        (((x) << 16) & GENMASK(31, 16))
 #define PCS1G_ANEG_NP_CFG_NP_TX_M                         GENMASK(31, 16)
 #define PCS1G_ANEG_NP_CFG_NP_TX_X(x)                      (((x) & GENMASK(31, 16)) >> 16)
 #define PCS1G_ANEG_NP_CFG_NP_LOADED_ONE_SHOT              BIT(0)
 
-#define PCS1G_LB_CFG                                      0x5c
-
 #define PCS1G_LB_CFG_RA_ENA                               BIT(4)
 #define PCS1G_LB_CFG_GMII_PHY_LB_ENA                      BIT(1)
 #define PCS1G_LB_CFG_TBI_HOST_LB_ENA                      BIT(0)
 
-#define PCS1G_DBG_CFG                                     0x60
-
 #define PCS1G_DBG_CFG_UDLT                                BIT(0)
 
-#define PCS1G_CDET_CFG                                    0x64
-
 #define PCS1G_CDET_CFG_CDET_ENA                           BIT(0)
 
-#define PCS1G_ANEG_STATUS                                 0x68
-
 #define PCS1G_ANEG_STATUS_LP_ADV_ABILITY(x)               (((x) << 16) & GENMASK(31, 16))
 #define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_M                GENMASK(31, 16)
 #define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_X(x)             (((x) & GENMASK(31, 16)) >> 16)
 #define PCS1G_ANEG_STATUS_PAGE_RX_STICKY                  BIT(3)
 #define PCS1G_ANEG_STATUS_ANEG_COMPLETE                   BIT(0)
 
-#define PCS1G_ANEG_NP_STATUS                              0x6c
-
-#define PCS1G_LINK_STATUS                                 0x70
-
 #define PCS1G_LINK_STATUS_DELAY_VAR(x)                    (((x) << 12) & GENMASK(15, 12))
 #define PCS1G_LINK_STATUS_DELAY_VAR_M                     GENMASK(15, 12)
 #define PCS1G_LINK_STATUS_DELAY_VAR_X(x)                  (((x) & GENMASK(15, 12)) >> 12)
 #define PCS1G_LINK_STATUS_LINK_STATUS                     BIT(4)
 #define PCS1G_LINK_STATUS_SYNC_STATUS                     BIT(0)
 
-#define PCS1G_LINK_DOWN_CNT                               0x74
-
-#define PCS1G_STICKY                                      0x78
-
 #define PCS1G_STICKY_LINK_DOWN_STICKY                     BIT(4)
 #define PCS1G_STICKY_OUT_OF_SYNC_STICKY                   BIT(0)
 
-#define PCS1G_DEBUG_STATUS                                0x7c
-
-#define PCS1G_LPI_CFG                                     0x80
-
 #define PCS1G_LPI_CFG_QSGMII_MS_SEL                       BIT(20)
 #define PCS1G_LPI_CFG_RX_LPI_OUT_DIS                      BIT(17)
 #define PCS1G_LPI_CFG_LPI_TESTMODE                        BIT(16)
 #define PCS1G_LPI_CFG_LPI_RX_WTIM_X(x)                    (((x) & GENMASK(5, 4)) >> 4)
 #define PCS1G_LPI_CFG_TX_ASSERT_LPIDLE                    BIT(0)
 
-#define PCS1G_LPI_WAKE_ERROR_CNT                          0x84
-
-#define PCS1G_LPI_STATUS                                  0x88
-
 #define PCS1G_LPI_STATUS_RX_LPI_FAIL                      BIT(16)
 #define PCS1G_LPI_STATUS_RX_LPI_EVENT_STICKY              BIT(12)
 #define PCS1G_LPI_STATUS_RX_QUIET                         BIT(9)
 #define PCS1G_LPI_STATUS_TX_QUIET                         BIT(1)
 #define PCS1G_LPI_STATUS_TX_LPI_MODE                      BIT(0)
 
-#define PCS1G_TSTPAT_MODE_CFG                             0x8c
-
-#define PCS1G_TSTPAT_STATUS                               0x90
-
 #define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT(x)                (((x) << 8) & GENMASK(15, 8))
 #define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_M                 GENMASK(15, 8)
 #define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_X(x)              (((x) & GENMASK(15, 8)) >> 8)
 #define PCS1G_TSTPAT_STATUS_JTP_ERR                       BIT(4)
 #define PCS1G_TSTPAT_STATUS_JTP_LOCK                      BIT(0)
 
-#define DEV_PCS_FX100_CFG                                 0x94
-
 #define DEV_PCS_FX100_CFG_SD_SEL                          BIT(26)
 #define DEV_PCS_FX100_CFG_SD_POL                          BIT(25)
 #define DEV_PCS_FX100_CFG_SD_ENA                          BIT(24)
 #define DEV_PCS_FX100_CFG_FEFGEN_ENA                      BIT(1)
 #define DEV_PCS_FX100_CFG_PCS_ENA                         BIT(0)
 
-#define DEV_PCS_FX100_STATUS                              0x98
-
 #define DEV_PCS_FX100_STATUS_EDGE_POS_PTP(x)              (((x) << 8) & GENMASK(11, 8))
 #define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_M               GENMASK(11, 8)
 #define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_X(x)            (((x) & GENMASK(11, 8)) >> 8)
index d8c63aa..a814bc2 100644 (file)
 #define QSYS_PORT_MODE_DEQUEUE_DIS                        BIT(1)
 #define QSYS_PORT_MODE_DEQUEUE_LATE                       BIT(0)
 
-#define QSYS_SWITCH_PORT_MODE_RSZ                         0x4
-
-#define QSYS_SWITCH_PORT_MODE_PORT_ENA                    BIT(14)
-#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(x)             (((x) << 11) & GENMASK(13, 11))
-#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_M              GENMASK(13, 11)
-#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_X(x)           (((x) & GENMASK(13, 11)) >> 11)
-#define QSYS_SWITCH_PORT_MODE_YEL_RSRVD                   BIT(10)
-#define QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE           BIT(9)
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA(x)               (((x) << 1) & GENMASK(8, 1))
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_M                GENMASK(8, 1)
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_X(x)             (((x) & GENMASK(8, 1)) >> 1)
-#define QSYS_SWITCH_PORT_MODE_TX_PFC_MODE                 BIT(0)
-
 #define QSYS_STAT_CNT_CFG_TX_GREEN_CNT_MODE               BIT(5)
 #define QSYS_STAT_CNT_CFG_TX_YELLOW_CNT_MODE              BIT(4)
 #define QSYS_STAT_CNT_CFG_DROP_GREEN_CNT_MODE             BIT(3)
index 16f91e1..79cf40c 100644 (file)
 
 #define SYS_COUNT_TX_OCTETS_RSZ                           0x4
 
-#define SYS_PORT_MODE_RSZ                                 0x4
-
-#define SYS_PORT_MODE_DATA_WO_TS(x)                       (((x) << 5) & GENMASK(6, 5))
-#define SYS_PORT_MODE_DATA_WO_TS_M                        GENMASK(6, 5)
-#define SYS_PORT_MODE_DATA_WO_TS_X(x)                     (((x) & GENMASK(6, 5)) >> 5)
-#define SYS_PORT_MODE_INCL_INJ_HDR(x)                     (((x) << 3) & GENMASK(4, 3))
-#define SYS_PORT_MODE_INCL_INJ_HDR_M                      GENMASK(4, 3)
-#define SYS_PORT_MODE_INCL_INJ_HDR_X(x)                   (((x) & GENMASK(4, 3)) >> 3)
-#define SYS_PORT_MODE_INCL_XTR_HDR(x)                     (((x) << 1) & GENMASK(2, 1))
-#define SYS_PORT_MODE_INCL_XTR_HDR_M                      GENMASK(2, 1)
-#define SYS_PORT_MODE_INCL_XTR_HDR_X(x)                   (((x) & GENMASK(2, 1)) >> 1)
-#define SYS_PORT_MODE_INJ_HDR_ERR                         BIT(0)
-
 #define SYS_FRONT_PORT_MODE_RSZ                           0x4
 
 #define SYS_FRONT_PORT_MODE_HDX_MODE                      BIT(0)
 #define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET(x)          ((x) & GENMASK(5, 0))
 #define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET_M           GENMASK(5, 0)
 
-#define SYS_PAUSE_CFG_RSZ                                 0x4
-
-#define SYS_PAUSE_CFG_PAUSE_START(x)                      (((x) << 10) & GENMASK(18, 10))
-#define SYS_PAUSE_CFG_PAUSE_START_M                       GENMASK(18, 10)
-#define SYS_PAUSE_CFG_PAUSE_START_X(x)                    (((x) & GENMASK(18, 10)) >> 10)
-#define SYS_PAUSE_CFG_PAUSE_STOP(x)                       (((x) << 1) & GENMASK(9, 1))
-#define SYS_PAUSE_CFG_PAUSE_STOP_M                        GENMASK(9, 1)
-#define SYS_PAUSE_CFG_PAUSE_STOP_X(x)                     (((x) & GENMASK(9, 1)) >> 1)
-#define SYS_PAUSE_CFG_PAUSE_ENA                           BIT(0)
-
 #define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START(x)              (((x) << 9) & GENMASK(17, 9))
 #define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_M               GENMASK(17, 9)
 #define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_X(x)            (((x) & GENMASK(17, 9)) >> 9)
index 6ce8eff..70cbc50 100644 (file)
@@ -66,6 +66,7 @@ struct snd_compr_runtime {
  * @direction: stream direction, playback/recording
  * @metadata_set: metadata set flag, true when set
  * @next_track: has userspace signal next track transition, true when set
+ * @partial_drain: undergoing partial_drain for stream, true when set
  * @private_data: pointer to DSP private data
  * @dma_buffer: allocated buffer if any
  */
@@ -78,6 +79,7 @@ struct snd_compr_stream {
        enum snd_compr_direction direction;
        bool metadata_set;
        bool next_track;
+       bool partial_drain;
        void *private_data;
        struct snd_dma_buffer dma_buffer;
 };
@@ -182,7 +184,13 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
        if (snd_BUG_ON(!stream))
                return;
 
-       stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+       /* for partial_drain case we are back to running state on success */
+       if (stream->partial_drain) {
+               stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
+               stream->partial_drain = false; /* clear this flag as well */
+       } else {
+               stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+       }
 
        wake_up(&stream->runtime->sleep);
 }
index b652206..8c5e381 100644 (file)
@@ -161,4 +161,15 @@ int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
 
 #define SND_DMAENGINE_PCM_DRV_NAME "snd_dmaengine_pcm"
 
+struct dmaengine_pcm {
+       struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1];
+       const struct snd_dmaengine_pcm_config *config;
+       struct snd_soc_component component;
+       unsigned int flags;
+};
+
+static inline struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p)
+{
+       return container_of(p, struct dmaengine_pcm, component);
+}
 #endif
index ef5dd28..2756f9b 100644 (file)
@@ -444,6 +444,8 @@ int devm_snd_soc_register_component(struct device *dev,
                         const struct snd_soc_component_driver *component_driver,
                         struct snd_soc_dai_driver *dai_drv, int num_dai);
 void snd_soc_unregister_component(struct device *dev);
+struct snd_soc_component *snd_soc_lookup_component_nolocked(struct device *dev,
+                                                           const char *driver_name);
 struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
                                                   const char *driver_name);
 
@@ -1361,6 +1363,10 @@ void snd_soc_remove_pcm_runtime(struct snd_soc_card *card,
 struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component,
                                         struct snd_soc_dai_driver *dai_drv,
                                         bool legacy_dai_naming);
+struct snd_soc_dai *devm_snd_soc_register_dai(struct device *dev,
+                                             struct snd_soc_component *component,
+                                             struct snd_soc_dai_driver *dai_drv,
+                                             bool legacy_dai_naming);
 void snd_soc_unregister_dai(struct snd_soc_dai *dai);
 
 struct snd_soc_dai *snd_soc_find_dai(
index 1257f26..93b1142 100644 (file)
@@ -254,7 +254,6 @@ TRACE_EVENT(block_bio_bounce,
  * block_bio_complete - completed all work on the block operation
  * @q: queue holding the block operation
  * @bio: block operation completed
- * @error: io error value
  *
  * This tracepoint indicates there is no further work to do on this
  * block IO operation @bio.
index ba9efdc..059b6e4 100644 (file)
@@ -400,7 +400,7 @@ enum rxrpc_tx_point {
        EM(rxrpc_cong_begin_retransmission,     " Retrans") \
        EM(rxrpc_cong_cleared_nacks,            " Cleared") \
        EM(rxrpc_cong_new_low_nack,             " NewLowN") \
-       EM(rxrpc_cong_no_change,                "") \
+       EM(rxrpc_cong_no_change,                " -") \
        EM(rxrpc_cong_progress,                 " Progres") \
        EM(rxrpc_cong_retransmit_again,         " ReTxAgn") \
        EM(rxrpc_cong_rtt_window_end,           " RttWinE") \
index b73d3e1..cd24e8a 100644 (file)
@@ -177,9 +177,9 @@ DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
 TRACE_EVENT(xdp_cpumap_kthread,
 
        TP_PROTO(int map_id, unsigned int processed,  unsigned int drops,
-                int sched),
+                int sched, struct xdp_cpumap_stats *xdp_stats),
 
-       TP_ARGS(map_id, processed, drops, sched),
+       TP_ARGS(map_id, processed, drops, sched, xdp_stats),
 
        TP_STRUCT__entry(
                __field(int, map_id)
@@ -188,6 +188,9 @@ TRACE_EVENT(xdp_cpumap_kthread,
                __field(unsigned int, drops)
                __field(unsigned int, processed)
                __field(int, sched)
+               __field(unsigned int, xdp_pass)
+               __field(unsigned int, xdp_drop)
+               __field(unsigned int, xdp_redirect)
        ),
 
        TP_fast_assign(
@@ -197,16 +200,21 @@ TRACE_EVENT(xdp_cpumap_kthread,
                __entry->drops          = drops;
                __entry->processed      = processed;
                __entry->sched  = sched;
+               __entry->xdp_pass       = xdp_stats->pass;
+               __entry->xdp_drop       = xdp_stats->drop;
+               __entry->xdp_redirect   = xdp_stats->redirect;
        ),
 
        TP_printk("kthread"
                  " cpu=%d map_id=%d action=%s"
                  " processed=%u drops=%u"
-                 " sched=%d",
+                 " sched=%d"
+                 " xdp_pass=%u xdp_drop=%u xdp_redirect=%u",
                  __entry->cpu, __entry->map_id,
                  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
                  __entry->processed, __entry->drops,
-                 __entry->sched)
+                 __entry->sched,
+                 __entry->xdp_pass, __entry->xdp_drop, __entry->xdp_redirect)
 );
 
 TRACE_EVENT(xdp_cpumap_enqueue,
index f4a0130..c8c189a 100644 (file)
@@ -606,9 +606,9 @@ __SYSCALL(__NR_sendto, sys_sendto)
 #define __NR_recvfrom 207
 __SC_COMP(__NR_recvfrom, sys_recvfrom, compat_sys_recvfrom)
 #define __NR_setsockopt 208
-__SC_COMP(__NR_setsockopt, sys_setsockopt, compat_sys_setsockopt)
+__SC_COMP(__NR_setsockopt, sys_setsockopt, sys_setsockopt)
 #define __NR_getsockopt 209
-__SC_COMP(__NR_getsockopt, sys_getsockopt, compat_sys_getsockopt)
+__SC_COMP(__NR_getsockopt, sys_getsockopt, sys_getsockopt)
 #define __NR_shutdown 210
 __SYSCALL(__NR_shutdown, sys_shutdown)
 #define __NR_sendmsg 211
index cd7655e..a9030bc 100644 (file)
@@ -5,7 +5,7 @@
 
 
 /*
- * See http://icawww1.epfl.ch/linux-atm/magic.html for the complete list of
+ * See https://icawww1.epfl.ch/linux-atm/magic.html for the complete list of
  * "magic" ioctl numbers.
  */
 
index 0ae34c8..9c8604c 100644 (file)
@@ -72,8 +72,8 @@ enum batadv_subtype {
 
 /**
  * enum batadv_iv_flags - flags used in B.A.T.M.A.N. IV OGM packets
- * @BATADV_NOT_BEST_NEXT_HOP: flag is set when ogm packet is forwarded and was
- *     previously received from someone else than the best neighbor.
+ * @BATADV_NOT_BEST_NEXT_HOP: flag is set when the ogm packet is forwarded and
+ *  was previously received from someone other than the best neighbor.
  * @BATADV_PRIMARIES_FIRST_HOP: flag unused.
  * @BATADV_DIRECTLINK: flag is for the first hop or if rebroadcasted from a
  *     one hop neighbor on the interface where it was originally received.
@@ -195,8 +195,8 @@ struct batadv_bla_claim_dst {
 /**
  * struct batadv_ogm_packet - ogm (routing protocol) packet
  * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
  * @flags: contains routing relevant flags - see enum batadv_iv_flags
  * @seqno: sequence identification
  * @orig: address of the source node
@@ -247,7 +247,7 @@ struct batadv_ogm2_packet {
 /**
  * struct batadv_elp_packet - elp (neighbor discovery) packet
  * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
  * @orig: originator mac address
  * @seqno: sequence number
  * @elp_interval: currently used ELP sending interval in ms
@@ -265,15 +265,15 @@ struct batadv_elp_packet {
 /**
  * struct batadv_icmp_header - common members among all the ICMP packets
  * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
  * @msg_type: ICMP packet type
  * @dst: address of the destination node
  * @orig: address of the source node
  * @uid: local ICMP socket identifier
  * @align: not used - useful for alignment purposes only
  *
- * This structure is used for ICMP packets parsing only and it is never sent
+ * This structure is used for ICMP packet parsing only and it is never sent
  * over the wire. The alignment field at the end is there to ensure that
  * members are padded the same way as they are in real packets.
  */
@@ -291,8 +291,8 @@ struct batadv_icmp_header {
 /**
  * struct batadv_icmp_packet - ICMP packet
  * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
  * @msg_type: ICMP packet type
  * @dst: address of the destination node
  * @orig: address of the source node
@@ -315,8 +315,8 @@ struct batadv_icmp_packet {
 /**
  * struct batadv_icmp_tp_packet - ICMP TP Meter packet
  * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
  * @msg_type: ICMP packet type
  * @dst: address of the destination node
  * @orig: address of the source node
@@ -358,8 +358,8 @@ enum batadv_icmp_tp_subtype {
 /**
  * struct batadv_icmp_packet_rr - ICMP RouteRecord packet
  * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
  * @msg_type: ICMP packet type
  * @dst: address of the destination node
  * @orig: address of the source node
@@ -397,8 +397,8 @@ struct batadv_icmp_packet_rr {
 /**
  * struct batadv_unicast_packet - unicast packet for network payload
  * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
  * @ttvn: translation table version number
  * @dest: originator destination of the unicast packet
  */
@@ -433,8 +433,8 @@ struct batadv_unicast_4addr_packet {
 /**
  * struct batadv_frag_packet - fragmented packet
  * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
  * @dest: final destination used when routing fragments
  * @orig: originator of the fragment used when merging the packet
  * @no: fragment number within this sequence
@@ -467,8 +467,8 @@ struct batadv_frag_packet {
 /**
  * struct batadv_bcast_packet - broadcast packet for network payload
  * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
  * @reserved: reserved byte for alignment
  * @seqno: sequence identification
  * @orig: originator of the broadcast packet
@@ -488,10 +488,10 @@ struct batadv_bcast_packet {
 /**
  * struct batadv_coded_packet - network coded packet
  * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
  * @first_source: original source of first included packet
- * @first_orig_dest: original destinal of first included packet
+ * @first_orig_dest: original destination of first included packet
  * @first_crc: checksum of first included packet
  * @first_ttvn: tt-version number of first included packet
  * @second_ttl: ttl of second packet
@@ -523,8 +523,8 @@ struct batadv_coded_packet {
 /**
  * struct batadv_unicast_tvlv_packet - generic unicast packet with tvlv payload
  * @packet_type: batman-adv packet type, part of the general header
- * @version: batman-adv protocol version, part of the genereal header
- * @ttl: time to live for this packet, part of the genereal header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
  * @reserved: reserved field (for packet alignment)
  * @src: address of the source
  * @dst: address of the destination
index 617c180..bb0ae94 100644 (file)
@@ -69,7 +69,7 @@ enum batadv_tt_client_flags {
 
        /**
         * @BATADV_TT_CLIENT_TEMP: this global client has been detected to be
-        * part of the network but no nnode has already announced it
+        * part of the network but no node has already announced it
         */
        BATADV_TT_CLIENT_TEMP    = (1 << 11),
 };
@@ -131,7 +131,7 @@ enum batadv_gw_modes {
        /** @BATADV_GW_MODE_CLIENT: send DHCP requests to gw servers */
        BATADV_GW_MODE_CLIENT,
 
-       /** @BATADV_GW_MODE_SERVER: announce itself as gatway server */
+       /** @BATADV_GW_MODE_SERVER: announce itself as gateway server */
        BATADV_GW_MODE_SERVER,
 };
 
@@ -427,7 +427,8 @@ enum batadv_nl_attrs {
 
        /**
         * @BATADV_ATTR_HOP_PENALTY: defines the penalty which will be applied
-        *  to an originator message's tq-field on every hop.
+        *  to an originator message's tq-field on every hop and/or per
+        *  hard interface
         */
        BATADV_ATTR_HOP_PENALTY,
 
index 1968481..54d0c88 100644 (file)
@@ -189,6 +189,7 @@ enum bpf_prog_type {
        BPF_PROG_TYPE_STRUCT_OPS,
        BPF_PROG_TYPE_EXT,
        BPF_PROG_TYPE_LSM,
+       BPF_PROG_TYPE_SK_LOOKUP,
 };
 
 enum bpf_attach_type {
@@ -226,6 +227,9 @@ enum bpf_attach_type {
        BPF_CGROUP_INET4_GETSOCKNAME,
        BPF_CGROUP_INET6_GETSOCKNAME,
        BPF_XDP_DEVMAP,
+       BPF_CGROUP_INET_SOCK_RELEASE,
+       BPF_XDP_CPUMAP,
+       BPF_SK_LOOKUP,
        __MAX_BPF_ATTACH_TYPE
 };
 
@@ -653,7 +657,7 @@ union bpf_attr {
  *             Map value associated to *key*, or **NULL** if no entry was
  *             found.
  *
- * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
+ * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
  *     Description
  *             Add or update the value of the entry associated to *key* in
  *             *map* with *value*. *flags* is one of:
@@ -671,13 +675,13 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_map_delete_elem(struct bpf_map *map, const void *key)
+ * long bpf_map_delete_elem(struct bpf_map *map, const void *key)
  *     Description
  *             Delete entry with *key* from *map*.
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
  *     Description
  *             For tracing programs, safely attempt to read *size* bytes from
  *             kernel space address *unsafe_ptr* and store the data in *dst*.
@@ -695,7 +699,7 @@ union bpf_attr {
  *     Return
  *             Current *ktime*.
  *
- * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
+ * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
  *     Description
  *             This helper is a "printk()-like" facility for debugging. It
  *             prints a message defined by format *fmt* (of size *fmt_size*)
@@ -775,7 +779,7 @@ union bpf_attr {
  *     Return
  *             The SMP id of the processor running the program.
  *
- * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
+ * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
  *     Description
  *             Store *len* bytes from address *from* into the packet
  *             associated to *skb*, at *offset*. *flags* are a combination of
@@ -792,7 +796,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
+ * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
  *     Description
  *             Recompute the layer 3 (e.g. IP) checksum for the packet
  *             associated to *skb*. Computation is incremental, so the helper
@@ -817,7 +821,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
+ * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
  *     Description
  *             Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
  *             packet associated to *skb*. Computation is incremental, so the
@@ -849,7 +853,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
+ * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
  *     Description
  *             This special helper is used to trigger a "tail call", or in
  *             other words, to jump into another eBPF program. The same stack
@@ -880,7 +884,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
+ * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
  *     Description
  *             Clone and redirect the packet associated to *skb* to another
  *             net device of index *ifindex*. Both ingress and egress
@@ -916,7 +920,7 @@ union bpf_attr {
  *             A 64-bit integer containing the current GID and UID, and
  *             created as such: *current_gid* **<< 32 \|** *current_uid*.
  *
- * int bpf_get_current_comm(void *buf, u32 size_of_buf)
+ * long bpf_get_current_comm(void *buf, u32 size_of_buf)
  *     Description
  *             Copy the **comm** attribute of the current task into *buf* of
  *             *size_of_buf*. The **comm** attribute contains the name of
@@ -953,7 +957,7 @@ union bpf_attr {
  *     Return
  *             The classid, or 0 for the default unconfigured classid.
  *
- * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
+ * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
  *     Description
  *             Push a *vlan_tci* (VLAN tag control information) of protocol
  *             *vlan_proto* to the packet associated to *skb*, then update
@@ -969,7 +973,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_skb_vlan_pop(struct sk_buff *skb)
+ * long bpf_skb_vlan_pop(struct sk_buff *skb)
  *     Description
  *             Pop a VLAN header from the packet associated to *skb*.
  *
@@ -981,7 +985,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
+ * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
  *     Description
  *             Get tunnel metadata. This helper takes a pointer *key* to an
  *             empty **struct bpf_tunnel_key** of **size**, that will be
@@ -1032,7 +1036,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
+ * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
  *     Description
  *             Populate tunnel metadata for packet associated to *skb.* The
  *             tunnel metadata is set to the contents of *key*, of *size*. The
@@ -1098,7 +1102,7 @@ union bpf_attr {
  *             The value of the perf event counter read from the map, or a
  *             negative error code in case of failure.
  *
- * int bpf_redirect(u32 ifindex, u64 flags)
+ * long bpf_redirect(u32 ifindex, u64 flags)
  *     Description
  *             Redirect the packet to another net device of index *ifindex*.
  *             This helper is somewhat similar to **bpf_clone_redirect**\
@@ -1145,7 +1149,7 @@ union bpf_attr {
  *             The realm of the route for the packet associated to *skb*, or 0
  *             if none was found.
  *
- * int bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
  *     Description
  *             Write raw *data* blob into a special BPF perf event held by
  *             *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
@@ -1190,7 +1194,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
+ * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
  *     Description
  *             This helper was provided as an easy way to load data from a
  *             packet. It can be used to load *len* bytes from *offset* from
@@ -1207,7 +1211,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
+ * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
  *     Description
  *             Walk a user or a kernel stack and return its id. To achieve
  *             this, the helper needs *ctx*, which is a pointer to the context
@@ -1276,7 +1280,7 @@ union bpf_attr {
  *             The checksum result, or a negative error code in case of
  *             failure.
  *
- * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
+ * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
  *     Description
  *             Retrieve tunnel options metadata for the packet associated to
  *             *skb*, and store the raw tunnel option data to the buffer *opt*
@@ -1294,7 +1298,7 @@ union bpf_attr {
  *     Return
  *             The size of the option data retrieved.
  *
- * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
+ * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
  *     Description
  *             Set tunnel options metadata for the packet associated to *skb*
  *             to the option data contained in the raw buffer *opt* of *size*.
@@ -1304,7 +1308,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
+ * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
  *     Description
  *             Change the protocol of the *skb* to *proto*. Currently
  *             supported are transition from IPv4 to IPv6, and from IPv6 to
@@ -1331,7 +1335,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_skb_change_type(struct sk_buff *skb, u32 type)
+ * long bpf_skb_change_type(struct sk_buff *skb, u32 type)
  *     Description
  *             Change the packet type for the packet associated to *skb*. This
  *             comes down to setting *skb*\ **->pkt_type** to *type*, except
@@ -1358,7 +1362,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
+ * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
  *     Description
  *             Check whether *skb* is a descendant of the cgroup2 held by
  *             *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
@@ -1389,7 +1393,7 @@ union bpf_attr {
  *     Return
  *             A pointer to the current task struct.
  *
- * int bpf_probe_write_user(void *dst, const void *src, u32 len)
+ * long bpf_probe_write_user(void *dst, const void *src, u32 len)
  *     Description
  *             Attempt in a safe way to write *len* bytes from the buffer
  *             *src* to *dst* in memory. It only works for threads that are in
@@ -1408,7 +1412,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
+ * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
  *     Description
  *             Check whether the probe is being run in the context of a given
  *             subset of the cgroup2 hierarchy. The cgroup2 to test is held by
@@ -1420,7 +1424,7 @@ union bpf_attr {
  *             * 1, if the *skb* task does not belong to the cgroup2.
  *             * A negative error code, if an error occurred.
  *
- * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
+ * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
  *     Description
  *             Resize (trim or grow) the packet associated to *skb* to the
  *             new *len*. The *flags* are reserved for future usage, and must
@@ -1444,7 +1448,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_skb_pull_data(struct sk_buff *skb, u32 len)
+ * long bpf_skb_pull_data(struct sk_buff *skb, u32 len)
  *     Description
  *             Pull in non-linear data in case the *skb* is non-linear and not
  *             all of *len* are part of the linear section. Make *len* bytes
@@ -1500,7 +1504,7 @@ union bpf_attr {
  *             recalculation the next time the kernel tries to access this
  *             hash or when the **bpf_get_hash_recalc**\ () helper is called.
  *
- * int bpf_get_numa_node_id(void)
+ * long bpf_get_numa_node_id(void)
  *     Description
  *             Return the id of the current NUMA node. The primary use case
  *             for this helper is the selection of sockets for the local NUMA
@@ -1511,7 +1515,7 @@ union bpf_attr {
  *     Return
  *             The id of current NUMA node.
  *
- * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
+ * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
  *     Description
  *             Grows headroom of packet associated to *skb* and adjusts the
  *             offset of the MAC header accordingly, adding *len* bytes of
@@ -1532,7 +1536,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
+ * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
  *     Description
  *             Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
  *             it is possible to use a negative value for *delta*. This helper
@@ -1547,7 +1551,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
  *     Description
  *             Copy a NUL terminated string from an unsafe kernel address
  *             *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for
@@ -1595,14 +1599,14 @@ union bpf_attr {
  *             is returned (note that **overflowuid** might also be the actual
  *             UID value for the socket).
  *
- * u32 bpf_set_hash(struct sk_buff *skb, u32 hash)
+ * long bpf_set_hash(struct sk_buff *skb, u32 hash)
  *     Description
  *             Set the full hash for *skb* (set the field *skb*\ **->hash**)
  *             to value *hash*.
  *     Return
  *             0
  *
- * int bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
+ * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
  *     Description
  *             Emulate a call to **setsockopt()** on the socket associated to
  *             *bpf_socket*, which must be a full socket. The *level* at
@@ -1621,16 +1625,19 @@ union bpf_attr {
  *
  *             * **SOL_SOCKET**, which supports the following *optname*\ s:
  *               **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
- *               **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**.
+ *               **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**,
+ *               **SO_BINDTODEVICE**, **SO_KEEPALIVE**.
  *             * **IPPROTO_TCP**, which supports the following *optname*\ s:
  *               **TCP_CONGESTION**, **TCP_BPF_IW**,
- *               **TCP_BPF_SNDCWND_CLAMP**.
+ *               **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**,
+ *               **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**,
+ *               **TCP_SYNCNT**, **TCP_USER_TIMEOUT**.
  *             * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
  *             * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
+ * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
  *     Description
  *             Grow or shrink the room for data in the packet associated to
  *             *skb* by *len_diff*, and according to the selected *mode*.
@@ -1676,7 +1683,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
+ * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
  *     Description
  *             Redirect the packet to the endpoint referenced by *map* at
  *             index *key*. Depending on its type, this *map* can contain
@@ -1697,7 +1704,7 @@ union bpf_attr {
  *             **XDP_REDIRECT** on success, or the value of the two lower bits
  *             of the *flags* argument on error.
  *
- * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
+ * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
  *     Description
  *             Redirect the packet to the socket referenced by *map* (of type
  *             **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
@@ -1708,7 +1715,7 @@ union bpf_attr {
  *     Return
  *             **SK_PASS** on success, or **SK_DROP** on error.
  *
- * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
  *     Description
  *             Add an entry to, or update a *map* referencing sockets. The
  *             *skops* is used as a new value for the entry associated to
@@ -1727,7 +1734,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
+ * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
  *     Description
  *             Adjust the address pointed by *xdp_md*\ **->data_meta** by
  *             *delta* (which can be positive or negative). Note that this
@@ -1756,7 +1763,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
+ * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
  *     Description
  *             Read the value of a perf event counter, and store it into *buf*
  *             of size *buf_size*. This helper relies on a *map* of type
@@ -1806,7 +1813,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
+ * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
  *     Description
  *             For an eBPF program attached to a perf event, retrieve the
  *             value of the event counter associated to *ctx* and store it in
@@ -1817,7 +1824,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
+ * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
  *     Description
  *             Emulate a call to **getsockopt()** on the socket associated to
  *             *bpf_socket*, which must be a full socket. The *level* at
@@ -1842,7 +1849,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_override_return(struct pt_regs *regs, u64 rc)
+ * long bpf_override_return(struct pt_regs *regs, u64 rc)
  *     Description
  *             Used for error injection, this helper uses kprobes to override
  *             the return value of the probed function, and to set it to *rc*.
@@ -1867,7 +1874,7 @@ union bpf_attr {
  *     Return
  *             0
  *
- * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
+ * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
  *     Description
  *             Attempt to set the value of the **bpf_sock_ops_cb_flags** field
  *             for the full TCP socket associated to *bpf_sock_ops* to
@@ -1911,7 +1918,7 @@ union bpf_attr {
  *             be set is returned (which comes down to 0 if all bits were set
  *             as required).
  *
- * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
+ * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
  *     Description
  *             This helper is used in programs implementing policies at the
  *             socket level. If the message *msg* is allowed to pass (i.e. if
@@ -1925,7 +1932,7 @@ union bpf_attr {
  *     Return
  *             **SK_PASS** on success, or **SK_DROP** on error.
  *
- * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
+ * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
  *     Description
  *             For socket policies, apply the verdict of the eBPF program to
  *             the next *bytes* (number of bytes) of message *msg*.
@@ -1959,7 +1966,7 @@ union bpf_attr {
  *     Return
  *             0
  *
- * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
+ * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
  *     Description
  *             For socket policies, prevent the execution of the verdict eBPF
  *             program for message *msg* until *bytes* (byte number) have been
@@ -1977,7 +1984,7 @@ union bpf_attr {
  *     Return
  *             0
  *
- * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
+ * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
  *     Description
  *             For socket policies, pull in non-linear data from user space
  *             for *msg* and set pointers *msg*\ **->data** and *msg*\
@@ -2008,7 +2015,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
+ * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
  *     Description
  *             Bind the socket associated to *ctx* to the address pointed by
  *             *addr*, of length *addr_len*. This allows for making outgoing
@@ -2026,7 +2033,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
+ * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
  *     Description
  *             Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
  *             possible to both shrink and grow the packet tail.
@@ -2040,7 +2047,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
+ * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
  *     Description
  *             Retrieve the XFRM state (IP transform framework, see also
  *             **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
@@ -2056,7 +2063,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
+ * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
  *     Description
  *             Return a user or a kernel stack in bpf program provided buffer.
  *             To achieve this, the helper needs *ctx*, which is a pointer
@@ -2089,7 +2096,7 @@ union bpf_attr {
  *             A non-negative value equal to or less than *size* on success,
  *             or a negative error in case of failure.
  *
- * int bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
+ * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
  *     Description
  *             This helper is similar to **bpf_skb_load_bytes**\ () in that
  *             it provides an easy way to load *len* bytes from *offset*
@@ -2111,7 +2118,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
+ * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
  *     Description
  *             Do FIB lookup in kernel tables using parameters in *params*.
  *             If lookup is successful and result shows packet is to be
@@ -2142,7 +2149,7 @@ union bpf_attr {
  *             * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
  *               packet is not forwarded or needs assist from full stack
  *
- * int bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
  *     Description
  *             Add an entry to, or update a sockhash *map* referencing sockets.
  *             The *skops* is used as a new value for the entry associated to
@@ -2161,7 +2168,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
  *     Description
  *             This helper is used in programs implementing policies at the
  *             socket level. If the message *msg* is allowed to pass (i.e. if
@@ -2175,7 +2182,7 @@ union bpf_attr {
  *     Return
  *             **SK_PASS** on success, or **SK_DROP** on error.
  *
- * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
  *     Description
  *             This helper is used in programs implementing policies at the
  *             skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
@@ -2189,7 +2196,7 @@ union bpf_attr {
  *     Return
  *             **SK_PASS** on success, or **SK_DROP** on error.
  *
- * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
+ * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
  *     Description
  *             Encapsulate the packet associated to *skb* within a Layer 3
  *             protocol header. This header is provided in the buffer at
@@ -2226,7 +2233,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
+ * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
  *     Description
  *             Store *len* bytes from address *from* into the packet
  *             associated to *skb*, at *offset*. Only the flags, tag and TLVs
@@ -2241,7 +2248,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
+ * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
  *     Description
  *             Adjust the size allocated to TLVs in the outermost IPv6
  *             Segment Routing Header contained in the packet associated to
@@ -2257,7 +2264,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
+ * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
  *     Description
  *             Apply an IPv6 Segment Routing action of type *action* to the
  *             packet associated to *skb*. Each action takes a parameter
@@ -2286,7 +2293,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_rc_repeat(void *ctx)
+ * long bpf_rc_repeat(void *ctx)
  *     Description
  *             This helper is used in programs implementing IR decoding, to
  *             report a successfully decoded repeat key message. This delays
@@ -2305,7 +2312,7 @@ union bpf_attr {
  *     Return
  *             0
  *
- * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
+ * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
  *     Description
  *             This helper is used in programs implementing IR decoding, to
  *             report a successfully decoded key press with *scancode*,
@@ -2370,7 +2377,7 @@ union bpf_attr {
  *     Return
  *             A pointer to the local storage area.
  *
- * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
  *     Description
  *             Select a **SO_REUSEPORT** socket from a
  *             **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
@@ -2415,7 +2422,7 @@ union bpf_attr {
  *                     Look for an IPv6 socket.
  *
  *             If the *netns* is a negative signed 32-bit integer, then the
- *             socket lookup table in the netns associated with the *ctx* will
+ *             socket lookup table in the netns associated with the *ctx*
  *             will be used. For the TC hooks, this is the netns of the device
  *             in the skb. For socket hooks, this is the netns of the socket.
  *             If *netns* is any other signed 32-bit value greater than or
@@ -2452,7 +2459,7 @@ union bpf_attr {
  *                     Look for an IPv6 socket.
  *
  *             If the *netns* is a negative signed 32-bit integer, then the
- *             socket lookup table in the netns associated with the *ctx* will
+ *             socket lookup table in the netns associated with the *ctx*
  *             will be used. For the TC hooks, this is the netns of the device
  *             in the skb. For socket hooks, this is the netns of the socket.
  *             If *netns* is any other signed 32-bit value greater than or
@@ -2471,7 +2478,7 @@ union bpf_attr {
  *             result is from *reuse*\ **->socks**\ [] using the hash of the
  *             tuple.
  *
- * int bpf_sk_release(struct bpf_sock *sock)
+ * long bpf_sk_release(struct bpf_sock *sock)
  *     Description
  *             Release the reference held by *sock*. *sock* must be a
  *             non-**NULL** pointer that was returned from
@@ -2479,7 +2486,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
+ * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
  *     Description
  *             Push an element *value* in *map*. *flags* is one of:
  *
@@ -2489,19 +2496,19 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_map_pop_elem(struct bpf_map *map, void *value)
+ * long bpf_map_pop_elem(struct bpf_map *map, void *value)
  *     Description
  *             Pop an element from *map*.
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_map_peek_elem(struct bpf_map *map, void *value)
+ * long bpf_map_peek_elem(struct bpf_map *map, void *value)
  *     Description
  *             Get an element from *map* without removing it.
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
+ * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
  *     Description
  *             For socket policies, insert *len* bytes into *msg* at offset
  *             *start*.
@@ -2517,7 +2524,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
+ * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
  *     Description
  *             Will remove *len* bytes from a *msg* starting at byte *start*.
  *             This may result in **ENOMEM** errors under certain situations if
@@ -2529,7 +2536,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
+ * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
  *     Description
  *             This helper is used in programs implementing IR decoding, to
  *             report a successfully decoded pointer movement.
@@ -2543,7 +2550,7 @@ union bpf_attr {
  *     Return
  *             0
  *
- * int bpf_spin_lock(struct bpf_spin_lock *lock)
+ * long bpf_spin_lock(struct bpf_spin_lock *lock)
  *     Description
  *             Acquire a spinlock represented by the pointer *lock*, which is
  *             stored as part of a value of a map. Taking the lock allows to
@@ -2591,7 +2598,7 @@ union bpf_attr {
  *     Return
  *             0
  *
- * int bpf_spin_unlock(struct bpf_spin_lock *lock)
+ * long bpf_spin_unlock(struct bpf_spin_lock *lock)
  *     Description
  *             Release the *lock* previously locked by a call to
  *             **bpf_spin_lock**\ (\ *lock*\ ).
@@ -2614,7 +2621,7 @@ union bpf_attr {
  *             A **struct bpf_tcp_sock** pointer on success, or **NULL** in
  *             case of failure.
  *
- * int bpf_skb_ecn_set_ce(struct sk_buff *skb)
+ * long bpf_skb_ecn_set_ce(struct sk_buff *skb)
  *     Description
  *             Set ECN (Explicit Congestion Notification) field of IP header
  *             to **CE** (Congestion Encountered) if current value is **ECT**
@@ -2651,7 +2658,7 @@ union bpf_attr {
  *             result is from *reuse*\ **->socks**\ [] using the hash of the
  *             tuple.
  *
- * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
+ * long bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
  *     Description
  *             Check whether *iph* and *th* contain a valid SYN cookie ACK for
  *             the listening socket in *sk*.
@@ -2666,7 +2673,7 @@ union bpf_attr {
  *             0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
  *             error otherwise.
  *
- * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
+ * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
  *     Description
  *             Get name of sysctl in /proc/sys/ and copy it into provided by
  *             program buffer *buf* of size *buf_len*.
@@ -2682,7 +2689,7 @@ union bpf_attr {
  *             **-E2BIG** if the buffer wasn't big enough (*buf* will contain
  *             truncated name in this case).
  *
- * int bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
+ * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
  *     Description
  *             Get current value of sysctl as it is presented in /proc/sys
  *             (incl. newline, etc), and copy it as a string into provided
@@ -2701,7 +2708,7 @@ union bpf_attr {
  *             **-EINVAL** if current value was unavailable, e.g. because
  *             sysctl is uninitialized and read returns -EIO for it.
  *
- * int bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
+ * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
  *     Description
  *             Get new value being written by user space to sysctl (before
  *             the actual write happens) and copy it as a string into
@@ -2718,7 +2725,7 @@ union bpf_attr {
  *
  *             **-EINVAL** if sysctl is being read.
  *
- * int bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
+ * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
  *     Description
  *             Override new value being written by user space to sysctl with
  *             value provided by program in buffer *buf* of size *buf_len*.
@@ -2735,7 +2742,7 @@ union bpf_attr {
  *
  *             **-EINVAL** if sysctl is being read.
  *
- * int bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
+ * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
  *     Description
  *             Convert the initial part of the string from buffer *buf* of
  *             size *buf_len* to a long integer according to the given base
@@ -2759,7 +2766,7 @@ union bpf_attr {
  *
  *             **-ERANGE** if resulting value was out of range.
  *
- * int bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
+ * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
  *     Description
  *             Convert the initial part of the string from buffer *buf* of
  *             size *buf_len* to an unsigned long integer according to the
@@ -2810,7 +2817,7 @@ union bpf_attr {
  *             **NULL** if not found or there was an error in adding
  *             a new bpf-local-storage.
  *
- * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
+ * long bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
  *     Description
  *             Delete a bpf-local-storage from a *sk*.
  *     Return
@@ -2818,7 +2825,7 @@ union bpf_attr {
  *
  *             **-ENOENT** if the bpf-local-storage cannot be found.
  *
- * int bpf_send_signal(u32 sig)
+ * long bpf_send_signal(u32 sig)
  *     Description
  *             Send signal *sig* to the process of the current task.
  *             The signal may be delivered to any of this process's threads.
@@ -2859,7 +2866,7 @@ union bpf_attr {
  *
  *             **-EPROTONOSUPPORT** IP packet version is not 4 or 6
  *
- * int bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
  *     Description
  *             Write raw *data* blob into a special BPF perf event held by
  *             *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
@@ -2883,21 +2890,21 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
  *     Description
  *             Safely attempt to read *size* bytes from user space address
  *             *unsafe_ptr* and store the data in *dst*.
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
  *     Description
  *             Safely attempt to read *size* bytes from kernel space address
  *             *unsafe_ptr* and store the data in *dst*.
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
  *     Description
  *             Copy a NUL terminated string from an unsafe user address
  *             *unsafe_ptr* to *dst*. The *size* should include the
@@ -2941,7 +2948,7 @@ union bpf_attr {
  *             including the trailing NUL character. On error, a negative
  *             value.
  *
- * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
  *     Description
  *             Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
  *             to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply.
@@ -2949,14 +2956,14 @@ union bpf_attr {
  *             On success, the strictly positive length of the string, including
  *             the trailing NUL character. On error, a negative value.
  *
- * int bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
+ * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
  *     Description
  *             Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**.
  *             *rcv_nxt* is the ack_seq to be sent out.
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_send_signal_thread(u32 sig)
+ * long bpf_send_signal_thread(u32 sig)
  *     Description
  *             Send signal *sig* to the thread corresponding to the current task.
  *     Return
@@ -2976,7 +2983,7 @@ union bpf_attr {
  *     Return
  *             The 64 bit jiffies
  *
- * int bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
+ * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
  *     Description
  *             For an eBPF program attached to a perf event, retrieve the
  *             branch records (**struct perf_branch_entry**) associated to *ctx*
@@ -2995,7 +3002,7 @@ union bpf_attr {
  *
  *             **-ENOENT** if architecture does not support branch records.
  *
- * int bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
+ * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
  *     Description
  *             Returns 0 on success, values for *pid* and *tgid* as seen from the current
  *             *namespace* will be returned in *nsdata*.
@@ -3007,7 +3014,7 @@ union bpf_attr {
  *
  *             **-ENOENT** if pidns does not exists for the current task.
  *
- * int bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
  *     Description
  *             Write raw *data* blob into a special BPF perf event held by
  *             *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
@@ -3062,8 +3069,12 @@ union bpf_attr {
  *     Return
  *             The id is returned or 0 in case the id could not be retrieved.
  *
- * int bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags)
+ * long bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags)
  *     Description
+ *             Helper is overloaded depending on BPF program type. This
+ *             description applies to **BPF_PROG_TYPE_SCHED_CLS** and
+ *             **BPF_PROG_TYPE_SCHED_ACT** programs.
+ *
  *             Assign the *sk* to the *skb*. When combined with appropriate
  *             routing configuration to receive the packet towards the socket,
  *             will cause *skb* to be delivered to the specified socket.
@@ -3089,6 +3100,56 @@ union bpf_attr {
  *             **-ESOCKTNOSUPPORT** if the socket type is not supported
  *             (reuseport).
  *
+ * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags)
+ *     Description
+ *             Helper is overloaded depending on BPF program type. This
+ *             description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs.
+ *
+ *             Select the *sk* as a result of a socket lookup.
+ *
+ *             For the operation to succeed passed socket must be compatible
+ *             with the packet description provided by the *ctx* object.
+ *
+ *             L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must
+ *             be an exact match. While IP family (**AF_INET** or
+ *             **AF_INET6**) must be compatible, that is IPv6 sockets
+ *             that are not v6-only can be selected for IPv4 packets.
+ *
+ *             Only TCP listeners and UDP unconnected sockets can be
+ *             selected. *sk* can also be NULL to reset any previous
+ *             selection.
+ *
+ *             *flags* argument can be a combination of the following values:
+ *
+ *             * **BPF_SK_LOOKUP_F_REPLACE** to override the previous
+ *               socket selection, potentially done by a BPF program
+ *               that ran before us.
+ *
+ *             * **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip
+ *               load-balancing within reuseport group for the socket
+ *               being selected.
+ *
+ *             On success *ctx->sk* will point to the selected socket.
+ *
+ *     Return
+ *             0 on success, or a negative errno in case of failure.
+ *
+ *             * **-EAFNOSUPPORT** if socket family (*sk->family*) is
+ *               not compatible with packet family (*ctx->family*).
+ *
+ *             * **-EEXIST** if socket has been already selected,
+ *               potentially by another program, and
+ *               **BPF_SK_LOOKUP_F_REPLACE** flag was not specified.
+ *
+ *             * **-EINVAL** if unsupported flags were specified.
+ *
+ *             * **-EPROTOTYPE** if socket L4 protocol
+ *               (*sk->protocol*) doesn't match packet protocol
+ *               (*ctx->protocol*).
+ *
+ *             * **-ESOCKTNOSUPPORT** if socket is not in allowed
+ *               state (TCP listening or UDP unconnected).
+ *
  * u64 bpf_ktime_get_boot_ns(void)
  *     Description
  *             Return the time elapsed since system boot, in nanoseconds.
@@ -3097,7 +3158,7 @@ union bpf_attr {
  *     Return
  *             Current *ktime*.
  *
- * int bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len)
+ * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len)
  *     Description
  *             **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print
  *             out the format string.
@@ -3126,7 +3187,7 @@ union bpf_attr {
  *
  *             **-EOVERFLOW** if an overflow happened: The same object will be tried again.
  *
- * int bpf_seq_write(struct seq_file *m, const void *data, u32 len)
+ * long bpf_seq_write(struct seq_file *m, const void *data, u32 len)
  *     Description
  *             **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data.
  *             The *m* represents the seq_file. The *data* and *len* represent the
@@ -3168,16 +3229,15 @@ union bpf_attr {
  *     Return
  *             The id is returned or 0 in case the id could not be retrieved.
  *
- * void *bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
+ * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
  *     Description
  *             Copy *size* bytes from *data* into a ring buffer *ringbuf*.
- *             If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- *             new data availability is sent.
- *             IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- *             new data availability is sent unconditionally.
+ *             If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ *             of new data availability is sent.
+ *             If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ *             of new data availability is sent unconditionally.
  *     Return
- *             0, on success;
- *             < 0, on error.
+ *             0 on success, or a negative error in case of failure.
  *
  * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
  *     Description
@@ -3189,20 +3249,20 @@ union bpf_attr {
  * void bpf_ringbuf_submit(void *data, u64 flags)
  *     Description
  *             Submit reserved ring buffer sample, pointed to by *data*.
- *             If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- *             new data availability is sent.
- *             IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- *             new data availability is sent unconditionally.
+ *             If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ *             of new data availability is sent.
+ *             If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ *             of new data availability is sent unconditionally.
  *     Return
  *             Nothing. Always succeeds.
  *
  * void bpf_ringbuf_discard(void *data, u64 flags)
  *     Description
  *             Discard reserved ring buffer sample, pointed to by *data*.
- *             If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- *             new data availability is sent.
- *             IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- *             new data availability is sent unconditionally.
+ *             If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ *             of new data availability is sent.
+ *             If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ *             of new data availability is sent unconditionally.
  *     Return
  *             Nothing. Always succeeds.
  *
@@ -3210,18 +3270,20 @@ union bpf_attr {
  *     Description
  *             Query various characteristics of provided ring buffer. What
 *             exactly is queried is determined by *flags*:
- *               - BPF_RB_AVAIL_DATA - amount of data not yet consumed;
- *               - BPF_RB_RING_SIZE - the size of ring buffer;
- *               - BPF_RB_CONS_POS - consumer position (can wrap around);
- *               - BPF_RB_PROD_POS - producer(s) position (can wrap around);
- *             Data returned is just a momentary snapshots of actual values
+ *
+ *             * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
+ *             * **BPF_RB_RING_SIZE**: The size of ring buffer.
+ *             * **BPF_RB_CONS_POS**: Consumer position (can wrap around).
+ *             * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
+ *
+ *             Data returned is just a momentary snapshot of actual values
  *             and could be inaccurate, so this facility should be used to
  *             power heuristics and for reporting, not to make 100% correct
  *             calculation.
  *     Return
- *             Requested value, or 0, if flags are not recognized.
+ *             Requested value, or 0, if *flags* are not recognized.
  *
- * int bpf_csum_level(struct sk_buff *skb, u64 level)
+ * long bpf_csum_level(struct sk_buff *skb, u64 level)
  *     Description
  *             Change the skbs checksum level by one layer up or down, or
  *             reset it entirely to none in order to have the stack perform
@@ -3252,6 +3314,69 @@ union bpf_attr {
  *             case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level
  *             is returned or the error code -EACCES in case the skb is not
  *             subject to CHECKSUM_UNNECESSARY.
+ *
+ * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk)
+ *     Description
+ *             Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
+ *     Return
+ *             *sk* if casting is valid, or NULL otherwise.
+ *
+ * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk)
+ *     Description
+ *             Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
+ *     Return
+ *             *sk* if casting is valid, or NULL otherwise.
+ *
+ * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk)
+ *     Description
+ *             Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer.
+ *     Return
+ *             *sk* if casting is valid, or NULL otherwise.
+ *
+ * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk)
+ *     Description
+ *             Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer.
+ *     Return
+ *             *sk* if casting is valid, or NULL otherwise.
+ *
+ * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk)
+ *     Description
+ *             Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
+ *     Return
+ *             *sk* if casting is valid, or NULL otherwise.
+ *
+ * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
+ *     Description
+ *             Return a user or a kernel stack in bpf program provided buffer.
+ *             To achieve this, the helper needs *task*, which is a valid
+ *             pointer to struct task_struct. To store the stacktrace, the
+ *             bpf program provides *buf* with a nonnegative *size*.
+ *
+ *             The last argument, *flags*, holds the number of stack frames to
+ *             skip (from 0 to 255), masked with
+ *             **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
+ *             the following flags:
+ *
+ *             **BPF_F_USER_STACK**
+ *                     Collect a user space stack instead of a kernel stack.
+ *             **BPF_F_USER_BUILD_ID**
+ *                     Collect buildid+offset instead of ips for user stack,
+ *                     only valid if **BPF_F_USER_STACK** is also specified.
+ *
+ *             **bpf_get_task_stack**\ () can collect up to
+ *             **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
+ *             to a sufficiently large buffer size. Note that
+ *             this limit can be controlled with the **sysctl** program, and
+ *             that it should be manually increased in order to profile long
+ *             user stacks (such as stacks for Java programs). To do so, use:
+ *
+ *             ::
+ *
+ *                     # sysctl kernel.perf_event_max_stack=<new value>
+ *     Return
+ *             A non-negative value equal to or less than *size* on success,
+ *             or a negative error in case of failure.
+ *
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
@@ -3389,7 +3514,14 @@ union bpf_attr {
        FN(ringbuf_submit),             \
        FN(ringbuf_discard),            \
        FN(ringbuf_query),              \
-       FN(csum_level),
+       FN(csum_level),                 \
+       FN(skc_to_tcp6_sock),           \
+       FN(skc_to_tcp_sock),            \
+       FN(skc_to_tcp_timewait_sock),   \
+       FN(skc_to_tcp_request_sock),    \
+       FN(skc_to_udp6_sock),           \
+       FN(get_task_stack),             \
+       /* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
@@ -3531,6 +3663,12 @@ enum {
        BPF_RINGBUF_HDR_SZ              = 8,
 };
 
+/* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */
+enum {
+       BPF_SK_LOOKUP_F_REPLACE         = (1ULL << 0),
+       BPF_SK_LOOKUP_F_NO_REUSEPORT    = (1ULL << 1),
+};
+
 /* Mode for BPF_FUNC_skb_adjust_room helper. */
 enum bpf_adj_room_mode {
        BPF_ADJ_ROOM_NET,
@@ -3774,6 +3912,19 @@ struct bpf_devmap_val {
        } bpf_prog;
 };
 
+/* CPUMAP map-value layout
+ *
+ * The struct data-layout of map-value is a configuration interface.
+ * New members can only be added to the end of this structure.
+ */
+struct bpf_cpumap_val {
+       __u32 qsize;    /* queue size to remote target CPU */
+       union {
+               int   fd;       /* prog fd on map write */
+               __u32 id;       /* prog id on map read */
+       } bpf_prog;
+};
+
 enum sk_action {
        SK_DROP = 0,
        SK_PASS,
@@ -3911,7 +4062,7 @@ struct bpf_link_info {
 
 /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
  * by user and intended to be used by socket (e.g. to bind to, depends on
- * attach attach type).
+ * attach type).
  */
 struct bpf_sock_addr {
        __u32 user_family;      /* Allows 4-byte read, but no write. */
@@ -4260,4 +4411,19 @@ struct bpf_pidns_info {
        __u32 pid;
        __u32 tgid;
 };
+
+/* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
+struct bpf_sk_lookup {
+       __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
+
+       __u32 family;           /* Protocol family (AF_INET, AF_INET6) */
+       __u32 protocol;         /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
+       __u32 remote_ip4;       /* Network byte order */
+       __u32 remote_ip6[4];    /* Network byte order */
+       __u32 remote_port;      /* Network byte order */
+       __u32 local_ip4;        /* Network byte order */
+       __u32 local_ip6[4];     /* Network byte order */
+       __u32 local_port;       /* Host byte order */
+};
+
 #endif /* _UAPI__LINUX_BPF_H__ */
index 10ec1d1..d9970bb 100644 (file)
@@ -169,7 +169,7 @@ struct sockaddr_caif {
  * @CAIFSO_LINK_SELECT:                Selector used if multiple CAIF Link layers are
  *                             available. Either a high bandwidth
  *                             link can be selected (CAIF_LINK_HIGH_BANDW) or
- *                             or a low latency link (CAIF_LINK_LOW_LATENCY).
+ *                             a low latency link (CAIF_LINK_LOW_LATENCY).
  *                              This option is of type __u32.
  *                             Alternatively SO_BINDTODEVICE can be used.
  *
index 08563e6..cfef424 100644 (file)
@@ -451,6 +451,13 @@ enum devlink_attr {
        DEVLINK_ATTR_TRAP_POLICER_RATE,                 /* u64 */
        DEVLINK_ATTR_TRAP_POLICER_BURST,                /* u64 */
 
+       DEVLINK_ATTR_PORT_FUNCTION,                     /* nested */
+
+       DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER,  /* string */
+
+       DEVLINK_ATTR_PORT_LANES,                        /* u32 */
+       DEVLINK_ATTR_PORT_SPLITTABLE,                   /* u8 */
+
        /* add new attributes above here, update the policy in devlink.c */
 
        __DEVLINK_ATTR_MAX,
@@ -497,4 +504,12 @@ enum devlink_resource_unit {
        DEVLINK_RESOURCE_UNIT_ENTRY,
 };
 
+enum devlink_port_function_attr {
+       DEVLINK_PORT_FUNCTION_ATTR_UNSPEC,
+       DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR,     /* binary */
+
+       __DEVLINK_PORT_FUNCTION_ATTR_MAX,
+       DEVLINK_PORT_FUNCTION_ATTR_MAX = __DEVLINK_PORT_FUNCTION_ATTR_MAX - 1
+};
+
 #endif /* _UAPI_LINUX_DEVLINK_H_ */
index ca5cb3e..3c70e8a 100644 (file)
@@ -5,6 +5,13 @@
 #include <linux/types.h>
 #include <linux/time_types.h>
 
+/* RFC 4884: return offset to extension struct + validation */
+struct sock_ee_data_rfc4884 {
+       __u16   len;
+       __u8    flags;
+       __u8    reserved;
+};
+
 struct sock_extended_err {
        __u32   ee_errno;       
        __u8    ee_origin;
@@ -12,7 +19,10 @@ struct sock_extended_err {
        __u8    ee_code;
        __u8    ee_pad;
        __u32   ee_info;
-       __u32   ee_data;
+       union   {
+               __u32   ee_data;
+               struct sock_ee_data_rfc4884 ee_rfc4884;
+       };
 };
 
 #define SO_EE_ORIGIN_NONE      0
@@ -31,6 +41,8 @@ struct sock_extended_err {
 #define SO_EE_CODE_TXTIME_INVALID_PARAM        1
 #define SO_EE_CODE_TXTIME_MISSED       2
 
+#define SO_EE_RFC4884_FLAG_INVALID     1
+
 /**
  *     struct scm_timestamping - timestamps exposed through cmsg
  *
index f4662b3..b4f2d13 100644 (file)
@@ -579,6 +579,76 @@ struct ethtool_pauseparam {
        __u32   tx_pause;
 };
 
+/**
+ * enum ethtool_link_ext_state - link extended state
+ */
+enum ethtool_link_ext_state {
+       ETHTOOL_LINK_EXT_STATE_AUTONEG,
+       ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE,
+       ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH,
+       ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY,
+       ETHTOOL_LINK_EXT_STATE_NO_CABLE,
+       ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE,
+       ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE,
+       ETHTOOL_LINK_EXT_STATE_CALIBRATION_FAILURE,
+       ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED,
+       ETHTOOL_LINK_EXT_STATE_OVERHEAT,
+};
+
+/**
+ * enum ethtool_link_ext_substate_autoneg - more information in addition to
+ * ETHTOOL_LINK_EXT_STATE_AUTONEG.
+ */
+enum ethtool_link_ext_substate_autoneg {
+       ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED = 1,
+       ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED,
+       ETHTOOL_LINK_EXT_SUBSTATE_AN_NEXT_PAGE_EXCHANGE_FAILED,
+       ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_PARTNER_DETECTED_FORCE_MODE,
+       ETHTOOL_LINK_EXT_SUBSTATE_AN_FEC_MISMATCH_DURING_OVERRIDE,
+       ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD,
+};
+
+/**
+ * enum ethtool_link_ext_substate_link_training - more information in addition to
+ * ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE.
+ */
+enum ethtool_link_ext_substate_link_training {
+       ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_FRAME_LOCK_NOT_ACQUIRED = 1,
+       ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT,
+       ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY,
+       ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT,
+};
+
+/**
+ * enum ethtool_link_ext_substate_logical_mismatch - more information in addition
+ * to ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH.
+ */
+enum ethtool_link_ext_substate_link_logical_mismatch {
+       ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK = 1,
+       ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_AM_LOCK,
+       ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_GET_ALIGN_STATUS,
+       ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED,
+       ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED,
+};
+
+/**
+ * enum ethtool_link_ext_substate_bad_signal_integrity - more information in
+ * addition to ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY.
+ */
+enum ethtool_link_ext_substate_bad_signal_integrity {
+       ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS = 1,
+       ETHTOOL_LINK_EXT_SUBSTATE_BSI_UNSUPPORTED_RATE,
+};
+
+/**
+ * enum ethtool_link_ext_substate_cable_issue - more information in
+ * addition to ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE.
+ */
+enum ethtool_link_ext_substate_cable_issue {
+       ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE = 1,
+       ETHTOOL_LINK_EXT_SUBSTATE_CI_CABLE_TEST_FAILURE,
+};
+
 #define ETH_GSTRING_LEN                32
 
 /**
@@ -599,6 +669,7 @@ struct ethtool_pauseparam {
  * @ETH_SS_SOF_TIMESTAMPING: SOF_TIMESTAMPING_* flags
  * @ETH_SS_TS_TX_TYPES: timestamping Tx types
  * @ETH_SS_TS_RX_FILTERS: timestamping Rx filters
+ * @ETH_SS_UDP_TUNNEL_TYPES: UDP tunnel types
  */
 enum ethtool_stringset {
        ETH_SS_TEST             = 0,
@@ -616,6 +687,7 @@ enum ethtool_stringset {
        ETH_SS_SOF_TIMESTAMPING,
        ETH_SS_TS_TX_TYPES,
        ETH_SS_TS_RX_FILTERS,
+       ETH_SS_UDP_TUNNEL_TYPES,
 
        /* add new constants above here */
        ETH_SS_COUNT
@@ -1530,6 +1602,21 @@ enum ethtool_link_mode_bit_indices {
        ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT         = 72,
        ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT         = 73,
        ETHTOOL_LINK_MODE_FEC_LLRS_BIT                   = 74,
+       ETHTOOL_LINK_MODE_100000baseKR_Full_BIT          = 75,
+       ETHTOOL_LINK_MODE_100000baseSR_Full_BIT          = 76,
+       ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT    = 77,
+       ETHTOOL_LINK_MODE_100000baseCR_Full_BIT          = 78,
+       ETHTOOL_LINK_MODE_100000baseDR_Full_BIT          = 79,
+       ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT         = 80,
+       ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT         = 81,
+       ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT = 82,
+       ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT         = 83,
+       ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT         = 84,
+       ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT         = 85,
+       ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT         = 86,
+       ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87,
+       ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT         = 88,
+       ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT         = 89,
        /* must be last entry */
        __ETHTOOL_LINK_MODE_MASK_NBITS
 };
index 4dda5e4..5dcd24c 100644 (file)
@@ -41,6 +41,7 @@ enum {
        ETHTOOL_MSG_TSINFO_GET,
        ETHTOOL_MSG_CABLE_TEST_ACT,
        ETHTOOL_MSG_CABLE_TEST_TDR_ACT,
+       ETHTOOL_MSG_TUNNEL_INFO_GET,
 
        /* add new constants above here */
        __ETHTOOL_MSG_USER_CNT,
@@ -236,6 +237,8 @@ enum {
        ETHTOOL_A_LINKSTATE_LINK,               /* u8 */
        ETHTOOL_A_LINKSTATE_SQI,                /* u32 */
        ETHTOOL_A_LINKSTATE_SQI_MAX,            /* u32 */
+       ETHTOOL_A_LINKSTATE_EXT_STATE,          /* u8 */
+       ETHTOOL_A_LINKSTATE_EXT_SUBSTATE,       /* u8 */
 
        /* add new constants above here */
        __ETHTOOL_A_LINKSTATE_CNT,
@@ -554,6 +557,60 @@ enum {
        ETHTOOL_A_CABLE_TEST_TDR_NTF_MAX = __ETHTOOL_A_CABLE_TEST_TDR_NTF_CNT - 1
 };
 
+/* TUNNEL INFO */
+
+enum {
+       ETHTOOL_UDP_TUNNEL_TYPE_VXLAN,
+       ETHTOOL_UDP_TUNNEL_TYPE_GENEVE,
+       ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE,
+
+       __ETHTOOL_UDP_TUNNEL_TYPE_CNT
+};
+
+enum {
+       ETHTOOL_A_TUNNEL_UDP_ENTRY_UNSPEC,
+
+       ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT,                /* be16 */
+       ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE,                /* u32 */
+
+       /* add new constants above here */
+       __ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT,
+       ETHTOOL_A_TUNNEL_UDP_ENTRY_MAX = (__ETHTOOL_A_TUNNEL_UDP_ENTRY_CNT - 1)
+};
+
+enum {
+       ETHTOOL_A_TUNNEL_UDP_TABLE_UNSPEC,
+
+       ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE,                /* u32 */
+       ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,               /* bitset */
+       ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY,               /* nest - _UDP_ENTRY_* */
+
+       /* add new constants above here */
+       __ETHTOOL_A_TUNNEL_UDP_TABLE_CNT,
+       ETHTOOL_A_TUNNEL_UDP_TABLE_MAX = (__ETHTOOL_A_TUNNEL_UDP_TABLE_CNT - 1)
+};
+
+enum {
+       ETHTOOL_A_TUNNEL_UDP_UNSPEC,
+
+       ETHTOOL_A_TUNNEL_UDP_TABLE,                     /* nest - _UDP_TABLE_* */
+
+       /* add new constants above here */
+       __ETHTOOL_A_TUNNEL_UDP_CNT,
+       ETHTOOL_A_TUNNEL_UDP_MAX = (__ETHTOOL_A_TUNNEL_UDP_CNT - 1)
+};
+
+enum {
+       ETHTOOL_A_TUNNEL_INFO_UNSPEC,
+       ETHTOOL_A_TUNNEL_INFO_HEADER,                   /* nest - _A_HEADER_* */
+
+       ETHTOOL_A_TUNNEL_INFO_UDP_PORTS,                /* nest - _UDP_TABLE */
+
+       /* add new constants above here */
+       __ETHTOOL_A_TUNNEL_INFO_CNT,
+       ETHTOOL_A_TUNNEL_INFO_MAX = (__ETHTOOL_A_TUNNEL_INFO_CNT - 1)
+};
+
 /* generic netlink info */
 #define ETHTOOL_GENL_NAME "ethtool"
 #define ETHTOOL_GENL_VERSION 1
index b6aac7e..4c14e8b 100644 (file)
@@ -205,6 +205,7 @@ struct fb_bitfield {
 #define FB_ACTIVATE_ALL               64       /* change all VCs on this fb    */
 #define FB_ACTIVATE_FORCE     128      /* force apply even when no change*/
 #define FB_ACTIVATE_INV_MODE  256       /* invalidate videomode */
+#define FB_ACTIVATE_KD_TEXT   512       /* for KDSET vt ioctl */
 
 #define FB_ACCELF_TEXT         1       /* (OBSOLETE) see fb_info.flags and vc_mode */
 
index 379a612..f44eb0a 100644 (file)
@@ -262,6 +262,7 @@ struct fsxattr {
 #define FS_EA_INODE_FL                 0x00200000 /* Inode used for large EA */
 #define FS_EOFBLOCKS_FL                        0x00400000 /* Reserved for ext4 */
 #define FS_NOCOW_FL                    0x00800000 /* Do not cow file */
+#define FS_DAX_FL                      0x02000000 /* Inode is DAX */
 #define FS_INLINE_DATA_FL              0x10000000 /* Reserved for ext4 */
 #define FS_PROJINHERIT_FL              0x20000000 /* Create with parents projid */
 #define FS_CASEFOLD_FL                 0x40000000 /* Folder is case insensitive */
index 5589eeb..fb169a5 100644 (file)
@@ -19,6 +19,7 @@
 #define _UAPI_LINUX_ICMP_H
 
 #include <linux/types.h>
+#include <asm/byteorder.h>
 
 #define ICMP_ECHOREPLY         0       /* Echo Reply                   */
 #define ICMP_DEST_UNREACH      3       /* Destination Unreachable      */
@@ -95,5 +96,26 @@ struct icmp_filter {
        __u32           data;
 };
 
+/* RFC 4884 extension struct: one per message */
+struct icmp_ext_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+       __u8            reserved1:4,
+                       version:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+       __u8            version:4,
+                       reserved1:4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+       __u8            reserved2;
+       __sum16         checksum;
+};
+
+/* RFC 4884 extension object header: one for each object */
+struct icmp_extobj_hdr {
+       __be16          length;
+       __u8            class_num;
+       __u8            class_type;
+};
 
 #endif /* _UAPI_LINUX_ICMP_H */
index 2622b5a..c1661fe 100644 (file)
@@ -68,6 +68,7 @@ struct icmp6hdr {
 #define icmp6_mtu              icmp6_dataun.un_data32[0]
 #define icmp6_unused           icmp6_dataun.un_data32[0]
 #define icmp6_maxdelay         icmp6_dataun.un_data16[0]
+#define icmp6_datagram_len     icmp6_dataun.un_data8[0]
 #define icmp6_router           icmp6_dataun.u_nd_advt.router
 #define icmp6_solicited                icmp6_dataun.u_nd_advt.solicited
 #define icmp6_override         icmp6_dataun.u_nd_advt.override
index caa6914..c1227ae 100644 (file)
@@ -166,6 +166,10 @@ enum {
        IFLA_BRIDGE_MRP_RING_STATE,
        IFLA_BRIDGE_MRP_RING_ROLE,
        IFLA_BRIDGE_MRP_START_TEST,
+       IFLA_BRIDGE_MRP_INFO,
+       IFLA_BRIDGE_MRP_IN_ROLE,
+       IFLA_BRIDGE_MRP_IN_STATE,
+       IFLA_BRIDGE_MRP_START_IN_TEST,
        __IFLA_BRIDGE_MRP_MAX,
 };
 
@@ -228,6 +232,58 @@ enum {
 
 #define IFLA_BRIDGE_MRP_START_TEST_MAX (__IFLA_BRIDGE_MRP_START_TEST_MAX - 1)
 
+enum {
+       IFLA_BRIDGE_MRP_INFO_UNSPEC,
+       IFLA_BRIDGE_MRP_INFO_RING_ID,
+       IFLA_BRIDGE_MRP_INFO_P_IFINDEX,
+       IFLA_BRIDGE_MRP_INFO_S_IFINDEX,
+       IFLA_BRIDGE_MRP_INFO_PRIO,
+       IFLA_BRIDGE_MRP_INFO_RING_STATE,
+       IFLA_BRIDGE_MRP_INFO_RING_ROLE,
+       IFLA_BRIDGE_MRP_INFO_TEST_INTERVAL,
+       IFLA_BRIDGE_MRP_INFO_TEST_MAX_MISS,
+       IFLA_BRIDGE_MRP_INFO_TEST_MONITOR,
+       IFLA_BRIDGE_MRP_INFO_I_IFINDEX,
+       IFLA_BRIDGE_MRP_INFO_IN_STATE,
+       IFLA_BRIDGE_MRP_INFO_IN_ROLE,
+       IFLA_BRIDGE_MRP_INFO_IN_TEST_INTERVAL,
+       IFLA_BRIDGE_MRP_INFO_IN_TEST_MAX_MISS,
+       __IFLA_BRIDGE_MRP_INFO_MAX,
+};
+
+#define IFLA_BRIDGE_MRP_INFO_MAX (__IFLA_BRIDGE_MRP_INFO_MAX - 1)
+
+enum {
+       IFLA_BRIDGE_MRP_IN_STATE_UNSPEC,
+       IFLA_BRIDGE_MRP_IN_STATE_IN_ID,
+       IFLA_BRIDGE_MRP_IN_STATE_STATE,
+       __IFLA_BRIDGE_MRP_IN_STATE_MAX,
+};
+
+#define IFLA_BRIDGE_MRP_IN_STATE_MAX (__IFLA_BRIDGE_MRP_IN_STATE_MAX - 1)
+
+enum {
+       IFLA_BRIDGE_MRP_IN_ROLE_UNSPEC,
+       IFLA_BRIDGE_MRP_IN_ROLE_RING_ID,
+       IFLA_BRIDGE_MRP_IN_ROLE_IN_ID,
+       IFLA_BRIDGE_MRP_IN_ROLE_ROLE,
+       IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX,
+       __IFLA_BRIDGE_MRP_IN_ROLE_MAX,
+};
+
+#define IFLA_BRIDGE_MRP_IN_ROLE_MAX (__IFLA_BRIDGE_MRP_IN_ROLE_MAX - 1)
+
+enum {
+       IFLA_BRIDGE_MRP_START_IN_TEST_UNSPEC,
+       IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID,
+       IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL,
+       IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS,
+       IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD,
+       __IFLA_BRIDGE_MRP_START_IN_TEST_MAX,
+};
+
+#define IFLA_BRIDGE_MRP_START_IN_TEST_MAX (__IFLA_BRIDGE_MRP_START_IN_TEST_MAX - 1)
+
 struct br_mrp_instance {
        __u32 ring_id;
        __u32 p_ifindex;
@@ -253,6 +309,25 @@ struct br_mrp_start_test {
        __u32 monitor;
 };
 
+struct br_mrp_in_state {
+       __u32 in_state;
+       __u16 in_id;
+};
+
+struct br_mrp_in_role {
+       __u32 ring_id;
+       __u32 in_role;
+       __u32 i_ifindex;
+       __u16 in_id;
+};
+
+struct br_mrp_start_in_test {
+       __u32 interval;
+       __u32 max_miss;
+       __u32 period;
+       __u16 in_id;
+};
+
 struct bridge_stp_xstats {
        __u64 transition_blk;
        __u64 transition_fwd;
index a009365..af8f319 100644 (file)
@@ -344,6 +344,7 @@ enum {
        IFLA_BRPORT_ISOLATED,
        IFLA_BRPORT_BACKUP_PORT,
        IFLA_BRPORT_MRP_RING_OPEN,
+       IFLA_BRPORT_MRP_IN_OPEN,
        __IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
index be328c5..a78a809 100644 (file)
@@ -73,9 +73,12 @@ struct xdp_umem_reg {
 };
 
 struct xdp_statistics {
-       __u64 rx_dropped; /* Dropped for reasons other than invalid desc */
+       __u64 rx_dropped; /* Dropped for other reasons */
        __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
        __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
+       __u64 rx_ring_full; /* Dropped due to rx ring being full */
+       __u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */
+       __u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */
 };
 
 struct xdp_options {
index 8533bf0..3d0d823 100644 (file)
@@ -123,6 +123,7 @@ struct in_addr {
 #define IP_CHECKSUM    23
 #define IP_BIND_ADDRESS_NO_PORT        24
 #define IP_RECVFRAGSIZE        25
+#define IP_RECVERR_RFC4884     26
 
 /* IP_MTU_DISCOVER values */
 #define IP_PMTUDISC_DONT               0       /* Never send DF frames */
index 9f2273a..5ad396a 100644 (file)
@@ -179,6 +179,7 @@ struct in6_flowlabel_req {
 #define IPV6_LEAVE_ANYCAST     28
 #define IPV6_MULTICAST_ALL     29
 #define IPV6_ROUTER_ALERT_ISOLATE      30
+#define IPV6_RECVERR_RFC4884   31
 
 /* IPV6_MTU_DISCOVER values */
 #define IPV6_PMTUDISC_DONT             0
index e6f183e..5ba122c 100644 (file)
@@ -65,6 +65,7 @@ enum {
        INET_DIAG_REQ_NONE,
        INET_DIAG_REQ_BYTECODE,
        INET_DIAG_REQ_SK_BPF_STORAGES,
+       INET_DIAG_REQ_PROTOCOL,
        __INET_DIAG_REQ_MAX,
 };
 
index 92c2269..7843742 100644 (file)
@@ -197,6 +197,7 @@ struct io_sqring_offsets {
  * sq_ring->flags
  */
 #define IORING_SQ_NEED_WAKEUP  (1U << 0) /* needs io_uring_enter wakeup */
+#define IORING_SQ_CQ_OVERFLOW  (1U << 1) /* CQ ring is overflown */
 
 struct io_cqring_offsets {
        __u32 head;
index 4bcb41c..3f302e2 100644 (file)
@@ -324,4 +324,30 @@ static inline __u16 mdio_phy_id_c45(int prtad, int devad)
        return MDIO_PHY_ID_C45 | (prtad << 5) | devad;
 }
 
+/* UsxgmiiChannelInfo[15:0] for USXGMII in-band auto-negotiation.*/
+#define MDIO_USXGMII_EEE_CLK_STP       0x0080  /* EEE clock stop supported */
+#define MDIO_USXGMII_EEE               0x0100  /* EEE supported */
+#define MDIO_USXGMII_SPD_MASK          0x0e00  /* USXGMII speed mask */
+#define MDIO_USXGMII_FULL_DUPLEX       0x1000  /* USXGMII full duplex */
+#define MDIO_USXGMII_DPX_SPD_MASK      0x1e00  /* USXGMII duplex and speed bits */
+#define MDIO_USXGMII_10                        0x0000  /* 10Mbps */
+#define MDIO_USXGMII_10HALF            0x0000  /* 10Mbps half-duplex */
+#define MDIO_USXGMII_10FULL            0x1000  /* 10Mbps full-duplex */
+#define MDIO_USXGMII_100               0x0200  /* 100Mbps */
+#define MDIO_USXGMII_100HALF           0x0200  /* 100Mbps half-duplex */
+#define MDIO_USXGMII_100FULL           0x1200  /* 100Mbps full-duplex */
+#define MDIO_USXGMII_1000              0x0400  /* 1000Mbps */
+#define MDIO_USXGMII_1000HALF          0x0400  /* 1000Mbps half-duplex */
+#define MDIO_USXGMII_1000FULL          0x1400  /* 1000Mbps full-duplex */
+#define MDIO_USXGMII_10G               0x0600  /* 10Gbps */
+#define MDIO_USXGMII_10GHALF           0x0600  /* 10Gbps half-duplex */
+#define MDIO_USXGMII_10GFULL           0x1600  /* 10Gbps full-duplex */
+#define MDIO_USXGMII_2500              0x0800  /* 2500Mbps */
+#define MDIO_USXGMII_2500HALF          0x0800  /* 2500Mbps half-duplex */
+#define MDIO_USXGMII_2500FULL          0x1800  /* 2500Mbps full-duplex */
+#define MDIO_USXGMII_5000              0x0a00  /* 5000Mbps */
+#define MDIO_USXGMII_5000HALF          0x0a00  /* 5000Mbps half-duplex */
+#define MDIO_USXGMII_5000FULL          0x1a00  /* 5000Mbps full-duplex */
+#define MDIO_USXGMII_LINK              0x8000  /* PHY link with copper-side partner */
+
 #endif /* _UAPI__LINUX_MDIO_H__ */
index 5f2c770..9762660 100644 (file)
@@ -86,4 +86,21 @@ enum {
        __MPTCP_PM_CMD_AFTER_LAST
 };
 
+#define MPTCP_INFO_FLAG_FALLBACK               _BITUL(0)
+#define MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED    _BITUL(1)
+
+struct mptcp_info {
+       __u8    mptcpi_subflows;
+       __u8    mptcpi_add_addr_signal;
+       __u8    mptcpi_add_addr_accepted;
+       __u8    mptcpi_subflows_max;
+       __u8    mptcpi_add_addr_signal_max;
+       __u8    mptcpi_add_addr_accepted_max;
+       __u32   mptcpi_flags;
+       __u32   mptcpi_token;
+       __u64   mptcpi_write_seq;
+       __u64   mptcpi_snd_una;
+       __u64   mptcpi_rcv_nxt;
+};
+
 #endif /* _UAPI_MPTCP_H */
index 84f15f4..6aeb13e 100644 (file)
@@ -21,11 +21,22 @@ enum br_mrp_ring_role_type {
        BR_MRP_RING_ROLE_MRA,
 };
 
+enum br_mrp_in_role_type {
+       BR_MRP_IN_ROLE_DISABLED,
+       BR_MRP_IN_ROLE_MIC,
+       BR_MRP_IN_ROLE_MIM,
+};
+
 enum br_mrp_ring_state_type {
        BR_MRP_RING_STATE_OPEN,
        BR_MRP_RING_STATE_CLOSED,
 };
 
+enum br_mrp_in_state_type {
+       BR_MRP_IN_STATE_OPEN,
+       BR_MRP_IN_STATE_CLOSED,
+};
+
 enum br_mrp_port_state_type {
        BR_MRP_PORT_STATE_DISABLED,
        BR_MRP_PORT_STATE_BLOCKED,
@@ -36,7 +47,7 @@ enum br_mrp_port_state_type {
 enum br_mrp_port_role_type {
        BR_MRP_PORT_ROLE_PRIMARY,
        BR_MRP_PORT_ROLE_SECONDARY,
-       BR_MRP_PORT_ROLE_NONE,
+       BR_MRP_PORT_ROLE_INTER,
 };
 
 enum br_mrp_tlv_header_type {
@@ -46,6 +57,10 @@ enum br_mrp_tlv_header_type {
        BR_MRP_TLV_HEADER_RING_TOPO = 0x3,
        BR_MRP_TLV_HEADER_RING_LINK_DOWN = 0x4,
        BR_MRP_TLV_HEADER_RING_LINK_UP = 0x5,
+       BR_MRP_TLV_HEADER_IN_TEST = 0x6,
+       BR_MRP_TLV_HEADER_IN_TOPO = 0x7,
+       BR_MRP_TLV_HEADER_IN_LINK_DOWN = 0x8,
+       BR_MRP_TLV_HEADER_IN_LINK_UP = 0x9,
        BR_MRP_TLV_HEADER_OPTION = 0x7f,
 };
 
@@ -119,4 +134,26 @@ struct br_mrp_oui_hdr {
        __u8 oui[MRP_OUI_LENGTH];
 };
 
+struct br_mrp_in_test_hdr {
+       __be16 id;
+       __u8 sa[ETH_ALEN];
+       __be16 port_role;
+       __be16 state;
+       __be16 transitions;
+       __be32 timestamp;
+};
+
+struct br_mrp_in_topo_hdr {
+       __u8 sa[ETH_ALEN];
+       __be16 id;
+       __be16 interval;
+};
+
+struct br_mrp_in_link_hdr {
+       __u8 sa[ETH_ALEN];
+       __be16 port_role;
+       __be16 id;
+       __be16 interval;
+};
+
 #endif
index de5d902..0e09dc5 100644 (file)
@@ -244,6 +244,7 @@ struct nd_cmd_pkg {
 #define NVDIMM_FAMILY_HPE2 2
 #define NVDIMM_FAMILY_MSFT 3
 #define NVDIMM_FAMILY_HYPERV 4
+#define NVDIMM_FAMILY_PAPR 5
 
 #define ND_IOCTL_CALL                  _IOWR(ND_IOCTL, ND_CMD_CALL,\
                                        struct nd_cmd_pkg)
index eefcda8..dc8b722 100644 (file)
@@ -30,6 +30,7 @@ enum {
        NDA_SRC_VNI,
        NDA_PROTOCOL,  /* Originator of entry */
        NDA_NH_ID,
+       NDA_FDB_EXT_ATTRS,
        __NDA_MAX
 };
 
@@ -172,4 +173,27 @@ enum {
 };
 #define NDTA_MAX (__NDTA_MAX - 1)
 
+ /* FDB activity notification bits used in NFEA_ACTIVITY_NOTIFY:
+  * - FDB_NOTIFY_BIT - notify on activity/expire for any entry
+  * - FDB_NOTIFY_INACTIVE_BIT - mark as inactive to avoid multiple notifications
+  */
+enum {
+       FDB_NOTIFY_BIT          = (1 << 0),
+       FDB_NOTIFY_INACTIVE_BIT = (1 << 1)
+};
+
+/* embedded into NDA_FDB_EXT_ATTRS:
+ * [NDA_FDB_EXT_ATTRS] = {
+ *     [NFEA_ACTIVITY_NOTIFY]
+ *     ...
+ * }
+ */
+enum {
+       NFEA_UNSPEC,
+       NFEA_ACTIVITY_NOTIFY,
+       NFEA_DONT_REFRESH,
+       __NFEA_MAX
+};
+#define NFEA_MAX (__NFEA_MAX - 1)
+
 #endif
index 4565456..42f351c 100644 (file)
@@ -184,6 +184,15 @@ enum nft_table_attributes {
 };
 #define NFTA_TABLE_MAX         (__NFTA_TABLE_MAX - 1)
 
+enum nft_chain_flags {
+       NFT_CHAIN_BASE          = (1 << 0),
+       NFT_CHAIN_HW_OFFLOAD    = (1 << 1),
+       NFT_CHAIN_BINDING       = (1 << 2),
+};
+#define NFT_CHAIN_FLAGS                (NFT_CHAIN_BASE         | \
+                                NFT_CHAIN_HW_OFFLOAD   | \
+                                NFT_CHAIN_BINDING)
+
 /**
  * enum nft_chain_attributes - nf_tables chain netlink attributes
  *
@@ -196,6 +205,7 @@ enum nft_table_attributes {
  * @NFTA_CHAIN_TYPE: type name of the string (NLA_NUL_STRING)
  * @NFTA_CHAIN_COUNTERS: counter specification of the chain (NLA_NESTED: nft_counter_attributes)
  * @NFTA_CHAIN_FLAGS: chain flags
+ * @NFTA_CHAIN_ID: uniquely identifies a chain in a transaction (NLA_U32)
  */
 enum nft_chain_attributes {
        NFTA_CHAIN_UNSPEC,
@@ -209,6 +219,7 @@ enum nft_chain_attributes {
        NFTA_CHAIN_COUNTERS,
        NFTA_CHAIN_PAD,
        NFTA_CHAIN_FLAGS,
+       NFTA_CHAIN_ID,
        __NFTA_CHAIN_MAX
 };
 #define NFTA_CHAIN_MAX         (__NFTA_CHAIN_MAX - 1)
@@ -238,6 +249,7 @@ enum nft_rule_attributes {
        NFTA_RULE_PAD,
        NFTA_RULE_ID,
        NFTA_RULE_POSITION_ID,
+       NFTA_RULE_CHAIN_ID,
        __NFTA_RULE_MAX
 };
 #define NFTA_RULE_MAX          (__NFTA_RULE_MAX - 1)
@@ -468,11 +480,13 @@ enum nft_data_attributes {
  *
  * @NFTA_VERDICT_CODE: nf_tables verdict (NLA_U32: enum nft_verdicts)
  * @NFTA_VERDICT_CHAIN: jump target chain name (NLA_STRING)
+ * @NFTA_VERDICT_CHAIN_ID: jump target chain ID (NLA_U32)
  */
 enum nft_verdict_attributes {
        NFTA_VERDICT_UNSPEC,
        NFTA_VERDICT_CODE,
        NFTA_VERDICT_CHAIN,
+       NFTA_VERDICT_CHAIN_ID,
        __NFTA_VERDICT_MAX
 };
 #define NFTA_VERDICT_MAX       (__NFTA_VERDICT_MAX - 1)
index 7576209..ee95f42 100644 (file)
@@ -578,6 +578,9 @@ enum {
 
        TCA_FLOWER_KEY_MPLS_OPTS,
 
+       TCA_FLOWER_KEY_HASH,            /* u32 */
+       TCA_FLOWER_KEY_HASH_MASK,       /* u32 */
+
        __TCA_FLOWER_MAX,
 };
 
index a95f3ae..9e7c2c6 100644 (file)
@@ -257,6 +257,8 @@ enum {
        TCA_RED_STAB,
        TCA_RED_MAX_P,
        TCA_RED_FLAGS,          /* bitfield32 */
+       TCA_RED_EARLY_DROP_BLOCK, /* u32 */
+       TCA_RED_MARK_BLOCK,     /* u32 */
        __TCA_RED_MAX,
 };
 
index ff070aa..1d108d5 100644 (file)
 /*
  * Bits of the ptp_perout_request.flags field:
  */
-#define PTP_PEROUT_ONE_SHOT (1<<0)
+#define PTP_PEROUT_ONE_SHOT            (1<<0)
+#define PTP_PEROUT_DUTY_CYCLE          (1<<1)
+#define PTP_PEROUT_PHASE               (1<<2)
 
 /*
  * flag fields valid for the new PTP_PEROUT_REQUEST2 ioctl.
  */
-#define PTP_PEROUT_VALID_FLAGS (PTP_PEROUT_ONE_SHOT)
+#define PTP_PEROUT_VALID_FLAGS         (PTP_PEROUT_ONE_SHOT | \
+                                        PTP_PEROUT_DUTY_CYCLE | \
+                                        PTP_PEROUT_PHASE)
 
 /*
  * No flags are valid for the original PTP_PEROUT_REQUEST ioctl
@@ -101,11 +105,33 @@ struct ptp_extts_request {
 };
 
 struct ptp_perout_request {
-       struct ptp_clock_time start;  /* Absolute start time. */
+       union {
+               /*
+                * Absolute start time.
+                * Valid only if (flags & PTP_PEROUT_PHASE) is unset.
+                */
+               struct ptp_clock_time start;
+               /*
+                * Phase offset. The signal should start toggling at an
+                * unspecified integer multiple of the period, plus this value.
+                * The start time should be "as soon as possible".
+                * Valid only if (flags & PTP_PEROUT_PHASE) is set.
+                */
+               struct ptp_clock_time phase;
+       };
        struct ptp_clock_time period; /* Desired period, zero means disable. */
        unsigned int index;           /* Which channel to configure. */
        unsigned int flags;
-       unsigned int rsv[4];          /* Reserved for future use. */
+       union {
+               /*
+                * The "on" time of the signal.
+                * Must be lower than the period.
+                * Valid only if (flags & PTP_PEROUT_DUTY_CYCLE) is set.
+                */
+               struct ptp_clock_time on;
+               /* Reserved for future use. */
+               unsigned int rsv[4];
+       };
 };
 
 #define PTP_MAX_SAMPLES 25 /* Maximum allowed offset measurement samples. */
index cba368e..c21edb9 100644 (file)
 
 /* supported values for SO_RDS_TRANSPORT */
 #define        RDS_TRANS_IB    0
-#define        RDS_TRANS_IWARP 1
+#define        RDS_TRANS_GAP   1
 #define        RDS_TRANS_TCP   2
 #define RDS_TRANS_COUNT        3
 #define        RDS_TRANS_NONE  (~0)
+/* don't use RDS_TRANS_IWARP - it is deprecated */
+#define RDS_TRANS_IWARP RDS_TRANS_GAP
 
 /* IOCTLS commands for SOL_RDS */
 #define SIOCRDSSETTOS          (SIOCPROTOPRIVATE)
index 073e71e..9b814c9 100644 (file)
@@ -257,12 +257,12 @@ enum {
 
 /* rtm_protocol */
 
-#define RTPROT_UNSPEC  0
-#define RTPROT_REDIRECT        1       /* Route installed by ICMP redirects;
-                                  not used by current IPv4 */
-#define RTPROT_KERNEL  2       /* Route installed by kernel            */
-#define RTPROT_BOOT    3       /* Route installed during boot          */
-#define RTPROT_STATIC  4       /* Route installed by administrator     */
+#define RTPROT_UNSPEC          0
+#define RTPROT_REDIRECT                1       /* Route installed by ICMP redirects;
+                                          not used by current IPv4 */
+#define RTPROT_KERNEL          2       /* Route installed by kernel            */
+#define RTPROT_BOOT            3       /* Route installed during boot          */
+#define RTPROT_STATIC          4       /* Route installed by administrator     */
 
 /* Values of protocol >= RTPROT_STATIC are not interpreted by kernel;
    they are just passed from user and back as is.
@@ -271,22 +271,23 @@ enum {
    avoid conflicts.
  */
 
-#define RTPROT_GATED   8       /* Apparently, GateD */
-#define RTPROT_RA      9       /* RDISC/ND router advertisements */
-#define RTPROT_MRT     10      /* Merit MRT */
-#define RTPROT_ZEBRA   11      /* Zebra */
-#define RTPROT_BIRD    12      /* BIRD */
-#define RTPROT_DNROUTED        13      /* DECnet routing daemon */
-#define RTPROT_XORP    14      /* XORP */
-#define RTPROT_NTK     15      /* Netsukuku */
-#define RTPROT_DHCP    16      /* DHCP client */
-#define RTPROT_MROUTED 17      /* Multicast daemon */
-#define RTPROT_BABEL   42      /* Babel daemon */
-#define RTPROT_BGP     186     /* BGP Routes */
-#define RTPROT_ISIS    187     /* ISIS Routes */
-#define RTPROT_OSPF    188     /* OSPF Routes */
-#define RTPROT_RIP     189     /* RIP Routes */
-#define RTPROT_EIGRP   192     /* EIGRP Routes */
+#define RTPROT_GATED           8       /* Apparently, GateD */
+#define RTPROT_RA              9       /* RDISC/ND router advertisements */
+#define RTPROT_MRT             10      /* Merit MRT */
+#define RTPROT_ZEBRA           11      /* Zebra */
+#define RTPROT_BIRD            12      /* BIRD */
+#define RTPROT_DNROUTED                13      /* DECnet routing daemon */
+#define RTPROT_XORP            14      /* XORP */
+#define RTPROT_NTK             15      /* Netsukuku */
+#define RTPROT_DHCP            16      /* DHCP client */
+#define RTPROT_MROUTED         17      /* Multicast daemon */
+#define RTPROT_KEEPALIVED      18      /* Keepalived daemon */
+#define RTPROT_BABEL           42      /* Babel daemon */
+#define RTPROT_BGP             186     /* BGP Routes */
+#define RTPROT_ISIS            187     /* ISIS Routes */
+#define RTPROT_OSPF            188     /* OSPF Routes */
+#define RTPROT_RIP             189     /* RIP Routes */
+#define RTPROT_EIGRP           192     /* EIGRP Routes */
 
 /* rtm_scope
 
@@ -777,6 +778,7 @@ enum {
 #define RTEXT_FILTER_BRVLAN    (1 << 1)
 #define RTEXT_FILTER_BRVLAN_COMPRESSED (1 << 2)
 #define        RTEXT_FILTER_SKIP_STATS (1 << 3)
+#define RTEXT_FILTER_MRP       (1 << 4)
 
 /* End of information exported to user level */
 
index 7d91f4d..cee9f8e 100644 (file)
@@ -287,6 +287,7 @@ enum
        LINUX_MIB_TCPFASTOPENPASSIVEALTKEY,     /* TCPFastOpenPassiveAltKey */
        LINUX_MIB_TCPTIMEOUTREHASH,             /* TCPTimeoutRehash */
        LINUX_MIB_TCPDUPLICATEDATAREHASH,       /* TCPDuplicateDataRehash */
+       LINUX_MIB_TCPDSACKRECVSEGS,             /* TCPDSACKRecvSegs */
        __LINUX_MIB_MAX
 };
 
index ee0f246..d56427c 100644 (file)
 #define SPI_TX_QUAD            0x200
 #define SPI_RX_DUAL            0x400
 #define SPI_RX_QUAD            0x800
+#define SPI_CS_WORD            0x1000
+#define SPI_TX_OCTAL           0x2000
+#define SPI_RX_OCTAL           0x4000
+#define SPI_3WIRE_HIZ          0x8000
 
 /*---------------------------------------------------------------------------*/
 
index eca6692..9204705 100644 (file)
@@ -1030,7 +1030,7 @@ struct vfio_iommu_type1_info_cap_iova_range {
  * size in bytes that can be used by user applications when getting the dirty
  * bitmap.
  */
-#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION  1
+#define VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION  2
 
 struct vfio_iommu_type1_info_cap_migration {
        struct  vfio_info_cap_header header;
index c1395b5..9463db2 100644 (file)
@@ -7,6 +7,7 @@
   Copyright (C) 2001 by Andreas Gruenbacher <a.gruenbacher@computer.org>
   Copyright (c) 2001-2002 Silicon Graphics, Inc.  All Rights Reserved.
   Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
+  Copyright (c) 2020 Jan (janneke) Nieuwenhuizen <janneke@gnu.org>
 */
 
 #include <linux/libc-compat.h>
@@ -31,6 +32,9 @@
 #define XATTR_BTRFS_PREFIX "btrfs."
 #define XATTR_BTRFS_PREFIX_LEN (sizeof(XATTR_BTRFS_PREFIX) - 1)
 
+#define XATTR_HURD_PREFIX "gnu."
+#define XATTR_HURD_PREFIX_LEN (sizeof(XATTR_HURD_PREFIX) - 1)
+
 #define XATTR_SECURITY_PREFIX  "security."
 #define XATTR_SECURITY_PREFIX_LEN (sizeof(XATTR_SECURITY_PREFIX) - 1)
 
index 78b2591..66b9973 100644 (file)
@@ -30,6 +30,7 @@ struct xdp_diag_msg {
 #define XDP_SHOW_RING_CFG      (1 << 1)
 #define XDP_SHOW_UMEM          (1 << 2)
 #define XDP_SHOW_MEMINFO       (1 << 3)
+#define XDP_SHOW_STATS         (1 << 4)
 
 enum {
        XDP_DIAG_NONE,
@@ -41,6 +42,7 @@ enum {
        XDP_DIAG_UMEM_FILL_RING,
        XDP_DIAG_UMEM_COMPLETION_RING,
        XDP_DIAG_MEMINFO,
+       XDP_DIAG_STATS,
        __XDP_DIAG_MAX,
 };
 
@@ -69,4 +71,13 @@ struct xdp_diag_umem {
        __u32   refs;
 };
 
+struct xdp_diag_stats {
+       __u64   n_rx_dropped;
+       __u64   n_rx_invalid;
+       __u64   n_rx_full;
+       __u64   n_fill_ring_empty;
+       __u64   n_tx_invalid;
+       __u64   n_tx_ring_empty;
+};
+
 #endif /* _LINUX_XDP_DIAG_H */
index 4f20dbc..2194322 100644 (file)
  */
 
 /*
+ * "xdp-headroom" is used to request that extra space is added
+ * for XDP processing.  The value is measured in bytes and passed by
+ * the frontend to be consistent between both ends.
+ * If the value is greater than zero that means that
+ * an RX response is going to be passed to an XDP program for processing.
+ * XEN_NETIF_MAX_XDP_HEADROOM defines the maximum headroom offset in bytes
+ *
+ * "feature-xdp-headroom" is set to "1" by the netback side like other features
+ * so a guest can check if an XDP program can be processed.
+ */
+#define XEN_NETIF_MAX_XDP_HEADROOM 0x7FFF
+
+/*
  * Control ring
  * ============
  *
@@ -846,7 +859,8 @@ struct xen_netif_tx_request {
 #define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2)     /* u.mcast */
 #define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3)     /* u.mcast */
 #define XEN_NETIF_EXTRA_TYPE_HASH      (4)     /* u.hash */
-#define XEN_NETIF_EXTRA_TYPE_MAX       (5)
+#define XEN_NETIF_EXTRA_TYPE_XDP       (5)     /* u.xdp */
+#define XEN_NETIF_EXTRA_TYPE_MAX       (6)
 
 /* xen_netif_extra_info_t flags. */
 #define _XEN_NETIF_EXTRA_FLAG_MORE (0)
@@ -879,6 +893,10 @@ struct xen_netif_extra_info {
                        uint8_t algorithm;
                        uint8_t value[4];
                } hash;
+               struct {
+                       uint16_t headroom;
+                       uint16_t pad[2];
+               } xdp;
                uint16_t pad[3];
        } u;
 };
index a46aa8f..0498af5 100644 (file)
@@ -49,13 +49,13 @@ config CLANG_VERSION
 
 config CC_CAN_LINK
        bool
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(m64-flag)) if 64BIT
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(m32-flag))
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag)) if 64BIT
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m32-flag))
 
 config CC_CAN_LINK_STATIC
        bool
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) -static $(m64-flag)) if 64BIT
-       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) -static $(m32-flag))
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m64-flag) -static) if 64BIT
+       default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(m32-flag) -static)
 
 config CC_HAS_ASM_GOTO
        def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
index f3218bc..4392875 100644 (file)
@@ -12,6 +12,7 @@ obj-y     = fork.o exec_domain.o panic.o \
            notifier.o ksysfs.o cred.o reboot.o \
            async.o range.o smpboot.o ucount.o
 
+obj-$(CONFIG_BPFILTER) += usermode_driver.o
 obj-$(CONFIG_MODULES) += kmod.o
 obj-$(CONFIG_MULTIUSER) += groups.o
 
index 1158461..c66e827 100644 (file)
@@ -386,13 +386,6 @@ static void array_map_free(struct bpf_map *map)
 {
        struct bpf_array *array = container_of(map, struct bpf_array, map);
 
-       /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
-        * so the programs (can be more than one that used this map) were
-        * disconnected from events. Wait for outstanding programs to complete
-        * and free the array
-        */
-       synchronize_rcu();
-
        if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                bpf_array_free_percpu(array);
 
@@ -494,6 +487,7 @@ static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
                                   vma->vm_pgoff + pgoff);
 }
 
+static int array_map_btf_id;
 const struct bpf_map_ops array_map_ops = {
        .map_alloc_check = array_map_alloc_check,
        .map_alloc = array_map_alloc,
@@ -510,8 +504,11 @@ const struct bpf_map_ops array_map_ops = {
        .map_check_btf = array_map_check_btf,
        .map_lookup_batch = generic_map_lookup_batch,
        .map_update_batch = generic_map_update_batch,
+       .map_btf_name = "bpf_array",
+       .map_btf_id = &array_map_btf_id,
 };
 
+static int percpu_array_map_btf_id;
 const struct bpf_map_ops percpu_array_map_ops = {
        .map_alloc_check = array_map_alloc_check,
        .map_alloc = array_map_alloc,
@@ -522,6 +519,8 @@ const struct bpf_map_ops percpu_array_map_ops = {
        .map_delete_elem = array_map_delete_elem,
        .map_seq_show_elem = percpu_array_map_seq_show_elem,
        .map_check_btf = array_map_check_btf,
+       .map_btf_name = "bpf_array",
+       .map_btf_id = &percpu_array_map_btf_id,
 };
 
 static int fd_array_map_alloc_check(union bpf_attr *attr)
@@ -540,8 +539,6 @@ static void fd_array_map_free(struct bpf_map *map)
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        int i;
 
-       synchronize_rcu();
-
        /* make sure it's empty */
        for (i = 0; i < array->map.max_entries; i++)
                BUG_ON(array->ptrs[i] != NULL);
@@ -868,6 +865,7 @@ static void prog_array_map_free(struct bpf_map *map)
        fd_array_map_free(map);
 }
 
+static int prog_array_map_btf_id;
 const struct bpf_map_ops prog_array_map_ops = {
        .map_alloc_check = fd_array_map_alloc_check,
        .map_alloc = prog_array_map_alloc,
@@ -883,6 +881,8 @@ const struct bpf_map_ops prog_array_map_ops = {
        .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
        .map_release_uref = prog_array_map_clear,
        .map_seq_show_elem = prog_array_map_seq_show_elem,
+       .map_btf_name = "bpf_array",
+       .map_btf_id = &prog_array_map_btf_id,
 };
 
 static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
@@ -961,6 +961,7 @@ static void perf_event_fd_array_release(struct bpf_map *map,
        rcu_read_unlock();
 }
 
+static int perf_event_array_map_btf_id;
 const struct bpf_map_ops perf_event_array_map_ops = {
        .map_alloc_check = fd_array_map_alloc_check,
        .map_alloc = array_map_alloc,
@@ -972,6 +973,8 @@ const struct bpf_map_ops perf_event_array_map_ops = {
        .map_fd_put_ptr = perf_event_fd_array_put_ptr,
        .map_release = perf_event_fd_array_release,
        .map_check_btf = map_check_no_btf,
+       .map_btf_name = "bpf_array",
+       .map_btf_id = &perf_event_array_map_btf_id,
 };
 
 #ifdef CONFIG_CGROUPS
@@ -994,6 +997,7 @@ static void cgroup_fd_array_free(struct bpf_map *map)
        fd_array_map_free(map);
 }
 
+static int cgroup_array_map_btf_id;
 const struct bpf_map_ops cgroup_array_map_ops = {
        .map_alloc_check = fd_array_map_alloc_check,
        .map_alloc = array_map_alloc,
@@ -1004,6 +1008,8 @@ const struct bpf_map_ops cgroup_array_map_ops = {
        .map_fd_get_ptr = cgroup_fd_array_get_ptr,
        .map_fd_put_ptr = cgroup_fd_array_put_ptr,
        .map_check_btf = map_check_no_btf,
+       .map_btf_name = "bpf_array",
+       .map_btf_id = &cgroup_array_map_btf_id,
 };
 #endif
 
@@ -1077,6 +1083,7 @@ static u32 array_of_map_gen_lookup(struct bpf_map *map,
        return insn - insn_buf;
 }
 
+static int array_of_maps_map_btf_id;
 const struct bpf_map_ops array_of_maps_map_ops = {
        .map_alloc_check = fd_array_map_alloc_check,
        .map_alloc = array_of_map_alloc,
@@ -1089,4 +1096,6 @@ const struct bpf_map_ops array_of_maps_map_ops = {
        .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
        .map_gen_lookup = array_of_map_gen_lookup,
        .map_check_btf = map_check_no_btf,
+       .map_btf_name = "bpf_array",
+       .map_btf_id = &array_of_maps_map_btf_id,
 };
index c6b0dec..969c5d4 100644 (file)
@@ -611,6 +611,7 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
        return map;
 }
 
+static int bpf_struct_ops_map_btf_id;
 const struct bpf_map_ops bpf_struct_ops_map_ops = {
        .map_alloc_check = bpf_struct_ops_map_alloc_check,
        .map_alloc = bpf_struct_ops_map_alloc,
@@ -620,6 +621,8 @@ const struct bpf_map_ops bpf_struct_ops_map_ops = {
        .map_delete_elem = bpf_struct_ops_map_delete_elem,
        .map_update_elem = bpf_struct_ops_map_update_elem,
        .map_seq_show_elem = bpf_struct_ops_map_seq_show_elem,
+       .map_btf_name = "bpf_struct_ops_map",
+       .map_btf_id = &bpf_struct_ops_map_btf_id,
 };
 
 /* "const void *" because some subsystem is
index 58c9af1..ee36b7f 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/sort.h>
 #include <linux/bpf_verifier.h>
 #include <linux/btf.h>
+#include <linux/btf_ids.h>
 #include <linux/skmsg.h>
 #include <linux/perf_event.h>
 #include <net/sock.h>
@@ -3571,6 +3572,41 @@ btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf,
        return ctx_type;
 }
 
+static const struct bpf_map_ops * const btf_vmlinux_map_ops[] = {
+#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
+#define BPF_LINK_TYPE(_id, _name)
+#define BPF_MAP_TYPE(_id, _ops) \
+       [_id] = &_ops,
+#include <linux/bpf_types.h>
+#undef BPF_PROG_TYPE
+#undef BPF_LINK_TYPE
+#undef BPF_MAP_TYPE
+};
+
+static int btf_vmlinux_map_ids_init(const struct btf *btf,
+                                   struct bpf_verifier_log *log)
+{
+       const struct bpf_map_ops *ops;
+       int i, btf_id;
+
+       for (i = 0; i < ARRAY_SIZE(btf_vmlinux_map_ops); ++i) {
+               ops = btf_vmlinux_map_ops[i];
+               if (!ops || (!ops->map_btf_name && !ops->map_btf_id))
+                       continue;
+               if (!ops->map_btf_name || !ops->map_btf_id) {
+                       bpf_log(log, "map type %d is misconfigured\n", i);
+                       return -EINVAL;
+               }
+               btf_id = btf_find_by_name_kind(btf, ops->map_btf_name,
+                                              BTF_KIND_STRUCT);
+               if (btf_id < 0)
+                       return btf_id;
+               *ops->map_btf_id = btf_id;
+       }
+
+       return 0;
+}
+
 static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
                                     struct btf *btf,
                                     const struct btf_type *t,
@@ -3586,12 +3622,15 @@ static int btf_translate_to_vmlinux(struct bpf_verifier_log *log,
        return kern_ctx_type->type;
 }
 
+BTF_ID_LIST(bpf_ctx_convert_btf_id)
+BTF_ID(struct, bpf_ctx_convert)
+
 struct btf *btf_parse_vmlinux(void)
 {
        struct btf_verifier_env *env = NULL;
        struct bpf_verifier_log *log;
        struct btf *btf = NULL;
-       int err, i;
+       int err;
 
        env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
        if (!env)
@@ -3624,25 +3663,13 @@ struct btf *btf_parse_vmlinux(void)
        if (err)
                goto errout;
 
-       /* find struct bpf_ctx_convert for type checking later */
-       for (i = 1; i <= btf->nr_types; i++) {
-               const struct btf_type *t;
-               const char *tname;
+       /* btf_parse_vmlinux() runs under bpf_verifier_lock */
+       bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]);
 
-               t = btf_type_by_id(btf, i);
-               if (!__btf_type_is_struct(t))
-                       continue;
-               tname = __btf_name_by_offset(btf, t->name_off);
-               if (!strcmp(tname, "bpf_ctx_convert")) {
-                       /* btf_parse_vmlinux() runs under bpf_verifier_lock */
-                       bpf_ctx_convert.t = t;
-                       break;
-               }
-       }
-       if (i > btf->nr_types) {
-               err = -ENOENT;
+       /* find bpf map structs for map_ptr access checking */
+       err = btf_vmlinux_map_ids_init(btf, log);
+       if (err < 0)
                goto errout;
-       }
 
        bpf_struct_ops_init(btf, log);
 
@@ -3746,7 +3773,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
                                return false;
 
                        t = btf_type_skip_modifiers(btf, t->type, NULL);
-                       if (!btf_type_is_int(t)) {
+                       if (!btf_type_is_small_int(t)) {
                                bpf_log(log,
                                        "ret type %s not allowed for fmod_ret\n",
                                        btf_kind_str[BTF_INFO_KIND(t->info)]);
@@ -3768,7 +3795,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
        /* skip modifiers */
        while (btf_type_is_modifier(t))
                t = btf_type_by_id(btf, t->type);
-       if (btf_type_is_int(t) || btf_type_is_enum(t))
+       if (btf_type_is_small_int(t) || btf_type_is_enum(t))
                /* accessing a scalar */
                return true;
        if (!btf_type_is_ptr(t)) {
@@ -3790,16 +3817,17 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
                return true;
 
        /* this is a pointer to another type */
-       info->reg_type = PTR_TO_BTF_ID;
        for (i = 0; i < prog->aux->ctx_arg_info_size; i++) {
                const struct bpf_ctx_arg_aux *ctx_arg_info = &prog->aux->ctx_arg_info[i];
 
                if (ctx_arg_info->offset == off) {
                        info->reg_type = ctx_arg_info->reg_type;
-                       break;
+                       info->btf_id = ctx_arg_info->btf_id;
+                       return true;
                }
        }
 
+       info->reg_type = PTR_TO_BTF_ID;
        if (tgt_prog) {
                ret = btf_translate_to_vmlinux(log, btf, t, tgt_prog->type, arg);
                if (ret > 0) {
@@ -4049,96 +4077,17 @@ error:
        return -EINVAL;
 }
 
-static int __btf_resolve_helper_id(struct bpf_verifier_log *log, void *fn,
-                                  int arg)
-{
-       char fnname[KSYM_SYMBOL_LEN + 4] = "btf_";
-       const struct btf_param *args;
-       const struct btf_type *t;
-       const char *tname, *sym;
-       u32 btf_id, i;
-
-       if (IS_ERR(btf_vmlinux)) {
-               bpf_log(log, "btf_vmlinux is malformed\n");
-               return -EINVAL;
-       }
-
-       sym = kallsyms_lookup((long)fn, NULL, NULL, NULL, fnname + 4);
-       if (!sym) {
-               bpf_log(log, "kernel doesn't have kallsyms\n");
-               return -EFAULT;
-       }
-
-       for (i = 1; i <= btf_vmlinux->nr_types; i++) {
-               t = btf_type_by_id(btf_vmlinux, i);
-               if (BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF)
-                       continue;
-               tname = __btf_name_by_offset(btf_vmlinux, t->name_off);
-               if (!strcmp(tname, fnname))
-                       break;
-       }
-       if (i > btf_vmlinux->nr_types) {
-               bpf_log(log, "helper %s type is not found\n", fnname);
-               return -ENOENT;
-       }
-
-       t = btf_type_by_id(btf_vmlinux, t->type);
-       if (!btf_type_is_ptr(t))
-               return -EFAULT;
-       t = btf_type_by_id(btf_vmlinux, t->type);
-       if (!btf_type_is_func_proto(t))
-               return -EFAULT;
-
-       args = (const struct btf_param *)(t + 1);
-       if (arg >= btf_type_vlen(t)) {
-               bpf_log(log, "bpf helper %s doesn't have %d-th argument\n",
-                       fnname, arg);
-               return -EINVAL;
-       }
-
-       t = btf_type_by_id(btf_vmlinux, args[arg].type);
-       if (!btf_type_is_ptr(t) || !t->type) {
-               /* anything but the pointer to struct is a helper config bug */
-               bpf_log(log, "ARG_PTR_TO_BTF is misconfigured\n");
-               return -EFAULT;
-       }
-       btf_id = t->type;
-       t = btf_type_by_id(btf_vmlinux, t->type);
-       /* skip modifiers */
-       while (btf_type_is_modifier(t)) {
-               btf_id = t->type;
-               t = btf_type_by_id(btf_vmlinux, t->type);
-       }
-       if (!btf_type_is_struct(t)) {
-               bpf_log(log, "ARG_PTR_TO_BTF is not a struct\n");
-               return -EFAULT;
-       }
-       bpf_log(log, "helper %s arg%d has btf_id %d struct %s\n", fnname + 4,
-               arg, btf_id, __btf_name_by_offset(btf_vmlinux, t->name_off));
-       return btf_id;
-}
-
 int btf_resolve_helper_id(struct bpf_verifier_log *log,
                          const struct bpf_func_proto *fn, int arg)
 {
-       int *btf_id = &fn->btf_id[arg];
-       int ret;
+       int id;
 
        if (fn->arg_type[arg] != ARG_PTR_TO_BTF_ID)
                return -EINVAL;
-
-       ret = READ_ONCE(*btf_id);
-       if (ret)
-               return ret;
-       /* ok to race the search. The result is the same */
-       ret = __btf_resolve_helper_id(log, fn->func, arg);
-       if (!ret) {
-               /* Function argument cannot be type 'void' */
-               bpf_log(log, "BTF resolution bug\n");
-               return -EFAULT;
-       }
-       WRITE_ONCE(*btf_id, ret);
-       return ret;
+       id = fn->btf_id[arg];
+       if (!id || id > btf_vmlinux->nr_types)
+               return -EINVAL;
+       return id;
 }
 
 static int __get_type_size(struct btf *btf, u32 btf_id,
index 4d76f16..ac53102 100644 (file)
@@ -1276,16 +1276,23 @@ static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
 
 static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
 {
-       if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0)
+       if (unlikely(max_optlen < 0))
                return -EINVAL;
 
+       if (unlikely(max_optlen > PAGE_SIZE)) {
+               /* We don't expose optvals that are greater than PAGE_SIZE
+                * to the BPF program.
+                */
+               max_optlen = PAGE_SIZE;
+       }
+
        ctx->optval = kzalloc(max_optlen, GFP_USER);
        if (!ctx->optval)
                return -ENOMEM;
 
        ctx->optval_end = ctx->optval + max_optlen;
 
-       return 0;
+       return max_optlen;
 }
 
 static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
@@ -1319,13 +1326,13 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
         */
        max_optlen = max_t(int, 16, *optlen);
 
-       ret = sockopt_alloc_buf(&ctx, max_optlen);
-       if (ret)
-               return ret;
+       max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
+       if (max_optlen < 0)
+               return max_optlen;
 
        ctx.optlen = *optlen;
 
-       if (copy_from_user(ctx.optval, optval, *optlen) != 0) {
+       if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
                ret = -EFAULT;
                goto out;
        }
@@ -1353,8 +1360,14 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
                /* export any potential modifications */
                *level = ctx.level;
                *optname = ctx.optname;
-               *optlen = ctx.optlen;
-               *kernel_optval = ctx.optval;
+
+               /* optlen == 0 from BPF indicates that we should
+                * use original userspace data.
+                */
+               if (ctx.optlen != 0) {
+                       *optlen = ctx.optlen;
+                       *kernel_optval = ctx.optval;
+               }
        }
 
 out:
@@ -1385,12 +1398,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
            __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
                return retval;
 
-       ret = sockopt_alloc_buf(&ctx, max_optlen);
-       if (ret)
-               return ret;
-
        ctx.optlen = max_optlen;
 
+       max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
+       if (max_optlen < 0)
+               return max_optlen;
+
        if (!retval) {
                /* If kernel getsockopt finished successfully,
                 * copy whatever was returned to the user back
@@ -1404,10 +1417,8 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                        goto out;
                }
 
-               if (ctx.optlen > max_optlen)
-                       ctx.optlen = max_optlen;
-
-               if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) {
+               if (copy_from_user(ctx.optval, optval,
+                                  min(ctx.optlen, max_optlen)) != 0) {
                        ret = -EFAULT;
                        goto out;
                }
@@ -1436,10 +1447,12 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                goto out;
        }
 
-       if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
-           put_user(ctx.optlen, optlen)) {
-               ret = -EFAULT;
-               goto out;
+       if (ctx.optlen != 0) {
+               if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
+                   put_user(ctx.optlen, optlen)) {
+                       ret = -EFAULT;
+                       goto out;
+               }
        }
 
        ret = ctx.retval;
index 9df4cc9..7be02e5 100644 (file)
@@ -1958,6 +1958,61 @@ void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
                }
 }
 
+/**
+ * bpf_prog_array_delete_safe_at() - Replaces the program at the given
+ *                                   index into the program array with
+ *                                   a dummy no-op program.
+ * @array: a bpf_prog_array
+ * @index: the index of the program to replace
+ *
+ * Skips over dummy programs, by not counting them, when calculating
+ * the the position of the program to replace.
+ *
+ * Return:
+ * * 0         - Success
+ * * -EINVAL   - Invalid index value. Must be a non-negative integer.
+ * * -ENOENT   - Index out of range
+ */
+int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index)
+{
+       return bpf_prog_array_update_at(array, index, &dummy_bpf_prog.prog);
+}
+
+/**
+ * bpf_prog_array_update_at() - Updates the program at the given index
+ *                              into the program array.
+ * @array: a bpf_prog_array
+ * @index: the index of the program to update
+ * @prog: the program to insert into the array
+ *
+ * Skips over dummy programs, by not counting them, when calculating
+ * the position of the program to update.
+ *
+ * Return:
+ * * 0         - Success
+ * * -EINVAL   - Invalid index value. Must be a non-negative integer.
+ * * -ENOENT   - Index out of range
+ */
+int bpf_prog_array_update_at(struct bpf_prog_array *array, int index,
+                            struct bpf_prog *prog)
+{
+       struct bpf_prog_array_item *item;
+
+       if (unlikely(index < 0))
+               return -EINVAL;
+
+       for (item = array->items; item->prog; item++) {
+               if (item->prog == &dummy_bpf_prog.prog)
+                       continue;
+               if (!index) {
+                       WRITE_ONCE(item->prog, prog);
+                       return 0;
+               }
+               index--;
+       }
+       return -ENOENT;
+}
+
 int bpf_prog_array_copy(struct bpf_prog_array *old_array,
                        struct bpf_prog *exclude_prog,
                        struct bpf_prog *include_prog,
index 27595fc..f1c4652 100644 (file)
@@ -52,7 +52,6 @@ struct xdp_bulk_queue {
 struct bpf_cpu_map_entry {
        u32 cpu;    /* kthread CPU and map index */
        int map_id; /* Back reference to map */
-       u32 qsize;  /* Queue size placeholder for map lookup */
 
        /* XDP can run multiple RX-ring queues, need __percpu enqueue store */
        struct xdp_bulk_queue __percpu *bulkq;
@@ -62,10 +61,14 @@ struct bpf_cpu_map_entry {
        /* Queue with potential multi-producers, and single-consumer kthread */
        struct ptr_ring *queue;
        struct task_struct *kthread;
-       struct work_struct kthread_stop_wq;
+
+       struct bpf_cpumap_val value;
+       struct bpf_prog *prog;
 
        atomic_t refcnt; /* Control when this struct can be free'ed */
        struct rcu_head rcu;
+
+       struct work_struct kthread_stop_wq;
 };
 
 struct bpf_cpu_map {
@@ -80,6 +83,7 @@ static int bq_flush_to_queue(struct xdp_bulk_queue *bq);
 
 static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 {
+       u32 value_size = attr->value_size;
        struct bpf_cpu_map *cmap;
        int err = -ENOMEM;
        u64 cost;
@@ -90,7 +94,9 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 
        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
-           attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
+           (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
+            value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) ||
+           attr->map_flags & ~BPF_F_NUMA_NODE)
                return ERR_PTR(-EINVAL);
 
        cmap = kzalloc(sizeof(*cmap), GFP_USER);
@@ -212,6 +218,8 @@ static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
 static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
 {
        if (atomic_dec_and_test(&rcpu->refcnt)) {
+               if (rcpu->prog)
+                       bpf_prog_put(rcpu->prog);
                /* The queue should be empty at this point */
                __cpu_map_ring_cleanup(rcpu->queue);
                ptr_ring_cleanup(rcpu->queue, NULL);
@@ -220,6 +228,75 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
        }
 }
 
+static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
+                                   void **frames, int n,
+                                   struct xdp_cpumap_stats *stats)
+{
+       struct xdp_rxq_info rxq;
+       struct xdp_buff xdp;
+       int i, nframes = 0;
+
+       if (!rcpu->prog)
+               return n;
+
+       rcu_read_lock_bh();
+
+       xdp_set_return_frame_no_direct();
+       xdp.rxq = &rxq;
+
+       for (i = 0; i < n; i++) {
+               struct xdp_frame *xdpf = frames[i];
+               u32 act;
+               int err;
+
+               rxq.dev = xdpf->dev_rx;
+               rxq.mem = xdpf->mem;
+               /* TODO: report queue_index to xdp_rxq_info */
+
+               xdp_convert_frame_to_buff(xdpf, &xdp);
+
+               act = bpf_prog_run_xdp(rcpu->prog, &xdp);
+               switch (act) {
+               case XDP_PASS:
+                       err = xdp_update_frame_from_buff(&xdp, xdpf);
+                       if (err < 0) {
+                               xdp_return_frame(xdpf);
+                               stats->drop++;
+                       } else {
+                               frames[nframes++] = xdpf;
+                               stats->pass++;
+                       }
+                       break;
+               case XDP_REDIRECT:
+                       err = xdp_do_redirect(xdpf->dev_rx, &xdp,
+                                             rcpu->prog);
+                       if (unlikely(err)) {
+                               xdp_return_frame(xdpf);
+                               stats->drop++;
+                       } else {
+                               stats->redirect++;
+                       }
+                       break;
+               default:
+                       bpf_warn_invalid_xdp_action(act);
+                       /* fallthrough */
+               case XDP_DROP:
+                       xdp_return_frame(xdpf);
+                       stats->drop++;
+                       break;
+               }
+       }
+
+       if (stats->redirect)
+               xdp_do_flush_map();
+
+       xdp_clear_return_frame_no_direct();
+
+       rcu_read_unlock_bh(); /* resched point, may call do_softirq() */
+
+       return nframes;
+}
+
 #define CPUMAP_BATCH 8
 
 static int cpu_map_kthread_run(void *data)
@@ -234,11 +311,12 @@ static int cpu_map_kthread_run(void *data)
         * kthread_stop signal until queue is empty.
         */
        while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
+               struct xdp_cpumap_stats stats = {}; /* zero stats */
+               gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
                unsigned int drops = 0, sched = 0;
                void *frames[CPUMAP_BATCH];
                void *skbs[CPUMAP_BATCH];
-               gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
-               int i, n, m;
+               int i, n, m, nframes;
 
                /* Release CPU reschedule checks */
                if (__ptr_ring_empty(rcpu->queue)) {
@@ -259,8 +337,8 @@ static int cpu_map_kthread_run(void *data)
                 * kthread CPU pinned. Lockless access to ptr_ring
                 * consume side valid as no-resize allowed of queue.
                 */
-               n = ptr_ring_consume_batched(rcpu->queue, frames, CPUMAP_BATCH);
-
+               n = __ptr_ring_consume_batched(rcpu->queue, frames,
+                                              CPUMAP_BATCH);
                for (i = 0; i < n; i++) {
                        void *f = frames[i];
                        struct page *page = virt_to_page(f);
@@ -272,15 +350,19 @@ static int cpu_map_kthread_run(void *data)
                        prefetchw(page);
                }
 
-               m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, n, skbs);
-               if (unlikely(m == 0)) {
-                       for (i = 0; i < n; i++)
-                               skbs[i] = NULL; /* effect: xdp_return_frame */
-                       drops = n;
+               /* Support running another XDP prog on this CPU */
+               nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, n, &stats);
+               if (nframes) {
+                       m = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, skbs);
+                       if (unlikely(m == 0)) {
+                               for (i = 0; i < nframes; i++)
+                                       skbs[i] = NULL; /* effect: xdp_return_frame */
+                               drops += nframes;
+                       }
                }
 
                local_bh_disable();
-               for (i = 0; i < n; i++) {
+               for (i = 0; i < nframes; i++) {
                        struct xdp_frame *xdpf = frames[i];
                        struct sk_buff *skb = skbs[i];
                        int ret;
@@ -297,7 +379,7 @@ static int cpu_map_kthread_run(void *data)
                                drops++;
                }
                /* Feedback loop via tracepoint */
-               trace_xdp_cpumap_kthread(rcpu->map_id, n, drops, sched);
+               trace_xdp_cpumap_kthread(rcpu->map_id, n, drops, sched, &stats);
 
                local_bh_enable(); /* resched point, may call do_softirq() */
        }
@@ -307,13 +389,38 @@ static int cpu_map_kthread_run(void *data)
        return 0;
 }
 
-static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
-                                                      int map_id)
+bool cpu_map_prog_allowed(struct bpf_map *map)
 {
+       return map->map_type == BPF_MAP_TYPE_CPUMAP &&
+              map->value_size != offsetofend(struct bpf_cpumap_val, qsize);
+}
+
+static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu, int fd)
+{
+       struct bpf_prog *prog;
+
+       prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
+       if (IS_ERR(prog))
+               return PTR_ERR(prog);
+
+       if (prog->expected_attach_type != BPF_XDP_CPUMAP) {
+               bpf_prog_put(prog);
+               return -EINVAL;
+       }
+
+       rcpu->value.bpf_prog.id = prog->aux->id;
+       rcpu->prog = prog;
+
+       return 0;
+}
+
+static struct bpf_cpu_map_entry *
+__cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id)
+{
+       int numa, err, i, fd = value->bpf_prog.fd;
        gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
        struct bpf_cpu_map_entry *rcpu;
        struct xdp_bulk_queue *bq;
-       int numa, err, i;
 
        /* Have map->numa_node, but choose node of redirect target CPU */
        numa = cpu_to_node(cpu);
@@ -338,19 +445,22 @@ static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
        if (!rcpu->queue)
                goto free_bulkq;
 
-       err = ptr_ring_init(rcpu->queue, qsize, gfp);
+       err = ptr_ring_init(rcpu->queue, value->qsize, gfp);
        if (err)
                goto free_queue;
 
        rcpu->cpu    = cpu;
        rcpu->map_id = map_id;
-       rcpu->qsize  = qsize;
+       rcpu->value.qsize  = value->qsize;
+
+       if (fd > 0 && __cpu_map_load_bpf_program(rcpu, fd))
+               goto free_ptr_ring;
 
        /* Setup kthread */
        rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
                                               "cpumap/%d/map:%d", cpu, map_id);
        if (IS_ERR(rcpu->kthread))
-               goto free_ptr_ring;
+               goto free_prog;
 
        get_cpu_map_entry(rcpu); /* 1-refcnt for being in cmap->cpu_map[] */
        get_cpu_map_entry(rcpu); /* 1-refcnt for kthread */
@@ -361,6 +471,9 @@ static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
 
        return rcpu;
 
+free_prog:
+       if (rcpu->prog)
+               bpf_prog_put(rcpu->prog);
 free_ptr_ring:
        ptr_ring_cleanup(rcpu->queue, NULL);
 free_queue:
@@ -437,12 +550,12 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
                               u64 map_flags)
 {
        struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
+       struct bpf_cpumap_val cpumap_value = {};
        struct bpf_cpu_map_entry *rcpu;
-
        /* Array index key correspond to CPU number */
        u32 key_cpu = *(u32 *)key;
-       /* Value is the queue size */
-       u32 qsize = *(u32 *)value;
+
+       memcpy(&cpumap_value, value, map->value_size);
 
        if (unlikely(map_flags > BPF_EXIST))
                return -EINVAL;
@@ -450,18 +563,18 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
                return -E2BIG;
        if (unlikely(map_flags == BPF_NOEXIST))
                return -EEXIST;
-       if (unlikely(qsize > 16384)) /* sanity limit on qsize */
+       if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */
                return -EOVERFLOW;
 
        /* Make sure CPU is a valid possible cpu */
        if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
                return -ENODEV;
 
-       if (qsize == 0) {
+       if (cpumap_value.qsize == 0) {
                rcpu = NULL; /* Same as deleting */
        } else {
                /* Updating qsize cause re-allocation of bpf_cpu_map_entry */
-               rcpu = __cpu_map_entry_alloc(qsize, key_cpu, map->id);
+               rcpu = __cpu_map_entry_alloc(&cpumap_value, key_cpu, map->id);
                if (!rcpu)
                        return -ENOMEM;
                rcpu->cmap = cmap;
@@ -523,7 +636,7 @@ static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
        struct bpf_cpu_map_entry *rcpu =
                __cpu_map_lookup_elem(map, *(u32 *)key);
 
-       return rcpu ? &rcpu->qsize : NULL;
+       return rcpu ? &rcpu->value : NULL;
 }
 
 static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
@@ -543,6 +656,7 @@ static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
        return 0;
 }
 
+static int cpu_map_btf_id;
 const struct bpf_map_ops cpu_map_ops = {
        .map_alloc              = cpu_map_alloc,
        .map_free               = cpu_map_free,
@@ -551,6 +665,8 @@ const struct bpf_map_ops cpu_map_ops = {
        .map_lookup_elem        = cpu_map_lookup_elem,
        .map_get_next_key       = cpu_map_get_next_key,
        .map_check_btf          = map_check_no_btf,
+       .map_btf_name           = "bpf_cpu_map",
+       .map_btf_id             = &cpu_map_btf_id,
 };
 
 static int bq_flush_to_queue(struct xdp_bulk_queue *bq)
index 0cbb72c..10abb06 100644 (file)
@@ -86,12 +86,13 @@ static DEFINE_PER_CPU(struct list_head, dev_flush_list);
 static DEFINE_SPINLOCK(dev_map_lock);
 static LIST_HEAD(dev_map_list);
 
-static struct hlist_head *dev_map_create_hash(unsigned int entries)
+static struct hlist_head *dev_map_create_hash(unsigned int entries,
+                                             int numa_node)
 {
        int i;
        struct hlist_head *hash;
 
-       hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL);
+       hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
        if (hash != NULL)
                for (i = 0; i < entries; i++)
                        INIT_HLIST_HEAD(&hash[i]);
@@ -145,7 +146,8 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
                return -EINVAL;
 
        if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
-               dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets);
+               dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets,
+                                                          dtab->map.numa_node);
                if (!dtab->dev_index_head)
                        goto free_charge;
 
@@ -232,7 +234,7 @@ static void dev_map_free(struct bpf_map *map)
                        }
                }
 
-               kfree(dtab->dev_index_head);
+               bpf_map_area_free(dtab->dev_index_head);
        } else {
                for (i = 0; i < dtab->map.max_entries; i++) {
                        struct bpf_dtab_netdev *dev;
@@ -747,6 +749,7 @@ static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
                                         map, key, value, map_flags);
 }
 
+static int dev_map_btf_id;
 const struct bpf_map_ops dev_map_ops = {
        .map_alloc = dev_map_alloc,
        .map_free = dev_map_free,
@@ -755,8 +758,11 @@ const struct bpf_map_ops dev_map_ops = {
        .map_update_elem = dev_map_update_elem,
        .map_delete_elem = dev_map_delete_elem,
        .map_check_btf = map_check_no_btf,
+       .map_btf_name = "bpf_dtab",
+       .map_btf_id = &dev_map_btf_id,
 };
 
+static int dev_map_hash_map_btf_id;
 const struct bpf_map_ops dev_map_hash_ops = {
        .map_alloc = dev_map_alloc,
        .map_free = dev_map_free,
@@ -765,6 +771,8 @@ const struct bpf_map_ops dev_map_hash_ops = {
        .map_update_elem = dev_map_hash_update_elem,
        .map_delete_elem = dev_map_hash_delete_elem,
        .map_check_btf = map_check_no_btf,
+       .map_btf_name = "bpf_dtab",
+       .map_btf_id = &dev_map_hash_map_btf_id,
 };
 
 static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
index b4b288a..d4378d7 100644 (file)
@@ -1290,12 +1290,10 @@ static void htab_map_free(struct bpf_map *map)
 {
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
 
-       /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
-        * so the programs (can be more than one that used this map) were
-        * disconnected from events. Wait for outstanding critical sections in
-        * these programs to complete
+       /* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
+        * bpf_free_used_maps() is called after bpf prog is no longer executing.
+        * There is no need to synchronize_rcu() here to protect map elements.
         */
-       synchronize_rcu();
 
        /* some of free_htab_elem() callbacks for elements of this map may
         * not have executed. Wait for them.
@@ -1614,6 +1612,7 @@ htab_lru_map_lookup_and_delete_batch(struct bpf_map *map,
                                                  true, false);
 }
 
+static int htab_map_btf_id;
 const struct bpf_map_ops htab_map_ops = {
        .map_alloc_check = htab_map_alloc_check,
        .map_alloc = htab_map_alloc,
@@ -1625,8 +1624,11 @@ const struct bpf_map_ops htab_map_ops = {
        .map_gen_lookup = htab_map_gen_lookup,
        .map_seq_show_elem = htab_map_seq_show_elem,
        BATCH_OPS(htab),
+       .map_btf_name = "bpf_htab",
+       .map_btf_id = &htab_map_btf_id,
 };
 
+static int htab_lru_map_btf_id;
 const struct bpf_map_ops htab_lru_map_ops = {
        .map_alloc_check = htab_map_alloc_check,
        .map_alloc = htab_map_alloc,
@@ -1639,6 +1641,8 @@ const struct bpf_map_ops htab_lru_map_ops = {
        .map_gen_lookup = htab_lru_map_gen_lookup,
        .map_seq_show_elem = htab_map_seq_show_elem,
        BATCH_OPS(htab_lru),
+       .map_btf_name = "bpf_htab",
+       .map_btf_id = &htab_lru_map_btf_id,
 };
 
 /* Called from eBPF program */
@@ -1743,6 +1747,7 @@ static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
        rcu_read_unlock();
 }
 
+static int htab_percpu_map_btf_id;
 const struct bpf_map_ops htab_percpu_map_ops = {
        .map_alloc_check = htab_map_alloc_check,
        .map_alloc = htab_map_alloc,
@@ -1753,8 +1758,11 @@ const struct bpf_map_ops htab_percpu_map_ops = {
        .map_delete_elem = htab_map_delete_elem,
        .map_seq_show_elem = htab_percpu_map_seq_show_elem,
        BATCH_OPS(htab_percpu),
+       .map_btf_name = "bpf_htab",
+       .map_btf_id = &htab_percpu_map_btf_id,
 };
 
+static int htab_lru_percpu_map_btf_id;
 const struct bpf_map_ops htab_lru_percpu_map_ops = {
        .map_alloc_check = htab_map_alloc_check,
        .map_alloc = htab_map_alloc,
@@ -1765,6 +1773,8 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
        .map_delete_elem = htab_lru_map_delete_elem,
        .map_seq_show_elem = htab_percpu_map_seq_show_elem,
        BATCH_OPS(htab_lru_percpu),
+       .map_btf_name = "bpf_htab",
+       .map_btf_id = &htab_lru_percpu_map_btf_id,
 };
 
 static int fd_htab_map_alloc_check(union bpf_attr *attr)
@@ -1887,6 +1897,7 @@ static void htab_of_map_free(struct bpf_map *map)
        fd_htab_map_free(map);
 }
 
+static int htab_of_maps_map_btf_id;
 const struct bpf_map_ops htab_of_maps_map_ops = {
        .map_alloc_check = fd_htab_map_alloc_check,
        .map_alloc = htab_of_map_alloc,
@@ -1899,4 +1910,6 @@ const struct bpf_map_ops htab_of_maps_map_ops = {
        .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
        .map_gen_lookup = htab_of_map_gen_lookup,
        .map_check_btf = map_check_no_btf,
+       .map_btf_name = "bpf_htab",
+       .map_btf_id = &htab_of_maps_map_btf_id,
 };
index 33d0186..51bd5a8 100644 (file)
@@ -409,6 +409,7 @@ static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *_key,
        rcu_read_unlock();
 }
 
+static int cgroup_storage_map_btf_id;
 const struct bpf_map_ops cgroup_storage_map_ops = {
        .map_alloc = cgroup_storage_map_alloc,
        .map_free = cgroup_storage_map_free,
@@ -418,6 +419,8 @@ const struct bpf_map_ops cgroup_storage_map_ops = {
        .map_delete_elem = cgroup_storage_delete_elem,
        .map_check_btf = cgroup_storage_check_btf,
        .map_seq_show_elem = cgroup_storage_seq_show_elem,
+       .map_btf_name = "bpf_cgroup_storage_map",
+       .map_btf_id = &cgroup_storage_map_btf_id,
 };
 
 int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *_map)
index c8cc4e4..44474bf 100644 (file)
@@ -589,11 +589,6 @@ static void trie_free(struct bpf_map *map)
        struct lpm_trie_node __rcu **slot;
        struct lpm_trie_node *node;
 
-       /* Wait for outstanding programs to complete
-        * update/lookup/delete/get_next_key and free the trie.
-        */
-       synchronize_rcu();
-
        /* Always start at the root and walk down to a node that has no
         * children. Then free that node, nullify its reference in the parent
         * and start over.
@@ -735,6 +730,7 @@ static int trie_check_btf(const struct bpf_map *map,
               -EINVAL : 0;
 }
 
+static int trie_map_btf_id;
 const struct bpf_map_ops trie_map_ops = {
        .map_alloc = trie_alloc,
        .map_free = trie_free,
@@ -743,4 +739,6 @@ const struct bpf_map_ops trie_map_ops = {
        .map_update_elem = trie_update_elem,
        .map_delete_elem = trie_delete_elem,
        .map_check_btf = trie_check_btf,
+       .map_btf_name = "lpm_trie",
+       .map_btf_id = &trie_map_btf_id,
 };
index c69071e..8a7af11 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/fs.h>
 #include <linux/filter.h>
 #include <linux/kernel.h>
+#include <linux/btf_ids.h>
 
 struct bpf_iter_seq_map_info {
        u32 mid;
@@ -81,7 +82,10 @@ static const struct seq_operations bpf_map_seq_ops = {
        .show   = bpf_map_seq_show,
 };
 
-static const struct bpf_iter_reg bpf_map_reg_info = {
+BTF_ID_LIST(btf_bpf_map_id)
+BTF_ID(struct, bpf_map)
+
+static struct bpf_iter_reg bpf_map_reg_info = {
        .target                 = "bpf_map",
        .seq_ops                = &bpf_map_seq_ops,
        .init_seq_private       = NULL,
@@ -96,6 +100,7 @@ static const struct bpf_iter_reg bpf_map_reg_info = {
 
 static int __init bpf_map_iter_init(void)
 {
+       bpf_map_reg_info.ctx_arg_info[0].btf_id = *btf_bpf_map_id;
        return bpf_iter_reg_target(&bpf_map_reg_info);
 }
 
index 78cf061..71405ed 100644 (file)
@@ -19,18 +19,83 @@ struct bpf_netns_link {
         * with netns_bpf_mutex held.
         */
        struct net *net;
+       struct list_head node; /* node in list of links attached to net */
 };
 
 /* Protects updates to netns_bpf */
 DEFINE_MUTEX(netns_bpf_mutex);
 
+static void netns_bpf_attach_type_unneed(enum netns_bpf_attach_type type)
+{
+       switch (type) {
+#ifdef CONFIG_INET
+       case NETNS_BPF_SK_LOOKUP:
+               static_branch_dec(&bpf_sk_lookup_enabled);
+               break;
+#endif
+       default:
+               break;
+       }
+}
+
+static void netns_bpf_attach_type_need(enum netns_bpf_attach_type type)
+{
+       switch (type) {
+#ifdef CONFIG_INET
+       case NETNS_BPF_SK_LOOKUP:
+               static_branch_inc(&bpf_sk_lookup_enabled);
+               break;
+#endif
+       default:
+               break;
+       }
+}
+
 /* Must be called with netns_bpf_mutex held. */
-static void __net_exit bpf_netns_link_auto_detach(struct bpf_link *link)
+static void netns_bpf_run_array_detach(struct net *net,
+                                      enum netns_bpf_attach_type type)
 {
-       struct bpf_netns_link *net_link =
-               container_of(link, struct bpf_netns_link, link);
+       struct bpf_prog_array *run_array;
+
+       run_array = rcu_replace_pointer(net->bpf.run_array[type], NULL,
+                                       lockdep_is_held(&netns_bpf_mutex));
+       bpf_prog_array_free(run_array);
+}
+
+static int link_index(struct net *net, enum netns_bpf_attach_type type,
+                     struct bpf_netns_link *link)
+{
+       struct bpf_netns_link *pos;
+       int i = 0;
 
-       net_link->net = NULL;
+       list_for_each_entry(pos, &net->bpf.links[type], node) {
+               if (pos == link)
+                       return i;
+               i++;
+       }
+       return -ENOENT;
+}
+
+static int link_count(struct net *net, enum netns_bpf_attach_type type)
+{
+       struct list_head *pos;
+       int i = 0;
+
+       list_for_each(pos, &net->bpf.links[type])
+               i++;
+       return i;
+}
+
+static void fill_prog_array(struct net *net, enum netns_bpf_attach_type type,
+                           struct bpf_prog_array *prog_array)
+{
+       struct bpf_netns_link *pos;
+       unsigned int i = 0;
+
+       list_for_each_entry(pos, &net->bpf.links[type], node) {
+               prog_array->items[i].prog = pos->link.prog;
+               i++;
+       }
 }
 
 static void bpf_netns_link_release(struct bpf_link *link)
@@ -38,24 +103,43 @@ static void bpf_netns_link_release(struct bpf_link *link)
        struct bpf_netns_link *net_link =
                container_of(link, struct bpf_netns_link, link);
        enum netns_bpf_attach_type type = net_link->netns_type;
+       struct bpf_prog_array *old_array, *new_array;
        struct net *net;
-
-       /* Link auto-detached by dying netns. */
-       if (!net_link->net)
-               return;
+       int cnt, idx;
 
        mutex_lock(&netns_bpf_mutex);
 
-       /* Recheck after potential sleep. We can race with cleanup_net
-        * here, but if we see a non-NULL struct net pointer pre_exit
-        * has not happened yet and will block on netns_bpf_mutex.
+       /* We can race with cleanup_net, but if we see a non-NULL
+        * struct net pointer, pre_exit has not run yet and wait for
+        * netns_bpf_mutex.
         */
        net = net_link->net;
        if (!net)
                goto out_unlock;
 
-       net->bpf.links[type] = NULL;
-       RCU_INIT_POINTER(net->bpf.progs[type], NULL);
+       /* Mark attach point as unused */
+       netns_bpf_attach_type_unneed(type);
+
+       /* Remember link position in case of safe delete */
+       idx = link_index(net, type, net_link);
+       list_del(&net_link->node);
+
+       cnt = link_count(net, type);
+       if (!cnt) {
+               netns_bpf_run_array_detach(net, type);
+               goto out_unlock;
+       }
+
+       old_array = rcu_dereference_protected(net->bpf.run_array[type],
+                                             lockdep_is_held(&netns_bpf_mutex));
+       new_array = bpf_prog_array_alloc(cnt, GFP_KERNEL);
+       if (!new_array) {
+               WARN_ON(bpf_prog_array_delete_safe_at(old_array, idx));
+               goto out_unlock;
+       }
+       fill_prog_array(net, type, new_array);
+       rcu_assign_pointer(net->bpf.run_array[type], new_array);
+       bpf_prog_array_free(old_array);
 
 out_unlock:
        mutex_unlock(&netns_bpf_mutex);
@@ -76,8 +160,9 @@ static int bpf_netns_link_update_prog(struct bpf_link *link,
        struct bpf_netns_link *net_link =
                container_of(link, struct bpf_netns_link, link);
        enum netns_bpf_attach_type type = net_link->netns_type;
+       struct bpf_prog_array *run_array;
        struct net *net;
-       int ret = 0;
+       int idx, ret;
 
        if (old_prog && old_prog != link->prog)
                return -EPERM;
@@ -93,8 +178,14 @@ static int bpf_netns_link_update_prog(struct bpf_link *link,
                goto out_unlock;
        }
 
+       run_array = rcu_dereference_protected(net->bpf.run_array[type],
+                                             lockdep_is_held(&netns_bpf_mutex));
+       idx = link_index(net, type, net_link);
+       ret = bpf_prog_array_update_at(run_array, idx, new_prog);
+       if (ret)
+               goto out_unlock;
+
        old_prog = xchg(&link->prog, new_prog);
-       rcu_assign_pointer(net->bpf.progs[type], new_prog);
        bpf_prog_put(old_prog);
 
 out_unlock:
@@ -142,14 +233,38 @@ static const struct bpf_link_ops bpf_netns_link_ops = {
        .show_fdinfo = bpf_netns_link_show_fdinfo,
 };
 
+/* Must be called with netns_bpf_mutex held. */
+static int __netns_bpf_prog_query(const union bpf_attr *attr,
+                                 union bpf_attr __user *uattr,
+                                 struct net *net,
+                                 enum netns_bpf_attach_type type)
+{
+       __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
+       struct bpf_prog_array *run_array;
+       u32 prog_cnt = 0, flags = 0;
+
+       run_array = rcu_dereference_protected(net->bpf.run_array[type],
+                                             lockdep_is_held(&netns_bpf_mutex));
+       if (run_array)
+               prog_cnt = bpf_prog_array_length(run_array);
+
+       if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
+               return -EFAULT;
+       if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
+               return -EFAULT;
+       if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
+               return 0;
+
+       return bpf_prog_array_copy_to_user(run_array, prog_ids,
+                                          attr->query.prog_cnt);
+}
+
 int netns_bpf_prog_query(const union bpf_attr *attr,
                         union bpf_attr __user *uattr)
 {
-       __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
-       u32 prog_id, prog_cnt = 0, flags = 0;
        enum netns_bpf_attach_type type;
-       struct bpf_prog *attached;
        struct net *net;
+       int ret;
 
        if (attr->query.query_flags)
                return -EINVAL;
@@ -162,36 +277,25 @@ int netns_bpf_prog_query(const union bpf_attr *attr,
        if (IS_ERR(net))
                return PTR_ERR(net);
 
-       rcu_read_lock();
-       attached = rcu_dereference(net->bpf.progs[type]);
-       if (attached) {
-               prog_cnt = 1;
-               prog_id = attached->aux->id;
-       }
-       rcu_read_unlock();
+       mutex_lock(&netns_bpf_mutex);
+       ret = __netns_bpf_prog_query(attr, uattr, net, type);
+       mutex_unlock(&netns_bpf_mutex);
 
        put_net(net);
-
-       if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
-               return -EFAULT;
-       if (copy_to_user(&uattr->query.prog_cnt, &prog_cnt, sizeof(prog_cnt)))
-               return -EFAULT;
-
-       if (!attr->query.prog_cnt || !prog_ids || !prog_cnt)
-               return 0;
-
-       if (copy_to_user(prog_ids, &prog_id, sizeof(u32)))
-               return -EFAULT;
-
-       return 0;
+       return ret;
 }
 
 int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 {
+       struct bpf_prog_array *run_array;
        enum netns_bpf_attach_type type;
+       struct bpf_prog *attached;
        struct net *net;
        int ret;
 
+       if (attr->target_fd || attr->attach_flags || attr->replace_bpf_fd)
+               return -EINVAL;
+
        type = to_netns_bpf_attach_type(attr->attach_type);
        if (type < 0)
                return -EINVAL;
@@ -200,19 +304,47 @@ int netns_bpf_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
        mutex_lock(&netns_bpf_mutex);
 
        /* Attaching prog directly is not compatible with links */
-       if (net->bpf.links[type]) {
+       if (!list_empty(&net->bpf.links[type])) {
                ret = -EEXIST;
                goto out_unlock;
        }
 
        switch (type) {
        case NETNS_BPF_FLOW_DISSECTOR:
-               ret = flow_dissector_bpf_prog_attach(net, prog);
+               ret = flow_dissector_bpf_prog_attach_check(net, prog);
                break;
        default:
                ret = -EINVAL;
                break;
        }
+       if (ret)
+               goto out_unlock;
+
+       attached = net->bpf.progs[type];
+       if (attached == prog) {
+               /* The same program cannot be attached twice */
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       run_array = rcu_dereference_protected(net->bpf.run_array[type],
+                                             lockdep_is_held(&netns_bpf_mutex));
+       if (run_array) {
+               WRITE_ONCE(run_array->items[0].prog, prog);
+       } else {
+               run_array = bpf_prog_array_alloc(1, GFP_KERNEL);
+               if (!run_array) {
+                       ret = -ENOMEM;
+                       goto out_unlock;
+               }
+               run_array->items[0].prog = prog;
+               rcu_assign_pointer(net->bpf.run_array[type], run_array);
+       }
+
+       net->bpf.progs[type] = prog;
+       if (attached)
+               bpf_prog_put(attached);
+
 out_unlock:
        mutex_unlock(&netns_bpf_mutex);
 
@@ -221,63 +353,89 @@ out_unlock:
 
 /* Must be called with netns_bpf_mutex held. */
 static int __netns_bpf_prog_detach(struct net *net,
-                                  enum netns_bpf_attach_type type)
+                                  enum netns_bpf_attach_type type,
+                                  struct bpf_prog *old)
 {
        struct bpf_prog *attached;
 
        /* Progs attached via links cannot be detached */
-       if (net->bpf.links[type])
+       if (!list_empty(&net->bpf.links[type]))
                return -EINVAL;
 
-       attached = rcu_dereference_protected(net->bpf.progs[type],
-                                            lockdep_is_held(&netns_bpf_mutex));
-       if (!attached)
+       attached = net->bpf.progs[type];
+       if (!attached || attached != old)
                return -ENOENT;
-       RCU_INIT_POINTER(net->bpf.progs[type], NULL);
+       netns_bpf_run_array_detach(net, type);
+       net->bpf.progs[type] = NULL;
        bpf_prog_put(attached);
        return 0;
 }
 
-int netns_bpf_prog_detach(const union bpf_attr *attr)
+int netns_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
 {
        enum netns_bpf_attach_type type;
+       struct bpf_prog *prog;
        int ret;
 
+       if (attr->target_fd)
+               return -EINVAL;
+
        type = to_netns_bpf_attach_type(attr->attach_type);
        if (type < 0)
                return -EINVAL;
 
+       prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
+       if (IS_ERR(prog))
+               return PTR_ERR(prog);
+
        mutex_lock(&netns_bpf_mutex);
-       ret = __netns_bpf_prog_detach(current->nsproxy->net_ns, type);
+       ret = __netns_bpf_prog_detach(current->nsproxy->net_ns, type, prog);
        mutex_unlock(&netns_bpf_mutex);
 
+       bpf_prog_put(prog);
+
        return ret;
 }
 
+static int netns_bpf_max_progs(enum netns_bpf_attach_type type)
+{
+       switch (type) {
+       case NETNS_BPF_FLOW_DISSECTOR:
+               return 1;
+       case NETNS_BPF_SK_LOOKUP:
+               return 64;
+       default:
+               return 0;
+       }
+}
+
 static int netns_bpf_link_attach(struct net *net, struct bpf_link *link,
                                 enum netns_bpf_attach_type type)
 {
-       struct bpf_prog *prog;
-       int err;
+       struct bpf_netns_link *net_link =
+               container_of(link, struct bpf_netns_link, link);
+       struct bpf_prog_array *run_array;
+       int cnt, err;
 
        mutex_lock(&netns_bpf_mutex);
 
-       /* Allow attaching only one prog or link for now */
-       if (net->bpf.links[type]) {
+       cnt = link_count(net, type);
+       if (cnt >= netns_bpf_max_progs(type)) {
                err = -E2BIG;
                goto out_unlock;
        }
        /* Links are not compatible with attaching prog directly */
-       prog = rcu_dereference_protected(net->bpf.progs[type],
-                                        lockdep_is_held(&netns_bpf_mutex));
-       if (prog) {
+       if (net->bpf.progs[type]) {
                err = -EEXIST;
                goto out_unlock;
        }
 
        switch (type) {
        case NETNS_BPF_FLOW_DISSECTOR:
-               err = flow_dissector_bpf_prog_attach(net, link->prog);
+               err = flow_dissector_bpf_prog_attach_check(net, link->prog);
+               break;
+       case NETNS_BPF_SK_LOOKUP:
+               err = 0; /* nothing to check */
                break;
        default:
                err = -EINVAL;
@@ -286,7 +444,21 @@ static int netns_bpf_link_attach(struct net *net, struct bpf_link *link,
        if (err)
                goto out_unlock;
 
-       net->bpf.links[type] = link;
+       run_array = bpf_prog_array_alloc(cnt + 1, GFP_KERNEL);
+       if (!run_array) {
+               err = -ENOMEM;
+               goto out_unlock;
+       }
+
+       list_add_tail(&net_link->node, &net->bpf.links[type]);
+
+       fill_prog_array(net, type, run_array);
+       run_array = rcu_replace_pointer(net->bpf.run_array[type], run_array,
+                                       lockdep_is_held(&netns_bpf_mutex));
+       bpf_prog_array_free(run_array);
+
+       /* Mark attach point as used */
+       netns_bpf_attach_type_need(type);
 
 out_unlock:
        mutex_unlock(&netns_bpf_mutex);
@@ -345,23 +517,36 @@ out_put_net:
        return err;
 }
 
+static int __net_init netns_bpf_pernet_init(struct net *net)
+{
+       int type;
+
+       for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++)
+               INIT_LIST_HEAD(&net->bpf.links[type]);
+
+       return 0;
+}
+
 static void __net_exit netns_bpf_pernet_pre_exit(struct net *net)
 {
        enum netns_bpf_attach_type type;
-       struct bpf_link *link;
+       struct bpf_netns_link *net_link;
 
        mutex_lock(&netns_bpf_mutex);
        for (type = 0; type < MAX_NETNS_BPF_ATTACH_TYPE; type++) {
-               link = net->bpf.links[type];
-               if (link)
-                       bpf_netns_link_auto_detach(link);
-               else
-                       __netns_bpf_prog_detach(net, type);
+               netns_bpf_run_array_detach(net, type);
+               list_for_each_entry(net_link, &net->bpf.links[type], node) {
+                       net_link->net = NULL; /* auto-detach link */
+                       netns_bpf_attach_type_unneed(type);
+               }
+               if (net->bpf.progs[type])
+                       bpf_prog_put(net->bpf.progs[type]);
        }
        mutex_unlock(&netns_bpf_mutex);
 }
 
 static struct pernet_operations netns_bpf_pernet_ops __net_initdata = {
+       .init = netns_bpf_pernet_init,
        .pre_exit = netns_bpf_pernet_pre_exit,
 };
 
index 05c8e04..44184f8 100644 (file)
@@ -101,13 +101,6 @@ static void queue_stack_map_free(struct bpf_map *map)
 {
        struct bpf_queue_stack *qs = bpf_queue_stack(map);
 
-       /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
-        * so the programs (can be more than one that used this map) were
-        * disconnected from events. Wait for outstanding critical sections in
-        * these programs to complete
-        */
-       synchronize_rcu();
-
        bpf_map_area_free(qs);
 }
 
@@ -262,6 +255,7 @@ static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
        return -EINVAL;
 }
 
+static int queue_map_btf_id;
 const struct bpf_map_ops queue_map_ops = {
        .map_alloc_check = queue_stack_map_alloc_check,
        .map_alloc = queue_stack_map_alloc,
@@ -273,8 +267,11 @@ const struct bpf_map_ops queue_map_ops = {
        .map_pop_elem = queue_map_pop_elem,
        .map_peek_elem = queue_map_peek_elem,
        .map_get_next_key = queue_stack_map_get_next_key,
+       .map_btf_name = "bpf_queue_stack",
+       .map_btf_id = &queue_map_btf_id,
 };
 
+static int stack_map_btf_id;
 const struct bpf_map_ops stack_map_ops = {
        .map_alloc_check = queue_stack_map_alloc_check,
        .map_alloc = queue_stack_map_alloc,
@@ -286,4 +283,6 @@ const struct bpf_map_ops stack_map_ops = {
        .map_pop_elem = stack_map_pop_elem,
        .map_peek_elem = stack_map_peek_elem,
        .map_get_next_key = queue_stack_map_get_next_key,
+       .map_btf_name = "bpf_queue_stack",
+       .map_btf_id = &stack_map_btf_id,
 };
index 21cde24..90b29c5 100644 (file)
@@ -20,11 +20,14 @@ static struct reuseport_array *reuseport_array(struct bpf_map *map)
 /* The caller must hold the reuseport_lock */
 void bpf_sk_reuseport_detach(struct sock *sk)
 {
-       struct sock __rcu **socks;
+       uintptr_t sk_user_data;
 
        write_lock_bh(&sk->sk_callback_lock);
-       socks = sk->sk_user_data;
-       if (socks) {
+       sk_user_data = (uintptr_t)sk->sk_user_data;
+       if (sk_user_data & SK_USER_DATA_BPF) {
+               struct sock __rcu **socks;
+
+               socks = (void *)(sk_user_data & SK_USER_DATA_PTRMASK);
                WRITE_ONCE(sk->sk_user_data, NULL);
                /*
                 * Do not move this NULL assignment outside of
@@ -96,8 +99,6 @@ static void reuseport_array_free(struct bpf_map *map)
        struct sock *sk;
        u32 i;
 
-       synchronize_rcu();
-
        /*
         * ops->map_*_elem() will not be able to access this
         * array now. Hence, this function only races with
@@ -252,6 +253,7 @@ int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
        struct sock *free_osk = NULL, *osk, *nsk;
        struct sock_reuseport *reuse;
        u32 index = *(u32 *)key;
+       uintptr_t sk_user_data;
        struct socket *socket;
        int err, fd;
 
@@ -305,7 +307,9 @@ int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
        if (err)
                goto put_file_unlock;
 
-       WRITE_ONCE(nsk->sk_user_data, &array->ptrs[index]);
+       sk_user_data = (uintptr_t)&array->ptrs[index] | SK_USER_DATA_NOCOPY |
+               SK_USER_DATA_BPF;
+       WRITE_ONCE(nsk->sk_user_data, (void *)sk_user_data);
        rcu_assign_pointer(array->ptrs[index], nsk);
        free_osk = osk;
        err = 0;
@@ -345,6 +349,7 @@ static int reuseport_array_get_next_key(struct bpf_map *map, void *key,
        return 0;
 }
 
+static int reuseport_array_map_btf_id;
 const struct bpf_map_ops reuseport_array_ops = {
        .map_alloc_check = reuseport_array_alloc_check,
        .map_alloc = reuseport_array_alloc,
@@ -352,4 +357,6 @@ const struct bpf_map_ops reuseport_array_ops = {
        .map_lookup_elem = reuseport_array_lookup_elem,
        .map_get_next_key = reuseport_array_get_next_key,
        .map_delete_elem = reuseport_array_delete_elem,
+       .map_btf_name = "reuseport_array",
+       .map_btf_id = &reuseport_array_map_btf_id,
 };
index 180414b..002f8a5 100644 (file)
@@ -132,15 +132,6 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node)
 {
        struct bpf_ringbuf *rb;
 
-       if (!data_sz || !PAGE_ALIGNED(data_sz))
-               return ERR_PTR(-EINVAL);
-
-#ifdef CONFIG_64BIT
-       /* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */
-       if (data_sz > RINGBUF_MAX_DATA_SZ)
-               return ERR_PTR(-E2BIG);
-#endif
-
        rb = bpf_ringbuf_area_alloc(data_sz, numa_node);
        if (!rb)
                return ERR_PTR(-ENOMEM);
@@ -166,9 +157,16 @@ static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)
                return ERR_PTR(-EINVAL);
 
        if (attr->key_size || attr->value_size ||
-           attr->max_entries == 0 || !PAGE_ALIGNED(attr->max_entries))
+           !is_power_of_2(attr->max_entries) ||
+           !PAGE_ALIGNED(attr->max_entries))
                return ERR_PTR(-EINVAL);
 
+#ifdef CONFIG_64BIT
+       /* on 32-bit arch, it's impossible to overflow record's hdr->pgoff */
+       if (attr->max_entries > RINGBUF_MAX_DATA_SZ)
+               return ERR_PTR(-E2BIG);
+#endif
+
        rb_map = kzalloc(sizeof(*rb_map), GFP_USER);
        if (!rb_map)
                return ERR_PTR(-ENOMEM);
@@ -215,13 +213,6 @@ static void ringbuf_map_free(struct bpf_map *map)
 {
        struct bpf_ringbuf_map *rb_map;
 
-       /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
-        * so the programs (can be more than one that used this map) were
-        * disconnected from events. Wait for outstanding critical sections in
-        * these programs to complete
-        */
-       synchronize_rcu();
-
        rb_map = container_of(map, struct bpf_ringbuf_map, map);
        bpf_ringbuf_free(rb_map->rb);
        kfree(rb_map);
@@ -294,6 +285,7 @@ static __poll_t ringbuf_map_poll(struct bpf_map *map, struct file *filp,
        return 0;
 }
 
+static int ringbuf_map_btf_id;
 const struct bpf_map_ops ringbuf_map_ops = {
        .map_alloc = ringbuf_map_alloc,
        .map_free = ringbuf_map_free,
@@ -303,6 +295,8 @@ const struct bpf_map_ops ringbuf_map_ops = {
        .map_update_elem = ringbuf_map_update_elem,
        .map_delete_elem = ringbuf_map_delete_elem,
        .map_get_next_key = ringbuf_map_get_next_key,
+       .map_btf_name = "bpf_ringbuf_map",
+       .map_btf_id = &ringbuf_map_btf_id,
 };
 
 /* Given pointer to ring buffer record metadata and struct bpf_ringbuf itself,
index 599488f..48d8e73 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/elf.h>
 #include <linux/pagemap.h>
 #include <linux/irq_work.h>
+#include <linux/btf_ids.h>
 #include "percpu_freelist.h"
 
 #define STACK_CREATE_FLAG_MASK                                 \
@@ -348,6 +349,44 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
        }
 }
 
+static struct perf_callchain_entry *
+get_callchain_entry_for_task(struct task_struct *task, u32 init_nr)
+{
+#ifdef CONFIG_STACKTRACE
+       struct perf_callchain_entry *entry;
+       int rctx;
+
+       entry = get_callchain_entry(&rctx);
+
+       if (!entry)
+               return NULL;
+
+       entry->nr = init_nr +
+               stack_trace_save_tsk(task, (unsigned long *)(entry->ip + init_nr),
+                                    sysctl_perf_event_max_stack - init_nr, 0);
+
+       /* stack_trace_save_tsk() works on unsigned long array, while
+        * perf_callchain_entry uses u64 array. For 32-bit systems, it is
+        * necessary to fix this mismatch.
+        */
+       if (__BITS_PER_LONG != 64) {
+               unsigned long *from = (unsigned long *) entry->ip;
+               u64 *to = entry->ip;
+               int i;
+
+               /* copy data from the end to avoid using extra buffer */
+               for (i = entry->nr - 1; i >= (int)init_nr; i--)
+                       to[i] = (u64)(from[i]);
+       }
+
+       put_callchain_entry(rctx);
+
+       return entry;
+#else /* CONFIG_STACKTRACE */
+       return NULL;
+#endif
+}
+
 BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
           u64, flags)
 {
@@ -448,8 +487,8 @@ const struct bpf_func_proto bpf_get_stackid_proto = {
        .arg3_type      = ARG_ANYTHING,
 };
 
-BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
-          u64, flags)
+static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
+                           void *buf, u32 size, u64 flags)
 {
        u32 init_nr, trace_nr, copy_len, elem_size, num_elem;
        bool user_build_id = flags & BPF_F_USER_BUILD_ID;
@@ -471,13 +510,22 @@ BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
        if (unlikely(size % elem_size))
                goto clear;
 
+       /* cannot get valid user stack for task without user_mode regs */
+       if (task && user && !user_mode(regs))
+               goto err_fault;
+
        num_elem = size / elem_size;
        if (sysctl_perf_event_max_stack < num_elem)
                init_nr = 0;
        else
                init_nr = sysctl_perf_event_max_stack - num_elem;
-       trace = get_perf_callchain(regs, init_nr, kernel, user,
-                                  sysctl_perf_event_max_stack, false, false);
+
+       if (kernel && task)
+               trace = get_callchain_entry_for_task(task, init_nr);
+       else
+               trace = get_perf_callchain(regs, init_nr, kernel, user,
+                                          sysctl_perf_event_max_stack,
+                                          false, false);
        if (unlikely(!trace))
                goto err_fault;
 
@@ -505,6 +553,12 @@ clear:
        return err;
 }
 
+BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
+          u64, flags)
+{
+       return __bpf_get_stack(regs, NULL, buf, size, flags);
+}
+
 const struct bpf_func_proto bpf_get_stack_proto = {
        .func           = bpf_get_stack,
        .gpl_only       = true,
@@ -515,6 +569,28 @@ const struct bpf_func_proto bpf_get_stack_proto = {
        .arg4_type      = ARG_ANYTHING,
 };
 
+BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
+          u32, size, u64, flags)
+{
+       struct pt_regs *regs = task_pt_regs(task);
+
+       return __bpf_get_stack(regs, task, buf, size, flags);
+}
+
+BTF_ID_LIST(bpf_get_task_stack_btf_ids)
+BTF_ID(struct, task_struct)
+
+const struct bpf_func_proto bpf_get_task_stack_proto = {
+       .func           = bpf_get_task_stack,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_BTF_ID,
+       .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
+       .arg3_type      = ARG_CONST_SIZE_OR_ZERO,
+       .arg4_type      = ARG_ANYTHING,
+       .btf_id         = bpf_get_task_stack_btf_ids,
+};
+
 /* Called from eBPF program */
 static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
 {
@@ -604,15 +680,13 @@ static void stack_map_free(struct bpf_map *map)
 {
        struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
 
-       /* wait for bpf programs to complete before freeing stack map */
-       synchronize_rcu();
-
        bpf_map_area_free(smap->elems);
        pcpu_freelist_destroy(&smap->freelist);
        bpf_map_area_free(smap);
        put_callchain_buffers();
 }
 
+static int stack_trace_map_btf_id;
 const struct bpf_map_ops stack_trace_map_ops = {
        .map_alloc = stack_map_alloc,
        .map_free = stack_map_free,
@@ -621,6 +695,8 @@ const struct bpf_map_ops stack_trace_map_ops = {
        .map_update_elem = stack_map_update_elem,
        .map_delete_elem = stack_map_delete_elem,
        .map_check_btf = map_check_no_btf,
+       .map_btf_name = "bpf_stack_map",
+       .map_btf_id = &stack_trace_map_btf_id,
 };
 
 static int __init stack_map_init(void)
index 8da1599..d07417d 100644 (file)
@@ -1981,6 +1981,7 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
        case BPF_PROG_TYPE_CGROUP_SOCK:
                switch (expected_attach_type) {
                case BPF_CGROUP_INET_SOCK_CREATE:
+               case BPF_CGROUP_INET_SOCK_RELEASE:
                case BPF_CGROUP_INET4_POST_BIND:
                case BPF_CGROUP_INET6_POST_BIND:
                        return 0;
@@ -2021,6 +2022,10 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
                default:
                        return -EINVAL;
                }
+       case BPF_PROG_TYPE_SK_LOOKUP:
+               if (expected_attach_type == BPF_SK_LOOKUP)
+                       return 0;
+               return -EINVAL;
        case BPF_PROG_TYPE_EXT:
                if (expected_attach_type)
                        return -EINVAL;
@@ -2121,7 +2126,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
            !bpf_capable())
                return -EPERM;
 
-       if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN))
+       if (is_net_admin_prog_type(type) && !capable(CAP_NET_ADMIN) && !capable(CAP_SYS_ADMIN))
                return -EPERM;
        if (is_perfmon_prog_type(type) && !perfmon_capable())
                return -EPERM;
@@ -2755,6 +2760,7 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
        case BPF_PROG_TYPE_CGROUP_SOCK:
        case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
        case BPF_PROG_TYPE_CGROUP_SOCKOPT:
+       case BPF_PROG_TYPE_SK_LOOKUP:
                return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
        case BPF_PROG_TYPE_CGROUP_SKB:
                if (!capable(CAP_NET_ADMIN))
@@ -2779,6 +2785,7 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type)
                return BPF_PROG_TYPE_CGROUP_SKB;
                break;
        case BPF_CGROUP_INET_SOCK_CREATE:
+       case BPF_CGROUP_INET_SOCK_RELEASE:
        case BPF_CGROUP_INET4_POST_BIND:
        case BPF_CGROUP_INET6_POST_BIND:
                return BPF_PROG_TYPE_CGROUP_SOCK;
@@ -2815,6 +2822,8 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type)
                return BPF_PROG_TYPE_CGROUP_SOCKOPT;
        case BPF_TRACE_ITER:
                return BPF_PROG_TYPE_TRACING;
+       case BPF_SK_LOOKUP:
+               return BPF_PROG_TYPE_SK_LOOKUP;
        default:
                return BPF_PROG_TYPE_UNSPEC;
        }
@@ -2893,13 +2902,11 @@ static int bpf_prog_detach(const union bpf_attr *attr)
        switch (ptype) {
        case BPF_PROG_TYPE_SK_MSG:
        case BPF_PROG_TYPE_SK_SKB:
-               return sock_map_get_from_fd(attr, NULL);
+               return sock_map_prog_detach(attr, ptype);
        case BPF_PROG_TYPE_LIRC_MODE2:
                return lirc_prog_detach(attr);
        case BPF_PROG_TYPE_FLOW_DISSECTOR:
-               if (!capable(CAP_NET_ADMIN))
-                       return -EPERM;
-               return netns_bpf_prog_detach(attr);
+               return netns_bpf_prog_detach(attr, ptype);
        case BPF_PROG_TYPE_CGROUP_DEVICE:
        case BPF_PROG_TYPE_CGROUP_SKB:
        case BPF_PROG_TYPE_CGROUP_SOCK:
@@ -2929,6 +2936,7 @@ static int bpf_prog_query(const union bpf_attr *attr,
        case BPF_CGROUP_INET_INGRESS:
        case BPF_CGROUP_INET_EGRESS:
        case BPF_CGROUP_INET_SOCK_CREATE:
+       case BPF_CGROUP_INET_SOCK_RELEASE:
        case BPF_CGROUP_INET4_BIND:
        case BPF_CGROUP_INET6_BIND:
        case BPF_CGROUP_INET4_POST_BIND:
@@ -2952,6 +2960,7 @@ static int bpf_prog_query(const union bpf_attr *attr,
        case BPF_LIRC_MODE2:
                return lirc_prog_query(attr, uattr);
        case BPF_FLOW_DISSECTOR:
+       case BPF_SK_LOOKUP:
                return netns_bpf_prog_query(attr, uattr);
        default:
                return -EINVAL;
@@ -3139,7 +3148,8 @@ static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
        return NULL;
 }
 
-static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
+static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
+                                             const struct cred *f_cred)
 {
        const struct bpf_map *map;
        struct bpf_insn *insns;
@@ -3165,7 +3175,7 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
                    code == (BPF_JMP | BPF_CALL_ARGS)) {
                        if (code == (BPF_JMP | BPF_CALL_ARGS))
                                insns[i].code = BPF_JMP | BPF_CALL;
-                       if (!bpf_dump_raw_ok())
+                       if (!bpf_dump_raw_ok(f_cred))
                                insns[i].imm = 0;
                        continue;
                }
@@ -3221,7 +3231,8 @@ static int set_info_rec_size(struct bpf_prog_info *info)
        return 0;
 }
 
-static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
+static int bpf_prog_get_info_by_fd(struct file *file,
+                                  struct bpf_prog *prog,
                                   const union bpf_attr *attr,
                                   union bpf_attr __user *uattr)
 {
@@ -3290,11 +3301,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
                struct bpf_insn *insns_sanitized;
                bool fault;
 
-               if (prog->blinded && !bpf_dump_raw_ok()) {
+               if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) {
                        info.xlated_prog_insns = 0;
                        goto done;
                }
-               insns_sanitized = bpf_insn_prepare_dump(prog);
+               insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred);
                if (!insns_sanitized)
                        return -ENOMEM;
                uinsns = u64_to_user_ptr(info.xlated_prog_insns);
@@ -3328,7 +3339,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        }
 
        if (info.jited_prog_len && ulen) {
-               if (bpf_dump_raw_ok()) {
+               if (bpf_dump_raw_ok(file->f_cred)) {
                        uinsns = u64_to_user_ptr(info.jited_prog_insns);
                        ulen = min_t(u32, info.jited_prog_len, ulen);
 
@@ -3363,7 +3374,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        ulen = info.nr_jited_ksyms;
        info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
        if (ulen) {
-               if (bpf_dump_raw_ok()) {
+               if (bpf_dump_raw_ok(file->f_cred)) {
                        unsigned long ksym_addr;
                        u64 __user *user_ksyms;
                        u32 i;
@@ -3394,7 +3405,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        ulen = info.nr_jited_func_lens;
        info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
        if (ulen) {
-               if (bpf_dump_raw_ok()) {
+               if (bpf_dump_raw_ok(file->f_cred)) {
                        u32 __user *user_lens;
                        u32 func_len, i;
 
@@ -3451,7 +3462,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        else
                info.nr_jited_line_info = 0;
        if (info.nr_jited_line_info && ulen) {
-               if (bpf_dump_raw_ok()) {
+               if (bpf_dump_raw_ok(file->f_cred)) {
                        __u64 __user *user_linfo;
                        u32 i;
 
@@ -3497,7 +3508,8 @@ done:
        return 0;
 }
 
-static int bpf_map_get_info_by_fd(struct bpf_map *map,
+static int bpf_map_get_info_by_fd(struct file *file,
+                                 struct bpf_map *map,
                                  const union bpf_attr *attr,
                                  union bpf_attr __user *uattr)
 {
@@ -3540,7 +3552,8 @@ static int bpf_map_get_info_by_fd(struct bpf_map *map,
        return 0;
 }
 
-static int bpf_btf_get_info_by_fd(struct btf *btf,
+static int bpf_btf_get_info_by_fd(struct file *file,
+                                 struct btf *btf,
                                  const union bpf_attr *attr,
                                  union bpf_attr __user *uattr)
 {
@@ -3555,7 +3568,8 @@ static int bpf_btf_get_info_by_fd(struct btf *btf,
        return btf_get_info_by_fd(btf, attr, uattr);
 }
 
-static int bpf_link_get_info_by_fd(struct bpf_link *link,
+static int bpf_link_get_info_by_fd(struct file *file,
+                                 struct bpf_link *link,
                                  const union bpf_attr *attr,
                                  union bpf_attr __user *uattr)
 {
@@ -3608,15 +3622,15 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
                return -EBADFD;
 
        if (f.file->f_op == &bpf_prog_fops)
-               err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
+               err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr,
                                              uattr);
        else if (f.file->f_op == &bpf_map_fops)
-               err = bpf_map_get_info_by_fd(f.file->private_data, attr,
+               err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr,
                                             uattr);
        else if (f.file->f_op == &btf_fops)
-               err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr);
+               err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr);
        else if (f.file->f_op == &bpf_link_fops)
-               err = bpf_link_get_info_by_fd(f.file->private_data,
+               err = bpf_link_get_info_by_fd(f.file, f.file->private_data,
                                              attr, uattr);
        else
                err = -EINVAL;
@@ -3885,6 +3899,7 @@ static int link_create(union bpf_attr *attr)
                ret = tracing_bpf_link_attach(attr, prog);
                break;
        case BPF_PROG_TYPE_FLOW_DISSECTOR:
+       case BPF_PROG_TYPE_SK_LOOKUP:
                ret = netns_bpf_link_create(attr, prog);
                break;
        default:
index 4dbf2b6..2feecf0 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/fs.h>
 #include <linux/fdtable.h>
 #include <linux/filter.h>
+#include <linux/btf_ids.h>
 
 struct bpf_iter_seq_task_common {
        struct pid_namespace *ns;
@@ -312,7 +313,11 @@ static const struct seq_operations task_file_seq_ops = {
        .show   = task_file_seq_show,
 };
 
-static const struct bpf_iter_reg task_reg_info = {
+BTF_ID_LIST(btf_task_file_ids)
+BTF_ID(struct, task_struct)
+BTF_ID(struct, file)
+
+static struct bpf_iter_reg task_reg_info = {
        .target                 = "task",
        .seq_ops                = &task_seq_ops,
        .init_seq_private       = init_seq_pidns,
@@ -325,7 +330,7 @@ static const struct bpf_iter_reg task_reg_info = {
        },
 };
 
-static const struct bpf_iter_reg task_file_reg_info = {
+static struct bpf_iter_reg task_file_reg_info = {
        .target                 = "task_file",
        .seq_ops                = &task_file_seq_ops,
        .init_seq_private       = init_seq_pidns,
@@ -344,10 +349,13 @@ static int __init task_iter_init(void)
 {
        int ret;
 
+       task_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0];
        ret = bpf_iter_reg_target(&task_reg_info);
        if (ret)
                return ret;
 
+       task_file_reg_info.ctx_arg_info[0].btf_id = btf_task_file_ids[0];
+       task_file_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[1];
        return bpf_iter_reg_target(&task_file_reg_info);
 }
 late_initcall(task_iter_init);
index 34cde84..9a6703b 100644 (file)
@@ -399,8 +399,7 @@ static bool reg_type_not_null(enum bpf_reg_type type)
        return type == PTR_TO_SOCKET ||
                type == PTR_TO_TCP_SOCK ||
                type == PTR_TO_MAP_VALUE ||
-               type == PTR_TO_SOCK_COMMON ||
-               type == PTR_TO_BTF_ID;
+               type == PTR_TO_SOCK_COMMON;
 }
 
 static bool reg_type_may_be_null(enum bpf_reg_type type)
@@ -1351,6 +1350,19 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
        __mark_reg_not_init(env, regs + regno);
 }
 
+static void mark_btf_ld_reg(struct bpf_verifier_env *env,
+                           struct bpf_reg_state *regs, u32 regno,
+                           enum bpf_reg_type reg_type, u32 btf_id)
+{
+       if (reg_type == SCALAR_VALUE) {
+               mark_reg_unknown(env, regs, regno);
+               return;
+       }
+       mark_reg_known_zero(env, regs, regno);
+       regs[regno].type = PTR_TO_BTF_ID;
+       regs[regno].btf_id = btf_id;
+}
+
 #define DEF_NOT_SUBREG (0)
 static void init_reg_state(struct bpf_verifier_env *env,
                           struct bpf_func_state *state)
@@ -3182,19 +3194,68 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
        if (ret < 0)
                return ret;
 
-       if (atype == BPF_READ && value_regno >= 0) {
-               if (ret == SCALAR_VALUE) {
-                       mark_reg_unknown(env, regs, value_regno);
-                       return 0;
-               }
-               mark_reg_known_zero(env, regs, value_regno);
-               regs[value_regno].type = PTR_TO_BTF_ID;
-               regs[value_regno].btf_id = btf_id;
+       if (atype == BPF_READ && value_regno >= 0)
+               mark_btf_ld_reg(env, regs, value_regno, ret, btf_id);
+
+       return 0;
+}
+
+static int check_ptr_to_map_access(struct bpf_verifier_env *env,
+                                  struct bpf_reg_state *regs,
+                                  int regno, int off, int size,
+                                  enum bpf_access_type atype,
+                                  int value_regno)
+{
+       struct bpf_reg_state *reg = regs + regno;
+       struct bpf_map *map = reg->map_ptr;
+       const struct btf_type *t;
+       const char *tname;
+       u32 btf_id;
+       int ret;
+
+       if (!btf_vmlinux) {
+               verbose(env, "map_ptr access not supported without CONFIG_DEBUG_INFO_BTF\n");
+               return -ENOTSUPP;
+       }
+
+       if (!map->ops->map_btf_id || !*map->ops->map_btf_id) {
+               verbose(env, "map_ptr access not supported for map type %d\n",
+                       map->map_type);
+               return -ENOTSUPP;
        }
 
+       t = btf_type_by_id(btf_vmlinux, *map->ops->map_btf_id);
+       tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+
+       if (!env->allow_ptr_to_map_access) {
+               verbose(env,
+                       "%s access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN\n",
+                       tname);
+               return -EPERM;
+       }
+
+       if (off < 0) {
+               verbose(env, "R%d is %s invalid negative access: off=%d\n",
+                       regno, tname, off);
+               return -EACCES;
+       }
+
+       if (atype != BPF_READ) {
+               verbose(env, "only read from %s is supported\n", tname);
+               return -EACCES;
+       }
+
+       ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id);
+       if (ret < 0)
+               return ret;
+
+       if (value_regno >= 0)
+               mark_btf_ld_reg(env, regs, value_regno, ret, btf_id);
+
        return 0;
 }
 
+
 /* check whether memory at (regno + off) is accessible for t = (read | write)
  * if t==write, value_regno is a register which value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
@@ -3363,6 +3424,9 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
        } else if (reg->type == PTR_TO_BTF_ID) {
                err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
                                              value_regno);
+       } else if (reg->type == CONST_PTR_TO_MAP) {
+               err = check_ptr_to_map_access(env, regs, regno, off, size, t,
+                                             value_regno);
        } else {
                verbose(env, "R%d invalid mem access '%s'\n", regno,
                        reg_type_str[reg->type]);
@@ -3735,12 +3799,14 @@ static int int_ptr_type_to_size(enum bpf_arg_type type)
        return -EINVAL;
 }
 
-static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
-                         enum bpf_arg_type arg_type,
-                         struct bpf_call_arg_meta *meta)
+static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
+                         struct bpf_call_arg_meta *meta,
+                         const struct bpf_func_proto *fn)
 {
+       u32 regno = BPF_REG_1 + arg;
        struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
        enum bpf_reg_type expected_type, type = reg->type;
+       enum bpf_arg_type arg_type = fn->arg_type[arg];
        int err = 0;
 
        if (arg_type == ARG_DONTCARE)
@@ -3812,17 +3878,28 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
                        }
                        meta->ref_obj_id = reg->ref_obj_id;
                }
-       } else if (arg_type == ARG_PTR_TO_SOCKET) {
+       } else if (arg_type == ARG_PTR_TO_SOCKET ||
+                  arg_type == ARG_PTR_TO_SOCKET_OR_NULL) {
                expected_type = PTR_TO_SOCKET;
-               if (type != expected_type)
-                       goto err_type;
+               if (!(register_is_null(reg) &&
+                     arg_type == ARG_PTR_TO_SOCKET_OR_NULL)) {
+                       if (type != expected_type)
+                               goto err_type;
+               }
        } else if (arg_type == ARG_PTR_TO_BTF_ID) {
                expected_type = PTR_TO_BTF_ID;
                if (type != expected_type)
                        goto err_type;
-               if (reg->btf_id != meta->btf_id) {
-                       verbose(env, "Helper has type %s got %s in R%d\n",
-                               kernel_type_name(meta->btf_id),
+               if (!fn->check_btf_id) {
+                       if (reg->btf_id != meta->btf_id) {
+                               verbose(env, "Helper has type %s got %s in R%d\n",
+                                       kernel_type_name(meta->btf_id),
+                                       kernel_type_name(reg->btf_id), regno);
+
+                               return -EACCES;
+                       }
+               } else if (!fn->check_btf_id(reg->btf_id, arg)) {
+                       verbose(env, "Helper does not support %s in R%d\n",
                                kernel_type_name(reg->btf_id), regno);
 
                        return -EACCES;
@@ -4644,10 +4721,12 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
        meta.func_id = func_id;
        /* check args */
        for (i = 0; i < 5; i++) {
-               err = btf_resolve_helper_id(&env->log, fn, i);
-               if (err > 0)
-                       meta.btf_id = err;
-               err = check_func_arg(env, BPF_REG_1 + i, fn->arg_type[i], &meta);
+               if (!fn->check_btf_id) {
+                       err = btf_resolve_helper_id(&env->log, fn, i);
+                       if (err > 0)
+                               meta.btf_id = err;
+               }
+               err = check_func_arg(env, i, &meta, fn);
                if (err)
                        return err;
        }
@@ -4750,6 +4829,18 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
                regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL;
                regs[BPF_REG_0].id = ++env->id_gen;
                regs[BPF_REG_0].mem_size = meta.mem_size;
+       } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL) {
+               int ret_btf_id;
+
+               mark_reg_known_zero(env, regs, BPF_REG_0);
+               regs[BPF_REG_0].type = PTR_TO_BTF_ID_OR_NULL;
+               ret_btf_id = *fn->ret_btf_id;
+               if (ret_btf_id == 0) {
+                       verbose(env, "invalid return type %d of func %s#%d\n",
+                               fn->ret_type, func_id_name(func_id), func_id);
+                       return -EINVAL;
+               }
+               regs[BPF_REG_0].btf_id = ret_btf_id;
        } else {
                verbose(env, "unknown return type %d of func %s#%d\n",
                        fn->ret_type, func_id_name(func_id), func_id);
@@ -4776,7 +4867,9 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
        if (err)
                return err;
 
-       if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
+       if ((func_id == BPF_FUNC_get_stack ||
+            func_id == BPF_FUNC_get_task_stack) &&
+           !env->prog->has_callchain_buf) {
                const char *err_str;
 
 #ifdef CONFIG_PERF_EVENTS
@@ -5031,6 +5124,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 
        if (BPF_CLASS(insn->code) != BPF_ALU64) {
                /* 32-bit ALU ops on pointers produce (meaningless) scalars */
+               if (opcode == BPF_SUB && env->allow_ptr_leaks) {
+                       __mark_reg_unknown(env, dst_reg);
+                       return 0;
+               }
+
                verbose(env,
                        "R%d 32-bit pointer arithmetic prohibited\n",
                        dst);
@@ -7260,6 +7358,9 @@ static int check_return_code(struct bpf_verifier_env *env)
                        return -ENOTSUPP;
                }
                break;
+       case BPF_PROG_TYPE_SK_LOOKUP:
+               range = tnum_range(SK_DROP, SK_PASS);
+               break;
        case BPF_PROG_TYPE_EXT:
                /* freplace program can return anything as its return value
                 * depends on the to-be-replaced kernel func or bpf program.
@@ -9801,7 +9902,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
        int i, j, subprog_start, subprog_end = 0, len, subprog;
        struct bpf_insn *insn;
        void *old_bpf_func;
-       int err;
+       int err, num_exentries;
 
        if (env->subprog_cnt <= 1)
                return 0;
@@ -9876,6 +9977,14 @@ static int jit_subprogs(struct bpf_verifier_env *env)
                func[i]->aux->nr_linfo = prog->aux->nr_linfo;
                func[i]->aux->jited_linfo = prog->aux->jited_linfo;
                func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
+               num_exentries = 0;
+               insn = func[i]->insnsi;
+               for (j = 0; j < func[i]->len; j++, insn++) {
+                       if (BPF_CLASS(insn->code) == BPF_LDX &&
+                           BPF_MODE(insn->code) == BPF_PROBE_MEM)
+                               num_exentries++;
+               }
+               func[i]->aux->num_exentries = num_exentries;
                func[i] = bpf_int_jit_compile(func[i]);
                if (!func[i]->jited) {
                        err = -ENOTSUPP;
@@ -10946,6 +11055,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
                env->strict_alignment = false;
 
        env->allow_ptr_leaks = bpf_allow_ptr_leaks();
+       env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access();
        env->bypass_spec_v1 = bpf_bypass_spec_v1();
        env->bypass_spec_v4 = bpf_bypass_spec_v4();
        env->bpf_capable = bpf_capable();
index 1ea181a..dd24774 100644 (file)
@@ -6439,18 +6439,8 @@ void cgroup_sk_alloc_disable(void)
 
 void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
 {
-       if (cgroup_sk_alloc_disabled)
-               return;
-
-       /* Socket clone path */
-       if (skcd->val) {
-               /*
-                * We might be cloning a socket which is left in an empty
-                * cgroup and the cgroup might have already been rmdir'd.
-                * Don't use cgroup_get_live().
-                */
-               cgroup_get(sock_cgroup_ptr(skcd));
-               cgroup_bpf_get(sock_cgroup_ptr(skcd));
+       if (cgroup_sk_alloc_disabled) {
+               skcd->no_refcnt = 1;
                return;
        }
 
@@ -6475,10 +6465,27 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
        rcu_read_unlock();
 }
 
+void cgroup_sk_clone(struct sock_cgroup_data *skcd)
+{
+       if (skcd->val) {
+               if (skcd->no_refcnt)
+                       return;
+               /*
+                * We might be cloning a socket which is left in an empty
+                * cgroup and the cgroup might have already been rmdir'd.
+                * Don't use cgroup_get_live().
+                */
+               cgroup_get(sock_cgroup_ptr(skcd));
+               cgroup_bpf_get(sock_cgroup_ptr(skcd));
+       }
+}
+
 void cgroup_sk_free(struct sock_cgroup_data *skcd)
 {
        struct cgroup *cgrp = sock_cgroup_ptr(skcd);
 
+       if (skcd->no_refcnt)
+               return;
        cgroup_bpf_put(cgrp);
        cgroup_put(cgrp);
 }
index ccc0f98..9e59347 100644 (file)
@@ -169,18 +169,18 @@ int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
 {
        int err;
 
-       err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+       err = copy_from_kernel_nofault(bpt->saved_instr, (char *)bpt->bpt_addr,
                                BREAK_INSTR_SIZE);
        if (err)
                return err;
-       err = probe_kernel_write((char *)bpt->bpt_addr,
+       err = copy_to_kernel_nofault((char *)bpt->bpt_addr,
                                 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
        return err;
 }
 
 int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
 {
-       return probe_kernel_write((char *)bpt->bpt_addr,
+       return copy_to_kernel_nofault((char *)bpt->bpt_addr,
                                  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
 }
 
@@ -587,6 +587,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
                arch_kgdb_ops.disable_hw_break(regs);
 
 acquirelock:
+       rcu_read_lock();
        /*
         * Interrupts will be restored by the 'trap return' code, except when
         * single stepping.
@@ -646,6 +647,7 @@ return_normal:
                        atomic_dec(&slaves_in_kgdb);
                        dbg_touch_watchdogs();
                        local_irq_restore(flags);
+                       rcu_read_unlock();
                        return 0;
                }
                cpu_relax();
@@ -664,6 +666,7 @@ return_normal:
                raw_spin_unlock(&dbg_master_lock);
                dbg_touch_watchdogs();
                local_irq_restore(flags);
+               rcu_read_unlock();
 
                goto acquirelock;
        }
@@ -787,6 +790,7 @@ kgdb_restore:
        raw_spin_unlock(&dbg_master_lock);
        dbg_touch_watchdogs();
        local_irq_restore(flags);
+       rcu_read_unlock();
 
        return kgdb_info[cpu].ret_state;
 }
index 4b280fc..61774ae 100644 (file)
@@ -247,7 +247,7 @@ char *kgdb_mem2hex(char *mem, char *buf, int count)
         */
        tmp = buf + count;
 
-       err = probe_kernel_read(tmp, mem, count);
+       err = copy_from_kernel_nofault(tmp, mem, count);
        if (err)
                return NULL;
        while (count > 0) {
@@ -283,7 +283,7 @@ int kgdb_hex2mem(char *buf, char *mem, int count)
                *tmp_raw |= hex_to_bin(*tmp_hex--) << 4;
        }
 
-       return probe_kernel_write(mem, tmp_raw, count);
+       return copy_to_kernel_nofault(mem, tmp_raw, count);
 }
 
 /*
@@ -335,7 +335,7 @@ static int kgdb_ebin2mem(char *buf, char *mem, int count)
                size++;
        }
 
-       return probe_kernel_write(mem, c, size);
+       return copy_to_kernel_nofault(mem, c, size);
 }
 
 #if DBG_MAX_REG_NUM > 0
index 924bc92..683a799 100644 (file)
@@ -542,6 +542,44 @@ static int kdb_search_string(char *searched, char *searchfor)
        return 0;
 }
 
+static void kdb_msg_write(const char *msg, int msg_len)
+{
+       struct console *c;
+
+       if (msg_len == 0)
+               return;
+
+       if (dbg_io_ops) {
+               const char *cp = msg;
+               int len = msg_len;
+
+               while (len--) {
+                       dbg_io_ops->write_char(*cp);
+                       cp++;
+               }
+       }
+
+       for_each_console(c) {
+               if (!(c->flags & CON_ENABLED))
+                       continue;
+               if (c == dbg_io_ops->cons)
+                       continue;
+               /*
+                * Set oops_in_progress to encourage the console drivers to
+                * disregard their internal spin locks: in the current calling
+                * context the risk of deadlock is a bigger problem than risks
+                * due to re-entering the console driver. We operate directly on
+                * oops_in_progress rather than using bust_spinlocks() because
+                * the calls bust_spinlocks() makes on exit are not appropriate
+                * for this calling context.
+                */
+               ++oops_in_progress;
+               c->write(c, msg, msg_len);
+               --oops_in_progress;
+               touch_nmi_watchdog();
+       }
+}
+
 int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
 {
        int diag;
@@ -553,7 +591,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
        int this_cpu, old_cpu;
        char *cp, *cp2, *cphold = NULL, replaced_byte = ' ';
        char *moreprompt = "more> ";
-       struct console *c;
        unsigned long uninitialized_var(flags);
 
        /* Serialize kdb_printf if multiple cpus try to write at once.
@@ -687,22 +724,11 @@ kdb_printit:
         */
        retlen = strlen(kdb_buffer);
        cp = (char *) printk_skip_headers(kdb_buffer);
-       if (!dbg_kdb_mode && kgdb_connected) {
+       if (!dbg_kdb_mode && kgdb_connected)
                gdbstub_msg_write(cp, retlen - (cp - kdb_buffer));
-       } else {
-               if (dbg_io_ops && !dbg_io_ops->is_console) {
-                       len = retlen - (cp - kdb_buffer);
-                       cp2 = cp;
-                       while (len--) {
-                               dbg_io_ops->write_char(*cp2);
-                               cp2++;
-                       }
-               }
-               for_each_console(c) {
-                       c->write(c, cp, retlen - (cp - kdb_buffer));
-                       touch_nmi_watchdog();
-               }
-       }
+       else
+               kdb_msg_write(cp, retlen - (cp - kdb_buffer));
+
        if (logging) {
                saved_loglevel = console_loglevel;
                console_loglevel = CONSOLE_LOGLEVEL_SILENT;
@@ -751,19 +777,7 @@ kdb_printit:
                        moreprompt = "more> ";
 
                kdb_input_flush();
-
-               if (dbg_io_ops && !dbg_io_ops->is_console) {
-                       len = strlen(moreprompt);
-                       cp = moreprompt;
-                       while (len--) {
-                               dbg_io_ops->write_char(*cp);
-                               cp++;
-                       }
-               }
-               for_each_console(c) {
-                       c->write(c, moreprompt, strlen(moreprompt));
-                       touch_nmi_watchdog();
-               }
+               kdb_msg_write(moreprompt, strlen(moreprompt));
 
                if (logging)
                        printk("%s", moreprompt);
index ec19056..5c79490 100644 (file)
@@ -2326,7 +2326,8 @@ void kdb_ps1(const struct task_struct *p)
        int cpu;
        unsigned long tmp;
 
-       if (!p || probe_kernel_read(&tmp, (char *)p, sizeof(unsigned long)))
+       if (!p ||
+           copy_from_kernel_nofault(&tmp, (char *)p, sizeof(unsigned long)))
                return;
 
        cpu = kdb_process_cpu(p);
index b8e6306..004c5b6 100644 (file)
@@ -325,7 +325,7 @@ char *kdb_strdup(const char *str, gfp_t type)
  */
 int kdb_getarea_size(void *res, unsigned long addr, size_t size)
 {
-       int ret = probe_kernel_read((char *)res, (char *)addr, size);
+       int ret = copy_from_kernel_nofault((char *)res, (char *)addr, size);
        if (ret) {
                if (!KDB_STATE(SUPPRESS)) {
                        kdb_printf("kdb_getarea: Bad address 0x%lx\n", addr);
@@ -350,7 +350,7 @@ int kdb_getarea_size(void *res, unsigned long addr, size_t size)
  */
 int kdb_putarea_size(unsigned long addr, void *res, size_t size)
 {
-       int ret = probe_kernel_read((char *)addr, (char *)res, size);
+       int ret = copy_from_kernel_nofault((char *)addr, (char *)res, size);
        if (ret) {
                if (!KDB_STATE(SUPPRESS)) {
                        kdb_printf("kdb_putarea: Bad address 0x%lx\n", addr);
@@ -624,7 +624,8 @@ char kdb_task_state_char (const struct task_struct *p)
        char state;
        unsigned long tmp;
 
-       if (!p || probe_kernel_read(&tmp, (char *)p, sizeof(unsigned long)))
+       if (!p ||
+           copy_from_kernel_nofault(&tmp, (char *)p, sizeof(unsigned long)))
                return 'E';
 
        cpu = kdb_process_cpu(p);
index d006668..1da3f44 100644 (file)
@@ -71,20 +71,21 @@ config SWIOTLB
 # in the pagetables
 #
 config DMA_NONCOHERENT_MMAP
+       default y if !MMU
        bool
 
-config DMA_REMAP
-       depends on MMU
+config DMA_COHERENT_POOL
        select GENERIC_ALLOCATOR
-       select DMA_NONCOHERENT_MMAP
        bool
 
-config DMA_COHERENT_POOL
+config DMA_REMAP
        bool
-       select DMA_REMAP
+       depends on MMU
+       select DMA_NONCOHERENT_MMAP
 
 config DMA_DIRECT_REMAP
        bool
+       select DMA_REMAP
        select DMA_COHERENT_POOL
 
 config DMA_CMA
index 0a4881e..95866b6 100644 (file)
@@ -109,14 +109,15 @@ static inline bool dma_should_free_from_pool(struct device *dev,
        return false;
 }
 
-struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
+static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                gfp_t gfp, unsigned long attrs)
 {
-       size_t alloc_size = PAGE_ALIGN(size);
        int node = dev_to_node(dev);
        struct page *page = NULL;
        u64 phys_limit;
 
+       WARN_ON_ONCE(!PAGE_ALIGNED(size));
+
        if (attrs & DMA_ATTR_NO_WARN)
                gfp |= __GFP_NOWARN;
 
@@ -124,14 +125,14 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
        gfp &= ~__GFP_ZERO;
        gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                                           &phys_limit);
-       page = dma_alloc_contiguous(dev, alloc_size, gfp);
+       page = dma_alloc_contiguous(dev, size, gfp);
        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-               dma_free_contiguous(dev, page, alloc_size);
+               dma_free_contiguous(dev, page, size);
                page = NULL;
        }
 again:
        if (!page)
-               page = alloc_pages_node(node, gfp, get_order(alloc_size));
+               page = alloc_pages_node(node, gfp, get_order(size));
        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                dma_free_contiguous(dev, page, size);
                page = NULL;
@@ -157,9 +158,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 {
        struct page *page;
        void *ret;
+       int err;
+
+       size = PAGE_ALIGN(size);
 
        if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
-               ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp);
+               ret = dma_alloc_from_pool(dev, size, &page, gfp);
                if (!ret)
                        return NULL;
                goto done;
@@ -183,14 +187,20 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
             dma_alloc_need_uncached(dev, attrs)) ||
            (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
                /* remove any dirty cache lines on the kernel alias */
-               arch_dma_prep_coherent(page, PAGE_ALIGN(size));
+               arch_dma_prep_coherent(page, size);
 
                /* create a coherent mapping */
-               ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size),
+               ret = dma_common_contiguous_remap(page, size,
                                dma_pgprot(dev, PAGE_KERNEL, attrs),
                                __builtin_return_address(0));
                if (!ret)
                        goto out_free_pages;
+               if (force_dma_unencrypted(dev)) {
+                       err = set_memory_decrypted((unsigned long)ret,
+                                                  1 << get_order(size));
+                       if (err)
+                               goto out_free_pages;
+               }
                memset(ret, 0, size);
                goto done;
        }
@@ -207,8 +217,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
        }
 
        ret = page_address(page);
-       if (force_dma_unencrypted(dev))
-               set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
+       if (force_dma_unencrypted(dev)) {
+               err = set_memory_decrypted((unsigned long)ret,
+                                          1 << get_order(size));
+               if (err)
+                       goto out_free_pages;
+       }
 
        memset(ret, 0, size);
 
@@ -217,7 +231,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
                arch_dma_prep_coherent(page, size);
                ret = arch_dma_set_uncached(ret, size);
                if (IS_ERR(ret))
-                       goto out_free_pages;
+                       goto out_encrypt_pages;
        }
 done:
        if (force_dma_unencrypted(dev))
@@ -225,6 +239,15 @@ done:
        else
                *dma_handle = phys_to_dma(dev, page_to_phys(page));
        return ret;
+
+out_encrypt_pages:
+       if (force_dma_unencrypted(dev)) {
+               err = set_memory_encrypted((unsigned long)page_address(page),
+                                          1 << get_order(size));
+               /* If memory cannot be re-encrypted, it must be leaked */
+               if (err)
+                       return NULL;
+       }
 out_free_pages:
        dma_free_contiguous(dev, page, size);
        return NULL;
@@ -459,7 +482,6 @@ int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
        return ret;
 }
 
-#ifdef CONFIG_MMU
 bool dma_direct_can_mmap(struct device *dev)
 {
        return dev_is_dma_coherent(dev) ||
@@ -485,19 +507,6 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
        return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                        user_count << PAGE_SHIFT, vma->vm_page_prot);
 }
-#else /* CONFIG_MMU */
-bool dma_direct_can_mmap(struct device *dev)
-{
-       return false;
-}
-
-int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
-               void *cpu_addr, dma_addr_t dma_addr, size_t size,
-               unsigned long attrs)
-{
-       return -ENXIO;
-}
-#endif /* CONFIG_MMU */
 
 int dma_direct_supported(struct device *dev, u64 mask)
 {
@@ -530,3 +539,9 @@ size_t dma_direct_max_mapping_size(struct device *dev)
                return swiotlb_max_mapping_size(dev);
        return SIZE_MAX;
 }
+
+bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+       return !dev_is_dma_coherent(dev) ||
+               is_swiotlb_buffer(dma_to_phys(dev, dma_addr));
+}
index 98e3d87..a8c18c9 100644 (file)
@@ -397,6 +397,16 @@ size_t dma_max_mapping_size(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dma_max_mapping_size);
 
+bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
+{
+       const struct dma_map_ops *ops = get_dma_ops(dev);
+
+       if (dma_is_direct(ops))
+               return dma_direct_need_sync(dev, dma_addr);
+       return ops->sync_single_for_cpu || ops->sync_single_for_device;
+}
+EXPORT_SYMBOL_GPL(dma_need_sync);
+
 unsigned long dma_get_merge_boundary(struct device *dev)
 {
        const struct dma_map_ops *ops = get_dma_ops(dev);
index 35bb51c..39ca26f 100644 (file)
@@ -175,10 +175,9 @@ static int __init dma_atomic_pool_init(void)
         * sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER-1.
         */
        if (!atomic_pool_size) {
-               atomic_pool_size = max(totalram_pages() >> PAGE_SHIFT, 1UL) *
-                                       SZ_128K;
-               atomic_pool_size = min_t(size_t, atomic_pool_size,
-                                        1 << (PAGE_SHIFT + MAX_ORDER-1));
+               unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
+               pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
+               atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
        }
        INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);
 
@@ -240,12 +239,16 @@ void *dma_alloc_from_pool(struct device *dev, size_t size,
        }
 
        val = gen_pool_alloc(pool, size);
-       if (val) {
+       if (likely(val)) {
                phys_addr_t phys = gen_pool_virt_to_phys(pool, val);
 
                *ret_page = pfn_to_page(__phys_to_pfn(phys));
                ptr = (void *)val;
                memset(ptr, 0, size);
+       } else {
+               WARN_ONCE(1, "DMA coherent pool depleted, increase size "
+                            "(recommended min coherent_pool=%zuK)\n",
+                         gen_pool_size(pool) >> 9);
        }
        if (gen_pool_avail(pool) < atomic_pool_size)
                schedule_work(&atomic_pool_work);
index e739a6e..78b23f0 100644 (file)
@@ -24,7 +24,8 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
 {
        void *vaddr;
 
-       vaddr = vmap(pages, size >> PAGE_SHIFT, VM_DMA_COHERENT, prot);
+       vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT,
+                    VM_DMA_COHERENT, prot);
        if (vaddr)
                find_vm_area(vaddr)->pages = pages;
        return vaddr;
@@ -37,7 +38,7 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
 void *dma_common_contiguous_remap(struct page *page, size_t size,
                        pgprot_t prot, const void *caller)
 {
-       int count = size >> PAGE_SHIFT;
+       int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page **pages;
        void *vaddr;
        int i;
index 334d48b..c6ce894 100644 (file)
@@ -149,7 +149,7 @@ void put_callchain_buffers(void)
        }
 }
 
-static struct perf_callchain_entry *get_callchain_entry(int *rctx)
+struct perf_callchain_entry *get_callchain_entry(int *rctx)
 {
        int cpu;
        struct callchain_cpus_entries *entries;
@@ -159,8 +159,10 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx)
                return NULL;
 
        entries = rcu_dereference(callchain_cpus_entries);
-       if (!entries)
+       if (!entries) {
+               put_recursion_context(this_cpu_ptr(callchain_recursion), *rctx);
                return NULL;
+       }
 
        cpu = smp_processor_id();
 
@@ -168,7 +170,7 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx)
                (*rctx * perf_callchain_entry__sizeof()));
 }
 
-static void
+void
 put_callchain_entry(int rctx)
 {
        put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
@@ -183,11 +185,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
        int rctx;
 
        entry = get_callchain_entry(&rctx);
-       if (rctx == -1)
-               return NULL;
-
        if (!entry)
-               goto exit_put;
+               return NULL;
 
        ctx.entry     = entry;
        ctx.max_stack = max_stack;
index 727150f..39226a0 100644 (file)
@@ -804,7 +804,6 @@ void __noreturn do_exit(long code)
        exit_task_namespaces(tsk);
        exit_task_work(tsk);
        exit_thread(tsk);
-       exit_umh(tsk);
 
        /*
         * Flush inherited counters to the parent - before the parent
@@ -1711,6 +1710,30 @@ Efault:
 }
 #endif
 
+/**
+ * thread_group_exited - check that a thread group has exited
+ * @pid: tgid of thread group to be checked.
+ *
+ * Test if the thread group represented by tgid has exited (all
+ * threads are zombies, dead or completely gone).
+ *
+ * Return: true if the thread group has exited. false otherwise.
+ */
+bool thread_group_exited(struct pid *pid)
+{
+       struct task_struct *task;
+       bool exited;
+
+       rcu_read_lock();
+       task = pid_task(pid, PIDTYPE_PID);
+       exited = !task ||
+               (READ_ONCE(task->exit_state) && thread_group_empty(task));
+       rcu_read_unlock();
+
+       return exited;
+}
+EXPORT_SYMBOL(thread_group_exited);
+
 __weak void abort(void)
 {
        BUG();
index 142b236..1b94965 100644 (file)
@@ -1787,22 +1787,18 @@ static void pidfd_show_fdinfo(struct seq_file *m, struct file *f)
  */
 static __poll_t pidfd_poll(struct file *file, struct poll_table_struct *pts)
 {
-       struct task_struct *task;
        struct pid *pid = file->private_data;
        __poll_t poll_flags = 0;
 
        poll_wait(file, &pid->wait_pidfd, pts);
 
-       rcu_read_lock();
-       task = pid_task(pid, PIDTYPE_PID);
        /*
         * Inform pollers only when the whole thread group exits.
         * If the thread group leader exits before all other threads in the
         * group, then poll(2) should block, similar to the wait(2) family.
         */
-       if (!task || (task->exit_state && thread_group_empty(task)))
+       if (thread_group_exited(pid))
                poll_flags = EPOLLIN | EPOLLRDNORM;
-       rcu_read_unlock();
 
        return poll_flags;
 }
@@ -1977,7 +1973,7 @@ static __latent_entropy struct task_struct *copy_process(
         * to stop root fork bombs.
         */
        retval = -EAGAIN;
-       if (nr_threads >= max_threads)
+       if (data_race(nr_threads >= max_threads))
                goto bad_fork_cleanup_count;
 
        delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
index 16c8c60..bb14e64 100644 (file)
@@ -644,19 +644,20 @@ static inline int kallsyms_for_perf(void)
  * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to
  * block even that).
  */
-int kallsyms_show_value(void)
+bool kallsyms_show_value(const struct cred *cred)
 {
        switch (kptr_restrict) {
        case 0:
                if (kallsyms_for_perf())
-                       return 1;
+                       return true;
        /* fallthrough */
        case 1:
-               if (has_capability_noaudit(current, CAP_SYSLOG))
-                       return 1;
+               if (security_capable(cred, &init_user_ns, CAP_SYSLOG,
+                                    CAP_OPT_NOAUDIT) == 0)
+                       return true;
        /* fallthrough */
        default:
-               return 0;
+               return false;
        }
 }
 
@@ -673,7 +674,11 @@ static int kallsyms_open(struct inode *inode, struct file *file)
                return -ENOMEM;
        reset_iter(iter, 0);
 
-       iter->show_value = kallsyms_show_value();
+       /*
+        * Instead of checking this on every s_show() call, cache
+        * the result here at open time.
+        */
+       iter->show_value = kallsyms_show_value(file->f_cred);
        return 0;
 }
 
index bb05fd5..09cc78d 100644 (file)
@@ -181,34 +181,19 @@ void kimage_file_post_load_cleanup(struct kimage *image)
 static int
 kimage_validate_signature(struct kimage *image)
 {
-       const char *reason;
        int ret;
 
        ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
                                           image->kernel_buf_len);
-       switch (ret) {
-       case 0:
-               break;
+       if (ret) {
 
-               /* Certain verification errors are non-fatal if we're not
-                * checking errors, provided we aren't mandating that there
-                * must be a valid signature.
-                */
-       case -ENODATA:
-               reason = "kexec of unsigned image";
-               goto decide;
-       case -ENOPKG:
-               reason = "kexec of image with unsupported crypto";
-               goto decide;
-       case -ENOKEY:
-               reason = "kexec of image with unavailable key";
-       decide:
                if (IS_ENABLED(CONFIG_KEXEC_SIG_FORCE)) {
-                       pr_notice("%s rejected\n", reason);
+                       pr_notice("Enforced kernel signature verification failed (%d).\n", ret);
                        return ret;
                }
 
-               /* If IMA is guaranteed to appraise a signature on the kexec
+               /*
+                * If IMA is guaranteed to appraise a signature on the kexec
                 * image, permit it even if the kernel is otherwise locked
                 * down.
                 */
@@ -216,17 +201,10 @@ kimage_validate_signature(struct kimage *image)
                    security_locked_down(LOCKDOWN_KEXEC))
                        return -EPERM;
 
-               return 0;
-
-               /* All other errors are fatal, including nomem, unparseable
-                * signatures and signature check failures - even if signatures
-                * aren't required.
-                */
-       default:
-               pr_notice("kernel signature verification failed (%d).\n", ret);
+               pr_debug("kernel signature verification failed (%d).\n", ret);
        }
 
-       return ret;
+       return 0;
 }
 #endif
 
index 50cd84f..2e97feb 100644 (file)
 
 
 static int kprobes_initialized;
+/* kprobe_table can be accessed by
+ * - Normal hlist traversal and RCU add/del under kprobe_mutex is held.
+ * Or
+ * - RCU hlist traversal under disabling preempt (breakpoint handlers)
+ */
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
@@ -326,7 +331,8 @@ struct kprobe *get_kprobe(void *addr)
        struct kprobe *p;
 
        head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
-       hlist_for_each_entry_rcu(p, head, hlist) {
+       hlist_for_each_entry_rcu(p, head, hlist,
+                                lockdep_is_held(&kprobe_mutex)) {
                if (p->addr == addr)
                        return p;
        }
@@ -586,11 +592,12 @@ static void kprobe_optimizer(struct work_struct *work)
        mutex_unlock(&module_mutex);
        mutex_unlock(&text_mutex);
        cpus_read_unlock();
-       mutex_unlock(&kprobe_mutex);
 
        /* Step 5: Kick optimizer again if needed */
        if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
                kick_kprobe_optimizer();
+
+       mutex_unlock(&kprobe_mutex);
 }
 
 /* Wait for completing optimization and unoptimization */
@@ -668,8 +675,6 @@ static void force_unoptimize_kprobe(struct optimized_kprobe *op)
        lockdep_assert_cpus_held();
        arch_unoptimize_kprobe(op);
        op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
-       if (kprobe_disabled(&op->kp))
-               arch_disarm_kprobe(&op->kp);
 }
 
 /* Unoptimize a kprobe if p is optimized */
@@ -849,7 +854,7 @@ static void optimize_all_kprobes(void)
        kprobes_allow_optimization = true;
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
-               hlist_for_each_entry_rcu(p, head, hlist)
+               hlist_for_each_entry(p, head, hlist)
                        if (!kprobe_disabled(p))
                                optimize_kprobe(p);
        }
@@ -876,7 +881,7 @@ static void unoptimize_all_kprobes(void)
        kprobes_allow_optimization = false;
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
-               hlist_for_each_entry_rcu(p, head, hlist) {
+               hlist_for_each_entry(p, head, hlist) {
                        if (!kprobe_disabled(p))
                                unoptimize_kprobe(p, false);
                }
@@ -1236,6 +1241,26 @@ __releases(hlist_lock)
 }
 NOKPROBE_SYMBOL(kretprobe_table_unlock);
 
+struct kprobe kprobe_busy = {
+       .addr = (void *) get_kprobe,
+};
+
+void kprobe_busy_begin(void)
+{
+       struct kprobe_ctlblk *kcb;
+
+       preempt_disable();
+       __this_cpu_write(current_kprobe, &kprobe_busy);
+       kcb = get_kprobe_ctlblk();
+       kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+}
+
+void kprobe_busy_end(void)
+{
+       __this_cpu_write(current_kprobe, NULL);
+       preempt_enable();
+}
+
 /*
  * This function is called from finish_task_switch when task tk becomes dead,
  * so that we can recycle any function-return probe instances associated
@@ -1253,6 +1278,8 @@ void kprobe_flush_task(struct task_struct *tk)
                /* Early boot.  kretprobe_table_locks not yet initialized. */
                return;
 
+       kprobe_busy_begin();
+
        INIT_HLIST_HEAD(&empty_rp);
        hash = hash_ptr(tk, KPROBE_HASH_BITS);
        head = &kretprobe_inst_table[hash];
@@ -1266,6 +1293,8 @@ void kprobe_flush_task(struct task_struct *tk)
                hlist_del(&ri->hlist);
                kfree(ri);
        }
+
+       kprobe_busy_end();
 }
 NOKPROBE_SYMBOL(kprobe_flush_task);
 
@@ -1499,12 +1528,14 @@ static struct kprobe *__get_valid_kprobe(struct kprobe *p)
 {
        struct kprobe *ap, *list_p;
 
+       lockdep_assert_held(&kprobe_mutex);
+
        ap = get_kprobe(p->addr);
        if (unlikely(!ap))
                return NULL;
 
        if (p != ap) {
-               list_for_each_entry_rcu(list_p, &ap->list, list)
+               list_for_each_entry(list_p, &ap->list, list)
                        if (list_p == p)
                        /* kprobe p is a valid probe */
                                goto valid;
@@ -1669,7 +1700,9 @@ static int aggr_kprobe_disabled(struct kprobe *ap)
 {
        struct kprobe *kp;
 
-       list_for_each_entry_rcu(kp, &ap->list, list)
+       lockdep_assert_held(&kprobe_mutex);
+
+       list_for_each_entry(kp, &ap->list, list)
                if (!kprobe_disabled(kp))
                        /*
                         * There is an active probe on the list.
@@ -1748,7 +1781,7 @@ static int __unregister_kprobe_top(struct kprobe *p)
        else {
                /* If disabling probe has special handlers, update aggrprobe */
                if (p->post_handler && !kprobe_gone(p)) {
-                       list_for_each_entry_rcu(list_p, &ap->list, list) {
+                       list_for_each_entry(list_p, &ap->list, list) {
                                if ((list_p != p) && (list_p->post_handler))
                                        goto noclean;
                        }
@@ -2062,13 +2095,15 @@ static void kill_kprobe(struct kprobe *p)
 {
        struct kprobe *kp;
 
+       lockdep_assert_held(&kprobe_mutex);
+
        p->flags |= KPROBE_FLAG_GONE;
        if (kprobe_aggrprobe(p)) {
                /*
                 * If this is an aggr_kprobe, we have to list all the
                 * chained probes and mark them GONE.
                 */
-               list_for_each_entry_rcu(kp, &p->list, list)
+               list_for_each_entry(kp, &p->list, list)
                        kp->flags |= KPROBE_FLAG_GONE;
                p->post_handler = NULL;
                kill_optimized_kprobe(p);
@@ -2312,7 +2347,7 @@ static int kprobes_module_callback(struct notifier_block *nb,
        mutex_lock(&kprobe_mutex);
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
-               hlist_for_each_entry_rcu(p, head, hlist)
+               hlist_for_each_entry(p, head, hlist)
                        if (within_module_init((unsigned long)p->addr, mod) ||
                            (checkcore &&
                             within_module_core((unsigned long)p->addr, mod))) {
@@ -2413,7 +2448,7 @@ static void report_probe(struct seq_file *pi, struct kprobe *p,
        else
                kprobe_type = "k";
 
-       if (!kallsyms_show_value())
+       if (!kallsyms_show_value(pi->file->f_cred))
                addr = NULL;
 
        if (sym)
@@ -2505,7 +2540,7 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
         * If /proc/kallsyms is not showing kernel address, we won't
         * show them here either.
         */
-       if (!kallsyms_show_value())
+       if (!kallsyms_show_value(m->file->f_cred))
                seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
                           (void *)ent->start_addr);
        else
@@ -2550,7 +2585,7 @@ static int arm_all_kprobes(void)
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                /* Arm all kprobes on a best-effort basis */
-               hlist_for_each_entry_rcu(p, head, hlist) {
+               hlist_for_each_entry(p, head, hlist) {
                        if (!kprobe_disabled(p)) {
                                err = arm_kprobe(p);
                                if (err)  {
@@ -2593,7 +2628,7 @@ static int disarm_all_kprobes(void)
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                /* Disarm all kprobes on a best-effort basis */
-               hlist_for_each_entry_rcu(p, head, hlist) {
+               hlist_for_each_entry(p, head, hlist) {
                        if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
                                err = disarm_kprobe(p, false);
                                if (err) {
index 8e3d2d7..132f84a 100644 (file)
@@ -201,7 +201,7 @@ void *kthread_probe_data(struct task_struct *task)
        struct kthread *kthread = to_kthread(task);
        void *data = NULL;
 
-       probe_kernel_read(&data, &kthread->data, sizeof(data));
+       copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
        return data;
 }
 
index e8a1985..aa183c9 100644 (file)
@@ -1510,8 +1510,7 @@ static inline bool sect_empty(const Elf_Shdr *sect)
 }
 
 struct module_sect_attr {
-       struct module_attribute mattr;
-       char *name;
+       struct bin_attribute battr;
        unsigned long address;
 };
 
@@ -1521,13 +1520,18 @@ struct module_sect_attrs {
        struct module_sect_attr attrs[];
 };
 
-static ssize_t module_sect_show(struct module_attribute *mattr,
-                               struct module_kobject *mk, char *buf)
+static ssize_t module_sect_read(struct file *file, struct kobject *kobj,
+                               struct bin_attribute *battr,
+                               char *buf, loff_t pos, size_t count)
 {
        struct module_sect_attr *sattr =
-               container_of(mattr, struct module_sect_attr, mattr);
-       return sprintf(buf, "0x%px\n", kptr_restrict < 2 ?
-                      (void *)sattr->address : NULL);
+               container_of(battr, struct module_sect_attr, battr);
+
+       if (pos != 0)
+               return -EINVAL;
+
+       return sprintf(buf, "0x%px\n",
+                      kallsyms_show_value(file->f_cred) ? (void *)sattr->address : NULL);
 }
 
 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
@@ -1535,7 +1539,7 @@ static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
        unsigned int section;
 
        for (section = 0; section < sect_attrs->nsections; section++)
-               kfree(sect_attrs->attrs[section].name);
+               kfree(sect_attrs->attrs[section].battr.attr.name);
        kfree(sect_attrs);
 }
 
@@ -1544,42 +1548,41 @@ static void add_sect_attrs(struct module *mod, const struct load_info *info)
        unsigned int nloaded = 0, i, size[2];
        struct module_sect_attrs *sect_attrs;
        struct module_sect_attr *sattr;
-       struct attribute **gattr;
+       struct bin_attribute **gattr;
 
        /* Count loaded sections and allocate structures */
        for (i = 0; i < info->hdr->e_shnum; i++)
                if (!sect_empty(&info->sechdrs[i]))
                        nloaded++;
        size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded),
-                       sizeof(sect_attrs->grp.attrs[0]));
-       size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
+                       sizeof(sect_attrs->grp.bin_attrs[0]));
+       size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]);
        sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
        if (sect_attrs == NULL)
                return;
 
        /* Setup section attributes. */
        sect_attrs->grp.name = "sections";
-       sect_attrs->grp.attrs = (void *)sect_attrs + size[0];
+       sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0];
 
        sect_attrs->nsections = 0;
        sattr = &sect_attrs->attrs[0];
-       gattr = &sect_attrs->grp.attrs[0];
+       gattr = &sect_attrs->grp.bin_attrs[0];
        for (i = 0; i < info->hdr->e_shnum; i++) {
                Elf_Shdr *sec = &info->sechdrs[i];
                if (sect_empty(sec))
                        continue;
+               sysfs_bin_attr_init(&sattr->battr);
                sattr->address = sec->sh_addr;
-               sattr->name = kstrdup(info->secstrings + sec->sh_name,
-                                       GFP_KERNEL);
-               if (sattr->name == NULL)
+               sattr->battr.attr.name =
+                       kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL);
+               if (sattr->battr.attr.name == NULL)
                        goto out;
                sect_attrs->nsections++;
-               sysfs_attr_init(&sattr->mattr.attr);
-               sattr->mattr.show = module_sect_show;
-               sattr->mattr.store = NULL;
-               sattr->mattr.attr.name = sattr->name;
-               sattr->mattr.attr.mode = S_IRUSR;
-               *(gattr++) = &(sattr++)->mattr.attr;
+               sattr->battr.read = module_sect_read;
+               sattr->battr.size = 3 /* "0x", "\n" */ + (BITS_PER_LONG / 4);
+               sattr->battr.attr.mode = 0400;
+               *(gattr++) = &(sattr++)->battr;
        }
        *gattr = NULL;
 
@@ -1669,7 +1672,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
                        continue;
                if (info->sechdrs[i].sh_type == SHT_NOTE) {
                        sysfs_bin_attr_init(nattr);
-                       nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
+                       nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name;
                        nattr->attr.mode = S_IRUGO;
                        nattr->size = info->sechdrs[i].sh_size;
                        nattr->private = (void *) info->sechdrs[i].sh_addr;
@@ -2783,7 +2786,9 @@ static void dynamic_debug_remove(struct module *mod, struct _ddebug *debug)
 
 void * __weak module_alloc(unsigned long size)
 {
-       return vmalloc_exec(size);
+       return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+                       GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
+                       NUMA_NO_NODE, __builtin_return_address(0));
 }
 
 bool __weak module_init_section(const char *name)
@@ -4377,7 +4382,7 @@ static int modules_open(struct inode *inode, struct file *file)
 
        if (!err) {
                struct seq_file *m = file->private_data;
-               m->private = kallsyms_show_value() ? NULL : (void *)8ul;
+               m->private = kallsyms_show_value(file->f_cred) ? NULL : (void *)8ul;
        }
 
        return err;
index b03df67..cd35663 100644 (file)
@@ -531,7 +531,7 @@ SYSCALL_DEFINE2(setns, int, fd, int, flags)
        } else if (!IS_ERR(pidfd_pid(file))) {
                err = check_setns_flags(flags);
        } else {
-               err = -EBADF;
+               err = -EINVAL;
        }
        if (err)
                goto out;
index 29fc5d8..4373f7a 100644 (file)
@@ -335,7 +335,7 @@ static void padata_reorder(struct parallel_data *pd)
         *
         * Ensure reorder queue is read after pd->lock is dropped so we see
         * new objects from another task in padata_do_serial.  Pairs with
-        * smp_mb__after_atomic in padata_do_serial.
+        * smp_mb in padata_do_serial.
         */
        smp_mb();
 
@@ -418,7 +418,7 @@ void padata_do_serial(struct padata_priv *padata)
         * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
         * in padata_reorder.
         */
-       smp_mb__after_atomic();
+       smp_mb();
 
        padata_reorder(pd);
 }
index 8c14835..b71eaf5 100644 (file)
@@ -974,16 +974,6 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
                user->idx = log_next_idx;
                user->seq = log_next_seq;
                break;
-       case SEEK_CUR:
-               /*
-                * It isn't supported due to the record nature of this
-                * interface: _SET _DATA and _END point to very specific
-                * record positions, while _CUR would be more useful in case
-                * of a byte-based log. Because of that, return the default
-                * errno value for invalid seek operation.
-                */
-               ret = -ESPIPE;
-               break;
        default:
                ret = -EINVAL;
        }
index 16dd1e6..9eb39c2 100644 (file)
@@ -723,7 +723,7 @@ kfree_perf_init(void)
                schedule_timeout_uninterruptible(1);
        }
 
-       pr_alert("kfree object size=%lu\n", kfree_mult * sizeof(struct kfree_obj));
+       pr_alert("kfree object size=%zu\n", kfree_mult * sizeof(struct kfree_obj));
 
        kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
                               GFP_KERNEL);
index c716ead..6c6569e 100644 (file)
@@ -250,7 +250,7 @@ static noinstr void rcu_dynticks_eqs_enter(void)
         * next idle sojourn.
         */
        rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
-       seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+       seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
        // RCU is no longer watching.  Better be in extended quiescent state!
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
                     (seq & RCU_DYNTICK_CTRL_CTR));
@@ -274,13 +274,13 @@ static noinstr void rcu_dynticks_eqs_exit(void)
         * and we also must force ordering with the next RCU read-side
         * critical section.
         */
-       seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+       seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
        // RCU is now watching.  Better not be in an extended quiescent state!
        rcu_dynticks_task_trace_exit();  // After ->dynticks update!
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
                     !(seq & RCU_DYNTICK_CTRL_CTR));
        if (seq & RCU_DYNTICK_CTRL_MASK) {
-               atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
+               arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
                smp_mb__after_atomic(); /* _exit after clearing mask. */
        }
 }
@@ -313,7 +313,7 @@ static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
-       return !(atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
+       return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
 }
 
 /*
@@ -633,6 +633,10 @@ static noinstr void rcu_eqs_enter(bool user)
        do_nocb_deferred_wakeup(rdp);
        rcu_prepare_for_idle();
        rcu_preempt_deferred_qs(current);
+
+       // instrumentation for the noinstr rcu_dynticks_eqs_enter()
+       instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
+
        instrumentation_end();
        WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
        // RCU is watching here ...
@@ -692,6 +696,7 @@ noinstr void rcu_nmi_exit(void)
 {
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
+       instrumentation_begin();
        /*
         * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
         * (We are exiting an NMI handler, so RCU better be paying attention
@@ -705,7 +710,6 @@ noinstr void rcu_nmi_exit(void)
         * leave it in non-RCU-idle state.
         */
        if (rdp->dynticks_nmi_nesting != 1) {
-               instrumentation_begin();
                trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
                                  atomic_read(&rdp->dynticks));
                WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
@@ -714,13 +718,15 @@ noinstr void rcu_nmi_exit(void)
                return;
        }
 
-       instrumentation_begin();
        /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
        trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
        WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
 
        if (!in_nmi())
                rcu_prepare_for_idle();
+
+       // instrumentation for the noinstr rcu_dynticks_eqs_enter()
+       instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
        instrumentation_end();
 
        // RCU is watching here ...
@@ -838,6 +844,10 @@ static void noinstr rcu_eqs_exit(bool user)
        rcu_dynticks_eqs_exit();
        // ... but is watching here.
        instrumentation_begin();
+
+       // instrumentation for the noinstr rcu_dynticks_eqs_exit()
+       instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
+
        rcu_cleanup_after_idle();
        trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
@@ -983,13 +993,21 @@ noinstr void rcu_nmi_enter(void)
                if (!in_nmi())
                        rcu_cleanup_after_idle();
 
+               instrumentation_begin();
+               // instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
+               instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
+               // instrumentation for the noinstr rcu_dynticks_eqs_exit()
+               instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
+
                incby = 1;
        } else if (!in_nmi()) {
                instrumentation_begin();
                rcu_irq_enter_check_tick();
                instrumentation_end();
+       } else  {
+               instrumentation_begin();
        }
-       instrumentation_begin();
+
        trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
                          rdp->dynticks_nmi_nesting,
                          rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
index 8f36032..ca5db40 100644 (file)
@@ -1637,7 +1637,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
                goto out;
        }
 
-       if (cpumask_equal(p->cpus_ptr, new_mask))
+       if (cpumask_equal(&p->cpus_mask, new_mask))
                goto out;
 
        /*
@@ -2293,8 +2293,15 @@ void sched_ttwu_pending(void *arg)
        rq_lock_irqsave(rq, &rf);
        update_rq_clock(rq);
 
-       llist_for_each_entry_safe(p, t, llist, wake_entry)
+       llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
+               if (WARN_ON_ONCE(p->on_cpu))
+                       smp_cond_load_acquire(&p->on_cpu, !VAL);
+
+               if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
+                       set_task_cpu(p, cpu_of(rq));
+
                ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0, &rf);
+       }
 
        rq_unlock_irqrestore(rq, &rf);
 }
@@ -2322,7 +2329,7 @@ static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags
        p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
 
        WRITE_ONCE(rq->ttwu_pending, 1);
-       __smp_call_single_queue(cpu, &p->wake_entry);
+       __smp_call_single_queue(cpu, &p->wake_entry.llist);
 }
 
 void wake_up_if_idle(int cpu)
@@ -2369,7 +2376,7 @@ static inline bool ttwu_queue_cond(int cpu, int wake_flags)
         * the soon-to-be-idle CPU as the current CPU is likely busy.
         * nr_running is checked to avoid unnecessary task stacking.
         */
-       if ((wake_flags & WF_ON_RQ) && cpu_rq(cpu)->nr_running <= 1)
+       if ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1)
                return true;
 
        return false;
@@ -2378,6 +2385,9 @@ static inline bool ttwu_queue_cond(int cpu, int wake_flags)
 static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
 {
        if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) {
+               if (WARN_ON_ONCE(cpu == smp_processor_id()))
+                       return false;
+
                sched_clock_cpu(cpu); /* Sync clocks across CPUs */
                __ttwu_queue_wakelist(p, cpu, wake_flags);
                return true;
@@ -2528,7 +2538,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
                        goto out;
 
                success = 1;
-               cpu = task_cpu(p);
                trace_sched_waking(p);
                p->state = TASK_RUNNING;
                trace_sched_wakeup(p);
@@ -2550,7 +2559,6 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 
        /* We're going to change ->state: */
        success = 1;
-       cpu = task_cpu(p);
 
        /*
         * Ensure we load p->on_rq _after_ p->state, otherwise it would
@@ -2614,8 +2622,21 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
         * which potentially sends an IPI instead of spinning on p->on_cpu to
         * let the waker make forward progress. This is safe because IRQs are
         * disabled and the IPI will deliver after on_cpu is cleared.
+        *
+        * Ensure we load task_cpu(p) after p->on_cpu:
+        *
+        * set_task_cpu(p, cpu);
+        *   STORE p->cpu = @cpu
+        * __schedule() (switch to task 'p')
+        *   LOCK rq->lock
+        *   smp_mb__after_spin_lock()          smp_cond_load_acquire(&p->on_cpu)
+        *   STORE p->on_cpu = 1                LOAD p->cpu
+        *
+        * to ensure we observe the correct CPU on which the task is currently
+        * scheduling.
         */
-       if (READ_ONCE(p->on_cpu) && ttwu_queue_wakelist(p, cpu, wake_flags | WF_ON_RQ))
+       if (smp_load_acquire(&p->on_cpu) &&
+           ttwu_queue_wakelist(p, task_cpu(p), wake_flags | WF_ON_CPU))
                goto unlock;
 
        /*
@@ -2635,6 +2656,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
                psi_ttwu_dequeue(p);
                set_task_cpu(p, cpu);
        }
+#else
+       cpu = task_cpu(p);
 #endif /* CONFIG_SMP */
 
        ttwu_queue(p, cpu, wake_flags);
@@ -2642,7 +2665,7 @@ unlock:
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 out:
        if (success)
-               ttwu_stat(p, cpu, wake_flags);
+               ttwu_stat(p, task_cpu(p), wake_flags);
        preempt_enable();
 
        return success;
@@ -2763,7 +2786,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 #endif
        init_numa_balancing(clone_flags, p);
 #ifdef CONFIG_SMP
-       p->wake_entry_type = CSD_TYPE_TTWU;
+       p->wake_entry.u_flags = CSD_TYPE_TTWU;
 #endif
 }
 
@@ -4533,7 +4556,8 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
         */
        if (dl_prio(prio)) {
                if (!dl_prio(p->normal_prio) ||
-                   (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
+                   (pi_task && dl_prio(pi_task->prio) &&
+                    dl_entity_preempt(&pi_task->dl, &p->dl))) {
                        p->dl.dl_boosted = 1;
                        queue_flag |= ENQUEUE_REPLENISH;
                } else
index 504d2f5..f63f337 100644 (file)
@@ -2692,6 +2692,7 @@ void __dl_clear_params(struct task_struct *p)
        dl_se->dl_bw                    = 0;
        dl_se->dl_density               = 0;
 
+       dl_se->dl_boosted               = 0;
        dl_se->dl_throttled             = 0;
        dl_se->dl_yielded               = 0;
        dl_se->dl_non_contending        = 0;
index cbcb2f7..658aa7a 100644 (file)
@@ -806,7 +806,7 @@ void post_init_entity_util_avg(struct task_struct *p)
                }
        }
 
-       sa->runnable_avg = cpu_scale;
+       sa->runnable_avg = sa->util_avg;
 
        if (p->sched_class != &fair_sched_class) {
                /*
index 05deb81..1ae95b9 100644 (file)
@@ -96,6 +96,15 @@ void __cpuidle default_idle_call(void)
        }
 }
 
+static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
+                              struct cpuidle_device *dev)
+{
+       if (current_clr_polling_and_test())
+               return -EBUSY;
+
+       return cpuidle_enter_s2idle(drv, dev);
+}
+
 static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                      int next_state)
 {
@@ -171,11 +180,9 @@ static void cpuidle_idle_call(void)
                if (idle_should_enter_s2idle()) {
                        rcu_idle_enter();
 
-                       entered_state = cpuidle_enter_s2idle(drv, dev);
-                       if (entered_state > 0) {
-                               local_irq_enable();
+                       entered_state = call_cpuidle_s2idle(drv, dev);
+                       if (entered_state > 0)
                                goto exit_idle;
-                       }
 
                        rcu_idle_exit();
 
index 1d4e94c..877fb08 100644 (file)
@@ -1682,7 +1682,7 @@ static inline int task_on_rq_migrating(struct task_struct *p)
 #define WF_SYNC                        0x01            /* Waker goes to sleep after wakeup */
 #define WF_FORK                        0x02            /* Child wakeup after fork */
 #define WF_MIGRATED            0x04            /* Internal use, task got migrated */
-#define WF_ON_RQ               0x08            /* Wakee is on_rq */
+#define WF_ON_CPU              0x08            /* Wakee is on_cpu */
 
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
index 5ca48cc..ee22ec7 100644 (file)
@@ -2529,9 +2529,6 @@ bool get_signal(struct ksignal *ksig)
        struct signal_struct *signal = current->signal;
        int signr;
 
-       if (unlikely(current->task_works))
-               task_work_run();
-
        if (unlikely(uprobe_deny_signal()))
                return false;
 
@@ -2544,6 +2541,13 @@ bool get_signal(struct ksignal *ksig)
 
 relock:
        spin_lock_irq(&sighand->siglock);
+       current->jobctl &= ~JOBCTL_TASK_WORK;
+       if (unlikely(current->task_works)) {
+               spin_unlock_irq(&sighand->siglock);
+               task_work_run();
+               goto relock;
+       }
+
        /*
         * Every stopped thread goes here after wakeup. Check to see if
         * we should notify the parent, prepare_signal(SIGCONT) encodes
index 472c2b2..aa17eed 100644 (file)
@@ -669,24 +669,6 @@ void __init smp_init(void)
 {
        int num_nodes, num_cpus;
 
-       /*
-        * Ensure struct irq_work layout matches so that
-        * flush_smp_call_function_queue() can do horrible things.
-        */
-       BUILD_BUG_ON(offsetof(struct irq_work, llnode) !=
-                    offsetof(struct __call_single_data, llist));
-       BUILD_BUG_ON(offsetof(struct irq_work, func) !=
-                    offsetof(struct __call_single_data, func));
-       BUILD_BUG_ON(offsetof(struct irq_work, flags) !=
-                    offsetof(struct __call_single_data, flags));
-
-       /*
-        * Assert the CSD_TYPE_TTWU layout is similar enough
-        * for task_struct to be on the @call_single_queue.
-        */
-       BUILD_BUG_ON(offsetof(struct task_struct, wake_entry_type) - offsetof(struct task_struct, wake_entry) !=
-                    offsetof(struct __call_single_data, flags) - offsetof(struct __call_single_data, llist));
-
        idle_threads_init();
        cpuhp_threads_init();
 
index 825f282..5c0848c 100644 (file)
@@ -25,9 +25,10 @@ static struct callback_head work_exited; /* all we need is ->next == NULL */
  * 0 if succeeds or -ESRCH.
  */
 int
-task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
+task_work_add(struct task_struct *task, struct callback_head *work, int notify)
 {
        struct callback_head *head;
+       unsigned long flags;
 
        do {
                head = READ_ONCE(task->task_works);
@@ -36,8 +37,19 @@ task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
                work->next = head;
        } while (cmpxchg(&task->task_works, head, work) != head);
 
-       if (notify)
+       switch (notify) {
+       case TWA_RESUME:
                set_notify_resume(task);
+               break;
+       case TWA_SIGNAL:
+               if (lock_task_sighand(task, &flags)) {
+                       task->jobctl |= JOBCTL_TASK_WORK;
+                       signal_wake_up(task, 0);
+                       unlock_task_sighand(task, &flags);
+               }
+               break;
+       }
+
        return 0;
 }
 
index 6575bb0..aeba5ee 100644 (file)
@@ -31,6 +31,8 @@ ifdef CONFIG_GCOV_PROFILE_FTRACE
 GCOV_PROFILE := y
 endif
 
+CFLAGS_bpf_trace.o := -I$(src)
+
 CFLAGS_trace_benchmark.o := -I$(src)
 CFLAGS_trace_events_filter.o := -I$(src)
 
index 5773f0b..5ef0484 100644 (file)
@@ -3,6 +3,9 @@
  * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
  *
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/blkdev.h>
 #include <linux/blktrace_api.h>
@@ -344,7 +347,8 @@ static int __blk_trace_remove(struct request_queue *q)
 {
        struct blk_trace *bt;
 
-       bt = xchg(&q->blk_trace, NULL);
+       bt = rcu_replace_pointer(q->blk_trace, NULL,
+                                lockdep_is_held(&q->blk_trace_mutex));
        if (!bt)
                return -EINVAL;
 
@@ -494,6 +498,17 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
         */
        strreplace(buts->name, '/', '_');
 
+       /*
+        * bdev can be NULL, as with scsi-generic, this is a helpful as
+        * we can be.
+        */
+       if (rcu_dereference_protected(q->blk_trace,
+                                     lockdep_is_held(&q->blk_trace_mutex))) {
+               pr_warn("Concurrent blktraces are not allowed on %s\n",
+                       buts->name);
+               return -EBUSY;
+       }
+
        bt = kzalloc(sizeof(*bt), GFP_KERNEL);
        if (!bt)
                return -ENOMEM;
@@ -543,10 +558,7 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
        bt->pid = buts->pid;
        bt->trace_state = Blktrace_setup;
 
-       ret = -EBUSY;
-       if (cmpxchg(&q->blk_trace, NULL, bt))
-               goto err;
-
+       rcu_assign_pointer(q->blk_trace, bt);
        get_probe_ref();
 
        ret = 0;
@@ -1629,7 +1641,8 @@ static int blk_trace_remove_queue(struct request_queue *q)
 {
        struct blk_trace *bt;
 
-       bt = xchg(&q->blk_trace, NULL);
+       bt = rcu_replace_pointer(q->blk_trace, NULL,
+                                lockdep_is_held(&q->blk_trace_mutex));
        if (bt == NULL)
                return -EINVAL;
 
@@ -1661,10 +1674,7 @@ static int blk_trace_setup_queue(struct request_queue *q,
 
        blk_trace_setup_lba(bt, bdev);
 
-       ret = -EBUSY;
-       if (cmpxchg(&q->blk_trace, NULL, bt))
-               goto free_bt;
-
+       rcu_assign_pointer(q->blk_trace, bt);
        get_probe_ref();
        return 0;
 
index e729c9e..3cc0dcb 100644 (file)
 #include <linux/uaccess.h>
 #include <linux/ctype.h>
 #include <linux/kprobes.h>
+#include <linux/spinlock.h>
 #include <linux/syscalls.h>
 #include <linux/error-injection.h>
+#include <linux/btf_ids.h>
 
 #include <asm/tlb.h>
 
 #include "trace_probe.h"
 #include "trace.h"
 
+#define CREATE_TRACE_POINTS
+#include "bpf_trace.h"
+
 #define bpf_event_rcu_dereference(p)                                   \
        rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
 
@@ -141,7 +146,7 @@ bpf_probe_read_user_common(void *dst, u32 size, const void __user *unsafe_ptr)
 {
        int ret;
 
-       ret = probe_user_read(dst, unsafe_ptr, size);
+       ret = copy_from_user_nofault(dst, unsafe_ptr, size);
        if (unlikely(ret < 0))
                memset(dst, 0, size);
        return ret;
@@ -196,7 +201,7 @@ bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr)
 
        if (unlikely(ret < 0))
                goto fail;
-       ret = probe_kernel_read(dst, unsafe_ptr, size);
+       ret = copy_from_kernel_nofault(dst, unsafe_ptr, size);
        if (unlikely(ret < 0))
                goto fail;
        return ret;
@@ -241,7 +246,7 @@ bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr)
        if (unlikely(ret < 0))
                goto fail;
 
-       return 0;
+       return ret;
 fail:
        memset(dst, 0, size);
        return ret;
@@ -326,7 +331,7 @@ BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
        if (unlikely(!nmi_uaccess_okay()))
                return -EPERM;
 
-       return probe_user_write(unsafe_ptr, src, size);
+       return copy_to_user_nofault(unsafe_ptr, src, size);
 }
 
 static const struct bpf_func_proto bpf_probe_write_user_proto = {
@@ -374,9 +379,33 @@ static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
        }
 }
 
+static DEFINE_RAW_SPINLOCK(trace_printk_lock);
+
+#define BPF_TRACE_PRINTK_SIZE   1024
+
+static inline __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
+{
+       static char buf[BPF_TRACE_PRINTK_SIZE];
+       unsigned long flags;
+       va_list ap;
+       int ret;
+
+       raw_spin_lock_irqsave(&trace_printk_lock, flags);
+       va_start(ap, fmt);
+       ret = vsnprintf(buf, sizeof(buf), fmt, ap);
+       va_end(ap);
+       /* vsnprintf() will not append null for zero-length strings */
+       if (ret == 0)
+               buf[0] = '\0';
+       trace_bpf_trace_printk(buf);
+       raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
+
+       return ret;
+}
+
 /*
  * Only limited trace_printk() conversion specifiers allowed:
- * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pks %pus %s
+ * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s
  */
 BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
           u64, arg2, u64, arg3)
@@ -420,6 +449,11 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
                                goto fmt_str;
                        }
 
+                       if (fmt[i + 1] == 'B') {
+                               i++;
+                               goto fmt_next;
+                       }
+
                        /* disallow any further format extensions */
                        if (fmt[i + 1] != 0 &&
                            !isspace(fmt[i + 1]) &&
@@ -478,8 +512,7 @@ fmt_next:
  */
 #define __BPF_TP_EMIT()        __BPF_ARG3_TP()
 #define __BPF_TP(...)                                                  \
-       __trace_printk(0 /* Fake ip */,                                 \
-                      fmt, ##__VA_ARGS__)
+       bpf_do_trace_printk(fmt, ##__VA_ARGS__)
 
 #define __BPF_ARG1_TP(...)                                             \
        ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))        \
@@ -516,10 +549,15 @@ static const struct bpf_func_proto bpf_trace_printk_proto = {
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
 {
        /*
-        * this program might be calling bpf_trace_printk,
-        * so allocate per-cpu printk buffers
+        * This program might be calling bpf_trace_printk,
+        * so enable the associated bpf_trace/bpf_trace_printk event.
+        * Repeat this each time as it is possible a user has
+        * disabled bpf_trace_printk events.  By loading a program
+        * calling bpf_trace_printk() however the user has expressed
+        * the intent to see such events.
         */
-       trace_printk_init_buffers();
+       if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
+               pr_warn_ratelimited("could not enable bpf_trace_printk events");
 
        return &bpf_trace_printk_proto;
 }
@@ -636,7 +674,8 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
                if (fmt[i] == 'p') {
                        if (fmt[i + 1] == 0 ||
                            fmt[i + 1] == 'K' ||
-                           fmt[i + 1] == 'x') {
+                           fmt[i + 1] == 'x' ||
+                           fmt[i + 1] == 'B') {
                                /* just kernel pointers */
                                params[fmt_cnt] = args[fmt_cnt];
                                fmt_cnt++;
@@ -661,7 +700,7 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
 
                        copy_size = (fmt[i + 2] == '4') ? 4 : 16;
 
-                       err = probe_kernel_read(bufs->buf[memcpy_cnt],
+                       err = copy_from_kernel_nofault(bufs->buf[memcpy_cnt],
                                                (void *) (long) args[fmt_cnt],
                                                copy_size);
                        if (err < 0)
@@ -681,7 +720,8 @@ BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size,
                }
 
                if (fmt[i] != 'i' && fmt[i] != 'd' &&
-                   fmt[i] != 'u' && fmt[i] != 'x') {
+                   fmt[i] != 'u' && fmt[i] != 'x' &&
+                   fmt[i] != 'X') {
                        err = -EINVAL;
                        goto out;
                }
@@ -703,7 +743,9 @@ out:
        return err;
 }
 
-static int bpf_seq_printf_btf_ids[5];
+BTF_ID_LIST(bpf_seq_printf_btf_ids)
+BTF_ID(struct, seq_file)
+
 static const struct bpf_func_proto bpf_seq_printf_proto = {
        .func           = bpf_seq_printf,
        .gpl_only       = true,
@@ -721,7 +763,9 @@ BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
        return seq_write(m, data, len) ? -EOVERFLOW : 0;
 }
 
-static int bpf_seq_write_btf_ids[5];
+BTF_ID_LIST(bpf_seq_write_btf_ids)
+BTF_ID(struct, seq_file)
+
 static const struct bpf_func_proto bpf_seq_write_proto = {
        .func           = bpf_seq_write,
        .gpl_only       = true,
@@ -1134,6 +1178,10 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_ringbuf_discard_proto;
        case BPF_FUNC_ringbuf_query:
                return &bpf_ringbuf_query_proto;
+       case BPF_FUNC_jiffies64:
+               return &bpf_jiffies64_proto;
+       case BPF_FUNC_get_task_stack:
+               return &bpf_get_task_stack_proto;
        default:
                return NULL;
        }
@@ -1512,6 +1560,16 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_skb_output_proto;
        case BPF_FUNC_xdp_output:
                return &bpf_xdp_output_proto;
+       case BPF_FUNC_skc_to_tcp6_sock:
+               return &bpf_skc_to_tcp6_sock_proto;
+       case BPF_FUNC_skc_to_tcp_sock:
+               return &bpf_skc_to_tcp_sock_proto;
+       case BPF_FUNC_skc_to_tcp_timewait_sock:
+               return &bpf_skc_to_tcp_timewait_sock_proto;
+       case BPF_FUNC_skc_to_tcp_request_sock:
+               return &bpf_skc_to_tcp_request_sock_proto;
+       case BPF_FUNC_skc_to_udp6_sock:
+               return &bpf_skc_to_udp6_sock_proto;
 #endif
        case BPF_FUNC_seq_printf:
                return prog->expected_attach_type == BPF_TRACE_ITER ?
diff --git a/kernel/trace/bpf_trace.h b/kernel/trace/bpf_trace.h
new file mode 100644 (file)
index 0000000..9acbc11
--- /dev/null
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM bpf_trace
+
+#if !defined(_TRACE_BPF_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+
+#define _TRACE_BPF_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(bpf_trace_printk,
+
+       TP_PROTO(const char *bpf_string),
+
+       TP_ARGS(bpf_string),
+
+       TP_STRUCT__entry(
+               __string(bpf_string, bpf_string)
+       ),
+
+       TP_fast_assign(
+               __assign_str(bpf_string, bpf_string);
+       ),
+
+       TP_printk("%s", __get_str(bpf_string))
+);
+
+#endif /* _TRACE_BPF_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE bpf_trace
+
+#include <trace/define_trace.h>
index c163c35..1903b80 100644 (file)
@@ -2260,7 +2260,7 @@ ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
 
                if (hash_contains_ip(ip, op->func_hash))
                        return op;
-       } 
+       }
 
        return NULL;
 }
@@ -3599,7 +3599,7 @@ static int t_show(struct seq_file *m, void *v)
                        if (direct)
                                seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
                }
-       }       
+       }
 
        seq_putc(m, '\n');
 
@@ -7151,6 +7151,10 @@ static int pid_open(struct inode *inode, struct file *file, int type)
        case TRACE_NO_PIDS:
                seq_ops = &ftrace_no_pid_sops;
                break;
+       default:
+               trace_array_put(tr);
+               WARN_ON_ONCE(1);
+               return -EINVAL;
        }
 
        ret = seq_open(file, seq_ops);
@@ -7229,6 +7233,10 @@ pid_write(struct file *filp, const char __user *ubuf,
                other_pids = rcu_dereference_protected(tr->function_pids,
                                             lockdep_is_held(&ftrace_lock));
                break;
+       default:
+               ret = -EINVAL;
+               WARN_ON_ONCE(1);
+               goto out;
        }
 
        ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
index b8e1ca4..00867ff 100644 (file)
@@ -2427,7 +2427,7 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
        if (unlikely(info->add_timestamp)) {
                bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer);
 
-               event = rb_add_time_stamp(event, info->delta, abs);
+               event = rb_add_time_stamp(event, abs ? info->delta : delta, abs);
                length -= RB_LEN_TIME_EXTEND;
                delta = 0;
        }
index ec44b0e..bb62269 100644 (file)
@@ -3570,7 +3570,6 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 
 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 {
-       struct ring_buffer_event *event;
        struct ring_buffer_iter *buf_iter;
        unsigned long entries = 0;
        u64 ts;
@@ -3588,7 +3587,7 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
         * that a reset never took place on a cpu. This is evident
         * by the timestamp being before the start of the buffer.
         */
-       while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
+       while (ring_buffer_iter_peek(buf_iter, &ts)) {
                if (ts >= iter->array_buffer->time_start)
                        break;
                entries++;
index def769d..13db400 100644 (file)
@@ -61,6 +61,9 @@ enum trace_type {
 #undef __field_desc
 #define __field_desc(type, container, item)
 
+#undef __field_packed
+#define __field_packed(type, container, item)
+
 #undef __array
 #define __array(type, item, size)      type    item[size];
 
index 9de29bb..fa0fc08 100644 (file)
@@ -101,12 +101,16 @@ trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
                kprobe_event_cmd_init(&cmd, buf, MAX_BUF_LEN);
 
                ret = kprobe_event_gen_cmd_start(&cmd, event, val);
-               if (ret)
+               if (ret) {
+                       pr_err("Failed to generate probe: %s\n", buf);
                        break;
+               }
 
                ret = kprobe_event_gen_cmd_end(&cmd);
-               if (ret)
+               if (ret) {
                        pr_err("Failed to add probe: %s\n", buf);
+                       break;
+               }
        }
 
        return ret;
@@ -120,7 +124,7 @@ trace_boot_add_kprobe_event(struct xbc_node *node, const char *event)
 }
 #endif
 
-#ifdef CONFIG_HIST_TRIGGERS
+#ifdef CONFIG_SYNTH_EVENTS
 static int __init
 trace_boot_add_synth_event(struct xbc_node *node, const char *event)
 {
index a523da0..18c4a58 100644 (file)
@@ -78,8 +78,8 @@ FTRACE_ENTRY_PACKED(funcgraph_entry, ftrace_graph_ent_entry,
 
        F_STRUCT(
                __field_struct( struct ftrace_graph_ent,        graph_ent       )
-               __field_desc(   unsigned long,  graph_ent,      func            )
-               __field_desc(   int,            graph_ent,      depth           )
+               __field_packed( unsigned long,  graph_ent,      func            )
+               __field_packed( int,            graph_ent,      depth           )
        ),
 
        F_printk("--> %ps (%d)", (void *)__entry->func, __entry->depth)
@@ -92,11 +92,11 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftrace_graph_ret_entry,
 
        F_STRUCT(
                __field_struct( struct ftrace_graph_ret,        ret     )
-               __field_desc(   unsigned long,  ret,            func    )
-               __field_desc(   unsigned long,  ret,            overrun )
-               __field_desc(   unsigned long long, ret,        calltime)
-               __field_desc(   unsigned long long, ret,        rettime )
-               __field_desc(   int,            ret,            depth   )
+               __field_packed( unsigned long,  ret,            func    )
+               __field_packed( unsigned long,  ret,            overrun )
+               __field_packed( unsigned long long, ret,        calltime)
+               __field_packed( unsigned long long, ret,        rettime )
+               __field_packed( int,            ret,            depth   )
        ),
 
        F_printk("<-- %ps (%d) (start: %llx  end: %llx) over: %d",
index 3a74736..f725802 100644 (file)
@@ -216,11 +216,17 @@ static int event_trigger_regex_open(struct inode *inode, struct file *file)
 
 int trigger_process_regex(struct trace_event_file *file, char *buff)
 {
-       char *command, *next = buff;
+       char *command, *next;
        struct event_command *p;
        int ret = -EINVAL;
 
+       next = buff = skip_spaces(buff);
        command = strsep(&next, ": \t");
+       if (next) {
+               next = skip_spaces(next);
+               if (!*next)
+                       next = NULL;
+       }
        command = (command[0] != '!') ? command : command + 1;
 
        mutex_lock(&trigger_cmd_mutex);
@@ -630,8 +636,14 @@ event_trigger_callback(struct event_command *cmd_ops,
        int ret;
 
        /* separate the trigger from the filter (t:n [if filter]) */
-       if (param && isdigit(param[0]))
+       if (param && isdigit(param[0])) {
                trigger = strsep(&param, " \t");
+               if (param) {
+                       param = skip_spaces(param);
+                       if (!*param)
+                               param = NULL;
+               }
+       }
 
        trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
 
@@ -1368,6 +1380,11 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
        trigger = strsep(&param, " \t");
        if (!trigger)
                return -EINVAL;
+       if (param) {
+               param = skip_spaces(param);
+               if (!*param)
+                       param = NULL;
+       }
 
        system = strsep(&trigger, ":");
        if (!trigger)
index 77ce5a3..70d3d0a 100644 (file)
@@ -45,6 +45,9 @@ static int ftrace_event_register(struct trace_event_call *call,
 #undef __field_desc
 #define __field_desc(type, container, item)            type item;
 
+#undef __field_packed
+#define __field_packed(type, container, item)          type item;
+
 #undef __array
 #define __array(type, item, size)                      type item[size];
 
@@ -85,6 +88,13 @@ static void __always_unused ____ftrace_check_##name(void)            \
        .size = sizeof(_type), .align = __alignof__(_type),             \
        is_signed_type(_type), .filter_type = _filter_type },
 
+
+#undef __field_ext_packed
+#define __field_ext_packed(_type, _item, _filter_type) {       \
+       .type = #_type, .name = #_item,                         \
+       .size = sizeof(_type), .align = 1,                      \
+       is_signed_type(_type), .filter_type = _filter_type },
+
 #undef __field
 #define __field(_type, _item) __field_ext(_type, _item, FILTER_OTHER)
 
@@ -94,6 +104,9 @@ static void __always_unused ____ftrace_check_##name(void)            \
 #undef __field_desc
 #define __field_desc(_type, _container, _item) __field_ext(_type, _item, FILTER_OTHER)
 
+#undef __field_packed
+#define __field_packed(_type, _container, _item) __field_ext_packed(_type, _item, FILTER_OTHER)
+
 #undef __array
 #define __array(_type, _item, _len) {                                  \
        .type = #_type"["__stringify(_len)"]", .name = #_item,          \
@@ -129,6 +142,9 @@ static struct trace_event_fields ftrace_event_fields_##name[] = {   \
 #undef __field_desc
 #define __field_desc(type, container, item)
 
+#undef __field_packed
+#define __field_packed(type, container, item)
+
 #undef __array
 #define __array(type, item, len)
 
index 8a4c8d5..dd4dff7 100644 (file)
@@ -42,7 +42,7 @@ static int allocate_ftrace_ops(struct trace_array *tr)
        if (!ops)
                return -ENOMEM;
 
-       /* Currently only the non stack verision is supported */
+       /* Currently only the non stack version is supported */
        ops->func = function_trace_call;
        ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;
 
index 6048f1b..aefb606 100644 (file)
@@ -1222,7 +1222,7 @@ fetch_store_strlen(unsigned long addr)
 #endif
 
        do {
-               ret = probe_kernel_read(&c, (u8 *)addr + len, 1);
+               ret = copy_from_kernel_nofault(&c, (u8 *)addr + len, 1);
                len++;
        } while (c && ret == 0 && len < MAX_STRING_SIZE);
 
@@ -1290,7 +1290,7 @@ probe_mem_read_user(void *dest, void *src, size_t size)
 {
        const void __user *uaddr =  (__force const void __user *)src;
 
-       return probe_user_read(dest, uaddr, size);
+       return copy_from_user_nofault(dest, uaddr, size);
 }
 
 static nokprobe_inline int
@@ -1300,7 +1300,7 @@ probe_mem_read(void *dest, void *src, size_t size)
        if ((unsigned long)src < TASK_SIZE)
                return probe_mem_read_user(dest, src, size);
 #endif
-       return probe_kernel_read(dest, src, size);
+       return copy_from_kernel_nofault(dest, src, size);
 }
 
 /* Note that we don't verify it, since the code does not come from user space */
index b8a928e..d2867cc 100644 (file)
@@ -639,8 +639,8 @@ static int traceprobe_parse_probe_arg_body(char *arg, ssize_t *size,
                        ret = -EINVAL;
                        goto fail;
                }
-               if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM) ||
-                    parg->count) {
+               if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM ||
+                    code->op == FETCH_OP_DATA) || parg->count) {
                        /*
                         * IMM, DATA and COMM is pointing actual address, those
                         * must be kept, and if parg->count != 0, this is an
index a0ff9e2..a22b628 100644 (file)
@@ -236,7 +236,7 @@ struct trace_probe_event {
        struct trace_event_call         call;
        struct list_head                files;
        struct list_head                probes;
-       struct trace_uprobe_filter      filter[0];
+       struct trace_uprobe_filter      filter[];
 };
 
 struct trace_probe {
index 79f139a..6ca2096 100644 (file)
@@ -26,8 +26,6 @@
 #include <linux/ptrace.h>
 #include <linux/async.h>
 #include <linux/uaccess.h>
-#include <linux/shmem_fs.h>
-#include <linux/pipe_fs_i.h>
 
 #include <trace/events/module.h>
 
@@ -38,8 +36,6 @@ static kernel_cap_t usermodehelper_bset = CAP_FULL_SET;
 static kernel_cap_t usermodehelper_inheritable = CAP_FULL_SET;
 static DEFINE_SPINLOCK(umh_sysctl_lock);
 static DECLARE_RWSEM(umhelper_sem);
-static LIST_HEAD(umh_list);
-static DEFINE_MUTEX(umh_list_lock);
 
 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
 {
@@ -102,16 +98,9 @@ static int call_usermodehelper_exec_async(void *data)
 
        commit_creds(new);
 
-       sub_info->pid = task_pid_nr(current);
-       if (sub_info->file) {
-               retval = do_execve_file(sub_info->file,
-                                       sub_info->argv, sub_info->envp);
-               if (!retval)
-                       current->flags |= PF_UMH;
-       } else
-               retval = do_execve(getname_kernel(sub_info->path),
-                                  (const char __user *const __user *)sub_info->argv,
-                                  (const char __user *const __user *)sub_info->envp);
+       retval = do_execve(getname_kernel(sub_info->path),
+                          (const char __user *const __user *)sub_info->argv,
+                          (const char __user *const __user *)sub_info->envp);
 out:
        sub_info->retval = retval;
        /*
@@ -405,140 +394,6 @@ struct subprocess_info *call_usermodehelper_setup(const char *path, char **argv,
 }
 EXPORT_SYMBOL(call_usermodehelper_setup);
 
-struct subprocess_info *call_usermodehelper_setup_file(struct file *file,
-               int (*init)(struct subprocess_info *info, struct cred *new),
-               void (*cleanup)(struct subprocess_info *info), void *data)
-{
-       struct subprocess_info *sub_info;
-       struct umh_info *info = data;
-       const char *cmdline = (info->cmdline) ? info->cmdline : "usermodehelper";
-
-       sub_info = kzalloc(sizeof(struct subprocess_info), GFP_KERNEL);
-       if (!sub_info)
-               return NULL;
-
-       sub_info->argv = argv_split(GFP_KERNEL, cmdline, NULL);
-       if (!sub_info->argv) {
-               kfree(sub_info);
-               return NULL;
-       }
-
-       INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);
-       sub_info->path = "none";
-       sub_info->file = file;
-       sub_info->init = init;
-       sub_info->cleanup = cleanup;
-       sub_info->data = data;
-       return sub_info;
-}
-
-static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
-{
-       struct umh_info *umh_info = info->data;
-       struct file *from_umh[2];
-       struct file *to_umh[2];
-       int err;
-
-       /* create pipe to send data to umh */
-       err = create_pipe_files(to_umh, 0);
-       if (err)
-               return err;
-       err = replace_fd(0, to_umh[0], 0);
-       fput(to_umh[0]);
-       if (err < 0) {
-               fput(to_umh[1]);
-               return err;
-       }
-
-       /* create pipe to receive data from umh */
-       err = create_pipe_files(from_umh, 0);
-       if (err) {
-               fput(to_umh[1]);
-               replace_fd(0, NULL, 0);
-               return err;
-       }
-       err = replace_fd(1, from_umh[1], 0);
-       fput(from_umh[1]);
-       if (err < 0) {
-               fput(to_umh[1]);
-               replace_fd(0, NULL, 0);
-               fput(from_umh[0]);
-               return err;
-       }
-
-       umh_info->pipe_to_umh = to_umh[1];
-       umh_info->pipe_from_umh = from_umh[0];
-       return 0;
-}
-
-static void umh_clean_and_save_pid(struct subprocess_info *info)
-{
-       struct umh_info *umh_info = info->data;
-
-       /* cleanup if umh_pipe_setup() was successful but exec failed */
-       if (info->pid && info->retval) {
-               fput(umh_info->pipe_to_umh);
-               fput(umh_info->pipe_from_umh);
-       }
-
-       argv_free(info->argv);
-       umh_info->pid = info->pid;
-}
-
-/**
- * fork_usermode_blob - fork a blob of bytes as a usermode process
- * @data: a blob of bytes that can be do_execv-ed as a file
- * @len: length of the blob
- * @info: information about usermode process (shouldn't be NULL)
- *
- * If info->cmdline is set it will be used as command line for the
- * user process, else "usermodehelper" is used.
- *
- * Returns either negative error or zero which indicates success
- * in executing a blob of bytes as a usermode process. In such
- * case 'struct umh_info *info' is populated with two pipes
- * and a pid of the process. The caller is responsible for health
- * check of the user process, killing it via pid, and closing the
- * pipes when user process is no longer needed.
- */
-int fork_usermode_blob(void *data, size_t len, struct umh_info *info)
-{
-       struct subprocess_info *sub_info;
-       struct file *file;
-       ssize_t written;
-       loff_t pos = 0;
-       int err;
-
-       file = shmem_kernel_file_setup("", len, 0);
-       if (IS_ERR(file))
-               return PTR_ERR(file);
-
-       written = kernel_write(file, data, len, &pos);
-       if (written != len) {
-               err = written;
-               if (err >= 0)
-                       err = -ENOMEM;
-               goto out;
-       }
-
-       err = -ENOMEM;
-       sub_info = call_usermodehelper_setup_file(file, umh_pipe_setup,
-                                                 umh_clean_and_save_pid, info);
-       if (!sub_info)
-               goto out;
-
-       err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
-       if (!err) {
-               mutex_lock(&umh_list_lock);
-               list_add(&info->list, &umh_list);
-               mutex_unlock(&umh_list_lock);
-       }
-out:
-       fput(file);
-       return err;
-}
-EXPORT_SYMBOL_GPL(fork_usermode_blob);
-
 /**
  * call_usermodehelper_exec - start a usermode application
  * @sub_info: information about the subprocessa
@@ -700,26 +555,6 @@ static int proc_cap_handler(struct ctl_table *table, int write,
        return 0;
 }
 
-void __exit_umh(struct task_struct *tsk)
-{
-       struct umh_info *info;
-       pid_t pid = tsk->pid;
-
-       mutex_lock(&umh_list_lock);
-       list_for_each_entry(info, &umh_list, list) {
-               if (info->pid == pid) {
-                       list_del(&info->list);
-                       mutex_unlock(&umh_list_lock);
-                       goto out;
-               }
-       }
-       mutex_unlock(&umh_list_lock);
-       return;
-out:
-       if (info->cleanup)
-               info->cleanup(info);
-}
-
 struct ctl_table usermodehelper_table[] = {
        {
                .procname       = "bset",
diff --git a/kernel/usermode_driver.c b/kernel/usermode_driver.c
new file mode 100644 (file)
index 0000000..0b35212
--- /dev/null
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * umd - User mode driver support
+ */
+#include <linux/shmem_fs.h>
+#include <linux/pipe_fs_i.h>
+#include <linux/mount.h>
+#include <linux/fs_struct.h>
+#include <linux/task_work.h>
+#include <linux/usermode_driver.h>
+
+static struct vfsmount *blob_to_mnt(const void *data, size_t len, const char *name)
+{
+       struct file_system_type *type;
+       struct vfsmount *mnt;
+       struct file *file;
+       ssize_t written;
+       loff_t pos = 0;
+
+       type = get_fs_type("tmpfs");
+       if (!type)
+               return ERR_PTR(-ENODEV);
+
+       mnt = kern_mount(type);
+       put_filesystem(type);
+       if (IS_ERR(mnt))
+               return mnt;
+
+       file = file_open_root(mnt->mnt_root, mnt, name, O_CREAT | O_WRONLY, 0700);
+       if (IS_ERR(file)) {
+               mntput(mnt);
+               return ERR_CAST(file);
+       }
+
+       written = kernel_write(file, data, len, &pos);
+       if (written != len) {
+               int err = written;
+               if (err >= 0)
+                       err = -ENOMEM;
+               filp_close(file, NULL);
+               mntput(mnt);
+               return ERR_PTR(err);
+       }
+
+       fput(file);
+
+       /* Flush delayed fput so exec can open the file read-only */
+       flush_delayed_fput();
+       task_work_run();
+       return mnt;
+}
+
+/**
+ * umd_load_blob - Remember a blob of bytes for fork_usermode_driver
+ * @info: information about usermode driver
+ * @data: a blob of bytes that can be executed as a file
+ * @len:  The lentgh of the blob
+ *
+ */
+int umd_load_blob(struct umd_info *info, const void *data, size_t len)
+{
+       struct vfsmount *mnt;
+
+       if (WARN_ON_ONCE(info->wd.dentry || info->wd.mnt))
+               return -EBUSY;
+
+       mnt = blob_to_mnt(data, len, info->driver_name);
+       if (IS_ERR(mnt))
+               return PTR_ERR(mnt);
+
+       info->wd.mnt = mnt;
+       info->wd.dentry = mnt->mnt_root;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(umd_load_blob);
+
+/**
+ * umd_unload_blob - Disassociate @info from a previously loaded blob
+ * @info: information about usermode driver
+ *
+ */
+int umd_unload_blob(struct umd_info *info)
+{
+       if (WARN_ON_ONCE(!info->wd.mnt ||
+                        !info->wd.dentry ||
+                        info->wd.mnt->mnt_root != info->wd.dentry))
+               return -EINVAL;
+
+       kern_unmount(info->wd.mnt);
+       info->wd.mnt = NULL;
+       info->wd.dentry = NULL;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(umd_unload_blob);
+
+static int umd_setup(struct subprocess_info *info, struct cred *new)
+{
+       struct umd_info *umd_info = info->data;
+       struct file *from_umh[2];
+       struct file *to_umh[2];
+       int err;
+
+       /* create pipe to send data to umh */
+       err = create_pipe_files(to_umh, 0);
+       if (err)
+               return err;
+       err = replace_fd(0, to_umh[0], 0);
+       fput(to_umh[0]);
+       if (err < 0) {
+               fput(to_umh[1]);
+               return err;
+       }
+
+       /* create pipe to receive data from umh */
+       err = create_pipe_files(from_umh, 0);
+       if (err) {
+               fput(to_umh[1]);
+               replace_fd(0, NULL, 0);
+               return err;
+       }
+       err = replace_fd(1, from_umh[1], 0);
+       fput(from_umh[1]);
+       if (err < 0) {
+               fput(to_umh[1]);
+               replace_fd(0, NULL, 0);
+               fput(from_umh[0]);
+               return err;
+       }
+
+       set_fs_pwd(current->fs, &umd_info->wd);
+       umd_info->pipe_to_umh = to_umh[1];
+       umd_info->pipe_from_umh = from_umh[0];
+       umd_info->tgid = get_pid(task_tgid(current));
+       return 0;
+}
+
+static void umd_cleanup(struct subprocess_info *info)
+{
+       struct umd_info *umd_info = info->data;
+
+       /* cleanup if umh_setup() was successful but exec failed */
+       if (info->retval) {
+               fput(umd_info->pipe_to_umh);
+               fput(umd_info->pipe_from_umh);
+               put_pid(umd_info->tgid);
+               umd_info->tgid = NULL;
+       }
+}
+
+/**
+ * fork_usermode_driver - fork a usermode driver
+ * @info: information about usermode driver (shouldn't be NULL)
+ *
+ * Returns either negative error or zero which indicates success in
+ * executing a usermode driver. In such case 'struct umd_info *info'
+ * is populated with two pipes and a tgid of the process. The caller is
+ * responsible for health check of the user process, killing it via
+ * tgid, and closing the pipes when user process is no longer needed.
+ */
+int fork_usermode_driver(struct umd_info *info)
+{
+       struct subprocess_info *sub_info;
+       const char *argv[] = { info->driver_name, NULL };
+       int err;
+
+       if (WARN_ON_ONCE(info->tgid))
+               return -EBUSY;
+
+       err = -ENOMEM;
+       sub_info = call_usermodehelper_setup(info->driver_name,
+                                            (char **)argv, NULL, GFP_KERNEL,
+                                            umd_setup, umd_cleanup, info);
+       if (!sub_info)
+               goto out;
+
+       err = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
+out:
+       return err;
+}
+EXPORT_SYMBOL_GPL(fork_usermode_driver);
+
+
index 9fbe1e2..c41c3c1 100644 (file)
@@ -4638,11 +4638,11 @@ void print_worker_info(const char *log_lvl, struct task_struct *task)
         * Carefully copy the associated workqueue's workfn, name and desc.
         * Keep the original last '\0' in case the original is garbage.
         */
-       probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
-       probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
-       probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
-       probe_kernel_read(name, wq->name, sizeof(name) - 1);
-       probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
+       copy_from_kernel_nofault(&fn, &worker->current_func, sizeof(fn));
+       copy_from_kernel_nofault(&pwq, &worker->current_pwq, sizeof(pwq));
+       copy_from_kernel_nofault(&wq, &pwq->wq, sizeof(wq));
+       copy_from_kernel_nofault(name, wq->name, sizeof(name) - 1);
+       copy_from_kernel_nofault(desc, worker->desc, sizeof(desc) - 1);
 
        if (fn || name[0] || desc[0]) {
                printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
index d74ac0f..9ad9210 100644 (file)
@@ -229,7 +229,6 @@ config DEBUG_INFO_COMPRESSED
        bool "Compressed debugging information"
        depends on DEBUG_INFO
        depends on $(cc-option,-gz=zlib)
-       depends on $(as-option,-Wa$(comma)--compress-debug-sections=zlib)
        depends on $(ld-option,--compress-debug-sections=zlib)
        help
          Compress the debug information using zlib.  Requires GCC 5.0+ or Clang
index 81f5464..34b84bc 100644 (file)
@@ -15,11 +15,15 @@ config CC_HAS_KASAN_GENERIC
 config CC_HAS_KASAN_SW_TAGS
        def_bool $(cc-option, -fsanitize=kernel-hwaddress)
 
+config CC_HAS_WORKING_NOSANITIZE_ADDRESS
+       def_bool !CC_IS_GCC || GCC_VERSION >= 80300
+
 config KASAN
        bool "KASAN: runtime memory debugger"
        depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
                   (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)
        depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
+       depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
        help
          Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
          designed to find out-of-bounds accesses and use-after-free bugs.
index 7ac65a0..c7861e8 100644 (file)
@@ -149,12 +149,12 @@ EXPORT_SYMBOL(ip_compute_csum);
  * copy from ds while checksumming, otherwise like csum_partial
  */
 __wsum
-csum_partial_copy(const void *src, void *dst, int len, __wsum sum)
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {
        memcpy(dst, src, len);
        return csum_partial(dst, len, sum);
 }
-EXPORT_SYMBOL(csum_partial_copy);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
 
 #ifndef csum_tcpudp_nofold
 static inline u32 from64to32(u64 x)
index 50d1e9f..6ed72dc 100644 (file)
@@ -73,6 +73,7 @@ static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit,
  * @endbit: The index (in logical notation, compensated for quirks) where
  *         the packed value ends within pbuf. Must be smaller than, or equal
  *         to, startbit.
+ * @pbuflen: The length in bytes of the packed buffer pointed to by @pbuf.
  * @op: If PACK, then uval will be treated as const pointer and copied (packed)
  *     into pbuf, between startbit and endbit.
  *     If UNPACK, then pbuf will be treated as const pointer and the logical
index 4e865d4..707453f 100644 (file)
@@ -91,6 +91,7 @@ int seq_buf_printf(struct seq_buf *s, const char *fmt, ...)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(seq_buf_printf);
 
 #ifdef CONFIG_BINARY_PRINTF
 /**
index a5fddf9..ca7d635 100644 (file)
@@ -5275,31 +5275,21 @@ static struct bpf_test tests[] = {
        {       /* Mainly checking JIT here. */
                "BPF_MAXINSNS: Ctx heavy transformations",
                { },
-#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
-               CLASSIC | FLAG_EXPECTED_FAIL,
-#else
                CLASSIC,
-#endif
                { },
                {
                        {  1, SKB_VLAN_PRESENT },
                        { 10, SKB_VLAN_PRESENT }
                },
                .fill_helper = bpf_fill_maxinsns6,
-               .expected_errcode = -ENOTSUPP,
        },
        {       /* Mainly checking JIT here. */
                "BPF_MAXINSNS: Call heavy transformations",
                { },
-#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
-               CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
-#else
                CLASSIC | FLAG_NO_DATA,
-#endif
                { },
                { { 1, 0 }, { 10, 0 } },
                .fill_helper = bpf_fill_maxinsns7,
-               .expected_errcode = -ENOTSUPP,
        },
        {       /* Mainly checking JIT here. */
                "BPF_MAXINSNS: Jump heavy test",
@@ -5350,28 +5340,18 @@ static struct bpf_test tests[] = {
        {
                "BPF_MAXINSNS: exec all MSH",
                { },
-#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
-               CLASSIC | FLAG_EXPECTED_FAIL,
-#else
                CLASSIC,
-#endif
                { 0xfa, 0xfb, 0xfc, 0xfd, },
                { { 4, 0xababab83 } },
                .fill_helper = bpf_fill_maxinsns13,
-               .expected_errcode = -ENOTSUPP,
        },
        {
                "BPF_MAXINSNS: ld_abs+get_processor_id",
                { },
-#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
-               CLASSIC | FLAG_EXPECTED_FAIL,
-#else
                CLASSIC,
-#endif
                { },
                { { 1, 0xbee } },
                .fill_helper = bpf_fill_ld_abs_get_processor_id,
-               .expected_errcode = -ENOTSUPP,
        },
        /*
         * LD_IND / LD_ABS on fragmented SKBs
index 2852828..a2a8226 100644 (file)
@@ -520,8 +520,7 @@ static bool dmirror_allocate_chunk(struct dmirror_device *mdevice,
 err_free:
        kfree(devmem);
 err_release:
-       release_mem_region(devmem->pagemap.res.start,
-                          resource_size(&devmem->pagemap.res));
+       release_mem_region(res->start, resource_size(res));
 err:
        mutex_unlock(&mdevice->devmem_lock);
        return false;
index f258743..bd7c7ff 100644 (file)
@@ -419,8 +419,8 @@ static bool test_kernel_ptr(unsigned long addr, int size)
        /* should be at least readable kernel address */
        if (access_ok(ptr, 1) ||
            access_ok(ptr + size - 1, 1) ||
-           probe_kernel_address(ptr, buf) ||
-           probe_kernel_address(ptr + size - 1, buf)) {
+           get_kernel_nofault(buf, ptr) ||
+           get_kernel_nofault(buf, ptr + size - 1)) {
                pr_err("invalid kernel ptr: %#lx\n", addr);
                return true;
        }
@@ -437,7 +437,7 @@ static bool __maybe_unused test_magic(unsigned long addr, int offset,
        if (!addr)
                return false;
 
-       if (probe_kernel_address(ptr, magic) || magic != expected) {
+       if (get_kernel_nofault(magic, ptr) || magic != expected) {
                pr_err("invalid magic at %#lx + %#x = %#x, expected %#x\n",
                       addr, offset, magic, expected);
                return true;
index 72c1abf..da13793 100644 (file)
@@ -979,10 +979,10 @@ err_check_expect_stats2:
 err_world2_obj_get:
        for (i--; i >= 0; i--)
                world_obj_put(&world2, objagg, hints_case->key_ids[i]);
-       objagg_hints_put(hints);
-       objagg_destroy(objagg2);
        i = hints_case->key_ids_count;
+       objagg_destroy(objagg2);
 err_check_expect_hints_stats:
+       objagg_hints_put(hints);
 err_hints_get:
 err_check_expect_stats:
 err_world_obj_get:
index 0463ad2..26ecff8 100644 (file)
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -339,13 +339,13 @@ int __init cma_declare_contiguous_nid(phys_addr_t base,
                 */
                if (base < highmem_start && limit > highmem_start) {
                        addr = memblock_alloc_range_nid(size, alignment,
-                                       highmem_start, limit, nid, false);
+                                       highmem_start, limit, nid, true);
                        limit = highmem_start;
                }
 
                if (!addr) {
                        addr = memblock_alloc_range_nid(size, alignment, base,
-                                       limit, nid, false);
+                                       limit, nid, true);
                        if (!addr) {
                                ret = -ENOMEM;
                                goto err;
index 33c0b51..6698fa6 100644 (file)
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -2,6 +2,8 @@
 #ifndef __MM_CMA_H__
 #define __MM_CMA_H__
 
+#include <linux/debugfs.h>
+
 struct cma {
        unsigned long   base_pfn;
        unsigned long   count;
@@ -11,6 +13,7 @@ struct cma {
 #ifdef CONFIG_CMA_DEBUGFS
        struct hlist_head mem_head;
        spinlock_t mem_head_lock;
+       struct debugfs_u32_array dfs_bitmap;
 #endif
        const char *name;
 };
index 4e6cbe2..d5bf8aa 100644 (file)
@@ -164,7 +164,6 @@ static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry)
 {
        struct dentry *tmp;
        char name[16];
-       int u32s;
 
        scnprintf(name, sizeof(name), "cma-%s", cma->name);
 
@@ -180,8 +179,10 @@ static void cma_debugfs_add_one(struct cma *cma, struct dentry *root_dentry)
        debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops);
        debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops);
 
-       u32s = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32));
-       debugfs_create_u32_array("bitmap", 0444, tmp, (u32 *)cma->bitmap, u32s);
+       cma->dfs_bitmap.array = (u32 *)cma->bitmap;
+       cma->dfs_bitmap.n_elements = DIV_ROUND_UP(cma_bitmap_maxno(cma),
+                                                 BITS_PER_BYTE * sizeof(u32));
+       debugfs_create_u32_array("bitmap", 0444, tmp, &cma->dfs_bitmap);
 }
 
 static int __init cma_debugfs_init(void)
index fd988b7..8637560 100644 (file)
@@ -2316,15 +2316,26 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
                .page = NULL,
        };
 
-       current->capture_control = &capc;
+       /*
+        * Make sure the structs are really initialized before we expose the
+        * capture control, in case we are interrupted and the interrupt handler
+        * frees a page.
+        */
+       barrier();
+       WRITE_ONCE(current->capture_control, &capc);
 
        ret = compact_zone(&cc, &capc);
 
        VM_BUG_ON(!list_empty(&cc.freepages));
        VM_BUG_ON(!list_empty(&cc.migratepages));
 
-       *capture = capc.page;
-       current->capture_control = NULL;
+       /*
+        * Make sure we hide capture control first before we read the captured
+        * page pointer, otherwise an interrupt could free and capture a page
+        * and we would leak it.
+        */
+       WRITE_ONCE(current->capture_control, NULL);
+       *capture = READ_ONCE(capc.page);
 
        return ret;
 }
index b5b1de8..4f37651 100644 (file)
@@ -120,9 +120,9 @@ void __dump_page(struct page *page, const char *reason)
                 * mapping can be invalid pointer and we don't want to crash
                 * accessing it, so probe everything depending on it carefully
                 */
-               if (probe_kernel_read(&host, &mapping->host,
+               if (copy_from_kernel_nofault(&host, &mapping->host,
                                        sizeof(struct inode *)) ||
-                   probe_kernel_read(&a_ops, &mapping->a_ops,
+                   copy_from_kernel_nofault(&a_ops, &mapping->a_ops,
                                sizeof(struct address_space_operations *))) {
                        pr_warn("failed to read mapping->host or a_ops, mapping not a valid kernel address?\n");
                        goto out_mapping;
@@ -133,7 +133,7 @@ void __dump_page(struct page *page, const char *reason)
                        goto out_mapping;
                }
 
-               if (probe_kernel_read(&dentry_first,
+               if (copy_from_kernel_nofault(&dentry_first,
                        &host->i_dentry.first, sizeof(struct hlist_node *))) {
                        pr_warn("mapping->a_ops:%ps with invalid mapping->host inode address %px\n",
                                a_ops, host);
@@ -146,7 +146,7 @@ void __dump_page(struct page *page, const char *reason)
                }
 
                dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
-               if (probe_kernel_read(&dentry, dentry_ptr,
+               if (copy_from_kernel_nofault(&dentry, dentry_ptr,
                                                        sizeof(struct dentry))) {
                        pr_warn("mapping->aops:%ps with invalid mapping->host->i_dentry.first %px\n",
                                a_ops, dentry_ptr);
index e456230..61ab16f 100644 (file)
@@ -246,13 +246,13 @@ static void __init pgd_populate_tests(struct mm_struct *mm, pgd_t *pgdp,
 static void __init pte_clear_tests(struct mm_struct *mm, pte_t *ptep,
                                   unsigned long vaddr)
 {
-       pte_t pte = READ_ONCE(*ptep);
+       pte_t pte = ptep_get(ptep);
 
        pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
        set_pte_at(mm, vaddr, ptep, pte);
        barrier();
        pte_clear(mm, vaddr, ptep);
-       pte = READ_ONCE(*ptep);
+       pte = ptep_get(ptep);
        WARN_ON(!pte_none(pte));
 }
 
index f0ae9a6..385759c 100644 (file)
@@ -2028,7 +2028,7 @@ find_page:
 
                page = find_get_page(mapping, index);
                if (!page) {
-                       if (iocb->ki_flags & IOCB_NOWAIT)
+                       if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
                                goto would_block;
                        page_cache_sync_readahead(mapping,
                                        ra, filp,
@@ -2038,6 +2038,10 @@ find_page:
                                goto no_cached_page;
                }
                if (PageReadahead(page)) {
+                       if (iocb->ki_flags & IOCB_NOIO) {
+                               put_page(page);
+                               goto out;
+                       }
                        page_cache_async_readahead(mapping,
                                        ra, filp, page,
                                        index, last_index - index);
@@ -2160,6 +2164,11 @@ page_not_up_to_date_locked:
                }
 
 readpage:
+               if (iocb->ki_flags & IOCB_NOIO) {
+                       unlock_page(page);
+                       put_page(page);
+                       goto would_block;
+               }
                /*
                 * A previous I/O error may have been due to temporary
                 * failures, eg. multipath errors.
@@ -2249,9 +2258,19 @@ EXPORT_SYMBOL_GPL(generic_file_buffered_read);
  *
  * This is the "read_iter()" routine for all filesystems
  * that can use the page cache directly.
+ *
+ * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
+ * be returned when no data can be read without waiting for I/O requests
+ * to complete; it doesn't prevent readahead.
+ *
+ * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
+ * requests shall be made for the read or for readahead.  When no data
+ * can be read, -EAGAIN shall be returned.  When readahead would be
+ * triggered, a partial, possibly empty read shall be returned.
+ *
  * Return:
  * * number of bytes copied, even for partial reads
- * * negative error code if nothing was read
+ * * negative error code (or 0 if IOCB_NOIO) if nothing was read
  */
 ssize_t
 generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
index de9e362..6f47697 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2196,7 +2196,7 @@ static inline pte_t gup_get_pte(pte_t *ptep)
  */
 static inline pte_t gup_get_pte(pte_t *ptep)
 {
-       return READ_ONCE(*ptep);
+       return ptep_get(ptep);
 }
 #endif /* CONFIG_GUP_GET_PTE_LOW_HIGH */
 
@@ -2425,7 +2425,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
        if (pte_end < end)
                end = pte_end;
 
-       pte = READ_ONCE(*ptep);
+       pte = huge_ptep_get(ptep);
 
        if (!pte_access_permitted(pte, flags & FOLL_WRITE))
                return 0;
index 57ece74..fab4485 100644 (file)
@@ -1593,7 +1593,7 @@ static struct address_space *_get_hugetlb_page_mapping(struct page *hpage)
 
        /* Use first found vma */
        pgoff_start = page_to_pgoff(hpage);
-       pgoff_end = pgoff_start + hpage_nr_pages(hpage) - 1;
+       pgoff_end = pgoff_start + pages_per_huge_page(page_hstate(hpage)) - 1;
        anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
                                        pgoff_start, pgoff_end) {
                struct vm_area_struct *vma = avc->vma;
index 88845ed..f98ff91 100644 (file)
@@ -6,14 +6,15 @@
 #include <linux/mm.h>
 #include <linux/uaccess.h>
 
-bool __weak probe_kernel_read_allowed(const void *unsafe_src, size_t size)
+bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src,
+               size_t size)
 {
        return true;
 }
 
 #ifdef HAVE_GET_KERNEL_NOFAULT
 
-#define probe_kernel_read_loop(dst, src, len, type, err_label)         \
+#define copy_from_kernel_nofault_loop(dst, src, len, type, err_label)  \
        while (len >= sizeof(type)) {                                   \
                __get_kernel_nofault(dst, src, type, err_label);                \
                dst += sizeof(type);                                    \
@@ -21,25 +22,25 @@ bool __weak probe_kernel_read_allowed(const void *unsafe_src, size_t size)
                len -= sizeof(type);                                    \
        }
 
-long probe_kernel_read(void *dst, const void *src, size_t size)
+long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
 {
-       if (!probe_kernel_read_allowed(src, size))
+       if (!copy_from_kernel_nofault_allowed(src, size))
                return -ERANGE;
 
        pagefault_disable();
-       probe_kernel_read_loop(dst, src, size, u64, Efault);
-       probe_kernel_read_loop(dst, src, size, u32, Efault);
-       probe_kernel_read_loop(dst, src, size, u16, Efault);
-       probe_kernel_read_loop(dst, src, size, u8, Efault);
+       copy_from_kernel_nofault_loop(dst, src, size, u64, Efault);
+       copy_from_kernel_nofault_loop(dst, src, size, u32, Efault);
+       copy_from_kernel_nofault_loop(dst, src, size, u16, Efault);
+       copy_from_kernel_nofault_loop(dst, src, size, u8, Efault);
        pagefault_enable();
        return 0;
 Efault:
        pagefault_enable();
        return -EFAULT;
 }
-EXPORT_SYMBOL_GPL(probe_kernel_read);
+EXPORT_SYMBOL_GPL(copy_from_kernel_nofault);
 
-#define probe_kernel_write_loop(dst, src, len, type, err_label)                \
+#define copy_to_kernel_nofault_loop(dst, src, len, type, err_label)    \
        while (len >= sizeof(type)) {                                   \
                __put_kernel_nofault(dst, src, type, err_label);                \
                dst += sizeof(type);                                    \
@@ -47,13 +48,13 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
                len -= sizeof(type);                                    \
        }
 
-long probe_kernel_write(void *dst, const void *src, size_t size)
+long copy_to_kernel_nofault(void *dst, const void *src, size_t size)
 {
        pagefault_disable();
-       probe_kernel_write_loop(dst, src, size, u64, Efault);
-       probe_kernel_write_loop(dst, src, size, u32, Efault);
-       probe_kernel_write_loop(dst, src, size, u16, Efault);
-       probe_kernel_write_loop(dst, src, size, u8, Efault);
+       copy_to_kernel_nofault_loop(dst, src, size, u64, Efault);
+       copy_to_kernel_nofault_loop(dst, src, size, u32, Efault);
+       copy_to_kernel_nofault_loop(dst, src, size, u16, Efault);
+       copy_to_kernel_nofault_loop(dst, src, size, u8, Efault);
        pagefault_enable();
        return 0;
 Efault:
@@ -67,7 +68,7 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
 
        if (unlikely(count <= 0))
                return 0;
-       if (!probe_kernel_read_allowed(unsafe_addr, count))
+       if (!copy_from_kernel_nofault_allowed(unsafe_addr, count))
                return -ERANGE;
 
        pagefault_disable();
@@ -87,7 +88,7 @@ Efault:
 }
 #else /* HAVE_GET_KERNEL_NOFAULT */
 /**
- * probe_kernel_read(): safely attempt to read from kernel-space
+ * copy_from_kernel_nofault(): safely attempt to read from kernel-space
  * @dst: pointer to the buffer that shall take the data
  * @src: address to read from
  * @size: size of the data chunk
@@ -98,15 +99,15 @@ Efault:
  *
  * We ensure that the copy_from_user is executed in atomic context so that
  * do_page_fault() doesn't attempt to take mmap_lock.  This makes
- * probe_kernel_read() suitable for use within regions where the caller
+ * copy_from_kernel_nofault() suitable for use within regions where the caller
  * already holds mmap_lock, or other locks which nest inside mmap_lock.
  */
-long probe_kernel_read(void *dst, const void *src, size_t size)
+long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
 {
        long ret;
        mm_segment_t old_fs = get_fs();
 
-       if (!probe_kernel_read_allowed(src, size))
+       if (!copy_from_kernel_nofault_allowed(src, size))
                return -ERANGE;
 
        set_fs(KERNEL_DS);
@@ -120,10 +121,10 @@ long probe_kernel_read(void *dst, const void *src, size_t size)
                return -EFAULT;
        return 0;
 }
-EXPORT_SYMBOL_GPL(probe_kernel_read);
+EXPORT_SYMBOL_GPL(copy_from_kernel_nofault);
 
 /**
- * probe_kernel_write(): safely attempt to write to a location
+ * copy_to_kernel_nofault(): safely attempt to write to a location
  * @dst: address to write to
  * @src: pointer to the data that shall be written
  * @size: size of the data chunk
@@ -131,7 +132,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
  * Safely write to address @dst from the buffer at @src.  If a kernel fault
  * happens, handle that and return -EFAULT.
  */
-long probe_kernel_write(void *dst, const void *src, size_t size)
+long copy_to_kernel_nofault(void *dst, const void *src, size_t size)
 {
        long ret;
        mm_segment_t old_fs = get_fs();
@@ -174,7 +175,7 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
 
        if (unlikely(count <= 0))
                return 0;
-       if (!probe_kernel_read_allowed(unsafe_addr, count))
+       if (!copy_from_kernel_nofault_allowed(unsafe_addr, count))
                return -ERANGE;
 
        set_fs(KERNEL_DS);
@@ -193,7 +194,7 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
 #endif /* HAVE_GET_KERNEL_NOFAULT */
 
 /**
- * probe_user_read(): safely attempt to read from a user-space location
+ * copy_from_user_nofault(): safely attempt to read from a user-space location
  * @dst: pointer to the buffer that shall take the data
  * @src: address to read from. This must be a user address.
  * @size: size of the data chunk
@@ -201,7 +202,7 @@ long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr, long count)
  * Safely read from user address @src to the buffer at @dst. If a kernel fault
  * happens, handle that and return -EFAULT.
  */
-long probe_user_read(void *dst, const void __user *src, size_t size)
+long copy_from_user_nofault(void *dst, const void __user *src, size_t size)
 {
        long ret = -EFAULT;
        mm_segment_t old_fs = get_fs();
@@ -218,10 +219,10 @@ long probe_user_read(void *dst, const void __user *src, size_t size)
                return -EFAULT;
        return 0;
 }
-EXPORT_SYMBOL_GPL(probe_user_read);
+EXPORT_SYMBOL_GPL(copy_from_user_nofault);
 
 /**
- * probe_user_write(): safely attempt to write to a user-space location
+ * copy_to_user_nofault(): safely attempt to write to a user-space location
  * @dst: address to write to
  * @src: pointer to the data that shall be written
  * @size: size of the data chunk
@@ -229,7 +230,7 @@ EXPORT_SYMBOL_GPL(probe_user_read);
  * Safely write to address @dst from the buffer at @src.  If a kernel fault
  * happens, handle that and return -EFAULT.
  */
-long probe_user_write(void __user *dst, const void *src, size_t size)
+long copy_to_user_nofault(void __user *dst, const void *src, size_t size)
 {
        long ret = -EFAULT;
        mm_segment_t old_fs = get_fs();
@@ -246,7 +247,7 @@ long probe_user_write(void __user *dst, const void *src, size_t size)
                return -EFAULT;
        return 0;
 }
-EXPORT_SYMBOL_GPL(probe_user_write);
+EXPORT_SYMBOL_GPL(copy_to_user_nofault);
 
 /**
  * strncpy_from_user_nofault: - Copy a NUL terminated string from unsafe user
index 0b38b6a..1962232 100644 (file)
@@ -2772,8 +2772,10 @@ static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
                return;
 
        cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN);
-       if (!cw)
+       if (!cw) {
+               css_put(&memcg->css);
                return;
+       }
 
        cw->memcg = memcg;
        cw->cachep = cachep;
@@ -6360,11 +6362,16 @@ static unsigned long effective_protection(unsigned long usage,
         * We're using unprotected memory for the weight so that if
         * some cgroups DO claim explicit protection, we don't protect
         * the same bytes twice.
+        *
+        * Check both usage and parent_usage against the respective
+        * protected values. One should imply the other, but they
+        * aren't read atomically - make sure the division is sane.
         */
        if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
                return ep;
-
-       if (parent_effective > siblings_protected && usage > protected) {
+       if (parent_effective > siblings_protected &&
+           parent_usage > siblings_protected &&
+           usage > protected) {
                unsigned long unclaimed;
 
                unclaimed = parent_effective - siblings_protected;
@@ -6416,7 +6423,7 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
 
        if (parent == root) {
                memcg->memory.emin = READ_ONCE(memcg->memory.min);
-               memcg->memory.elow = memcg->memory.low;
+               memcg->memory.elow = READ_ONCE(memcg->memory.low);
                goto out;
        }
 
@@ -6428,7 +6435,8 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
                        atomic_long_read(&parent->memory.children_min_usage)));
 
        WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
-                       memcg->memory.low, READ_ONCE(parent->memory.elow),
+                       READ_ONCE(memcg->memory.low),
+                       READ_ONCE(parent->memory.elow),
                        atomic_long_read(&parent->memory.children_low_usage)));
 
 out:
index dc7f354..87ec87c 100644 (file)
@@ -1498,7 +1498,7 @@ out:
 }
 
 #ifdef pte_index
-static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
+static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte,
                        unsigned long addr, struct page *page, pgprot_t prot)
 {
        int err;
@@ -1506,8 +1506,9 @@ static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
        if (!page_count(page))
                return -EINVAL;
        err = validate_page_before_insert(page);
-       return err ? err : insert_page_into_pte_locked(
-               mm, pte_offset_map(pmd, addr), addr, page, prot);
+       if (err)
+               return err;
+       return insert_page_into_pte_locked(mm, pte, addr, page, prot);
 }
 
 /* insert_pages() amortizes the cost of spinlock operations
@@ -1517,7 +1518,8 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
                        struct page **pages, unsigned long *num, pgprot_t prot)
 {
        pmd_t *pmd = NULL;
-       spinlock_t *pte_lock = NULL;
+       pte_t *start_pte, *pte;
+       spinlock_t *pte_lock;
        struct mm_struct *const mm = vma->vm_mm;
        unsigned long curr_page_idx = 0;
        unsigned long remaining_pages_total = *num;
@@ -1536,18 +1538,17 @@ more:
        ret = -ENOMEM;
        if (pte_alloc(mm, pmd))
                goto out;
-       pte_lock = pte_lockptr(mm, pmd);
 
        while (pages_to_write_in_pmd) {
                int pte_idx = 0;
                const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
 
-               spin_lock(pte_lock);
-               for (; pte_idx < batch_size; ++pte_idx) {
-                       int err = insert_page_in_batch_locked(mm, pmd,
+               start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
+               for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
+                       int err = insert_page_in_batch_locked(mm, pte,
                                addr, pages[curr_page_idx], prot);
                        if (unlikely(err)) {
-                               spin_unlock(pte_lock);
+                               pte_unmap_unlock(start_pte, pte_lock);
                                ret = err;
                                remaining_pages_total -= pte_idx;
                                goto out;
@@ -1555,7 +1556,7 @@ more:
                        addr += PAGE_SIZE;
                        ++curr_page_idx;
                }
-               spin_unlock(pte_lock);
+               pte_unmap_unlock(start_pte, pte_lock);
                pages_to_write_in_pmd -= batch_size;
                remaining_pages_total -= batch_size;
        }
@@ -3140,8 +3141,18 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                                err = mem_cgroup_charge(page, vma->vm_mm,
                                                        GFP_KERNEL);
                                ClearPageSwapCache(page);
-                               if (err)
+                               if (err) {
+                                       ret = VM_FAULT_OOM;
                                        goto out_page;
+                               }
+
+                               /*
+                                * XXX: Move to lru_cache_add() when it
+                                * supports new vs putback
+                                */
+                               spin_lock_irq(&page_pgdat(page)->lru_lock);
+                               lru_note_cost_page(page);
+                               spin_unlock_irq(&page_pgdat(page)->lru_lock);
 
                                lru_cache_add(page);
                                swap_readpage(page, true);
index 9b34e03..da374cd 100644 (file)
@@ -471,11 +471,20 @@ void __ref remove_pfn_range_from_zone(struct zone *zone,
                                      unsigned long start_pfn,
                                      unsigned long nr_pages)
 {
+       const unsigned long end_pfn = start_pfn + nr_pages;
        struct pglist_data *pgdat = zone->zone_pgdat;
-       unsigned long flags;
+       unsigned long pfn, cur_nr_pages, flags;
 
        /* Poison struct pages because they are now uninitialized again. */
-       page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
+       for (pfn = start_pfn; pfn < end_pfn; pfn += cur_nr_pages) {
+               cond_resched();
+
+               /* Select all remaining pages up to the next section boundary */
+               cur_nr_pages =
+                       min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn);
+               page_init_poison(pfn_to_page(pfn),
+                                sizeof(struct page) * cur_nr_pages);
+       }
 
 #ifdef CONFIG_ZONE_DEVICE
        /*
index f377296..40cd701 100644 (file)
@@ -1161,21 +1161,10 @@ out:
 }
 
 /*
- * gcc 4.7 and 4.8 on arm get an ICEs when inlining unmap_and_move().  Work
- * around it.
- */
-#if defined(CONFIG_ARM) && \
-       defined(GCC_VERSION) && GCC_VERSION < 40900 && GCC_VERSION >= 40700
-#define ICE_noinline noinline
-#else
-#define ICE_noinline
-#endif
-
-/*
  * Obtain the lock on page, remove all ptes and migrate the page
  * to the newly allocated page in newpage.
  */
-static ICE_noinline int unmap_and_move(new_page_t get_new_page,
+static int unmap_and_move(new_page_t get_new_page,
                                   free_page_t put_new_page,
                                   unsigned long private, struct page *page,
                                   int force, enum migrate_mode mode,
index cdcad5d..f32a690 100644 (file)
@@ -291,23 +291,6 @@ void *vzalloc_node(unsigned long size, int node)
 EXPORT_SYMBOL(vzalloc_node);
 
 /**
- *     vmalloc_exec  -  allocate virtually contiguous, executable memory
- *     @size:          allocation size
- *
- *     Kernel-internal function to allocate enough pages to cover @size
- *     the page level allocator and map them into contiguous and
- *     executable kernel virtual space.
- *
- *     For tight control over page level allocator and protection flags
- *     use __vmalloc() instead.
- */
-
-void *vmalloc_exec(unsigned long size)
-{
-       return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM);
-}
-
-/**
  * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
  *     @size:          allocation size
  *
index 48eb0f1..e028b87 100644 (file)
@@ -7832,7 +7832,7 @@ void setup_per_zone_wmarks(void)
  * Initialise min_free_kbytes.
  *
  * For small machines we want it small (128k min).  For large machines
- * we want it large (64MB max).  But it is not linear, because network
+ * we want it large (256MB max).  But it is not linear, because network
  * bandwidth does not increase linearly with machine size.  We use
  *
  *     min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
index 5e313fa..2a99df7 100644 (file)
@@ -25,7 +25,7 @@ void rodata_test(void)
        }
 
        /* test 2: write to the variable; this should fault */
-       if (!probe_kernel_write((void *)&rodata_test_data,
+       if (!copy_to_kernel_nofault((void *)&rodata_test_data,
                                (void *)&zero, sizeof(zero))) {
                pr_err("test data was not read only\n");
                return;
index 207c83e..74f7e09 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -348,7 +348,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
                                             gfp_t gfp, int order,
                                             struct kmem_cache *s)
 {
-       unsigned int nr_pages = 1 << order;
+       int nr_pages = 1 << order;
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;
        int ret;
@@ -388,7 +388,7 @@ out:
 static __always_inline void memcg_uncharge_slab(struct page *page, int order,
                                                struct kmem_cache *s)
 {
-       unsigned int nr_pages = 1 << order;
+       int nr_pages = 1 << order;
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;
 
index 9e72ba2..37d48a5 100644 (file)
@@ -1726,7 +1726,7 @@ void kzfree(const void *p)
        if (unlikely(ZERO_OR_NULL_PTR(mem)))
                return;
        ks = ksize(mem);
-       memset(mem, 0, ks);
+       memzero_explicit(mem, ks);
        kfree(mem);
 }
 EXPORT_SYMBOL(kzfree);
index b8f798b..ef30307 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -292,7 +292,7 @@ static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
                return get_freepointer(s, object);
 
        freepointer_addr = (unsigned long)object + s->offset;
-       probe_kernel_read(&p, (void **)freepointer_addr, sizeof(p));
+       copy_from_kernel_nofault(&p, (void **)freepointer_addr, sizeof(p));
        return freelist_ptr(s, p, freepointer_addr);
 }
 
@@ -3766,15 +3766,13 @@ error:
 }
 
 static void list_slab_objects(struct kmem_cache *s, struct page *page,
-                             const char *text, unsigned long *map)
+                             const char *text)
 {
 #ifdef CONFIG_SLUB_DEBUG
        void *addr = page_address(page);
+       unsigned long *map;
        void *p;
 
-       if (!map)
-               return;
-
        slab_err(s, page, text, s->name);
        slab_lock(page);
 
@@ -3786,6 +3784,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
                        print_tracking(s, p);
                }
        }
+       put_map(map);
        slab_unlock(page);
 #endif
 }
@@ -3799,11 +3798,6 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
        LIST_HEAD(discard);
        struct page *page, *h;
-       unsigned long *map = NULL;
-
-#ifdef CONFIG_SLUB_DEBUG
-       map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
-#endif
 
        BUG_ON(irqs_disabled());
        spin_lock_irq(&n->list_lock);
@@ -3813,16 +3807,11 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
                        list_add(&page->slab_list, &discard);
                } else {
                        list_slab_objects(s, page,
-                         "Objects remaining in %s on __kmem_cache_shutdown()",
-                         map);
+                         "Objects remaining in %s on __kmem_cache_shutdown()");
                }
        }
        spin_unlock_irq(&n->list_lock);
 
-#ifdef CONFIG_SLUB_DEBUG
-       bitmap_free(map);
-#endif
-
        list_for_each_entry_safe(page, h, &discard, slab_list)
                discard_slab(s, page);
 }
index dbcab84..a82efc3 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -443,8 +443,7 @@ void mark_page_accessed(struct page *page)
                else
                        __lru_cache_activate_page(page);
                ClearPageReferenced(page);
-               if (page_is_file_lru(page))
-                       workingset_activation(page);
+               workingset_activation(page);
        }
        if (page_is_idle(page))
                clear_page_idle(page);
index e98ff46..05889e8 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/vmalloc.h>
 #include <linux/swap_slots.h>
 #include <linux/huge_mm.h>
-
+#include "internal.h"
 
 /*
  * swapper_space is a fiction, retained to simplify the path through
@@ -429,7 +429,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
        __SetPageSwapBacked(page);
 
        /* May fail (-ENOMEM) if XArray node allocation failed. */
-       if (add_to_swap_cache(page, entry, gfp_mask & GFP_KERNEL)) {
+       if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK)) {
                put_swap_page(page, entry);
                goto fail_unlock;
        }
index 3091c2c..5a2b55c 100644 (file)
@@ -1862,7 +1862,6 @@ EXPORT_SYMBOL(vm_unmap_ram);
  * @pages: an array of pointers to the pages to be mapped
  * @count: number of pages
  * @node: prefer to allocate data structures on this node
- * @prot: memory protection to use. PAGE_KERNEL for regular RAM
  *
  * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
  * faster than vmap so it's good.  But if you mix long-life and short-life
@@ -2696,26 +2695,6 @@ void *vzalloc_node(unsigned long size, int node)
 }
 EXPORT_SYMBOL(vzalloc_node);
 
-/**
- * vmalloc_exec - allocate virtually contiguous, executable memory
- * @size:        allocation size
- *
- * Kernel-internal function to allocate enough pages to cover @size
- * the page level allocator and map them into contiguous and
- * executable kernel virtual space.
- *
- * For tight control over page level allocator and protection flags
- * use __vmalloc() instead.
- *
- * Return: pointer to the allocated memory or %NULL on error
- */
-void *vmalloc_exec(unsigned long size)
-{
-       return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
-                       GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
-                       NUMA_NO_NODE, __builtin_return_address(0));
-}
-
 #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
 #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
 #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
index b6d8432..749d239 100644 (file)
@@ -904,6 +904,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
                __delete_from_swap_cache(page, swap);
                xa_unlock_irqrestore(&mapping->i_pages, flags);
                put_swap_page(page, swap);
+               workingset_eviction(page, target_memcg);
        } else {
                void (*freepage)(struct page *);
                void *shadow = NULL;
@@ -1884,6 +1885,8 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
                                list_add(&page->lru, &pages_to_free);
                } else {
                        nr_moved += nr_pages;
+                       if (PageActive(page))
+                               workingset_age_nonresident(lruvec, nr_pages);
                }
        }
 
index d481ea4..50b7937 100644 (file)
  *
  *             Implementation
  *
- * For each node's file LRU lists, a counter for inactive evictions
- * and activations is maintained (node->inactive_age).
+ * For each node's LRU lists, a counter for inactive evictions and
+ * activations is maintained (node->nonresident_age).
  *
  * On eviction, a snapshot of this counter (along with some bits to
  * identify the node) is stored in the now empty page cache
@@ -213,7 +213,17 @@ static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
        *workingsetp = workingset;
 }
 
-static void advance_inactive_age(struct mem_cgroup *memcg, pg_data_t *pgdat)
+/**
+ * workingset_age_nonresident - age non-resident entries as LRU ages
+ * @memcg: the lruvec that was aged
+ * @nr_pages: the number of pages to count
+ *
+ * As in-memory pages are aged, non-resident pages need to be aged as
+ * well, in order for the refault distances later on to be comparable
+ * to the in-memory dimensions. This function allows reclaim and LRU
+ * operations to drive the non-resident aging along in parallel.
+ */
+void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages)
 {
        /*
         * Reclaiming a cgroup means reclaiming all its children in a
@@ -227,11 +237,8 @@ static void advance_inactive_age(struct mem_cgroup *memcg, pg_data_t *pgdat)
         * the root cgroup's, age as well.
         */
        do {
-               struct lruvec *lruvec;
-
-               lruvec = mem_cgroup_lruvec(memcg, pgdat);
-               atomic_long_inc(&lruvec->inactive_age);
-       } while (memcg && (memcg = parent_mem_cgroup(memcg)));
+               atomic_long_add(nr_pages, &lruvec->nonresident_age);
+       } while ((lruvec = parent_lruvec(lruvec)));
 }
 
 /**
@@ -254,12 +261,11 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg)
        VM_BUG_ON_PAGE(page_count(page), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
 
-       advance_inactive_age(page_memcg(page), pgdat);
-
        lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
+       workingset_age_nonresident(lruvec, hpage_nr_pages(page));
        /* XXX: target_memcg can be NULL, go through lruvec */
        memcgid = mem_cgroup_id(lruvec_memcg(lruvec));
-       eviction = atomic_long_read(&lruvec->inactive_age);
+       eviction = atomic_long_read(&lruvec->nonresident_age);
        return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page));
 }
 
@@ -309,20 +315,20 @@ void workingset_refault(struct page *page, void *shadow)
        if (!mem_cgroup_disabled() && !eviction_memcg)
                goto out;
        eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat);
-       refault = atomic_long_read(&eviction_lruvec->inactive_age);
+       refault = atomic_long_read(&eviction_lruvec->nonresident_age);
 
        /*
         * Calculate the refault distance
         *
         * The unsigned subtraction here gives an accurate distance
-        * across inactive_age overflows in most cases. There is a
+        * across nonresident_age overflows in most cases. There is a
         * special case: usually, shadow entries have a short lifetime
         * and are either refaulted or reclaimed along with the inode
         * before they get too old.  But it is not impossible for the
-        * inactive_age to lap a shadow entry in the field, which can
-        * then result in a false small refault distance, leading to a
-        * false activation should this old entry actually refault
-        * again.  However, earlier kernels used to deactivate
+        * nonresident_age to lap a shadow entry in the field, which
+        * can then result in a false small refault distance, leading
+        * to a false activation should this old entry actually
+        * refault again.  However, earlier kernels used to deactivate
         * unconditionally with *every* reclaim invocation for the
         * longest time, so the occasional inappropriate activation
         * leading to pressure on the active list is not a problem.
@@ -359,7 +365,7 @@ void workingset_refault(struct page *page, void *shadow)
                goto out;
 
        SetPageActive(page);
-       advance_inactive_age(memcg, pgdat);
+       workingset_age_nonresident(lruvec, hpage_nr_pages(page));
        inc_lruvec_state(lruvec, WORKINGSET_ACTIVATE);
 
        /* Page was active prior to eviction */
@@ -382,6 +388,7 @@ out:
 void workingset_activation(struct page *page)
 {
        struct mem_cgroup *memcg;
+       struct lruvec *lruvec;
 
        rcu_read_lock();
        /*
@@ -394,7 +401,8 @@ void workingset_activation(struct page *page)
        memcg = page_memcg_rcu(page);
        if (!mem_cgroup_disabled() && !memcg)
                goto out;
-       advance_inactive_age(memcg, page_pgdat(page));
+       lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
+       workingset_age_nonresident(lruvec, hpage_nr_pages(page));
 out:
        rcu_read_unlock();
 }
index c8d6a07..3dd7c97 100644 (file)
@@ -503,11 +503,10 @@ static void vlan_dev_set_lockdep_one(struct net_device *dev,
        lockdep_set_class(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key);
 }
 
-static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
+static void vlan_dev_set_lockdep_class(struct net_device *dev)
 {
-       lockdep_set_class_and_subclass(&dev->addr_list_lock,
-                                      &vlan_netdev_addr_lock_key,
-                                      subclass);
+       lockdep_set_class(&dev->addr_list_lock,
+                         &vlan_netdev_addr_lock_key);
        netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, NULL);
 }
 
@@ -601,7 +600,7 @@ static int vlan_dev_init(struct net_device *dev)
 
        SET_NETDEV_DEVTYPE(dev, &vlan_type);
 
-       vlan_dev_set_lockdep_class(dev, dev->lower_level);
+       vlan_dev_set_lockdep_class(dev);
 
        vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
        if (!vlan->vlan_pcpu_stats)
index fc1f363..09f1ec5 100644 (file)
@@ -811,7 +811,7 @@ reterr:
  * @uodata: source for zero copy write
  * @inlen: read buffer size
  * @olen: write buffer size
- * @hdrlen: reader header size, This is the size of response protocol data
+ * @in_hdrlen: reader header size, This is the size of response protocol data
  * @fmt: protocol format string (see protocol.c)
  *
  * Returns request structure (which client must free using p9_tag_remove)
index c1b6242..5126566 100644 (file)
@@ -189,3 +189,4 @@ MODULE_AUTHOR("Latchesar Ionkov <lucho@ionkov.net>");
 MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
 MODULE_AUTHOR("Ron Minnich <rminnich@lanl.gov>");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Plan 9 Resource Sharing Support (9P2000)");
index b21c3c2..2885ff9 100644 (file)
@@ -94,14 +94,15 @@ struct p9_trans_rdma {
        struct completion cm_done;
 };
 
+struct p9_rdma_req;
+
 /**
- * p9_rdma_context - Keeps track of in-process WR
+ * struct p9_rdma_context - Keeps track of in-process WR
  *
  * @busa: Bus address to unmap when the WR completes
  * @req: Keeps track of requests (send)
  * @rc: Keepts track of replies (receive)
  */
-struct p9_rdma_req;
 struct p9_rdma_context {
        struct ib_cqe cqe;
        dma_addr_t busa;
@@ -112,7 +113,7 @@ struct p9_rdma_context {
 };
 
 /**
- * p9_rdma_opts - Collection of mount options
+ * struct p9_rdma_opts - Collection of mount options
  * @port: port of connection
  * @sq_depth: The requested depth of the SQ. This really doesn't need
  * to be any deeper than the number of threads used in the client
index d167228..3831206 100644 (file)
@@ -455,7 +455,6 @@ config FAILOVER
 config ETHTOOL_NETLINK
        bool "Netlink interface for ethtool"
        default y
-       depends on PHYLIB=y || PHYLIB=n
        help
          An alternative userspace interface for ethtool based on generic
          netlink. It provides better extensibility and some new features,
index 15787e8..1d48708 100644 (file)
@@ -1917,8 +1917,6 @@ static const struct proto_ops atalk_dgram_ops = {
 #endif
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
-       .setsockopt     = sock_no_setsockopt,
-       .getsockopt     = sock_no_getsockopt,
        .sendmsg        = atalk_sendmsg,
        .recvmsg        = atalk_recvmsg,
        .mmap           = sock_no_mmap,
index 8575f5d..84367b8 100644 (file)
@@ -745,7 +745,7 @@ static int check_qos(const struct atm_qos *qos)
 }
 
 int vcc_setsockopt(struct socket *sock, int level, int optname,
-                  char __user *optval, unsigned int optlen)
+                  sockptr_t optval, unsigned int optlen)
 {
        struct atm_vcc *vcc;
        unsigned long value;
@@ -760,7 +760,7 @@ int vcc_setsockopt(struct socket *sock, int level, int optname,
        {
                struct atm_qos qos;
 
-               if (copy_from_user(&qos, optval, sizeof(qos)))
+               if (copy_from_sockptr(&qos, optval, sizeof(qos)))
                        return -EFAULT;
                error = check_qos(&qos);
                if (error)
@@ -774,7 +774,7 @@ int vcc_setsockopt(struct socket *sock, int level, int optname,
                return 0;
        }
        case SO_SETCLP:
-               if (get_user(value, (unsigned long __user *)optval))
+               if (copy_from_sockptr(&value, optval, sizeof(value)))
                        return -EFAULT;
                if (value)
                        vcc->atm_options |= ATM_ATMOPT_CLP;
@@ -782,13 +782,8 @@ int vcc_setsockopt(struct socket *sock, int level, int optname,
                        vcc->atm_options &= ~ATM_ATMOPT_CLP;
                return 0;
        default:
-               if (level == SOL_SOCKET)
-                       return -EINVAL;
-               break;
-       }
-       if (!vcc->dev || !vcc->dev->ops->setsockopt)
                return -EINVAL;
-       return vcc->dev->ops->setsockopt(vcc, level, optname, optval, optlen);
+       }
 }
 
 int vcc_getsockopt(struct socket *sock, int level, int optname,
@@ -826,13 +821,8 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,
                return copy_to_user(optval, &pvc, sizeof(pvc)) ? -EFAULT : 0;
        }
        default:
-               if (level == SOL_SOCKET)
-                       return -EINVAL;
-               break;
-       }
-       if (!vcc->dev || !vcc->dev->ops->getsockopt)
                return -EINVAL;
-       return vcc->dev->ops->getsockopt(vcc, level, optname, optval, len);
+       }
 }
 
 int register_atmdevice_notifier(struct notifier_block *nb)
index 5850649..a1e56e8 100644 (file)
@@ -21,7 +21,7 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait);
 int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
 int vcc_setsockopt(struct socket *sock, int level, int optname,
-                  char __user *optval, unsigned int optlen);
+                  sockptr_t optval, unsigned int optlen);
 int vcc_getsockopt(struct socket *sock, int level, int optname,
                   char __user *optval, int __user *optlen);
 void vcc_process_recv_queue(struct atm_vcc *vcc);
index 1205d87..39115fe 100644 (file)
@@ -44,7 +44,7 @@ struct lec_arp_table {
        u8 *tlvs;
        u32 sizeoftlvs;                 /*
                                         * LANE2: Each MAC address can have TLVs
-                                        * associated with it.  sizeoftlvs tells the
+                                        * associated with it.  sizeoftlvs tells
                                         * the length of the tlvs array
                                         */
        struct sk_buff_head tx_wait;    /* wait queue for outgoing packets */
index 02bd2a4..53e7d3f 100644 (file)
@@ -63,7 +63,7 @@ static int pvc_connect(struct socket *sock, struct sockaddr *sockaddr,
 }
 
 static int pvc_setsockopt(struct socket *sock, int level, int optname,
-                         char __user *optval, unsigned int optlen)
+                         sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        int error;
index ba144d0..4a02bca 100644 (file)
@@ -451,7 +451,7 @@ int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
 }
 
 static int svc_setsockopt(struct socket *sock, int level, int optname,
-                         char __user *optval, unsigned int optlen)
+                         sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct atm_vcc *vcc = ATM_SD(sock);
@@ -464,7 +464,7 @@ static int svc_setsockopt(struct socket *sock, int level, int optname,
                        error = -EINVAL;
                        goto out;
                }
-               if (copy_from_user(&vcc->sap, optval, optlen)) {
+               if (copy_from_sockptr(&vcc->sap, optval, optlen)) {
                        error = -EFAULT;
                        goto out;
                }
@@ -475,7 +475,7 @@ static int svc_setsockopt(struct socket *sock, int level, int optname,
                        error = -EINVAL;
                        goto out;
                }
-               if (get_user(value, (int __user *)optval)) {
+               if (copy_from_sockptr(&value, optval, sizeof(int))) {
                        error = -EFAULT;
                        goto out;
                }
index 97d686d..d3a9843 100644 (file)
@@ -8,7 +8,7 @@ menuconfig HAMRADIO
        bool "Amateur Radio support"
        help
          If you want to connect your Linux box to an amateur radio, answer Y
-         here. You want to read <http://www.tapr.org/>
+         here. You want to read <https://www.tapr.org/>
          and more specifically about AX.25 on Linux
          <http://www.linux-ax25.org/>.
 
@@ -39,11 +39,11 @@ config AX25
          Information about where to get supporting software for Linux amateur
          radio as well as information about how to configure an AX.25 port is
          contained in the AX25-HOWTO, available from
-         <http://www.tldp.org/docs.html#howto>. You might also want to
+         <https://www.tldp.org/docs.html#howto>. You might also want to
          check out the file <file:Documentation/networking/ax25.rst> in the
          kernel source. More information about digital amateur radio in
          general is on the WWW at
-         <http://www.tapr.org/>.
+         <https://www.tapr.org/>.
 
          To compile this driver as a module, choose M here: the
          module will be called ax25.
@@ -90,7 +90,7 @@ config NETROM
          <http://www.linux-ax25.org>. You also might want to check out the
          file <file:Documentation/networking/ax25.rst>. More information about
          digital amateur radio in general is on the WWW at
-         <http://www.tapr.org/>.
+         <https://www.tapr.org/>.
 
          To compile this driver as a module, choose M here: the
          module will be called netrom.
@@ -109,7 +109,7 @@ config ROSE
          <http://www.linux-ax25.org>.  You also might want to check out the
          file <file:Documentation/networking/ax25.rst>. More information about
          digital amateur radio in general is on the WWW at
-         <http://www.tapr.org/>.
+         <https://www.tapr.org/>.
 
          To compile this driver as a module, choose M here: the
          module will be called rose.
index fd91cd3..17bf31a 100644 (file)
@@ -528,7 +528,7 @@ ax25_cb *ax25_create_cb(void)
  */
 
 static int ax25_setsockopt(struct socket *sock, int level, int optname,
-       char __user *optval, unsigned int optlen)
+               sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        ax25_cb *ax25;
@@ -543,7 +543,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
        if (optlen < sizeof(unsigned int))
                return -EINVAL;
 
-       if (get_user(opt, (unsigned int __user *)optval))
+       if (copy_from_sockptr(&opt, optval, sizeof(unsigned int)))
                return -EFAULT;
 
        lock_sock(sk);
@@ -640,7 +640,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
 
                memset(devname, 0, sizeof(devname));
 
-               if (copy_from_user(devname, optval, optlen)) {
+               if (copy_from_sockptr(devname, optval, optlen)) {
                        res = -EFAULT;
                        break;
                }
index e87f19c..a4faf5f 100644 (file)
@@ -134,7 +134,7 @@ static u8 batadv_ring_buffer_avg(const u8 lq_recv[])
  *
  * Return: the originator object corresponding to the passed mac address or NULL
  * on failure.
- * If the object does not exists it is created an initialised.
+ * If the object does not exist, it is created and initialised.
  */
 static struct batadv_orig_node *
 batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr)
@@ -871,7 +871,7 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
 }
 
 /**
- * batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over iterface
+ * batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over interface
  * @orig_node: originator which reproadcasted the OGMs directly
  * @if_outgoing: interface which transmitted the original OGM and received the
  *  direct rebroadcast
@@ -1075,10 +1075,10 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
        struct batadv_neigh_ifinfo *neigh_ifinfo;
        u8 total_count;
        u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
+       unsigned int tq_iface_hop_penalty = BATADV_TQ_MAX_VALUE;
        unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
        unsigned int tq_asym_penalty, inv_asym_penalty;
        unsigned int combined_tq;
-       unsigned int tq_iface_penalty;
        bool ret = false;
 
        /* find corresponding one hop neighbor */
@@ -1157,31 +1157,32 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
        inv_asym_penalty = BATADV_TQ_MAX_VALUE * neigh_rq_inv_cube;
        inv_asym_penalty /= neigh_rq_max_cube;
        tq_asym_penalty = BATADV_TQ_MAX_VALUE - inv_asym_penalty;
+       tq_iface_hop_penalty -= atomic_read(&if_incoming->hop_penalty);
 
        /* penalize if the OGM is forwarded on the same interface. WiFi
         * interfaces and other half duplex devices suffer from throughput
         * drops as they can't send and receive at the same time.
         */
-       tq_iface_penalty = BATADV_TQ_MAX_VALUE;
        if (if_outgoing && if_incoming == if_outgoing &&
            batadv_is_wifi_hardif(if_outgoing))
-               tq_iface_penalty = batadv_hop_penalty(BATADV_TQ_MAX_VALUE,
-                                                     bat_priv);
+               tq_iface_hop_penalty = batadv_hop_penalty(tq_iface_hop_penalty,
+                                                         bat_priv);
 
        combined_tq = batadv_ogm_packet->tq *
                      tq_own *
                      tq_asym_penalty *
-                     tq_iface_penalty;
+                     tq_iface_hop_penalty;
        combined_tq /= BATADV_TQ_MAX_VALUE *
                       BATADV_TQ_MAX_VALUE *
                       BATADV_TQ_MAX_VALUE;
        batadv_ogm_packet->tq = combined_tq;
 
        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
-                  "bidirectional: orig = %pM neigh = %pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, iface_penalty: %3i, total tq: %3i, if_incoming = %s, if_outgoing = %s\n",
+                  "bidirectional: orig = %pM neigh = %pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, iface_hop_penalty: %3i, total tq: %3i, if_incoming = %s, if_outgoing = %s\n",
                   orig_node->orig, orig_neigh_node->orig, total_count,
-                  neigh_rq_count, tq_own, tq_asym_penalty, tq_iface_penalty,
-                  batadv_ogm_packet->tq, if_incoming->net_dev->name,
+                  neigh_rq_count, tq_own, tq_asym_penalty,
+                  tq_iface_hop_penalty, batadv_ogm_packet->tq,
+                  if_incoming->net_dev->name,
                   if_outgoing ? if_outgoing->net_dev->name : "DEFAULT");
 
        /* if link has the minimum required transmission quality
@@ -1554,7 +1555,7 @@ static void batadv_iv_ogm_process_reply(struct batadv_ogm_packet *ogm_packet,
  * batadv_iv_ogm_process() - process an incoming batman iv OGM
  * @skb: the skb containing the OGM
  * @ogm_offset: offset to the OGM which should be processed (for aggregates)
- * @if_incoming: the interface where this packet was receved
+ * @if_incoming: the interface where this packet was received
  */
 static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
                                  struct batadv_hard_iface *if_incoming)
@@ -2288,7 +2289,7 @@ batadv_iv_ogm_neigh_dump_hardif(struct sk_buff *msg, u32 portid, u32 seq,
  * @msg: Netlink message to dump into
  * @cb: Control block containing additional options
  * @bat_priv: The bat priv with all the soft interface information
- * @single_hardif: Limit dump to this hard interfaace
+ * @single_hardif: Limit dump to this hard interface
  */
 static void
 batadv_iv_ogm_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb,
index 0bdefa3..d35aca0 100644 (file)
@@ -60,7 +60,7 @@ static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface)
  * @neigh: the neighbour for which the throughput has to be obtained
  *
  * Return: The throughput towards the given neighbour in multiples of 100kpbs
- *         (a value of '1' equals to 0.1Mbps, '10' equals 1Mbps, etc).
+ *         (a value of '1' equals 0.1Mbps, '10' equals 1Mbps, etc).
  */
 static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
 {
@@ -183,8 +183,8 @@ void batadv_v_elp_throughput_metric_update(struct work_struct *work)
  *
  * Sends a predefined number of unicast wifi packets to a given neighbour in
  * order to trigger the throughput estimation on this link by the RC algorithm.
- * Packets are sent only if there there is not enough payload unicast traffic
- * towards this neighbour..
+ * Packets are sent only if there is not enough payload unicast traffic towards
+ * this neighbour.
  *
  * Return: True on success and false in case of error during skb preparation.
  */
@@ -244,7 +244,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
  * batadv_v_elp_periodic_work() - ELP periodic task per interface
  * @work: work queue item
  *
- * Emits broadcast ELP message in regular intervals.
+ * Emits broadcast ELP messages in regular intervals.
  */
 static void batadv_v_elp_periodic_work(struct work_struct *work)
 {
@@ -499,7 +499,7 @@ orig_free:
  * @skb: the received packet
  * @if_incoming: the interface this packet was received through
  *
- * Return: NET_RX_SUCCESS and consumes the skb if the packet was peoperly
+ * Return: NET_RX_SUCCESS and consumes the skb if the packet was properly
  * processed or NET_RX_DROP in case of failure.
  */
 int batadv_v_elp_packet_recv(struct sk_buff *skb,
index 18028b9..0f8495b 100644 (file)
@@ -47,9 +47,9 @@
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the address of the originator
  *
- * Return: the orig_node corresponding to the specified address. If such object
- * does not exist it is allocated here. In case of allocation failure returns
- * NULL.
+ * Return: the orig_node corresponding to the specified address. If such an
+ * object does not exist, it is allocated here. In case of allocation failure
+ * returns NULL.
  */
 struct batadv_orig_node *batadv_v_ogm_orig_get(struct batadv_priv *bat_priv,
                                               const u8 *addr)
@@ -172,7 +172,7 @@ static bool batadv_v_ogm_queue_left(struct sk_buff *skb,
  * batadv_v_ogm_aggr_list_free - free all elements in an aggregation queue
  * @hard_iface: the interface holding the aggregation queue
  *
- * Empties the OGMv2 aggregation queue and frees all the skbs it contained.
+ * Empties the OGMv2 aggregation queue and frees all the skbs it contains.
  *
  * Caller needs to hold the hard_iface->bat_v.aggr_list.lock.
  */
@@ -378,7 +378,7 @@ static void batadv_v_ogm_send(struct work_struct *work)
  * batadv_v_ogm_aggr_work() - OGM queue periodic task per interface
  * @work: work queue item
  *
- * Emits aggregated OGM message in regular intervals.
+ * Emits aggregated OGM messages in regular intervals.
  */
 void batadv_v_ogm_aggr_work(struct work_struct *work)
 {
@@ -399,7 +399,7 @@ void batadv_v_ogm_aggr_work(struct work_struct *work)
  * batadv_v_ogm_iface_enable() - prepare an interface for B.A.T.M.A.N. V
  * @hard_iface: the interface to prepare
  *
- * Takes care of scheduling own OGM sending routine for this interface.
+ * Takes care of scheduling its own OGM sending routine for this interface.
  *
  * Return: 0 on success or a negative error code otherwise
  */
@@ -455,15 +455,17 @@ unlock:
  * @throughput: the current throughput
  *
  * Apply a penalty on the current throughput metric value based on the
- * characteristic of the interface where the OGM has been received. The return
- * value is computed as follows:
+ * characteristic of the interface where the OGM has been received.
+ *
+ * Initially the per hardif hop penalty is applied to the throughput. After
+ * that the return value is then computed as follows:
  * - throughput * 50%          if the incoming and outgoing interface are the
  *                             same WiFi interface and the throughput is above
  *                             1MBit/s
  * - throughput                if the outgoing interface is the default
  *                             interface (i.e. this OGM is processed for the
  *                             internal table and not forwarded)
- * - throughput * hop penalty  otherwise
+ * - throughput * node hop penalty  otherwise
  *
  * Return: the penalised throughput metric.
  */
@@ -472,9 +474,14 @@ static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv,
                                    struct batadv_hard_iface *if_outgoing,
                                    u32 throughput)
 {
+       int if_hop_penalty = atomic_read(&if_incoming->hop_penalty);
        int hop_penalty = atomic_read(&bat_priv->hop_penalty);
        int hop_penalty_max = BATADV_TQ_MAX_VALUE;
 
+       /* Apply per hardif hop penalty */
+       throughput = throughput * (hop_penalty_max - if_hop_penalty) /
+                    hop_penalty_max;
+
        /* Don't apply hop penalty in default originator table. */
        if (if_outgoing == BATADV_IF_DEFAULT)
                return throughput;
@@ -847,7 +854,7 @@ batadv_v_ogm_aggr_packet(int buff_pos, int packet_len,
  * batadv_v_ogm_process() - process an incoming batman v OGM
  * @skb: the skb containing the OGM
  * @ogm_offset: offset to the OGM which should be processed (for aggregates)
- * @if_incoming: the interface where this packet was receved
+ * @if_incoming: the interface where this packet was received
  */
 static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
                                 struct batadv_hard_iface *if_incoming)
index 41cc87f..91a04ca 100644 (file)
@@ -992,7 +992,7 @@ static bool batadv_handle_claim(struct batadv_priv *bat_priv,
  * @hw_dst: the Hardware destination in the ARP Header
  * @ethhdr: pointer to the Ethernet header of the claim frame
  *
- * checks if it is a claim packet and if its on the same group.
+ * checks if it is a claim packet and if it's on the same group.
  * This function also applies the group ID of the sender
  * if it is in the same mesh.
  *
@@ -1757,7 +1757,7 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
  * @vid: the VLAN ID of the frame
  *
  * Checks if this packet is a loop detect frame which has been sent by us,
- * throw an uevent and log the event if that is the case.
+ * throws an uevent and logs the event if that is the case.
  *
  * Return: true if it is a loop detect frame which is to be dropped, false
  * otherwise.
@@ -1815,7 +1815,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
  *  * we have to race for a claim
  *  * if the frame is allowed on the LAN
  *
- * in these cases, the skb is further handled by this function
+ * In these cases, the skb is further handled by this function
  *
  * Return: true if handled, otherwise it returns false and the caller shall
  * further process the skb.
index b85da4b..0e6e53e 100644 (file)
@@ -666,7 +666,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
  * @vid: VLAN identifier
  * @packet_subtype: unicast4addr packet subtype to use
  *
- * This function copies the skb with pskb_copy() and is sent as unicast packet
+ * This function copies the skb with pskb_copy() and sends it as unicast packet
  * to each of the selected candidates.
  *
  * Return: true if the packet is sent to at least one candidate, false
index 7cad976..9fdbe30 100644 (file)
@@ -102,8 +102,8 @@ static int batadv_frag_size_limit(void)
  *
  * Caller must hold chain->lock.
  *
- * Return: true if chain is empty and caller can just insert the new fragment
- * without searching for the right position.
+ * Return: true if chain is empty and the caller can just insert the new
+ * fragment without searching for the right position.
  */
 static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
                                   u16 seqno)
@@ -306,7 +306,7 @@ free:
  * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
  * to NULL; 3) Error: Return false and free skb.
  *
- * Return: true when packet is merged or buffered, false when skb is not not
+ * Return: true when the packet is merged or buffered, false when the skb is not
  * used.
  */
 bool batadv_frag_skb_buffer(struct sk_buff **skb,
index 3a256af..fa06b51 100644 (file)
@@ -138,10 +138,10 @@ static bool batadv_mutual_parents(const struct net_device *dev1,
  * @net_dev: the device to check
  *
  * If the user creates any virtual device on top of a batman-adv interface, it
- * is important to prevent this new interface to be used to create a new mesh
- * network (this behaviour would lead to a batman-over-batman configuration).
- * This function recursively checks all the fathers of the device passed as
- * argument looking for a batman-adv soft interface.
+ * is important to prevent this new interface from being used to create a new
+ * mesh network (this behaviour would lead to a batman-over-batman
+ * configuration). This function recursively checks all the fathers of the
+ * device passed as argument looking for a batman-adv soft interface.
  *
  * Return: true if the device is descendant of a batman-adv mesh interface (or
  * if it is a batman-adv interface itself), false otherwise
@@ -680,8 +680,8 @@ batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface)
  * @slave: the interface enslaved in another master
  * @master: the master from which slave has to be removed
  *
- * Invoke ndo_del_slave on master passing slave as argument. In this way slave
- * is free'd and master can correctly change its internal state.
+ * Invoke ndo_del_slave on master passing slave as argument. In this way the
+ * slave is free'd and the master can correctly change its internal state.
  *
  * Return: 0 on success, a negative value representing the error otherwise
  */
@@ -818,7 +818,7 @@ err:
  * @soft_iface: soft interface to check
  *
  * This function is only using RCU for locking - the result can therefore be
- * off when another functions is modifying the list at the same time. The
+ * off when another function is modifying the list at the same time. The
  * caller can use the rtnl_lock to make sure that the count is accurate.
  *
  * Return: number of connected/enslaved hard interfaces
@@ -939,6 +939,8 @@ batadv_hardif_add_interface(struct net_device *net_dev)
        if (batadv_is_wifi_hardif(hard_iface))
                hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS;
 
+       atomic_set(&hard_iface->hop_penalty, 0);
+
        batadv_v_hardif_init(hard_iface);
 
        batadv_check_known_mac_addr(hard_iface->net_dev);
index f9884dc..979864c 100644 (file)
@@ -69,7 +69,7 @@ int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
 __printf(2, 3);
 
 /**
- * _batadv_dbg() - Store debug output with(out) ratelimiting
+ * _batadv_dbg() - Store debug output with(out) rate limiting
  * @type: type of debug message
  * @bat_priv: the bat priv with all the soft interface information
  * @ratelimited: whether output should be rate limited
@@ -95,7 +95,7 @@ static inline void _batadv_dbg(int type __always_unused,
 #endif
 
 /**
- * batadv_dbg() - Store debug output without ratelimiting
+ * batadv_dbg() - Store debug output without rate limiting
  * @type: type of debug message
  * @bat_priv: the bat priv with all the soft interface information
  * @arg: format string and variable arguments
@@ -104,7 +104,7 @@ static inline void _batadv_dbg(int type __always_unused,
        _batadv_dbg(type, bat_priv, 0, ## arg)
 
 /**
- * batadv_dbg_ratelimited() - Store debug output with ratelimiting
+ * batadv_dbg_ratelimited() - Store debug output with rate limiting
  * @type: type of debug message
  * @bat_priv: the bat priv with all the soft interface information
  * @arg: format string and variable arguments
index d8a255c..519c08c 100644 (file)
@@ -666,7 +666,7 @@ unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
  * @vid: the VLAN identifier for which the AP isolation attributed as to be
  *  looked up
  *
- * Return: true if AP isolation is on for the VLAN idenfied by vid, false
+ * Return: true if AP isolation is on for the VLAN identified by vid, false
  * otherwise
  */
 bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid)
index 61d8dbe..0393bb9 100644 (file)
@@ -13,7 +13,7 @@
 #define BATADV_DRIVER_DEVICE "batman-adv"
 
 #ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2020.2"
+#define BATADV_SOURCE_VERSION "2020.3"
 #endif
 
 /* B.A.T.M.A.N. parameters */
@@ -308,7 +308,7 @@ static inline bool batadv_has_timed_out(unsigned long timestamp,
  * @y: value to compare @x against
  *
  * It handles overflows/underflows and can correctly check for a predecessor
- * unless the variable sequence number has grown by more then
+ * unless the variable sequence number has grown by more than
  * 2**(bitwidth(x)-1)-1.
  *
  * This means that for a u8 with the maximum value 255, it would think:
@@ -330,11 +330,11 @@ static inline bool batadv_has_timed_out(unsigned long timestamp,
 
 /**
  * batadv_seq_after() - Checks if a sequence number x is a successor of y
- * @x: potential sucessor of @y
+ * @x: potential successor of @y
  * @y: value to compare @x against
  *
  * It handles overflows/underflows and can correctly check for a successor
- * unless the variable sequence number has grown by more then
+ * unless the variable sequence number has grown by more than
  * 2**(bitwidth(x)-1)-1.
  *
  * This means that for a u8 with the maximum value 255, it would think:
index 9ebdc1e..bdc4a1f 100644 (file)
@@ -510,7 +510,7 @@ batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
  * the given mcast_list. In general, multicast listeners provided by
  * your multicast receiving applications run directly on this node.
  *
- * If there is a bridge interface on top of dev, collects from that one
+ * If there is a bridge interface on top of dev, collect from that one
  * instead. Just like with IP addresses and routes, multicast listeners
  * will(/should) register to the bridge interface instead of an
  * enslaved bat0.
@@ -832,8 +832,8 @@ batadv_mcast_bridge_log(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @flags: TVLV flags indicating the new multicast state
  *
- * Whenever the multicast TVLV flags this nodes announces change this notifies
- * userspace via the 'mcast' log level.
+ * Whenever the multicast TVLV flags this node announces change, this function
+ * should be used to notify userspace about the change.
  */
 static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
 {
@@ -1244,7 +1244,7 @@ batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
  * @ethhdr: an ethernet header to determine the protocol family from
  *
  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or
- * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, set and
+ * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, sets and
  * increases its refcount.
  */
 static struct batadv_orig_node *
@@ -1693,7 +1693,7 @@ batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_mcast_forw_send() - send packet to any detected multicast recpient
+ * batadv_mcast_forw_send() - send packet to any detected multicast recipient
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the multicast packet to transmit
  * @vid: the vlan identifier
@@ -1742,7 +1742,8 @@ int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
  * @mcast_flags: flags indicating the new multicast state
  *
  * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
- * orig, has toggled then this method updates counter and list accordingly.
+ * orig, has toggled then this method updates the counter and the list
+ * accordingly.
  *
  * Caller needs to hold orig->mcast_handler_lock.
  */
@@ -1787,7 +1788,7 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
  * @mcast_flags: flags indicating the new multicast state
  *
  * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
- * toggled then this method updates counter and list accordingly.
+ * toggled then this method updates the counter and the list accordingly.
  *
  * Caller needs to hold orig->mcast_handler_lock.
  */
@@ -1832,7 +1833,7 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
  * @mcast_flags: flags indicating the new multicast state
  *
  * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
- * toggled then this method updates counter and list accordingly.
+ * toggled then this method updates the counter and the list accordingly.
  *
  * Caller needs to hold orig->mcast_handler_lock.
  */
@@ -1877,7 +1878,7 @@ static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
  * @mcast_flags: flags indicating the new multicast state
  *
  * If the BATADV_MCAST_WANT_NO_RTR4 flag of this originator, orig, has
- * toggled then this method updates counter and list accordingly.
+ * toggled then this method updates the counter and the list accordingly.
  *
  * Caller needs to hold orig->mcast_handler_lock.
  */
@@ -1922,7 +1923,7 @@ static void batadv_mcast_want_rtr4_update(struct batadv_priv *bat_priv,
  * @mcast_flags: flags indicating the new multicast state
  *
  * If the BATADV_MCAST_WANT_NO_RTR6 flag of this originator, orig, has
- * toggled then this method updates counter and list accordingly.
+ * toggled then this method updates the counter and the list accordingly.
  *
  * Caller needs to hold orig->mcast_handler_lock.
  */
index 02ed073..dc19361 100644 (file)
@@ -640,7 +640,7 @@ batadv_netlink_tp_meter_put(struct sk_buff *msg, u32 cookie)
  * @bat_priv: the bat priv with all the soft interface information
  * @dst: destination of tp_meter session
  * @result: reason for tp meter session stop
- * @test_time: total time ot the tp_meter session
+ * @test_time: total time of the tp_meter session
  * @total_bytes: bytes acked to the receiver
  * @cookie: cookie of tp_meter session
  *
@@ -826,6 +826,10 @@ static int batadv_netlink_hardif_fill(struct sk_buff *msg,
                        goto nla_put_failure;
        }
 
+       if (nla_put_u8(msg, BATADV_ATTR_HOP_PENALTY,
+                      atomic_read(&hard_iface->hop_penalty)))
+               goto nla_put_failure;
+
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
        if (nla_put_u32(msg, BATADV_ATTR_ELP_INTERVAL,
                        atomic_read(&hard_iface->bat_v.elp_interval)))
@@ -920,9 +924,15 @@ static int batadv_netlink_set_hardif(struct sk_buff *skb,
 {
        struct batadv_hard_iface *hard_iface = info->user_ptr[1];
        struct batadv_priv *bat_priv = info->user_ptr[0];
+       struct nlattr *attr;
+
+       if (info->attrs[BATADV_ATTR_HOP_PENALTY]) {
+               attr = info->attrs[BATADV_ATTR_HOP_PENALTY];
+
+               atomic_set(&hard_iface->hop_penalty, nla_get_u8(attr));
+       }
 
 #ifdef CONFIG_BATMAN_ADV_BATMAN_V
-       struct nlattr *attr;
 
        if (info->attrs[BATADV_ATTR_ELP_INTERVAL]) {
                attr = info->attrs[BATADV_ATTR_ELP_INTERVAL];
index b0469d1..48d7078 100644 (file)
@@ -134,7 +134,7 @@ static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_mesh_init() - initialise coding hash table and start house keeping
+ * batadv_nc_mesh_init() - initialise coding hash table and start housekeeping
  * @bat_priv: the bat priv with all the soft interface information
  *
  * Return: 0 on success or negative error number in case of failure
@@ -700,7 +700,7 @@ batadv_nc_process_nc_paths(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_worker() - periodic task for house keeping related to network
+ * batadv_nc_worker() - periodic task for housekeeping related to network
  *  coding
  * @work: kernel work struct
  */
@@ -1316,7 +1316,7 @@ batadv_nc_path_search(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_nc_skb_src_search() - Loops through the list of neighoring nodes of
+ * batadv_nc_skb_src_search() - Loops through the list of neighboring nodes of
  *  the skb's sender (may be equal to the originator).
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: data skb to forward
@@ -1402,10 +1402,10 @@ static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
  * @neigh_node: next hop to forward packet to
  * @ethhdr: pointer to the ethernet header inside the skb
  *
- * Loops through list of neighboring nodes the next hop has a good connection to
- * (receives OGMs with a sufficient quality). We need to find a neighbor of our
- * next hop that potentially sent a packet which our next hop also received
- * (overheard) and has stored for later decoding.
+ * Loops through the list of neighboring nodes the next hop has a good
+ * connection to (receives OGMs with a sufficient quality). We need to find a
+ * neighbor of our next hop that potentially sent a packet which our next hop
+ * also received (overheard) and has stored for later decoding.
  *
  * Return: true if the skb was consumed (encoded packet sent) or false otherwise
  */
index 5b0c2ff..805d896 100644 (file)
@@ -325,7 +325,7 @@ void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node)
  * @if_outgoing: the interface where the payload packet has been received or
  *  the OGM should be sent to
  *
- * Return: the neighbor which should be router for this orig_node/iface.
+ * Return: the neighbor which should be the router for this orig_node/iface.
  *
  * The object is returned with refcounter increased by 1.
  */
@@ -515,7 +515,7 @@ out:
  * Looks for and possibly returns a neighbour belonging to this originator list
  * which is connected through the provided hard interface.
  *
- * Return: neighbor when found. Othwerwise NULL
+ * Return: neighbor when found. Otherwise NULL
  */
 static struct batadv_neigh_node *
 batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
@@ -620,7 +620,7 @@ batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
  *
  * Looks for and possibly returns a neighbour belonging to this hard interface.
  *
- * Return: neighbor when found. Othwerwise NULL
+ * Return: neighbor when found. Otherwise NULL
  */
 struct batadv_hardif_neigh_node *
 batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
@@ -999,7 +999,7 @@ void batadv_originator_free(struct batadv_priv *bat_priv)
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the originator
  *
- * Creates a new originator object and initialise all the generic fields.
+ * Creates a new originator object and initialises all the generic fields.
  * The new object is not added to the originator list.
  *
  * Return: the newly created object or NULL on failure.
index d343382..27cdf5e 100644 (file)
@@ -449,7 +449,7 @@ free_skb:
  * @skb: packet to check
  * @hdr_size: size of header to pull
  *
- * Check for short header and bad addresses in given packet.
+ * Checks for short header and bad addresses in the given packet.
  *
  * Return: negative value when check fails and 0 otherwise. The negative value
  * depends on the reason: -ENODATA for bad header, -EBADR for broadcast
@@ -1113,7 +1113,7 @@ free_skb:
  * @recv_if: interface that the skb is received on
  *
  * This function does one of the three following things: 1) Forward fragment, if
- * the assembled packet will exceed our MTU; 2) Buffer fragment, if we till
+ * the assembled packet will exceed our MTU; 2) Buffer fragment, if we still
  * lack further fragments; 3) Merge fragments, if we have all needed parts.
  *
  * Return: NET_RX_DROP if the skb is not consumed, NET_RX_SUCCESS otherwise.
index 7f8ade0..d267b94 100644 (file)
@@ -605,8 +605,8 @@ bool batadv_forw_packet_steal(struct batadv_forw_packet *forw_packet,
  * given hard_iface. If hard_iface is NULL forwarding packets on all hard
  * interfaces will be claimed.
  *
- * The packets are being moved from the forw_list to the cleanup_list and
- * by that allows already running threads to notice the claiming.
+ * The packets are being moved from the forw_list to the cleanup_list. This
+ * makes it possible for already running threads to notice the claim.
  */
 static void
 batadv_forw_packet_list_steal(struct hlist_head *forw_list,
index f1f1c86..23833a0 100644 (file)
@@ -406,7 +406,7 @@ end:
  * @hdr_size: size of already parsed batman-adv header
  * @orig_node: originator from which the batman-adv packet was sent
  *
- * Sends a ethernet frame to the receive path of the local @soft_iface.
+ * Sends an ethernet frame to the receive path of the local @soft_iface.
  * skb->data has still point to the batman-adv header with the size @hdr_size.
  * The caller has to have parsed this header already and made sure that at least
  * @hdr_size bytes are still available for pull in @skb.
index bd2ac57..db7e377 100644 (file)
@@ -66,7 +66,7 @@
 
 /**
  * BATADV_TP_MAX_RTO - Maximum sender timeout. If the sender RTO gets beyond
- * such amound of milliseconds, the receiver is considered unreachable and the
+ * such amount of milliseconds, the receiver is considered unreachable and the
  * connection is killed
  */
 #define BATADV_TP_MAX_RTO 30000
@@ -108,10 +108,10 @@ static u32 batadv_tp_session_cookie(const u8 session[2], u8 icmp_uid)
  * batadv_tp_cwnd() - compute the new cwnd size
  * @base: base cwnd size value
  * @increment: the value to add to base to get the new size
- * @min: minumim cwnd value (usually MSS)
+ * @min: minimum cwnd value (usually MSS)
  *
- * Return the new cwnd size and ensures it does not exceed the Advertised
- * Receiver Window size. It is wrap around safe.
+ * Return the new cwnd size and ensure it does not exceed the Advertised
+ * Receiver Window size. It is wrap-around safe.
  * For details refer to Section 3.1 of RFC5681
  *
  * Return: new congestion window size in bytes
@@ -254,7 +254,7 @@ static void batadv_tp_batctl_error_notify(enum batadv_tp_meter_reason reason,
  * @dst: the other endpoint MAC address to look for
  *
  * Look for a tp_vars object matching dst as end_point and return it after
- * having incremented the refcounter. Return NULL is not found
+ * having incremented the refcounter. Return NULL if not found
  *
  * Return: matching tp_vars or NULL when no tp_vars with @dst was found
  */
@@ -291,7 +291,7 @@ static struct batadv_tp_vars *batadv_tp_list_find(struct batadv_priv *bat_priv,
  * @session: session identifier
  *
  * Look for a tp_vars object matching dst as end_point, session as tp meter
- * session and return it after having incremented the refcounter. Return NULL
+ * session and return it after having increment the refcounter. Return NULL
  * is not found
  *
  * Return: matching tp_vars or NULL when no tp_vars was found
index a9635c8..98a0aaa 100644 (file)
@@ -301,7 +301,7 @@ void batadv_tt_global_entry_put(struct batadv_tt_global_entry *tt_global_entry)
  * @vid: VLAN identifier
  *
  * Return: the number of originators advertising the given address/data
- * (excluding ourself).
+ * (excluding ourselves).
  */
 int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
                                const u8 *addr, unsigned short vid)
@@ -842,7 +842,7 @@ out:
  *  table. In case of success the value is updated with the real amount of
  *  reserved bytes
  * Allocate the needed amount of memory for the entire TT TVLV and write its
- * header made up by one tvlv_tt_data object and a series of tvlv_tt_vlan_data
+ * header made up of one tvlv_tt_data object and a series of tvlv_tt_vlan_data
  * objects, one per active VLAN served by the originator node.
  *
  * Return: the size of the allocated buffer or 0 in case of failure.
@@ -1674,7 +1674,7 @@ out:
  * the function argument.
  * If a TT local entry exists for this non-mesh client remove it.
  *
- * The caller must hold orig_node refcount.
+ * The caller must hold the orig_node refcount.
  *
  * Return: true if the new entry has been added, false otherwise
  */
@@ -1839,7 +1839,7 @@ out:
  * @bat_priv: the bat priv with all the soft interface information
  * @tt_global_entry: global translation table entry to be analyzed
  *
- * This functon assumes the caller holds rcu_read_lock().
+ * This function assumes the caller holds rcu_read_lock().
  * Return: best originator list entry or NULL on errors.
  */
 static struct batadv_tt_orig_list_entry *
@@ -1887,7 +1887,7 @@ batadv_transtable_best_orig(struct batadv_priv *bat_priv,
  * @tt_global_entry: global translation table entry to be printed
  * @seq: debugfs table seq_file struct
  *
- * This functon assumes the caller holds rcu_read_lock().
+ * This function assumes the caller holds rcu_read_lock().
  */
 static void
 batadv_tt_global_print_entry(struct batadv_priv *bat_priv,
index 0963a43..6a23a56 100644 (file)
@@ -353,8 +353,8 @@ end:
  * @tvlv_value: tvlv content
  * @tvlv_value_len: tvlv content length
  *
- * Return: success if handler was not found or the return value of the handler
- * callback.
+ * Return: success if the handler was not found or the return value of the
+ * handler callback.
  */
 static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
                                    struct batadv_tvlv_handler *tvlv_handler,
index d152b8e..ed519ef 100644 (file)
@@ -208,6 +208,12 @@ struct batadv_hard_iface {
        /** @rcu: struct used for freeing in an RCU-safe manner */
        struct rcu_head rcu;
 
+       /**
+        * @hop_penalty: penalty which will be applied to the tq-field
+        * of an OGM received via this interface
+        */
+       atomic_t hop_penalty;
+
        /** @bat_iv: per hard-interface B.A.T.M.A.N. IV data */
        struct batadv_hard_iface_bat_iv bat_iv;
 
@@ -455,8 +461,8 @@ struct batadv_orig_node {
        spinlock_t tt_buff_lock;
 
        /**
-        * @tt_lock: prevents from updating the table while reading it. Table
-        *  update is made up by two operations (data structure update and
+        * @tt_lock: avoids concurrent read from and write to the table. Table
+        *  update is made up of two operations (data structure update and
         *  metadata -CRC/TTVN-recalculation) and they have to be executed
         *  atomically in order to avoid another thread to read the
         *  table/metadata between those.
@@ -748,7 +754,7 @@ struct batadv_neigh_ifinfo {
  * struct batadv_bcast_duplist_entry - structure for LAN broadcast suppression
  */
 struct batadv_bcast_duplist_entry {
-       /** @orig: mac address of orig node orginating the broadcast */
+       /** @orig: mac address of orig node originating the broadcast */
        u8 orig[ETH_ALEN];
 
        /** @crc: crc32 checksum of broadcast payload */
@@ -1010,7 +1016,7 @@ struct batadv_priv_tt {
 
        /**
         * @commit_lock: prevents from executing a local TT commit while reading
-        *  the local table. The local TT commit is made up by two operations
+        *  the local table. The local TT commit is made up of two operations
         *  (data structure update and metadata -CRC/TTVN- recalculation) and
         *  they have to be executed atomically in order to avoid another thread
         *  to read the table/metadata between those.
@@ -1024,7 +1030,7 @@ struct batadv_priv_tt {
 #ifdef CONFIG_BATMAN_ADV_BLA
 
 /**
- * struct batadv_priv_bla - per mesh interface bridge loope avoidance data
+ * struct batadv_priv_bla - per mesh interface bridge loop avoidance data
  */
 struct batadv_priv_bla {
        /** @num_requests: number of bla requests in flight */
@@ -1718,7 +1724,7 @@ struct batadv_priv {
        spinlock_t softif_vlan_list_lock;
 
 #ifdef CONFIG_BATMAN_ADV_BLA
-       /** @bla: bridge loope avoidance data */
+       /** @bla: bridge loop avoidance data */
        struct batadv_priv_bla bla;
 #endif
 
index cfd83c5..d515571 100644 (file)
@@ -182,8 +182,6 @@ static const struct proto_ops bnep_sock_ops = {
        .recvmsg        = sock_no_recvmsg,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
-       .setsockopt     = sock_no_setsockopt,
-       .getsockopt     = sock_no_getsockopt,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
index defdd48..96d49d9 100644 (file)
@@ -185,8 +185,6 @@ static const struct proto_ops cmtp_sock_ops = {
        .recvmsg        = sock_no_recvmsg,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
-       .setsockopt     = sock_no_setsockopt,
-       .getsockopt     = sock_no_getsockopt,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
index caf38a8..d5eff27 100644 (file)
@@ -1842,7 +1842,7 @@ drop:
 }
 
 static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
-                              char __user *optval, unsigned int len)
+                              sockptr_t optval, unsigned int len)
 {
        struct hci_ufilter uf = { .opcode = 0 };
        struct sock *sk = sock->sk;
@@ -1862,7 +1862,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
 
        switch (optname) {
        case HCI_DATA_DIR:
-               if (get_user(opt, (int __user *)optval)) {
+               if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
                        err = -EFAULT;
                        break;
                }
@@ -1874,7 +1874,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
                break;
 
        case HCI_TIME_STAMP:
-               if (get_user(opt, (int __user *)optval)) {
+               if (copy_from_sockptr(&opt, optval, sizeof(opt))) {
                        err = -EFAULT;
                        break;
                }
@@ -1896,7 +1896,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
                }
 
                len = min_t(unsigned int, len, sizeof(uf));
-               if (copy_from_user(&uf, optval, len)) {
+               if (copy_from_sockptr(&uf, optval, len)) {
                        err = -EFAULT;
                        break;
                }
index 03be6a4..595fb3c 100644 (file)
@@ -233,8 +233,6 @@ static const struct proto_ops hidp_sock_ops = {
        .recvmsg        = sock_no_recvmsg,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
-       .setsockopt     = sock_no_setsockopt,
-       .getsockopt     = sock_no_getsockopt,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
index a995d2c..a3d1041 100644 (file)
@@ -703,7 +703,7 @@ static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu)
 }
 
 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
-                                    char __user *optval, unsigned int optlen)
+                                    sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct l2cap_chan *chan = l2cap_pi(sk)->chan;
@@ -736,7 +736,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
                opts.txwin_size = chan->tx_win;
 
                len = min_t(unsigned int, sizeof(opts), optlen);
-               if (copy_from_user((char *) &opts, optval, len)) {
+               if (copy_from_sockptr(&opts, optval, len)) {
                        err = -EFAULT;
                        break;
                }
@@ -782,7 +782,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
                break;
 
        case L2CAP_LM:
-               if (get_user(opt, (u32 __user *) optval)) {
+               if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
                        err = -EFAULT;
                        break;
                }
@@ -859,7 +859,7 @@ static int l2cap_set_mode(struct l2cap_chan *chan, u8 mode)
 }
 
 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
-                                char __user *optval, unsigned int optlen)
+                                sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct l2cap_chan *chan = l2cap_pi(sk)->chan;
@@ -891,7 +891,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
                sec.level = BT_SECURITY_LOW;
 
                len = min_t(unsigned int, sizeof(sec), optlen);
-               if (copy_from_user((char *) &sec, optval, len)) {
+               if (copy_from_sockptr(&sec, optval, len)) {
                        err = -EFAULT;
                        break;
                }
@@ -939,7 +939,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
                        break;
                }
 
-               if (get_user(opt, (u32 __user *) optval)) {
+               if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
                        err = -EFAULT;
                        break;
                }
@@ -954,7 +954,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
                break;
 
        case BT_FLUSHABLE:
-               if (get_user(opt, (u32 __user *) optval)) {
+               if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
                        err = -EFAULT;
                        break;
                }
@@ -990,7 +990,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
                pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
 
                len = min_t(unsigned int, sizeof(pwr), optlen);
-               if (copy_from_user((char *) &pwr, optval, len)) {
+               if (copy_from_sockptr(&pwr, optval, len)) {
                        err = -EFAULT;
                        break;
                }
@@ -1002,7 +1002,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
                break;
 
        case BT_CHANNEL_POLICY:
-               if (get_user(opt, (u32 __user *) optval)) {
+               if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
                        err = -EFAULT;
                        break;
                }
@@ -1050,7 +1050,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
                        break;
                }
 
-               if (get_user(opt, (u16 __user *) optval)) {
+               if (copy_from_sockptr(&opt, optval, sizeof(u16))) {
                        err = -EFAULT;
                        break;
                }
@@ -1081,7 +1081,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
                        break;
                }
 
-               if (get_user(opt, (u8 __user *) optval)) {
+               if (copy_from_sockptr(&opt, optval, sizeof(u8))) {
                        err = -EFAULT;
                        break;
                }
index df14eeb..dba4ea0 100644 (file)
@@ -644,7 +644,8 @@ static int rfcomm_sock_recvmsg(struct socket *sock, struct msghdr *msg,
        return len;
 }
 
-static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
+static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname,
+               sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        int err = 0;
@@ -656,7 +657,7 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __u
 
        switch (optname) {
        case RFCOMM_LM:
-               if (get_user(opt, (u32 __user *) optval)) {
+               if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
                        err = -EFAULT;
                        break;
                }
@@ -685,7 +686,8 @@ static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __u
        return err;
 }
 
-static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
+static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname,
+               sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct bt_security sec;
@@ -713,7 +715,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
                sec.level = BT_SECURITY_LOW;
 
                len = min_t(unsigned int, sizeof(sec), optlen);
-               if (copy_from_user((char *) &sec, optval, len)) {
+               if (copy_from_sockptr(&sec, optval, len)) {
                        err = -EFAULT;
                        break;
                }
@@ -732,7 +734,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
                        break;
                }
 
-               if (get_user(opt, (u32 __user *) optval)) {
+               if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
                        err = -EFAULT;
                        break;
                }
index c8c3d38..37260ba 100644 (file)
@@ -791,7 +791,7 @@ static int sco_sock_recvmsg(struct socket *sock, struct msghdr *msg,
 }
 
 static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
-                              char __user *optval, unsigned int optlen)
+                              sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        int len, err = 0;
@@ -810,7 +810,7 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
                        break;
                }
 
-               if (get_user(opt, (u32 __user *) optval)) {
+               if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
                        err = -EFAULT;
                        break;
                }
@@ -831,7 +831,7 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname,
                voice.setting = sco_pi(sk)->setting;
 
                len = min_t(unsigned int, sizeof(voice), optlen);
-               if (copy_from_user((char *)&voice, optval, len)) {
+               if (copy_from_sockptr(&voice, optval, len)) {
                        err = -EFAULT;
                        break;
                }
index bfd4ccd..b03c469 100644 (file)
@@ -147,6 +147,20 @@ int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
        return a + (long)b + c + d + (long)e + f;
 }
 
+struct bpf_fentry_test_t {
+       struct bpf_fentry_test_t *a;
+};
+
+int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
+{
+       return (long)arg;
+}
+
+int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
+{
+       return (long)arg->a;
+}
+
 int noinline bpf_modify_return_test(int a, int *b)
 {
        *b += 1;
@@ -185,6 +199,7 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
                              const union bpf_attr *kattr,
                              union bpf_attr __user *uattr)
 {
+       struct bpf_fentry_test_t arg = {};
        u16 side_effect = 0, ret = 0;
        int b = 2, err = -EFAULT;
        u32 retval = 0;
@@ -197,7 +212,9 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
                    bpf_fentry_test3(4, 5, 6) != 15 ||
                    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
                    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
-                   bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111)
+                   bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
+                   bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
+                   bpf_fentry_test8(&arg) != 0)
                        goto out;
                break;
        case BPF_MODIFY_RETURN:
index 84015ef..73d0b12 100644 (file)
@@ -9,12 +9,14 @@ menuconfig BPFILTER
 if BPFILTER
 config BPFILTER_UMH
        tristate "bpfilter kernel module with user mode helper"
-       depends on CC_CAN_LINK_STATIC
+       depends on CC_CAN_LINK
+       depends on m || CC_CAN_LINK_STATIC
        default m
        help
          This builds bpfilter kernel module with embedded user mode helper
 
-         Note: your toolchain must support building static binaries, since
-         rootfs isn't mounted at the time when __init functions are called
-         and do_execv won't be able to find the elf interpreter.
+         Note: To compile this as built-in, your toolchain must support
+         building static binaries, since rootfs isn't mounted at the time
+         when __init functions are called and do_execv won't be able to find
+         the elf interpreter.
 endif
index f23b532..cdac82b 100644 (file)
@@ -7,10 +7,12 @@ userprogs := bpfilter_umh
 bpfilter_umh-objs := main.o
 userccflags += -I $(srctree)/tools/include/ -I $(srctree)/tools/include/uapi
 
+ifeq ($(CONFIG_BPFILTER_UMH), y)
 # builtin bpfilter_umh should be linked with -static
 # since rootfs isn't mounted at the time of __init
 # function is called and do_execv won't find elf interpreter
 userldflags += -static
+endif
 
 $(obj)/bpfilter_umh_blob.o: $(obj)/bpfilter_umh
 
index c0f0990..f580c33 100644 (file)
@@ -15,15 +15,13 @@ extern char bpfilter_umh_end;
 
 static void shutdown_umh(void)
 {
-       struct task_struct *tsk;
+       struct umd_info *info = &bpfilter_ops.info;
+       struct pid *tgid = info->tgid;
 
-       if (bpfilter_ops.stop)
-               return;
-
-       tsk = get_pid_task(find_vpid(bpfilter_ops.info.pid), PIDTYPE_PID);
-       if (tsk) {
-               send_sig(SIGKILL, tsk, 1);
-               put_task_struct(tsk);
+       if (tgid) {
+               kill_pid(tgid, SIGKILL, 1);
+               wait_event(tgid->wait_pidfd, thread_group_exited(tgid));
+               bpfilter_umh_cleanup(info);
        }
 }
 
@@ -33,60 +31,65 @@ static void __stop_umh(void)
                shutdown_umh();
 }
 
-static int __bpfilter_process_sockopt(struct sock *sk, int optname,
-                                     char __user *optval,
-                                     unsigned int optlen, bool is_set)
+static int bpfilter_send_req(struct mbox_request *req)
 {
-       struct mbox_request req;
        struct mbox_reply reply;
        loff_t pos;
        ssize_t n;
-       int ret = -EFAULT;
-
-       req.is_set = is_set;
-       req.pid = current->pid;
-       req.cmd = optname;
-       req.addr = (long __force __user)optval;
-       req.len = optlen;
-       if (!bpfilter_ops.info.pid)
-               goto out;
-       n = __kernel_write(bpfilter_ops.info.pipe_to_umh, &req, sizeof(req),
+
+       if (!bpfilter_ops.info.tgid)
+               return -EFAULT;
+       pos = 0;
+       n = kernel_write(bpfilter_ops.info.pipe_to_umh, req, sizeof(*req),
                           &pos);
-       if (n != sizeof(req)) {
+       if (n != sizeof(*req)) {
                pr_err("write fail %zd\n", n);
-               __stop_umh();
-               ret = -EFAULT;
-               goto out;
+               goto stop;
        }
        pos = 0;
        n = kernel_read(bpfilter_ops.info.pipe_from_umh, &reply, sizeof(reply),
                        &pos);
        if (n != sizeof(reply)) {
                pr_err("read fail %zd\n", n);
-               __stop_umh();
-               ret = -EFAULT;
-               goto out;
+               goto stop;
        }
-       ret = reply.status;
-out:
-       return ret;
+       return reply.status;
+stop:
+       __stop_umh();
+       return -EFAULT;
+}
+
+static int bpfilter_process_sockopt(struct sock *sk, int optname,
+                                   sockptr_t optval, unsigned int optlen,
+                                   bool is_set)
+{
+       struct mbox_request req = {
+               .is_set         = is_set,
+               .pid            = current->pid,
+               .cmd            = optname,
+               .addr           = (uintptr_t)optval.user,
+               .len            = optlen,
+       };
+       if (uaccess_kernel() || sockptr_is_kernel(optval)) {
+               pr_err("kernel access not supported\n");
+               return -EFAULT;
+       }
+       return bpfilter_send_req(&req);
 }
 
 static int start_umh(void)
 {
+       struct mbox_request req = { .pid = current->pid };
        int err;
 
        /* fork usermode process */
-       err = fork_usermode_blob(&bpfilter_umh_start,
-                                &bpfilter_umh_end - &bpfilter_umh_start,
-                                &bpfilter_ops.info);
+       err = fork_usermode_driver(&bpfilter_ops.info);
        if (err)
                return err;
-       bpfilter_ops.stop = false;
-       pr_info("Loaded bpfilter_umh pid %d\n", bpfilter_ops.info.pid);
+       pr_info("Loaded bpfilter_umh pid %d\n", pid_nr(bpfilter_ops.info.tgid));
 
        /* health check that usermode process started correctly */
-       if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) {
+       if (bpfilter_send_req(&req) != 0) {
                shutdown_umh();
                return -EFAULT;
        }
@@ -98,18 +101,21 @@ static int __init load_umh(void)
 {
        int err;
 
+       err = umd_load_blob(&bpfilter_ops.info,
+                           &bpfilter_umh_start,
+                           &bpfilter_umh_end - &bpfilter_umh_start);
+       if (err)
+               return err;
+
        mutex_lock(&bpfilter_ops.lock);
-       if (!bpfilter_ops.stop) {
-               err = -EFAULT;
-               goto out;
-       }
        err = start_umh();
        if (!err && IS_ENABLED(CONFIG_INET)) {
-               bpfilter_ops.sockopt = &__bpfilter_process_sockopt;
+               bpfilter_ops.sockopt = &bpfilter_process_sockopt;
                bpfilter_ops.start = &start_umh;
        }
-out:
        mutex_unlock(&bpfilter_ops.lock);
+       if (err)
+               umd_unload_blob(&bpfilter_ops.info);
        return err;
 }
 
@@ -122,6 +128,8 @@ static void __exit fini_umh(void)
                bpfilter_ops.sockopt = NULL;
        }
        mutex_unlock(&bpfilter_ops.lock);
+
+       umd_unload_blob(&bpfilter_ops.info);
 }
 module_init(load_umh);
 module_exit(fini_umh);
index 9ea6100..40311d1 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-       .section .rodata, "a"
+       .section .init.rodata, "a"
        .global bpfilter_umh_start
 bpfilter_umh_start:
        .incbin "net/bpfilter/bpfilter_umh"
index 4877a0d..9db504b 100644 (file)
@@ -349,12 +349,21 @@ void br_fdb_cleanup(struct work_struct *work)
         */
        rcu_read_lock();
        hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
-               unsigned long this_timer;
+               unsigned long this_timer = f->updated + delay;
 
                if (test_bit(BR_FDB_STATIC, &f->flags) ||
-                   test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags))
+                   test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) {
+                       if (test_bit(BR_FDB_NOTIFY, &f->flags)) {
+                               if (time_after(this_timer, now))
+                                       work_delay = min(work_delay,
+                                                        this_timer - now);
+                               else if (!test_and_set_bit(BR_FDB_NOTIFY_INACTIVE,
+                                                          &f->flags))
+                                       fdb_notify(br, f, RTM_NEWNEIGH, false);
+                       }
                        continue;
-               this_timer = f->updated + delay;
+               }
+
                if (time_after(this_timer, now)) {
                        work_delay = min(work_delay, this_timer - now);
                } else {
@@ -556,11 +565,17 @@ int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
        return ret;
 }
 
+/* returns true if the fdb was modified */
+static bool __fdb_mark_active(struct net_bridge_fdb_entry *fdb)
+{
+       return !!(test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags) &&
+                 test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags));
+}
+
 void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
                   const unsigned char *addr, u16 vid, unsigned long flags)
 {
        struct net_bridge_fdb_entry *fdb;
-       bool fdb_modified = false;
 
        /* some users want to always flood. */
        if (hold_time(br) == 0)
@@ -575,6 +590,12 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
                                        source->dev->name, addr, vid);
                } else {
                        unsigned long now = jiffies;
+                       bool fdb_modified = false;
+
+                       if (now != fdb->updated) {
+                               fdb->updated = now;
+                               fdb_modified = __fdb_mark_active(fdb);
+                       }
 
                        /* fastpath: update of existing entry */
                        if (unlikely(source != fdb->dst &&
@@ -587,8 +608,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
                                        clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
                                                  &fdb->flags);
                        }
-                       if (now != fdb->updated)
-                               fdb->updated = now;
+
                        if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
                                set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
                        if (unlikely(fdb_modified)) {
@@ -667,6 +687,23 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
                                        &fdb->key.vlan_id))
                goto nla_put_failure;
 
+       if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) {
+               struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS);
+               u8 notify_bits = FDB_NOTIFY_BIT;
+
+               if (!nest)
+                       goto nla_put_failure;
+               if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
+                       notify_bits |= FDB_NOTIFY_INACTIVE_BIT;
+
+               if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) {
+                       nla_nest_cancel(skb, nest);
+                       goto nla_put_failure;
+               }
+
+               nla_nest_end(skb, nest);
+       }
+
        nlmsg_end(skb, nlh);
        return 0;
 
@@ -681,7 +718,9 @@ static inline size_t fdb_nlmsg_size(void)
                + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
                + nla_total_size(sizeof(u32)) /* NDA_MASTER */
                + nla_total_size(sizeof(u16)) /* NDA_VLAN */
-               + nla_total_size(sizeof(struct nda_cacheinfo));
+               + nla_total_size(sizeof(struct nda_cacheinfo))
+               + nla_total_size(0) /* NDA_FDB_EXT_ATTRS */
+               + nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */
 }
 
 static void fdb_notify(struct net_bridge *br,
@@ -791,14 +830,41 @@ errout:
        return err;
 }
 
+/* returns true if the fdb is modified */
+static bool fdb_handle_notify(struct net_bridge_fdb_entry *fdb, u8 notify)
+{
+       bool modified = false;
+
+       /* allow to mark an entry as inactive, usually done on creation */
+       if ((notify & FDB_NOTIFY_INACTIVE_BIT) &&
+           !test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
+               modified = true;
+
+       if ((notify & FDB_NOTIFY_BIT) &&
+           !test_and_set_bit(BR_FDB_NOTIFY, &fdb->flags)) {
+               /* enabled activity tracking */
+               modified = true;
+       } else if (!(notify & FDB_NOTIFY_BIT) &&
+                  test_and_clear_bit(BR_FDB_NOTIFY, &fdb->flags)) {
+               /* disabled activity tracking, clear notify state */
+               clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags);
+               modified = true;
+       }
+
+       return modified;
+}
+
 /* Update (create or replace) forwarding database entry */
 static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
-                        const u8 *addr, u16 state, u16 flags, u16 vid,
-                        u8 ndm_flags)
+                        const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid,
+                        struct nlattr *nfea_tb[])
 {
-       bool is_sticky = !!(ndm_flags & NTF_STICKY);
+       bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY);
+       bool refresh = !nfea_tb[NFEA_DONT_REFRESH];
        struct net_bridge_fdb_entry *fdb;
+       u16 state = ndm->ndm_state;
        bool modified = false;
+       u8 notify = 0;
 
        /* If the port cannot learn allow only local and static entries */
        if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
@@ -815,6 +881,13 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
        if (is_sticky && (state & NUD_PERMANENT))
                return -EINVAL;
 
+       if (nfea_tb[NFEA_ACTIVITY_NOTIFY]) {
+               notify = nla_get_u8(nfea_tb[NFEA_ACTIVITY_NOTIFY]);
+               if ((notify & ~BR_FDB_NOTIFY_SETTABLE_BITS) ||
+                   (notify & BR_FDB_NOTIFY_SETTABLE_BITS) == FDB_NOTIFY_INACTIVE_BIT)
+                       return -EINVAL;
+       }
+
        fdb = br_fdb_find(br, addr, vid);
        if (fdb == NULL) {
                if (!(flags & NLM_F_CREATE))
@@ -858,11 +931,15 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
                modified = true;
        }
 
+       if (fdb_handle_notify(fdb, notify))
+               modified = true;
+
        set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
 
        fdb->used = jiffies;
        if (modified) {
-               fdb->updated = jiffies;
+               if (refresh)
+                       fdb->updated = jiffies;
                fdb_notify(br, fdb, RTM_NEWNEIGH, true);
        }
 
@@ -871,7 +948,7 @@ static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
 
 static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
                        struct net_bridge_port *p, const unsigned char *addr,
-                       u16 nlh_flags, u16 vid)
+                       u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[])
 {
        int err = 0;
 
@@ -893,20 +970,25 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
                err = br_fdb_external_learn_add(br, p, addr, vid, true);
        } else {
                spin_lock_bh(&br->hash_lock);
-               err = fdb_add_entry(br, p, addr, ndm->ndm_state,
-                                   nlh_flags, vid, ndm->ndm_flags);
+               err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb);
                spin_unlock_bh(&br->hash_lock);
        }
 
        return err;
 }
 
+static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = {
+       [NFEA_ACTIVITY_NOTIFY]  = { .type = NLA_U8 },
+       [NFEA_DONT_REFRESH]     = { .type = NLA_FLAG },
+};
+
 /* Add new permanent fdb entry with RTM_NEWNEIGH */
 int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
               struct net_device *dev,
               const unsigned char *addr, u16 vid, u16 nlh_flags,
               struct netlink_ext_ack *extack)
 {
+       struct nlattr *nfea_tb[NFEA_MAX + 1], *attr;
        struct net_bridge_vlan_group *vg;
        struct net_bridge_port *p = NULL;
        struct net_bridge_vlan *v;
@@ -939,6 +1021,16 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                vg = nbp_vlan_group(p);
        }
 
+       if (tb[NDA_FDB_EXT_ATTRS]) {
+               attr = tb[NDA_FDB_EXT_ATTRS];
+               err = nla_parse_nested(nfea_tb, NFEA_MAX, attr,
+                                      br_nda_fdb_pol, extack);
+               if (err)
+                       return err;
+       } else {
+               memset(nfea_tb, 0, sizeof(struct nlattr *) * (NFEA_MAX + 1));
+       }
+
        if (vid) {
                v = br_vlan_find(vg, vid);
                if (!v || !br_vlan_should_use(v)) {
@@ -947,9 +1039,9 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                }
 
                /* VID was specified, so use it. */
-               err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid);
+               err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb);
        } else {
-               err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0);
+               err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb);
                if (err || !vg || !vg->num_vlans)
                        goto out;
 
@@ -960,7 +1052,8 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
                list_for_each_entry(v, &vg->vlan_list, vlist) {
                        if (!br_vlan_should_use(v))
                                continue;
-                       err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid);
+                       err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid,
+                                          nfea_tb);
                        if (err)
                                goto out;
                }
index 24986ec..b36689e 100644 (file)
@@ -4,6 +4,27 @@
 #include "br_private_mrp.h"
 
 static const u8 mrp_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x1 };
+static const u8 mrp_in_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x3 };
+
+static bool br_mrp_is_ring_port(struct net_bridge_port *p_port,
+                               struct net_bridge_port *s_port,
+                               struct net_bridge_port *port)
+{
+       if (port == p_port ||
+           port == s_port)
+               return true;
+
+       return false;
+}
+
+static bool br_mrp_is_in_port(struct net_bridge_port *i_port,
+                             struct net_bridge_port *port)
+{
+       if (port == i_port)
+               return true;
+
+       return false;
+}
 
 static struct net_bridge_port *br_mrp_get_port(struct net_bridge *br,
                                               u32 ifindex)
@@ -37,6 +58,22 @@ static struct br_mrp *br_mrp_find_id(struct net_bridge *br, u32 ring_id)
        return res;
 }
 
+static struct br_mrp *br_mrp_find_in_id(struct net_bridge *br, u32 in_id)
+{
+       struct br_mrp *res = NULL;
+       struct br_mrp *mrp;
+
+       list_for_each_entry_rcu(mrp, &br->mrp_list, list,
+                               lockdep_rtnl_is_held()) {
+               if (mrp->in_id == in_id) {
+                       res = mrp;
+                       break;
+               }
+       }
+
+       return res;
+}
+
 static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex)
 {
        struct br_mrp *mrp;
@@ -52,6 +89,10 @@ static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex)
                p = rtnl_dereference(mrp->s_port);
                if (p && p->dev->ifindex == ifindex)
                        return false;
+
+               p = rtnl_dereference(mrp->i_port);
+               if (p && p->dev->ifindex == ifindex)
+                       return false;
        }
 
        return true;
@@ -66,7 +107,8 @@ static struct br_mrp *br_mrp_find_port(struct net_bridge *br,
        list_for_each_entry_rcu(mrp, &br->mrp_list, list,
                                lockdep_rtnl_is_held()) {
                if (rcu_access_pointer(mrp->p_port) == p ||
-                   rcu_access_pointer(mrp->s_port) == p) {
+                   rcu_access_pointer(mrp->s_port) == p ||
+                   rcu_access_pointer(mrp->i_port) == p) {
                        res = mrp;
                        break;
                }
@@ -86,7 +128,7 @@ static struct sk_buff *br_mrp_skb_alloc(struct net_bridge_port *p,
 {
        struct ethhdr *eth_hdr;
        struct sk_buff *skb;
-       u16 *version;
+       __be16 *version;
 
        skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH);
        if (!skb)
@@ -160,6 +202,36 @@ static struct sk_buff *br_mrp_alloc_test_skb(struct br_mrp *mrp,
        return skb;
 }
 
+static struct sk_buff *br_mrp_alloc_in_test_skb(struct br_mrp *mrp,
+                                               struct net_bridge_port *p,
+                                               enum br_mrp_port_role_type port_role)
+{
+       struct br_mrp_in_test_hdr *hdr = NULL;
+       struct sk_buff *skb = NULL;
+
+       if (!p)
+               return NULL;
+
+       skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_in_test_dmac);
+       if (!skb)
+               return NULL;
+
+       br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_IN_TEST, sizeof(*hdr));
+       hdr = skb_put(skb, sizeof(*hdr));
+
+       hdr->id = cpu_to_be16(mrp->in_id);
+       ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
+       hdr->port_role = cpu_to_be16(port_role);
+       hdr->state = cpu_to_be16(mrp->in_state);
+       hdr->transitions = cpu_to_be16(mrp->in_transitions);
+       hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));
+
+       br_mrp_skb_common(skb, mrp);
+       br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);
+
+       return skb;
+}
+
 /* This function is continuously called in the following cases:
  * - when node role is MRM, in this case test_monitor is always set to false
  *   because it needs to notify the userspace that the ring is open and needs to
@@ -213,7 +285,7 @@ static void br_mrp_test_work_expired(struct work_struct *work)
                }
 
                if (notify_open && !mrp->ring_role_offloaded)
-                       br_mrp_port_open(p->dev, true);
+                       br_mrp_ring_port_open(p->dev, true);
        }
 
        p = rcu_dereference(mrp->s_port);
@@ -229,7 +301,7 @@ static void br_mrp_test_work_expired(struct work_struct *work)
                }
 
                if (notify_open && !mrp->ring_role_offloaded)
-                       br_mrp_port_open(p->dev, true);
+                       br_mrp_ring_port_open(p->dev, true);
        }
 
 out:
@@ -239,6 +311,83 @@ out:
                           usecs_to_jiffies(mrp->test_interval));
 }
 
+/* This function is continuously called when the node has the interconnect role
+ * MIM. It would generate interconnect test frames and will send them on all 3
+ * ports. But will also check if it stop receiving interconnect test frames.
+ */
+static void br_mrp_in_test_work_expired(struct work_struct *work)
+{
+       struct delayed_work *del_work = to_delayed_work(work);
+       struct br_mrp *mrp = container_of(del_work, struct br_mrp, in_test_work);
+       struct net_bridge_port *p;
+       bool notify_open = false;
+       struct sk_buff *skb;
+
+       if (time_before_eq(mrp->in_test_end, jiffies))
+               return;
+
+       if (mrp->in_test_count_miss < mrp->in_test_max_miss) {
+               mrp->in_test_count_miss++;
+       } else {
+               /* Notify that the interconnect ring is open only if the
+                * interconnect ring state is closed, otherwise it would
+                * continue to notify at every interval.
+                */
+               if (mrp->in_state == BR_MRP_IN_STATE_CLOSED)
+                       notify_open = true;
+       }
+
+       rcu_read_lock();
+
+       p = rcu_dereference(mrp->p_port);
+       if (p) {
+               skb = br_mrp_alloc_in_test_skb(mrp, p,
+                                              BR_MRP_PORT_ROLE_PRIMARY);
+               if (!skb)
+                       goto out;
+
+               skb_reset_network_header(skb);
+               dev_queue_xmit(skb);
+
+               if (notify_open && !mrp->in_role_offloaded)
+                       br_mrp_in_port_open(p->dev, true);
+       }
+
+       p = rcu_dereference(mrp->s_port);
+       if (p) {
+               skb = br_mrp_alloc_in_test_skb(mrp, p,
+                                              BR_MRP_PORT_ROLE_SECONDARY);
+               if (!skb)
+                       goto out;
+
+               skb_reset_network_header(skb);
+               dev_queue_xmit(skb);
+
+               if (notify_open && !mrp->in_role_offloaded)
+                       br_mrp_in_port_open(p->dev, true);
+       }
+
+       p = rcu_dereference(mrp->i_port);
+       if (p) {
+               skb = br_mrp_alloc_in_test_skb(mrp, p,
+                                              BR_MRP_PORT_ROLE_INTER);
+               if (!skb)
+                       goto out;
+
+               skb_reset_network_header(skb);
+               dev_queue_xmit(skb);
+
+               if (notify_open && !mrp->in_role_offloaded)
+                       br_mrp_in_port_open(p->dev, true);
+       }
+
+out:
+       rcu_read_unlock();
+
+       queue_delayed_work(system_wq, &mrp->in_test_work,
+                          usecs_to_jiffies(mrp->in_test_interval));
+}
+
 /* Deletes the MRP instance.
  * note: called under rtnl_lock
  */
@@ -251,6 +400,10 @@ static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp)
        cancel_delayed_work_sync(&mrp->test_work);
        br_mrp_switchdev_send_ring_test(br, mrp, 0, 0, 0, 0);
 
+       /* Stop sending MRP_InTest frames if has an interconnect role */
+       cancel_delayed_work_sync(&mrp->in_test_work);
+       br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);
+
        br_mrp_switchdev_del(br, mrp);
 
        /* Reset the ports */
@@ -278,6 +431,18 @@ static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp)
                rcu_assign_pointer(mrp->s_port, NULL);
        }
 
+       p = rtnl_dereference(mrp->i_port);
+       if (p) {
+               spin_lock_bh(&br->lock);
+               state = netif_running(br->dev) ?
+                               BR_STATE_FORWARDING : BR_STATE_DISABLED;
+               p->state = state;
+               p->flags &= ~BR_MRP_AWARE;
+               spin_unlock_bh(&br->lock);
+               br_mrp_port_switchdev_set_state(p, state);
+               rcu_assign_pointer(mrp->i_port, NULL);
+       }
+
        list_del_rcu(&mrp->list);
        kfree_rcu(mrp, rcu);
 }
@@ -329,6 +494,7 @@ int br_mrp_add(struct net_bridge *br, struct br_mrp_instance *instance)
        rcu_assign_pointer(mrp->s_port, p);
 
        INIT_DELAYED_WORK(&mrp->test_work, br_mrp_test_work_expired);
+       INIT_DELAYED_WORK(&mrp->in_test_work, br_mrp_in_test_work_expired);
        list_add_tail_rcu(&mrp->list, &br->mrp_list);
 
        err = br_mrp_switchdev_add(br, mrp);
@@ -411,10 +577,16 @@ int br_mrp_set_port_role(struct net_bridge_port *p,
        if (!mrp)
                return -EINVAL;
 
-       if (role == BR_MRP_PORT_ROLE_PRIMARY)
+       switch (role) {
+       case BR_MRP_PORT_ROLE_PRIMARY:
                rcu_assign_pointer(mrp->p_port, p);
-       else
+               break;
+       case BR_MRP_PORT_ROLE_SECONDARY:
                rcu_assign_pointer(mrp->s_port, p);
+               break;
+       default:
+               return -EINVAL;
+       }
 
        br_mrp_port_switchdev_set_role(p, role);
 
@@ -505,6 +677,180 @@ int br_mrp_start_test(struct net_bridge *br,
        return 0;
 }
 
+/* Set in state, int state can be only Open or Closed
+ * note: already called with rtnl_lock
+ */
+int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state)
+{
+       struct br_mrp *mrp = br_mrp_find_in_id(br, state->in_id);
+
+       if (!mrp)
+               return -EINVAL;
+
+       if (mrp->in_state == BR_MRP_IN_STATE_CLOSED &&
+           state->in_state != BR_MRP_IN_STATE_CLOSED)
+               mrp->in_transitions++;
+
+       mrp->in_state = state->in_state;
+
+       br_mrp_switchdev_set_in_state(br, mrp, state->in_state);
+
+       return 0;
+}
+
+/* Set in role, in role can be only MIM(Media Interconnection Manager) or
+ * MIC(Media Interconnection Client).
+ * note: already called with rtnl_lock
+ */
+int br_mrp_set_in_role(struct net_bridge *br, struct br_mrp_in_role *role)
+{
+       struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
+       struct net_bridge_port *p;
+       int err;
+
+       if (!mrp)
+               return -EINVAL;
+
+       if (!br_mrp_get_port(br, role->i_ifindex))
+               return -EINVAL;
+
+       if (role->in_role == BR_MRP_IN_ROLE_DISABLED) {
+               u8 state;
+
+               /* It is not allowed to disable a port that doesn't exist */
+               p = rtnl_dereference(mrp->i_port);
+               if (!p)
+                       return -EINVAL;
+
+               /* Stop the generating MRP_InTest frames */
+               cancel_delayed_work_sync(&mrp->in_test_work);
+               br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);
+
+               /* Remove the port */
+               spin_lock_bh(&br->lock);
+               state = netif_running(br->dev) ?
+                               BR_STATE_FORWARDING : BR_STATE_DISABLED;
+               p->state = state;
+               p->flags &= ~BR_MRP_AWARE;
+               spin_unlock_bh(&br->lock);
+               br_mrp_port_switchdev_set_state(p, state);
+               rcu_assign_pointer(mrp->i_port, NULL);
+
+               mrp->in_role = role->in_role;
+               mrp->in_id = 0;
+
+               return 0;
+       }
+
+       /* It is not possible to have the same port part of multiple rings */
+       if (!br_mrp_unique_ifindex(br, role->i_ifindex))
+               return -EINVAL;
+
+       /* It is not allowed to set a different interconnect port if the mrp
+        * instance has already one. First it needs to be disabled and after
+        * that set the new port
+        */
+       if (rcu_access_pointer(mrp->i_port))
+               return -EINVAL;
+
+       p = br_mrp_get_port(br, role->i_ifindex);
+       spin_lock_bh(&br->lock);
+       p->state = BR_STATE_FORWARDING;
+       p->flags |= BR_MRP_AWARE;
+       spin_unlock_bh(&br->lock);
+       rcu_assign_pointer(mrp->i_port, p);
+
+       mrp->in_role = role->in_role;
+       mrp->in_id = role->in_id;
+
+       /* If there is an error just bailed out */
+       err = br_mrp_switchdev_set_in_role(br, mrp, role->in_id,
+                                          role->ring_id, role->in_role);
+       if (err && err != -EOPNOTSUPP)
+               return err;
+
+       /* Now detect if the HW actually applied the role or not. If the HW
+        * applied the role it means that the SW will not to do those operations
+        * anymore. For example if the role is MIM then the HW will notify the
+        * SW when interconnect ring is open, but if the is not pushed to the HW
+        * the SW will need to detect when the interconnect ring is open.
+        */
+       mrp->in_role_offloaded = err == -EOPNOTSUPP ? 0 : 1;
+
+       return 0;
+}
+
+/* Start to generate MRP_InTest frames, the frames are generated by
+ * HW and if it fails, they are generated by the SW.
+ * note: already called with rtnl_lock
+ */
+int br_mrp_start_in_test(struct net_bridge *br,
+                        struct br_mrp_start_in_test *in_test)
+{
+       struct br_mrp *mrp = br_mrp_find_in_id(br, in_test->in_id);
+
+       if (!mrp)
+               return -EINVAL;
+
+       if (mrp->in_role != BR_MRP_IN_ROLE_MIM)
+               return -EINVAL;
+
+       /* Try to push it to the HW and if it fails then continue with SW
+        * implementation and if that also fails then return error.
+        */
+       if (!br_mrp_switchdev_send_in_test(br, mrp, in_test->interval,
+                                          in_test->max_miss, in_test->period))
+               return 0;
+
+       mrp->in_test_interval = in_test->interval;
+       mrp->in_test_end = jiffies + usecs_to_jiffies(in_test->period);
+       mrp->in_test_max_miss = in_test->max_miss;
+       mrp->in_test_count_miss = 0;
+       queue_delayed_work(system_wq, &mrp->in_test_work,
+                          usecs_to_jiffies(in_test->interval));
+
+       return 0;
+}
+
+/* Determin if the frame type is a ring frame */
+static bool br_mrp_ring_frame(struct sk_buff *skb)
+{
+       const struct br_mrp_tlv_hdr *hdr;
+       struct br_mrp_tlv_hdr _hdr;
+
+       hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
+       if (!hdr)
+               return false;
+
+       if (hdr->type == BR_MRP_TLV_HEADER_RING_TEST ||
+           hdr->type == BR_MRP_TLV_HEADER_RING_TOPO ||
+           hdr->type == BR_MRP_TLV_HEADER_RING_LINK_DOWN ||
+           hdr->type == BR_MRP_TLV_HEADER_RING_LINK_UP ||
+           hdr->type == BR_MRP_TLV_HEADER_OPTION)
+               return true;
+
+       return false;
+}
+
+/* Determin if the frame type is an interconnect frame */
+static bool br_mrp_in_frame(struct sk_buff *skb)
+{
+       const struct br_mrp_tlv_hdr *hdr;
+       struct br_mrp_tlv_hdr _hdr;
+
+       hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
+       if (!hdr)
+               return false;
+
+       if (hdr->type == BR_MRP_TLV_HEADER_IN_TEST ||
+           hdr->type == BR_MRP_TLV_HEADER_IN_TOPO ||
+           hdr->type == BR_MRP_TLV_HEADER_IN_LINK_DOWN ||
+           hdr->type == BR_MRP_TLV_HEADER_IN_LINK_UP)
+               return true;
+
+       return false;
+}
+
 /* Process only MRP Test frame. All the other MRP frames are processed by
  * userspace application
  * note: already called with rcu_read_lock
@@ -531,7 +877,7 @@ static void br_mrp_mrm_process(struct br_mrp *mrp, struct net_bridge_port *port,
         * not closed
         */
        if (mrp->ring_state != BR_MRP_RING_STATE_CLOSED)
-               br_mrp_port_open(port->dev, false);
+               br_mrp_ring_port_open(port->dev, false);
 }
 
 /* Determin if the test hdr has a better priority than the node */
@@ -585,17 +931,92 @@ static void br_mrp_mra_process(struct br_mrp *mrp, struct net_bridge *br,
                mrp->test_count_miss = 0;
 }
 
-/* This will just forward the frame to the other mrp ring port(MRC role) or will
- * not do anything.
+/* Process only MRP InTest frame. All the other MRP frames are processed by
+ * userspace application
+ * note: already called with rcu_read_lock
+ */
+static bool br_mrp_mim_process(struct br_mrp *mrp, struct net_bridge_port *port,
+                              struct sk_buff *skb)
+{
+       const struct br_mrp_in_test_hdr *in_hdr;
+       struct br_mrp_in_test_hdr _in_hdr;
+       const struct br_mrp_tlv_hdr *hdr;
+       struct br_mrp_tlv_hdr _hdr;
+
+       /* Each MRP header starts with a version field which is 16 bits.
+        * Therefore skip the version and get directly the TLV header.
+        */
+       hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
+       if (!hdr)
+               return false;
+
+       /* The check for InTest frame type was already done */
+       in_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
+                                   sizeof(_in_hdr), &_in_hdr);
+       if (!in_hdr)
+               return false;
+
+       /* It needs to process only its own InTest frames. */
+       if (mrp->in_id != ntohs(in_hdr->id))
+               return false;
+
+       mrp->in_test_count_miss = 0;
+
+       /* Notify the userspace that the ring is closed only when the ring is
+        * not closed
+        */
+       if (mrp->in_state != BR_MRP_IN_STATE_CLOSED)
+               br_mrp_in_port_open(port->dev, false);
+
+       return true;
+}
+
+/* Get the MRP frame type
+ * note: already called with rcu_read_lock
+ */
+static u8 br_mrp_get_frame_type(struct sk_buff *skb)
+{
+       const struct br_mrp_tlv_hdr *hdr;
+       struct br_mrp_tlv_hdr _hdr;
+
+       /* Each MRP header starts with a version field which is 16 bits.
+        * Therefore skip the version and get directly the TLV header.
+        */
+       hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
+       if (!hdr)
+               return 0xff;
+
+       return hdr->type;
+}
+
+static bool br_mrp_mrm_behaviour(struct br_mrp *mrp)
+{
+       if (mrp->ring_role == BR_MRP_RING_ROLE_MRM ||
+           (mrp->ring_role == BR_MRP_RING_ROLE_MRA && !mrp->test_monitor))
+               return true;
+
+       return false;
+}
+
+static bool br_mrp_mrc_behaviour(struct br_mrp *mrp)
+{
+       if (mrp->ring_role == BR_MRP_RING_ROLE_MRC ||
+           (mrp->ring_role == BR_MRP_RING_ROLE_MRA && mrp->test_monitor))
+               return true;
+
+       return false;
+}
+
+/* This will just forward the frame to the other mrp ring ports, depending on
+ * the frame type, ring role and interconnect role
  * note: already called with rcu_read_lock
  */
 static int br_mrp_rcv(struct net_bridge_port *p,
                      struct sk_buff *skb, struct net_device *dev)
 {
-       struct net_device *s_dev, *p_dev, *d_dev;
-       struct net_bridge_port *p_port, *s_port;
+       struct net_bridge_port *p_port, *s_port, *i_port = NULL;
+       struct net_bridge_port *p_dst, *s_dst, *i_dst = NULL;
        struct net_bridge *br;
-       struct sk_buff *nskb;
        struct br_mrp *mrp;
 
        /* If port is disabled don't accept any frames */
@@ -610,46 +1031,139 @@ static int br_mrp_rcv(struct net_bridge_port *p,
        p_port = rcu_dereference(mrp->p_port);
        if (!p_port)
                return 0;
+       p_dst = p_port;
 
        s_port = rcu_dereference(mrp->s_port);
        if (!s_port)
                return 0;
+       s_dst = s_port;
 
-       /* If the role is MRM then don't forward the frames */
-       if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) {
-               br_mrp_mrm_process(mrp, p, skb);
-               return 1;
-       }
-
-       /* If the role is MRA then don't forward the frames if it behaves as
-        * MRM node
+       /* If the frame is a ring frame then it is not required to check the
+        * interconnect role and ports to process or forward the frame
         */
-       if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
-               if (!mrp->test_monitor) {
+       if (br_mrp_ring_frame(skb)) {
+               /* If the role is MRM then don't forward the frames */
+               if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) {
                        br_mrp_mrm_process(mrp, p, skb);
-                       return 1;
+                       goto no_forward;
                }
 
-               br_mrp_mra_process(mrp, br, p, skb);
+               /* If the role is MRA then don't forward the frames if it
+                * behaves as MRM node
+                */
+               if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
+                       if (!mrp->test_monitor) {
+                               br_mrp_mrm_process(mrp, p, skb);
+                               goto no_forward;
+                       }
+
+                       br_mrp_mra_process(mrp, br, p, skb);
+               }
+
+               goto forward;
        }
 
-       /* Clone the frame and forward it on the other MRP port */
-       nskb = skb_clone(skb, GFP_ATOMIC);
-       if (!nskb)
-               return 0;
+       if (br_mrp_in_frame(skb)) {
+               u8 in_type = br_mrp_get_frame_type(skb);
 
-       p_dev = p_port->dev;
-       s_dev = s_port->dev;
+               i_port = rcu_dereference(mrp->i_port);
+               i_dst = i_port;
 
-       if (p_dev == dev)
-               d_dev = s_dev;
-       else
-               d_dev = p_dev;
+               /* If the ring port is in block state it should not forward
+                * In_Test frames
+                */
+               if (br_mrp_is_ring_port(p_port, s_port, p) &&
+                   p->state == BR_STATE_BLOCKING &&
+                   in_type == BR_MRP_TLV_HEADER_IN_TEST)
+                       goto no_forward;
+
+               /* A node that behaves as MRM needs to stop forwarding the
+                * frames in case the ring is closed, otherwise there will be a
+                * loop. In this case the frame is not forwarded between the
+                * ring ports.
+                */
+               if (br_mrp_mrm_behaviour(mrp) &&
+                   br_mrp_is_ring_port(p_port, s_port, p) &&
+                   (s_port->state != BR_STATE_FORWARDING ||
+                    p_port->state != BR_STATE_FORWARDING)) {
+                       p_dst = NULL;
+                       s_dst = NULL;
+               }
+
+               /* A node that behaves as MRC and doesn't have an interconnect
+                * role then it should forward all frames between the ring ports
+                * because it doesn't have an interconnect port
+                */
+               if (br_mrp_mrc_behaviour(mrp) &&
+                   mrp->in_role == BR_MRP_IN_ROLE_DISABLED)
+                       goto forward;
+
+               if (mrp->in_role == BR_MRP_IN_ROLE_MIM) {
+                       if (in_type == BR_MRP_TLV_HEADER_IN_TEST) {
+                               /* MIM should not forward its own InTest
+                                * frames
+                                */
+                               if (br_mrp_mim_process(mrp, p, skb)) {
+                                       goto no_forward;
+                               } else {
+                                       if (br_mrp_is_ring_port(p_port, s_port,
+                                                               p))
+                                               i_dst = NULL;
+
+                                       if (br_mrp_is_in_port(i_port, p))
+                                               goto no_forward;
+                               }
+                       } else {
+                               /* MIM should forward IntLinkChange and
+                                * IntTopoChange between ring ports but MIM
+                                * should not forward IntLinkChange and
+                                * IntTopoChange if the frame was received at
+                                * the interconnect port
+                                */
+                               if (br_mrp_is_ring_port(p_port, s_port, p))
+                                       i_dst = NULL;
+
+                               if (br_mrp_is_in_port(i_port, p))
+                                       goto no_forward;
+                       }
+               }
+
+               if (mrp->in_role == BR_MRP_IN_ROLE_MIC) {
+                       /* MIC should forward InTest frames on all ports
+                        * regardless of the received port
+                        */
+                       if (in_type == BR_MRP_TLV_HEADER_IN_TEST)
+                               goto forward;
+
+                       /* MIC should forward IntLinkChange frames only if they
+                        * are received on ring ports to all the ports
+                        */
+                       if (br_mrp_is_ring_port(p_port, s_port, p) &&
+                           (in_type == BR_MRP_TLV_HEADER_IN_LINK_UP ||
+                            in_type == BR_MRP_TLV_HEADER_IN_LINK_DOWN))
+                               goto forward;
+
+                       /* Should forward the InTopo frames only between the
+                        * ring ports
+                        */
+                       if (in_type == BR_MRP_TLV_HEADER_IN_TOPO) {
+                               i_dst = NULL;
+                               goto forward;
+                       }
+
+                       /* In all the other cases don't forward the frames */
+                       goto no_forward;
+               }
+       }
 
-       nskb->dev = d_dev;
-       skb_push(nskb, ETH_HLEN);
-       dev_queue_xmit(nskb);
+forward:
+       if (p_dst)
+               br_forward(p_dst, skb, true, false);
+       if (s_dst)
+               br_forward(s_dst, skb, true, false);
+       if (i_dst)
+               br_forward(i_dst, skb, true, false);
 
+no_forward:
        return 1;
 }
 
index 34b3a87..2a2fdf3 100644 (file)
@@ -14,6 +14,9 @@ static const struct nla_policy br_mrp_policy[IFLA_BRIDGE_MRP_MAX + 1] = {
        [IFLA_BRIDGE_MRP_RING_STATE]    = { .type = NLA_NESTED },
        [IFLA_BRIDGE_MRP_RING_ROLE]     = { .type = NLA_NESTED },
        [IFLA_BRIDGE_MRP_START_TEST]    = { .type = NLA_NESTED },
+       [IFLA_BRIDGE_MRP_IN_ROLE]       = { .type = NLA_NESTED },
+       [IFLA_BRIDGE_MRP_IN_STATE]      = { .type = NLA_NESTED },
+       [IFLA_BRIDGE_MRP_START_IN_TEST] = { .type = NLA_NESTED },
 };
 
 static const struct nla_policy
@@ -235,6 +238,121 @@ static int br_mrp_start_test_parse(struct net_bridge *br, struct nlattr *attr,
        return br_mrp_start_test(br, &test);
 }
 
+static const struct nla_policy
+br_mrp_in_state_policy[IFLA_BRIDGE_MRP_IN_STATE_MAX + 1] = {
+       [IFLA_BRIDGE_MRP_IN_STATE_UNSPEC]       = { .type = NLA_REJECT },
+       [IFLA_BRIDGE_MRP_IN_STATE_IN_ID]        = { .type = NLA_U32 },
+       [IFLA_BRIDGE_MRP_IN_STATE_STATE]        = { .type = NLA_U32 },
+};
+
+static int br_mrp_in_state_parse(struct net_bridge *br, struct nlattr *attr,
+                                struct netlink_ext_ack *extack)
+{
+       struct nlattr *tb[IFLA_BRIDGE_MRP_IN_STATE_MAX + 1];
+       struct br_mrp_in_state state;
+       int err;
+
+       err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_IN_STATE_MAX, attr,
+                              br_mrp_in_state_policy, extack);
+       if (err)
+               return err;
+
+       if (!tb[IFLA_BRIDGE_MRP_IN_STATE_IN_ID] ||
+           !tb[IFLA_BRIDGE_MRP_IN_STATE_STATE]) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Missing attribute: IN_ID or STATE");
+               return -EINVAL;
+       }
+
+       memset(&state, 0x0, sizeof(state));
+
+       state.in_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_STATE_IN_ID]);
+       state.in_state = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_STATE_STATE]);
+
+       return br_mrp_set_in_state(br, &state);
+}
+
+static const struct nla_policy
+br_mrp_in_role_policy[IFLA_BRIDGE_MRP_IN_ROLE_MAX + 1] = {
+       [IFLA_BRIDGE_MRP_IN_ROLE_UNSPEC]        = { .type = NLA_REJECT },
+       [IFLA_BRIDGE_MRP_IN_ROLE_RING_ID]       = { .type = NLA_U32 },
+       [IFLA_BRIDGE_MRP_IN_ROLE_IN_ID]         = { .type = NLA_U16 },
+       [IFLA_BRIDGE_MRP_IN_ROLE_ROLE]          = { .type = NLA_U32 },
+       [IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX]     = { .type = NLA_U32 },
+};
+
+static int br_mrp_in_role_parse(struct net_bridge *br, struct nlattr *attr,
+                               struct netlink_ext_ack *extack)
+{
+       struct nlattr *tb[IFLA_BRIDGE_MRP_IN_ROLE_MAX + 1];
+       struct br_mrp_in_role role;
+       int err;
+
+       err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_IN_ROLE_MAX, attr,
+                              br_mrp_in_role_policy, extack);
+       if (err)
+               return err;
+
+       if (!tb[IFLA_BRIDGE_MRP_IN_ROLE_RING_ID] ||
+           !tb[IFLA_BRIDGE_MRP_IN_ROLE_IN_ID] ||
+           !tb[IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX] ||
+           !tb[IFLA_BRIDGE_MRP_IN_ROLE_ROLE]) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Missing attribute: RING_ID or ROLE or IN_ID or I_IFINDEX");
+               return -EINVAL;
+       }
+
+       memset(&role, 0x0, sizeof(role));
+
+       role.ring_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_ROLE_RING_ID]);
+       role.in_id = nla_get_u16(tb[IFLA_BRIDGE_MRP_IN_ROLE_IN_ID]);
+       role.i_ifindex = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_ROLE_I_IFINDEX]);
+       role.in_role = nla_get_u32(tb[IFLA_BRIDGE_MRP_IN_ROLE_ROLE]);
+
+       return br_mrp_set_in_role(br, &role);
+}
+
+static const struct nla_policy
+br_mrp_start_in_test_policy[IFLA_BRIDGE_MRP_START_IN_TEST_MAX + 1] = {
+       [IFLA_BRIDGE_MRP_START_IN_TEST_UNSPEC]  = { .type = NLA_REJECT },
+       [IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID]   = { .type = NLA_U32 },
+       [IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL]        = { .type = NLA_U32 },
+       [IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS]        = { .type = NLA_U32 },
+       [IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD]  = { .type = NLA_U32 },
+};
+
+static int br_mrp_start_in_test_parse(struct net_bridge *br,
+                                     struct nlattr *attr,
+                                     struct netlink_ext_ack *extack)
+{
+       struct nlattr *tb[IFLA_BRIDGE_MRP_START_IN_TEST_MAX + 1];
+       struct br_mrp_start_in_test test;
+       int err;
+
+       err = nla_parse_nested(tb, IFLA_BRIDGE_MRP_START_IN_TEST_MAX, attr,
+                              br_mrp_start_in_test_policy, extack);
+       if (err)
+               return err;
+
+       if (!tb[IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID] ||
+           !tb[IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL] ||
+           !tb[IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS] ||
+           !tb[IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD]) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Missing attribute: IN_ID or INTERVAL or MAX_MISS or PERIOD");
+               return -EINVAL;
+       }
+
+       memset(&test, 0x0, sizeof(test));
+
+       test.in_id = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_IN_ID]);
+       test.interval = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_INTERVAL]);
+       test.max_miss = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_MAX_MISS]);
+       test.period = nla_get_u32(tb[IFLA_BRIDGE_MRP_START_IN_TEST_PERIOD]);
+
+       return br_mrp_start_in_test(br, &test);
+}
+
 int br_mrp_parse(struct net_bridge *br, struct net_bridge_port *p,
                 struct nlattr *attr, int cmd, struct netlink_ext_ack *extack)
 {
@@ -301,10 +419,114 @@ int br_mrp_parse(struct net_bridge *br, struct net_bridge_port *p,
                        return err;
        }
 
+       if (tb[IFLA_BRIDGE_MRP_IN_STATE]) {
+               err = br_mrp_in_state_parse(br, tb[IFLA_BRIDGE_MRP_IN_STATE],
+                                           extack);
+               if (err)
+                       return err;
+       }
+
+       if (tb[IFLA_BRIDGE_MRP_IN_ROLE]) {
+               err = br_mrp_in_role_parse(br, tb[IFLA_BRIDGE_MRP_IN_ROLE],
+                                          extack);
+               if (err)
+                       return err;
+       }
+
+       if (tb[IFLA_BRIDGE_MRP_START_IN_TEST]) {
+               err = br_mrp_start_in_test_parse(br,
+                                                tb[IFLA_BRIDGE_MRP_START_IN_TEST],
+                                                extack);
+               if (err)
+                       return err;
+       }
+
        return 0;
 }
 
-int br_mrp_port_open(struct net_device *dev, u8 loc)
+int br_mrp_fill_info(struct sk_buff *skb, struct net_bridge *br)
+{
+       struct nlattr *tb, *mrp_tb;
+       struct br_mrp *mrp;
+
+       mrp_tb = nla_nest_start_noflag(skb, IFLA_BRIDGE_MRP);
+       if (!mrp_tb)
+               return -EMSGSIZE;
+
+       list_for_each_entry_rcu(mrp, &br->mrp_list, list) {
+               struct net_bridge_port *p;
+
+               tb = nla_nest_start_noflag(skb, IFLA_BRIDGE_MRP_INFO);
+               if (!tb)
+                       goto nla_info_failure;
+
+               if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_RING_ID,
+                               mrp->ring_id))
+                       goto nla_put_failure;
+
+               p = rcu_dereference(mrp->p_port);
+               if (p && nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_P_IFINDEX,
+                                    p->dev->ifindex))
+                       goto nla_put_failure;
+
+               p = rcu_dereference(mrp->s_port);
+               if (p && nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_S_IFINDEX,
+                                    p->dev->ifindex))
+                       goto nla_put_failure;
+
+               p = rcu_dereference(mrp->i_port);
+               if (p && nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_I_IFINDEX,
+                                    p->dev->ifindex))
+                       goto nla_put_failure;
+
+               if (nla_put_u16(skb, IFLA_BRIDGE_MRP_INFO_PRIO,
+                               mrp->prio))
+                       goto nla_put_failure;
+               if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_RING_STATE,
+                               mrp->ring_state))
+                       goto nla_put_failure;
+               if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_RING_ROLE,
+                               mrp->ring_role))
+                       goto nla_put_failure;
+               if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_TEST_INTERVAL,
+                               mrp->test_interval))
+                       goto nla_put_failure;
+               if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_TEST_MAX_MISS,
+                               mrp->test_max_miss))
+                       goto nla_put_failure;
+               if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_TEST_MONITOR,
+                               mrp->test_monitor))
+                       goto nla_put_failure;
+
+               if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_STATE,
+                               mrp->in_state))
+                       goto nla_put_failure;
+               if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_ROLE,
+                               mrp->in_role))
+                       goto nla_put_failure;
+               if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_TEST_INTERVAL,
+                               mrp->in_test_interval))
+                       goto nla_put_failure;
+               if (nla_put_u32(skb, IFLA_BRIDGE_MRP_INFO_IN_TEST_MAX_MISS,
+                               mrp->in_test_max_miss))
+                       goto nla_put_failure;
+
+               nla_nest_end(skb, tb);
+       }
+       nla_nest_end(skb, mrp_tb);
+
+       return 0;
+
+nla_put_failure:
+       nla_nest_cancel(skb, tb);
+
+nla_info_failure:
+       nla_nest_cancel(skb, mrp_tb);
+
+       return -EMSGSIZE;
+}
+
+int br_mrp_ring_port_open(struct net_device *dev, u8 loc)
 {
        struct net_bridge_port *p;
        int err = 0;
@@ -325,3 +547,25 @@ int br_mrp_port_open(struct net_device *dev, u8 loc)
 out:
        return err;
 }
+
+int br_mrp_in_port_open(struct net_device *dev, u8 loc)
+{
+       struct net_bridge_port *p;
+       int err = 0;
+
+       p = br_port_get_rcu(dev);
+       if (!p) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       if (loc)
+               p->flags |= BR_MRP_LOST_IN_CONT;
+       else
+               p->flags &= ~BR_MRP_LOST_IN_CONT;
+
+       br_ifinfo_notify(RTM_NEWLINK, NULL, p);
+
+out:
+       return err;
+}
index 0da68a0..ed547e0 100644 (file)
@@ -107,6 +107,68 @@ int br_mrp_switchdev_set_ring_state(struct net_bridge *br,
        return 0;
 }
 
+int br_mrp_switchdev_set_in_role(struct net_bridge *br, struct br_mrp *mrp,
+                                u16 in_id, u32 ring_id,
+                                enum br_mrp_in_role_type role)
+{
+       struct switchdev_obj_in_role_mrp mrp_role = {
+               .obj.orig_dev = br->dev,
+               .obj.id = SWITCHDEV_OBJ_ID_IN_ROLE_MRP,
+               .in_role = role,
+               .in_id = mrp->in_id,
+               .ring_id = mrp->ring_id,
+               .i_port = rtnl_dereference(mrp->i_port)->dev,
+       };
+       int err;
+
+       if (role == BR_MRP_IN_ROLE_DISABLED)
+               err = switchdev_port_obj_del(br->dev, &mrp_role.obj);
+       else
+               err = switchdev_port_obj_add(br->dev, &mrp_role.obj, NULL);
+
+       return err;
+}
+
+int br_mrp_switchdev_set_in_state(struct net_bridge *br, struct br_mrp *mrp,
+                                 enum br_mrp_in_state_type state)
+{
+       struct switchdev_obj_in_state_mrp mrp_state = {
+               .obj.orig_dev = br->dev,
+               .obj.id = SWITCHDEV_OBJ_ID_IN_STATE_MRP,
+               .in_state = state,
+               .in_id = mrp->in_id,
+       };
+       int err;
+
+       err = switchdev_port_obj_add(br->dev, &mrp_state.obj, NULL);
+
+       if (err && err != -EOPNOTSUPP)
+               return err;
+
+       return 0;
+}
+
+int br_mrp_switchdev_send_in_test(struct net_bridge *br, struct br_mrp *mrp,
+                                 u32 interval, u8 max_miss, u32 period)
+{
+       struct switchdev_obj_in_test_mrp test = {
+               .obj.orig_dev = br->dev,
+               .obj.id = SWITCHDEV_OBJ_ID_IN_TEST_MRP,
+               .interval = interval,
+               .max_miss = max_miss,
+               .in_id = mrp->in_id,
+               .period = period,
+       };
+       int err;
+
+       if (interval == 0)
+               err = switchdev_port_obj_del(br->dev, &test.obj);
+       else
+               err = switchdev_port_obj_add(br->dev, &test.obj, NULL);
+
+       return err;
+}
+
 int br_mrp_port_switchdev_set_state(struct net_bridge_port *p,
                                    enum br_mrp_port_state_type state)
 {
index 83490bf..4c4a93a 100644 (file)
@@ -1007,7 +1007,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
                nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
 
                if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
-                   nsrcs_offset + sizeof(_nsrcs))
+                   nsrcs_offset + sizeof(__nsrcs))
                        return -EINVAL;
 
                _nsrcs = skb_header_pointer(skb, nsrcs_offset,
index 240e260..147d525 100644 (file)
@@ -152,6 +152,7 @@ static inline size_t br_port_info_size(void)
 #endif
                + nla_total_size(sizeof(u16))   /* IFLA_BRPORT_GROUP_FWD_MASK */
                + nla_total_size(sizeof(u8))    /* IFLA_BRPORT_MRP_RING_OPEN */
+               + nla_total_size(sizeof(u8))    /* IFLA_BRPORT_MRP_IN_OPEN */
                + 0;
 }
 
@@ -216,6 +217,8 @@ static int br_port_fill_attrs(struct sk_buff *skb,
                       !!(p->flags & BR_NEIGH_SUPPRESS)) ||
            nla_put_u8(skb, IFLA_BRPORT_MRP_RING_OPEN, !!(p->flags &
                                                          BR_MRP_LOST_CONT)) ||
+           nla_put_u8(skb, IFLA_BRPORT_MRP_IN_OPEN,
+                      !!(p->flags & BR_MRP_LOST_IN_CONT)) ||
            nla_put_u8(skb, IFLA_BRPORT_ISOLATED, !!(p->flags & BR_ISOLATED)))
                return -EMSGSIZE;
 
@@ -453,6 +456,28 @@ static int br_fill_ifinfo(struct sk_buff *skb,
                rcu_read_unlock();
                if (err)
                        goto nla_put_failure;
+
+               nla_nest_end(skb, af);
+       }
+
+       if (filter_mask & RTEXT_FILTER_MRP) {
+               struct nlattr *af;
+               int err;
+
+               if (!br_mrp_enabled(br) || port)
+                       goto done;
+
+               af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
+               if (!af)
+                       goto nla_put_failure;
+
+               rcu_read_lock();
+               err = br_mrp_fill_info(skb, br);
+               rcu_read_unlock();
+
+               if (err)
+                       goto nla_put_failure;
+
                nla_nest_end(skb, af);
        }
 
@@ -516,7 +541,8 @@ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
        struct net_bridge_port *port = br_port_get_rtnl(dev);
 
        if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
-           !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
+           !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) &&
+           !(filter_mask & RTEXT_FILTER_MRP))
                return 0;
 
        return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
index 162998e..8914290 100644 (file)
@@ -250,6 +250,36 @@ int br_parse_vlan_tunnel_info(struct nlattr *attr,
        return 0;
 }
 
+/* send a notification if v_curr can't enter the range and start a new one */
+static void __vlan_tunnel_handle_range(const struct net_bridge_port *p,
+                                      struct net_bridge_vlan **v_start,
+                                      struct net_bridge_vlan **v_end,
+                                      int v_curr, bool curr_change)
+{
+       struct net_bridge_vlan_group *vg;
+       struct net_bridge_vlan *v;
+
+       vg = nbp_vlan_group(p);
+       if (!vg)
+               return;
+
+       v = br_vlan_find(vg, v_curr);
+
+       if (!*v_start)
+               goto out_init;
+
+       if (v && curr_change && br_vlan_can_enter_range(v, *v_end)) {
+               *v_end = v;
+               return;
+       }
+
+       br_vlan_notify(p->br, p, (*v_start)->vid, (*v_end)->vid, RTM_NEWVLAN);
+out_init:
+       /* we start a range only if there are any changes to notify about */
+       *v_start = curr_change ? v : NULL;
+       *v_end = *v_start;
+}
+
 int br_process_vlan_tunnel_info(const struct net_bridge *br,
                                const struct net_bridge_port *p, int cmd,
                                struct vtunnel_info *tinfo_curr,
@@ -263,6 +293,7 @@ int br_process_vlan_tunnel_info(const struct net_bridge *br,
                        return -EINVAL;
                memcpy(tinfo_last, tinfo_curr, sizeof(struct vtunnel_info));
        } else if (tinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END) {
+               struct net_bridge_vlan *v_start = NULL, *v_end = NULL;
                int t, v;
 
                if (!(tinfo_last->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN))
@@ -272,11 +303,24 @@ int br_process_vlan_tunnel_info(const struct net_bridge *br,
                        return -EINVAL;
                t = tinfo_last->tunid;
                for (v = tinfo_last->vid; v <= tinfo_curr->vid; v++) {
-                       err = br_vlan_tunnel_info(p, cmd, v, t, changed);
+                       bool curr_change = false;
+
+                       err = br_vlan_tunnel_info(p, cmd, v, t, &curr_change);
                        if (err)
-                               return err;
+                               break;
                        t++;
+
+                       if (curr_change)
+                               *changed = curr_change;
+                        __vlan_tunnel_handle_range(p, &v_start, &v_end, v,
+                                                   curr_change);
                }
+               if (v_start && v_end)
+                       br_vlan_notify(br, p, v_start->vid, v_end->vid,
+                                      RTM_NEWVLAN);
+               if (err)
+                       return err;
+
                memset(tinfo_last, 0, sizeof(struct vtunnel_info));
                memset(tinfo_curr, 0, sizeof(struct vtunnel_info));
        } else {
@@ -286,6 +330,7 @@ int br_process_vlan_tunnel_info(const struct net_bridge *br,
                                          tinfo_curr->tunid, changed);
                if (err)
                        return err;
+               br_vlan_notify(br, p, tinfo_curr->vid, 0, RTM_NEWVLAN);
                memset(tinfo_last, 0, sizeof(struct vtunnel_info));
                memset(tinfo_curr, 0, sizeof(struct vtunnel_info));
        }
index 7501be4..baa1500 100644 (file)
@@ -48,6 +48,8 @@ enum {
 /* Path to usermode spanning tree program */
 #define BR_STP_PROG    "/sbin/bridge-stp"
 
+#define BR_FDB_NOTIFY_SETTABLE_BITS (FDB_NOTIFY_BIT | FDB_NOTIFY_INACTIVE_BIT)
+
 typedef struct bridge_id bridge_id;
 typedef struct mac_addr mac_addr;
 typedef __u16 port_id;
@@ -184,6 +186,8 @@ enum {
        BR_FDB_ADDED_BY_USER,
        BR_FDB_ADDED_BY_EXT_LEARN,
        BR_FDB_OFFLOADED,
+       BR_FDB_NOTIFY,
+       BR_FDB_NOTIFY_INACTIVE
 };
 
 struct net_bridge_fdb_key {
@@ -217,8 +221,8 @@ struct net_bridge_port_group {
        struct rcu_head                 rcu;
        struct timer_list               timer;
        struct br_ip                    addr;
+       unsigned char                   eth_addr[ETH_ALEN] __aligned(2);
        unsigned char                   flags;
-       unsigned char                   eth_addr[ETH_ALEN];
 };
 
 struct net_bridge_mdb_entry {
@@ -430,7 +434,7 @@ struct net_bridge {
        struct hlist_head               fdb_list;
 
 #if IS_ENABLED(CONFIG_BRIDGE_MRP)
-       struct list_head                __rcu mrp_list;
+       struct list_head                mrp_list;
 #endif
 };
 
@@ -1196,6 +1200,12 @@ static inline void br_vlan_notify(const struct net_bridge *br,
                                  int cmd)
 {
 }
+
+static inline bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
+                                          const struct net_bridge_vlan *range_end)
+{
+       return true;
+}
 #endif
 
 /* br_vlan_options.c */
@@ -1313,6 +1323,7 @@ int br_mrp_parse(struct net_bridge *br, struct net_bridge_port *p,
 int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb);
 bool br_mrp_enabled(struct net_bridge *br);
 void br_mrp_port_del(struct net_bridge *br, struct net_bridge_port *p);
+int br_mrp_fill_info(struct sk_buff *skb, struct net_bridge *br);
 #else
 static inline int br_mrp_parse(struct net_bridge *br, struct net_bridge_port *p,
                               struct nlattr *attr, int cmd,
@@ -1335,6 +1346,12 @@ static inline void br_mrp_port_del(struct net_bridge *br,
                                   struct net_bridge_port *p)
 {
 }
+
+static inline int br_mrp_fill_info(struct sk_buff *skb, struct net_bridge *br)
+{
+       return 0;
+}
+
 #endif
 
 /* br_netlink.c */
index 33b255e..af0e9ef 100644 (file)
@@ -8,12 +8,14 @@
 
 struct br_mrp {
        /* list of mrp instances */
-       struct list_head                __rcu list;
+       struct list_head                list;
 
        struct net_bridge_port __rcu    *p_port;
        struct net_bridge_port __rcu    *s_port;
+       struct net_bridge_port __rcu    *i_port;
 
        u32                             ring_id;
+       u16                             in_id;
        u16                             prio;
 
        enum br_mrp_ring_role_type      ring_role;
@@ -21,6 +23,11 @@ struct br_mrp {
        enum br_mrp_ring_state_type     ring_state;
        u32                             ring_transitions;
 
+       enum br_mrp_in_role_type        in_role;
+       u8                              in_role_offloaded;
+       enum br_mrp_in_state_type       in_state;
+       u32                             in_transitions;
+
        struct delayed_work             test_work;
        u32                             test_interval;
        unsigned long                   test_end;
@@ -28,6 +35,12 @@ struct br_mrp {
        u32                             test_max_miss;
        bool                            test_monitor;
 
+       struct delayed_work             in_test_work;
+       u32                             in_test_interval;
+       unsigned long                   in_test_end;
+       u32                             in_test_count_miss;
+       u32                             in_test_max_miss;
+
        u32                             seq_id;
 
        struct rcu_head                 rcu;
@@ -44,6 +57,10 @@ int br_mrp_set_ring_state(struct net_bridge *br,
                          struct br_mrp_ring_state *state);
 int br_mrp_set_ring_role(struct net_bridge *br, struct br_mrp_ring_role *role);
 int br_mrp_start_test(struct net_bridge *br, struct br_mrp_start_test *test);
+int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state);
+int br_mrp_set_in_role(struct net_bridge *br, struct br_mrp_in_role *role);
+int br_mrp_start_in_test(struct net_bridge *br,
+                        struct br_mrp_start_in_test *test);
 
 /* br_mrp_switchdev.c */
 int br_mrp_switchdev_add(struct net_bridge *br, struct br_mrp *mrp);
@@ -59,8 +76,16 @@ int br_mrp_port_switchdev_set_state(struct net_bridge_port *p,
                                    enum br_mrp_port_state_type state);
 int br_mrp_port_switchdev_set_role(struct net_bridge_port *p,
                                   enum br_mrp_port_role_type role);
+int br_mrp_switchdev_set_in_role(struct net_bridge *br, struct br_mrp *mrp,
+                                u16 in_id, u32 ring_id,
+                                enum br_mrp_in_role_type role);
+int br_mrp_switchdev_set_in_state(struct net_bridge *br, struct br_mrp *mrp,
+                                 enum br_mrp_in_state_type state);
+int br_mrp_switchdev_send_in_test(struct net_bridge *br, struct br_mrp *mrp,
+                                 u32 interval, u8 max_miss, u32 period);
 
 /* br_mrp_netlink.c  */
-int br_mrp_port_open(struct net_device *dev, u8 loc);
+int br_mrp_ring_port_open(struct net_device *dev, u8 loc);
+int br_mrp_in_port_open(struct net_device *dev, u8 loc);
 
 #endif /* _BR_PRIVATE_MRP_H */
index c83ffe9..d35173e 100644 (file)
@@ -1063,14 +1063,13 @@ free_counterstmp:
 }
 
 /* replace the table */
-static int do_replace(struct net *net, const void __user *user,
-                     unsigned int len)
+static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
 {
        int ret, countersize;
        struct ebt_table_info *newinfo;
        struct ebt_replace tmp;
 
-       if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
+       if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
                return -EFAULT;
 
        if (len != sizeof(tmp) + tmp.entries_size)
@@ -1242,9 +1241,8 @@ void ebt_unregister_table(struct net *net, struct ebt_table *table,
 
 /* userspace just supplied us with counters */
 static int do_update_counters(struct net *net, const char *name,
-                               struct ebt_counter __user *counters,
-                               unsigned int num_counters,
-                               const void __user *user, unsigned int len)
+                             struct ebt_counter __user *counters,
+                             unsigned int num_counters, unsigned int len)
 {
        int i, ret;
        struct ebt_counter *tmp;
@@ -1287,19 +1285,18 @@ free_tmp:
        return ret;
 }
 
-static int update_counters(struct net *net, const void __user *user,
-                           unsigned int len)
+static int update_counters(struct net *net, sockptr_t arg, unsigned int len)
 {
        struct ebt_replace hlp;
 
-       if (copy_from_user(&hlp, user, sizeof(hlp)))
+       if (copy_from_sockptr(&hlp, arg, sizeof(hlp)))
                return -EFAULT;
 
        if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
                return -EINVAL;
 
        return do_update_counters(net, hlp.name, hlp.counters,
-                               hlp.num_counters, user, len);
+                                 hlp.num_counters, len);
 }
 
 static inline int ebt_obj_to_user(char __user *um, const char *_name,
@@ -1451,86 +1448,6 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
           ebt_entry_to_user, entries, tmp.entries);
 }
 
-static int do_ebt_set_ctl(struct sock *sk,
-       int cmd, void __user *user, unsigned int len)
-{
-       int ret;
-       struct net *net = sock_net(sk);
-
-       if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
-               return -EPERM;
-
-       switch (cmd) {
-       case EBT_SO_SET_ENTRIES:
-               ret = do_replace(net, user, len);
-               break;
-       case EBT_SO_SET_COUNTERS:
-               ret = update_counters(net, user, len);
-               break;
-       default:
-               ret = -EINVAL;
-       }
-       return ret;
-}
-
-static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
-{
-       int ret;
-       struct ebt_replace tmp;
-       struct ebt_table *t;
-       struct net *net = sock_net(sk);
-
-       if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
-               return -EPERM;
-
-       if (copy_from_user(&tmp, user, sizeof(tmp)))
-               return -EFAULT;
-
-       tmp.name[sizeof(tmp.name) - 1] = '\0';
-
-       t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
-       if (!t)
-               return ret;
-
-       switch (cmd) {
-       case EBT_SO_GET_INFO:
-       case EBT_SO_GET_INIT_INFO:
-               if (*len != sizeof(struct ebt_replace)) {
-                       ret = -EINVAL;
-                       mutex_unlock(&ebt_mutex);
-                       break;
-               }
-               if (cmd == EBT_SO_GET_INFO) {
-                       tmp.nentries = t->private->nentries;
-                       tmp.entries_size = t->private->entries_size;
-                       tmp.valid_hooks = t->valid_hooks;
-               } else {
-                       tmp.nentries = t->table->nentries;
-                       tmp.entries_size = t->table->entries_size;
-                       tmp.valid_hooks = t->table->valid_hooks;
-               }
-               mutex_unlock(&ebt_mutex);
-               if (copy_to_user(user, &tmp, *len) != 0) {
-                       ret = -EFAULT;
-                       break;
-               }
-               ret = 0;
-               break;
-
-       case EBT_SO_GET_ENTRIES:
-       case EBT_SO_GET_INIT_ENTRIES:
-               ret = copy_everything_to_user(t, user, len, cmd);
-               mutex_unlock(&ebt_mutex);
-               break;
-
-       default:
-               mutex_unlock(&ebt_mutex);
-               ret = -EINVAL;
-       }
-
-       return ret;
-}
-
 #ifdef CONFIG_COMPAT
 /* 32 bit-userspace compatibility definitions. */
 struct compat_ebt_replace {
@@ -2160,7 +2077,7 @@ static int compat_copy_entries(unsigned char *data, unsigned int size_user,
 
 
 static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
-                                           void __user *user, unsigned int len)
+                                            sockptr_t arg, unsigned int len)
 {
        struct compat_ebt_replace tmp;
        int i;
@@ -2168,7 +2085,7 @@ static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
        if (len < sizeof(tmp))
                return -EINVAL;
 
-       if (copy_from_user(&tmp, user, sizeof(tmp)))
+       if (copy_from_sockptr(&tmp, arg, sizeof(tmp)))
                return -EFAULT;
 
        if (len != sizeof(tmp) + tmp.entries_size)
@@ -2195,8 +2112,7 @@ static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
        return 0;
 }
 
-static int compat_do_replace(struct net *net, void __user *user,
-                            unsigned int len)
+static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
 {
        int ret, i, countersize, size64;
        struct ebt_table_info *newinfo;
@@ -2204,10 +2120,10 @@ static int compat_do_replace(struct net *net, void __user *user,
        struct ebt_entries_buf_state state;
        void *entries_tmp;
 
-       ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
+       ret = compat_copy_ebt_replace_from_user(&tmp, arg, len);
        if (ret) {
                /* try real handler in case userland supplied needed padding */
-               if (ret == -EINVAL && do_replace(net, user, len) == 0)
+               if (ret == -EINVAL && do_replace(net, arg, len) == 0)
                        ret = 0;
                return ret;
        }
@@ -2298,42 +2214,20 @@ out_unlock:
        goto free_entries;
 }
 
-static int compat_update_counters(struct net *net, void __user *user,
+static int compat_update_counters(struct net *net, sockptr_t arg,
                                  unsigned int len)
 {
        struct compat_ebt_replace hlp;
 
-       if (copy_from_user(&hlp, user, sizeof(hlp)))
+       if (copy_from_sockptr(&hlp, arg, sizeof(hlp)))
                return -EFAULT;
 
        /* try real handler in case userland supplied needed padding */
        if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
-               return update_counters(net, user, len);
+               return update_counters(net, arg, len);
 
        return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
-                                       hlp.num_counters, user, len);
-}
-
-static int compat_do_ebt_set_ctl(struct sock *sk,
-               int cmd, void __user *user, unsigned int len)
-{
-       int ret;
-       struct net *net = sock_net(sk);
-
-       if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
-               return -EPERM;
-
-       switch (cmd) {
-       case EBT_SO_SET_ENTRIES:
-               ret = compat_do_replace(net, user, len);
-               break;
-       case EBT_SO_SET_COUNTERS:
-               ret = compat_update_counters(net, user, len);
-               break;
-       default:
-               ret = -EINVAL;
-       }
-       return ret;
+                                 hlp.num_counters, len);
 }
 
 static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
@@ -2344,14 +2238,6 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
        struct ebt_table *t;
        struct net *net = sock_net(sk);
 
-       if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
-               return -EPERM;
-
-       /* try real handler in case userland supplied needed padding */
-       if ((cmd == EBT_SO_GET_INFO ||
-            cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
-                       return do_ebt_get_ctl(sk, cmd, user, len);
-
        if (copy_from_user(&tmp, user, sizeof(tmp)))
                return -EFAULT;
 
@@ -2413,20 +2299,112 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
 }
 #endif
 
+static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
+{
+       struct net *net = sock_net(sk);
+       struct ebt_replace tmp;
+       struct ebt_table *t;
+       int ret;
+
+       if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+               return -EPERM;
+
+#ifdef CONFIG_COMPAT
+       /* try real handler in case userland supplied needed padding */
+       if (in_compat_syscall() &&
+           ((cmd != EBT_SO_GET_INFO && cmd != EBT_SO_GET_INIT_INFO) ||
+            *len != sizeof(tmp)))
+               return compat_do_ebt_get_ctl(sk, cmd, user, len);
+#endif
+
+       if (copy_from_user(&tmp, user, sizeof(tmp)))
+               return -EFAULT;
+
+       tmp.name[sizeof(tmp.name) - 1] = '\0';
+
+       t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
+       if (!t)
+               return ret;
+
+       switch (cmd) {
+       case EBT_SO_GET_INFO:
+       case EBT_SO_GET_INIT_INFO:
+               if (*len != sizeof(struct ebt_replace)) {
+                       ret = -EINVAL;
+                       mutex_unlock(&ebt_mutex);
+                       break;
+               }
+               if (cmd == EBT_SO_GET_INFO) {
+                       tmp.nentries = t->private->nentries;
+                       tmp.entries_size = t->private->entries_size;
+                       tmp.valid_hooks = t->valid_hooks;
+               } else {
+                       tmp.nentries = t->table->nentries;
+                       tmp.entries_size = t->table->entries_size;
+                       tmp.valid_hooks = t->table->valid_hooks;
+               }
+               mutex_unlock(&ebt_mutex);
+               if (copy_to_user(user, &tmp, *len) != 0) {
+                       ret = -EFAULT;
+                       break;
+               }
+               ret = 0;
+               break;
+
+       case EBT_SO_GET_ENTRIES:
+       case EBT_SO_GET_INIT_ENTRIES:
+               ret = copy_everything_to_user(t, user, len, cmd);
+               mutex_unlock(&ebt_mutex);
+               break;
+
+       default:
+               mutex_unlock(&ebt_mutex);
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
+static int do_ebt_set_ctl(struct sock *sk, int cmd, sockptr_t arg,
+               unsigned int len)
+{
+       struct net *net = sock_net(sk);
+       int ret;
+
+       if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+               return -EPERM;
+
+       switch (cmd) {
+       case EBT_SO_SET_ENTRIES:
+#ifdef CONFIG_COMPAT
+               if (in_compat_syscall())
+                       ret = compat_do_replace(net, arg, len);
+               else
+#endif
+                       ret = do_replace(net, arg, len);
+               break;
+       case EBT_SO_SET_COUNTERS:
+#ifdef CONFIG_COMPAT
+               if (in_compat_syscall())
+                       ret = compat_update_counters(net, arg, len);
+               else
+#endif
+                       ret = update_counters(net, arg, len);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+       return ret;
+}
+
 static struct nf_sockopt_ops ebt_sockopts = {
        .pf             = PF_INET,
        .set_optmin     = EBT_BASE_CTL,
        .set_optmax     = EBT_SO_SET_MAX + 1,
        .set            = do_ebt_set_ctl,
-#ifdef CONFIG_COMPAT
-       .compat_set     = compat_do_ebt_set_ctl,
-#endif
        .get_optmin     = EBT_BASE_CTL,
        .get_optmax     = EBT_SO_GET_MAX + 1,
        .get            = do_ebt_get_ctl,
-#ifdef CONFIG_COMPAT
-       .compat_get     = compat_do_ebt_get_ctl,
-#endif
        .owner          = THIS_MODULE,
 };
 
index 7c9e92b..8e8ffac 100644 (file)
@@ -155,3 +155,4 @@ module_exit(nft_meta_bridge_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("wenxu <wenxu@ucloud.cn>");
 MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "meta");
+MODULE_DESCRIPTION("Support for bridge dedicated meta key");
index f48cf4c..deae2c9 100644 (file)
@@ -455,3 +455,4 @@ module_exit(nft_reject_bridge_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "reject");
+MODULE_DESCRIPTION("Reject packets from bridge via nftables");
index ef14da5..3ad0a1d 100644 (file)
@@ -669,8 +669,8 @@ out_err:
        return sent ? : err;
 }
 
-static int setsockopt(struct socket *sock,
-                     int lvl, int opt, char __user *ov, unsigned int ol)
+static int setsockopt(struct socket *sock, int lvl, int opt, sockptr_t ov,
+               unsigned int ol)
 {
        struct sock *sk = sock->sk;
        struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
@@ -685,7 +685,7 @@ static int setsockopt(struct socket *sock,
                        return -EINVAL;
                if (lvl != SOL_CAIF)
                        goto bad_sol;
-               if (copy_from_user(&linksel, ov, sizeof(int)))
+               if (copy_from_sockptr(&linksel, ov, sizeof(int)))
                        return -EINVAL;
                lock_sock(&(cf_sk->sk));
                cf_sk->conn_req.link_selector = linksel;
@@ -699,7 +699,7 @@ static int setsockopt(struct socket *sock,
                        return -ENOPROTOOPT;
                lock_sock(&(cf_sk->sk));
                if (ol > sizeof(cf_sk->conn_req.param.data) ||
-                       copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
+                   copy_from_sockptr(&cf_sk->conn_req.param.data, ov, ol)) {
                        release_sock(&cf_sk->sk);
                        return -EINVAL;
                }
@@ -981,7 +981,6 @@ static const struct proto_ops caif_seqpacket_ops = {
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
        .setsockopt = setsockopt,
-       .getsockopt = sock_no_getsockopt,
        .sendmsg = caif_seqpkt_sendmsg,
        .recvmsg = caif_seqpkt_recvmsg,
        .mmap = sock_no_mmap,
@@ -1002,7 +1001,6 @@ static const struct proto_ops caif_stream_ops = {
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
        .setsockopt = setsockopt,
-       .getsockopt = sock_no_getsockopt,
        .sendmsg = caif_stream_sendmsg,
        .recvmsg = caif_stream_recvmsg,
        .mmap = sock_no_mmap,
index 128d37a..5c06404 100644 (file)
@@ -410,6 +410,7 @@ static struct hlist_head *can_rcv_list_find(canid_t *can_id, canid_t *mask,
 
 /**
  * can_rx_register - subscribe CAN frames from a specific interface
+ * @net: the applicable net namespace
  * @dev: pointer to netdevice (NULL => subcribe from 'all' CAN devices list)
  * @can_id: CAN identifier (see description)
  * @mask: CAN mask (see description)
@@ -498,6 +499,7 @@ static void can_rx_delete_receiver(struct rcu_head *rp)
 
 /**
  * can_rx_unregister - unsubscribe CAN frames from a specific interface
+ * @net: the applicable net namespace
  * @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list)
  * @can_id: CAN identifier
  * @mask: CAN mask
index c96fa0f..d14ea12 100644 (file)
@@ -1648,8 +1648,6 @@ static const struct proto_ops bcm_ops = {
        .gettstamp     = sock_gettstamp,
        .listen        = sock_no_listen,
        .shutdown      = sock_no_shutdown,
-       .setsockopt    = sock_no_setsockopt,
-       .getsockopt    = sock_no_getsockopt,
        .sendmsg       = bcm_sendmsg,
        .recvmsg       = bcm_recvmsg,
        .mmap          = sock_no_mmap,
index f758742..78ff9b3 100644 (file)
@@ -627,14 +627,14 @@ static int j1939_sk_release(struct socket *sock)
        return 0;
 }
 
-static int j1939_sk_setsockopt_flag(struct j1939_sock *jsk, char __user *optval,
+static int j1939_sk_setsockopt_flag(struct j1939_sock *jsk, sockptr_t optval,
                                    unsigned int optlen, int flag)
 {
        int tmp;
 
        if (optlen != sizeof(tmp))
                return -EINVAL;
-       if (copy_from_user(&tmp, optval, optlen))
+       if (copy_from_sockptr(&tmp, optval, optlen))
                return -EFAULT;
        lock_sock(&jsk->sk);
        if (tmp)
@@ -646,7 +646,7 @@ static int j1939_sk_setsockopt_flag(struct j1939_sock *jsk, char __user *optval,
 }
 
 static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
-                              char __user *optval, unsigned int optlen)
+                              sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct j1939_sock *jsk = j1939_sk(sk);
@@ -658,7 +658,7 @@ static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
 
        switch (optname) {
        case SO_J1939_FILTER:
-               if (optval) {
+               if (!sockptr_is_null(optval)) {
                        struct j1939_filter *f;
                        int c;
 
@@ -670,7 +670,7 @@ static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
                                return -EINVAL;
 
                        count = optlen / sizeof(*filters);
-                       filters = memdup_user(optval, optlen);
+                       filters = memdup_sockptr(optval, optlen);
                        if (IS_ERR(filters))
                                return PTR_ERR(filters);
 
@@ -703,7 +703,7 @@ static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
        case SO_J1939_SEND_PRIO:
                if (optlen != sizeof(tmp))
                        return -EINVAL;
-               if (copy_from_user(&tmp, optval, optlen))
+               if (copy_from_sockptr(&tmp, optval, optlen))
                        return -EFAULT;
                if (tmp < 0 || tmp > 7)
                        return -EDOM;
index 59c039d..94a9405 100644 (file)
@@ -485,7 +485,7 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
 }
 
 static int raw_setsockopt(struct socket *sock, int level, int optname,
-                         char __user *optval, unsigned int optlen)
+                         sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct raw_sock *ro = raw_sk(sk);
@@ -511,11 +511,11 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 
                if (count > 1) {
                        /* filter does not fit into dfilter => alloc space */
-                       filter = memdup_user(optval, optlen);
+                       filter = memdup_sockptr(optval, optlen);
                        if (IS_ERR(filter))
                                return PTR_ERR(filter);
                } else if (count == 1) {
-                       if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
+                       if (copy_from_sockptr(&sfilter, optval, sizeof(sfilter)))
                                return -EFAULT;
                }
 
@@ -568,7 +568,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
                if (optlen != sizeof(err_mask))
                        return -EINVAL;
 
-               if (copy_from_user(&err_mask, optval, optlen))
+               if (copy_from_sockptr(&err_mask, optval, optlen))
                        return -EFAULT;
 
                err_mask &= CAN_ERR_MASK;
@@ -607,7 +607,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
                if (optlen != sizeof(ro->loopback))
                        return -EINVAL;
 
-               if (copy_from_user(&ro->loopback, optval, optlen))
+               if (copy_from_sockptr(&ro->loopback, optval, optlen))
                        return -EFAULT;
 
                break;
@@ -616,7 +616,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
                if (optlen != sizeof(ro->recv_own_msgs))
                        return -EINVAL;
 
-               if (copy_from_user(&ro->recv_own_msgs, optval, optlen))
+               if (copy_from_sockptr(&ro->recv_own_msgs, optval, optlen))
                        return -EFAULT;
 
                break;
@@ -625,7 +625,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
                if (optlen != sizeof(ro->fd_frames))
                        return -EINVAL;
 
-               if (copy_from_user(&ro->fd_frames, optval, optlen))
+               if (copy_from_sockptr(&ro->fd_frames, optval, optlen))
                        return -EFAULT;
 
                break;
@@ -634,7 +634,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
                if (optlen != sizeof(ro->join_filters))
                        return -EINVAL;
 
-               if (copy_from_user(&ro->join_filters, optval, optlen))
+               if (copy_from_sockptr(&ro->join_filters, optval, optlen))
                        return -EFAULT;
 
                break;
index afe0e81..4e7edd7 100644 (file)
@@ -332,6 +332,7 @@ struct ceph_options *ceph_alloc_options(void)
        opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT;
        opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT;
        opt->osd_request_timeout = CEPH_OSD_REQUEST_TIMEOUT_DEFAULT;
+       opt->read_from_replica = CEPH_READ_FROM_REPLICA_DEFAULT;
        return opt;
 }
 EXPORT_SYMBOL(ceph_alloc_options);
@@ -490,16 +491,13 @@ int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt,
        case Opt_read_from_replica:
                switch (result.uint_32) {
                case Opt_read_from_replica_no:
-                       opt->osd_req_flags &= ~(CEPH_OSD_FLAG_BALANCE_READS |
-                                               CEPH_OSD_FLAG_LOCALIZE_READS);
+                       opt->read_from_replica = 0;
                        break;
                case Opt_read_from_replica_balance:
-                       opt->osd_req_flags |= CEPH_OSD_FLAG_BALANCE_READS;
-                       opt->osd_req_flags &= ~CEPH_OSD_FLAG_LOCALIZE_READS;
+                       opt->read_from_replica = CEPH_OSD_FLAG_BALANCE_READS;
                        break;
                case Opt_read_from_replica_localize:
-                       opt->osd_req_flags |= CEPH_OSD_FLAG_LOCALIZE_READS;
-                       opt->osd_req_flags &= ~CEPH_OSD_FLAG_BALANCE_READS;
+                       opt->read_from_replica = CEPH_OSD_FLAG_LOCALIZE_READS;
                        break;
                default:
                        BUG();
@@ -613,9 +611,9 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
                }
                seq_putc(m, ',');
        }
-       if (opt->osd_req_flags & CEPH_OSD_FLAG_BALANCE_READS) {
+       if (opt->read_from_replica == CEPH_OSD_FLAG_BALANCE_READS) {
                seq_puts(m, "read_from_replica=balance,");
-       } else if (opt->osd_req_flags & CEPH_OSD_FLAG_LOCALIZE_READS) {
+       } else if (opt->read_from_replica == CEPH_OSD_FLAG_LOCALIZE_READS) {
                seq_puts(m, "read_from_replica=localize,");
        }
 
index 4fea3c3..2db8b44 100644 (file)
@@ -445,8 +445,10 @@ static void target_copy(struct ceph_osd_request_target *dest,
        dest->size = src->size;
        dest->min_size = src->min_size;
        dest->sort_bitwise = src->sort_bitwise;
+       dest->recovery_deletes = src->recovery_deletes;
 
        dest->flags = src->flags;
+       dest->used_replica = src->used_replica;
        dest->paused = src->paused;
 
        dest->epoch = src->epoch;
@@ -1117,10 +1119,10 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                                       truncate_size, truncate_seq);
        }
 
-       req->r_flags = flags;
        req->r_base_oloc.pool = layout->pool_id;
        req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
        ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);
+       req->r_flags = flags | osdc->client->options->read_from_replica;
 
        req->r_snapid = vino.snap;
        if (flags & CEPH_OSD_FLAG_WRITE)
@@ -2431,14 +2433,11 @@ promote:
 
 static void account_request(struct ceph_osd_request *req)
 {
-       struct ceph_osd_client *osdc = req->r_osdc;
-
        WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
        WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));
 
        req->r_flags |= CEPH_OSD_FLAG_ONDISK;
-       req->r_flags |= osdc->client->options->osd_req_flags;
-       atomic_inc(&osdc->num_requests);
+       atomic_inc(&req->r_osdc->num_requests);
 
        req->r_start_stamp = jiffies;
        req->r_start_latency = ktime_get();
index 5e3041a..091875b 100644 (file)
@@ -335,120 +335,6 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
        __scm_destroy(scm);
 }
 
-/* allocate a 64-bit sock_fprog on the user stack for duration of syscall. */
-struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval)
-{
-       struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval;
-       struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog));
-       struct compat_sock_fprog f32;
-       struct sock_fprog f;
-
-       if (copy_from_user(&f32, fprog32, sizeof(*fprog32)))
-               return NULL;
-       memset(&f, 0, sizeof(f));
-       f.len = f32.len;
-       f.filter = compat_ptr(f32.filter);
-       if (copy_to_user(kfprog, &f, sizeof(struct sock_fprog)))
-               return NULL;
-
-       return kfprog;
-}
-EXPORT_SYMBOL_GPL(get_compat_bpf_fprog);
-
-static int do_set_attach_filter(struct socket *sock, int level, int optname,
-                               char __user *optval, unsigned int optlen)
-{
-       struct sock_fprog __user *kfprog;
-
-       kfprog = get_compat_bpf_fprog(optval);
-       if (!kfprog)
-               return -EFAULT;
-
-       return sock_setsockopt(sock, level, optname, (char __user *)kfprog,
-                             sizeof(struct sock_fprog));
-}
-
-static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
-                               char __user *optval, unsigned int optlen)
-{
-       if (optname == SO_ATTACH_FILTER ||
-           optname == SO_ATTACH_REUSEPORT_CBPF)
-               return do_set_attach_filter(sock, level, optname,
-                                           optval, optlen);
-       return sock_setsockopt(sock, level, optname, optval, optlen);
-}
-
-static int __compat_sys_setsockopt(int fd, int level, int optname,
-                                  char __user *optval, unsigned int optlen)
-{
-       int err;
-       struct socket *sock;
-
-       if (optlen > INT_MAX)
-               return -EINVAL;
-
-       sock = sockfd_lookup(fd, &err);
-       if (sock) {
-               err = security_socket_setsockopt(sock, level, optname);
-               if (err) {
-                       sockfd_put(sock);
-                       return err;
-               }
-
-               if (level == SOL_SOCKET)
-                       err = compat_sock_setsockopt(sock, level,
-                                       optname, optval, optlen);
-               else if (sock->ops->compat_setsockopt)
-                       err = sock->ops->compat_setsockopt(sock, level,
-                                       optname, optval, optlen);
-               else
-                       err = sock->ops->setsockopt(sock, level,
-                                       optname, optval, optlen);
-               sockfd_put(sock);
-       }
-       return err;
-}
-
-COMPAT_SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
-                      char __user *, optval, unsigned int, optlen)
-{
-       return __compat_sys_setsockopt(fd, level, optname, optval, optlen);
-}
-
-static int __compat_sys_getsockopt(int fd, int level, int optname,
-                                  char __user *optval,
-                                  int __user *optlen)
-{
-       int err;
-       struct socket *sock = sockfd_lookup(fd, &err);
-
-       if (sock) {
-               err = security_socket_getsockopt(sock, level, optname);
-               if (err) {
-                       sockfd_put(sock);
-                       return err;
-               }
-
-               if (level == SOL_SOCKET)
-                       err = sock_getsockopt(sock, level,
-                                       optname, optval, optlen);
-               else if (sock->ops->compat_getsockopt)
-                       err = sock->ops->compat_getsockopt(sock, level,
-                                       optname, optval, optlen);
-               else
-                       err = sock->ops->getsockopt(sock, level,
-                                       optname, optval, optlen);
-               sockfd_put(sock);
-       }
-       return err;
-}
-
-COMPAT_SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
-                      char __user *, optval, int __user *, optlen)
-{
-       return __compat_sys_getsockopt(fd, level, optname, optval, optlen);
-}
-
 /* Argument list sizes for compat_sys_socketcall */
 #define AL(x) ((x) * sizeof(u32))
 static unsigned char nas[21] = {
@@ -608,13 +494,11 @@ COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
                ret = __sys_shutdown(a0, a1);
                break;
        case SYS_SETSOCKOPT:
-               ret = __compat_sys_setsockopt(a0, a1, a[2],
-                                             compat_ptr(a[3]), a[4]);
+               ret = __sys_setsockopt(a0, a1, a[2], compat_ptr(a[3]), a[4]);
                break;
        case SYS_GETSOCKOPT:
-               ret = __compat_sys_getsockopt(a0, a1, a[2],
-                                             compat_ptr(a[3]),
-                                             compat_ptr(a[4]));
+               ret = __sys_getsockopt(a0, a1, a[2], compat_ptr(a[3]),
+                                      compat_ptr(a[4]));
                break;
        case SYS_SENDMSG:
                ret = __compat_sys_sendmsg(a0, compat_ptr(a1), a[2]);
index d2c4d16..6f921c4 100644 (file)
@@ -11,8 +11,6 @@
 #include <uapi/linux/sock_diag.h>
 #include <uapi/linux/btf.h>
 
-static atomic_t cache_idx;
-
 #define SK_STORAGE_CREATE_FLAG_MASK                                    \
        (BPF_F_NO_PREALLOC | BPF_F_CLONE)
 
@@ -81,6 +79,9 @@ struct bpf_sk_storage_elem {
 #define SDATA(_SELEM) (&(_SELEM)->sdata)
 #define BPF_SK_STORAGE_CACHE_SIZE      16
 
+static DEFINE_SPINLOCK(cache_idx_lock);
+static u64 cache_idx_usage_counts[BPF_SK_STORAGE_CACHE_SIZE];
+
 struct bpf_sk_storage {
        struct bpf_sk_storage_data __rcu *cache[BPF_SK_STORAGE_CACHE_SIZE];
        struct hlist_head list; /* List of bpf_sk_storage_elem */
@@ -512,6 +513,37 @@ static int sk_storage_delete(struct sock *sk, struct bpf_map *map)
        return 0;
 }
 
+static u16 cache_idx_get(void)
+{
+       u64 min_usage = U64_MAX;
+       u16 i, res = 0;
+
+       spin_lock(&cache_idx_lock);
+
+       for (i = 0; i < BPF_SK_STORAGE_CACHE_SIZE; i++) {
+               if (cache_idx_usage_counts[i] < min_usage) {
+                       min_usage = cache_idx_usage_counts[i];
+                       res = i;
+
+                       /* Found a free cache_idx */
+                       if (!min_usage)
+                               break;
+               }
+       }
+       cache_idx_usage_counts[res]++;
+
+       spin_unlock(&cache_idx_lock);
+
+       return res;
+}
+
+static void cache_idx_free(u16 idx)
+{
+       spin_lock(&cache_idx_lock);
+       cache_idx_usage_counts[idx]--;
+       spin_unlock(&cache_idx_lock);
+}
+
 /* Called by __sk_destruct() & bpf_sk_storage_clone() */
 void bpf_sk_storage_free(struct sock *sk)
 {
@@ -560,6 +592,8 @@ static void bpf_sk_storage_map_free(struct bpf_map *map)
 
        smap = (struct bpf_sk_storage_map *)map;
 
+       cache_idx_free(smap->cache_idx);
+
        /* Note that this map might be concurrently cloned from
         * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
         * RCU read section to finish before proceeding. New RCU
@@ -673,8 +707,7 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
        }
 
        smap->elem_size = sizeof(struct bpf_sk_storage_elem) + attr->value_size;
-       smap->cache_idx = (unsigned int)atomic_inc_return(&cache_idx) %
-               BPF_SK_STORAGE_CACHE_SIZE;
+       smap->cache_idx = cache_idx_get();
 
        return &smap->map;
 }
@@ -886,6 +919,7 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
        return -ENOENT;
 }
 
+static int sk_storage_map_btf_id;
 const struct bpf_map_ops sk_storage_map_ops = {
        .map_alloc_check = bpf_sk_storage_map_alloc_check,
        .map_alloc = bpf_sk_storage_map_alloc,
@@ -895,6 +929,8 @@ const struct bpf_map_ops sk_storage_map_ops = {
        .map_update_elem = bpf_fd_sk_storage_update_elem,
        .map_delete_elem = bpf_fd_sk_storage_delete_elem,
        .map_check_btf = bpf_sk_storage_map_check_btf,
+       .map_btf_name = "bpf_sk_storage_map",
+       .map_btf_id = &sk_storage_map_btf_id,
 };
 
 const struct bpf_func_proto bpf_sk_storage_get_proto = {
index 6bc2388..a986b07 100644 (file)
 #include <linux/net_namespace.h>
 #include <linux/indirect_call_wrapper.h>
 #include <net/devlink.h>
+#include <linux/pm_runtime.h>
 
 #include "net-sysfs.h"
 
@@ -1492,8 +1493,13 @@ static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack)
 
        ASSERT_RTNL();
 
-       if (!netif_device_present(dev))
-               return -ENODEV;
+       if (!netif_device_present(dev)) {
+               /* may be detached because parent is runtime-suspended */
+               if (dev->dev.parent)
+                       pm_runtime_resume(dev->dev.parent);
+               if (!netif_device_present(dev))
+                       return -ENODEV;
+       }
 
        /* Block netpoll from trying to do any rx path servicing.
         * If we don't do this there is a chance ndo_poll_controller
@@ -4192,10 +4198,12 @@ int dev_direct_xmit(struct sk_buff *skb, u16 queue_id)
 
        local_bh_disable();
 
+       dev_xmit_recursion_inc();
        HARD_TX_LOCK(dev, txq, smp_processor_id());
        if (!netif_xmit_frozen_or_drv_stopped(txq))
                ret = netdev_start_xmit(skb, dev, txq, false);
        HARD_TX_UNLOCK(dev, txq);
+       dev_xmit_recursion_dec();
 
        local_bh_enable();
 
@@ -5440,6 +5448,8 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
                for (i = 0; i < new->aux->used_map_cnt; i++) {
                        if (dev_map_can_have_prog(new->aux->used_maps[i]))
                                return -EINVAL;
+                       if (cpu_map_prog_allowed(new->aux->used_maps[i]))
+                               return -EINVAL;
                }
        }
 
@@ -5583,7 +5593,7 @@ void netif_receive_skb_list(struct list_head *head)
 }
 EXPORT_SYMBOL(netif_receive_skb_list);
 
-DEFINE_PER_CPU(struct work_struct, flush_works);
+static DEFINE_PER_CPU(struct work_struct, flush_works);
 
 /* Network device is going away, flush any packets still pending */
 static void flush_backlog(struct work_struct *work)
@@ -6683,7 +6693,9 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
                trace_napi_poll(n, work, weight);
        }
 
-       WARN_ON_ONCE(work > weight);
+       if (unlikely(work > weight))
+               pr_err_once("NAPI poll function %pS returned %d, exceeding its budget of %d.\n",
+                           n->poll, work, weight);
 
        if (likely(work < weight))
                goto out_unlock;
@@ -7898,6 +7910,7 @@ EXPORT_SYMBOL(netdev_bonding_info_change);
 
 /**
  * netdev_get_xmit_slave - Get the xmit slave of master device
+ * @dev: device
  * @skb: The packet
  * @all_slaves: assume all the slaves are active
  *
@@ -8864,6 +8877,13 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
                        return -EINVAL;
                }
 
+               if (prog->expected_attach_type == BPF_XDP_CPUMAP) {
+                       NL_SET_ERR_MSG(extack,
+                                      "BPF_XDP_CPUMAP programs can not be attached to a device");
+                       bpf_prog_put(prog);
+                       return -EINVAL;
+               }
+
                /* prog->aux->id may be 0 for orphaned device-bound progs */
                if (prog->aux->id && prog->aux->id == prog_id) {
                        bpf_prog_put(prog);
@@ -9547,6 +9567,13 @@ int register_netdevice(struct net_device *dev)
                rcu_barrier();
 
                dev->reg_state = NETREG_UNREGISTERED;
+               /* We should put the kobject that hold in
+                * netdev_unregister_kobject(), otherwise
+                * the net device cannot be freed when
+                * driver calls free_netdev(), because the
+                * kobject is being hold.
+                */
+               kobject_put(&dev->dev.kobj);
        }
        /*
         *      Prevent userspace races by waiting until the network
index 6393ba9..54cd568 100644 (file)
@@ -690,6 +690,15 @@ void dev_uc_unsync(struct net_device *to, struct net_device *from)
        if (to->addr_len != from->addr_len)
                return;
 
+       /* netif_addr_lock_bh() uses lockdep subclass 0, this is okay for two
+        * reasons:
+        * 1) This is always called without any addr_list_lock, so as the
+        *    outermost one here, it must be 0.
+        * 2) This is called by some callers after unlinking the upper device,
+        *    so the dev->lower_level becomes 1 again.
+        * Therefore, the subclass for 'from' is 0, for 'to' is either 1 or
+        * larger.
+        */
        netif_addr_lock_bh(from);
        netif_addr_lock_nested(to);
        __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
@@ -911,6 +920,7 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
        if (to->addr_len != from->addr_len)
                return;
 
+       /* See the above comments inside dev_uc_unsync(). */
        netif_addr_lock_bh(from);
        netif_addr_lock_nested(to);
        __hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
index 547b587..b2cf9b7 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/net_tstamp.h>
 #include <linux/wireless.h>
+#include <net/dsa.h>
 #include <net/wext.h>
 
 /*
@@ -225,6 +226,26 @@ static int net_hwtstamp_validate(struct ifreq *ifr)
        return 0;
 }
 
+static int dev_do_ioctl(struct net_device *dev,
+                       struct ifreq *ifr, unsigned int cmd)
+{
+       const struct net_device_ops *ops = dev->netdev_ops;
+       int err = -EOPNOTSUPP;
+
+       err = dsa_ndo_do_ioctl(dev, ifr, cmd);
+       if (err == 0 || err != -EOPNOTSUPP)
+               return err;
+
+       if (ops->ndo_do_ioctl) {
+               if (netif_device_present(dev))
+                       err = ops->ndo_do_ioctl(dev, ifr, cmd);
+               else
+                       err = -ENODEV;
+       }
+
+       return err;
+}
+
 /*
  *     Perform the SIOCxIFxxx calls, inside rtnl_lock()
  */
@@ -323,13 +344,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
                    cmd == SIOCSHWTSTAMP ||
                    cmd == SIOCGHWTSTAMP ||
                    cmd == SIOCWANDEV) {
-                       err = -EOPNOTSUPP;
-                       if (ops->ndo_do_ioctl) {
-                               if (netif_device_present(dev))
-                                       err = ops->ndo_do_ioctl(dev, ifr, cmd);
-                               else
-                                       err = -ENODEV;
-                       }
+                       err = dev_do_ioctl(dev, ifr, cmd);
                } else
                        err = -EINVAL;
 
index 2cafbc8..0ca8919 100644 (file)
@@ -85,6 +85,10 @@ EXPORT_SYMBOL(devlink_dpipe_header_ipv6);
 EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwmsg);
 EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwerr);
 
+static const struct nla_policy devlink_function_nl_policy[DEVLINK_PORT_FUNCTION_ATTR_MAX + 1] = {
+       [DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR] = { .type = NLA_BINARY },
+};
+
 static LIST_HEAD(devlink_list);
 
 /* devlink_mutex
@@ -382,19 +386,19 @@ devlink_region_snapshot_get_by_id(struct devlink_region *region, u32 id)
        return NULL;
 }
 
-#define DEVLINK_NL_FLAG_NEED_DEVLINK   BIT(0)
-#define DEVLINK_NL_FLAG_NEED_PORT      BIT(1)
-#define DEVLINK_NL_FLAG_NEED_SB                BIT(2)
+#define DEVLINK_NL_FLAG_NEED_PORT              BIT(0)
+#define DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT   BIT(1)
 
 /* The per devlink instance lock is taken by default in the pre-doit
  * operation, yet several commands do not require this. The global
  * devlink lock is taken and protects from disruption by user-calls.
  */
-#define DEVLINK_NL_FLAG_NO_LOCK                BIT(3)
+#define DEVLINK_NL_FLAG_NO_LOCK                        BIT(2)
 
 static int devlink_nl_pre_doit(const struct genl_ops *ops,
                               struct sk_buff *skb, struct genl_info *info)
 {
+       struct devlink_port *devlink_port;
        struct devlink *devlink;
        int err;
 
@@ -406,27 +410,18 @@ static int devlink_nl_pre_doit(const struct genl_ops *ops,
        }
        if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
                mutex_lock(&devlink->lock);
-       if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK) {
-               info->user_ptr[0] = devlink;
-       } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) {
-               struct devlink_port *devlink_port;
-
+       info->user_ptr[0] = devlink;
+       if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_PORT) {
                devlink_port = devlink_port_get_from_info(devlink, info);
                if (IS_ERR(devlink_port)) {
                        err = PTR_ERR(devlink_port);
                        goto unlock;
                }
-               info->user_ptr[0] = devlink_port;
-       }
-       if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_SB) {
-               struct devlink_sb *devlink_sb;
-
-               devlink_sb = devlink_sb_get_from_info(devlink, info);
-               if (IS_ERR(devlink_sb)) {
-                       err = PTR_ERR(devlink_sb);
-                       goto unlock;
-               }
-               info->user_ptr[1] = devlink_sb;
+               info->user_ptr[1] = devlink_port;
+       } else if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT) {
+               devlink_port = devlink_port_get_from_info(devlink, info);
+               if (!IS_ERR(devlink_port))
+                       info->user_ptr[1] = devlink_port;
        }
        return 0;
 
@@ -442,16 +437,8 @@ static void devlink_nl_post_doit(const struct genl_ops *ops,
 {
        struct devlink *devlink;
 
-       /* When devlink changes netns, it would not be found
-        * by devlink_get_from_info(). So try if it is stored first.
-        */
-       if (ops->internal_flags & DEVLINK_NL_FLAG_NEED_DEVLINK) {
-               devlink = info->user_ptr[0];
-       } else {
-               devlink = devlink_get_from_info(info);
-               WARN_ON(IS_ERR(devlink));
-       }
-       if (!IS_ERR(devlink) && ~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
+       devlink = info->user_ptr[0];
+       if (~ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK)
                mutex_unlock(&devlink->lock);
        mutex_unlock(&devlink_mutex);
 }
@@ -524,8 +511,14 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg,
 {
        struct devlink_port_attrs *attrs = &devlink_port->attrs;
 
-       if (!attrs->set)
+       if (!devlink_port->attrs_set)
                return 0;
+       if (attrs->lanes) {
+               if (nla_put_u32(msg, DEVLINK_ATTR_PORT_LANES, attrs->lanes))
+                       return -EMSGSIZE;
+       }
+       if (nla_put_u8(msg, DEVLINK_ATTR_PORT_SPLITTABLE, attrs->splittable))
+               return -EMSGSIZE;
        if (nla_put_u16(msg, DEVLINK_ATTR_PORT_FLAVOUR, attrs->flavour))
                return -EMSGSIZE;
        switch (devlink_port->attrs.flavour) {
@@ -563,10 +556,54 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg,
        return 0;
 }
 
+static int
+devlink_nl_port_function_attrs_put(struct sk_buff *msg, struct devlink_port *port,
+                                  struct netlink_ext_ack *extack)
+{
+       struct devlink *devlink = port->devlink;
+       const struct devlink_ops *ops;
+       struct nlattr *function_attr;
+       bool empty_nest = true;
+       int err = 0;
+
+       function_attr = nla_nest_start_noflag(msg, DEVLINK_ATTR_PORT_FUNCTION);
+       if (!function_attr)
+               return -EMSGSIZE;
+
+       ops = devlink->ops;
+       if (ops->port_function_hw_addr_get) {
+               int hw_addr_len;
+               u8 hw_addr[MAX_ADDR_LEN];
+
+               err = ops->port_function_hw_addr_get(devlink, port, hw_addr, &hw_addr_len, extack);
+               if (err == -EOPNOTSUPP) {
+                       /* Port function attributes are optional for a port. If port doesn't
+                        * support function attribute, returning -EOPNOTSUPP is not an error.
+                        */
+                       err = 0;
+                       goto out;
+               } else if (err) {
+                       goto out;
+               }
+               err = nla_put(msg, DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, hw_addr_len, hw_addr);
+               if (err)
+                       goto out;
+               empty_nest = false;
+       }
+
+out:
+       if (err || empty_nest)
+               nla_nest_cancel(msg, function_attr);
+       else
+               nla_nest_end(msg, function_attr);
+       return err;
+}
+
 static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink,
                                struct devlink_port *devlink_port,
                                enum devlink_command cmd, u32 portid,
-                               u32 seq, int flags)
+                               u32 seq, int flags,
+                               struct netlink_ext_ack *extack)
 {
        void *hdr;
 
@@ -607,6 +644,8 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink,
        spin_unlock_bh(&devlink_port->type_lock);
        if (devlink_nl_port_attrs_put(msg, devlink_port))
                goto nla_put_failure;
+       if (devlink_nl_port_function_attrs_put(msg, devlink_port, extack))
+               goto nla_put_failure;
 
        genlmsg_end(msg, hdr);
        return 0;
@@ -634,7 +673,8 @@ static void devlink_port_notify(struct devlink_port *devlink_port,
        if (!msg)
                return;
 
-       err = devlink_nl_port_fill(msg, devlink, devlink_port, cmd, 0, 0, 0);
+       err = devlink_nl_port_fill(msg, devlink, devlink_port, cmd, 0, 0, 0,
+                                  NULL);
        if (err) {
                nlmsg_free(msg);
                return;
@@ -697,7 +737,7 @@ out:
 static int devlink_nl_cmd_port_get_doit(struct sk_buff *skb,
                                        struct genl_info *info)
 {
-       struct devlink_port *devlink_port = info->user_ptr[0];
+       struct devlink_port *devlink_port = info->user_ptr[1];
        struct devlink *devlink = devlink_port->devlink;
        struct sk_buff *msg;
        int err;
@@ -708,7 +748,8 @@ static int devlink_nl_cmd_port_get_doit(struct sk_buff *skb,
 
        err = devlink_nl_port_fill(msg, devlink, devlink_port,
                                   DEVLINK_CMD_PORT_NEW,
-                                  info->snd_portid, info->snd_seq, 0);
+                                  info->snd_portid, info->snd_seq, 0,
+                                  info->extack);
        if (err) {
                nlmsg_free(msg);
                return err;
@@ -740,7 +781,8 @@ static int devlink_nl_cmd_port_get_dumpit(struct sk_buff *msg,
                                                   DEVLINK_CMD_NEW,
                                                   NETLINK_CB(cb->skb).portid,
                                                   cb->nlh->nlmsg_seq,
-                                                  NLM_F_MULTI);
+                                                  NLM_F_MULTI,
+                                                  cb->extack);
                        if (err) {
                                mutex_unlock(&devlink->lock);
                                goto out;
@@ -778,10 +820,71 @@ static int devlink_port_type_set(struct devlink *devlink,
        return -EOPNOTSUPP;
 }
 
+static int
+devlink_port_function_hw_addr_set(struct devlink *devlink, struct devlink_port *port,
+                                 const struct nlattr *attr, struct netlink_ext_ack *extack)
+{
+       const struct devlink_ops *ops;
+       const u8 *hw_addr;
+       int hw_addr_len;
+       int err;
+
+       hw_addr = nla_data(attr);
+       hw_addr_len = nla_len(attr);
+       if (hw_addr_len > MAX_ADDR_LEN) {
+               NL_SET_ERR_MSG_MOD(extack, "Port function hardware address too long");
+               return -EINVAL;
+       }
+       if (port->type == DEVLINK_PORT_TYPE_ETH) {
+               if (hw_addr_len != ETH_ALEN) {
+                       NL_SET_ERR_MSG_MOD(extack, "Address must be 6 bytes for Ethernet device");
+                       return -EINVAL;
+               }
+               if (!is_unicast_ether_addr(hw_addr)) {
+                       NL_SET_ERR_MSG_MOD(extack, "Non-unicast hardware address unsupported");
+                       return -EINVAL;
+               }
+       }
+
+       ops = devlink->ops;
+       if (!ops->port_function_hw_addr_set) {
+               NL_SET_ERR_MSG_MOD(extack, "Port doesn't support function attributes");
+               return -EOPNOTSUPP;
+       }
+
+       err = ops->port_function_hw_addr_set(devlink, port, hw_addr, hw_addr_len, extack);
+       if (err)
+               return err;
+
+       devlink_port_notify(port, DEVLINK_CMD_PORT_NEW);
+       return 0;
+}
+
+static int
+devlink_port_function_set(struct devlink *devlink, struct devlink_port *port,
+                         const struct nlattr *attr, struct netlink_ext_ack *extack)
+{
+       struct nlattr *tb[DEVLINK_PORT_FUNCTION_ATTR_MAX + 1];
+       int err;
+
+       err = nla_parse_nested(tb, DEVLINK_PORT_FUNCTION_ATTR_MAX, attr,
+                              devlink_function_nl_policy, extack);
+       if (err < 0) {
+               NL_SET_ERR_MSG_MOD(extack, "Fail to parse port function attributes");
+               return err;
+       }
+
+       attr = tb[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR];
+       if (attr)
+               err = devlink_port_function_hw_addr_set(devlink, port, attr, extack);
+
+       return err;
+}
+
 static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb,
                                        struct genl_info *info)
 {
-       struct devlink_port *devlink_port = info->user_ptr[0];
+       struct devlink_port *devlink_port = info->user_ptr[1];
        struct devlink *devlink = devlink_port->devlink;
        int err;
 
@@ -793,6 +896,16 @@ static int devlink_nl_cmd_port_set_doit(struct sk_buff *skb,
                if (err)
                        return err;
        }
+
+       if (info->attrs[DEVLINK_ATTR_PORT_FUNCTION]) {
+               struct nlattr *attr = info->attrs[DEVLINK_ATTR_PORT_FUNCTION];
+               struct netlink_ext_ack *extack = info->extack;
+
+               err = devlink_port_function_set(devlink, devlink_port, attr, extack);
+               if (err)
+                       return err;
+       }
+
        return 0;
 }
 
@@ -810,6 +923,7 @@ static int devlink_nl_cmd_port_split_doit(struct sk_buff *skb,
                                          struct genl_info *info)
 {
        struct devlink *devlink = info->user_ptr[0];
+       struct devlink_port *devlink_port;
        u32 port_index;
        u32 count;
 
@@ -817,8 +931,27 @@ static int devlink_nl_cmd_port_split_doit(struct sk_buff *skb,
            !info->attrs[DEVLINK_ATTR_PORT_SPLIT_COUNT])
                return -EINVAL;
 
+       devlink_port = devlink_port_get_from_info(devlink, info);
        port_index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
        count = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_SPLIT_COUNT]);
+
+       if (IS_ERR(devlink_port))
+               return -EINVAL;
+
+       if (!devlink_port->attrs.splittable) {
+               /* Split ports cannot be split. */
+               if (devlink_port->attrs.split)
+                       NL_SET_ERR_MSG_MOD(info->extack, "Port cannot be split further");
+               else
+                       NL_SET_ERR_MSG_MOD(info->extack, "Port cannot be split");
+               return -EINVAL;
+       }
+
+       if (count < 2 || !is_power_of_2(count) || count > devlink_port->attrs.lanes) {
+               NL_SET_ERR_MSG_MOD(info->extack, "Invalid split count");
+               return -EINVAL;
+       }
+
        return devlink_port_split(devlink, port_index, count, info->extack);
 }
 
@@ -886,10 +1019,14 @@ static int devlink_nl_cmd_sb_get_doit(struct sk_buff *skb,
                                      struct genl_info *info)
 {
        struct devlink *devlink = info->user_ptr[0];
-       struct devlink_sb *devlink_sb = info->user_ptr[1];
+       struct devlink_sb *devlink_sb;
        struct sk_buff *msg;
        int err;
 
+       devlink_sb = devlink_sb_get_from_info(devlink, info);
+       if (IS_ERR(devlink_sb))
+               return PTR_ERR(devlink_sb);
+
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
@@ -991,11 +1128,15 @@ static int devlink_nl_cmd_sb_pool_get_doit(struct sk_buff *skb,
                                           struct genl_info *info)
 {
        struct devlink *devlink = info->user_ptr[0];
-       struct devlink_sb *devlink_sb = info->user_ptr[1];
+       struct devlink_sb *devlink_sb;
        struct sk_buff *msg;
        u16 pool_index;
        int err;
 
+       devlink_sb = devlink_sb_get_from_info(devlink, info);
+       if (IS_ERR(devlink_sb))
+               return PTR_ERR(devlink_sb);
+
        err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
                                                  &pool_index);
        if (err)
@@ -1100,12 +1241,16 @@ static int devlink_nl_cmd_sb_pool_set_doit(struct sk_buff *skb,
                                           struct genl_info *info)
 {
        struct devlink *devlink = info->user_ptr[0];
-       struct devlink_sb *devlink_sb = info->user_ptr[1];
        enum devlink_sb_threshold_type threshold_type;
+       struct devlink_sb *devlink_sb;
        u16 pool_index;
        u32 size;
        int err;
 
+       devlink_sb = devlink_sb_get_from_info(devlink, info);
+       if (IS_ERR(devlink_sb))
+               return PTR_ERR(devlink_sb);
+
        err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
                                                  &pool_index);
        if (err)
@@ -1184,13 +1329,17 @@ nla_put_failure:
 static int devlink_nl_cmd_sb_port_pool_get_doit(struct sk_buff *skb,
                                                struct genl_info *info)
 {
-       struct devlink_port *devlink_port = info->user_ptr[0];
+       struct devlink_port *devlink_port = info->user_ptr[1];
        struct devlink *devlink = devlink_port->devlink;
-       struct devlink_sb *devlink_sb = info->user_ptr[1];
+       struct devlink_sb *devlink_sb;
        struct sk_buff *msg;
        u16 pool_index;
        int err;
 
+       devlink_sb = devlink_sb_get_from_info(devlink, info);
+       if (IS_ERR(devlink_sb))
+               return PTR_ERR(devlink_sb);
+
        err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
                                                  &pool_index);
        if (err)
@@ -1300,12 +1449,17 @@ static int devlink_sb_port_pool_set(struct devlink_port *devlink_port,
 static int devlink_nl_cmd_sb_port_pool_set_doit(struct sk_buff *skb,
                                                struct genl_info *info)
 {
-       struct devlink_port *devlink_port = info->user_ptr[0];
-       struct devlink_sb *devlink_sb = info->user_ptr[1];
+       struct devlink_port *devlink_port = info->user_ptr[1];
+       struct devlink *devlink = info->user_ptr[0];
+       struct devlink_sb *devlink_sb;
        u16 pool_index;
        u32 threshold;
        int err;
 
+       devlink_sb = devlink_sb_get_from_info(devlink, info);
+       if (IS_ERR(devlink_sb))
+               return PTR_ERR(devlink_sb);
+
        err = devlink_sb_pool_index_get_from_info(devlink_sb, info,
                                                  &pool_index);
        if (err)
@@ -1387,14 +1541,18 @@ nla_put_failure:
 static int devlink_nl_cmd_sb_tc_pool_bind_get_doit(struct sk_buff *skb,
                                                   struct genl_info *info)
 {
-       struct devlink_port *devlink_port = info->user_ptr[0];
+       struct devlink_port *devlink_port = info->user_ptr[1];
        struct devlink *devlink = devlink_port->devlink;
-       struct devlink_sb *devlink_sb = info->user_ptr[1];
+       struct devlink_sb *devlink_sb;
        struct sk_buff *msg;
        enum devlink_sb_pool_type pool_type;
        u16 tc_index;
        int err;
 
+       devlink_sb = devlink_sb_get_from_info(devlink, info);
+       if (IS_ERR(devlink_sb))
+               return PTR_ERR(devlink_sb);
+
        err = devlink_sb_pool_type_get_from_info(info, &pool_type);
        if (err)
                return err;
@@ -1534,14 +1692,19 @@ static int devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
 static int devlink_nl_cmd_sb_tc_pool_bind_set_doit(struct sk_buff *skb,
                                                   struct genl_info *info)
 {
-       struct devlink_port *devlink_port = info->user_ptr[0];
-       struct devlink_sb *devlink_sb = info->user_ptr[1];
+       struct devlink_port *devlink_port = info->user_ptr[1];
+       struct devlink *devlink = info->user_ptr[0];
        enum devlink_sb_pool_type pool_type;
+       struct devlink_sb *devlink_sb;
        u16 tc_index;
        u16 pool_index;
        u32 threshold;
        int err;
 
+       devlink_sb = devlink_sb_get_from_info(devlink, info);
+       if (IS_ERR(devlink_sb))
+               return PTR_ERR(devlink_sb);
+
        err = devlink_sb_pool_type_get_from_info(info, &pool_type);
        if (err)
                return err;
@@ -1569,8 +1732,12 @@ static int devlink_nl_cmd_sb_occ_snapshot_doit(struct sk_buff *skb,
                                               struct genl_info *info)
 {
        struct devlink *devlink = info->user_ptr[0];
-       struct devlink_sb *devlink_sb = info->user_ptr[1];
        const struct devlink_ops *ops = devlink->ops;
+       struct devlink_sb *devlink_sb;
+
+       devlink_sb = devlink_sb_get_from_info(devlink, info);
+       if (IS_ERR(devlink_sb))
+               return PTR_ERR(devlink_sb);
 
        if (ops->sb_occ_snapshot)
                return ops->sb_occ_snapshot(devlink, devlink_sb->index);
@@ -1581,8 +1748,12 @@ static int devlink_nl_cmd_sb_occ_max_clear_doit(struct sk_buff *skb,
                                                struct genl_info *info)
 {
        struct devlink *devlink = info->user_ptr[0];
-       struct devlink_sb *devlink_sb = info->user_ptr[1];
        const struct devlink_ops *ops = devlink->ops;
+       struct devlink_sb *devlink_sb;
+
+       devlink_sb = devlink_sb_get_from_info(devlink, info);
+       if (IS_ERR(devlink_sb))
+               return PTR_ERR(devlink_sb);
 
        if (ops->sb_occ_max_clear)
                return ops->sb_occ_max_clear(devlink, devlink_sb->index);
@@ -2766,7 +2937,7 @@ static void devlink_reload_netns_change(struct devlink *devlink,
                                     DEVLINK_CMD_PARAM_NEW);
 }
 
-static bool devlink_reload_supported(struct devlink *devlink)
+static bool devlink_reload_supported(const struct devlink *devlink)
 {
        return devlink->ops->reload_down && devlink->ops->reload_up;
 }
@@ -2812,7 +2983,7 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
        struct net *dest_net = NULL;
        int err;
 
-       if (!devlink_reload_supported(devlink) || !devlink->reload_enabled)
+       if (!devlink_reload_supported(devlink))
                return -EOPNOTSUPP;
 
        err = devlink_resources_validate(devlink, NULL, info);
@@ -4378,6 +4549,14 @@ int devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn)
 }
 EXPORT_SYMBOL_GPL(devlink_info_serial_number_put);
 
+int devlink_info_board_serial_number_put(struct devlink_info_req *req,
+                                        const char *bsn)
+{
+       return nla_put_string(req->msg, DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER,
+                             bsn);
+}
+EXPORT_SYMBOL_GPL(devlink_info_board_serial_number_put);
+
 static int devlink_info_version_put(struct devlink_info_req *req, int attr,
                                    const char *version_name,
                                    const char *version_value)
@@ -5129,6 +5308,7 @@ struct devlink_health_reporter {
        void *priv;
        const struct devlink_health_reporter_ops *ops;
        struct devlink *devlink;
+       struct devlink_port *devlink_port;
        struct devlink_fmsg *dump_fmsg;
        struct mutex dump_lock; /* lock parallel read/write from dump buffers */
        u64 graceful_period;
@@ -5151,18 +5331,98 @@ devlink_health_reporter_priv(struct devlink_health_reporter *reporter)
 EXPORT_SYMBOL_GPL(devlink_health_reporter_priv);
 
 static struct devlink_health_reporter *
-devlink_health_reporter_find_by_name(struct devlink *devlink,
-                                    const char *reporter_name)
+__devlink_health_reporter_find_by_name(struct list_head *reporter_list,
+                                      struct mutex *list_lock,
+                                      const char *reporter_name)
 {
        struct devlink_health_reporter *reporter;
 
-       lockdep_assert_held(&devlink->reporters_lock);
-       list_for_each_entry(reporter, &devlink->reporter_list, list)
+       lockdep_assert_held(list_lock);
+       list_for_each_entry(reporter, reporter_list, list)
                if (!strcmp(reporter->ops->name, reporter_name))
                        return reporter;
        return NULL;
 }
 
+static struct devlink_health_reporter *
+devlink_health_reporter_find_by_name(struct devlink *devlink,
+                                    const char *reporter_name)
+{
+       return __devlink_health_reporter_find_by_name(&devlink->reporter_list,
+                                                     &devlink->reporters_lock,
+                                                     reporter_name);
+}
+
+static struct devlink_health_reporter *
+devlink_port_health_reporter_find_by_name(struct devlink_port *devlink_port,
+                                         const char *reporter_name)
+{
+       return __devlink_health_reporter_find_by_name(&devlink_port->reporter_list,
+                                                     &devlink_port->reporters_lock,
+                                                     reporter_name);
+}
+
+static struct devlink_health_reporter *
+__devlink_health_reporter_create(struct devlink *devlink,
+                                const struct devlink_health_reporter_ops *ops,
+                                u64 graceful_period, void *priv)
+{
+       struct devlink_health_reporter *reporter;
+
+       if (WARN_ON(graceful_period && !ops->recover))
+               return ERR_PTR(-EINVAL);
+
+       reporter = kzalloc(sizeof(*reporter), GFP_KERNEL);
+       if (!reporter)
+               return ERR_PTR(-ENOMEM);
+
+       reporter->priv = priv;
+       reporter->ops = ops;
+       reporter->devlink = devlink;
+       reporter->graceful_period = graceful_period;
+       reporter->auto_recover = !!ops->recover;
+       reporter->auto_dump = !!ops->dump;
+       mutex_init(&reporter->dump_lock);
+       refcount_set(&reporter->refcount, 1);
+       return reporter;
+}
+
+/**
+ *     devlink_port_health_reporter_create - create devlink health reporter for
+ *                                           specified port instance
+ *
+ *     @port: devlink_port which should contain the new reporter
+ *     @ops: ops
+ *     @graceful_period: to avoid recovery loops, in msecs
+ *     @priv: priv
+ */
+struct devlink_health_reporter *
+devlink_port_health_reporter_create(struct devlink_port *port,
+                                   const struct devlink_health_reporter_ops *ops,
+                                   u64 graceful_period, void *priv)
+{
+       struct devlink_health_reporter *reporter;
+
+       mutex_lock(&port->reporters_lock);
+       if (__devlink_health_reporter_find_by_name(&port->reporter_list,
+                                                  &port->reporters_lock, ops->name)) {
+               reporter = ERR_PTR(-EEXIST);
+               goto unlock;
+       }
+
+       reporter = __devlink_health_reporter_create(port->devlink, ops,
+                                                   graceful_period, priv);
+       if (IS_ERR(reporter))
+               goto unlock;
+
+       reporter->devlink_port = port;
+       list_add_tail(&reporter->list, &port->reporter_list);
+unlock:
+       mutex_unlock(&port->reporters_lock);
+       return reporter;
+}
+EXPORT_SYMBOL_GPL(devlink_port_health_reporter_create);
+
 /**
  *     devlink_health_reporter_create - create devlink health reporter
  *
@@ -5184,25 +5444,11 @@ devlink_health_reporter_create(struct devlink *devlink,
                goto unlock;
        }
 
-       if (WARN_ON(graceful_period && !ops->recover)) {
-               reporter = ERR_PTR(-EINVAL);
-               goto unlock;
-       }
-
-       reporter = kzalloc(sizeof(*reporter), GFP_KERNEL);
-       if (!reporter) {
-               reporter = ERR_PTR(-ENOMEM);
+       reporter = __devlink_health_reporter_create(devlink, ops,
+                                                   graceful_period, priv);
+       if (IS_ERR(reporter))
                goto unlock;
-       }
 
-       reporter->priv = priv;
-       reporter->ops = ops;
-       reporter->devlink = devlink;
-       reporter->graceful_period = graceful_period;
-       reporter->auto_recover = !!ops->recover;
-       reporter->auto_dump = !!ops->dump;
-       mutex_init(&reporter->dump_lock);
-       refcount_set(&reporter->refcount, 1);
        list_add_tail(&reporter->list, &devlink->reporter_list);
 unlock:
        mutex_unlock(&devlink->reporters_lock);
@@ -5210,6 +5456,29 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(devlink_health_reporter_create);
 
+static void
+devlink_health_reporter_free(struct devlink_health_reporter *reporter)
+{
+       mutex_destroy(&reporter->dump_lock);
+       if (reporter->dump_fmsg)
+               devlink_fmsg_free(reporter->dump_fmsg);
+       kfree(reporter);
+}
+
+static void
+devlink_health_reporter_put(struct devlink_health_reporter *reporter)
+{
+       if (refcount_dec_and_test(&reporter->refcount))
+               devlink_health_reporter_free(reporter);
+}
+
+static void
+__devlink_health_reporter_destroy(struct devlink_health_reporter *reporter)
+{
+       list_del(&reporter->list);
+       devlink_health_reporter_put(reporter);
+}
+
 /**
  *     devlink_health_reporter_destroy - destroy devlink health reporter
  *
@@ -5218,18 +5487,30 @@ EXPORT_SYMBOL_GPL(devlink_health_reporter_create);
 void
 devlink_health_reporter_destroy(struct devlink_health_reporter *reporter)
 {
-       mutex_lock(&reporter->devlink->reporters_lock);
-       list_del(&reporter->list);
-       mutex_unlock(&reporter->devlink->reporters_lock);
-       while (refcount_read(&reporter->refcount) > 1)
-               msleep(100);
-       mutex_destroy(&reporter->dump_lock);
-       if (reporter->dump_fmsg)
-               devlink_fmsg_free(reporter->dump_fmsg);
-       kfree(reporter);
+       struct mutex *lock = &reporter->devlink->reporters_lock;
+
+       mutex_lock(lock);
+       __devlink_health_reporter_destroy(reporter);
+       mutex_unlock(lock);
 }
 EXPORT_SYMBOL_GPL(devlink_health_reporter_destroy);
 
+/**
+ *     devlink_port_health_reporter_destroy - destroy devlink port health reporter
+ *
+ *     @reporter: devlink health reporter to destroy
+ */
+void
+devlink_port_health_reporter_destroy(struct devlink_health_reporter *reporter)
+{
+       struct mutex *lock = &reporter->devlink_port->reporters_lock;
+
+       mutex_lock(lock);
+       __devlink_health_reporter_destroy(reporter);
+       mutex_unlock(lock);
+}
+EXPORT_SYMBOL_GPL(devlink_port_health_reporter_destroy);
+
 static int
 devlink_nl_health_reporter_fill(struct sk_buff *msg,
                                struct devlink *devlink,
@@ -5247,6 +5528,10 @@ devlink_nl_health_reporter_fill(struct sk_buff *msg,
        if (devlink_nl_put_handle(msg, devlink))
                goto genlmsg_cancel;
 
+       if (reporter->devlink_port) {
+               if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, reporter->devlink_port->index))
+                       goto genlmsg_cancel;
+       }
        reporter_attr = nla_nest_start_noflag(msg,
                                              DEVLINK_ATTR_HEALTH_REPORTER);
        if (!reporter_attr)
@@ -5454,17 +5739,28 @@ devlink_health_reporter_get_from_attrs(struct devlink *devlink,
                                       struct nlattr **attrs)
 {
        struct devlink_health_reporter *reporter;
+       struct devlink_port *devlink_port;
        char *reporter_name;
 
        if (!attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME])
                return NULL;
 
        reporter_name = nla_data(attrs[DEVLINK_ATTR_HEALTH_REPORTER_NAME]);
-       mutex_lock(&devlink->reporters_lock);
-       reporter = devlink_health_reporter_find_by_name(devlink, reporter_name);
-       if (reporter)
-               refcount_inc(&reporter->refcount);
-       mutex_unlock(&devlink->reporters_lock);
+       devlink_port = devlink_port_get_from_attrs(devlink, attrs);
+       if (IS_ERR(devlink_port)) {
+               mutex_lock(&devlink->reporters_lock);
+               reporter = devlink_health_reporter_find_by_name(devlink, reporter_name);
+               if (reporter)
+                       refcount_inc(&reporter->refcount);
+               mutex_unlock(&devlink->reporters_lock);
+       } else {
+               mutex_lock(&devlink_port->reporters_lock);
+               reporter = devlink_port_health_reporter_find_by_name(devlink_port, reporter_name);
+               if (reporter)
+                       refcount_inc(&reporter->refcount);
+               mutex_unlock(&devlink_port->reporters_lock);
+       }
+
        return reporter;
 }
 
@@ -5496,12 +5792,6 @@ unlock:
        return NULL;
 }
 
-static void
-devlink_health_reporter_put(struct devlink_health_reporter *reporter)
-{
-       refcount_dec(&reporter->refcount);
-}
-
 void
 devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
                                     enum devlink_health_reporter_state state)
@@ -5558,6 +5848,7 @@ devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg,
                                          struct netlink_callback *cb)
 {
        struct devlink_health_reporter *reporter;
+       struct devlink_port *port;
        struct devlink *devlink;
        int start = cb->args[0];
        int idx = 0;
@@ -5588,6 +5879,31 @@ devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg,
                }
                mutex_unlock(&devlink->reporters_lock);
        }
+
+       list_for_each_entry(devlink, &devlink_list, list) {
+               if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
+                       continue;
+               list_for_each_entry(port, &devlink->port_list, list) {
+                       mutex_lock(&port->reporters_lock);
+                       list_for_each_entry(reporter, &port->reporter_list, list) {
+                               if (idx < start) {
+                                       idx++;
+                                       continue;
+                               }
+                               err = devlink_nl_health_reporter_fill(msg, devlink, reporter,
+                                                                     DEVLINK_CMD_HEALTH_REPORTER_GET,
+                                                                     NETLINK_CB(cb->skb).portid,
+                                                                     cb->nlh->nlmsg_seq,
+                                                                     NLM_F_MULTI);
+                               if (err) {
+                                       mutex_unlock(&port->reporters_lock);
+                                       goto out;
+                               }
+                               idx++;
+                       }
+                       mutex_unlock(&port->reporters_lock);
+               }
+       }
 out:
        mutex_unlock(&devlink_mutex);
 
@@ -6709,6 +7025,7 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
        [DEVLINK_ATTR_TRAP_POLICER_ID] = { .type = NLA_U32 },
        [DEVLINK_ATTR_TRAP_POLICER_RATE] = { .type = NLA_U64 },
        [DEVLINK_ATTR_TRAP_POLICER_BURST] = { .type = NLA_U64 },
+       [DEVLINK_ATTR_PORT_FUNCTION] = { .type = NLA_NESTED },
 };
 
 static const struct genl_ops devlink_nl_ops[] = {
@@ -6717,7 +7034,6 @@ static const struct genl_ops devlink_nl_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_get_doit,
                .dumpit = devlink_nl_cmd_get_dumpit,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
                /* can be retrieved by unprivileged users */
        },
        {
@@ -6740,24 +7056,20 @@ static const struct genl_ops devlink_nl_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_port_split_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-                                 DEVLINK_NL_FLAG_NO_LOCK,
+               .internal_flags = DEVLINK_NL_FLAG_NO_LOCK,
        },
        {
                .cmd = DEVLINK_CMD_PORT_UNSPLIT,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_port_unsplit_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-                                 DEVLINK_NL_FLAG_NO_LOCK,
+               .internal_flags = DEVLINK_NL_FLAG_NO_LOCK,
        },
        {
                .cmd = DEVLINK_CMD_SB_GET,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_sb_get_doit,
                .dumpit = devlink_nl_cmd_sb_get_dumpit,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-                                 DEVLINK_NL_FLAG_NEED_SB,
                /* can be retrieved by unprivileged users */
        },
        {
@@ -6765,8 +7077,6 @@ static const struct genl_ops devlink_nl_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_sb_pool_get_doit,
                .dumpit = devlink_nl_cmd_sb_pool_get_dumpit,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-                                 DEVLINK_NL_FLAG_NEED_SB,
                /* can be retrieved by unprivileged users */
        },
        {
@@ -6774,16 +7084,13 @@ static const struct genl_ops devlink_nl_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_sb_pool_set_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-                                 DEVLINK_NL_FLAG_NEED_SB,
        },
        {
                .cmd = DEVLINK_CMD_SB_PORT_POOL_GET,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_sb_port_pool_get_doit,
                .dumpit = devlink_nl_cmd_sb_port_pool_get_dumpit,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
-                                 DEVLINK_NL_FLAG_NEED_SB,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
                /* can be retrieved by unprivileged users */
        },
        {
@@ -6791,16 +7098,14 @@ static const struct genl_ops devlink_nl_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_sb_port_pool_set_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
-                                 DEVLINK_NL_FLAG_NEED_SB,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
        },
        {
                .cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_sb_tc_pool_bind_get_doit,
                .dumpit = devlink_nl_cmd_sb_tc_pool_bind_get_dumpit,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
-                                 DEVLINK_NL_FLAG_NEED_SB,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
                /* can be retrieved by unprivileged users */
        },
        {
@@ -6808,60 +7113,50 @@ static const struct genl_ops devlink_nl_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_sb_tc_pool_bind_set_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT |
-                                 DEVLINK_NL_FLAG_NEED_SB,
+               .internal_flags = DEVLINK_NL_FLAG_NEED_PORT,
        },
        {
                .cmd = DEVLINK_CMD_SB_OCC_SNAPSHOT,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_sb_occ_snapshot_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-                                 DEVLINK_NL_FLAG_NEED_SB,
        },
        {
                .cmd = DEVLINK_CMD_SB_OCC_MAX_CLEAR,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_sb_occ_max_clear_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-                                 DEVLINK_NL_FLAG_NEED_SB,
        },
        {
                .cmd = DEVLINK_CMD_ESWITCH_GET,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_eswitch_get_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-                                 DEVLINK_NL_FLAG_NO_LOCK,
+               .internal_flags = DEVLINK_NL_FLAG_NO_LOCK,
        },
        {
                .cmd = DEVLINK_CMD_ESWITCH_SET,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_eswitch_set_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-                                 DEVLINK_NL_FLAG_NO_LOCK,
+               .internal_flags = DEVLINK_NL_FLAG_NO_LOCK,
        },
        {
                .cmd = DEVLINK_CMD_DPIPE_TABLE_GET,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_dpipe_table_get,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
                /* can be retrieved by unprivileged users */
        },
        {
                .cmd = DEVLINK_CMD_DPIPE_ENTRIES_GET,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_dpipe_entries_get,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
                /* can be retrieved by unprivileged users */
        },
        {
                .cmd = DEVLINK_CMD_DPIPE_HEADERS_GET,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_dpipe_headers_get,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
                /* can be retrieved by unprivileged users */
        },
        {
@@ -6869,20 +7164,17 @@ static const struct genl_ops devlink_nl_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_dpipe_table_counters_set,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
        },
        {
                .cmd = DEVLINK_CMD_RESOURCE_SET,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_resource_set,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
        },
        {
                .cmd = DEVLINK_CMD_RESOURCE_DUMP,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_resource_dump,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
                /* can be retrieved by unprivileged users */
        },
        {
@@ -6890,15 +7182,13 @@ static const struct genl_ops devlink_nl_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_reload,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
-                                 DEVLINK_NL_FLAG_NO_LOCK,
+               .internal_flags = DEVLINK_NL_FLAG_NO_LOCK,
        },
        {
                .cmd = DEVLINK_CMD_PARAM_GET,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_param_get_doit,
                .dumpit = devlink_nl_cmd_param_get_dumpit,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
                /* can be retrieved by unprivileged users */
        },
        {
@@ -6906,7 +7196,6 @@ static const struct genl_ops devlink_nl_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_param_set_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
        },
        {
                .cmd = DEVLINK_CMD_PORT_PARAM_GET,
@@ -6929,21 +7218,18 @@ static const struct genl_ops devlink_nl_ops[] = {
                .doit = devlink_nl_cmd_region_get_doit,
                .dumpit = devlink_nl_cmd_region_get_dumpit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
        },
        {
                .cmd = DEVLINK_CMD_REGION_NEW,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_region_new,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
        },
        {
                .cmd = DEVLINK_CMD_REGION_DEL,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_region_del,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
        },
        {
                .cmd = DEVLINK_CMD_REGION_READ,
@@ -6951,14 +7237,12 @@ static const struct genl_ops devlink_nl_ops[] = {
                            GENL_DONT_VALIDATE_DUMP_STRICT,
                .dumpit = devlink_nl_cmd_region_read_dumpit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
        },
        {
                .cmd = DEVLINK_CMD_INFO_GET,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_info_get_doit,
                .dumpit = devlink_nl_cmd_info_get_dumpit,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
                /* can be retrieved by unprivileged users */
        },
        {
@@ -6966,7 +7250,7 @@ static const struct genl_ops devlink_nl_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_health_reporter_get_doit,
                .dumpit = devlink_nl_cmd_health_reporter_get_dumpit,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
                                  DEVLINK_NL_FLAG_NO_LOCK,
                /* can be retrieved by unprivileged users */
        },
@@ -6975,7 +7259,7 @@ static const struct genl_ops devlink_nl_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_health_reporter_set_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
                                  DEVLINK_NL_FLAG_NO_LOCK,
        },
        {
@@ -6983,7 +7267,7 @@ static const struct genl_ops devlink_nl_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_health_reporter_recover_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
                                  DEVLINK_NL_FLAG_NO_LOCK,
        },
        {
@@ -6991,7 +7275,7 @@ static const struct genl_ops devlink_nl_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_health_reporter_diagnose_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
                                  DEVLINK_NL_FLAG_NO_LOCK,
        },
        {
@@ -7000,7 +7284,7 @@ static const struct genl_ops devlink_nl_ops[] = {
                            GENL_DONT_VALIDATE_DUMP_STRICT,
                .dumpit = devlink_nl_cmd_health_reporter_dump_get_dumpit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
                                  DEVLINK_NL_FLAG_NO_LOCK,
        },
        {
@@ -7008,7 +7292,7 @@ static const struct genl_ops devlink_nl_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_health_reporter_dump_clear_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK |
+               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
                                  DEVLINK_NL_FLAG_NO_LOCK,
        },
        {
@@ -7016,46 +7300,39 @@ static const struct genl_ops devlink_nl_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = devlink_nl_cmd_flash_update,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
        },
        {
                .cmd = DEVLINK_CMD_TRAP_GET,
                .doit = devlink_nl_cmd_trap_get_doit,
                .dumpit = devlink_nl_cmd_trap_get_dumpit,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
                /* can be retrieved by unprivileged users */
        },
        {
                .cmd = DEVLINK_CMD_TRAP_SET,
                .doit = devlink_nl_cmd_trap_set_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
        },
        {
                .cmd = DEVLINK_CMD_TRAP_GROUP_GET,
                .doit = devlink_nl_cmd_trap_group_get_doit,
                .dumpit = devlink_nl_cmd_trap_group_get_dumpit,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
                /* can be retrieved by unprivileged users */
        },
        {
                .cmd = DEVLINK_CMD_TRAP_GROUP_SET,
                .doit = devlink_nl_cmd_trap_group_set_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
        },
        {
                .cmd = DEVLINK_CMD_TRAP_POLICER_GET,
                .doit = devlink_nl_cmd_trap_policer_get_doit,
                .dumpit = devlink_nl_cmd_trap_policer_get_dumpit,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
                /* can be retrieved by unprivileged users */
        },
        {
                .cmd = DEVLINK_CMD_TRAP_POLICER_SET,
                .doit = devlink_nl_cmd_trap_policer_set_doit,
                .flags = GENL_ADMIN_PERM,
-               .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK,
        },
 };
 
@@ -7120,9 +7397,9 @@ EXPORT_SYMBOL_GPL(devlink_alloc);
  */
 int devlink_register(struct devlink *devlink, struct device *dev)
 {
-       mutex_lock(&devlink_mutex);
        devlink->dev = dev;
        devlink->registered = true;
+       mutex_lock(&devlink_mutex);
        list_add_tail(&devlink->list, &devlink_list);
        devlink_notify(devlink, DEVLINK_CMD_NEW);
        mutex_unlock(&devlink_mutex);
@@ -7268,6 +7545,8 @@ int devlink_port_register(struct devlink *devlink,
        list_add_tail(&devlink_port->list, &devlink->port_list);
        INIT_LIST_HEAD(&devlink_port->param_list);
        mutex_unlock(&devlink->lock);
+       INIT_LIST_HEAD(&devlink_port->reporter_list);
+       mutex_init(&devlink_port->reporters_lock);
        INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn);
        devlink_port_type_warn_schedule(devlink_port);
        devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
@@ -7284,6 +7563,8 @@ void devlink_port_unregister(struct devlink_port *devlink_port)
 {
        struct devlink *devlink = devlink_port->devlink;
 
+       WARN_ON(!list_empty(&devlink_port->reporter_list));
+       mutex_destroy(&devlink_port->reporters_lock);
        devlink_port_type_warn_cancel(devlink_port);
        devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
        mutex_lock(&devlink->lock);
@@ -7377,24 +7658,20 @@ void devlink_port_type_clear(struct devlink_port *devlink_port)
 EXPORT_SYMBOL_GPL(devlink_port_type_clear);
 
 static int __devlink_port_attrs_set(struct devlink_port *devlink_port,
-                                   enum devlink_port_flavour flavour,
-                                   const unsigned char *switch_id,
-                                   unsigned char switch_id_len)
+                                   enum devlink_port_flavour flavour)
 {
        struct devlink_port_attrs *attrs = &devlink_port->attrs;
 
        if (WARN_ON(devlink_port->registered))
                return -EEXIST;
-       attrs->set = true;
+       devlink_port->attrs_set = true;
        attrs->flavour = flavour;
-       if (switch_id) {
-               attrs->switch_port = true;
-               if (WARN_ON(switch_id_len > MAX_PHYS_ITEM_ID_LEN))
-                       switch_id_len = MAX_PHYS_ITEM_ID_LEN;
-               memcpy(attrs->switch_id.id, switch_id, switch_id_len);
-               attrs->switch_id.id_len = switch_id_len;
+       if (attrs->switch_id.id_len) {
+               devlink_port->switch_port = true;
+               if (WARN_ON(attrs->switch_id.id_len > MAX_PHYS_ITEM_ID_LEN))
+                       attrs->switch_id.id_len = MAX_PHYS_ITEM_ID_LEN;
        } else {
-               attrs->switch_port = false;
+               devlink_port->switch_port = false;
        }
        return 0;
 }
@@ -7403,33 +7680,18 @@ static int __devlink_port_attrs_set(struct devlink_port *devlink_port,
  *     devlink_port_attrs_set - Set port attributes
  *
  *     @devlink_port: devlink port
- *     @flavour: flavour of the port
- *     @port_number: number of the port that is facing user, for example
- *                   the front panel port number
- *     @split: indicates if this is split port
- *     @split_subport_number: if the port is split, this is the number
- *                            of subport.
- *     @switch_id: if the port is part of switch, this is buffer with ID,
- *                 otwerwise this is NULL
- *     @switch_id_len: length of the switch_id buffer
+ *     @attrs: devlink port attrs
  */
 void devlink_port_attrs_set(struct devlink_port *devlink_port,
-                           enum devlink_port_flavour flavour,
-                           u32 port_number, bool split,
-                           u32 split_subport_number,
-                           const unsigned char *switch_id,
-                           unsigned char switch_id_len)
+                           struct devlink_port_attrs *attrs)
 {
-       struct devlink_port_attrs *attrs = &devlink_port->attrs;
        int ret;
 
-       ret = __devlink_port_attrs_set(devlink_port, flavour,
-                                      switch_id, switch_id_len);
+       devlink_port->attrs = *attrs;
+       ret = __devlink_port_attrs_set(devlink_port, attrs->flavour);
        if (ret)
                return;
-       attrs->split = split;
-       attrs->phys.port_number = port_number;
-       attrs->phys.split_subport_number = split_subport_number;
+       WARN_ON(attrs->splittable && attrs->split);
 }
 EXPORT_SYMBOL_GPL(devlink_port_attrs_set);
 
@@ -7438,20 +7700,14 @@ EXPORT_SYMBOL_GPL(devlink_port_attrs_set);
  *
  *     @devlink_port: devlink port
  *     @pf: associated PF for the devlink port instance
- *     @switch_id: if the port is part of switch, this is buffer with ID,
- *                 otherwise this is NULL
- *     @switch_id_len: length of the switch_id buffer
  */
-void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port,
-                                  const unsigned char *switch_id,
-                                  unsigned char switch_id_len, u16 pf)
+void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u16 pf)
 {
        struct devlink_port_attrs *attrs = &devlink_port->attrs;
        int ret;
 
        ret = __devlink_port_attrs_set(devlink_port,
-                                      DEVLINK_PORT_FLAVOUR_PCI_PF,
-                                      switch_id, switch_id_len);
+                                      DEVLINK_PORT_FLAVOUR_PCI_PF);
        if (ret)
                return;
 
@@ -7465,21 +7721,15 @@ EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_pf_set);
  *     @devlink_port: devlink port
  *     @pf: associated PF for the devlink port instance
  *     @vf: associated VF of a PF for the devlink port instance
- *     @switch_id: if the port is part of switch, this is buffer with ID,
- *                 otherwise this is NULL
- *     @switch_id_len: length of the switch_id buffer
  */
 void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port,
-                                  const unsigned char *switch_id,
-                                  unsigned char switch_id_len,
                                   u16 pf, u16 vf)
 {
        struct devlink_port_attrs *attrs = &devlink_port->attrs;
        int ret;
 
        ret = __devlink_port_attrs_set(devlink_port,
-                                      DEVLINK_PORT_FLAVOUR_PCI_VF,
-                                      switch_id, switch_id_len);
+                                      DEVLINK_PORT_FLAVOUR_PCI_VF);
        if (ret)
                return;
        attrs->pci_vf.pf = pf;
@@ -7493,7 +7743,7 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
        struct devlink_port_attrs *attrs = &devlink_port->attrs;
        int n = 0;
 
-       if (!attrs->set)
+       if (!devlink_port->attrs_set)
                return -EOPNOTSUPP;
 
        switch (attrs->flavour) {
@@ -9328,7 +9578,7 @@ int devlink_compat_switch_id_get(struct net_device *dev,
         * any devlink lock as only permanent values are accessed.
         */
        devlink_port = netdev_to_devlink_port(dev);
-       if (!devlink_port || !devlink_port->attrs.switch_port)
+       if (!devlink_port || !devlink_port->switch_port)
                return -EOPNOTSUPP;
 
        memcpy(ppid, &devlink_port->attrs.switch_id, sizeof(*ppid));
index 2ee7bc4..b09bebe 100644 (file)
@@ -1721,3 +1721,4 @@ module_exit(exit_net_drop_monitor);
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
 MODULE_ALIAS_GENL_FAMILY("NET_DM");
+MODULE_DESCRIPTION("Monitoring code for network dropped packet alerts");
index 7339538..29e3455 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/seccomp.h>
 #include <linux/if_vlan.h>
 #include <linux/bpf.h>
+#include <linux/btf.h>
 #include <net/sch_generic.h>
 #include <net/cls_cgroup.h>
 #include <net/dst_metadata.h>
 #include <net/lwtunnel.h>
 #include <net/ipv6_stubs.h>
 #include <net/bpf_sk_storage.h>
+#include <net/transp_v6.h>
+#include <linux/btf_ids.h>
+
+int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len)
+{
+       if (in_compat_syscall()) {
+               struct compat_sock_fprog f32;
+
+               if (len != sizeof(f32))
+                       return -EINVAL;
+               if (copy_from_sockptr(&f32, src, sizeof(f32)))
+                       return -EFAULT;
+               memset(dst, 0, sizeof(*dst));
+               dst->len = f32.len;
+               dst->filter = compat_ptr(f32.filter);
+       } else {
+               if (len != sizeof(*dst))
+                       return -EINVAL;
+               if (copy_from_sockptr(dst, src, sizeof(*dst)))
+                       return -EFAULT;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(copy_bpf_fprog_from_user);
 
 /**
  *     sk_filter_trim_cap - run a packet through a socket filter
@@ -3777,7 +3803,9 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = {
        .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
 };
 
-static int bpf_skb_output_btf_ids[5];
+BTF_ID_LIST(bpf_skb_output_btf_ids)
+BTF_ID(struct, sk_buff)
+
 const struct bpf_func_proto bpf_skb_output_proto = {
        .func           = bpf_skb_event_output,
        .gpl_only       = true,
@@ -4171,7 +4199,9 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = {
        .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
 };
 
-static int bpf_xdp_output_btf_ids[5];
+BTF_ID_LIST(bpf_xdp_output_btf_ids)
+BTF_ID(struct, xdp_buff)
+
 const struct bpf_func_proto bpf_xdp_output_proto = {
        .func           = bpf_xdp_event_output,
        .gpl_only       = true,
@@ -4289,10 +4319,10 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
                           char *optval, int optlen, u32 flags)
 {
        char devname[IFNAMSIZ];
+       int val, valbool;
        struct net *net;
        int ifindex;
        int ret = 0;
-       int val;
 
        if (!sk_fullsock(sk))
                return -EINVAL;
@@ -4303,6 +4333,7 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
                if (optlen != sizeof(int) && optname != SO_BINDTODEVICE)
                        return -EINVAL;
                val = *((int *)optval);
+               valbool = val ? 1 : 0;
 
                /* Only some socketops are supported */
                switch (optname) {
@@ -4361,6 +4392,11 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
                        }
                        ret = sock_bindtoindex(sk, ifindex, false);
                        break;
+               case SO_KEEPALIVE:
+                       if (sk->sk_prot->keepalive)
+                               sk->sk_prot->keepalive(sk, valbool);
+                       sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
+                       break;
                default:
                        ret = -EINVAL;
                }
@@ -4421,6 +4457,7 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
                        ret = tcp_set_congestion_control(sk, name, false,
                                                         reinit, true);
                } else {
+                       struct inet_connection_sock *icsk = inet_csk(sk);
                        struct tcp_sock *tp = tcp_sk(sk);
 
                        if (optlen != sizeof(int))
@@ -4449,6 +4486,33 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
                                else
                                        tp->save_syn = val;
                                break;
+                       case TCP_KEEPIDLE:
+                               ret = tcp_sock_set_keepidle_locked(sk, val);
+                               break;
+                       case TCP_KEEPINTVL:
+                               if (val < 1 || val > MAX_TCP_KEEPINTVL)
+                                       ret = -EINVAL;
+                               else
+                                       tp->keepalive_intvl = val * HZ;
+                               break;
+                       case TCP_KEEPCNT:
+                               if (val < 1 || val > MAX_TCP_KEEPCNT)
+                                       ret = -EINVAL;
+                               else
+                                       tp->keepalive_probes = val;
+                               break;
+                       case TCP_SYNCNT:
+                               if (val < 1 || val > MAX_TCP_SYNCNT)
+                                       ret = -EINVAL;
+                               else
+                                       icsk->icsk_syn_retries = val;
+                               break;
+                       case TCP_USER_TIMEOUT:
+                               if (val < 0)
+                                       ret = -EINVAL;
+                               else
+                                       icsk->icsk_user_timeout = val;
+                               break;
                        default:
                                ret = -EINVAL;
                        }
@@ -5853,12 +5917,16 @@ BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
 {
        unsigned int iphdr_len;
 
-       if (skb->protocol == cpu_to_be16(ETH_P_IP))
+       switch (skb_protocol(skb, true)) {
+       case cpu_to_be16(ETH_P_IP):
                iphdr_len = sizeof(struct iphdr);
-       else if (skb->protocol == cpu_to_be16(ETH_P_IPV6))
+               break;
+       case cpu_to_be16(ETH_P_IPV6):
                iphdr_len = sizeof(struct ipv6hdr);
-       else
+               break;
+       default:
                return 0;
+       }
 
        if (skb_headlen(skb) < iphdr_len)
                return 0;
@@ -6854,6 +6922,7 @@ static bool __sock_filter_check_attach_type(int off,
        case offsetof(struct bpf_sock, priority):
                switch (attach_type) {
                case BPF_CGROUP_INET_SOCK_CREATE:
+               case BPF_CGROUP_INET_SOCK_RELEASE:
                        goto full_access;
                default:
                        return false;
@@ -9183,6 +9252,189 @@ const struct bpf_verifier_ops sk_reuseport_verifier_ops = {
 
 const struct bpf_prog_ops sk_reuseport_prog_ops = {
 };
+
+DEFINE_STATIC_KEY_FALSE(bpf_sk_lookup_enabled);
+EXPORT_SYMBOL(bpf_sk_lookup_enabled);
+
+BPF_CALL_3(bpf_sk_lookup_assign, struct bpf_sk_lookup_kern *, ctx,
+          struct sock *, sk, u64, flags)
+{
+       if (unlikely(flags & ~(BPF_SK_LOOKUP_F_REPLACE |
+                              BPF_SK_LOOKUP_F_NO_REUSEPORT)))
+               return -EINVAL;
+       if (unlikely(sk && sk_is_refcounted(sk)))
+               return -ESOCKTNOSUPPORT; /* reject non-RCU freed sockets */
+       if (unlikely(sk && sk->sk_state == TCP_ESTABLISHED))
+               return -ESOCKTNOSUPPORT; /* reject connected sockets */
+
+       /* Check if socket is suitable for packet L3/L4 protocol */
+       if (sk && sk->sk_protocol != ctx->protocol)
+               return -EPROTOTYPE;
+       if (sk && sk->sk_family != ctx->family &&
+           (sk->sk_family == AF_INET || ipv6_only_sock(sk)))
+               return -EAFNOSUPPORT;
+
+       if (ctx->selected_sk && !(flags & BPF_SK_LOOKUP_F_REPLACE))
+               return -EEXIST;
+
+       /* Select socket as lookup result */
+       ctx->selected_sk = sk;
+       ctx->no_reuseport = flags & BPF_SK_LOOKUP_F_NO_REUSEPORT;
+       return 0;
+}
+
+static const struct bpf_func_proto bpf_sk_lookup_assign_proto = {
+       .func           = bpf_sk_lookup_assign,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_PTR_TO_SOCKET_OR_NULL,
+       .arg3_type      = ARG_ANYTHING,
+};
+
+static const struct bpf_func_proto *
+sk_lookup_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+       switch (func_id) {
+       case BPF_FUNC_perf_event_output:
+               return &bpf_event_output_data_proto;
+       case BPF_FUNC_sk_assign:
+               return &bpf_sk_lookup_assign_proto;
+       case BPF_FUNC_sk_release:
+               return &bpf_sk_release_proto;
+       default:
+               return bpf_base_func_proto(func_id);
+       }
+}
+
+static bool sk_lookup_is_valid_access(int off, int size,
+                                     enum bpf_access_type type,
+                                     const struct bpf_prog *prog,
+                                     struct bpf_insn_access_aux *info)
+{
+       if (off < 0 || off >= sizeof(struct bpf_sk_lookup))
+               return false;
+       if (off % size != 0)
+               return false;
+       if (type != BPF_READ)
+               return false;
+
+       switch (off) {
+       case offsetof(struct bpf_sk_lookup, sk):
+               info->reg_type = PTR_TO_SOCKET_OR_NULL;
+               return size == sizeof(__u64);
+
+       case bpf_ctx_range(struct bpf_sk_lookup, family):
+       case bpf_ctx_range(struct bpf_sk_lookup, protocol):
+       case bpf_ctx_range(struct bpf_sk_lookup, remote_ip4):
+       case bpf_ctx_range(struct bpf_sk_lookup, local_ip4):
+       case bpf_ctx_range_till(struct bpf_sk_lookup, remote_ip6[0], remote_ip6[3]):
+       case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]):
+       case bpf_ctx_range(struct bpf_sk_lookup, remote_port):
+       case bpf_ctx_range(struct bpf_sk_lookup, local_port):
+               bpf_ctx_record_field_size(info, sizeof(__u32));
+               return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32));
+
+       default:
+               return false;
+       }
+}
+
+static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type,
+                                       const struct bpf_insn *si,
+                                       struct bpf_insn *insn_buf,
+                                       struct bpf_prog *prog,
+                                       u32 *target_size)
+{
+       struct bpf_insn *insn = insn_buf;
+
+       switch (si->off) {
+       case offsetof(struct bpf_sk_lookup, sk):
+               *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg,
+                                     offsetof(struct bpf_sk_lookup_kern, selected_sk));
+               break;
+
+       case offsetof(struct bpf_sk_lookup, family):
+               *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
+                                     bpf_target_off(struct bpf_sk_lookup_kern,
+                                                    family, 2, target_size));
+               break;
+
+       case offsetof(struct bpf_sk_lookup, protocol):
+               *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
+                                     bpf_target_off(struct bpf_sk_lookup_kern,
+                                                    protocol, 2, target_size));
+               break;
+
+       case offsetof(struct bpf_sk_lookup, remote_ip4):
+               *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
+                                     bpf_target_off(struct bpf_sk_lookup_kern,
+                                                    v4.saddr, 4, target_size));
+               break;
+
+       case offsetof(struct bpf_sk_lookup, local_ip4):
+               *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
+                                     bpf_target_off(struct bpf_sk_lookup_kern,
+                                                    v4.daddr, 4, target_size));
+               break;
+
+       case bpf_ctx_range_till(struct bpf_sk_lookup,
+                               remote_ip6[0], remote_ip6[3]): {
+#if IS_ENABLED(CONFIG_IPV6)
+               int off = si->off;
+
+               off -= offsetof(struct bpf_sk_lookup, remote_ip6[0]);
+               off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size);
+               *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg,
+                                     offsetof(struct bpf_sk_lookup_kern, v6.saddr));
+               *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
+               *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off);
+#else
+               *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
+#endif
+               break;
+       }
+       case bpf_ctx_range_till(struct bpf_sk_lookup,
+                               local_ip6[0], local_ip6[3]): {
+#if IS_ENABLED(CONFIG_IPV6)
+               int off = si->off;
+
+               off -= offsetof(struct bpf_sk_lookup, local_ip6[0]);
+               off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size);
+               *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg,
+                                     offsetof(struct bpf_sk_lookup_kern, v6.daddr));
+               *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
+               *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off);
+#else
+               *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
+#endif
+               break;
+       }
+       case offsetof(struct bpf_sk_lookup, remote_port):
+               *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
+                                     bpf_target_off(struct bpf_sk_lookup_kern,
+                                                    sport, 2, target_size));
+               break;
+
+       case offsetof(struct bpf_sk_lookup, local_port):
+               *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
+                                     bpf_target_off(struct bpf_sk_lookup_kern,
+                                                    dport, 2, target_size));
+               break;
+       }
+
+       return insn - insn_buf;
+}
+
+const struct bpf_prog_ops sk_lookup_prog_ops = {
+};
+
+const struct bpf_verifier_ops sk_lookup_verifier_ops = {
+       .get_func_proto         = sk_lookup_func_proto,
+       .is_valid_access        = sk_lookup_is_valid_access,
+       .convert_ctx_access     = sk_lookup_convert_ctx_access,
+};
+
 #endif /* CONFIG_INET */
 
 DEFINE_BPF_DISPATCHER(xdp)
@@ -9191,3 +9443,132 @@ void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog)
 {
        bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog);
 }
+
+#ifdef CONFIG_DEBUG_INFO_BTF
+BTF_ID_LIST_GLOBAL(btf_sock_ids)
+#define BTF_SOCK_TYPE(name, type) BTF_ID(struct, type)
+BTF_SOCK_TYPE_xxx
+#undef BTF_SOCK_TYPE
+#else
+u32 btf_sock_ids[MAX_BTF_SOCK_TYPE];
+#endif
+
+static bool check_arg_btf_id(u32 btf_id, u32 arg)
+{
+       int i;
+
+       /* only one argument, no need to check arg */
+       for (i = 0; i < MAX_BTF_SOCK_TYPE; i++)
+               if (btf_sock_ids[i] == btf_id)
+                       return true;
+       return false;
+}
+
+BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk)
+{
+       /* tcp6_sock type is not generated in dwarf and hence btf,
+        * trigger an explicit type generation here.
+        */
+       BTF_TYPE_EMIT(struct tcp6_sock);
+       if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP &&
+           sk->sk_family == AF_INET6)
+               return (unsigned long)sk;
+
+       return (unsigned long)NULL;
+}
+
+const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = {
+       .func                   = bpf_skc_to_tcp6_sock,
+       .gpl_only               = false,
+       .ret_type               = RET_PTR_TO_BTF_ID_OR_NULL,
+       .arg1_type              = ARG_PTR_TO_BTF_ID,
+       .check_btf_id           = check_arg_btf_id,
+       .ret_btf_id             = &btf_sock_ids[BTF_SOCK_TYPE_TCP6],
+};
+
+BPF_CALL_1(bpf_skc_to_tcp_sock, struct sock *, sk)
+{
+       if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
+               return (unsigned long)sk;
+
+       return (unsigned long)NULL;
+}
+
+const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = {
+       .func                   = bpf_skc_to_tcp_sock,
+       .gpl_only               = false,
+       .ret_type               = RET_PTR_TO_BTF_ID_OR_NULL,
+       .arg1_type              = ARG_PTR_TO_BTF_ID,
+       .check_btf_id           = check_arg_btf_id,
+       .ret_btf_id             = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
+};
+
+BPF_CALL_1(bpf_skc_to_tcp_timewait_sock, struct sock *, sk)
+{
+#ifdef CONFIG_INET
+       if (sk->sk_prot == &tcp_prot && sk->sk_state == TCP_TIME_WAIT)
+               return (unsigned long)sk;
+#endif
+
+#if IS_BUILTIN(CONFIG_IPV6)
+       if (sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_TIME_WAIT)
+               return (unsigned long)sk;
+#endif
+
+       return (unsigned long)NULL;
+}
+
+const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = {
+       .func                   = bpf_skc_to_tcp_timewait_sock,
+       .gpl_only               = false,
+       .ret_type               = RET_PTR_TO_BTF_ID_OR_NULL,
+       .arg1_type              = ARG_PTR_TO_BTF_ID,
+       .check_btf_id           = check_arg_btf_id,
+       .ret_btf_id             = &btf_sock_ids[BTF_SOCK_TYPE_TCP_TW],
+};
+
+BPF_CALL_1(bpf_skc_to_tcp_request_sock, struct sock *, sk)
+{
+#ifdef CONFIG_INET
+       if (sk->sk_prot == &tcp_prot  && sk->sk_state == TCP_NEW_SYN_RECV)
+               return (unsigned long)sk;
+#endif
+
+#if IS_BUILTIN(CONFIG_IPV6)
+       if (sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_NEW_SYN_RECV)
+               return (unsigned long)sk;
+#endif
+
+       return (unsigned long)NULL;
+}
+
+const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = {
+       .func                   = bpf_skc_to_tcp_request_sock,
+       .gpl_only               = false,
+       .ret_type               = RET_PTR_TO_BTF_ID_OR_NULL,
+       .arg1_type              = ARG_PTR_TO_BTF_ID,
+       .check_btf_id           = check_arg_btf_id,
+       .ret_btf_id             = &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ],
+};
+
+BPF_CALL_1(bpf_skc_to_udp6_sock, struct sock *, sk)
+{
+       /* udp6_sock type is not generated in dwarf and hence btf,
+        * trigger an explicit type generation here.
+        */
+       BTF_TYPE_EMIT(struct udp6_sock);
+       if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_UDP &&
+           sk->sk_type == SOCK_DGRAM && sk->sk_family == AF_INET6)
+               return (unsigned long)sk;
+
+       return (unsigned long)NULL;
+}
+
+const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = {
+       .func                   = bpf_skc_to_udp6_sock,
+       .gpl_only               = false,
+       .ret_type               = RET_PTR_TO_BTF_ID_OR_NULL,
+       .arg1_type              = ARG_PTR_TO_BTF_ID,
+       .check_btf_id           = check_arg_btf_id,
+       .ret_btf_id             = &btf_sock_ids[BTF_SOCK_TYPE_UDP6],
+};
index d02df0b..29806eb 100644 (file)
@@ -70,10 +70,10 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
 EXPORT_SYMBOL(skb_flow_dissector_init);
 
 #ifdef CONFIG_BPF_SYSCALL
-int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog)
+int flow_dissector_bpf_prog_attach_check(struct net *net,
+                                        struct bpf_prog *prog)
 {
        enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
-       struct bpf_prog *attached;
 
        if (net == &init_net) {
                /* BPF flow dissector in the root namespace overrides
@@ -86,26 +86,17 @@ int flow_dissector_bpf_prog_attach(struct net *net, struct bpf_prog *prog)
                for_each_net(ns) {
                        if (ns == &init_net)
                                continue;
-                       if (rcu_access_pointer(ns->bpf.progs[type]))
+                       if (rcu_access_pointer(ns->bpf.run_array[type]))
                                return -EEXIST;
                }
        } else {
                /* Make sure root flow dissector is not attached
                 * when attaching to the non-root namespace.
                 */
-               if (rcu_access_pointer(init_net.bpf.progs[type]))
+               if (rcu_access_pointer(init_net.bpf.run_array[type]))
                        return -EEXIST;
        }
 
-       attached = rcu_dereference_protected(net->bpf.progs[type],
-                                            lockdep_is_held(&netns_bpf_mutex));
-       if (attached == prog)
-               /* The same program cannot be attached twice */
-               return -EINVAL;
-
-       rcu_assign_pointer(net->bpf.progs[type], prog);
-       if (attached)
-               bpf_prog_put(attached);
        return 0;
 }
 #endif /* CONFIG_BPF_SYSCALL */
@@ -392,6 +383,23 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb,
 }
 EXPORT_SYMBOL(skb_flow_dissect_tunnel_info);
 
+void skb_flow_dissect_hash(const struct sk_buff *skb,
+                          struct flow_dissector *flow_dissector,
+                          void *target_container)
+{
+       struct flow_dissector_key_hash *key;
+
+       if (!dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_HASH))
+               return;
+
+       key = skb_flow_dissector_target(flow_dissector,
+                                       FLOW_DISSECTOR_KEY_HASH,
+                                       target_container);
+
+       key->hash = skb_get_hash_raw(skb);
+}
+EXPORT_SYMBOL(skb_flow_dissect_hash);
+
 static enum flow_dissect_ret
 __skb_flow_dissect_mpls(const struct sk_buff *skb,
                        struct flow_dissector *flow_dissector,
@@ -903,7 +911,6 @@ bool __skb_flow_dissect(const struct net *net,
        struct flow_dissector_key_addrs *key_addrs;
        struct flow_dissector_key_tags *key_tags;
        struct flow_dissector_key_vlan *key_vlan;
-       struct bpf_prog *attached = NULL;
        enum flow_dissect_ret fdret;
        enum flow_dissector_key_id dissector_vlan = FLOW_DISSECTOR_KEY_MAX;
        bool mpls_el = false;
@@ -960,14 +967,14 @@ bool __skb_flow_dissect(const struct net *net,
        WARN_ON_ONCE(!net);
        if (net) {
                enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR;
+               struct bpf_prog_array *run_array;
 
                rcu_read_lock();
-               attached = rcu_dereference(init_net.bpf.progs[type]);
-
-               if (!attached)
-                       attached = rcu_dereference(net->bpf.progs[type]);
+               run_array = rcu_dereference(init_net.bpf.run_array[type]);
+               if (!run_array)
+                       run_array = rcu_dereference(net->bpf.run_array[type]);
 
-               if (attached) {
+               if (run_array) {
                        struct bpf_flow_keys flow_keys;
                        struct bpf_flow_dissector ctx = {
                                .flow_keys = &flow_keys,
@@ -975,6 +982,7 @@ bool __skb_flow_dissect(const struct net *net,
                                .data_end = data + hlen,
                        };
                        __be16 n_proto = proto;
+                       struct bpf_prog *prog;
 
                        if (skb) {
                                ctx.skb = skb;
@@ -985,7 +993,8 @@ bool __skb_flow_dissect(const struct net *net,
                                n_proto = skb->protocol;
                        }
 
-                       ret = bpf_flow_dissect(attached, &ctx, n_proto, nhoff,
+                       prog = READ_ONCE(run_array->items[0].prog);
+                       ret = bpf_flow_dissect(prog, &ctx, n_proto, nhoff,
                                               hlen, flags);
                        __skb_flow_bpf_to_target(&flow_keys, flow_dissector,
                                                 target_container);
index 0cfc35e..b8cf6ff 100644 (file)
@@ -372,14 +372,15 @@ int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
 }
 EXPORT_SYMBOL(flow_indr_dev_register);
 
-static void __flow_block_indr_cleanup(flow_setup_cb_t *setup_cb, void *cb_priv,
+static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
+                                     void *cb_priv,
                                      struct list_head *cleanup_list)
 {
        struct flow_block_cb *this, *next;
 
        list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
-               if (this->cb == setup_cb &&
-                   this->cb_priv == cb_priv) {
+               if (this->release == release &&
+                   this->indr.cb_priv == cb_priv) {
                        list_move(&this->indr.list, cleanup_list);
                        return;
                }
@@ -397,7 +398,7 @@ static void flow_block_indr_notify(struct list_head *cleanup_list)
 }
 
 void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
-                             flow_setup_cb_t *setup_cb)
+                             void (*release)(void *cb_priv))
 {
        struct flow_indr_dev *this, *next, *indr_dev = NULL;
        LIST_HEAD(cleanup_list);
@@ -418,7 +419,7 @@ void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
                return;
        }
 
-       __flow_block_indr_cleanup(setup_cb, cb_priv, &cleanup_list);
+       __flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
        mutex_unlock(&flow_indr_block_lock);
 
        flow_block_indr_notify(&cleanup_list);
@@ -428,35 +429,42 @@ EXPORT_SYMBOL(flow_indr_dev_unregister);
 
 static void flow_block_indr_init(struct flow_block_cb *flow_block,
                                 struct flow_block_offload *bo,
-                                struct net_device *dev, void *data,
+                                struct net_device *dev, struct Qdisc *sch, void *data,
+                                void *cb_priv,
                                 void (*cleanup)(struct flow_block_cb *block_cb))
 {
        flow_block->indr.binder_type = bo->binder_type;
        flow_block->indr.data = data;
+       flow_block->indr.cb_priv = cb_priv;
        flow_block->indr.dev = dev;
+       flow_block->indr.sch = sch;
        flow_block->indr.cleanup = cleanup;
 }
 
-static void __flow_block_indr_binding(struct flow_block_offload *bo,
-                                     struct net_device *dev, void *data,
-                                     void (*cleanup)(struct flow_block_cb *block_cb))
+struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
+                                              void *cb_ident, void *cb_priv,
+                                              void (*release)(void *cb_priv),
+                                              struct flow_block_offload *bo,
+                                              struct net_device *dev,
+                                              struct Qdisc *sch, void *data,
+                                              void *indr_cb_priv,
+                                              void (*cleanup)(struct flow_block_cb *block_cb))
 {
        struct flow_block_cb *block_cb;
 
-       list_for_each_entry(block_cb, &bo->cb_list, list) {
-               switch (bo->command) {
-               case FLOW_BLOCK_BIND:
-                       flow_block_indr_init(block_cb, bo, dev, data, cleanup);
-                       list_add(&block_cb->indr.list, &flow_block_indr_list);
-                       break;
-               case FLOW_BLOCK_UNBIND:
-                       list_del(&block_cb->indr.list);
-                       break;
-               }
-       }
+       block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
+       if (IS_ERR(block_cb))
+               goto out;
+
+       flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
+       list_add(&block_cb->indr.list, &flow_block_indr_list);
+
+out:
+       return block_cb;
 }
+EXPORT_SYMBOL(flow_indr_block_cb_alloc);
 
-int flow_indr_dev_setup_offload(struct net_device *dev,
+int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
                                enum tc_setup_type type, void *data,
                                struct flow_block_offload *bo,
                                void (*cleanup)(struct flow_block_cb *block_cb))
@@ -465,9 +473,8 @@ int flow_indr_dev_setup_offload(struct net_device *dev,
 
        mutex_lock(&flow_indr_block_lock);
        list_for_each_entry(this, &flow_block_indr_dev_list, list)
-               this->cb(dev, this->cb_priv, type, bo);
+               this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
 
-       __flow_block_indr_binding(bo, dev, data, cleanup);
        mutex_unlock(&flow_indr_block_lock);
 
        return list_empty(&bo->cb_list) ? -EOPNOTSUPP : 0;
index ef6b5a8..8e39e28 100644 (file)
@@ -1783,6 +1783,7 @@ const struct nla_policy nda_policy[NDA_MAX+1] = {
        [NDA_MASTER]            = { .type = NLA_U32 },
        [NDA_PROTOCOL]          = { .type = NLA_U8 },
        [NDA_NH_ID]             = { .type = NLA_U32 },
+       [NDA_FDB_EXT_ATTRS]     = { .type = NLA_NESTED },
 };
 
 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh,
index 351afbf..6a32a1f 100644 (file)
@@ -683,7 +683,7 @@ static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
        return container_of(parser, struct sk_psock, parser);
 }
 
-static void sk_psock_skb_redirect(struct sk_psock *psock, struct sk_buff *skb)
+static void sk_psock_skb_redirect(struct sk_buff *skb)
 {
        struct sk_psock *psock_other;
        struct sock *sk_other;
@@ -715,12 +715,11 @@ static void sk_psock_skb_redirect(struct sk_psock *psock, struct sk_buff *skb)
        }
 }
 
-static void sk_psock_tls_verdict_apply(struct sk_psock *psock,
-                                      struct sk_buff *skb, int verdict)
+static void sk_psock_tls_verdict_apply(struct sk_buff *skb, int verdict)
 {
        switch (verdict) {
        case __SK_REDIRECT:
-               sk_psock_skb_redirect(psock, skb);
+               sk_psock_skb_redirect(skb);
                break;
        case __SK_PASS:
        case __SK_DROP:
@@ -741,8 +740,8 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
                ret = sk_psock_bpf_run(psock, prog, skb);
                ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
        }
+       sk_psock_tls_verdict_apply(skb, ret);
        rcu_read_unlock();
-       sk_psock_tls_verdict_apply(psock, skb, ret);
        return ret;
 }
 EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
@@ -770,7 +769,7 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
                }
                goto out_free;
        case __SK_REDIRECT:
-               sk_psock_skb_redirect(psock, skb);
+               sk_psock_skb_redirect(skb);
                break;
        case __SK_DROP:
                /* fall-through */
@@ -782,11 +781,18 @@ out_free:
 
 static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
 {
-       struct sk_psock *psock = sk_psock_from_strp(strp);
+       struct sk_psock *psock;
        struct bpf_prog *prog;
        int ret = __SK_DROP;
+       struct sock *sk;
 
        rcu_read_lock();
+       sk = strp->sk;
+       psock = sk_psock(sk);
+       if (unlikely(!psock)) {
+               kfree_skb(skb);
+               goto out;
+       }
        prog = READ_ONCE(psock->progs.skb_verdict);
        if (likely(prog)) {
                skb_orphan(skb);
@@ -794,8 +800,9 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
                ret = sk_psock_bpf_run(psock, prog, skb);
                ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
        }
-       rcu_read_unlock();
        sk_psock_verdict_apply(psock, skb, ret);
+out:
+       rcu_read_unlock();
 }
 
 static int sk_psock_strp_read_done(struct strparser *strp, int err)
index 6c4acf1..2c5dd13 100644 (file)
 #include <linux/static_key.h>
 #include <linux/memcontrol.h>
 #include <linux/prefetch.h>
+#include <linux/compat.h>
 
 #include <linux/uaccess.h>
 
@@ -360,7 +361,8 @@ static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
        return sizeof(tv);
 }
 
-static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool old_timeval)
+static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
+                           bool old_timeval)
 {
        struct __kernel_sock_timeval tv;
 
@@ -370,7 +372,7 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool
                if (optlen < sizeof(tv32))
                        return -EINVAL;
 
-               if (copy_from_user(&tv32, optval, sizeof(tv32)))
+               if (copy_from_sockptr(&tv32, optval, sizeof(tv32)))
                        return -EFAULT;
                tv.tv_sec = tv32.tv_sec;
                tv.tv_usec = tv32.tv_usec;
@@ -379,14 +381,14 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool
 
                if (optlen < sizeof(old_tv))
                        return -EINVAL;
-               if (copy_from_user(&old_tv, optval, sizeof(old_tv)))
+               if (copy_from_sockptr(&old_tv, optval, sizeof(old_tv)))
                        return -EFAULT;
                tv.tv_sec = old_tv.tv_sec;
                tv.tv_usec = old_tv.tv_usec;
        } else {
                if (optlen < sizeof(tv))
                        return -EINVAL;
-               if (copy_from_user(&tv, optval, sizeof(tv)))
+               if (copy_from_sockptr(&tv, optval, sizeof(tv)))
                        return -EFAULT;
        }
        if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
@@ -608,8 +610,7 @@ int sock_bindtoindex(struct sock *sk, int ifindex, bool lock_sk)
 }
 EXPORT_SYMBOL(sock_bindtoindex);
 
-static int sock_setbindtodevice(struct sock *sk, char __user *optval,
-                               int optlen)
+static int sock_setbindtodevice(struct sock *sk, sockptr_t optval, int optlen)
 {
        int ret = -ENOPROTOOPT;
 #ifdef CONFIG_NETDEVICES
@@ -631,7 +632,7 @@ static int sock_setbindtodevice(struct sock *sk, char __user *optval,
        memset(devname, 0, sizeof(devname));
 
        ret = -EFAULT;
-       if (copy_from_user(devname, optval, optlen))
+       if (copy_from_sockptr(devname, optval, optlen))
                goto out;
 
        index = 0;
@@ -695,15 +696,6 @@ out:
        return ret;
 }
 
-static inline void sock_valbool_flag(struct sock *sk, enum sock_flags bit,
-                                    int valbool)
-{
-       if (valbool)
-               sock_set_flag(sk, bit);
-       else
-               sock_reset_flag(sk, bit);
-}
-
 bool sk_mc_loop(struct sock *sk)
 {
        if (dev_recursion_level())
@@ -718,7 +710,7 @@ bool sk_mc_loop(struct sock *sk)
                return inet6_sk(sk)->mc_loop;
 #endif
        }
-       WARN_ON(1);
+       WARN_ON_ONCE(1);
        return true;
 }
 EXPORT_SYMBOL(sk_mc_loop);
@@ -834,7 +826,7 @@ EXPORT_SYMBOL(sock_set_rcvbuf);
  */
 
 int sock_setsockopt(struct socket *sock, int level, int optname,
-                   char __user *optval, unsigned int optlen)
+                   sockptr_t optval, unsigned int optlen)
 {
        struct sock_txtime sk_txtime;
        struct sock *sk = sock->sk;
@@ -853,7 +845,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
        if (optlen < sizeof(int))
                return -EINVAL;
 
-       if (get_user(val, (int __user *)optval))
+       if (copy_from_sockptr(&val, optval, sizeof(val)))
                return -EFAULT;
 
        valbool = val ? 1 : 0;
@@ -966,7 +958,7 @@ set_sndbuf:
                        ret = -EINVAL;  /* 1003.1g */
                        break;
                }
-               if (copy_from_user(&ling, optval, sizeof(ling))) {
+               if (copy_from_sockptr(&ling, optval, sizeof(ling))) {
                        ret = -EFAULT;
                        break;
                }
@@ -1060,60 +1052,52 @@ set_sndbuf:
 
        case SO_RCVTIMEO_OLD:
        case SO_RCVTIMEO_NEW:
-               ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen, optname == SO_RCVTIMEO_OLD);
+               ret = sock_set_timeout(&sk->sk_rcvtimeo, optval,
+                                      optlen, optname == SO_RCVTIMEO_OLD);
                break;
 
        case SO_SNDTIMEO_OLD:
        case SO_SNDTIMEO_NEW:
-               ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen, optname == SO_SNDTIMEO_OLD);
+               ret = sock_set_timeout(&sk->sk_sndtimeo, optval,
+                                      optlen, optname == SO_SNDTIMEO_OLD);
                break;
 
-       case SO_ATTACH_FILTER:
-               ret = -EINVAL;
-               if (optlen == sizeof(struct sock_fprog)) {
-                       struct sock_fprog fprog;
-
-                       ret = -EFAULT;
-                       if (copy_from_user(&fprog, optval, sizeof(fprog)))
-                               break;
+       case SO_ATTACH_FILTER: {
+               struct sock_fprog fprog;
 
+               ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
+               if (!ret)
                        ret = sk_attach_filter(&fprog, sk);
-               }
                break;
-
+       }
        case SO_ATTACH_BPF:
                ret = -EINVAL;
                if (optlen == sizeof(u32)) {
                        u32 ufd;
 
                        ret = -EFAULT;
-                       if (copy_from_user(&ufd, optval, sizeof(ufd)))
+                       if (copy_from_sockptr(&ufd, optval, sizeof(ufd)))
                                break;
 
                        ret = sk_attach_bpf(ufd, sk);
                }
                break;
 
-       case SO_ATTACH_REUSEPORT_CBPF:
-               ret = -EINVAL;
-               if (optlen == sizeof(struct sock_fprog)) {
-                       struct sock_fprog fprog;
-
-                       ret = -EFAULT;
-                       if (copy_from_user(&fprog, optval, sizeof(fprog)))
-                               break;
+       case SO_ATTACH_REUSEPORT_CBPF: {
+               struct sock_fprog fprog;
 
+               ret = copy_bpf_fprog_from_user(&fprog, optval, optlen);
+               if (!ret)
                        ret = sk_reuseport_attach_filter(&fprog, sk);
-               }
                break;
-
+       }
        case SO_ATTACH_REUSEPORT_EBPF:
                ret = -EINVAL;
                if (optlen == sizeof(u32)) {
                        u32 ufd;
 
                        ret = -EFAULT;
-                       if (copy_from_user(&ufd, optval, sizeof(ufd)))
+                       if (copy_from_sockptr(&ufd, optval, sizeof(ufd)))
                                break;
 
                        ret = sk_reuseport_attach_bpf(ufd, sk);
@@ -1193,7 +1177,7 @@ set_sndbuf:
 
                if (sizeof(ulval) != sizeof(val) &&
                    optlen >= sizeof(ulval) &&
-                   get_user(ulval, (unsigned long __user *)optval)) {
+                   copy_from_sockptr(&ulval, optval, sizeof(ulval))) {
                        ret = -EFAULT;
                        break;
                }
@@ -1236,7 +1220,7 @@ set_sndbuf:
                if (optlen != sizeof(struct sock_txtime)) {
                        ret = -EINVAL;
                        break;
-               } else if (copy_from_user(&sk_txtime, optval,
+               } else if (copy_from_sockptr(&sk_txtime, optval,
                           sizeof(struct sock_txtime))) {
                        ret = -EFAULT;
                        break;
@@ -1767,6 +1751,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
                cgroup_sk_alloc(&sk->sk_cgrp_data);
                sock_update_classid(&sk->sk_cgrp_data);
                sock_update_netprioidx(&sk->sk_cgrp_data);
+               sk_tx_queue_clear(sk);
        }
 
        return sk;
@@ -1925,7 +1910,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                /* sk->sk_memcg will be populated at accept() time */
                newsk->sk_memcg = NULL;
 
-               cgroup_sk_alloc(&newsk->sk_cgrp_data);
+               cgroup_sk_clone(&newsk->sk_cgrp_data);
 
                rcu_read_lock();
                filter = rcu_dereference(sk->sk_filter);
@@ -1990,6 +1975,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                 */
                sk_refcnt_debug_inc(newsk);
                sk_set_socket(newsk, NULL);
+               sk_tx_queue_clear(newsk);
                RCU_INIT_POINTER(newsk->sk_wq, NULL);
 
                if (newsk->sk_prot->sockets_allocated)
@@ -2800,20 +2786,6 @@ int sock_no_shutdown(struct socket *sock, int how)
 }
 EXPORT_SYMBOL(sock_no_shutdown);
 
-int sock_no_setsockopt(struct socket *sock, int level, int optname,
-                   char __user *optval, unsigned int optlen)
-{
-       return -EOPNOTSUPP;
-}
-EXPORT_SYMBOL(sock_no_setsockopt);
-
-int sock_no_getsockopt(struct socket *sock, int level, int optname,
-                   char __user *optval, int __user *optlen)
-{
-       return -EOPNOTSUPP;
-}
-EXPORT_SYMBOL(sock_no_getsockopt);
-
 int sock_no_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
 {
        return -EOPNOTSUPP;
@@ -3220,20 +3192,6 @@ int sock_common_getsockopt(struct socket *sock, int level, int optname,
 }
 EXPORT_SYMBOL(sock_common_getsockopt);
 
-#ifdef CONFIG_COMPAT
-int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
-                                 char __user *optval, int __user *optlen)
-{
-       struct sock *sk = sock->sk;
-
-       if (sk->sk_prot->compat_getsockopt != NULL)
-               return sk->sk_prot->compat_getsockopt(sk, level, optname,
-                                                     optval, optlen);
-       return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
-}
-EXPORT_SYMBOL(compat_sock_common_getsockopt);
-#endif
-
 int sock_common_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
                        int flags)
 {
@@ -3253,7 +3211,7 @@ EXPORT_SYMBOL(sock_common_recvmsg);
  *     Set socket options on an inet socket.
  */
 int sock_common_setsockopt(struct socket *sock, int level, int optname,
-                          char __user *optval, unsigned int optlen)
+                          sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
 
@@ -3261,20 +3219,6 @@ int sock_common_setsockopt(struct socket *sock, int level, int optname,
 }
 EXPORT_SYMBOL(sock_common_setsockopt);
 
-#ifdef CONFIG_COMPAT
-int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
-                                 char __user *optval, unsigned int optlen)
-{
-       struct sock *sk = sock->sk;
-
-       if (sk->sk_prot->compat_setsockopt != NULL)
-               return sk->sk_prot->compat_setsockopt(sk, level, optname,
-                                                     optval, optlen);
-       return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
-}
-EXPORT_SYMBOL(compat_sock_common_setsockopt);
-#endif
-
 void sk_common_release(struct sock *sk)
 {
        if (sk->sk_prot->destroy)
@@ -3573,6 +3517,7 @@ int sock_load_diag_module(int family, int protocol)
 #ifdef CONFIG_INET
        if (family == AF_INET &&
            protocol != IPPROTO_RAW &&
+           protocol < MAX_INET_PROTOS &&
            !rcu_access_pointer(inet_protos[protocol]))
                return -ENOENT;
 #endif
index 4059f94..119f52a 100644 (file)
@@ -70,11 +70,49 @@ int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog)
        struct fd f;
        int ret;
 
+       if (attr->attach_flags || attr->replace_bpf_fd)
+               return -EINVAL;
+
        f = fdget(ufd);
        map = __bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
-       ret = sock_map_prog_update(map, prog, attr->attach_type);
+       ret = sock_map_prog_update(map, prog, NULL, attr->attach_type);
+       fdput(f);
+       return ret;
+}
+
+int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
+{
+       u32 ufd = attr->target_fd;
+       struct bpf_prog *prog;
+       struct bpf_map *map;
+       struct fd f;
+       int ret;
+
+       if (attr->attach_flags || attr->replace_bpf_fd)
+               return -EINVAL;
+
+       f = fdget(ufd);
+       map = __bpf_map_get(f);
+       if (IS_ERR(map))
+               return PTR_ERR(map);
+
+       prog = bpf_prog_get(attr->attach_bpf_fd);
+       if (IS_ERR(prog)) {
+               ret = PTR_ERR(prog);
+               goto put_map;
+       }
+
+       if (prog->type != ptype) {
+               ret = -EINVAL;
+               goto put_prog;
+       }
+
+       ret = sock_map_prog_update(map, NULL, prog, attr->attach_type);
+put_prog:
+       bpf_prog_put(prog);
+put_map:
        fdput(f);
        return ret;
 }
@@ -643,6 +681,7 @@ const struct bpf_func_proto bpf_msg_redirect_map_proto = {
        .arg4_type      = ARG_ANYTHING,
 };
 
+static int sock_map_btf_id;
 const struct bpf_map_ops sock_map_ops = {
        .map_alloc              = sock_map_alloc,
        .map_free               = sock_map_free,
@@ -653,9 +692,11 @@ const struct bpf_map_ops sock_map_ops = {
        .map_lookup_elem        = sock_map_lookup,
        .map_release_uref       = sock_map_release_progs,
        .map_check_btf          = map_check_no_btf,
+       .map_btf_name           = "bpf_stab",
+       .map_btf_id             = &sock_map_btf_id,
 };
 
-struct bpf_htab_elem {
+struct bpf_shtab_elem {
        struct rcu_head rcu;
        u32 hash;
        struct sock *sk;
@@ -663,14 +704,14 @@ struct bpf_htab_elem {
        u8 key[];
 };
 
-struct bpf_htab_bucket {
+struct bpf_shtab_bucket {
        struct hlist_head head;
        raw_spinlock_t lock;
 };
 
-struct bpf_htab {
+struct bpf_shtab {
        struct bpf_map map;
-       struct bpf_htab_bucket *buckets;
+       struct bpf_shtab_bucket *buckets;
        u32 buckets_num;
        u32 elem_size;
        struct sk_psock_progs progs;
@@ -682,17 +723,17 @@ static inline u32 sock_hash_bucket_hash(const void *key, u32 len)
        return jhash(key, len, 0);
 }
 
-static struct bpf_htab_bucket *sock_hash_select_bucket(struct bpf_htab *htab,
-                                                      u32 hash)
+static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
+                                                       u32 hash)
 {
        return &htab->buckets[hash & (htab->buckets_num - 1)];
 }
 
-static struct bpf_htab_elem *
+static struct bpf_shtab_elem *
 sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
                          u32 key_size)
 {
-       struct bpf_htab_elem *elem;
+       struct bpf_shtab_elem *elem;
 
        hlist_for_each_entry_rcu(elem, head, node) {
                if (elem->hash == hash &&
@@ -705,10 +746,10 @@ sock_hash_lookup_elem_raw(struct hlist_head *head, u32 hash, void *key,
 
 static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
 {
-       struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+       struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
        u32 key_size = map->key_size, hash;
-       struct bpf_htab_bucket *bucket;
-       struct bpf_htab_elem *elem;
+       struct bpf_shtab_bucket *bucket;
+       struct bpf_shtab_elem *elem;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
 
@@ -719,8 +760,8 @@ static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
        return elem ? elem->sk : NULL;
 }
 
-static void sock_hash_free_elem(struct bpf_htab *htab,
-                               struct bpf_htab_elem *elem)
+static void sock_hash_free_elem(struct bpf_shtab *htab,
+                               struct bpf_shtab_elem *elem)
 {
        atomic_dec(&htab->count);
        kfree_rcu(elem, rcu);
@@ -729,9 +770,9 @@ static void sock_hash_free_elem(struct bpf_htab *htab,
 static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
                                       void *link_raw)
 {
-       struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-       struct bpf_htab_elem *elem_probe, *elem = link_raw;
-       struct bpf_htab_bucket *bucket;
+       struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
+       struct bpf_shtab_elem *elem_probe, *elem = link_raw;
+       struct bpf_shtab_bucket *bucket;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
        bucket = sock_hash_select_bucket(htab, elem->hash);
@@ -753,10 +794,10 @@ static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
 
 static int sock_hash_delete_elem(struct bpf_map *map, void *key)
 {
-       struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+       struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
        u32 hash, key_size = map->key_size;
-       struct bpf_htab_bucket *bucket;
-       struct bpf_htab_elem *elem;
+       struct bpf_shtab_bucket *bucket;
+       struct bpf_shtab_elem *elem;
        int ret = -ENOENT;
 
        hash = sock_hash_bucket_hash(key, key_size);
@@ -774,12 +815,12 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key)
        return ret;
 }
 
-static struct bpf_htab_elem *sock_hash_alloc_elem(struct bpf_htab *htab,
-                                                 void *key, u32 key_size,
-                                                 u32 hash, struct sock *sk,
-                                                 struct bpf_htab_elem *old)
+static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
+                                                  void *key, u32 key_size,
+                                                  u32 hash, struct sock *sk,
+                                                  struct bpf_shtab_elem *old)
 {
-       struct bpf_htab_elem *new;
+       struct bpf_shtab_elem *new;
 
        if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
                if (!old) {
@@ -803,10 +844,10 @@ static struct bpf_htab_elem *sock_hash_alloc_elem(struct bpf_htab *htab,
 static int sock_hash_update_common(struct bpf_map *map, void *key,
                                   struct sock *sk, u64 flags)
 {
-       struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+       struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
        u32 key_size = map->key_size, hash;
-       struct bpf_htab_elem *elem, *elem_new;
-       struct bpf_htab_bucket *bucket;
+       struct bpf_shtab_elem *elem, *elem_new;
+       struct bpf_shtab_bucket *bucket;
        struct sk_psock_link *link;
        struct sk_psock *psock;
        int ret;
@@ -916,8 +957,8 @@ out:
 static int sock_hash_get_next_key(struct bpf_map *map, void *key,
                                  void *key_next)
 {
-       struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-       struct bpf_htab_elem *elem, *elem_next;
+       struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
+       struct bpf_shtab_elem *elem, *elem_next;
        u32 hash, key_size = map->key_size;
        struct hlist_head *head;
        int i = 0;
@@ -931,7 +972,7 @@ static int sock_hash_get_next_key(struct bpf_map *map, void *key,
                goto find_first_elem;
 
        elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&elem->node)),
-                                    struct bpf_htab_elem, node);
+                                    struct bpf_shtab_elem, node);
        if (elem_next) {
                memcpy(key_next, elem_next->key, key_size);
                return 0;
@@ -943,7 +984,7 @@ find_first_elem:
        for (; i < htab->buckets_num; i++) {
                head = &sock_hash_select_bucket(htab, i)->head;
                elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
-                                            struct bpf_htab_elem, node);
+                                            struct bpf_shtab_elem, node);
                if (elem_next) {
                        memcpy(key_next, elem_next->key, key_size);
                        return 0;
@@ -955,7 +996,7 @@ find_first_elem:
 
 static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
 {
-       struct bpf_htab *htab;
+       struct bpf_shtab *htab;
        int i, err;
        u64 cost;
 
@@ -977,15 +1018,15 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
        bpf_map_init_from_attr(&htab->map, attr);
 
        htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
-       htab->elem_size = sizeof(struct bpf_htab_elem) +
+       htab->elem_size = sizeof(struct bpf_shtab_elem) +
                          round_up(htab->map.key_size, 8);
        if (htab->buckets_num == 0 ||
-           htab->buckets_num > U32_MAX / sizeof(struct bpf_htab_bucket)) {
+           htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
                err = -EINVAL;
                goto free_htab;
        }
 
-       cost = (u64) htab->buckets_num * sizeof(struct bpf_htab_bucket) +
+       cost = (u64) htab->buckets_num * sizeof(struct bpf_shtab_bucket) +
               (u64) htab->elem_size * htab->map.max_entries;
        if (cost >= U32_MAX - PAGE_SIZE) {
                err = -EINVAL;
@@ -996,7 +1037,7 @@ static struct bpf_map *sock_hash_alloc(union bpf_attr *attr)
                goto free_htab;
 
        htab->buckets = bpf_map_area_alloc(htab->buckets_num *
-                                          sizeof(struct bpf_htab_bucket),
+                                          sizeof(struct bpf_shtab_bucket),
                                           htab->map.numa_node);
        if (!htab->buckets) {
                bpf_map_charge_finish(&htab->map.memory);
@@ -1017,10 +1058,10 @@ free_htab:
 
 static void sock_hash_free(struct bpf_map *map)
 {
-       struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-       struct bpf_htab_bucket *bucket;
+       struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
+       struct bpf_shtab_bucket *bucket;
        struct hlist_head unlink_list;
-       struct bpf_htab_elem *elem;
+       struct bpf_shtab_elem *elem;
        struct hlist_node *node;
        int i;
 
@@ -1096,7 +1137,7 @@ static void *sock_hash_lookup(struct bpf_map *map, void *key)
 
 static void sock_hash_release_progs(struct bpf_map *map)
 {
-       psock_progs_drop(&container_of(map, struct bpf_htab, map)->progs);
+       psock_progs_drop(&container_of(map, struct bpf_shtab, map)->progs);
 }
 
 BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, sops,
@@ -1176,6 +1217,7 @@ const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
        .arg4_type      = ARG_ANYTHING,
 };
 
+static int sock_hash_map_btf_id;
 const struct bpf_map_ops sock_hash_ops = {
        .map_alloc              = sock_hash_alloc,
        .map_free               = sock_hash_free,
@@ -1186,6 +1228,8 @@ const struct bpf_map_ops sock_hash_ops = {
        .map_lookup_elem_sys_only = sock_hash_lookup_sys,
        .map_release_uref       = sock_hash_release_progs,
        .map_check_btf          = map_check_no_btf,
+       .map_btf_name           = "bpf_shtab",
+       .map_btf_id             = &sock_hash_map_btf_id,
 };
 
 static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
@@ -1194,7 +1238,7 @@ static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
        case BPF_MAP_TYPE_SOCKMAP:
                return &container_of(map, struct bpf_stab, map)->progs;
        case BPF_MAP_TYPE_SOCKHASH:
-               return &container_of(map, struct bpf_htab, map)->progs;
+               return &container_of(map, struct bpf_shtab, map)->progs;
        default:
                break;
        }
@@ -1203,27 +1247,32 @@ static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
 }
 
 int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
-                        u32 which)
+                        struct bpf_prog *old, u32 which)
 {
        struct sk_psock_progs *progs = sock_map_progs(map);
+       struct bpf_prog **pprog;
 
        if (!progs)
                return -EOPNOTSUPP;
 
        switch (which) {
        case BPF_SK_MSG_VERDICT:
-               psock_set_prog(&progs->msg_parser, prog);
+               pprog = &progs->msg_parser;
                break;
        case BPF_SK_SKB_STREAM_PARSER:
-               psock_set_prog(&progs->skb_parser, prog);
+               pprog = &progs->skb_parser;
                break;
        case BPF_SK_SKB_STREAM_VERDICT:
-               psock_set_prog(&progs->skb_verdict, prog);
+               pprog = &progs->skb_verdict;
                break;
        default:
                return -EOPNOTSUPP;
        }
 
+       if (old)
+               return psock_replace_prog(pprog, prog, old);
+
+       psock_set_prog(pprog, prog);
        return 0;
 }
 
index f93f8ac..6ada114 100644 (file)
@@ -274,7 +274,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
        ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
        if (write && !ret) {
                if (jit_enable < 2 ||
-                   (jit_enable == 2 && bpf_dump_raw_ok())) {
+                   (jit_enable == 2 && bpf_dump_raw_ok(current_cred()))) {
                        *(int *)table->data = jit_enable;
                        if (jit_enable == 2)
                                pr_warn("bpf_jit_enable = 2 was set! NEVER use this in production, only for JIT debugging!\n");
index d4d5c07..4148f6d 100644 (file)
@@ -6,18 +6,17 @@
 #include <asm/unaligned.h>
 
 /* Calculate expected number of TX descriptors */
-int tso_count_descs(struct sk_buff *skb)
+int tso_count_descs(const struct sk_buff *skb)
 {
        /* The Marvell Way */
        return skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
 }
 EXPORT_SYMBOL(tso_count_descs);
 
-void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
+void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso,
                   int size, bool is_last)
 {
-       struct tcphdr *tcph;
-       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       int hdr_len = skb_transport_offset(skb) + tso->tlen;
        int mac_hdr_len = skb_network_offset(skb);
 
        memcpy(hdr, skb->data, hdr_len);
@@ -30,23 +29,31 @@ void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
        } else {
                struct ipv6hdr *iph = (void *)(hdr + mac_hdr_len);
 
-               iph->payload_len = htons(size + tcp_hdrlen(skb));
+               iph->payload_len = htons(size + tso->tlen);
        }
-       tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
-       put_unaligned_be32(tso->tcp_seq, &tcph->seq);
+       hdr += skb_transport_offset(skb);
+       if (tso->tlen != sizeof(struct udphdr)) {
+               struct tcphdr *tcph = (struct tcphdr *)hdr;
 
-       if (!is_last) {
-               /* Clear all special flags for not last packet */
-               tcph->psh = 0;
-               tcph->fin = 0;
-               tcph->rst = 0;
+               put_unaligned_be32(tso->tcp_seq, &tcph->seq);
+
+               if (!is_last) {
+                       /* Clear all special flags for not last packet */
+                       tcph->psh = 0;
+                       tcph->fin = 0;
+                       tcph->rst = 0;
+               }
+       } else {
+               struct udphdr *uh = (struct udphdr *)hdr;
+
+               uh->len = htons(sizeof(*uh) + size);
        }
 }
 EXPORT_SYMBOL(tso_build_hdr);
 
-void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size)
+void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size)
 {
-       tso->tcp_seq += size;
+       tso->tcp_seq += size; /* not worth avoiding this operation for UDP */
        tso->size -= size;
        tso->data += size;
 
@@ -62,12 +69,14 @@ void tso_build_data(struct sk_buff *skb, struct tso_t *tso, int size)
 }
 EXPORT_SYMBOL(tso_build_data);
 
-void tso_start(struct sk_buff *skb, struct tso_t *tso)
+int tso_start(struct sk_buff *skb, struct tso_t *tso)
 {
-       int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+       int tlen = skb_is_gso_tcp(skb) ? tcp_hdrlen(skb) : sizeof(struct udphdr);
+       int hdr_len = skb_transport_offset(skb) + tlen;
 
+       tso->tlen = tlen;
        tso->ip_id = ntohs(ip_hdr(skb)->id);
-       tso->tcp_seq = ntohl(tcp_hdr(skb)->seq);
+       tso->tcp_seq = (tlen != sizeof(struct udphdr)) ? ntohl(tcp_hdr(skb)->seq) : 0;
        tso->next_frag_idx = 0;
        tso->ipv6 = vlan_get_protocol(skb) == htons(ETH_P_IPV6);
 
@@ -83,5 +92,6 @@ void tso_start(struct sk_buff *skb, struct tso_t *tso)
                tso->data = skb_frag_address(frag);
                tso->next_frag_idx++;
        }
+       return hdr_len;
 }
 EXPORT_SYMBOL(tso_start);
index 90f44f3..3c45f99 100644 (file)
@@ -462,6 +462,7 @@ struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
        xdpf->len = totsize - metasize;
        xdpf->headroom = 0;
        xdpf->metasize = metasize;
+       xdpf->frame_sz = PAGE_SIZE;
        xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
 
        xsk_buff_free(xdp);
index d2a4553..84dde5a 100644 (file)
@@ -1736,7 +1736,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct net_device *netdev;
        struct dcbmsg *dcb = nlmsg_data(nlh);
        struct nlattr *tb[DCB_ATTR_MAX + 1];
-       u32 portid = skb ? NETLINK_CB(skb).portid : 0;
+       u32 portid = NETLINK_CB(skb).portid;
        int ret = -EINVAL;
        struct sk_buff *reply_skb;
        struct nlmsghdr *reply_nlh = NULL;
index 51ac263..0c7d2f6 100644 (file)
@@ -5,7 +5,7 @@ menuconfig IP_DCCP
        help
          Datagram Congestion Control Protocol (RFC 4340)
 
-         From http://www.ietf.org/rfc/rfc4340.txt:
+         From https://www.ietf.org/rfc/rfc4340.txt:
 
          The Datagram Congestion Control Protocol (DCCP) is a transport
          protocol that implements bidirectional, unicast connections of
index 4d7771f..a3eeb84 100644 (file)
@@ -26,13 +26,13 @@ config IP_DCCP_CCID3
          relatively smooth sending rate is of importance.
 
          CCID-3 is further described in RFC 4342,
-         http://www.ietf.org/rfc/rfc4342.txt
+         https://www.ietf.org/rfc/rfc4342.txt
 
          The TFRC congestion control algorithms were initially described in
          RFC 5348.
 
          This text was extracted from RFC 4340 (sec. 10.2),
-         http://www.ietf.org/rfc/rfc4340.txt
+         https://www.ietf.org/rfc/rfc4340.txt
 
          If in doubt, say N.
 
index 9ef9bee..aef72f6 100644 (file)
@@ -7,7 +7,7 @@
  *  An implementation of the DCCP protocol
  *
  *  This code has been developed by the University of Waikato WAND
- *  research group. For further information please see http://www.wand.net.nz/
+ *  research group. For further information please see https://www.wand.net.nz/
  *
  *  This code also uses code from Lulea University, rereleased as GPL by its
  *  authors:
index 081c195..02e0fc9 100644 (file)
@@ -6,7 +6,7 @@
  *  An implementation of the DCCP protocol
  *
  *  This code has been developed by the University of Waikato WAND
- *  research group. For further information please see http://www.wand.net.nz/
+ *  research group. For further information please see https://www.wand.net.nz/
  *  or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz
  *
  *  This code also uses code from Lulea University, rereleased as GPL by its
index 2d41bb0..af08e2d 100644 (file)
@@ -6,7 +6,7 @@
  *  An implementation of the DCCP protocol
  *
  *  This code has been developed by the University of Waikato WAND
- *  research group. For further information please see http://www.wand.net.nz/
+ *  research group. For further information please see https://www.wand.net.nz/
  *  or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz
  *
  *  This code also uses code from Lulea University, rereleased as GPL by its
@@ -365,6 +365,7 @@ void tfrc_rx_hist_purge(struct tfrc_rx_hist *h)
 
 /**
  * tfrc_rx_hist_rtt_last_s - reference entry to compute RTT samples against
+ * @h: The non-empty RX history object
  */
 static inline struct tfrc_rx_hist_entry *
                        tfrc_rx_hist_rtt_last_s(const struct tfrc_rx_hist *h)
@@ -374,6 +375,7 @@ static inline struct tfrc_rx_hist_entry *
 
 /**
  * tfrc_rx_hist_rtt_prev_s - previously suitable (wrt rtt_last_s) RTT-sampling entry
+ * @h: The non-empty RX history object
  */
 static inline struct tfrc_rx_hist_entry *
                        tfrc_rx_hist_rtt_prev_s(const struct tfrc_rx_hist *h)
index a157d87..159cc93 100644 (file)
@@ -6,7 +6,7 @@
  *  Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand.
  *
  *  This code has been developed by the University of Waikato WAND
- *  research group. For further information please see http://www.wand.net.nz/
+ *  research group. For further information please see https://www.wand.net.nz/
  *  or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz
  *
  *  This code also uses code from Lulea University, rereleased as GPL by its
index 7dce4f6..9cc9d1e 100644 (file)
@@ -295,13 +295,7 @@ int dccp_disconnect(struct sock *sk, int flags);
 int dccp_getsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int __user *optlen);
 int dccp_setsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, unsigned int optlen);
-#ifdef CONFIG_COMPAT
-int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
-                          char __user *optval, int __user *optlen);
-int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
-                          char __user *optval, unsigned int optlen);
-#endif
+                   sockptr_t optval, unsigned int optlen);
 int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
index 9c3b5e0..afc071e 100644 (file)
@@ -165,6 +165,8 @@ static const struct {
 
 /**
  * dccp_feat_index  -  Hash function to map feature number into array position
+ * @feat_num: feature to hash, one of %dccp_feature_numbers
+ *
  * Returns consecutive array index or -1 if the feature is not understood.
  */
 static int dccp_feat_index(u8 feat_num)
@@ -567,6 +569,8 @@ cloning_failed:
 
 /**
  * dccp_feat_valid_nn_length  -  Enforce length constraints on NN options
+ * @feat_num: feature to return length of, one of %dccp_feature_numbers
+ *
  * Length is between 0 and %DCCP_OPTVAL_MAXLEN. Used for outgoing packets only,
  * incoming options are accepted as long as their values are valid.
  */
@@ -1429,6 +1433,8 @@ int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
 
 /**
  * dccp_feat_init  -  Seed feature negotiation with host-specific defaults
+ * @sk: Socket to initialize.
+ *
  * This initialises global defaults, depending on the value of the sysctls.
  * These can later be overridden by registering changes via setsockopt calls.
  * The last link in the chain is finalise_settings, to make sure that between
index 6dce68a..bd9cfdb 100644 (file)
@@ -715,6 +715,7 @@ EXPORT_SYMBOL_GPL(dccp_rcv_state_process);
 
 /**
  *  dccp_sample_rtt  -  Validate and finalise computation of RTT sample
+ *  @sk:       socket structure
  *  @delta:    number of microseconds between packet and acknowledgment
  *
  *  The routine is kept generic to work in different contexts. It should be
index d19557c..9c28c82 100644 (file)
@@ -694,6 +694,8 @@ EXPORT_SYMBOL_GPL(dccp_v4_do_rcv);
 
 /**
  *     dccp_invalid_packet  -  check for malformed packets
+ *     @skb: Packet to validate
+ *
  *     Implements RFC 4340, 8.5:  Step 1: Check header basics
  *     Packets that fail these checks are ignored and do not receive Resets.
  */
@@ -911,10 +913,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv4_af_ops = {
        .getsockopt        = ip_getsockopt,
        .addr2sockaddr     = inet_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in),
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_ip_setsockopt,
-       .compat_getsockopt = compat_ip_getsockopt,
-#endif
 };
 
 static int dccp_v4_init_sock(struct sock *sk)
@@ -961,10 +959,6 @@ static struct proto dccp_v4_prot = {
        .rsk_prot               = &dccp_request_sock_ops,
        .twsk_prot              = &dccp_timewait_sock_ops,
        .h.hashinfo             = &dccp_hashinfo,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt      = compat_dccp_setsockopt,
-       .compat_getsockopt      = compat_dccp_getsockopt,
-#endif
 };
 
 static const struct net_protocol dccp_v4_protocol = {
@@ -997,10 +991,6 @@ static const struct proto_ops inet_dccp_ops = {
        .recvmsg           = sock_common_recvmsg,
        .mmap              = sock_no_mmap,
        .sendpage          = sock_no_sendpage,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_sock_common_setsockopt,
-       .compat_getsockopt = compat_sock_common_getsockopt,
-#endif
 };
 
 static struct inet_protosw dccp_v4_protosw = {
index 650187d..ef4ab28 100644 (file)
@@ -970,10 +970,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_ipv6_setsockopt,
-       .compat_getsockopt = compat_ipv6_getsockopt,
-#endif
 };
 
 /*
@@ -990,10 +986,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_ipv6_setsockopt,
-       .compat_getsockopt = compat_ipv6_getsockopt,
-#endif
 };
 
 /* NOTE: A lot of things set to zero explicitly by call to
@@ -1049,10 +1041,6 @@ static struct proto dccp_v6_prot = {
        .rsk_prot          = &dccp6_request_sock_ops,
        .twsk_prot         = &dccp6_timewait_sock_ops,
        .h.hashinfo        = &dccp_hashinfo,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_dccp_setsockopt,
-       .compat_getsockopt = compat_dccp_getsockopt,
-#endif
 };
 
 static const struct inet6_protocol dccp_v6_protocol = {
@@ -1083,8 +1071,6 @@ static const struct proto_ops inet6_dccp_ops = {
        .sendpage          = sock_no_sendpage,
 #ifdef CONFIG_COMPAT
        .compat_ioctl      = inet6_compat_ioctl,
-       .compat_setsockopt = compat_sock_common_setsockopt,
-       .compat_getsockopt = compat_sock_common_getsockopt,
 #endif
 };
 
index 3b42f5c..daa9eed 100644 (file)
@@ -43,6 +43,7 @@ u64 dccp_decode_value_var(const u8 *bf, const u8 len)
  * dccp_parse_options  -  Parse DCCP options present in @skb
  * @sk: client|server|listening dccp socket (when @dreq != NULL)
  * @dreq: request socket to use during connection setup, or NULL
+ * @skb: frame to parse
  */
 int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
                       struct sk_buff *skb)
@@ -471,6 +472,8 @@ static int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
 
 /**
  * dccp_insert_option_mandatory  -  Mandatory option (5.8.2)
+ * @skb: frame into which to insert option
+ *
  * Note that since we are using skb_push, this function needs to be called
  * _after_ inserting the option it is supposed to influence (stack order).
  */
@@ -486,6 +489,7 @@ int dccp_insert_option_mandatory(struct sk_buff *skb)
 
 /**
  * dccp_insert_fn_opt  -  Insert single Feature-Negotiation option into @skb
+ * @skb: frame to insert feature negotiation option into
  * @type: %DCCPO_CHANGE_L, %DCCPO_CHANGE_R, %DCCPO_CONFIRM_L, %DCCPO_CONFIRM_R
  * @feat: one out of %dccp_feature_numbers
  * @val: NN value or SP array (preferred element first) to copy
index c13b660..2e9e844 100644 (file)
@@ -375,6 +375,15 @@ int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
                goto out;
 
        switch (cmd) {
+       case SIOCOUTQ: {
+               int amount = sk_wmem_alloc_get(sk);
+               /* Using sk_wmem_alloc here because sk_wmem_queued is not used by DCCP and
+                * always 0, comparably to UDP.
+                */
+
+               rc = put_user(amount, (int __user *)arg);
+       }
+               break;
        case SIOCINQ: {
                struct sk_buff *skb;
                unsigned long amount = 0;
@@ -402,7 +411,7 @@ out:
 EXPORT_SYMBOL_GPL(dccp_ioctl);
 
 static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
-                                  char __user *optval, unsigned int optlen)
+                                  sockptr_t optval, unsigned int optlen)
 {
        struct dccp_sock *dp = dccp_sk(sk);
        struct dccp_service_list *sl = NULL;
@@ -417,9 +426,9 @@ static int dccp_setsockopt_service(struct sock *sk, const __be32 service,
                        return -ENOMEM;
 
                sl->dccpsl_nr = optlen / sizeof(u32) - 1;
-               if (copy_from_user(sl->dccpsl_list,
-                                  optval + sizeof(service),
-                                  optlen - sizeof(service)) ||
+               sockptr_advance(optval, sizeof(service));
+               if (copy_from_sockptr(sl->dccpsl_list, optval,
+                                     optlen - sizeof(service)) ||
                    dccp_list_has_service(sl, DCCP_SERVICE_INVALID_VALUE)) {
                        kfree(sl);
                        return -EFAULT;
@@ -473,7 +482,7 @@ static int dccp_setsockopt_cscov(struct sock *sk, int cscov, bool rx)
 }
 
 static int dccp_setsockopt_ccid(struct sock *sk, int type,
-                               char __user *optval, unsigned int optlen)
+                               sockptr_t optval, unsigned int optlen)
 {
        u8 *val;
        int rc = 0;
@@ -481,7 +490,7 @@ static int dccp_setsockopt_ccid(struct sock *sk, int type,
        if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
                return -EINVAL;
 
-       val = memdup_user(optval, optlen);
+       val = memdup_sockptr(optval, optlen);
        if (IS_ERR(val))
                return PTR_ERR(val);
 
@@ -498,7 +507,7 @@ static int dccp_setsockopt_ccid(struct sock *sk, int type,
 }
 
 static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
-               char __user *optval, unsigned int optlen)
+               sockptr_t optval, unsigned int optlen)
 {
        struct dccp_sock *dp = dccp_sk(sk);
        int val, err = 0;
@@ -520,7 +529,7 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
        if (optlen < (int)sizeof(int))
                return -EINVAL;
 
-       if (get_user(val, (int __user *)optval))
+       if (copy_from_sockptr(&val, optval, sizeof(int)))
                return -EFAULT;
 
        if (optname == DCCP_SOCKOPT_SERVICE)
@@ -563,8 +572,8 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
        return err;
 }
 
-int dccp_setsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, unsigned int optlen)
+int dccp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+                   unsigned int optlen)
 {
        if (level != SOL_DCCP)
                return inet_csk(sk)->icsk_af_ops->setsockopt(sk, level,
@@ -575,19 +584,6 @@ int dccp_setsockopt(struct sock *sk, int level, int optname,
 
 EXPORT_SYMBOL_GPL(dccp_setsockopt);
 
-#ifdef CONFIG_COMPAT
-int compat_dccp_setsockopt(struct sock *sk, int level, int optname,
-                          char __user *optval, unsigned int optlen)
-{
-       if (level != SOL_DCCP)
-               return inet_csk_compat_setsockopt(sk, level, optname,
-                                                 optval, optlen);
-       return do_dccp_setsockopt(sk, level, optname, optval, optlen);
-}
-
-EXPORT_SYMBOL_GPL(compat_dccp_setsockopt);
-#endif
-
 static int dccp_getsockopt_service(struct sock *sk, int len,
                                   __be32 __user *optval,
                                   int __user *optlen)
@@ -696,19 +692,6 @@ int dccp_getsockopt(struct sock *sk, int level, int optname,
 
 EXPORT_SYMBOL_GPL(dccp_getsockopt);
 
-#ifdef CONFIG_COMPAT
-int compat_dccp_getsockopt(struct sock *sk, int level, int optname,
-                          char __user *optval, int __user *optlen)
-{
-       if (level != SOL_DCCP)
-               return inet_csk_compat_getsockopt(sk, level, optname,
-                                                 optval, optlen);
-       return do_dccp_getsockopt(sk, level, optname, optval, optlen);
-}
-
-EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
-#endif
-
 static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
 {
        struct cmsghdr *cmsg;
index c0b3672..0e06dfc 100644 (file)
@@ -216,6 +216,8 @@ out:
 
 /**
  * dccp_write_xmitlet  -  Workhorse for CCID packet dequeueing interface
+ * @data: Socket to act on
+ *
  * See the comments above %ccid_dequeueing_decision for supported modes.
  */
 static void dccp_write_xmitlet(unsigned long data)
index 0a46ea3..3b53d76 100644 (file)
@@ -150,7 +150,8 @@ static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
 static struct hlist_head dn_wild_sk;
 static atomic_long_t decnet_memory_allocated;
 
-static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags);
+static int __dn_setsockopt(struct socket *sock, int level, int optname,
+               sockptr_t optval, unsigned int optlen, int flags);
 static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
 
 static struct hlist_head *dn_find_list(struct sock *sk)
@@ -1320,7 +1321,8 @@ out:
        return err;
 }
 
-static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
+static int dn_setsockopt(struct socket *sock, int level, int optname,
+               sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        int err;
@@ -1338,7 +1340,8 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char __use
        return err;
 }
 
-static int __dn_setsockopt(struct socket *sock, int level,int optname, char __user *optval, unsigned int optlen, int flags)
+static int __dn_setsockopt(struct socket *sock, int level, int optname,
+               sockptr_t optval, unsigned int optlen, int flags)
 {
        struct  sock *sk = sock->sk;
        struct dn_scp *scp = DN_SK(sk);
@@ -1354,13 +1357,13 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
        } u;
        int err;
 
-       if (optlen && !optval)
+       if (optlen && sockptr_is_null(optval))
                return -EINVAL;
 
        if (optlen > sizeof(u))
                return -EINVAL;
 
-       if (copy_from_user(&u, optval, optlen))
+       if (copy_from_sockptr(&u, optval, optlen))
                return -EFAULT;
 
        switch (optname) {
@@ -2134,14 +2137,11 @@ static struct sock *dn_socket_get_next(struct seq_file *seq,
        struct dn_iter_state *state = seq->private;
 
        n = sk_next(n);
-try_again:
-       if (n)
-               goto out;
-       if (++state->bucket >= DN_SK_HASH_SIZE)
-               goto out;
-       n = sk_head(&dn_sk_hash[state->bucket]);
-       goto try_again;
-out:
+       while (!n) {
+               if (++state->bucket >= DN_SK_HASH_SIZE)
+                       break;
+               n = sk_head(&dn_sk_hash[state->bucket]);
+       }
        return n;
 }
 
index 65abcf1..15d4235 100644 (file)
@@ -462,7 +462,9 @@ int dn_dev_ioctl(unsigned int cmd, void __user *arg)
        switch (cmd) {
        case SIOCGIFADDR:
                *((__le16 *)sdn->sdn_nodeaddr) = ifa->ifa_local;
-               goto rarok;
+               if (copy_to_user(arg, ifr, DN_IFREQ_SIZE))
+                       ret = -EFAULT;
+               break;
 
        case SIOCSIFADDR:
                if (!ifa) {
@@ -485,10 +487,6 @@ done:
        rtnl_unlock();
 
        return ret;
-rarok:
-       if (copy_to_user(arg, ifr, DN_IFREQ_SIZE))
-               ret = -EFAULT;
-       goto done;
 }
 
 struct net_device *dn_dev_get_default(void)
index 06b9983..4cac31d 100644 (file)
@@ -494,6 +494,8 @@ static int dn_return_long(struct sk_buff *skb)
 
 /**
  * dn_route_rx_packet - Try and find a route for an incoming packet
+ * @net: The applicable net namespace
+ * @sk: Socket packet transmitted on
  * @skb: The packet to find a route for
  *
  * Returns: result of input function if route is found, error code otherwise
@@ -670,7 +672,7 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
        if (decnet_debug_level & 1)
                printk(KERN_DEBUG
                        "dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
-                       (int)flags, (dev) ? dev->name : "???", len, skb->len,
+                       (int)flags, dev->name, len, skb->len,
                        padlen);
 
        if (flags & DN_RT_PKT_CNTL) {
index 57a6a88..1f9be21 100644 (file)
@@ -39,7 +39,7 @@ struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
 }
 EXPORT_SYMBOL(devm_alloc_etherdev_mqs);
 
-static void devm_netdev_release(struct device *dev, void *this)
+static void devm_unregister_netdev(struct device *dev, void *this)
 {
        struct net_device_devres *res = this;
 
@@ -77,7 +77,7 @@ int devm_register_netdev(struct device *dev, struct net_device *ndev)
                                 netdev_devres_match, ndev)))
                return -EINVAL;
 
-       dr = devres_alloc(devm_netdev_release, sizeof(*dr), GFP_KERNEL);
+       dr = devres_alloc(devm_unregister_netdev, sizeof(*dr), GFP_KERNEL);
        if (!dr)
                return -ENOMEM;
 
index d5bc6ac..1f9b9b1 100644 (file)
@@ -86,6 +86,13 @@ config NET_DSA_TAG_KSZ
          Say Y if you want to enable support for tagging frames for the
          Microchip 8795/9477/9893 families of switches.
 
+config NET_DSA_TAG_RTL4_A
+       tristate "Tag driver for Realtek 4 byte protocol A tags"
+       help
+         Say Y or M if you want to enable support for tagging frames for the
+         Realtek switches with 4 byte protocol A tags, such as found in
+         the Realtek RTL8366RB.
+
 config NET_DSA_TAG_OCELOT
        tristate "Tag driver for Ocelot family of switches"
        select PACKING
index 108486c..4f47b20 100644 (file)
@@ -11,6 +11,7 @@ obj-$(CONFIG_NET_DSA_TAG_DSA) += tag_dsa.o
 obj-$(CONFIG_NET_DSA_TAG_EDSA) += tag_edsa.o
 obj-$(CONFIG_NET_DSA_TAG_GSWIP) += tag_gswip.o
 obj-$(CONFIG_NET_DSA_TAG_KSZ) += tag_ksz.o
+obj-$(CONFIG_NET_DSA_TAG_RTL4_A) += tag_rtl4_a.o
 obj-$(CONFIG_NET_DSA_TAG_LAN9303) += tag_lan9303.o
 obj-$(CONFIG_NET_DSA_TAG_MTK) += tag_mtk.o
 obj-$(CONFIG_NET_DSA_TAG_OCELOT) += tag_ocelot.o
index 076908f..c0ffc7a 100644 (file)
@@ -261,10 +261,15 @@ static int dsa_port_setup(struct dsa_port *dp)
        struct devlink_port *dlp = &dp->devlink_port;
        bool dsa_port_link_registered = false;
        bool devlink_port_registered = false;
+       struct devlink_port_attrs attrs = {};
        struct devlink *dl = ds->devlink;
        bool dsa_port_enabled = false;
        int err = 0;
 
+       attrs.phys.port_number = dp->index;
+       memcpy(attrs.switch_id.id, id, len);
+       attrs.switch_id.id_len = len;
+
        if (dp->setup)
                return 0;
 
@@ -274,8 +279,8 @@ static int dsa_port_setup(struct dsa_port *dp)
                break;
        case DSA_PORT_TYPE_CPU:
                memset(dlp, 0, sizeof(*dlp));
-               devlink_port_attrs_set(dlp, DEVLINK_PORT_FLAVOUR_CPU,
-                                      dp->index, false, 0, id, len);
+               attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
+               devlink_port_attrs_set(dlp, &attrs);
                err = devlink_port_register(dl, dlp, dp->index);
                if (err)
                        break;
@@ -294,8 +299,8 @@ static int dsa_port_setup(struct dsa_port *dp)
                break;
        case DSA_PORT_TYPE_DSA:
                memset(dlp, 0, sizeof(*dlp));
-               devlink_port_attrs_set(dlp, DEVLINK_PORT_FLAVOUR_DSA,
-                                      dp->index, false, 0, id, len);
+               attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
+               devlink_port_attrs_set(dlp, &attrs);
                err = devlink_port_register(dl, dlp, dp->index);
                if (err)
                        break;
@@ -314,8 +319,8 @@ static int dsa_port_setup(struct dsa_port *dp)
                break;
        case DSA_PORT_TYPE_USER:
                memset(dlp, 0, sizeof(*dlp));
-               devlink_port_attrs_set(dlp, DEVLINK_PORT_FLAVOUR_PHYSICAL,
-                                      dp->index, false, 0, id, len);
+               attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+               devlink_port_attrs_set(dlp, &attrs);
                err = devlink_port_register(dl, dlp, dp->index);
                if (err)
                        break;
@@ -722,8 +727,12 @@ static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
 
        ports = of_get_child_by_name(dn, "ports");
        if (!ports) {
-               dev_err(ds->dev, "no ports child node found\n");
-               return -EINVAL;
+               /* The second possibility is "ethernet-ports" */
+               ports = of_get_child_by_name(dn, "ethernet-ports");
+               if (!ports) {
+                       dev_err(ds->dev, "no ports child node found\n");
+                       return -EINVAL;
+               }
        }
 
        for_each_available_child_of_node(ports, port) {
index adecf73..1653e33 100644 (file)
@@ -77,7 +77,7 @@ struct dsa_slave_priv {
        struct sk_buff *        (*xmit)(struct sk_buff *skb,
                                        struct net_device *dev);
 
-       struct pcpu_sw_netstats *stats64;
+       struct pcpu_sw_netstats __percpu *stats64;
 
        struct gro_cells        gcells;
 
index 480a614..61615eb 100644 (file)
@@ -186,17 +186,6 @@ static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset,
        }
 }
 
-static int dsa_master_get_phys_port_name(struct net_device *dev,
-                                        char *name, size_t len)
-{
-       struct dsa_port *cpu_dp = dev->dsa_ptr;
-
-       if (snprintf(name, len, "p%d", cpu_dp->index) >= len)
-               return -EINVAL;
-
-       return 0;
-}
-
 static int dsa_master_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
        struct dsa_port *cpu_dp = dev->dsa_ptr;
@@ -220,12 +209,16 @@ static int dsa_master_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                break;
        }
 
-       if (cpu_dp->orig_ndo_ops && cpu_dp->orig_ndo_ops->ndo_do_ioctl)
-               err = cpu_dp->orig_ndo_ops->ndo_do_ioctl(dev, ifr, cmd);
+       if (dev->netdev_ops->ndo_do_ioctl)
+               err = dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd);
 
        return err;
 }
 
+static const struct dsa_netdevice_ops dsa_netdev_ops = {
+       .ndo_do_ioctl = dsa_master_ioctl,
+};
+
 static int dsa_master_ethtool_setup(struct net_device *dev)
 {
        struct dsa_port *cpu_dp = dev->dsa_ptr;
@@ -260,38 +253,10 @@ static void dsa_master_ethtool_teardown(struct net_device *dev)
        cpu_dp->orig_ethtool_ops = NULL;
 }
 
-static int dsa_master_ndo_setup(struct net_device *dev)
+static void dsa_netdev_ops_set(struct net_device *dev,
+                              const struct dsa_netdevice_ops *ops)
 {
-       struct dsa_port *cpu_dp = dev->dsa_ptr;
-       struct dsa_switch *ds = cpu_dp->ds;
-       struct net_device_ops *ops;
-
-       if (dev->netdev_ops->ndo_get_phys_port_name)
-               return 0;
-
-       ops = devm_kzalloc(ds->dev, sizeof(*ops), GFP_KERNEL);
-       if (!ops)
-               return -ENOMEM;
-
-       cpu_dp->orig_ndo_ops = dev->netdev_ops;
-       if (cpu_dp->orig_ndo_ops)
-               memcpy(ops, cpu_dp->orig_ndo_ops, sizeof(*ops));
-
-       ops->ndo_get_phys_port_name = dsa_master_get_phys_port_name;
-       ops->ndo_do_ioctl = dsa_master_ioctl;
-
-       dev->netdev_ops  = ops;
-
-       return 0;
-}
-
-static void dsa_master_ndo_teardown(struct net_device *dev)
-{
-       struct dsa_port *cpu_dp = dev->dsa_ptr;
-
-       if (cpu_dp->orig_ndo_ops)
-               dev->netdev_ops = cpu_dp->orig_ndo_ops;
-       cpu_dp->orig_ndo_ops = NULL;
+       dev->dsa_ptr->netdev_ops = ops;
 }
 
 static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
@@ -353,9 +318,7 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
        if (ret)
                return ret;
 
-       ret = dsa_master_ndo_setup(dev);
-       if (ret)
-               goto out_err_ethtool_teardown;
+       dsa_netdev_ops_set(dev, &dsa_netdev_ops);
 
        ret = sysfs_create_group(&dev->dev.kobj, &dsa_group);
        if (ret)
@@ -364,8 +327,7 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
        return ret;
 
 out_err_ndo_teardown:
-       dsa_master_ndo_teardown(dev);
-out_err_ethtool_teardown:
+       dsa_netdev_ops_set(dev, NULL);
        dsa_master_ethtool_teardown(dev);
        return ret;
 }
@@ -373,7 +335,7 @@ out_err_ethtool_teardown:
 void dsa_master_teardown(struct net_device *dev)
 {
        sysfs_remove_group(&dev->dev.kobj, &dsa_group);
-       dsa_master_ndo_teardown(dev);
+       dsa_netdev_ops_set(dev, NULL);
        dsa_master_ethtool_teardown(dev);
        dsa_master_reset_mtu(dev);
 
index 4c7f086..41d60ee 100644 (file)
@@ -1754,11 +1754,8 @@ int dsa_slave_create(struct dsa_port *port)
                eth_hw_addr_inherit(slave_dev, master);
        slave_dev->priv_flags |= IFF_NO_QUEUE;
        slave_dev->netdev_ops = &dsa_slave_netdev_ops;
-       slave_dev->min_mtu = 0;
        if (ds->ops->port_max_mtu)
                slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index);
-       else
-               slave_dev->max_mtu = ETH_MAX_MTU;
        SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
 
        netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
@@ -1795,7 +1792,8 @@ int dsa_slave_create(struct dsa_port *port)
 
        ret = dsa_slave_phy_setup(slave_dev);
        if (ret) {
-               netdev_err(master, "error %d setting up slave phy\n", ret);
+               netdev_err(master, "error %d setting up slave PHY for %s\n",
+                          ret, slave_dev->name);
                goto out_gcells;
        }
 
index e8eaa80..d6200ff 100644 (file)
 #define DSA_HLEN       4
 #define EDSA_HLEN      8
 
+#define FRAME_TYPE_TO_CPU      0x00
+#define FRAME_TYPE_FORWARD     0x03
+
+#define TO_CPU_CODE_MGMT_TRAP          0x00
+#define TO_CPU_CODE_FRAME2REG          0x01
+#define TO_CPU_CODE_IGMP_MLD_TRAP      0x02
+#define TO_CPU_CODE_POLICY_TRAP                0x03
+#define TO_CPU_CODE_ARP_MIRROR         0x04
+#define TO_CPU_CODE_POLICY_MIRROR      0x05
+
 static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct dsa_port *dp = dsa_slave_to_port(dev);
@@ -77,6 +87,8 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
                                struct packet_type *pt)
 {
        u8 *edsa_header;
+       int frame_type;
+       int code;
        int source_device;
        int source_port;
 
@@ -91,8 +103,29 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
        /*
         * Check that frame type is either TO_CPU or FORWARD.
         */
-       if ((edsa_header[0] & 0xc0) != 0x00 && (edsa_header[0] & 0xc0) != 0xc0)
+       frame_type = edsa_header[0] >> 6;
+
+       switch (frame_type) {
+       case FRAME_TYPE_TO_CPU:
+               code = (edsa_header[1] & 0x6) | ((edsa_header[2] >> 4) & 1);
+
+               /*
+                * Mark the frame to never egress on any port of the same switch
+                * unless it's a trapped IGMP/MLD packet, in which case the
+                * bridge might want to forward it.
+                */
+               if (code != TO_CPU_CODE_IGMP_MLD_TRAP)
+                       skb->offload_fwd_mark = 1;
+
+               break;
+
+       case FRAME_TYPE_FORWARD:
+               skb->offload_fwd_mark = 1;
+               break;
+
+       default:
                return NULL;
+       }
 
        /*
         * Determine source device and port.
@@ -156,8 +189,6 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
                        2 * ETH_ALEN);
        }
 
-       skb->offload_fwd_mark = 1;
-
        return skb;
 }
 
index 90d055c..bd1a315 100644 (file)
@@ -156,8 +156,9 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,
 {
        struct dsa_port *dp = dsa_slave_to_port(dev);
        struct sk_buff *nskb;
-       u16 *tag;
+       __be16 *tag;
        u8 *addr;
+       u16 val;
 
        nskb = ksz_common_xmit(skb, dev, KSZ9477_INGRESS_TAG_LEN);
        if (!nskb)
@@ -167,12 +168,12 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb,
        tag = skb_put(nskb, KSZ9477_INGRESS_TAG_LEN);
        addr = skb_mac_header(nskb);
 
-       *tag = BIT(dp->index);
+       val = BIT(dp->index);
 
        if (is_link_local_ether_addr(addr))
-               *tag |= KSZ9477_TAIL_TAG_OVERRIDE;
+               val |= KSZ9477_TAIL_TAG_OVERRIDE;
 
-       *tag = cpu_to_be16(*tag);
+       *tag = cpu_to_be16(val);
 
        return nskb;
 }
index eb0e7a3..ccfb6f6 100644 (file)
@@ -55,7 +55,8 @@ static int lan9303_xmit_use_arl(struct dsa_port *dp, u8 *dest_addr)
 static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct dsa_port *dp = dsa_slave_to_port(dev);
-       u16 *lan9303_tag;
+       __be16 *lan9303_tag;
+       u16 tag;
 
        /* insert a special VLAN tag between the MAC addresses
         * and the current ethertype field.
@@ -72,12 +73,12 @@ static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev)
        /* make room between MACs and Ether-Type */
        memmove(skb->data, skb->data + LAN9303_TAG_LEN, 2 * ETH_ALEN);
 
-       lan9303_tag = (u16 *)(skb->data + 2 * ETH_ALEN);
+       lan9303_tag = (__be16 *)(skb->data + 2 * ETH_ALEN);
+       tag = lan9303_xmit_use_arl(dp, skb->data) ?
+               LAN9303_TAG_TX_USE_ALR :
+               dp->index | LAN9303_TAG_TX_STP_OVERRIDE;
        lan9303_tag[0] = htons(ETH_P_8021Q);
-       lan9303_tag[1] = lan9303_xmit_use_arl(dp, skb->data) ?
-                               LAN9303_TAG_TX_USE_ALR :
-                               dp->index | LAN9303_TAG_TX_STP_OVERRIDE;
-       lan9303_tag[1] = htons(lan9303_tag[1]);
+       lan9303_tag[1] = htons(tag);
 
        return skb;
 }
@@ -85,7 +86,7 @@ static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev)
 static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev,
                                   struct packet_type *pt)
 {
-       u16 *lan9303_tag;
+       __be16 *lan9303_tag;
        u16 lan9303_tag1;
        unsigned int source_port;
 
@@ -101,7 +102,7 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev,
         *                           ^
         *                        ->data
         */
-       lan9303_tag = (u16 *)(skb->data - 2);
+       lan9303_tag = (__be16 *)(skb->data - 2);
 
        if (lan9303_tag[0] != htons(ETH_P_8021Q)) {
                dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid VLAN marker\n");
index d6619ed..f602fc7 100644 (file)
@@ -67,8 +67,9 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb,
 static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev,
                                   struct packet_type *pt)
 {
+       u16 hdr;
        int port;
-       __be16 *phdr, hdr;
+       __be16 *phdr;
        unsigned char *dest = eth_hdr(skb)->h_dest;
        bool is_multicast_skb = is_multicast_ether_addr(dest) &&
                                !is_broadcast_ether_addr(dest);
index b0c98ee..42f327c 100644 (file)
@@ -137,11 +137,10 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
                                   struct net_device *netdev)
 {
        struct dsa_port *dp = dsa_slave_to_port(netdev);
-       u64 bypass, dest, src, qos_class, rew_op;
        struct dsa_switch *ds = dp->ds;
-       int port = dp->index;
        struct ocelot *ocelot = ds->priv;
-       struct ocelot_port *ocelot_port = ocelot->ports[port];
+       struct ocelot_port *ocelot_port;
+       u64 qos_class, rew_op;
        u8 *injection;
 
        if (unlikely(skb_cow_head(skb, OCELOT_TAG_LEN) < 0)) {
@@ -149,19 +148,15 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
                return NULL;
        }
 
-       injection = skb_push(skb, OCELOT_TAG_LEN);
+       ocelot_port = ocelot->ports[dp->index];
 
-       memset(injection, 0, OCELOT_TAG_LEN);
+       injection = skb_push(skb, OCELOT_TAG_LEN);
 
-       /* Set the source port as the CPU port module and not the NPI port */
-       src = ocelot->num_phys_ports;
-       dest = BIT(port);
-       bypass = true;
+       memcpy(injection, ocelot_port->xmit_template, OCELOT_TAG_LEN);
+       /* Fix up the fields which are not statically determined
+        * in the template
+        */
        qos_class = skb->priority;
-
-       packing(injection, &bypass,   127, 127, OCELOT_TAG_LEN, PACK, 0);
-       packing(injection, &dest,      68,  56, OCELOT_TAG_LEN, PACK, 0);
-       packing(injection, &src,       46,  43, OCELOT_TAG_LEN, PACK, 0);
        packing(injection, &qos_class, 19,  17, OCELOT_TAG_LEN, PACK, 0);
 
        if (ocelot->ptp && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
index 70db7c9..7066f5e 100644 (file)
@@ -31,7 +31,8 @@
 static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct dsa_port *dp = dsa_slave_to_port(dev);
-       u16 *phdr, hdr;
+       __be16 *phdr;
+       u16 hdr;
 
        if (skb_cow_head(skb, QCA_HDR_LEN) < 0)
                return NULL;
@@ -39,7 +40,7 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
        skb_push(skb, QCA_HDR_LEN);
 
        memmove(skb->data, skb->data + QCA_HDR_LEN, 2 * ETH_ALEN);
-       phdr = (u16 *)(skb->data + 2 * ETH_ALEN);
+       phdr = (__be16 *)(skb->data + 2 * ETH_ALEN);
 
        /* Set the version field, and set destination port information */
        hdr = QCA_HDR_VERSION << QCA_HDR_XMIT_VERSION_S |
@@ -54,8 +55,9 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
                                   struct packet_type *pt)
 {
        u8 ver;
+       u16  hdr;
        int port;
-       __be16 *phdr, hdr;
+       __be16 *phdr;
 
        if (unlikely(!pskb_may_pull(skb, QCA_HDR_LEN)))
                return NULL;
diff --git a/net/dsa/tag_rtl4_a.c b/net/dsa/tag_rtl4_a.c
new file mode 100644 (file)
index 0000000..7b63010
--- /dev/null
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Handler for Realtek 4 byte DSA switch tags
+ * Currently only supports protocol "A" found in RTL8366RB
+ * Copyright (c) 2020 Linus Walleij <linus.walleij@linaro.org>
+ *
+ * This "proprietary tag" header looks like so:
+ *
+ * -------------------------------------------------
+ * | MAC DA | MAC SA | 0x8899 | 2 bytes tag | Type |
+ * -------------------------------------------------
+ *
+ * The 2 bytes tag form a 16 bit big endian word. The exact
+ * meaning has been guessed from packet dumps from ingress
+ * frames, as no working egress traffic has been available
+ * we do not know the format of the egress tags or if they
+ * are even supported.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/bits.h>
+
+#include "dsa_priv.h"
+
+#define RTL4_A_HDR_LEN         4
+#define RTL4_A_ETHERTYPE       0x8899
+#define RTL4_A_PROTOCOL_SHIFT  12
+/*
+ * 0x1 = Realtek Remote Control protocol (RRCP)
+ * 0x2/0x3 seems to be used for loopback testing
+ * 0x9 = RTL8306 DSA protocol
+ * 0xa = RTL8366RB DSA protocol
+ */
+#define RTL4_A_PROTOCOL_RTL8366RB      0xa
+
+static struct sk_buff *rtl4a_tag_xmit(struct sk_buff *skb,
+                                     struct net_device *dev)
+{
+       /*
+        * Just let it pass through, we don't know if it is possible
+        * to tag a frame with the 0x8899 ethertype and direct it
+        * to a specific port, all attempts at reverse-engineering have
+        * ended up with the frames getting dropped.
+        *
+        * The VLAN set-up needs to restrict the frames to the right port.
+        *
+        * If you have documentation on the tagging format for RTL8366RB
+        * (tag type A) then please contribute.
+        */
+       return skb;
+}
+
+static struct sk_buff *rtl4a_tag_rcv(struct sk_buff *skb,
+                                    struct net_device *dev,
+                                    struct packet_type *pt)
+{
+       u16 protport;
+       __be16 *p;
+       u16 etype;
+       u8 *tag;
+       u8 prot;
+       u8 port;
+
+       if (unlikely(!pskb_may_pull(skb, RTL4_A_HDR_LEN)))
+               return NULL;
+
+       /* The RTL4 header has its own custom Ethertype 0x8899 and that
+        * starts right at the beginning of the packet, after the src
+        * ethernet addr. Apparently skb->data always points 2 bytes in,
+        * behind the Ethertype.
+        */
+       tag = skb->data - 2;
+       p = (__be16 *)tag;
+       etype = ntohs(*p);
+       if (etype != RTL4_A_ETHERTYPE) {
+               /* Not custom, just pass through */
+               netdev_dbg(dev, "non-realtek ethertype 0x%04x\n", etype);
+               return skb;
+       }
+       p = (__be16 *)(tag + 2);
+       protport = ntohs(*p);
+       /* The 4 upper bits are the protocol */
+       prot = (protport >> RTL4_A_PROTOCOL_SHIFT) & 0x0f;
+       if (prot != RTL4_A_PROTOCOL_RTL8366RB) {
+               netdev_err(dev, "unknown realtek protocol 0x%01x\n", prot);
+               return NULL;
+       }
+       port = protport & 0xff;
+
+       skb->dev = dsa_master_find_slave(dev, 0, port);
+       if (!skb->dev) {
+               netdev_dbg(dev, "could not find slave for port %d\n", port);
+               return NULL;
+       }
+
+       /* Remove RTL4 tag and recalculate checksum */
+       skb_pull_rcsum(skb, RTL4_A_HDR_LEN);
+
+       /* Move ethernet DA and SA in front of the data */
+       memmove(skb->data - ETH_HLEN,
+               skb->data - ETH_HLEN - RTL4_A_HDR_LEN,
+               2 * ETH_ALEN);
+
+       skb->offload_fwd_mark = 1;
+
+       return skb;
+}
+
+static int rtl4a_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
+                                 int *offset)
+{
+       *offset = RTL4_A_HDR_LEN;
+       /* Skip past the tag and fetch the encapsulated Ethertype */
+       *proto = ((__be16 *)skb->data)[1];
+
+       return 0;
+}
+
+static const struct dsa_device_ops rtl4a_netdev_ops = {
+       .name   = "rtl4a",
+       .proto  = DSA_TAG_PROTO_RTL4_A,
+       .xmit   = rtl4a_tag_xmit,
+       .rcv    = rtl4a_tag_rcv,
+       .flow_dissect = rtl4a_tag_flow_dissect,
+       .overhead = RTL4_A_HDR_LEN,
+};
+module_dsa_tag_driver(rtl4a_netdev_ops);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_RTL4_A);
index 0c2b94f..7a849ff 100644 (file)
@@ -6,4 +6,5 @@ obj-$(CONFIG_ETHTOOL_NETLINK)   += ethtool_nl.o
 
 ethtool_nl-y   := netlink.o bitset.o strset.o linkinfo.o linkmodes.o \
                   linkstate.o debug.o wol.o features.o privflags.o rings.o \
-                  channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o
+                  channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o \
+                  tunnels.o
index 7b7a045..888f6e1 100644 (file)
@@ -58,6 +58,7 @@ int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info)
 {
        struct nlattr *tb[ETHTOOL_A_CABLE_TEST_MAX + 1];
        struct ethnl_req_info req_info = {};
+       const struct ethtool_phy_ops *ops;
        struct net_device *dev;
        int ret;
 
@@ -81,11 +82,17 @@ int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info)
        }
 
        rtnl_lock();
+       ops = ethtool_phy_ops;
+       if (!ops || !ops->start_cable_test) {
+               ret = -EOPNOTSUPP;
+               goto out_rtnl;
+       }
+
        ret = ethnl_ops_begin(dev);
        if (ret < 0)
                goto out_rtnl;
 
-       ret = phy_start_cable_test(dev->phydev, info->extack);
+       ret = ops->start_cable_test(dev->phydev, info->extack);
 
        ethnl_ops_complete(dev);
 
@@ -234,6 +241,14 @@ static int ethnl_act_cable_test_tdr_cfg(const struct nlattr *nest,
        struct nlattr *tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX + 1];
        int ret;
 
+       cfg->first = 100;
+       cfg->step = 100;
+       cfg->last = MAX_CABLE_LENGTH_CM;
+       cfg->pair = PHY_PAIR_ALL;
+
+       if (!nest)
+               return 0;
+
        ret = nla_parse_nested(tb, ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX, nest,
                               cable_test_tdr_act_cfg_policy, info->extack);
        if (ret < 0)
@@ -242,17 +257,12 @@ static int ethnl_act_cable_test_tdr_cfg(const struct nlattr *nest,
        if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST])
                cfg->first = nla_get_u32(
                        tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST]);
-       else
-               cfg->first = 100;
+
        if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST])
                cfg->last = nla_get_u32(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST]);
-       else
-               cfg->last = MAX_CABLE_LENGTH_CM;
 
        if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP])
                cfg->step = nla_get_u32(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP]);
-       else
-               cfg->step = 100;
 
        if (tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR]) {
                cfg->pair = nla_get_u8(tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR]);
@@ -263,8 +273,6 @@ static int ethnl_act_cable_test_tdr_cfg(const struct nlattr *nest,
                                "invalid pair parameter");
                        return -EINVAL;
                }
-       } else {
-               cfg->pair = PHY_PAIR_ALL;
        }
 
        if (cfg->first > MAX_CABLE_LENGTH_CM) {
@@ -307,6 +315,7 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info)
 {
        struct nlattr *tb[ETHTOOL_A_CABLE_TEST_TDR_MAX + 1];
        struct ethnl_req_info req_info = {};
+       const struct ethtool_phy_ops *ops;
        struct phy_tdr_config cfg;
        struct net_device *dev;
        int ret;
@@ -336,11 +345,17 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info)
                goto out_dev_put;
 
        rtnl_lock();
+       ops = ethtool_phy_ops;
+       if (!ops || !ops->start_cable_test_tdr) {
+               ret = -EOPNOTSUPP;
+               goto out_rtnl;
+       }
+
        ret = ethnl_ops_begin(dev);
        if (ret < 0)
                goto out_rtnl;
 
-       ret = phy_start_cable_test_tdr(dev->phydev, info->extack, &cfg);
+       ret = ops->start_cable_test_tdr(dev->phydev, info->extack, &cfg);
 
        ethnl_ops_complete(dev);
 
index 423e640..ed19573 100644 (file)
@@ -1,7 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0-only
 
+#include <linux/ethtool_netlink.h>
 #include <linux/net_tstamp.h>
 #include <linux/phy.h>
+#include <linux/rtnetlink.h>
 
 #include "common.h"
 
@@ -40,9 +42,11 @@ const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] = {
        [NETIF_F_GSO_UDP_TUNNEL_BIT] =   "tx-udp_tnl-segmentation",
        [NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation",
        [NETIF_F_GSO_PARTIAL_BIT] =      "tx-gso-partial",
+       [NETIF_F_GSO_TUNNEL_REMCSUM_BIT] = "tx-tunnel-remcsum-segmentation",
        [NETIF_F_GSO_SCTP_BIT] =         "tx-sctp-segmentation",
        [NETIF_F_GSO_ESP_BIT] =          "tx-esp-segmentation",
        [NETIF_F_GSO_UDP_L4_BIT] =       "tx-udp-segmentation",
+       [NETIF_F_GSO_FRAGLIST_BIT] =     "tx-gso-list",
 
        [NETIF_F_FCOE_CRC_BIT] =         "tx-checksum-fcoe-crc",
        [NETIF_F_SCTP_CRC_BIT] =        "tx-checksum-sctp",
@@ -173,6 +177,21 @@ const char link_mode_names[][ETH_GSTRING_LEN] = {
        __DEFINE_LINK_MODE_NAME(400000, DR8, Full),
        __DEFINE_LINK_MODE_NAME(400000, CR8, Full),
        __DEFINE_SPECIAL_MODE_NAME(FEC_LLRS, "LLRS"),
+       __DEFINE_LINK_MODE_NAME(100000, KR, Full),
+       __DEFINE_LINK_MODE_NAME(100000, SR, Full),
+       __DEFINE_LINK_MODE_NAME(100000, LR_ER_FR, Full),
+       __DEFINE_LINK_MODE_NAME(100000, DR, Full),
+       __DEFINE_LINK_MODE_NAME(100000, CR, Full),
+       __DEFINE_LINK_MODE_NAME(200000, KR2, Full),
+       __DEFINE_LINK_MODE_NAME(200000, SR2, Full),
+       __DEFINE_LINK_MODE_NAME(200000, LR2_ER2_FR2, Full),
+       __DEFINE_LINK_MODE_NAME(200000, DR2, Full),
+       __DEFINE_LINK_MODE_NAME(200000, CR2, Full),
+       __DEFINE_LINK_MODE_NAME(400000, KR4, Full),
+       __DEFINE_LINK_MODE_NAME(400000, SR4, Full),
+       __DEFINE_LINK_MODE_NAME(400000, LR4_ER4_FR4, Full),
+       __DEFINE_LINK_MODE_NAME(400000, DR4, Full),
+       __DEFINE_LINK_MODE_NAME(400000, CR4, Full),
 };
 static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS);
 
@@ -254,6 +273,14 @@ const char ts_rx_filter_names[][ETH_GSTRING_LEN] = {
 };
 static_assert(ARRAY_SIZE(ts_rx_filter_names) == __HWTSTAMP_FILTER_CNT);
 
+const char udp_tunnel_type_names[][ETH_GSTRING_LEN] = {
+       [ETHTOOL_UDP_TUNNEL_TYPE_VXLAN]         = "vxlan",
+       [ETHTOOL_UDP_TUNNEL_TYPE_GENEVE]        = "geneve",
+       [ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE]     = "vxlan-gpe",
+};
+static_assert(ARRAY_SIZE(udp_tunnel_type_names) ==
+             __ETHTOOL_UDP_TUNNEL_TYPE_CNT);
+
 /* return false if legacy contained non-0 deprecated fields
  * maxtxpkt/maxrxpkt. rest of ksettings always updated
  */
@@ -371,3 +398,13 @@ int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
 
        return 0;
 }
+
+const struct ethtool_phy_ops *ethtool_phy_ops;
+
+void ethtool_set_ethtool_phy_ops(const struct ethtool_phy_ops *ops)
+{
+       rtnl_lock();
+       ethtool_phy_ops = ops;
+       rtnl_unlock();
+}
+EXPORT_SYMBOL_GPL(ethtool_set_ethtool_phy_ops);
index a62f68c..3d9251c 100644 (file)
@@ -28,6 +28,7 @@ extern const char wol_mode_names[][ETH_GSTRING_LEN];
 extern const char sof_timestamping_names[][ETH_GSTRING_LEN];
 extern const char ts_tx_type_names[][ETH_GSTRING_LEN];
 extern const char ts_rx_filter_names[][ETH_GSTRING_LEN];
+extern const char udp_tunnel_type_names[][ETH_GSTRING_LEN];
 
 int __ethtool_get_link(struct net_device *dev);
 
@@ -37,4 +38,6 @@ bool convert_legacy_settings_to_link_ksettings(
 int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max);
 int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info);
 
+extern const struct ethtool_phy_ops *ethtool_phy_ops;
+
 #endif /* _ETHTOOL_COMMON_H */
index b5df90c..441794e 100644 (file)
@@ -135,6 +135,7 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
 
 static int __ethtool_get_sset_count(struct net_device *dev, int sset)
 {
+       const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops;
        const struct ethtool_ops *ops = dev->ethtool_ops;
 
        if (sset == ETH_SS_FEATURES)
@@ -150,8 +151,9 @@ static int __ethtool_get_sset_count(struct net_device *dev, int sset)
                return ARRAY_SIZE(phy_tunable_strings);
 
        if (sset == ETH_SS_PHY_STATS && dev->phydev &&
-           !ops->get_ethtool_phy_stats)
-               return phy_ethtool_get_sset_count(dev->phydev);
+           !ops->get_ethtool_phy_stats &&
+           phy_ops && phy_ops->get_sset_count)
+               return phy_ops->get_sset_count(dev->phydev);
 
        if (sset == ETH_SS_LINK_MODES)
                return __ETHTOOL_LINK_MODE_MASK_NBITS;
@@ -165,6 +167,7 @@ static int __ethtool_get_sset_count(struct net_device *dev, int sset)
 static void __ethtool_get_strings(struct net_device *dev,
        u32 stringset, u8 *data)
 {
+       const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops;
        const struct ethtool_ops *ops = dev->ethtool_ops;
 
        if (stringset == ETH_SS_FEATURES)
@@ -178,8 +181,9 @@ static void __ethtool_get_strings(struct net_device *dev,
        else if (stringset == ETH_SS_PHY_TUNABLES)
                memcpy(data, phy_tunable_strings, sizeof(phy_tunable_strings));
        else if (stringset == ETH_SS_PHY_STATS && dev->phydev &&
-                !ops->get_ethtool_phy_stats)
-               phy_ethtool_get_strings(dev->phydev, data);
+                !ops->get_ethtool_phy_stats && phy_ops &&
+                phy_ops->get_strings)
+               phy_ops->get_strings(dev->phydev, data);
        else if (stringset == ETH_SS_LINK_MODES)
                memcpy(data, link_mode_names,
                       __ETHTOOL_LINK_MODE_MASK_NBITS * ETH_GSTRING_LEN);
@@ -1918,7 +1922,7 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
        if (copy_to_user(useraddr, &stats, sizeof(stats)))
                goto out;
        useraddr += sizeof(stats);
-       if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64)))
+       if (n_stats && copy_to_user(useraddr, data, array_size(n_stats, sizeof(u64))))
                goto out;
        ret = 0;
 
@@ -1929,6 +1933,7 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
 
 static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
 {
+       const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops;
        const struct ethtool_ops *ops = dev->ethtool_ops;
        struct phy_device *phydev = dev->phydev;
        struct ethtool_stats stats;
@@ -1938,8 +1943,9 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
        if (!phydev && (!ops->get_ethtool_phy_stats || !ops->get_sset_count))
                return -EOPNOTSUPP;
 
-       if (dev->phydev && !ops->get_ethtool_phy_stats)
-               n_stats = phy_ethtool_get_sset_count(dev->phydev);
+       if (dev->phydev && !ops->get_ethtool_phy_stats &&
+           phy_ops && phy_ops->get_sset_count)
+               n_stats = phy_ops->get_sset_count(dev->phydev);
        else
                n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
        if (n_stats < 0)
@@ -1958,8 +1964,9 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
                if (!data)
                        return -ENOMEM;
 
-               if (dev->phydev && !ops->get_ethtool_phy_stats) {
-                       ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
+               if (dev->phydev && !ops->get_ethtool_phy_stats &&
+                   phy_ops && phy_ops->get_stats) {
+                       ret = phy_ops->get_stats(dev->phydev, &stats, data);
                        if (ret < 0)
                                goto out;
                } else {
@@ -1973,7 +1980,7 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
        if (copy_to_user(useraddr, &stats, sizeof(stats)))
                goto out;
        useraddr += sizeof(stats);
-       if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64)))
+       if (n_stats && copy_to_user(useraddr, data, array_size(n_stats, sizeof(u64))))
                goto out;
        ret = 0;
 
@@ -2978,7 +2985,7 @@ ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input)
                               sizeof(match->mask.ipv6.dst));
                }
                if (memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr)) ||
-                   memcmp(v6_m_spec->ip6src, &zero_addr, sizeof(zero_addr))) {
+                   memcmp(v6_m_spec->ip6dst, &zero_addr, sizeof(zero_addr))) {
                        match->dissector.used_keys |=
                                BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
                        match->dissector.offset[FLOW_DISSECTOR_KEY_IPV6_ADDRS] =
index fd4f3e5..317a931 100644 (file)
@@ -257,6 +257,21 @@ static const struct link_mode_info link_mode_params[] = {
        __DEFINE_LINK_MODE_PARAMS(400000, DR8, Full),
        __DEFINE_LINK_MODE_PARAMS(400000, CR8, Full),
        __DEFINE_SPECIAL_MODE_PARAMS(FEC_LLRS),
+       __DEFINE_LINK_MODE_PARAMS(100000, KR, Full),
+       __DEFINE_LINK_MODE_PARAMS(100000, SR, Full),
+       __DEFINE_LINK_MODE_PARAMS(100000, LR_ER_FR, Full),
+       __DEFINE_LINK_MODE_PARAMS(100000, DR, Full),
+       __DEFINE_LINK_MODE_PARAMS(100000, CR, Full),
+       __DEFINE_LINK_MODE_PARAMS(200000, KR2, Full),
+       __DEFINE_LINK_MODE_PARAMS(200000, SR2, Full),
+       __DEFINE_LINK_MODE_PARAMS(200000, LR2_ER2_FR2, Full),
+       __DEFINE_LINK_MODE_PARAMS(200000, DR2, Full),
+       __DEFINE_LINK_MODE_PARAMS(200000, CR2, Full),
+       __DEFINE_LINK_MODE_PARAMS(400000, KR4, Full),
+       __DEFINE_LINK_MODE_PARAMS(400000, SR4, Full),
+       __DEFINE_LINK_MODE_PARAMS(400000, LR4_ER4_FR4, Full),
+       __DEFINE_LINK_MODE_PARAMS(400000, DR4, Full),
+       __DEFINE_LINK_MODE_PARAMS(400000, CR4, Full),
 };
 
 static const struct nla_policy
index 7f47ba8..4834091 100644 (file)
@@ -9,10 +9,12 @@ struct linkstate_req_info {
 };
 
 struct linkstate_reply_data {
-       struct ethnl_reply_data         base;
-       int                             link;
-       int                             sqi;
-       int                             sqi_max;
+       struct ethnl_reply_data                 base;
+       int                                     link;
+       int                                     sqi;
+       int                                     sqi_max;
+       bool                                    link_ext_state_provided;
+       struct ethtool_link_ext_state_info      ethtool_link_ext_state_info;
 };
 
 #define LINKSTATE_REPDATA(__reply_base) \
@@ -25,6 +27,8 @@ linkstate_get_policy[ETHTOOL_A_LINKSTATE_MAX + 1] = {
        [ETHTOOL_A_LINKSTATE_LINK]              = { .type = NLA_REJECT },
        [ETHTOOL_A_LINKSTATE_SQI]               = { .type = NLA_REJECT },
        [ETHTOOL_A_LINKSTATE_SQI_MAX]           = { .type = NLA_REJECT },
+       [ETHTOOL_A_LINKSTATE_EXT_STATE]         = { .type = NLA_REJECT },
+       [ETHTOOL_A_LINKSTATE_EXT_SUBSTATE]      = { .type = NLA_REJECT },
 };
 
 static int linkstate_get_sqi(struct net_device *dev)
@@ -61,6 +65,23 @@ static int linkstate_get_sqi_max(struct net_device *dev)
        mutex_unlock(&phydev->lock);
 
        return ret;
+};
+
+static int linkstate_get_link_ext_state(struct net_device *dev,
+                                       struct linkstate_reply_data *data)
+{
+       int err;
+
+       if (!dev->ethtool_ops->get_link_ext_state)
+               return -EOPNOTSUPP;
+
+       err = dev->ethtool_ops->get_link_ext_state(dev, &data->ethtool_link_ext_state_info);
+       if (err)
+               return err;
+
+       data->link_ext_state_provided = true;
+
+       return 0;
 }
 
 static int linkstate_prepare_data(const struct ethnl_req_info *req_base,
@@ -78,19 +99,24 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base,
 
        ret = linkstate_get_sqi(dev);
        if (ret < 0 && ret != -EOPNOTSUPP)
-               return ret;
-
+               goto out;
        data->sqi = ret;
 
        ret = linkstate_get_sqi_max(dev);
        if (ret < 0 && ret != -EOPNOTSUPP)
-               return ret;
-
+               goto out;
        data->sqi_max = ret;
 
-       ethnl_ops_complete(dev);
+       if (dev->flags & IFF_UP) {
+               ret = linkstate_get_link_ext_state(dev, data);
+               if (ret < 0 && ret != -EOPNOTSUPP && ret != -ENODATA)
+                       goto out;
+       }
 
-       return 0;
+       ret = 0;
+out:
+       ethnl_ops_complete(dev);
+       return ret;
 }
 
 static int linkstate_reply_size(const struct ethnl_req_info *req_base,
@@ -108,6 +134,12 @@ static int linkstate_reply_size(const struct ethnl_req_info *req_base,
        if (data->sqi_max != -EOPNOTSUPP)
                len += nla_total_size(sizeof(u32));
 
+       if (data->link_ext_state_provided)
+               len += nla_total_size(sizeof(u8)); /* LINKSTATE_EXT_STATE */
+
+       if (data->ethtool_link_ext_state_info.__link_ext_substate)
+               len += nla_total_size(sizeof(u8)); /* LINKSTATE_EXT_SUBSTATE */
+
        return len;
 }
 
@@ -129,6 +161,17 @@ static int linkstate_fill_reply(struct sk_buff *skb,
            nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX, data->sqi_max))
                return -EMSGSIZE;
 
+       if (data->link_ext_state_provided) {
+               if (nla_put_u8(skb, ETHTOOL_A_LINKSTATE_EXT_STATE,
+                              data->ethtool_link_ext_state_info.link_ext_state))
+                       return -EMSGSIZE;
+
+               if (data->ethtool_link_ext_state_info.__link_ext_substate &&
+                   nla_put_u8(skb, ETHTOOL_A_LINKSTATE_EXT_SUBSTATE,
+                              data->ethtool_link_ext_state_info.__link_ext_substate))
+                       return -EMSGSIZE;
+       }
+
        return 0;
 }
 
index 88fd07f..5c20727 100644 (file)
@@ -181,6 +181,12 @@ err:
        return NULL;
 }
 
+void *ethnl_dump_put(struct sk_buff *skb, struct netlink_callback *cb, u8 cmd)
+{
+       return genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+                          &ethtool_genl_family, 0, cmd);
+}
+
 void *ethnl_bcastmsg_put(struct sk_buff *skb, u8 cmd)
 {
        return genlmsg_put(skb, 0, ++ethnl_bcast_seq, &ethtool_genl_family, 0,
@@ -376,10 +382,17 @@ err_dev:
 }
 
 static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev,
-                                 const struct ethnl_dump_ctx *ctx)
+                                 const struct ethnl_dump_ctx *ctx,
+                                 struct netlink_callback *cb)
 {
+       void *ehdr;
        int ret;
 
+       ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
+                          &ethtool_genl_family, 0, ctx->ops->reply_cmd);
+       if (!ehdr)
+               return -EMSGSIZE;
+
        ethnl_init_reply_data(ctx->reply_data, ctx->ops, dev);
        rtnl_lock();
        ret = ctx->ops->prepare_data(ctx->req_info, ctx->reply_data, NULL);
@@ -395,6 +408,10 @@ out:
        if (ctx->ops->cleanup_data)
                ctx->ops->cleanup_data(ctx->reply_data);
        ctx->reply_data->dev = NULL;
+       if (ret < 0)
+               genlmsg_cancel(skb, ehdr);
+       else
+               genlmsg_end(skb, ehdr);
        return ret;
 }
 
@@ -411,7 +428,6 @@ static int ethnl_default_dumpit(struct sk_buff *skb,
        int s_idx = ctx->pos_idx;
        int h, idx = 0;
        int ret = 0;
-       void *ehdr;
 
        rtnl_lock();
        for (h = ctx->pos_hash; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
@@ -431,26 +447,15 @@ restart_chain:
                        dev_hold(dev);
                        rtnl_unlock();
 
-                       ehdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
-                                          cb->nlh->nlmsg_seq,
-                                          &ethtool_genl_family, 0,
-                                          ctx->ops->reply_cmd);
-                       if (!ehdr) {
-                               dev_put(dev);
-                               ret = -EMSGSIZE;
-                               goto out;
-                       }
-                       ret = ethnl_default_dump_one(skb, dev, ctx);
+                       ret = ethnl_default_dump_one(skb, dev, ctx, cb);
                        dev_put(dev);
                        if (ret < 0) {
-                               genlmsg_cancel(skb, ehdr);
                                if (ret == -EOPNOTSUPP)
                                        goto lock_and_cont;
                                if (likely(skb->len))
                                        ret = skb->len;
                                goto out;
                        }
-                       genlmsg_end(skb, ehdr);
 lock_and_cont:
                        rtnl_lock();
                        if (net->dev_base_seq != seq) {
@@ -849,6 +854,12 @@ static const struct genl_ops ethtool_genl_ops[] = {
                .flags  = GENL_UNS_ADMIN_PERM,
                .doit   = ethnl_act_cable_test_tdr,
        },
+       {
+               .cmd    = ETHTOOL_MSG_TUNNEL_INFO_GET,
+               .doit   = ethnl_tunnel_info_doit,
+               .start  = ethnl_tunnel_info_start,
+               .dumpit = ethnl_tunnel_info_dumpit,
+       },
 };
 
 static const struct genl_multicast_group ethtool_nl_mcgrps[] = {
index 9a96b6e..e208500 100644 (file)
@@ -19,6 +19,7 @@ int ethnl_fill_reply_header(struct sk_buff *skb, struct net_device *dev,
 struct sk_buff *ethnl_reply_init(size_t payload, struct net_device *dev, u8 cmd,
                                 u16 hdr_attrtype, struct genl_info *info,
                                 void **ehdrp);
+void *ethnl_dump_put(struct sk_buff *skb, struct netlink_callback *cb, u8 cmd);
 void *ethnl_bcastmsg_put(struct sk_buff *skb, u8 cmd);
 int ethnl_multicast(struct sk_buff *skb, struct net_device *dev);
 
@@ -361,5 +362,8 @@ int ethnl_set_pause(struct sk_buff *skb, struct genl_info *info);
 int ethnl_set_eee(struct sk_buff *skb, struct genl_info *info);
 int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info);
 int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info);
+int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info);
+int ethnl_tunnel_info_start(struct netlink_callback *cb);
+int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
 
 #endif /* _NET_ETHTOOL_NETLINK_H */
index 0eed4e4..82707b6 100644 (file)
@@ -75,6 +75,11 @@ static const struct strset_info info_template[] = {
                .count          = __HWTSTAMP_FILTER_CNT,
                .strings        = ts_rx_filter_names,
        },
+       [ETH_SS_UDP_TUNNEL_TYPES] = {
+               .per_dev        = false,
+               .count          = __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
+               .strings        = udp_tunnel_type_names,
+       },
 };
 
 struct strset_req_info {
@@ -209,13 +214,15 @@ static void strset_cleanup_data(struct ethnl_reply_data *reply_base)
 static int strset_prepare_set(struct strset_info *info, struct net_device *dev,
                              unsigned int id, bool counts_only)
 {
+       const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops;
        const struct ethtool_ops *ops = dev->ethtool_ops;
        void *strings;
        int count, ret;
 
        if (id == ETH_SS_PHY_STATS && dev->phydev &&
-           !ops->get_ethtool_phy_stats)
-               ret = phy_ethtool_get_sset_count(dev->phydev);
+           !ops->get_ethtool_phy_stats && phy_ops &&
+           phy_ops->get_sset_count)
+               ret = phy_ops->get_sset_count(dev->phydev);
        else if (ops->get_sset_count && ops->get_strings)
                ret = ops->get_sset_count(dev, id);
        else
@@ -231,8 +238,9 @@ static int strset_prepare_set(struct strset_info *info, struct net_device *dev,
                if (!strings)
                        return -ENOMEM;
                if (id == ETH_SS_PHY_STATS && dev->phydev &&
-                   !ops->get_ethtool_phy_stats)
-                       phy_ethtool_get_strings(dev->phydev, strings);
+                   !ops->get_ethtool_phy_stats && phy_ops &&
+                   phy_ops->get_strings)
+                       phy_ops->get_strings(dev->phydev, strings);
                else
                        ops->get_strings(dev, id, strings);
                info->strings = strings;
diff --git a/net/ethtool/tunnels.c b/net/ethtool/tunnels.c
new file mode 100644 (file)
index 0000000..6b89255
--- /dev/null
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/ethtool_netlink.h>
+#include <net/udp_tunnel.h>
+
+#include "bitset.h"
+#include "common.h"
+#include "netlink.h"
+
+static const struct nla_policy
+ethtool_tunnel_info_policy[ETHTOOL_A_TUNNEL_INFO_MAX + 1] = {
+       [ETHTOOL_A_TUNNEL_INFO_UNSPEC]          = { .type = NLA_REJECT },
+       [ETHTOOL_A_TUNNEL_INFO_HEADER]          = { .type = NLA_NESTED },
+};
+
+static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN == ilog2(UDP_TUNNEL_TYPE_VXLAN));
+static_assert(ETHTOOL_UDP_TUNNEL_TYPE_GENEVE == ilog2(UDP_TUNNEL_TYPE_GENEVE));
+static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE ==
+             ilog2(UDP_TUNNEL_TYPE_VXLAN_GPE));
+
+static ssize_t
+ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base,
+                            struct netlink_ext_ack *extack)
+{
+       bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
+       const struct udp_tunnel_nic_info *info;
+       unsigned int i;
+       size_t size;
+       int ret;
+
+       info = req_base->dev->udp_tunnel_nic_info;
+       if (!info) {
+               NL_SET_ERR_MSG(extack,
+                              "device does not report tunnel offload info");
+               return -EOPNOTSUPP;
+       }
+
+       size =  nla_total_size(0); /* _INFO_UDP_PORTS */
+
+       for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
+               if (!info->tables[i].n_entries)
+                       return size;
+
+               size += nla_total_size(0); /* _UDP_TABLE */
+               size += nla_total_size(sizeof(u32)); /* _UDP_TABLE_SIZE */
+               ret = ethnl_bitset32_size(&info->tables[i].tunnel_types, NULL,
+                                         __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
+                                         udp_tunnel_type_names, compact);
+               if (ret < 0)
+                       return ret;
+               size += ret;
+
+               size += udp_tunnel_nic_dump_size(req_base->dev, i);
+       }
+
+       return size;
+}
+
+static int
+ethnl_tunnel_info_fill_reply(const struct ethnl_req_info *req_base,
+                            struct sk_buff *skb)
+{
+       bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
+       const struct udp_tunnel_nic_info *info;
+       struct nlattr *ports, *table;
+       unsigned int i;
+
+       info = req_base->dev->udp_tunnel_nic_info;
+       if (!info)
+               return -EOPNOTSUPP;
+
+       ports = nla_nest_start(skb, ETHTOOL_A_TUNNEL_INFO_UDP_PORTS);
+       if (!ports)
+               return -EMSGSIZE;
+
+       for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
+               if (!info->tables[i].n_entries)
+                       break;
+
+               table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
+               if (!table)
+                       goto err_cancel_ports;
+
+               if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE,
+                               info->tables[i].n_entries))
+                       goto err_cancel_table;
+
+               if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,
+                                      &info->tables[i].tunnel_types, NULL,
+                                      __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
+                                      udp_tunnel_type_names, compact))
+                       goto err_cancel_table;
+
+               if (udp_tunnel_nic_dump_write(req_base->dev, i, skb))
+                       goto err_cancel_table;
+
+               nla_nest_end(skb, table);
+       }
+
+       nla_nest_end(skb, ports);
+
+       return 0;
+
+err_cancel_table:
+       nla_nest_cancel(skb, table);
+err_cancel_ports:
+       nla_nest_cancel(skb, ports);
+       return -EMSGSIZE;
+}
+
+static int
+ethnl_tunnel_info_req_parse(struct ethnl_req_info *req_info,
+                           const struct nlmsghdr *nlhdr, struct net *net,
+                           struct netlink_ext_ack *extack, bool require_dev)
+{
+       struct nlattr *tb[ETHTOOL_A_TUNNEL_INFO_MAX + 1];
+       int ret;
+
+       ret = nlmsg_parse(nlhdr, GENL_HDRLEN, tb, ETHTOOL_A_TUNNEL_INFO_MAX,
+                         ethtool_tunnel_info_policy, extack);
+       if (ret < 0)
+               return ret;
+
+       return ethnl_parse_header_dev_get(req_info,
+                                         tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
+                                         net, extack, require_dev);
+}
+
+int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info)
+{
+       struct ethnl_req_info req_info = {};
+       struct sk_buff *rskb;
+       void *reply_payload;
+       int reply_len;
+       int ret;
+
+       ret = ethnl_tunnel_info_req_parse(&req_info, info->nlhdr,
+                                         genl_info_net(info), info->extack,
+                                         true);
+       if (ret < 0)
+               return ret;
+
+       rtnl_lock();
+       ret = ethnl_tunnel_info_reply_size(&req_info, info->extack);
+       if (ret < 0)
+               goto err_unlock_rtnl;
+       reply_len = ret + ethnl_reply_header_size();
+
+       rskb = ethnl_reply_init(reply_len, req_info.dev,
+                               ETHTOOL_MSG_TUNNEL_INFO_GET,
+                               ETHTOOL_A_TUNNEL_INFO_HEADER,
+                               info, &reply_payload);
+       if (!rskb) {
+               ret = -ENOMEM;
+               goto err_unlock_rtnl;
+       }
+
+       ret = ethnl_tunnel_info_fill_reply(&req_info, rskb);
+       if (ret)
+               goto err_free_msg;
+       rtnl_unlock();
+       dev_put(req_info.dev);
+       genlmsg_end(rskb, reply_payload);
+
+       return genlmsg_reply(rskb, info);
+
+err_free_msg:
+       nlmsg_free(rskb);
+err_unlock_rtnl:
+       rtnl_unlock();
+       dev_put(req_info.dev);
+       return ret;
+}
+
+struct ethnl_tunnel_info_dump_ctx {
+       struct ethnl_req_info   req_info;
+       int                     pos_hash;
+       int                     pos_idx;
+};
+
+int ethnl_tunnel_info_start(struct netlink_callback *cb)
+{
+       struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
+       int ret;
+
+       BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
+
+       memset(ctx, 0, sizeof(*ctx));
+
+       ret = ethnl_tunnel_info_req_parse(&ctx->req_info, cb->nlh,
+                                         sock_net(cb->skb->sk), cb->extack,
+                                         false);
+       if (ctx->req_info.dev) {
+               dev_put(ctx->req_info.dev);
+               ctx->req_info.dev = NULL;
+       }
+
+       return ret;
+}
+
+int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
+       struct net *net = sock_net(skb->sk);
+       int s_idx = ctx->pos_idx;
+       int h, idx = 0;
+       int ret = 0;
+       void *ehdr;
+
+       rtnl_lock();
+       cb->seq = net->dev_base_seq;
+       for (h = ctx->pos_hash; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+               struct hlist_head *head;
+               struct net_device *dev;
+
+               head = &net->dev_index_head[h];
+               idx = 0;
+               hlist_for_each_entry(dev, head, index_hlist) {
+                       if (idx < s_idx)
+                               goto cont;
+
+                       ehdr = ethnl_dump_put(skb, cb,
+                                             ETHTOOL_MSG_TUNNEL_INFO_GET);
+                       if (!ehdr) {
+                               ret = -EMSGSIZE;
+                               goto out;
+                       }
+
+                       ret = ethnl_fill_reply_header(skb, dev, ETHTOOL_A_TUNNEL_INFO_HEADER);
+                       if (ret < 0) {
+                               genlmsg_cancel(skb, ehdr);
+                               goto out;
+                       }
+
+                       ctx->req_info.dev = dev;
+                       ret = ethnl_tunnel_info_fill_reply(&ctx->req_info, skb);
+                       ctx->req_info.dev = NULL;
+                       if (ret < 0) {
+                               genlmsg_cancel(skb, ehdr);
+                               if (ret == -EOPNOTSUPP)
+                                       goto cont;
+                               goto out;
+                       }
+                       genlmsg_end(skb, ehdr);
+cont:
+                       idx++;
+               }
+       }
+out:
+       rtnl_unlock();
+
+       ctx->pos_hash = h;
+       ctx->pos_idx = idx;
+       nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+
+       if (ret == -EMSGSIZE && skb->len)
+               return skb->len;
+       return ret;
+}
index cd99f54..8a927b6 100644 (file)
@@ -210,7 +210,7 @@ static netdev_features_t hsr_fix_features(struct net_device *dev,
        return hsr_features_recompute(hsr, features);
 }
 
-static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct hsr_priv *hsr = netdev_priv(dev);
        struct hsr_port *master;
@@ -339,7 +339,7 @@ static void hsr_announce(struct timer_list *t)
        rcu_read_unlock();
 }
 
-static void hsr_del_ports(struct hsr_priv *hsr)
+void hsr_del_ports(struct hsr_priv *hsr)
 {
        struct hsr_port *port;
 
@@ -356,31 +356,12 @@ static void hsr_del_ports(struct hsr_priv *hsr)
                hsr_del_port(port);
 }
 
-/* This has to be called after all the readers are gone.
- * Otherwise we would have to check the return value of
- * hsr_port_get_hsr().
- */
-static void hsr_dev_destroy(struct net_device *hsr_dev)
-{
-       struct hsr_priv *hsr = netdev_priv(hsr_dev);
-
-       hsr_debugfs_term(hsr);
-       hsr_del_ports(hsr);
-
-       del_timer_sync(&hsr->prune_timer);
-       del_timer_sync(&hsr->announce_timer);
-
-       hsr_del_self_node(hsr);
-       hsr_del_nodes(&hsr->node_db);
-}
-
 static const struct net_device_ops hsr_device_ops = {
        .ndo_change_mtu = hsr_dev_change_mtu,
        .ndo_open = hsr_dev_open,
        .ndo_stop = hsr_dev_close,
        .ndo_start_xmit = hsr_dev_xmit,
        .ndo_fix_features = hsr_fix_features,
-       .ndo_uninit = hsr_dev_destroy,
 };
 
 static struct device_type hsr_type = {
@@ -434,6 +415,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
                     unsigned char multicast_spec, u8 protocol_version,
                     struct netlink_ext_ack *extack)
 {
+       bool unregister = false;
        struct hsr_priv *hsr;
        int res;
 
@@ -485,25 +467,27 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
        if (res)
                goto err_unregister;
 
+       unregister = true;
+
        res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A, extack);
        if (res)
-               goto err_add_slaves;
+               goto err_unregister;
 
        res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B, extack);
        if (res)
-               goto err_add_slaves;
+               goto err_unregister;
 
        hsr_debugfs_init(hsr, hsr_dev);
        mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));
 
        return 0;
 
-err_add_slaves:
-       unregister_netdevice(hsr_dev);
 err_unregister:
        hsr_del_ports(hsr);
 err_add_master:
        hsr_del_self_node(hsr);
 
+       if (unregister)
+               unregister_netdevice(hsr_dev);
        return res;
 }
index a099d7d..b8f9262 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/netdevice.h>
 #include "hsr_main.h"
 
+void hsr_del_ports(struct hsr_priv *hsr);
 void hsr_dev_setup(struct net_device *dev);
 int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
                     unsigned char multicast_spec, u8 protocol_version,
@@ -18,5 +19,4 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
 void hsr_check_carrier_and_operstate(struct hsr_priv *hsr);
 bool is_hsr_master(struct net_device *dev);
 int hsr_get_max_mtu(struct hsr_priv *hsr);
-
 #endif /* __HSR_DEVICE_H */
index ed13760..92c8ad7 100644 (file)
@@ -367,10 +367,8 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
                port->dev->stats.tx_bytes += skb->len;
        }
 
-       if (frame.skb_hsr)
-               kfree_skb(frame.skb_hsr);
-       if (frame.skb_std)
-               kfree_skb(frame.skb_std);
+       kfree_skb(frame.skb_hsr);
+       kfree_skb(frame.skb_std);
        return;
 
 out_drop:
index e2564de..144da15 100644 (file)
@@ -6,6 +6,7 @@
  */
 
 #include <linux/netdevice.h>
+#include <net/rtnetlink.h>
 #include <linux/rculist.h>
 #include <linux/timer.h>
 #include <linux/etherdevice.h>
@@ -100,8 +101,10 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
                        master = hsr_port_get_hsr(port->hsr, HSR_PT_MASTER);
                        hsr_del_port(port);
                        if (hsr_slave_empty(master->hsr)) {
-                               unregister_netdevice_queue(master->dev,
-                                                          &list_kill);
+                               const struct rtnl_link_ops *ops;
+
+                               ops = master->dev->rtnl_link_ops;
+                               ops->dellink(master->dev, &list_kill);
                                unregister_netdevice_many(&list_kill);
                        }
                }
@@ -144,9 +147,9 @@ static int __init hsr_init(void)
 
 static void __exit hsr_exit(void)
 {
-       unregister_netdevice_notifier(&hsr_nb);
        hsr_netlink_exit();
        hsr_debugfs_remove_root();
+       unregister_netdevice_notifier(&hsr_nb);
 }
 
 module_init(hsr_init);
index 1decb25..6e14b7d 100644 (file)
@@ -83,6 +83,22 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
        return hsr_dev_finalize(dev, link, multicast_spec, hsr_version, extack);
 }
 
+static void hsr_dellink(struct net_device *dev, struct list_head *head)
+{
+       struct hsr_priv *hsr = netdev_priv(dev);
+
+       del_timer_sync(&hsr->prune_timer);
+       del_timer_sync(&hsr->announce_timer);
+
+       hsr_debugfs_term(hsr);
+       hsr_del_ports(hsr);
+
+       hsr_del_self_node(hsr);
+       hsr_del_nodes(&hsr->node_db);
+
+       unregister_netdevice_queue(dev, head);
+}
+
 static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
        struct hsr_priv *hsr = netdev_priv(dev);
@@ -118,6 +134,7 @@ static struct rtnl_link_ops hsr_link_ops __read_mostly = {
        .priv_size      = sizeof(struct hsr_priv),
        .setup          = hsr_dev_setup,
        .newlink        = hsr_newlink,
+       .dellink        = hsr_dellink,
        .fill_info      = hsr_fill_info,
 };
 
index d93d453..a45a040 100644 (file)
@@ -382,7 +382,7 @@ static int raw_getsockopt(struct sock *sk, int level, int optname,
 }
 
 static int raw_setsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, unsigned int optlen)
+                         sockptr_t optval, unsigned int optlen)
 {
        return -EOPNOTSUPP;
 }
@@ -423,10 +423,6 @@ static const struct proto_ops ieee802154_raw_ops = {
        .recvmsg           = sock_common_recvmsg,
        .mmap              = sock_no_mmap,
        .sendpage          = sock_no_sendpage,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_sock_common_setsockopt,
-       .compat_getsockopt = compat_sock_common_getsockopt,
-#endif
 };
 
 /* DGRAM Sockets (802.15.4 dataframes) */
@@ -876,7 +872,7 @@ static int dgram_getsockopt(struct sock *sk, int level, int optname,
 }
 
 static int dgram_setsockopt(struct sock *sk, int level, int optname,
-                           char __user *optval, unsigned int optlen)
+                           sockptr_t optval, unsigned int optlen)
 {
        struct dgram_sock *ro = dgram_sk(sk);
        struct net *net = sock_net(sk);
@@ -886,7 +882,7 @@ static int dgram_setsockopt(struct sock *sk, int level, int optname,
        if (optlen < sizeof(int))
                return -EINVAL;
 
-       if (get_user(val, (int __user *)optval))
+       if (copy_from_sockptr(&val, optval, sizeof(int)))
                return -EFAULT;
 
        lock_sock(sk);
@@ -986,10 +982,6 @@ static const struct proto_ops ieee802154_dgram_ops = {
        .recvmsg           = sock_common_recvmsg,
        .mmap              = sock_no_mmap,
        .sendpage          = sock_no_sendpage,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_sock_common_setsockopt,
-       .compat_getsockopt = compat_sock_common_getsockopt,
-#endif
 };
 
 /* Create a socket. Initialise the socket, blank the addresses
index 6ecbb0c..60db5a6 100644 (file)
@@ -10,7 +10,7 @@ config IP_MULTICAST
          intend to participate in the MBONE, a high bandwidth network on top
          of the Internet which carries audio and video broadcasts. More
          information about the MBONE is on the WWW at
-         <http://www.savetz.com/mbone/>. For most people, it's safe to say N.
+         <https://www.savetz.com/mbone/>. For most people, it's safe to say N.
 
 config IP_ADVANCED_ROUTER
        bool "IP: advanced router"
@@ -73,7 +73,7 @@ config IP_MULTIPLE_TABLES
 
          If you need more information, see the Linux Advanced
          Routing and Traffic Control documentation at
-         <http://lartc.org/howto/lartc.rpdb.html>
+         <https://lartc.org/howto/lartc.rpdb.html>
 
          If unsure, say N.
 
@@ -280,7 +280,7 @@ config SYN_COOKIES
          continue to connect, even when your machine is under attack. There
          is no need for the legitimate users to change their TCP/IP software;
          SYN cookies work transparently to them. For technical information
-         about SYN cookies, check out <http://cr.yp.to/syncookies.html>.
+         about SYN cookies, check out <https://cr.yp.to/syncookies.html>.
 
          If you are SYN flooded, the source address reported by the kernel is
          likely to have been forged by the attacker; it is only reported as
@@ -340,29 +340,31 @@ config NET_FOU_IP_TUNNELS
 
 config INET_AH
        tristate "IP: AH transformation"
-       select XFRM_ALGO
-       select CRYPTO
-       select CRYPTO_HMAC
-       select CRYPTO_MD5
-       select CRYPTO_SHA1
+       select XFRM_AH
        help
-         Support for IPsec AH.
+         Support for IPsec AH (Authentication Header).
+
+         AH can be used with various authentication algorithms.  Besides
+         enabling AH support itself, this option enables the generic
+         implementations of the algorithms that RFC 8221 lists as MUST be
+         implemented.  If you need any other algorithms, you'll need to enable
+         them in the crypto API.  You should also enable accelerated
+         implementations of any needed algorithms when available.
 
          If unsure, say Y.
 
 config INET_ESP
        tristate "IP: ESP transformation"
-       select XFRM_ALGO
-       select CRYPTO
-       select CRYPTO_AUTHENC
-       select CRYPTO_HMAC
-       select CRYPTO_MD5
-       select CRYPTO_CBC
-       select CRYPTO_SHA1
-       select CRYPTO_DES
-       select CRYPTO_ECHAINIV
+       select XFRM_ESP
        help
-         Support for IPsec ESP.
+         Support for IPsec ESP (Encapsulating Security Payload).
+
+         ESP can be used with various encryption and authentication algorithms.
+         Besides enabling ESP support itself, this option enables the generic
+         implementations of the algorithms that RFC 8221 lists as MUST be
+         implemented.  If you need any other algorithms, you'll need to enable
+         them in the crypto API.  You should also enable accelerated
+         implementations of any needed algorithms when available.
 
          If unsure, say Y.
 
@@ -523,7 +525,7 @@ config TCP_CONG_HSTCP
          A modification to TCP's congestion control mechanism for use
          with large congestion windows. A table indicates how much to
          increase the congestion window by when an ACK is received.
-         For more detail see http://www.icir.org/floyd/hstcp.html
+         For more detail see https://www.icir.org/floyd/hstcp.html
 
 config TCP_CONG_HYBLA
        tristate "TCP-Hybla congestion control algorithm"
index 9e1a186..5b77a46 100644 (file)
@@ -14,7 +14,7 @@ obj-y     := route.o inetpeer.o protocol.o \
             udp_offload.o arp.o icmp.o devinet.o af_inet.o igmp.o \
             fib_frontend.o fib_semantics.o fib_trie.o fib_notifier.o \
             inet_fragment.o ping.o ip_tunnel_core.o gre_offload.o \
-            metrics.o netlink.o nexthop.o
+            metrics.o netlink.o nexthop.o udp_tunnel_stub.o
 
 obj-$(CONFIG_BPFILTER) += bpfilter/
 
@@ -29,6 +29,7 @@ gre-y := gre_demux.o
 obj-$(CONFIG_NET_FOU) += fou.o
 obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o
 obj-$(CONFIG_NET_IPGRE) += ip_gre.o
+udp_tunnel-y := udp_tunnel_core.o udp_tunnel_nic.o
 obj-$(CONFIG_NET_UDP_TUNNEL) += udp_tunnel.o
 obj-$(CONFIG_NET_IPVTI) += ip_vti.o
 obj-$(CONFIG_SYN_COOKIES) += syncookies.o
index 02aa5cb..4307503 100644 (file)
@@ -411,6 +411,9 @@ int inet_release(struct socket *sock)
        if (sk) {
                long timeout;
 
+               if (!sk->sk_kern_sock)
+                       BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk);
+
                /* Applications forget to leave groups before exiting */
                ip_mc_drop_socket(sk);
 
@@ -1040,8 +1043,6 @@ const struct proto_ops inet_stream_ops = {
        .sendpage_locked   = tcp_sendpage_locked,
        .peek_len          = tcp_peek_len,
 #ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_sock_common_setsockopt,
-       .compat_getsockopt = compat_sock_common_getsockopt,
        .compat_ioctl      = inet_compat_ioctl,
 #endif
        .set_rcvlowat      = tcp_set_rcvlowat,
@@ -1070,8 +1071,6 @@ const struct proto_ops inet_dgram_ops = {
        .sendpage          = inet_sendpage,
        .set_peek_off      = sk_set_peek_off,
 #ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_sock_common_setsockopt,
-       .compat_getsockopt = compat_sock_common_getsockopt,
        .compat_ioctl      = inet_compat_ioctl,
 #endif
 };
@@ -1102,8 +1101,6 @@ static const struct proto_ops inet_sockraw_ops = {
        .mmap              = sock_no_mmap,
        .sendpage          = inet_sendpage,
 #ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_sock_common_setsockopt,
-       .compat_getsockopt = compat_sock_common_getsockopt,
        .compat_ioctl      = inet_compat_ioctl,
 #endif
 };
@@ -1432,10 +1429,6 @@ static struct sk_buff *ipip_gso_segment(struct sk_buff *skb,
        return inet_gso_segment(skb, features);
 }
 
-INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp4_gro_receive(struct list_head *,
-                                                          struct sk_buff *));
-INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
-                                                          struct sk_buff *));
 struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
 {
        const struct net_offload *ops;
@@ -1608,8 +1601,6 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
        return -EINVAL;
 }
 
-INDIRECT_CALLABLE_DECLARE(int tcp4_gro_complete(struct sk_buff *, int));
-INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));
 int inet_gro_complete(struct sk_buff *skb, int nhoff)
 {
        __be16 newlen = htons(skb->len - nhoff);
index 0480918..94f18d2 100644 (file)
 struct bpfilter_umh_ops bpfilter_ops;
 EXPORT_SYMBOL_GPL(bpfilter_ops);
 
-static void bpfilter_umh_cleanup(struct umh_info *info)
+void bpfilter_umh_cleanup(struct umd_info *info)
 {
-       mutex_lock(&bpfilter_ops.lock);
-       bpfilter_ops.stop = true;
        fput(info->pipe_to_umh);
        fput(info->pipe_from_umh);
-       info->pid = 0;
-       mutex_unlock(&bpfilter_ops.lock);
+       put_pid(info->tgid);
+       info->tgid = NULL;
 }
+EXPORT_SYMBOL_GPL(bpfilter_umh_cleanup);
 
-static int bpfilter_mbox_request(struct sock *sk, int optname,
-                                char __user *optval,
+static int bpfilter_mbox_request(struct sock *sk, int optname, sockptr_t optval,
                                 unsigned int optlen, bool is_set)
 {
        int err;
@@ -38,7 +36,11 @@ static int bpfilter_mbox_request(struct sock *sk, int optname,
                        goto out;
                }
        }
-       if (bpfilter_ops.stop) {
+       if (bpfilter_ops.info.tgid &&
+           thread_group_exited(bpfilter_ops.info.tgid))
+               bpfilter_umh_cleanup(&bpfilter_ops.info);
+
+       if (!bpfilter_ops.info.tgid) {
                err = bpfilter_ops.start();
                if (err)
                        goto out;
@@ -49,29 +51,31 @@ out:
        return err;
 }
 
-int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
+int bpfilter_ip_set_sockopt(struct sock *sk, int optname, sockptr_t optval,
                            unsigned int optlen)
 {
        return bpfilter_mbox_request(sk, optname, optval, optlen, true);
 }
 
-int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
-                           int __user *optlen)
+int bpfilter_ip_get_sockopt(struct sock *sk, int optname,
+                           char __user *user_optval, int __user *optlen)
 {
-       int len;
+       sockptr_t optval;
+       int err, len;
 
        if (get_user(len, optlen))
                return -EFAULT;
-
+       err = init_user_sockptr(&optval, user_optval);
+       if (err)
+               return err;
        return bpfilter_mbox_request(sk, optname, optval, len, false);
 }
 
 static int __init bpfilter_sockopt_init(void)
 {
        mutex_init(&bpfilter_ops.lock);
-       bpfilter_ops.stop = true;
-       bpfilter_ops.info.cmdline = "bpfilter_umh";
-       bpfilter_ops.info.cleanup = &bpfilter_umh_cleanup;
+       bpfilter_ops.info.tgid = NULL;
+       bpfilter_ops.info.driver_name = "bpfilter_umh";
 
        return 0;
 }
index a23094b..2eb7157 100644 (file)
@@ -10,9 +10,9 @@
  *
  * The CIPSO draft specification can be found in the kernel's Documentation
  * directory as well as the following URL:
- *   http://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt
+ *   https://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt
  * The FIPS-188 specification can be found at the following URL:
- *   http://www.itl.nist.gov/fipspubs/fip188.htm
+ *   https://www.itl.nist.gov/fipspubs/fip188.htm
  *
  * Author: Paul Moore <paul.moore@hp.com>
  */
@@ -283,7 +283,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
 
 /**
  * cipso_v4_cache_add - Add an entry to the CIPSO cache
- * @skb: the packet
+ * @cipso_ptr: pointer to CIPSO IP option
  * @secattr: the packet's security attributes
  *
  * Description:
@@ -1535,6 +1535,7 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
 
 /**
  * cipso_v4_validate - Validate a CIPSO option
+ * @skb: the packet
  * @option: the start of the option, on error it is set to point to the error
  *
  * Description:
@@ -2066,7 +2067,7 @@ void cipso_v4_sock_delattr(struct sock *sk)
 
 /**
  * cipso_v4_req_delattr - Delete the CIPSO option from a request socket
- * @reg: the request socket
+ * @req: the request socket
  *
  * Description:
  * Removes the CIPSO option from a request socket, if present.
@@ -2158,6 +2159,7 @@ int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr)
 /**
  * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet
  * @skb: the packet
+ * @doi_def: the DOI structure
  * @secattr: the security attributes
  *
  * Description:
index d14133e..5bda5ae 100644 (file)
@@ -361,3 +361,4 @@ module_exit(esp4_offload_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
 MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
+MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");
index e53871e..1f75dc6 100644 (file)
@@ -1109,7 +1109,7 @@ static int fib_check_nh_v4_gw(struct net *net, struct fib_nh *nh, u32 table,
                if (fl4.flowi4_scope < RT_SCOPE_LINK)
                        fl4.flowi4_scope = RT_SCOPE_LINK;
 
-               if (table)
+               if (table && table != RT_TABLE_MAIN)
                        tbl = fib_get_table(net, table);
 
                if (tbl)
index 248f1c1..dcb0802 100644 (file)
@@ -13,7 +13,7 @@
  *
  * An experimental study of compression methods for dynamic tries
  * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
- * http://www.csc.kth.se/~snilsson/software/dyntrie2/
+ * https://www.csc.kth.se/~snilsson/software/dyntrie2/
  *
  * IP-address lookup using LC-tries. Stefan Nilsson and Gunnar Karlsson
  * IEEE Journal on Selected Areas in Communications, 17(6):1083-1092, June 1999
index dcc79ff..abd0834 100644 (file)
@@ -1304,3 +1304,4 @@ module_init(fou_init);
 module_exit(fou_fini);
 MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Foo over UDP");
index 956a806..cf36f95 100644 (file)
@@ -427,7 +427,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 
        ipcm_init(&ipc);
        inet->tos = ip_hdr(skb)->tos;
-       sk->sk_mark = mark;
+       ipc.sockc.mark = mark;
        daddr = ipc.addr = ip_hdr(skb)->saddr;
        saddr = fib_compute_spec_dst(skb);
 
@@ -710,10 +710,10 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
        icmp_param.skb    = skb_in;
        icmp_param.offset = skb_network_offset(skb_in);
        inet_sk(sk)->tos = tos;
-       sk->sk_mark = mark;
        ipcm_init(&ipc);
        ipc.addr = iph->saddr;
        ipc.opt = &icmp_param.replyopts.opt;
+       ipc.sockc.mark = mark;
 
        rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
                               type, code, &icmp_param);
@@ -1116,6 +1116,65 @@ error:
        goto drop;
 }
 
+static bool ip_icmp_error_rfc4884_validate(const struct sk_buff *skb, int off)
+{
+       struct icmp_extobj_hdr *objh, _objh;
+       struct icmp_ext_hdr *exth, _exth;
+       u16 olen;
+
+       exth = skb_header_pointer(skb, off, sizeof(_exth), &_exth);
+       if (!exth)
+               return false;
+       if (exth->version != 2)
+               return true;
+
+       if (exth->checksum &&
+           csum_fold(skb_checksum(skb, off, skb->len - off, 0)))
+               return false;
+
+       off += sizeof(_exth);
+       while (off < skb->len) {
+               objh = skb_header_pointer(skb, off, sizeof(_objh), &_objh);
+               if (!objh)
+                       return false;
+
+               olen = ntohs(objh->length);
+               if (olen < sizeof(_objh))
+                       return false;
+
+               off += olen;
+               if (off > skb->len)
+                       return false;
+       }
+
+       return true;
+}
+
+void ip_icmp_error_rfc4884(const struct sk_buff *skb,
+                          struct sock_ee_data_rfc4884 *out,
+                          int thlen, int off)
+{
+       int hlen;
+
+       /* original datagram headers: end of icmph to payload (skb->data) */
+       hlen = -skb_transport_offset(skb) - thlen;
+
+       /* per rfc 4884: minimal datagram length of 128 bytes */
+       if (off < 128 || off < hlen)
+               return;
+
+       /* kernel has stripped headers: return payload offset in bytes */
+       off -= hlen;
+       if (off + sizeof(struct icmp_ext_hdr) > skb->len)
+               return;
+
+       out->len = off;
+
+       if (!ip_icmp_error_rfc4884_validate(skb, off))
+               out->flags |= SO_EE_RFC4884_FLAG_INVALID;
+}
+EXPORT_SYMBOL_GPL(ip_icmp_error_rfc4884);
+
 int icmp_err(struct sk_buff *skb, u32 info)
 {
        struct iphdr *iph = (struct iphdr *)skb->data;
index afaf582..d1a3913 100644 (file)
@@ -648,20 +648,19 @@ no_route:
 EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
 
 /* Decide when to expire the request and when to resend SYN-ACK */
-static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
-                                 const int max_retries,
-                                 const u8 rskq_defer_accept,
-                                 int *expire, int *resend)
+static void syn_ack_recalc(struct request_sock *req,
+                          const int max_syn_ack_retries,
+                          const u8 rskq_defer_accept,
+                          int *expire, int *resend)
 {
        if (!rskq_defer_accept) {
-               *expire = req->num_timeout >= thresh;
+               *expire = req->num_timeout >= max_syn_ack_retries;
                *resend = 1;
                return;
        }
-       *expire = req->num_timeout >= thresh &&
-                 (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
-       /*
-        * Do not resend while waiting for data after ACK,
+       *expire = req->num_timeout >= max_syn_ack_retries &&
+                 (!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept);
+       /* Do not resend while waiting for data after ACK,
         * start to resend on end of deferring period to give
         * last chance for data or ACK to create established socket.
         */
@@ -720,15 +719,12 @@ static void reqsk_timer_handler(struct timer_list *t)
        struct net *net = sock_net(sk_listener);
        struct inet_connection_sock *icsk = inet_csk(sk_listener);
        struct request_sock_queue *queue = &icsk->icsk_accept_queue;
-       int qlen, expire = 0, resend = 0;
-       int max_retries, thresh;
-       u8 defer_accept;
+       int max_syn_ack_retries, qlen, expire = 0, resend = 0;
 
        if (inet_sk_state_load(sk_listener) != TCP_LISTEN)
                goto drop;
 
-       max_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
-       thresh = max_retries;
+       max_syn_ack_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
        /* Normally all the openreqs are young and become mature
         * (i.e. converted to established socket) for first timeout.
         * If synack was not acknowledged for 1 second, it means
@@ -750,17 +746,14 @@ static void reqsk_timer_handler(struct timer_list *t)
        if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
                int young = reqsk_queue_len_young(queue) << 1;
 
-               while (thresh > 2) {
+               while (max_syn_ack_retries > 2) {
                        if (qlen < young)
                                break;
-                       thresh--;
+                       max_syn_ack_retries--;
                        young <<= 1;
                }
        }
-       defer_accept = READ_ONCE(queue->rskq_defer_accept);
-       if (defer_accept)
-               max_retries = defer_accept;
-       syn_ack_recalc(req, thresh, max_retries, defer_accept,
+       syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept),
                       &expire, &resend);
        req->rsk_ops->syn_ack_timeout(req);
        if (!expire &&
@@ -1064,34 +1057,6 @@ void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
 }
 EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
 
-#ifdef CONFIG_COMPAT
-int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
-                              char __user *optval, int __user *optlen)
-{
-       const struct inet_connection_sock *icsk = inet_csk(sk);
-
-       if (icsk->icsk_af_ops->compat_getsockopt)
-               return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
-                                                           optval, optlen);
-       return icsk->icsk_af_ops->getsockopt(sk, level, optname,
-                                            optval, optlen);
-}
-EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);
-
-int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
-                              char __user *optval, unsigned int optlen)
-{
-       const struct inet_connection_sock *icsk = inet_csk(sk);
-
-       if (icsk->icsk_af_ops->compat_setsockopt)
-               return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
-                                                           optval, optlen);
-       return icsk->icsk_af_ops->setsockopt(sk, level, optname,
-                                            optval, optlen);
-}
-EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
-#endif
-
 static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
 {
        const struct inet_sock *inet = inet_sk(sk);
index 125f4f8..4a98dd7 100644 (file)
@@ -52,6 +52,11 @@ static DEFINE_MUTEX(inet_diag_table_mutex);
 
 static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
 {
+       if (proto < 0 || proto >= IPPROTO_MAX) {
+               mutex_lock(&inet_diag_table_mutex);
+               return ERR_PTR(-ENOENT);
+       }
+
        if (!inet_diag_table[proto])
                sock_load_diag_module(AF_INET, proto);
 
@@ -181,6 +186,28 @@ errout:
 }
 EXPORT_SYMBOL_GPL(inet_diag_msg_attrs_fill);
 
+static void inet_diag_parse_attrs(const struct nlmsghdr *nlh, int hdrlen,
+                                 struct nlattr **req_nlas)
+{
+       struct nlattr *nla;
+       int remaining;
+
+       nlmsg_for_each_attr(nla, nlh, hdrlen, remaining) {
+               int type = nla_type(nla);
+
+               if (type < __INET_DIAG_REQ_MAX)
+                       req_nlas[type] = nla;
+       }
+}
+
+static int inet_diag_get_protocol(const struct inet_diag_req_v2 *req,
+                                 const struct inet_diag_dump_data *data)
+{
+       if (data->req_nlas[INET_DIAG_REQ_PROTOCOL])
+               return nla_get_u32(data->req_nlas[INET_DIAG_REQ_PROTOCOL]);
+       return req->sdiag_protocol;
+}
+
 #define MAX_DUMP_ALLOC_SIZE (KMALLOC_MAX_SIZE - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
 int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
@@ -198,7 +225,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
        void *info = NULL;
 
        cb_data = cb->data;
-       handler = inet_diag_table[req->sdiag_protocol];
+       handler = inet_diag_table[inet_diag_get_protocol(req, cb_data)];
        BUG_ON(!handler);
 
        nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
@@ -539,20 +566,25 @@ EXPORT_SYMBOL_GPL(inet_diag_dump_one_icsk);
 
 static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb,
                               const struct nlmsghdr *nlh,
+                              int hdrlen,
                               const struct inet_diag_req_v2 *req)
 {
        const struct inet_diag_handler *handler;
-       int err;
+       struct inet_diag_dump_data dump_data;
+       int err, protocol;
 
-       handler = inet_diag_lock_handler(req->sdiag_protocol);
+       memset(&dump_data, 0, sizeof(dump_data));
+       inet_diag_parse_attrs(nlh, hdrlen, dump_data.req_nlas);
+       protocol = inet_diag_get_protocol(req, &dump_data);
+
+       handler = inet_diag_lock_handler(protocol);
        if (IS_ERR(handler)) {
                err = PTR_ERR(handler);
        } else if (cmd == SOCK_DIAG_BY_FAMILY) {
-               struct inet_diag_dump_data empty_dump_data = {};
                struct netlink_callback cb = {
                        .nlh = nlh,
                        .skb = in_skb,
-                       .data = &empty_dump_data,
+                       .data = &dump_data,
                };
                err = handler->dump_one(&cb, req);
        } else if (cmd == SOCK_DESTROY && handler->destroy) {
@@ -1103,13 +1135,16 @@ EXPORT_SYMBOL_GPL(inet_diag_dump_icsk);
 static int __inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
                            const struct inet_diag_req_v2 *r)
 {
+       struct inet_diag_dump_data *cb_data = cb->data;
        const struct inet_diag_handler *handler;
        u32 prev_min_dump_alloc;
-       int err = 0;
+       int protocol, err = 0;
+
+       protocol = inet_diag_get_protocol(r, cb_data);
 
 again:
        prev_min_dump_alloc = cb->min_dump_alloc;
-       handler = inet_diag_lock_handler(r->sdiag_protocol);
+       handler = inet_diag_lock_handler(protocol);
        if (!IS_ERR(handler))
                handler->dump(skb, cb, r);
        else
@@ -1139,19 +1174,13 @@ static int __inet_diag_dump_start(struct netlink_callback *cb, int hdrlen)
        struct inet_diag_dump_data *cb_data;
        struct sk_buff *skb = cb->skb;
        struct nlattr *nla;
-       int rem, err;
+       int err;
 
        cb_data = kzalloc(sizeof(*cb_data), GFP_KERNEL);
        if (!cb_data)
                return -ENOMEM;
 
-       nla_for_each_attr(nla, nlmsg_attrdata(nlh, hdrlen),
-                         nlmsg_attrlen(nlh, hdrlen), rem) {
-               int type = nla_type(nla);
-
-               if (type < __INET_DIAG_REQ_MAX)
-                       cb_data->req_nlas[type] = nla;
-       }
+       inet_diag_parse_attrs(nlh, hdrlen, cb_data->req_nlas);
 
        nla = cb_data->inet_diag_nla_bc;
        if (nla) {
@@ -1237,7 +1266,8 @@ static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
        req.idiag_states = rc->idiag_states;
        req.id = rc->id;
 
-       return inet_diag_cmd_exact(SOCK_DIAG_BY_FAMILY, in_skb, nlh, &req);
+       return inet_diag_cmd_exact(SOCK_DIAG_BY_FAMILY, in_skb, nlh,
+                                  sizeof(struct inet_diag_req), &req);
 }
 
 static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
@@ -1279,7 +1309,8 @@ static int inet_diag_handler_cmd(struct sk_buff *skb, struct nlmsghdr *h)
                return netlink_dump_start(net->diag_nlsk, skb, h, &c);
        }
 
-       return inet_diag_cmd_exact(h->nlmsg_type, skb, h, nlmsg_data(h));
+       return inet_diag_cmd_exact(h->nlmsg_type, skb, h, hdrlen,
+                                  nlmsg_data(h));
 }
 
 static
index 2bbaaf0..4eb4cd8 100644 (file)
@@ -246,6 +246,21 @@ static inline int compute_score(struct sock *sk, struct net *net,
        return score;
 }
 
+static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk,
+                                           struct sk_buff *skb, int doff,
+                                           __be32 saddr, __be16 sport,
+                                           __be32 daddr, unsigned short hnum)
+{
+       struct sock *reuse_sk = NULL;
+       u32 phash;
+
+       if (sk->sk_reuseport) {
+               phash = inet_ehashfn(net, daddr, hnum, saddr, sport);
+               reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
+       }
+       return reuse_sk;
+}
+
 /*
  * Here are some nice properties to exploit here. The BSD API
  * does not allow a listening sock to specify the remote port nor the
@@ -265,21 +280,17 @@ static struct sock *inet_lhash2_lookup(struct net *net,
        struct inet_connection_sock *icsk;
        struct sock *sk, *result = NULL;
        int score, hiscore = 0;
-       u32 phash = 0;
 
        inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) {
                sk = (struct sock *)icsk;
                score = compute_score(sk, net, hnum, daddr,
                                      dif, sdif, exact_dif);
                if (score > hiscore) {
-                       if (sk->sk_reuseport) {
-                               phash = inet_ehashfn(net, daddr, hnum,
-                                                    saddr, sport);
-                               result = reuseport_select_sock(sk, phash,
-                                                              skb, doff);
-                               if (result)
-                                       return result;
-                       }
+                       result = lookup_reuseport(net, sk, skb, doff,
+                                                 saddr, sport, daddr, hnum);
+                       if (result)
+                               return result;
+
                        result = sk;
                        hiscore = score;
                }
@@ -288,6 +299,29 @@ static struct sock *inet_lhash2_lookup(struct net *net,
        return result;
 }
 
+static inline struct sock *inet_lookup_run_bpf(struct net *net,
+                                              struct inet_hashinfo *hashinfo,
+                                              struct sk_buff *skb, int doff,
+                                              __be32 saddr, __be16 sport,
+                                              __be32 daddr, u16 hnum)
+{
+       struct sock *sk, *reuse_sk;
+       bool no_reuseport;
+
+       if (hashinfo != &tcp_hashinfo)
+               return NULL; /* only TCP is supported */
+
+       no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP,
+                                           saddr, sport, daddr, hnum, &sk);
+       if (no_reuseport || IS_ERR_OR_NULL(sk))
+               return sk;
+
+       reuse_sk = lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum);
+       if (reuse_sk)
+               sk = reuse_sk;
+       return sk;
+}
+
 struct sock *__inet_lookup_listener(struct net *net,
                                    struct inet_hashinfo *hashinfo,
                                    struct sk_buff *skb, int doff,
@@ -299,6 +333,14 @@ struct sock *__inet_lookup_listener(struct net *net,
        struct sock *result = NULL;
        unsigned int hash2;
 
+       /* Lookup redirect from BPF */
+       if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
+               result = inet_lookup_run_bpf(net, hashinfo, skb, doff,
+                                            saddr, sport, daddr, hnum);
+               if (result)
+                       goto done;
+       }
+
        hash2 = ipv4_portaddr_hash(net, daddr, hnum);
        ilb2 = inet_lhash2_bucket(hashinfo, hash2);
 
index ddaa01e..948747a 100644 (file)
@@ -519,15 +519,20 @@ void ip_options_undo(struct ip_options *opt)
        }
 }
 
-static struct ip_options_rcu *ip_options_get_alloc(const int optlen)
+int ip_options_get(struct net *net, struct ip_options_rcu **optp,
+                  sockptr_t data, int optlen)
 {
-       return kzalloc(sizeof(struct ip_options_rcu) + ((optlen + 3) & ~3),
+       struct ip_options_rcu *opt;
+
+       opt = kzalloc(sizeof(struct ip_options_rcu) + ((optlen + 3) & ~3),
                       GFP_KERNEL);
-}
+       if (!opt)
+               return -ENOMEM;
+       if (optlen && copy_from_sockptr(opt->opt.__data, data, optlen)) {
+               kfree(opt);
+               return -EFAULT;
+       }
 
-static int ip_options_get_finish(struct net *net, struct ip_options_rcu **optp,
-                                struct ip_options_rcu *opt, int optlen)
-{
        while (optlen & 3)
                opt->opt.__data[optlen++] = IPOPT_END;
        opt->opt.optlen = optlen;
@@ -540,32 +545,6 @@ static int ip_options_get_finish(struct net *net, struct ip_options_rcu **optp,
        return 0;
 }
 
-int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
-                            unsigned char __user *data, int optlen)
-{
-       struct ip_options_rcu *opt = ip_options_get_alloc(optlen);
-
-       if (!opt)
-               return -ENOMEM;
-       if (optlen && copy_from_user(opt->opt.__data, data, optlen)) {
-               kfree(opt);
-               return -EFAULT;
-       }
-       return ip_options_get_finish(net, optp, opt, optlen);
-}
-
-int ip_options_get(struct net *net, struct ip_options_rcu **optp,
-                  unsigned char *data, int optlen)
-{
-       struct ip_options_rcu *opt = ip_options_get_alloc(optlen);
-
-       if (!opt)
-               return -ENOMEM;
-       if (optlen)
-               memcpy(opt->opt.__data, data, optlen);
-       return ip_options_get_finish(net, optp, opt, optlen);
-}
-
 void ip_forward_options(struct sk_buff *skb)
 {
        struct   ip_options *opt        = &(IPCB(skb)->opt);
index 090d309..61f802d 100644 (file)
@@ -539,6 +539,12 @@ no_route:
 }
 EXPORT_SYMBOL(__ip_queue_xmit);
 
+int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
+{
+       return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
+}
+EXPORT_SYMBOL(ip_queue_xmit);
+
 static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 {
        to->pkt_type = from->pkt_type;
@@ -1702,7 +1708,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
        sk->sk_protocol = ip_hdr(skb)->protocol;
        sk->sk_bound_dev_if = arg->bound_dev_if;
        sk->sk_sndbuf = sysctl_wmem_default;
-       sk->sk_mark = fl4.flowi4_mark;
+       ipc.sockc.mark = fl4.flowi4_mark;
        err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
                             len, 0, &ipc, &rt, MSG_DONTWAIT);
        if (unlikely(err)) {
index 84ec370..d2c2235 100644 (file)
@@ -280,7 +280,8 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
                        err = cmsg->cmsg_len - sizeof(struct cmsghdr);
 
                        /* Our caller is responsible for freeing ipc->opt */
-                       err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
+                       err = ip_options_get(net, &ipc->opt,
+                                            KERNEL_SOCKPTR(CMSG_DATA(cmsg)),
                                             err < 40 ? err : 40);
                        if (err)
                                return err;
@@ -389,6 +390,18 @@ int ip_ra_control(struct sock *sk, unsigned char on,
        return 0;
 }
 
+static void ipv4_icmp_error_rfc4884(const struct sk_buff *skb,
+                                   struct sock_ee_data_rfc4884 *out)
+{
+       switch (icmp_hdr(skb)->type) {
+       case ICMP_DEST_UNREACH:
+       case ICMP_TIME_EXCEEDED:
+       case ICMP_PARAMETERPROB:
+               ip_icmp_error_rfc4884(skb, out, sizeof(struct icmphdr),
+                                     icmp_hdr(skb)->un.reserved[1] * 4);
+       }
+}
+
 void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
                   __be16 port, u32 info, u8 *payload)
 {
@@ -411,6 +424,9 @@ void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
        serr->port = port;
 
        if (skb_pull(skb, payload - skb->data)) {
+               if (inet_sk(sk)->recverr_rfc4884)
+                       ipv4_icmp_error_rfc4884(skb, &serr->ee.ee_rfc4884);
+
                skb_reset_transport_header(skb);
                if (sock_queue_err_skb(sk, skb) == 0)
                        return;
@@ -679,20 +695,48 @@ Eaddrnotavail:
        return -EADDRNOTAVAIL;
 }
 
+static int copy_group_source_from_sockptr(struct group_source_req *greqs,
+               sockptr_t optval, int optlen)
+{
+       if (in_compat_syscall()) {
+               struct compat_group_source_req gr32;
+
+               if (optlen != sizeof(gr32))
+                       return -EINVAL;
+               if (copy_from_sockptr(&gr32, optval, sizeof(gr32)))
+                       return -EFAULT;
+               greqs->gsr_interface = gr32.gsr_interface;
+               greqs->gsr_group = gr32.gsr_group;
+               greqs->gsr_source = gr32.gsr_source;
+       } else {
+               if (optlen != sizeof(*greqs))
+                       return -EINVAL;
+               if (copy_from_sockptr(greqs, optval, sizeof(*greqs)))
+                       return -EFAULT;
+       }
+
+       return 0;
+}
+
 static int do_mcast_group_source(struct sock *sk, int optname,
-                                struct group_source_req *greqs)
+               sockptr_t optval, int optlen)
 {
+       struct group_source_req greqs;
        struct ip_mreq_source mreqs;
        struct sockaddr_in *psin;
        int omode, add, err;
 
-       if (greqs->gsr_group.ss_family != AF_INET ||
-           greqs->gsr_source.ss_family != AF_INET)
+       err = copy_group_source_from_sockptr(&greqs, optval, optlen);
+       if (err)
+               return err;
+
+       if (greqs.gsr_group.ss_family != AF_INET ||
+           greqs.gsr_source.ss_family != AF_INET)
                return -EADDRNOTAVAIL;
 
-       psin = (struct sockaddr_in *)&greqs->gsr_group;
+       psin = (struct sockaddr_in *)&greqs.gsr_group;
        mreqs.imr_multiaddr = psin->sin_addr.s_addr;
-       psin = (struct sockaddr_in *)&greqs->gsr_source;
+       psin = (struct sockaddr_in *)&greqs.gsr_source;
        mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
        mreqs.imr_interface = 0; /* use index for mc_source */
 
@@ -705,25 +749,145 @@ static int do_mcast_group_source(struct sock *sk, int optname,
        } else if (optname == MCAST_JOIN_SOURCE_GROUP) {
                struct ip_mreqn mreq;
 
-               psin = (struct sockaddr_in *)&greqs->gsr_group;
+               psin = (struct sockaddr_in *)&greqs.gsr_group;
                mreq.imr_multiaddr = psin->sin_addr;
                mreq.imr_address.s_addr = 0;
-               mreq.imr_ifindex = greqs->gsr_interface;
+               mreq.imr_ifindex = greqs.gsr_interface;
                err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
                if (err && err != -EADDRINUSE)
                        return err;
-               greqs->gsr_interface = mreq.imr_ifindex;
+               greqs.gsr_interface = mreq.imr_ifindex;
                omode = MCAST_INCLUDE;
                add = 1;
        } else /* MCAST_LEAVE_SOURCE_GROUP */ {
                omode = MCAST_INCLUDE;
                add = 0;
        }
-       return ip_mc_source(add, omode, sk, &mreqs, greqs->gsr_interface);
+       return ip_mc_source(add, omode, sk, &mreqs, greqs.gsr_interface);
+}
+
+static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen)
+{
+       struct group_filter *gsf = NULL;
+       int err;
+
+       if (optlen < GROUP_FILTER_SIZE(0))
+               return -EINVAL;
+       if (optlen > sysctl_optmem_max)
+               return -ENOBUFS;
+
+       gsf = memdup_sockptr(optval, optlen);
+       if (IS_ERR(gsf))
+               return PTR_ERR(gsf);
+
+       /* numsrc >= (4G-140)/128 overflow in 32 bits */
+       err = -ENOBUFS;
+       if (gsf->gf_numsrc >= 0x1ffffff ||
+           gsf->gf_numsrc > sock_net(sk)->ipv4.sysctl_igmp_max_msf)
+               goto out_free_gsf;
+
+       err = -EINVAL;
+       if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen)
+               goto out_free_gsf;
+
+       err = set_mcast_msfilter(sk, gsf->gf_interface, gsf->gf_numsrc,
+                                gsf->gf_fmode, &gsf->gf_group, gsf->gf_slist);
+out_free_gsf:
+       kfree(gsf);
+       return err;
+}
+
+static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
+               int optlen)
+{
+       const int size0 = offsetof(struct compat_group_filter, gf_slist);
+       struct compat_group_filter *gf32;
+       unsigned int n;
+       void *p;
+       int err;
+
+       if (optlen < size0)
+               return -EINVAL;
+       if (optlen > sysctl_optmem_max - 4)
+               return -ENOBUFS;
+
+       p = kmalloc(optlen + 4, GFP_KERNEL);
+       if (!p)
+               return -ENOMEM;
+       gf32 = p + 4; /* we want ->gf_group and ->gf_slist aligned */
+
+       err = -EFAULT;
+       if (copy_from_sockptr(gf32, optval, optlen))
+               goto out_free_gsf;
+
+       /* numsrc >= (4G-140)/128 overflow in 32 bits */
+       n = gf32->gf_numsrc;
+       err = -ENOBUFS;
+       if (n >= 0x1ffffff)
+               goto out_free_gsf;
+
+       err = -EINVAL;
+       if (offsetof(struct compat_group_filter, gf_slist[n]) > optlen)
+               goto out_free_gsf;
+
+       /* numsrc >= (4G-140)/128 overflow in 32 bits */
+       err = -ENOBUFS;
+       if (n > sock_net(sk)->ipv4.sysctl_igmp_max_msf)
+               goto out_free_gsf;
+       err = set_mcast_msfilter(sk, gf32->gf_interface, n, gf32->gf_fmode,
+                                &gf32->gf_group, gf32->gf_slist);
+out_free_gsf:
+       kfree(p);
+       return err;
 }
 
-static int do_ip_setsockopt(struct sock *sk, int level,
-                           int optname, char __user *optval, unsigned int optlen)
+static int ip_mcast_join_leave(struct sock *sk, int optname,
+               sockptr_t optval, int optlen)
+{
+       struct ip_mreqn mreq = { };
+       struct sockaddr_in *psin;
+       struct group_req greq;
+
+       if (optlen < sizeof(struct group_req))
+               return -EINVAL;
+       if (copy_from_sockptr(&greq, optval, sizeof(greq)))
+               return -EFAULT;
+
+       psin = (struct sockaddr_in *)&greq.gr_group;
+       if (psin->sin_family != AF_INET)
+               return -EINVAL;
+       mreq.imr_multiaddr = psin->sin_addr;
+       mreq.imr_ifindex = greq.gr_interface;
+       if (optname == MCAST_JOIN_GROUP)
+               return ip_mc_join_group(sk, &mreq);
+       return ip_mc_leave_group(sk, &mreq);
+}
+
+static int compat_ip_mcast_join_leave(struct sock *sk, int optname,
+               sockptr_t optval, int optlen)
+{
+       struct compat_group_req greq;
+       struct ip_mreqn mreq = { };
+       struct sockaddr_in *psin;
+
+       if (optlen < sizeof(struct compat_group_req))
+               return -EINVAL;
+       if (copy_from_sockptr(&greq, optval, sizeof(greq)))
+               return -EFAULT;
+
+       psin = (struct sockaddr_in *)&greq.gr_group;
+       if (psin->sin_family != AF_INET)
+               return -EINVAL;
+       mreq.imr_multiaddr = psin->sin_addr;
+       mreq.imr_ifindex = greq.gr_interface;
+
+       if (optname == MCAST_JOIN_GROUP)
+               return ip_mc_join_group(sk, &mreq);
+       return ip_mc_leave_group(sk, &mreq);
+}
+
+static int do_ip_setsockopt(struct sock *sk, int level, int optname,
+               sockptr_t optval, unsigned int optlen)
 {
        struct inet_sock *inet = inet_sk(sk);
        struct net *net = sock_net(sk);
@@ -755,13 +919,14 @@ static int do_ip_setsockopt(struct sock *sk, int level,
        case IP_RECVORIGDSTADDR:
        case IP_CHECKSUM:
        case IP_RECVFRAGSIZE:
+       case IP_RECVERR_RFC4884:
                if (optlen >= sizeof(int)) {
-                       if (get_user(val, (int __user *) optval))
+                       if (copy_from_sockptr(&val, optval, sizeof(val)))
                                return -EFAULT;
                } else if (optlen >= sizeof(char)) {
                        unsigned char ucval;
 
-                       if (get_user(ucval, (unsigned char __user *) optval))
+                       if (copy_from_sockptr(&ucval, optval, sizeof(ucval)))
                                return -EFAULT;
                        val = (int) ucval;
                }
@@ -786,8 +951,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 
                if (optlen > 40)
                        goto e_inval;
-               err = ip_options_get_from_user(sock_net(sk), &opt,
-                                              optval, optlen);
+               err = ip_options_get(sock_net(sk), &opt, optval, optlen);
                if (err)
                        break;
                old = rcu_dereference_protected(inet->inet_opt,
@@ -914,6 +1078,11 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                if (!val)
                        skb_queue_purge(&sk->sk_error_queue);
                break;
+       case IP_RECVERR_RFC4884:
+               if (val < 0 || val > 1)
+                       goto e_inval;
+               inet->recverr_rfc4884 = !!val;
+               break;
        case IP_MULTICAST_TTL:
                if (sk->sk_type == SOCK_STREAM)
                        goto e_inval;
@@ -980,17 +1149,17 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 
                err = -EFAULT;
                if (optlen >= sizeof(struct ip_mreqn)) {
-                       if (copy_from_user(&mreq, optval, sizeof(mreq)))
+                       if (copy_from_sockptr(&mreq, optval, sizeof(mreq)))
                                break;
                } else {
                        memset(&mreq, 0, sizeof(mreq));
                        if (optlen >= sizeof(struct ip_mreq)) {
-                               if (copy_from_user(&mreq, optval,
-                                                  sizeof(struct ip_mreq)))
+                               if (copy_from_sockptr(&mreq, optval,
+                                                     sizeof(struct ip_mreq)))
                                        break;
                        } else if (optlen >= sizeof(struct in_addr)) {
-                               if (copy_from_user(&mreq.imr_address, optval,
-                                                  sizeof(struct in_addr)))
+                               if (copy_from_sockptr(&mreq.imr_address, optval,
+                                                     sizeof(struct in_addr)))
                                        break;
                        }
                }
@@ -1042,11 +1211,12 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                        goto e_inval;
                err = -EFAULT;
                if (optlen >= sizeof(struct ip_mreqn)) {
-                       if (copy_from_user(&mreq, optval, sizeof(mreq)))
+                       if (copy_from_sockptr(&mreq, optval, sizeof(mreq)))
                                break;
                } else {
                        memset(&mreq, 0, sizeof(mreq));
-                       if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq)))
+                       if (copy_from_sockptr(&mreq, optval,
+                                             sizeof(struct ip_mreq)))
                                break;
                }
 
@@ -1066,7 +1236,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
                        err = -ENOBUFS;
                        break;
                }
-               msf = memdup_user(optval, optlen);
+               msf = memdup_sockptr(optval, optlen);
                if (IS_ERR(msf)) {
                        err = PTR_ERR(msf);
                        break;
@@ -1097,7 +1267,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 
                if (optlen != sizeof(struct ip_mreq_source))
                        goto e_inval;
-               if (copy_from_user(&mreqs, optval, sizeof(mreqs))) {
+               if (copy_from_sockptr(&mreqs, optval, sizeof(mreqs))) {
                        err = -EFAULT;
                        break;
                }
@@ -1127,77 +1297,24 @@ static int do_ip_setsockopt(struct sock *sk, int level,
        }
        case MCAST_JOIN_GROUP:
        case MCAST_LEAVE_GROUP:
-       {
-               struct group_req greq;
-               struct sockaddr_in *psin;
-               struct ip_mreqn mreq;
-
-               if (optlen < sizeof(struct group_req))
-                       goto e_inval;
-               err = -EFAULT;
-               if (copy_from_user(&greq, optval, sizeof(greq)))
-                       break;
-               psin = (struct sockaddr_in *)&greq.gr_group;
-               if (psin->sin_family != AF_INET)
-                       goto e_inval;
-               memset(&mreq, 0, sizeof(mreq));
-               mreq.imr_multiaddr = psin->sin_addr;
-               mreq.imr_ifindex = greq.gr_interface;
-
-               if (optname == MCAST_JOIN_GROUP)
-                       err = ip_mc_join_group(sk, &mreq);
+               if (in_compat_syscall())
+                       err = compat_ip_mcast_join_leave(sk, optname, optval,
+                                                        optlen);
                else
-                       err = ip_mc_leave_group(sk, &mreq);
+                       err = ip_mcast_join_leave(sk, optname, optval, optlen);
                break;
-       }
        case MCAST_JOIN_SOURCE_GROUP:
        case MCAST_LEAVE_SOURCE_GROUP:
        case MCAST_BLOCK_SOURCE:
        case MCAST_UNBLOCK_SOURCE:
-       {
-               struct group_source_req greqs;
-
-               if (optlen != sizeof(struct group_source_req))
-                       goto e_inval;
-               if (copy_from_user(&greqs, optval, sizeof(greqs))) {
-                       err = -EFAULT;
-                       break;
-               }
-               err = do_mcast_group_source(sk, optname, &greqs);
+               err = do_mcast_group_source(sk, optname, optval, optlen);
                break;
-       }
        case MCAST_MSFILTER:
-       {
-               struct group_filter *gsf = NULL;
-
-               if (optlen < GROUP_FILTER_SIZE(0))
-                       goto e_inval;
-               if (optlen > sysctl_optmem_max) {
-                       err = -ENOBUFS;
-                       break;
-               }
-               gsf = memdup_user(optval, optlen);
-               if (IS_ERR(gsf)) {
-                       err = PTR_ERR(gsf);
-                       break;
-               }
-               /* numsrc >= (4G-140)/128 overflow in 32 bits */
-               if (gsf->gf_numsrc >= 0x1ffffff ||
-                   gsf->gf_numsrc > net->ipv4.sysctl_igmp_max_msf) {
-                       err = -ENOBUFS;
-                       goto mc_msf_out;
-               }
-               if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
-                       err = -EINVAL;
-                       goto mc_msf_out;
-               }
-               err = set_mcast_msfilter(sk, gsf->gf_interface,
-                                        gsf->gf_numsrc, gsf->gf_fmode,
-                                        &gsf->gf_group, gsf->gf_slist);
-mc_msf_out:
-               kfree(gsf);
+               if (in_compat_syscall())
+                       err = compat_ip_set_mcast_msfilter(sk, optval, optlen);
+               else
+                       err = ip_set_mcast_msfilter(sk, optval, optlen);
                break;
-       }
        case IP_MULTICAST_ALL:
                if (optlen < 1)
                        goto e_inval;
@@ -1296,8 +1413,8 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
        skb_dst_drop(skb);
 }
 
-int ip_setsockopt(struct sock *sk, int level,
-               int optname, char __user *optval, unsigned int optlen)
+int ip_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+               unsigned int optlen)
 {
        int err;
 
@@ -1322,138 +1439,6 @@ int ip_setsockopt(struct sock *sk, int level,
 }
 EXPORT_SYMBOL(ip_setsockopt);
 
-#ifdef CONFIG_COMPAT
-int compat_ip_setsockopt(struct sock *sk, int level, int optname,
-                        char __user *optval, unsigned int optlen)
-{
-       int err;
-
-       if (level != SOL_IP)
-               return -ENOPROTOOPT;
-
-       switch (optname) {
-       case MCAST_JOIN_GROUP:
-       case MCAST_LEAVE_GROUP:
-       {
-               struct compat_group_req __user *gr32 = (void __user *)optval;
-               struct group_req greq;
-               struct sockaddr_in *psin = (struct sockaddr_in *)&greq.gr_group;
-               struct ip_mreqn mreq;
-
-               if (optlen < sizeof(struct compat_group_req))
-                       return -EINVAL;
-
-               if (get_user(greq.gr_interface, &gr32->gr_interface) ||
-                   copy_from_user(&greq.gr_group, &gr32->gr_group,
-                               sizeof(greq.gr_group)))
-                       return -EFAULT;
-
-               if (psin->sin_family != AF_INET)
-                       return -EINVAL;
-
-               memset(&mreq, 0, sizeof(mreq));
-               mreq.imr_multiaddr = psin->sin_addr;
-               mreq.imr_ifindex = greq.gr_interface;
-
-               rtnl_lock();
-               lock_sock(sk);
-               if (optname == MCAST_JOIN_GROUP)
-                       err = ip_mc_join_group(sk, &mreq);
-               else
-                       err = ip_mc_leave_group(sk, &mreq);
-               release_sock(sk);
-               rtnl_unlock();
-               return err;
-       }
-       case MCAST_JOIN_SOURCE_GROUP:
-       case MCAST_LEAVE_SOURCE_GROUP:
-       case MCAST_BLOCK_SOURCE:
-       case MCAST_UNBLOCK_SOURCE:
-       {
-               struct compat_group_source_req __user *gsr32 = (void __user *)optval;
-               struct group_source_req greqs;
-
-               if (optlen != sizeof(struct compat_group_source_req))
-                       return -EINVAL;
-
-               if (get_user(greqs.gsr_interface, &gsr32->gsr_interface) ||
-                   copy_from_user(&greqs.gsr_group, &gsr32->gsr_group,
-                               sizeof(greqs.gsr_group)) ||
-                   copy_from_user(&greqs.gsr_source, &gsr32->gsr_source,
-                               sizeof(greqs.gsr_source)))
-                       return -EFAULT;
-
-               rtnl_lock();
-               lock_sock(sk);
-               err = do_mcast_group_source(sk, optname, &greqs);
-               release_sock(sk);
-               rtnl_unlock();
-               return err;
-       }
-       case MCAST_MSFILTER:
-       {
-               const int size0 = offsetof(struct compat_group_filter, gf_slist);
-               struct compat_group_filter *gf32;
-               unsigned int n;
-               void *p;
-
-               if (optlen < size0)
-                       return -EINVAL;
-               if (optlen > sysctl_optmem_max - 4)
-                       return -ENOBUFS;
-
-               p = kmalloc(optlen + 4, GFP_KERNEL);
-               if (!p)
-                       return -ENOMEM;
-               gf32 = p + 4; /* we want ->gf_group and ->gf_slist aligned */
-               if (copy_from_user(gf32, optval, optlen)) {
-                       err = -EFAULT;
-                       goto mc_msf_out;
-               }
-
-               n = gf32->gf_numsrc;
-               /* numsrc >= (4G-140)/128 overflow in 32 bits */
-               if (n >= 0x1ffffff) {
-                       err = -ENOBUFS;
-                       goto mc_msf_out;
-               }
-               if (offsetof(struct compat_group_filter, gf_slist[n]) > optlen) {
-                       err = -EINVAL;
-                       goto mc_msf_out;
-               }
-
-               rtnl_lock();
-               lock_sock(sk);
-               /* numsrc >= (4G-140)/128 overflow in 32 bits */
-               if (n > sock_net(sk)->ipv4.sysctl_igmp_max_msf)
-                       err = -ENOBUFS;
-               else
-                       err = set_mcast_msfilter(sk, gf32->gf_interface,
-                                                n, gf32->gf_fmode,
-                                                &gf32->gf_group, gf32->gf_slist);
-               release_sock(sk);
-               rtnl_unlock();
-mc_msf_out:
-               kfree(p);
-               return err;
-       }
-       }
-
-       err = do_ip_setsockopt(sk, level, optname, optval, optlen);
-#ifdef CONFIG_NETFILTER
-       /* we need to exclude all possible ENOPROTOOPTs except default case */
-       if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
-                       optname != IP_IPSEC_POLICY &&
-                       optname != IP_XFRM_POLICY &&
-                       !ip_mroute_opt(optname))
-               err = compat_nf_setsockopt(sk, PF_INET, optname, optval,
-                                          optlen);
-#endif
-       return err;
-}
-EXPORT_SYMBOL(compat_ip_setsockopt);
-#endif
-
 /*
  *     Get the options. Note for future reference. The GET of IP options gets
  *     the _received_ ones. The set sets the _sent_ ones.
@@ -1469,8 +1454,67 @@ static bool getsockopt_needs_rtnl(int optname)
        return false;
 }
 
+static int ip_get_mcast_msfilter(struct sock *sk, void __user *optval,
+               int __user *optlen, int len)
+{
+       const int size0 = offsetof(struct group_filter, gf_slist);
+       struct group_filter __user *p = optval;
+       struct group_filter gsf;
+       int num;
+       int err;
+
+       if (len < size0)
+               return -EINVAL;
+       if (copy_from_user(&gsf, p, size0))
+               return -EFAULT;
+
+       num = gsf.gf_numsrc;
+       err = ip_mc_gsfget(sk, &gsf, p->gf_slist);
+       if (err)
+               return err;
+       if (gsf.gf_numsrc < num)
+               num = gsf.gf_numsrc;
+       if (put_user(GROUP_FILTER_SIZE(num), optlen) ||
+           copy_to_user(p, &gsf, size0))
+               return -EFAULT;
+       return 0;
+}
+
+static int compat_ip_get_mcast_msfilter(struct sock *sk, void __user *optval,
+               int __user *optlen, int len)
+{
+       const int size0 = offsetof(struct compat_group_filter, gf_slist);
+       struct compat_group_filter __user *p = optval;
+       struct compat_group_filter gf32;
+       struct group_filter gf;
+       int num;
+       int err;
+
+       if (len < size0)
+               return -EINVAL;
+       if (copy_from_user(&gf32, p, size0))
+               return -EFAULT;
+
+       gf.gf_interface = gf32.gf_interface;
+       gf.gf_fmode = gf32.gf_fmode;
+       num = gf.gf_numsrc = gf32.gf_numsrc;
+       gf.gf_group = gf32.gf_group;
+
+       err = ip_mc_gsfget(sk, &gf, p->gf_slist);
+       if (err)
+               return err;
+       if (gf.gf_numsrc < num)
+               num = gf.gf_numsrc;
+       len = GROUP_FILTER_SIZE(num) - (sizeof(gf) - sizeof(gf32));
+       if (put_user(len, optlen) ||
+           put_user(gf.gf_fmode, &p->gf_fmode) ||
+           put_user(gf.gf_numsrc, &p->gf_numsrc))
+               return -EFAULT;
+       return 0;
+}
+
 static int do_ip_getsockopt(struct sock *sk, int level, int optname,
-                           char __user *optval, int __user *optlen, unsigned int flags)
+                           char __user *optval, int __user *optlen)
 {
        struct inet_sock *inet = inet_sk(sk);
        bool needs_rtnl = getsockopt_needs_rtnl(optname);
@@ -1588,6 +1632,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
        case IP_RECVERR:
                val = inet->recverr;
                break;
+       case IP_RECVERR_RFC4884:
+               val = inet->recverr_rfc4884;
+               break;
        case IP_MULTICAST_TTL:
                val = inet->mc_ttl;
                break;
@@ -1627,31 +1674,12 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
                goto out;
        }
        case MCAST_MSFILTER:
-       {
-               struct group_filter __user *p = (void __user *)optval;
-               struct group_filter gsf;
-               const int size0 = offsetof(struct group_filter, gf_slist);
-               int num;
-
-               if (len < size0) {
-                       err = -EINVAL;
-                       goto out;
-               }
-               if (copy_from_user(&gsf, p, size0)) {
-                       err = -EFAULT;
-                       goto out;
-               }
-               num = gsf.gf_numsrc;
-               err = ip_mc_gsfget(sk, &gsf, p->gf_slist);
-               if (err)
-                       goto out;
-               if (gsf.gf_numsrc < num)
-                       num = gsf.gf_numsrc;
-               if (put_user(GROUP_FILTER_SIZE(num), optlen) ||
-                   copy_to_user(p, &gsf, size0))
-                       err = -EFAULT;
+               if (in_compat_syscall())
+                       err = compat_ip_get_mcast_msfilter(sk, optval, optlen,
+                                                          len);
+               else
+                       err = ip_get_mcast_msfilter(sk, optval, optlen, len);
                goto out;
-       }
        case IP_MULTICAST_ALL:
                val = inet->mc_all;
                break;
@@ -1667,7 +1695,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
                msg.msg_control_is_user = true;
                msg.msg_control_user = optval;
                msg.msg_controllen = len;
-               msg.msg_flags = flags;
+               msg.msg_flags = in_compat_syscall() ? MSG_CMSG_COMPAT : 0;
 
                if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
                        struct in_pktinfo info;
@@ -1731,7 +1759,8 @@ int ip_getsockopt(struct sock *sk, int level,
 {
        int err;
 
-       err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
+       err = do_ip_getsockopt(sk, level, optname, optval, optlen);
+
 #if IS_ENABLED(CONFIG_BPFILTER_UMH)
        if (optname >= BPFILTER_IPT_SO_GET_INFO &&
            optname < BPFILTER_IPT_GET_MAX)
@@ -1755,79 +1784,3 @@ int ip_getsockopt(struct sock *sk, int level,
        return err;
 }
 EXPORT_SYMBOL(ip_getsockopt);
-
-#ifdef CONFIG_COMPAT
-int compat_ip_getsockopt(struct sock *sk, int level, int optname,
-                        char __user *optval, int __user *optlen)
-{
-       int err;
-
-       if (optname == MCAST_MSFILTER) {
-               const int size0 = offsetof(struct compat_group_filter, gf_slist);
-               struct compat_group_filter __user *p = (void __user *)optval;
-               struct compat_group_filter gf32;
-               struct group_filter gf;
-               int ulen, err;
-               int num;
-
-               if (level != SOL_IP)
-                       return -EOPNOTSUPP;
-
-               if (get_user(ulen, optlen))
-                       return -EFAULT;
-
-               if (ulen < size0)
-                       return -EINVAL;
-
-               if (copy_from_user(&gf32, p, size0))
-                       return -EFAULT;
-
-               gf.gf_interface = gf32.gf_interface;
-               gf.gf_fmode = gf32.gf_fmode;
-               num = gf.gf_numsrc = gf32.gf_numsrc;
-               gf.gf_group = gf32.gf_group;
-
-               rtnl_lock();
-               lock_sock(sk);
-               err = ip_mc_gsfget(sk, &gf, p->gf_slist);
-               release_sock(sk);
-               rtnl_unlock();
-               if (err)
-                       return err;
-               if (gf.gf_numsrc < num)
-                       num = gf.gf_numsrc;
-               ulen = GROUP_FILTER_SIZE(num) - (sizeof(gf) - sizeof(gf32));
-               if (put_user(ulen, optlen) ||
-                   put_user(gf.gf_fmode, &p->gf_fmode) ||
-                   put_user(gf.gf_numsrc, &p->gf_numsrc))
-                       return -EFAULT;
-               return 0;
-       }
-
-       err = do_ip_getsockopt(sk, level, optname, optval, optlen,
-               MSG_CMSG_COMPAT);
-
-#if IS_ENABLED(CONFIG_BPFILTER_UMH)
-       if (optname >= BPFILTER_IPT_SO_GET_INFO &&
-           optname < BPFILTER_IPT_GET_MAX)
-               err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
-#endif
-#ifdef CONFIG_NETFILTER
-       /* we need to exclude all possible ENOPROTOOPTs except default case */
-       if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
-                       !ip_mroute_opt(optname)) {
-               int len;
-
-               if (get_user(len, optlen))
-                       return -EFAULT;
-
-               err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
-               if (err >= 0)
-                       err = put_user(len, optlen);
-               return err;
-       }
-#endif
-       return err;
-}
-EXPORT_SYMBOL(compat_ip_getsockopt);
-#endif
index f4f1d11..0c1f364 100644 (file)
@@ -85,9 +85,10 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
                                   __be32 remote, __be32 local,
                                   __be32 key)
 {
-       unsigned int hash;
        struct ip_tunnel *t, *cand = NULL;
        struct hlist_head *head;
+       struct net_device *ndev;
+       unsigned int hash;
 
        hash = ip_tunnel_hash(key, remote);
        head = &itn->tunnels[hash];
@@ -162,8 +163,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
        if (t && t->dev->flags & IFF_UP)
                return t;
 
-       if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
-               return netdev_priv(itn->fb_tunnel_dev);
+       ndev = READ_ONCE(itn->fb_tunnel_dev);
+       if (ndev && ndev->flags & IFF_UP)
+               return netdev_priv(ndev);
 
        return NULL;
 }
@@ -1259,9 +1261,9 @@ void ip_tunnel_uninit(struct net_device *dev)
        struct ip_tunnel_net *itn;
 
        itn = net_generic(net, tunnel->ip_tnl_net_id);
-       /* fb_tunnel_dev will be unregisted in net-exit call. */
-       if (itn->fb_tunnel_dev != dev)
-               ip_tunnel_del(itn, netdev_priv(dev));
+       ip_tunnel_del(itn, netdev_priv(dev));
+       if (itn->fb_tunnel_dev == dev)
+               WRITE_ONCE(itn->fb_tunnel_dev, NULL);
 
        dst_cache_reset(&tunnel->dst_cache);
 }
index 181b7a2..f8b419e 100644 (file)
@@ -844,3 +844,21 @@ void ip_tunnel_unneed_metadata(void)
        static_branch_dec(&ip_tunnel_metadata_cnt);
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata);
+
+/* Returns either the correct skb->protocol value, or 0 if invalid. */
+__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb)
+{
+       if (skb_network_header(skb) >= skb->head &&
+           (skb_network_header(skb) + sizeof(struct iphdr)) <= skb_tail_pointer(skb) &&
+           ip_hdr(skb)->version == 4)
+               return htons(ETH_P_IP);
+       if (skb_network_header(skb) >= skb->head &&
+           (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= skb_tail_pointer(skb) &&
+           ipv6_hdr(skb)->version == 6)
+               return htons(ETH_P_IPV6);
+       return 0;
+}
+EXPORT_SYMBOL(ip_tunnel_parse_protocol);
+
+const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tunnel_parse_protocol };
+EXPORT_SYMBOL(ip_tunnel_header_ops);
index 1d9c8cf..460ca10 100644 (file)
@@ -441,6 +441,7 @@ static const struct net_device_ops vti_netdev_ops = {
 static void vti_tunnel_setup(struct net_device *dev)
 {
        dev->netdev_ops         = &vti_netdev_ops;
+       dev->header_ops         = &ip_tunnel_header_ops;
        dev->type               = ARPHRD_TUNNEL;
        ip_tunnel_setup(dev, vti_net_id);
 }
index 40fea52..75d35e7 100644 (file)
@@ -361,6 +361,7 @@ static const struct net_device_ops ipip_netdev_ops = {
 static void ipip_tunnel_setup(struct net_device *dev)
 {
        dev->netdev_ops         = &ipip_netdev_ops;
+       dev->header_ops         = &ip_tunnel_header_ops;
 
        dev->type               = ARPHRD_TUNNEL;
        dev->flags              = IFF_NOARP;
index f5c7a58..cdf3a40 100644 (file)
@@ -636,7 +636,10 @@ static int call_ipmr_mfc_entry_notifiers(struct net *net,
 
 /**
  *     vif_delete - Delete a VIF entry
+ *     @mrt: Table to delete from
+ *     @vifi: VIF identifier to delete
  *     @notify: Set to 1, if the caller is a notifier_call
+ *     @head: if unregistering the VIF, place it on this queue
  */
 static int vif_delete(struct mr_table *mrt, int vifi, int notify,
                      struct list_head *head)
@@ -1338,7 +1341,7 @@ static void mrtsock_destruct(struct sock *sk)
  * MOSPF/PIM router set up we can clean this up.
  */
 
-int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
+int ip_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval,
                         unsigned int optlen)
 {
        struct net *net = sock_net(sk);
@@ -1410,7 +1413,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
                        ret = -EINVAL;
                        break;
                }
-               if (copy_from_user(&vif, optval, sizeof(vif))) {
+               if (copy_from_sockptr(&vif, optval, sizeof(vif))) {
                        ret = -EFAULT;
                        break;
                }
@@ -1438,7 +1441,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
                        ret = -EINVAL;
                        break;
                }
-               if (copy_from_user(&mfc, optval, sizeof(mfc))) {
+               if (copy_from_sockptr(&val, optval, sizeof(val))) {
                        ret = -EFAULT;
                        break;
                }
@@ -1456,7 +1459,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
                        ret = -EINVAL;
                        break;
                }
-               if (get_user(val, (int __user *)optval)) {
+               if (copy_from_sockptr(&val, optval, sizeof(val))) {
                        ret = -EFAULT;
                        break;
                }
@@ -1468,7 +1471,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
                        ret = -EINVAL;
                        break;
                }
-               if (get_user(val, (int __user *)optval)) {
+               if (copy_from_sockptr(&val, optval, sizeof(val))) {
                        ret = -EFAULT;
                        break;
                }
@@ -1483,7 +1486,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
                        ret = -EINVAL;
                        break;
                }
-               if (get_user(val, (int __user *)optval)) {
+               if (copy_from_sockptr(&val, optval, sizeof(val))) {
                        ret = -EFAULT;
                        break;
                }
@@ -1505,7 +1508,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
                        ret = -EINVAL;
                        break;
                }
-               if (get_user(uval, (u32 __user *)optval)) {
+               if (copy_from_sockptr(&uval, optval, sizeof(uval))) {
                        ret = -EFAULT;
                        break;
                }
index b167f4a..f5b26ef 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+
 /*
  * Packet matching code for ARP packets.
  *
@@ -787,8 +787,7 @@ static int compat_table_info(const struct xt_table_info *info,
 }
 #endif
 
-static int get_info(struct net *net, void __user *user,
-                   const int *len, int compat)
+static int get_info(struct net *net, void __user *user, const int *len)
 {
        char name[XT_TABLE_MAXNAMELEN];
        struct xt_table *t;
@@ -802,7 +801,7 @@ static int get_info(struct net *net, void __user *user,
 
        name[XT_TABLE_MAXNAMELEN-1] = '\0';
 #ifdef CONFIG_COMPAT
-       if (compat)
+       if (in_compat_syscall())
                xt_compat_lock(NFPROTO_ARP);
 #endif
        t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
@@ -812,7 +811,7 @@ static int get_info(struct net *net, void __user *user,
 #ifdef CONFIG_COMPAT
                struct xt_table_info tmp;
 
-               if (compat) {
+               if (in_compat_syscall()) {
                        ret = compat_table_info(private, &tmp);
                        xt_compat_flush_offsets(NFPROTO_ARP);
                        private = &tmp;
@@ -837,7 +836,7 @@ static int get_info(struct net *net, void __user *user,
        } else
                ret = PTR_ERR(t);
 #ifdef CONFIG_COMPAT
-       if (compat)
+       if (in_compat_syscall())
                xt_compat_unlock(NFPROTO_ARP);
 #endif
        return ret;
@@ -948,8 +947,7 @@ static int __do_replace(struct net *net, const char *name,
        return ret;
 }
 
-static int do_replace(struct net *net, const void __user *user,
-                     unsigned int len)
+static int do_replace(struct net *net, sockptr_t arg, unsigned int len)
 {
        int ret;
        struct arpt_replace tmp;
@@ -957,7 +955,7 @@ static int do_replace(struct net *net, const void __user *user,
        void *loc_cpu_entry;
        struct arpt_entry *iter;
 
-       if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
+       if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
                return -EFAULT;
 
        /* overflow check */
@@ -973,8 +971,8 @@ static int do_replace(struct net *net, const void __user *user,
                return -ENOMEM;
 
        loc_cpu_entry = newinfo->entries;
-       if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
-                          tmp.size) != 0) {
+       sockptr_advance(arg, sizeof(tmp));
+       if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) {
                ret = -EFAULT;
                goto free_newinfo;
        }
@@ -997,8 +995,7 @@ static int do_replace(struct net *net, const void __user *user,
        return ret;
 }
 
-static int do_add_counters(struct net *net, const void __user *user,
-                          unsigned int len, int compat)
+static int do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
 {
        unsigned int i;
        struct xt_counters_info tmp;
@@ -1009,7 +1006,7 @@ static int do_add_counters(struct net *net, const void __user *user,
        struct arpt_entry *iter;
        unsigned int addend;
 
-       paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
+       paddc = xt_copy_counters(arg, len, &tmp);
        if (IS_ERR(paddc))
                return PTR_ERR(paddc);
 
@@ -1246,8 +1243,7 @@ out_unlock:
        return ret;
 }
 
-static int compat_do_replace(struct net *net, void __user *user,
-                            unsigned int len)
+static int compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
 {
        int ret;
        struct compat_arpt_replace tmp;
@@ -1255,7 +1251,7 @@ static int compat_do_replace(struct net *net, void __user *user,
        void *loc_cpu_entry;
        struct arpt_entry *iter;
 
-       if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
+       if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
                return -EFAULT;
 
        /* overflow check */
@@ -1271,7 +1267,8 @@ static int compat_do_replace(struct net *net, void __user *user,
                return -ENOMEM;
 
        loc_cpu_entry = newinfo->entries;
-       if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) {
+       sockptr_advance(arg, sizeof(tmp));
+       if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) {
                ret = -EFAULT;
                goto free_newinfo;
        }
@@ -1294,30 +1291,6 @@ static int compat_do_replace(struct net *net, void __user *user,
        return ret;
 }
 
-static int compat_do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user,
-                                 unsigned int len)
-{
-       int ret;
-
-       if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
-               return -EPERM;
-
-       switch (cmd) {
-       case ARPT_SO_SET_REPLACE:
-               ret = compat_do_replace(sock_net(sk), user, len);
-               break;
-
-       case ARPT_SO_SET_ADD_COUNTERS:
-               ret = do_add_counters(sock_net(sk), user, len, 1);
-               break;
-
-       default:
-               ret = -EINVAL;
-       }
-
-       return ret;
-}
-
 static int compat_copy_entry_to_user(struct arpt_entry *e, void __user **dstptr,
                                     compat_uint_t *size,
                                     struct xt_counters *counters,
@@ -1425,32 +1398,10 @@ static int compat_get_entries(struct net *net,
        xt_compat_unlock(NFPROTO_ARP);
        return ret;
 }
-
-static int do_arpt_get_ctl(struct sock *, int, void __user *, int *);
-
-static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
-                                 int *len)
-{
-       int ret;
-
-       if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
-               return -EPERM;
-
-       switch (cmd) {
-       case ARPT_SO_GET_INFO:
-               ret = get_info(sock_net(sk), user, len, 1);
-               break;
-       case ARPT_SO_GET_ENTRIES:
-               ret = compat_get_entries(sock_net(sk), user, len);
-               break;
-       default:
-               ret = do_arpt_get_ctl(sk, cmd, user, len);
-       }
-       return ret;
-}
 #endif
 
-static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
+static int do_arpt_set_ctl(struct sock *sk, int cmd, sockptr_t arg,
+               unsigned int len)
 {
        int ret;
 
@@ -1459,11 +1410,16 @@ static int do_arpt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned
 
        switch (cmd) {
        case ARPT_SO_SET_REPLACE:
-               ret = do_replace(sock_net(sk), user, len);
+#ifdef CONFIG_COMPAT
+               if (in_compat_syscall())
+                       ret = compat_do_replace(sock_net(sk), arg, len);
+               else
+#endif
+                       ret = do_replace(sock_net(sk), arg, len);
                break;
 
        case ARPT_SO_SET_ADD_COUNTERS:
-               ret = do_add_counters(sock_net(sk), user, len, 0);
+               ret = do_add_counters(sock_net(sk), arg, len);
                break;
 
        default:
@@ -1482,11 +1438,16 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
 
        switch (cmd) {
        case ARPT_SO_GET_INFO:
-               ret = get_info(sock_net(sk), user, len, 0);
+               ret = get_info(sock_net(sk), user, len);
                break;
 
        case ARPT_SO_GET_ENTRIES:
-               ret = get_entries(sock_net(sk), user, len);
+#ifdef CONFIG_COMPAT
+               if (in_compat_syscall())
+                       ret = compat_get_entries(sock_net(sk), user, len);
+               else
+#endif
+                       ret = get_entries(sock_net(sk), user, len);
                break;
 
        case ARPT_SO_GET_REVISION_TARGET: {
@@ -1610,15 +1571,9 @@ static struct nf_sockopt_ops arpt_sockopts = {
        .set_optmin     = ARPT_BASE_CTL,
        .set_optmax     = ARPT_SO_SET_MAX+1,
        .set            = do_arpt_set_ctl,
-#ifdef CONFIG_COMPAT
-       .compat_set     = compat_do_arpt_set_ctl,
-#endif
        .get_optmin     = ARPT_BASE_CTL,
        .get_optmax     = ARPT_SO_GET_MAX+1,
        .get            = do_arpt_get_ctl,
-#ifdef CONFIG_COMPAT
-       .compat_get     = compat_do_arpt_get_ctl,
-#endif
        .owner          = THIS_MODULE,
 };
 
index c2670ea..f2a9680 100644 (file)
@@ -944,8 +944,7 @@ static int compat_table_info(const struct xt_table_info *info,
 }
 #endif
 
-static int get_info(struct net *net, void __user *user,
-                   const int *len, int compat)
+static int get_info(struct net *net, void __user *user, const int *len)
 {
        char name[XT_TABLE_MAXNAMELEN];
        struct xt_table *t;
@@ -959,7 +958,7 @@ static int get_info(struct net *net, void __user *user,
 
        name[XT_TABLE_MAXNAMELEN-1] = '\0';
 #ifdef CONFIG_COMPAT
-       if (compat)
+       if (in_compat_syscall())
                xt_compat_lock(AF_INET);
 #endif
        t = xt_request_find_table_lock(net, AF_INET, name);
@@ -969,7 +968,7 @@ static int get_info(struct net *net, void __user *user,
 #ifdef CONFIG_COMPAT
                struct xt_table_info tmp;
 
-               if (compat) {
+               if (in_compat_syscall()) {
                        ret = compat_table_info(private, &tmp);
                        xt_compat_flush_offsets(AF_INET);
                        private = &tmp;
@@ -995,7 +994,7 @@ static int get_info(struct net *net, void __user *user,
        } else
                ret = PTR_ERR(t);
 #ifdef CONFIG_COMPAT
-       if (compat)
+       if (in_compat_syscall())
                xt_compat_unlock(AF_INET);
 #endif
        return ret;
@@ -1103,7 +1102,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 }
 
 static int
-do_replace(struct net *net, const void __user *user, unsigned int len)
+do_replace(struct net *net, sockptr_t arg, unsigned int len)
 {
        int ret;
        struct ipt_replace tmp;
@@ -1111,7 +1110,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
        void *loc_cpu_entry;
        struct ipt_entry *iter;
 
-       if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
+       if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
                return -EFAULT;
 
        /* overflow check */
@@ -1127,8 +1126,8 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
                return -ENOMEM;
 
        loc_cpu_entry = newinfo->entries;
-       if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
-                          tmp.size) != 0) {
+       sockptr_advance(arg, sizeof(tmp));
+       if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) {
                ret = -EFAULT;
                goto free_newinfo;
        }
@@ -1152,8 +1151,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
 }
 
 static int
-do_add_counters(struct net *net, const void __user *user,
-               unsigned int len, int compat)
+do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
 {
        unsigned int i;
        struct xt_counters_info tmp;
@@ -1164,7 +1162,7 @@ do_add_counters(struct net *net, const void __user *user,
        struct ipt_entry *iter;
        unsigned int addend;
 
-       paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
+       paddc = xt_copy_counters(arg, len, &tmp);
        if (IS_ERR(paddc))
                return PTR_ERR(paddc);
 
@@ -1486,7 +1484,7 @@ out_unlock:
 }
 
 static int
-compat_do_replace(struct net *net, void __user *user, unsigned int len)
+compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
 {
        int ret;
        struct compat_ipt_replace tmp;
@@ -1494,7 +1492,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
        void *loc_cpu_entry;
        struct ipt_entry *iter;
 
-       if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
+       if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
                return -EFAULT;
 
        /* overflow check */
@@ -1510,8 +1508,8 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
                return -ENOMEM;
 
        loc_cpu_entry = newinfo->entries;
-       if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
-                          tmp.size) != 0) {
+       sockptr_advance(arg, sizeof(tmp));
+       if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) {
                ret = -EFAULT;
                goto free_newinfo;
        }
@@ -1534,31 +1532,6 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
        return ret;
 }
 
-static int
-compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
-                     unsigned int len)
-{
-       int ret;
-
-       if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
-               return -EPERM;
-
-       switch (cmd) {
-       case IPT_SO_SET_REPLACE:
-               ret = compat_do_replace(sock_net(sk), user, len);
-               break;
-
-       case IPT_SO_SET_ADD_COUNTERS:
-               ret = do_add_counters(sock_net(sk), user, len, 1);
-               break;
-
-       default:
-               ret = -EINVAL;
-       }
-
-       return ret;
-}
-
 struct compat_ipt_get_entries {
        char name[XT_TABLE_MAXNAMELEN];
        compat_uint_t size;
@@ -1634,33 +1607,10 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
        xt_compat_unlock(AF_INET);
        return ret;
 }
-
-static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);
-
-static int
-compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
-{
-       int ret;
-
-       if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
-               return -EPERM;
-
-       switch (cmd) {
-       case IPT_SO_GET_INFO:
-               ret = get_info(sock_net(sk), user, len, 1);
-               break;
-       case IPT_SO_GET_ENTRIES:
-               ret = compat_get_entries(sock_net(sk), user, len);
-               break;
-       default:
-               ret = do_ipt_get_ctl(sk, cmd, user, len);
-       }
-       return ret;
-}
 #endif
 
 static int
-do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
+do_ipt_set_ctl(struct sock *sk, int cmd, sockptr_t arg, unsigned int len)
 {
        int ret;
 
@@ -1669,11 +1619,16 @@ do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 
        switch (cmd) {
        case IPT_SO_SET_REPLACE:
-               ret = do_replace(sock_net(sk), user, len);
+#ifdef CONFIG_COMPAT
+               if (in_compat_syscall())
+                       ret = compat_do_replace(sock_net(sk), arg, len);
+               else
+#endif
+                       ret = do_replace(sock_net(sk), arg, len);
                break;
 
        case IPT_SO_SET_ADD_COUNTERS:
-               ret = do_add_counters(sock_net(sk), user, len, 0);
+               ret = do_add_counters(sock_net(sk), arg, len);
                break;
 
        default:
@@ -1693,11 +1648,16 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
 
        switch (cmd) {
        case IPT_SO_GET_INFO:
-               ret = get_info(sock_net(sk), user, len, 0);
+               ret = get_info(sock_net(sk), user, len);
                break;
 
        case IPT_SO_GET_ENTRIES:
-               ret = get_entries(sock_net(sk), user, len);
+#ifdef CONFIG_COMPAT
+               if (in_compat_syscall())
+                       ret = compat_get_entries(sock_net(sk), user, len);
+               else
+#endif
+                       ret = get_entries(sock_net(sk), user, len);
                break;
 
        case IPT_SO_GET_REVISION_MATCH:
@@ -1797,11 +1757,22 @@ out_free:
        return ret;
 }
 
+void ipt_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+                                  const struct nf_hook_ops *ops)
+{
+       nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+}
+
+void ipt_unregister_table_exit(struct net *net, struct xt_table *table)
+{
+       __ipt_unregister_table(net, table);
+}
+
 void ipt_unregister_table(struct net *net, struct xt_table *table,
                          const struct nf_hook_ops *ops)
 {
        if (ops)
-               nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+               ipt_unregister_table_pre_exit(net, table, ops);
        __ipt_unregister_table(net, table);
 }
 
@@ -1875,15 +1846,9 @@ static struct nf_sockopt_ops ipt_sockopts = {
        .set_optmin     = IPT_BASE_CTL,
        .set_optmax     = IPT_SO_SET_MAX+1,
        .set            = do_ipt_set_ctl,
-#ifdef CONFIG_COMPAT
-       .compat_set     = compat_do_ipt_set_ctl,
-#endif
        .get_optmin     = IPT_BASE_CTL,
        .get_optmax     = IPT_SO_GET_MAX+1,
        .get            = do_ipt_get_ctl,
-#ifdef CONFIG_COMPAT
-       .compat_get     = compat_do_ipt_get_ctl,
-#endif
        .owner          = THIS_MODULE,
 };
 
@@ -1958,6 +1923,8 @@ static void __exit ip_tables_fini(void)
 
 EXPORT_SYMBOL(ipt_register_table);
 EXPORT_SYMBOL(ipt_unregister_table);
+EXPORT_SYMBOL(ipt_unregister_table_pre_exit);
+EXPORT_SYMBOL(ipt_unregister_table_exit);
 EXPORT_SYMBOL(ipt_do_table);
 module_init(ip_tables_init);
 module_exit(ip_tables_fini);
index f8755a4..a8b980a 100644 (file)
@@ -3,7 +3,7 @@
  * (C) 2003-2004 by Harald Welte <laforge@netfilter.org>
  * based on ideas of Fabio Olive Leite <olive@unixforge.org>
  *
- * Development of this code funded by SuSE Linux AG, http://www.suse.com/
+ * Development of this code funded by SuSE Linux AG, https://www.suse.com/
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #include <linux/module.h>
index 748dc3c..f2984c7 100644 (file)
@@ -118,3 +118,4 @@ module_exit(synproxy_tg4_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Intercept TCP connections and establish them using syncookies");
index 9d54b40..8f7bc1e 100644 (file)
@@ -72,16 +72,24 @@ static int __net_init iptable_filter_net_init(struct net *net)
        return 0;
 }
 
+static void __net_exit iptable_filter_net_pre_exit(struct net *net)
+{
+       if (net->ipv4.iptable_filter)
+               ipt_unregister_table_pre_exit(net, net->ipv4.iptable_filter,
+                                             filter_ops);
+}
+
 static void __net_exit iptable_filter_net_exit(struct net *net)
 {
        if (!net->ipv4.iptable_filter)
                return;
-       ipt_unregister_table(net, net->ipv4.iptable_filter, filter_ops);
+       ipt_unregister_table_exit(net, net->ipv4.iptable_filter);
        net->ipv4.iptable_filter = NULL;
 }
 
 static struct pernet_operations iptable_filter_net_ops = {
        .init = iptable_filter_net_init,
+       .pre_exit = iptable_filter_net_pre_exit,
        .exit = iptable_filter_net_exit,
 };
 
index bb9266e..f703a71 100644 (file)
@@ -100,15 +100,23 @@ static int __net_init iptable_mangle_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit iptable_mangle_net_pre_exit(struct net *net)
+{
+       if (net->ipv4.iptable_mangle)
+               ipt_unregister_table_pre_exit(net, net->ipv4.iptable_mangle,
+                                             mangle_ops);
+}
+
 static void __net_exit iptable_mangle_net_exit(struct net *net)
 {
        if (!net->ipv4.iptable_mangle)
                return;
-       ipt_unregister_table(net, net->ipv4.iptable_mangle, mangle_ops);
+       ipt_unregister_table_exit(net, net->ipv4.iptable_mangle);
        net->ipv4.iptable_mangle = NULL;
 }
 
 static struct pernet_operations iptable_mangle_net_ops = {
+       .pre_exit = iptable_mangle_net_pre_exit,
        .exit = iptable_mangle_net_exit,
 };
 
index ad33687..b0143b1 100644 (file)
@@ -113,16 +113,22 @@ static int __net_init iptable_nat_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit iptable_nat_net_pre_exit(struct net *net)
+{
+       if (net->ipv4.nat_table)
+               ipt_nat_unregister_lookups(net);
+}
+
 static void __net_exit iptable_nat_net_exit(struct net *net)
 {
        if (!net->ipv4.nat_table)
                return;
-       ipt_nat_unregister_lookups(net);
-       ipt_unregister_table(net, net->ipv4.nat_table, NULL);
+       ipt_unregister_table_exit(net, net->ipv4.nat_table);
        net->ipv4.nat_table = NULL;
 }
 
 static struct pernet_operations iptable_nat_net_ops = {
+       .pre_exit = iptable_nat_net_pre_exit,
        .exit   = iptable_nat_net_exit,
 };
 
index 69697eb..9abfe6b 100644 (file)
@@ -67,15 +67,23 @@ static int __net_init iptable_raw_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit iptable_raw_net_pre_exit(struct net *net)
+{
+       if (net->ipv4.iptable_raw)
+               ipt_unregister_table_pre_exit(net, net->ipv4.iptable_raw,
+                                             rawtable_ops);
+}
+
 static void __net_exit iptable_raw_net_exit(struct net *net)
 {
        if (!net->ipv4.iptable_raw)
                return;
-       ipt_unregister_table(net, net->ipv4.iptable_raw, rawtable_ops);
+       ipt_unregister_table_exit(net, net->ipv4.iptable_raw);
        net->ipv4.iptable_raw = NULL;
 }
 
 static struct pernet_operations iptable_raw_net_ops = {
+       .pre_exit = iptable_raw_net_pre_exit,
        .exit = iptable_raw_net_exit,
 };
 
index ac633c1..415c197 100644 (file)
@@ -62,16 +62,23 @@ static int __net_init iptable_security_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit iptable_security_net_pre_exit(struct net *net)
+{
+       if (net->ipv4.iptable_security)
+               ipt_unregister_table_pre_exit(net, net->ipv4.iptable_security,
+                                             sectbl_ops);
+}
+
 static void __net_exit iptable_security_net_exit(struct net *net)
 {
        if (!net->ipv4.iptable_security)
                return;
-
-       ipt_unregister_table(net, net->ipv4.iptable_security, sectbl_ops);
+       ipt_unregister_table_exit(net, net->ipv4.iptable_security);
        net->ipv4.iptable_security = NULL;
 }
 
 static struct pernet_operations iptable_security_net_ops = {
+       .pre_exit = iptable_security_net_pre_exit,
        .exit = iptable_security_net_exit,
 };
 
index e32e41b..aba65fe 100644 (file)
@@ -34,3 +34,4 @@ module_exit(nf_flow_ipv4_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NF_FLOWTABLE(AF_INET);
+MODULE_DESCRIPTION("Netfilter flow table support");
index 2361fda..9dcfa4e 100644 (file)
@@ -96,6 +96,21 @@ void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
 }
 EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put);
 
+static int nf_reject_fill_skb_dst(struct sk_buff *skb_in)
+{
+       struct dst_entry *dst = NULL;
+       struct flowi fl;
+
+       memset(&fl, 0, sizeof(struct flowi));
+       fl.u.ip4.daddr = ip_hdr(skb_in)->saddr;
+       nf_ip_route(dev_net(skb_in->dev), &dst, &fl, false);
+       if (!dst)
+               return -1;
+
+       skb_dst_set(skb_in, dst);
+       return 0;
+}
+
 /* Send RST reply */
 void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
 {
@@ -109,6 +124,9 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
        if (!oth)
                return;
 
+       if (hook == NF_INET_PRE_ROUTING && nf_reject_fill_skb_dst(oldskb))
+               return;
+
        if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
                return;
 
@@ -175,6 +193,9 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
        if (iph->frag_off & htons(IP_OFFSET))
                return;
 
+       if (hook == NF_INET_PRE_ROUTING && nf_reject_fill_skb_dst(skb_in))
+               return;
+
        if (skb_csum_unnecessary(skb_in) || !nf_reject_verify_csum(proto)) {
                icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
                return;
index abf89b9..bcdb37f 100644 (file)
@@ -107,3 +107,4 @@ module_exit(nft_dup_ipv4_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "dup");
+MODULE_DESCRIPTION("IPv4 nftables packet duplication support");
index ce29411..03df986 100644 (file)
@@ -210,3 +210,4 @@ module_exit(nft_fib4_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
 MODULE_ALIAS_NFT_AF_EXPR(2, "fib");
+MODULE_DESCRIPTION("nftables fib / ip route lookup support");
index 7e6fd5c..e408f81 100644 (file)
@@ -71,3 +71,4 @@ module_exit(nft_reject_ipv4_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "reject");
+MODULE_DESCRIPTION("IPv4 packet rejection for nftables");
index 5354272..df6fbef 100644 (file)
@@ -786,6 +786,9 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                           inet_sk_flowi_flags(sk), faddr, saddr, 0, 0,
                           sk->sk_uid);
 
+       fl4.fl4_icmp_type = user_icmph.type;
+       fl4.fl4_icmp_code = user_icmph.code;
+
        security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
        rt = ip_route_output_flow(net, &fl4, sk);
        if (IS_ERR(rt)) {
index 75545a8..1074df7 100644 (file)
@@ -292,6 +292,7 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPFastOpenPassiveAltKey", LINUX_MIB_TCPFASTOPENPASSIVEALTKEY),
        SNMP_MIB_ITEM("TcpTimeoutRehash", LINUX_MIB_TCPTIMEOUTREHASH),
        SNMP_MIB_ITEM("TcpDuplicateDataRehash", LINUX_MIB_TCPDUPLICATEDATAREHASH),
+       SNMP_MIB_ITEM("TCPDSACKRecvSegs", LINUX_MIB_TCPDSACKRECVSEGS),
        SNMP_MIB_SENTINEL
 };
 
index 4766591..6fd4330 100644 (file)
@@ -809,11 +809,11 @@ static int raw_sk_init(struct sock *sk)
        return 0;
 }
 
-static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
+static int raw_seticmpfilter(struct sock *sk, sockptr_t optval, int optlen)
 {
        if (optlen > sizeof(struct icmp_filter))
                optlen = sizeof(struct icmp_filter);
-       if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
+       if (copy_from_sockptr(&raw_sk(sk)->filter, optval, optlen))
                return -EFAULT;
        return 0;
 }
@@ -838,7 +838,7 @@ out:        return ret;
 }
 
 static int do_raw_setsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, unsigned int optlen)
+                            sockptr_t optval, unsigned int optlen)
 {
        if (optname == ICMP_FILTER) {
                if (inet_sk(sk)->inet_num != IPPROTO_ICMP)
@@ -850,23 +850,13 @@ static int do_raw_setsockopt(struct sock *sk, int level, int optname,
 }
 
 static int raw_setsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, unsigned int optlen)
+                         sockptr_t optval, unsigned int optlen)
 {
        if (level != SOL_RAW)
                return ip_setsockopt(sk, level, optname, optval, optlen);
        return do_raw_setsockopt(sk, level, optname, optval, optlen);
 }
 
-#ifdef CONFIG_COMPAT
-static int compat_raw_setsockopt(struct sock *sk, int level, int optname,
-                                char __user *optval, unsigned int optlen)
-{
-       if (level != SOL_RAW)
-               return compat_ip_setsockopt(sk, level, optname, optval, optlen);
-       return do_raw_setsockopt(sk, level, optname, optval, optlen);
-}
-#endif
-
 static int do_raw_getsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, int __user *optlen)
 {
@@ -887,16 +877,6 @@ static int raw_getsockopt(struct sock *sk, int level, int optname,
        return do_raw_getsockopt(sk, level, optname, optval, optlen);
 }
 
-#ifdef CONFIG_COMPAT
-static int compat_raw_getsockopt(struct sock *sk, int level, int optname,
-                                char __user *optval, int __user *optlen)
-{
-       if (level != SOL_RAW)
-               return compat_ip_getsockopt(sk, level, optname, optval, optlen);
-       return do_raw_getsockopt(sk, level, optname, optval, optlen);
-}
-#endif
-
 static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
        switch (cmd) {
@@ -980,8 +960,6 @@ struct proto raw_prot = {
        .usersize          = sizeof_field(struct raw_sock, filter),
        .h.raw_hash        = &raw_v4_hashinfo,
 #ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_raw_setsockopt,
-       .compat_getsockopt = compat_raw_getsockopt,
        .compat_ioctl      = compat_raw_ioctl,
 #endif
        .diag_destroy      = raw_abort,
index 1d7076b..a01efa0 100644 (file)
@@ -2027,7 +2027,7 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                      const struct sk_buff *hint)
 {
        struct in_device *in_dev = __in_dev_get_rcu(dev);
-       struct rtable *rt = (struct rtable *)hint;
+       struct rtable *rt = skb_rtable(hint);
        struct net *net = dev_net(dev);
        int err = -EINVAL;
        u32 tag = 0;
index 810cc16..27de938 100644 (file)
@@ -2691,6 +2691,9 @@ int tcp_disconnect(struct sock *sk, int flags)
        tp->window_clamp = 0;
        tp->delivered = 0;
        tp->delivered_ce = 0;
+       if (icsk->icsk_ca_ops->release)
+               icsk->icsk_ca_ops->release(sk);
+       memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
        tcp_set_ca_state(sk, TCP_CA_Open);
        tp->is_sack_reneg = 0;
        tcp_clear_retrans(tp);
@@ -2761,7 +2764,7 @@ static inline bool tcp_can_repair_sock(const struct sock *sk)
                (sk->sk_state != TCP_LISTEN);
 }
 
-static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int len)
+static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len)
 {
        struct tcp_repair_window opt;
 
@@ -2771,7 +2774,7 @@ static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int l
        if (len != sizeof(opt))
                return -EINVAL;
 
-       if (copy_from_user(&opt, optbuf, sizeof(opt)))
+       if (copy_from_sockptr(&opt, optbuf, sizeof(opt)))
                return -EFAULT;
 
        if (opt.max_window < opt.snd_wnd)
@@ -2793,17 +2796,17 @@ static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int l
        return 0;
 }
 
-static int tcp_repair_options_est(struct sock *sk,
-               struct tcp_repair_opt __user *optbuf, unsigned int len)
+static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf,
+               unsigned int len)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_repair_opt opt;
 
        while (len >= sizeof(opt)) {
-               if (copy_from_user(&opt, optbuf, sizeof(opt)))
+               if (copy_from_sockptr(&opt, optbuf, sizeof(opt)))
                        return -EFAULT;
 
-               optbuf++;
+               sockptr_advance(optbuf, sizeof(opt));
                len -= sizeof(opt);
 
                switch (opt.opt_code) {
@@ -2957,7 +2960,7 @@ void tcp_sock_set_user_timeout(struct sock *sk, u32 val)
 }
 EXPORT_SYMBOL(tcp_sock_set_user_timeout);
 
-static int __tcp_sock_set_keepidle(struct sock *sk, int val)
+int tcp_sock_set_keepidle_locked(struct sock *sk, int val)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2984,7 +2987,7 @@ int tcp_sock_set_keepidle(struct sock *sk, int val)
        int err;
 
        lock_sock(sk);
-       err = __tcp_sock_set_keepidle(sk, val);
+       err = tcp_sock_set_keepidle_locked(sk, val);
        release_sock(sk);
        return err;
 }
@@ -3017,8 +3020,8 @@ EXPORT_SYMBOL(tcp_sock_set_keepcnt);
 /*
  *     Socket option code for TCP.
  */
-static int do_tcp_setsockopt(struct sock *sk, int level,
-               int optname, char __user *optval, unsigned int optlen)
+static int do_tcp_setsockopt(struct sock *sk, int level, int optname,
+               sockptr_t optval, unsigned int optlen)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
@@ -3034,7 +3037,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                if (optlen < 1)
                        return -EINVAL;
 
-               val = strncpy_from_user(name, optval,
+               val = strncpy_from_sockptr(name, optval,
                                        min_t(long, TCP_CA_NAME_MAX-1, optlen));
                if (val < 0)
                        return -EFAULT;
@@ -3053,7 +3056,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                if (optlen < 1)
                        return -EINVAL;
 
-               val = strncpy_from_user(name, optval,
+               val = strncpy_from_sockptr(name, optval,
                                        min_t(long, TCP_ULP_NAME_MAX - 1,
                                              optlen));
                if (val < 0)
@@ -3076,7 +3079,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                    optlen != TCP_FASTOPEN_KEY_BUF_LENGTH)
                        return -EINVAL;
 
-               if (copy_from_user(key, optval, optlen))
+               if (copy_from_sockptr(key, optval, optlen))
                        return -EFAULT;
 
                if (optlen == TCP_FASTOPEN_KEY_BUF_LENGTH)
@@ -3092,7 +3095,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
        if (optlen < sizeof(int))
                return -EINVAL;
 
-       if (get_user(val, (int __user *)optval))
+       if (copy_from_sockptr(&val, optval, sizeof(val)))
                return -EFAULT;
 
        lock_sock(sk);
@@ -3171,9 +3174,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                if (!tp->repair)
                        err = -EINVAL;
                else if (sk->sk_state == TCP_ESTABLISHED)
-                       err = tcp_repair_options_est(sk,
-                                       (struct tcp_repair_opt __user *)optval,
-                                       optlen);
+                       err = tcp_repair_options_est(sk, optval, optlen);
                else
                        err = -EPERM;
                break;
@@ -3183,7 +3184,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                break;
 
        case TCP_KEEPIDLE:
-               err = __tcp_sock_set_keepidle(sk, val);
+               err = tcp_sock_set_keepidle_locked(sk, val);
                break;
        case TCP_KEEPINTVL:
                if (val < 1 || val > MAX_TCP_KEEPINTVL)
@@ -3246,10 +3247,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 #ifdef CONFIG_TCP_MD5SIG
        case TCP_MD5SIG:
        case TCP_MD5SIG_EXT:
-               if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
-                       err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
-               else
-                       err = -EINVAL;
+               err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
                break;
 #endif
        case TCP_USER_TIMEOUT:
@@ -3325,7 +3323,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
        return err;
 }
 
-int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
+int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
                   unsigned int optlen)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -3337,18 +3335,6 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
 }
 EXPORT_SYMBOL(tcp_setsockopt);
 
-#ifdef CONFIG_COMPAT
-int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, unsigned int optlen)
-{
-       if (level != SOL_TCP)
-               return inet_csk_compat_setsockopt(sk, level, optname,
-                                                 optval, optlen);
-       return do_tcp_setsockopt(sk, level, optname, optval, optlen);
-}
-EXPORT_SYMBOL(compat_tcp_setsockopt);
-#endif
-
 static void tcp_get_info_chrono_stats(const struct tcp_sock *tp,
                                      struct tcp_info *info)
 {
@@ -3896,18 +3882,6 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
 }
 EXPORT_SYMBOL(tcp_getsockopt);
 
-#ifdef CONFIG_COMPAT
-int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, int __user *optlen)
-{
-       if (level != SOL_TCP)
-               return inet_csk_compat_getsockopt(sk, level, optname,
-                                                 optval, optlen);
-       return do_tcp_getsockopt(sk, level, optname, optval, optlen);
-}
-EXPORT_SYMBOL(compat_tcp_getsockopt);
-#endif
-
 #ifdef CONFIG_TCP_MD5SIG
 static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
 static DEFINE_MUTEX(tcp_md5sig_mutex);
@@ -4033,11 +4007,14 @@ EXPORT_SYMBOL(tcp_md5_hash_skb_data);
 
 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
 {
+       u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */
        struct scatterlist sg;
 
-       sg_init_one(&sg, key->key, key->keylen);
-       ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen);
-       return crypto_ahash_update(hp->md5_req);
+       sg_init_one(&sg, key->key, keylen);
+       ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen);
+
+       /* We use data_race() because tcp_md5_do_add() might change key->key under us */
+       return data_race(crypto_ahash_update(hp->md5_req));
 }
 EXPORT_SYMBOL(tcp_md5_hash_key);
 
index 3172e31..62878cf 100644 (file)
@@ -197,7 +197,7 @@ static void tcp_reinit_congestion_control(struct sock *sk,
        icsk->icsk_ca_setsockopt = 1;
        memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
 
-       if (sk->sk_state != TCP_CLOSE)
+       if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
                tcp_init_congestion_control(sk);
 }
 
index 8f8eefd..c7bf5b2 100644 (file)
@@ -432,10 +432,9 @@ static void hystart_update(struct sock *sk, u32 delay)
 
        if (hystart_detect & HYSTART_DELAY) {
                /* obtain the minimum delay of more than sampling packets */
+               if (ca->curr_rtt > delay)
+                       ca->curr_rtt = delay;
                if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
-                       if (ca->curr_rtt > delay)
-                               ca->curr_rtt = delay;
-
                        ca->sample_cnt++;
                } else {
                        if (ca->curr_rtt > ca->delay_min +
index bfdfbb9..349069d 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Sally Floyd's High Speed TCP (RFC 3649) congestion control
  *
- * See http://www.icir.org/floyd/hstcp.html
+ * See https://www.icir.org/floyd/hstcp.html
  *
  * John Heffner <jheffner@psc.edu>
  */
index 88e1f01..55adcfc 100644 (file)
@@ -4,7 +4,7 @@
  * R.N.Shorten, D.J.Leith:
  *   "H-TCP: TCP for high-speed and long-distance networks"
  *   Proc. PFLDnet, Argonne, 2004.
- * http://www.hamilton.ie/net/htcp3.pdf
+ * https://www.hamilton.ie/net/htcp3.pdf
  */
 
 #include <linux/mm.h>
index 83330a6..82906de 100644 (file)
@@ -261,7 +261,8 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
                 * cwnd may be very low (even just 1 packet), so we should ACK
                 * immediately.
                 */
-               inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
+               if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)
+                       inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
        }
 }
 
@@ -517,7 +518,7 @@ EXPORT_SYMBOL(tcp_initialize_rcv_mss);
  *
  * The algorithm for RTT estimation w/o timestamps is based on
  * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
- * <http://public.lanl.gov/radiant/pubs.html#DRS>
+ * <https://public.lanl.gov/radiant/pubs.html#DRS>
  *
  * More detail on this code can be found at
  * <http://staff.psc.edu/jheffner/>,
@@ -870,12 +871,41 @@ __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
        return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
 }
 
+struct tcp_sacktag_state {
+       /* Timestamps for earliest and latest never-retransmitted segment
+        * that was SACKed. RTO needs the earliest RTT to stay conservative,
+        * but congestion control should still get an accurate delay signal.
+        */
+       u64     first_sackt;
+       u64     last_sackt;
+       u32     reord;
+       u32     sack_delivered;
+       int     flag;
+       unsigned int mss_now;
+       struct rate_sample *rate;
+};
+
 /* Take a notice that peer is sending D-SACKs */
-static void tcp_dsack_seen(struct tcp_sock *tp)
+static u32 tcp_dsack_seen(struct tcp_sock *tp, u32 start_seq,
+                         u32 end_seq, struct tcp_sacktag_state *state)
 {
+       u32 seq_len, dup_segs = 1;
+
+       if (before(start_seq, end_seq)) {
+               seq_len = end_seq - start_seq;
+               if (seq_len > tp->mss_cache)
+                       dup_segs = DIV_ROUND_UP(seq_len, tp->mss_cache);
+       }
+
        tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
        tp->rack.dsack_seen = 1;
-       tp->dsack_dups++;
+       tp->dsack_dups += dup_segs;
+
+       state->flag |= FLAG_DSACKING_ACK;
+       /* A spurious retransmission is delivered */
+       state->sack_delivered += dup_segs;
+
+       return dup_segs;
 }
 
 /* It's reordering when higher sequence was delivered (i.e. sacked) before
@@ -961,6 +991,15 @@ void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb)
        }
 }
 
+/* Updates the delivered and delivered_ce counts */
+static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered,
+                               bool ece_ack)
+{
+       tp->delivered += delivered;
+       if (ece_ack)
+               tp->delivered_ce += delivered;
+}
+
 /* This procedure tags the retransmission queue when SACKs arrive.
  *
  * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
@@ -1093,52 +1132,38 @@ static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
 
 static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
                            struct tcp_sack_block_wire *sp, int num_sacks,
-                           u32 prior_snd_una)
+                           u32 prior_snd_una, struct tcp_sacktag_state *state)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
        u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
-       bool dup_sack = false;
+       u32 dup_segs;
 
        if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
-               dup_sack = true;
-               tcp_dsack_seen(tp);
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
        } else if (num_sacks > 1) {
                u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
                u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
 
-               if (!after(end_seq_0, end_seq_1) &&
-                   !before(start_seq_0, start_seq_1)) {
-                       dup_sack = true;
-                       tcp_dsack_seen(tp);
-                       NET_INC_STATS(sock_net(sk),
-                                       LINUX_MIB_TCPDSACKOFORECV);
-               }
+               if (after(end_seq_0, end_seq_1) || before(start_seq_0, start_seq_1))
+                       return false;
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKOFORECV);
+       } else {
+               return false;
        }
 
+       dup_segs = tcp_dsack_seen(tp, start_seq_0, end_seq_0, state);
+       NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECVSEGS, dup_segs);
+
        /* D-SACK for already forgotten data... Do dumb counting. */
-       if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
+       if (tp->undo_marker && tp->undo_retrans > 0 &&
            !after(end_seq_0, prior_snd_una) &&
            after(end_seq_0, tp->undo_marker))
-               tp->undo_retrans--;
+               tp->undo_retrans = max_t(int, 0, tp->undo_retrans - dup_segs);
 
-       return dup_sack;
+       return true;
 }
 
-struct tcp_sacktag_state {
-       u32     reord;
-       /* Timestamps for earliest and latest never-retransmitted segment
-        * that was SACKed. RTO needs the earliest RTT to stay conservative,
-        * but congestion control should still get an accurate delay signal.
-        */
-       u64     first_sackt;
-       u64     last_sackt;
-       struct rate_sample *rate;
-       int     flag;
-       unsigned int mss_now;
-};
-
 /* Check if skb is fully within the SACK block. In presence of GSO skbs,
  * the incoming SACK may not exactly match but we can find smaller MSS
  * aligned portion of it that matches. Therefore we might need to fragment
@@ -1257,7 +1282,8 @@ static u8 tcp_sacktag_one(struct sock *sk,
                sacked |= TCPCB_SACKED_ACKED;
                state->flag |= FLAG_DATA_SACKED;
                tp->sacked_out += pcount;
-               tp->delivered += pcount;  /* Out-of-order packets delivered */
+               /* Out-of-order packets delivered */
+               state->sack_delivered += pcount;
 
                /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
                if (tp->lost_skb_hint &&
@@ -1680,11 +1706,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
                tcp_highest_sack_reset(sk);
 
        found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
-                                        num_sacks, prior_snd_una);
-       if (found_dup_sack) {
-               state->flag |= FLAG_DSACKING_ACK;
-               tp->delivered++; /* A spurious retransmission is delivered */
-       }
+                                        num_sacks, prior_snd_una, state);
 
        /* Eliminate too old ACKs, but take into
         * account more or less fresh ones, they can
@@ -1892,7 +1914,7 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
 
 /* Emulate SACKs for SACKless connection: account for a new dupack. */
 
-static void tcp_add_reno_sack(struct sock *sk, int num_dupack)
+static void tcp_add_reno_sack(struct sock *sk, int num_dupack, bool ece_ack)
 {
        if (num_dupack) {
                struct tcp_sock *tp = tcp_sk(sk);
@@ -1903,20 +1925,21 @@ static void tcp_add_reno_sack(struct sock *sk, int num_dupack)
                tcp_check_reno_reordering(sk, 0);
                delivered = tp->sacked_out - prior_sacked;
                if (delivered > 0)
-                       tp->delivered += delivered;
+                       tcp_count_delivered(tp, delivered, ece_ack);
                tcp_verify_left_out(tp);
        }
 }
 
 /* Account for ACK, ACKing some data in Reno Recovery phase. */
 
-static void tcp_remove_reno_sacks(struct sock *sk, int acked)
+static void tcp_remove_reno_sacks(struct sock *sk, int acked, bool ece_ack)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
        if (acked > 0) {
                /* One ACK acked hole. The rest eat duplicate ACKs. */
-               tp->delivered += max_t(int, acked - tp->sacked_out, 1);
+               tcp_count_delivered(tp, max_t(int, acked - tp->sacked_out, 1),
+                                   ece_ack);
                if (acked - 1 >= tp->sacked_out)
                        tp->sacked_out = 0;
                else
@@ -2696,7 +2719,7 @@ static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
                 * delivered. Lower inflight to clock out (re)tranmissions.
                 */
                if (after(tp->snd_nxt, tp->high_seq) && num_dupack)
-                       tcp_add_reno_sack(sk, num_dupack);
+                       tcp_add_reno_sack(sk, num_dupack, flag & FLAG_ECE);
                else if (flag & FLAG_SND_UNA_ADVANCED)
                        tcp_reset_reno_sack(tp);
        }
@@ -2778,6 +2801,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int fast_rexmit = 0, flag = *ack_flag;
+       bool ece_ack = flag & FLAG_ECE;
        bool do_lost = num_dupack || ((flag & FLAG_DATA_SACKED) &&
                                      tcp_force_fast_retransmit(sk));
 
@@ -2786,7 +2810,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 
        /* Now state machine starts.
         * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
-       if (flag & FLAG_ECE)
+       if (ece_ack)
                tp->prior_ssthresh = 0;
 
        /* B. In all the states check for reneging SACKs. */
@@ -2827,7 +2851,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
        case TCP_CA_Recovery:
                if (!(flag & FLAG_SND_UNA_ADVANCED)) {
                        if (tcp_is_reno(tp))
-                               tcp_add_reno_sack(sk, num_dupack);
+                               tcp_add_reno_sack(sk, num_dupack, ece_ack);
                } else {
                        if (tcp_try_undo_partial(sk, prior_snd_una))
                                return;
@@ -2852,7 +2876,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
                if (tcp_is_reno(tp)) {
                        if (flag & FLAG_SND_UNA_ADVANCED)
                                tcp_reset_reno_sack(tp);
-                       tcp_add_reno_sack(sk, num_dupack);
+                       tcp_add_reno_sack(sk, num_dupack, ece_ack);
                }
 
                if (icsk->icsk_ca_state <= TCP_CA_Disorder)
@@ -2876,7 +2900,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
                }
 
                /* Otherwise enter Recovery state */
-               tcp_enter_recovery(sk, (flag & FLAG_ECE));
+               tcp_enter_recovery(sk, ece_ack);
                fast_rexmit = 1;
        }
 
@@ -3052,7 +3076,7 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
  */
 static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
                               u32 prior_snd_una,
-                              struct tcp_sacktag_state *sack)
+                              struct tcp_sacktag_state *sack, bool ece_ack)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u64 first_ackt, last_ackt;
@@ -3077,8 +3101,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
                u8 sacked = scb->sacked;
                u32 acked_pcount;
 
-               tcp_ack_tstamp(sk, skb, prior_snd_una);
-
                /* Determine how many packets and what bytes were acked, tso and else */
                if (after(scb->end_seq, tp->snd_una)) {
                        if (tcp_skb_pcount(skb) == 1 ||
@@ -3113,7 +3135,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
                if (sacked & TCPCB_SACKED_ACKED) {
                        tp->sacked_out -= acked_pcount;
                } else if (tcp_is_sack(tp)) {
-                       tp->delivered += acked_pcount;
+                       tcp_count_delivered(tp, acked_pcount, ece_ack);
                        if (!tcp_skb_spurious_retrans(tp, skb))
                                tcp_rack_advance(tp, sacked, scb->end_seq,
                                                 tcp_skb_timestamp_us(skb));
@@ -3142,6 +3164,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
                if (!fully_acked)
                        break;
 
+               tcp_ack_tstamp(sk, skb, prior_snd_una);
+
                next = skb_rb_next(skb);
                if (unlikely(skb == tp->retransmit_skb_hint))
                        tp->retransmit_skb_hint = NULL;
@@ -3157,8 +3181,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
        if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una)))
                tp->snd_up = tp->snd_una;
 
-       if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
-               flag |= FLAG_SACK_RENEGING;
+       if (skb) {
+               tcp_ack_tstamp(sk, skb, prior_snd_una);
+               if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
+                       flag |= FLAG_SACK_RENEGING;
+       }
 
        if (likely(first_ackt) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
                seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt);
@@ -3190,7 +3217,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
                }
 
                if (tcp_is_reno(tp)) {
-                       tcp_remove_reno_sacks(sk, pkts_acked);
+                       tcp_remove_reno_sacks(sk, pkts_acked, ece_ack);
 
                        /* If any of the cumulatively ACKed segments was
                         * retransmitted, non-SACK case cannot confirm that
@@ -3557,10 +3584,9 @@ static u32 tcp_newly_delivered(struct sock *sk, u32 prior_delivered, int flag)
 
        delivered = tp->delivered - prior_delivered;
        NET_ADD_STATS(net, LINUX_MIB_TCPDELIVERED, delivered);
-       if (flag & FLAG_ECE) {
-               tp->delivered_ce += delivered;
+       if (flag & FLAG_ECE)
                NET_ADD_STATS(net, LINUX_MIB_TCPDELIVEREDCE, delivered);
-       }
+
        return delivered;
 }
 
@@ -3584,6 +3610,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
        sack_state.first_sackt = 0;
        sack_state.rate = &rs;
+       sack_state.sack_delivered = 0;
 
        /* We very likely will need to access rtx queue. */
        prefetch(sk->tcp_rtx_queue.rb_node);
@@ -3659,12 +3686,25 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                        ack_ev_flags |= CA_ACK_ECE;
                }
 
+               if (sack_state.sack_delivered)
+                       tcp_count_delivered(tp, sack_state.sack_delivered,
+                                           flag & FLAG_ECE);
+
                if (flag & FLAG_WIN_UPDATE)
                        ack_ev_flags |= CA_ACK_WIN_UPDATE;
 
                tcp_in_ack_event(sk, ack_ev_flags);
        }
 
+       /* This is a deviation from RFC3168 since it states that:
+        * "When the TCP data sender is ready to set the CWR bit after reducing
+        * the congestion window, it SHOULD set the CWR bit only on the first
+        * new data packet that it transmits."
+        * We accept CWR on pure ACKs to be more robust
+        * with widely-deployed TCP implementations that do this.
+        */
+       tcp_ecn_accept_cwr(sk, skb);
+
        /* We passed data and got it acked, remove any soft error
         * log. Something worked...
         */
@@ -3675,7 +3715,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                goto no_queue;
 
        /* See if we can take anything off of the retransmit queue. */
-       flag |= tcp_clean_rtx_queue(sk, prior_fack, prior_snd_una, &sack_state);
+       flag |= tcp_clean_rtx_queue(sk, prior_fack, prior_snd_una, &sack_state,
+                                   flag & FLAG_ECE);
 
        tcp_rack_update_reo_wnd(sk, &rs);
 
@@ -4416,7 +4457,6 @@ static void tcp_sack_remove(struct tcp_sock *tp)
 /**
  * tcp_try_coalesce - try to merge skb to prior one
  * @sk: socket
- * @dest: destination queue
  * @to: prior buffer
  * @from: buffer to add in queue
  * @fragstolen: pointer to boolean
@@ -4572,6 +4612,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 
        if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
+               sk->sk_data_ready(sk);
                tcp_drop(sk, skb);
                return;
        }
@@ -4605,7 +4646,11 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
        if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
                                 skb, &fragstolen)) {
 coalesce_done:
-               tcp_grow_window(sk, skb);
+               /* For non sack flows, do not grow window to force DUPACK
+                * and trigger fast retransmit.
+                */
+               if (tcp_is_sack(tp))
+                       tcp_grow_window(sk, skb);
                kfree_skb_partial(skb, fragstolen);
                skb = NULL;
                goto add_sack;
@@ -4689,7 +4734,11 @@ add_sack:
                tcp_sack_new_ofo_skb(sk, seq, end_seq);
 end:
        if (skb) {
-               tcp_grow_window(sk, skb);
+               /* For non sack flows, do not grow window to force DUPACK
+                * and trigger fast retransmit.
+                */
+               if (tcp_is_sack(tp))
+                       tcp_grow_window(sk, skb);
                skb_condense(skb);
                skb_set_owner_r(skb, sk);
        }
@@ -4792,8 +4841,6 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
        skb_dst_drop(skb);
        __skb_pull(skb, tcp_hdr(skb)->doff * 4);
 
-       tcp_ecn_accept_cwr(sk, skb);
-
        tp->rx_opt.dsack = 0;
 
        /*  Queue data for delivery to the user.
@@ -4812,6 +4859,7 @@ queue_and_out:
                        sk_forced_mem_schedule(sk, skb->truesize);
                else if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
                        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
+                       sk->sk_data_ready(sk);
                        goto drop;
                }
 
index ad6435b..f891392 100644 (file)
@@ -76,6 +76,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/inetdevice.h>
+#include <linux/btf_ids.h>
 
 #include <crypto/hash.h>
 #include <linux/scatterlist.h>
@@ -1111,9 +1112,21 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
 
        key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index);
        if (key) {
-               /* Pre-existing entry - just update that one. */
-               memcpy(key->key, newkey, newkeylen);
-               key->keylen = newkeylen;
+               /* Pre-existing entry - just update that one.
+                * Note that the key might be used concurrently.
+                * data_race() is telling kcsan that we do not care of
+                * key mismatches, since changing MD5 key on live flows
+                * can lead to packet drops.
+                */
+               data_race(memcpy(key->key, newkey, newkeylen));
+
+               /* Pairs with READ_ONCE() in tcp_md5_hash_key().
+                * Also note that a reader could catch new key->keylen value
+                * but old key->key[], this is the reason we use __GFP_ZERO
+                * at sock_kmalloc() time below these lines.
+                */
+               WRITE_ONCE(key->keylen, newkeylen);
+
                return 0;
        }
 
@@ -1129,7 +1142,7 @@ int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
                rcu_assign_pointer(tp->md5sig_info, md5sig);
        }
 
-       key = sock_kmalloc(sk, sizeof(*key), gfp);
+       key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
        if (!key)
                return -ENOMEM;
        if (!tcp_alloc_md5sig_pool()) {
@@ -1182,7 +1195,7 @@ static void tcp_clear_md5_list(struct sock *sk)
 }
 
 static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
-                                char __user *optval, int optlen)
+                                sockptr_t optval, int optlen)
 {
        struct tcp_md5sig cmd;
        struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
@@ -1193,7 +1206,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
        if (optlen < sizeof(cmd))
                return -EINVAL;
 
-       if (copy_from_user(&cmd, optval, sizeof(cmd)))
+       if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
                return -EFAULT;
 
        if (sin->sin_family != AF_INET)
@@ -2122,10 +2135,6 @@ const struct inet_connection_sock_af_ops ipv4_specific = {
        .getsockopt        = ip_getsockopt,
        .addr2sockaddr     = inet_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in),
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_ip_setsockopt,
-       .compat_getsockopt = compat_ip_getsockopt,
-#endif
        .mtu_reduced       = tcp_v4_mtu_reduced,
 };
 EXPORT_SYMBOL(ipv4_specific);
@@ -2211,13 +2220,18 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock);
  */
 static void *listening_get_next(struct seq_file *seq, void *cur)
 {
-       struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
+       struct tcp_seq_afinfo *afinfo;
        struct tcp_iter_state *st = seq->private;
        struct net *net = seq_file_net(seq);
        struct inet_listen_hashbucket *ilb;
        struct hlist_nulls_node *node;
        struct sock *sk = cur;
 
+       if (st->bpf_seq_afinfo)
+               afinfo = st->bpf_seq_afinfo;
+       else
+               afinfo = PDE_DATA(file_inode(seq->file));
+
        if (!sk) {
 get_head:
                ilb = &tcp_hashinfo.listening_hash[st->bucket];
@@ -2235,7 +2249,8 @@ get_sk:
        sk_nulls_for_each_from(sk, node) {
                if (!net_eq(sock_net(sk), net))
                        continue;
-               if (sk->sk_family == afinfo->family)
+               if (afinfo->family == AF_UNSPEC ||
+                   sk->sk_family == afinfo->family)
                        return sk;
        }
        spin_unlock(&ilb->lock);
@@ -2272,11 +2287,16 @@ static inline bool empty_bucket(const struct tcp_iter_state *st)
  */
 static void *established_get_first(struct seq_file *seq)
 {
-       struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
+       struct tcp_seq_afinfo *afinfo;
        struct tcp_iter_state *st = seq->private;
        struct net *net = seq_file_net(seq);
        void *rc = NULL;
 
+       if (st->bpf_seq_afinfo)
+               afinfo = st->bpf_seq_afinfo;
+       else
+               afinfo = PDE_DATA(file_inode(seq->file));
+
        st->offset = 0;
        for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
                struct sock *sk;
@@ -2289,7 +2309,8 @@ static void *established_get_first(struct seq_file *seq)
 
                spin_lock_bh(lock);
                sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
-                       if (sk->sk_family != afinfo->family ||
+                       if ((afinfo->family != AF_UNSPEC &&
+                            sk->sk_family != afinfo->family) ||
                            !net_eq(sock_net(sk), net)) {
                                continue;
                        }
@@ -2304,19 +2325,25 @@ out:
 
 static void *established_get_next(struct seq_file *seq, void *cur)
 {
-       struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
+       struct tcp_seq_afinfo *afinfo;
        struct sock *sk = cur;
        struct hlist_nulls_node *node;
        struct tcp_iter_state *st = seq->private;
        struct net *net = seq_file_net(seq);
 
+       if (st->bpf_seq_afinfo)
+               afinfo = st->bpf_seq_afinfo;
+       else
+               afinfo = PDE_DATA(file_inode(seq->file));
+
        ++st->num;
        ++st->offset;
 
        sk = sk_nulls_next(sk);
 
        sk_nulls_for_each_from(sk, node) {
-               if (sk->sk_family == afinfo->family &&
+               if ((afinfo->family == AF_UNSPEC ||
+                    sk->sk_family == afinfo->family) &&
                    net_eq(sock_net(sk), net))
                        return sk;
        }
@@ -2595,6 +2622,74 @@ out:
        return 0;
 }
 
+#ifdef CONFIG_BPF_SYSCALL
+struct bpf_iter__tcp {
+       __bpf_md_ptr(struct bpf_iter_meta *, meta);
+       __bpf_md_ptr(struct sock_common *, sk_common);
+       uid_t uid __aligned(8);
+};
+
+static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
+                            struct sock_common *sk_common, uid_t uid)
+{
+       struct bpf_iter__tcp ctx;
+
+       meta->seq_num--;  /* skip SEQ_START_TOKEN */
+       ctx.meta = meta;
+       ctx.sk_common = sk_common;
+       ctx.uid = uid;
+       return bpf_iter_run_prog(prog, &ctx);
+}
+
+static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
+{
+       struct bpf_iter_meta meta;
+       struct bpf_prog *prog;
+       struct sock *sk = v;
+       uid_t uid;
+
+       if (v == SEQ_START_TOKEN)
+               return 0;
+
+       if (sk->sk_state == TCP_TIME_WAIT) {
+               uid = 0;
+       } else if (sk->sk_state == TCP_NEW_SYN_RECV) {
+               const struct request_sock *req = v;
+
+               uid = from_kuid_munged(seq_user_ns(seq),
+                                      sock_i_uid(req->rsk_listener));
+       } else {
+               uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
+       }
+
+       meta.seq = seq;
+       prog = bpf_iter_get_info(&meta, false);
+       return tcp_prog_seq_show(prog, &meta, v, uid);
+}
+
+static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v)
+{
+       struct bpf_iter_meta meta;
+       struct bpf_prog *prog;
+
+       if (!v) {
+               meta.seq = seq;
+               prog = bpf_iter_get_info(&meta, true);
+               if (prog)
+                       (void)tcp_prog_seq_show(prog, &meta, v, 0);
+       }
+
+       tcp_seq_stop(seq, v);
+}
+
+static const struct seq_operations bpf_iter_tcp_seq_ops = {
+       .show           = bpf_iter_tcp_seq_show,
+       .start          = tcp_seq_start,
+       .next           = tcp_seq_next,
+       .stop           = bpf_iter_tcp_seq_stop,
+};
+#endif
+
 static const struct seq_operations tcp4_seq_ops = {
        .show           = tcp4_seq_show,
        .start          = tcp_seq_start,
@@ -2675,10 +2770,6 @@ struct proto tcp_prot = {
        .rsk_prot               = &tcp_request_sock_ops,
        .h.hashinfo             = &tcp_hashinfo,
        .no_autobind            = true,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt      = compat_tcp_setsockopt,
-       .compat_getsockopt      = compat_tcp_getsockopt,
-#endif
        .diag_destroy           = tcp_abort,
 };
 EXPORT_SYMBOL(tcp_prot);
@@ -2826,8 +2917,64 @@ static struct pernet_operations __net_initdata tcp_sk_ops = {
        .exit_batch = tcp_sk_exit_batch,
 };
 
+#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
+DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta,
+                    struct sock_common *sk_common, uid_t uid)
+
+static int bpf_iter_init_tcp(void *priv_data)
+{
+       struct tcp_iter_state *st = priv_data;
+       struct tcp_seq_afinfo *afinfo;
+       int ret;
+
+       afinfo = kmalloc(sizeof(*afinfo), GFP_USER | __GFP_NOWARN);
+       if (!afinfo)
+               return -ENOMEM;
+
+       afinfo->family = AF_UNSPEC;
+       st->bpf_seq_afinfo = afinfo;
+       ret = bpf_iter_init_seq_net(priv_data);
+       if (ret)
+               kfree(afinfo);
+       return ret;
+}
+
+static void bpf_iter_fini_tcp(void *priv_data)
+{
+       struct tcp_iter_state *st = priv_data;
+
+       kfree(st->bpf_seq_afinfo);
+       bpf_iter_fini_seq_net(priv_data);
+}
+
+static struct bpf_iter_reg tcp_reg_info = {
+       .target                 = "tcp",
+       .seq_ops                = &bpf_iter_tcp_seq_ops,
+       .init_seq_private       = bpf_iter_init_tcp,
+       .fini_seq_private       = bpf_iter_fini_tcp,
+       .seq_priv_size          = sizeof(struct tcp_iter_state),
+       .ctx_arg_info_size      = 1,
+       .ctx_arg_info           = {
+               { offsetof(struct bpf_iter__tcp, sk_common),
+                 PTR_TO_BTF_ID_OR_NULL },
+       },
+};
+
+static void __init bpf_iter_register(void)
+{
+       tcp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON];
+       if (bpf_iter_reg_target(&tcp_reg_info))
+               pr_warn("Warning: could not register bpf iterator tcp\n");
+}
+
+#endif
+
 void __init tcp_v4_init(void)
 {
        if (register_pernet_subsys(&tcp_sk_ops))
                panic("Failed to create the TCP control socket.\n");
+
+#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
+       bpf_iter_register();
+#endif
 }
index a50e199..dc01170 100644 (file)
@@ -700,7 +700,8 @@ static unsigned int tcp_synack_options(const struct sock *sk,
                                       unsigned int mss, struct sk_buff *skb,
                                       struct tcp_out_options *opts,
                                       const struct tcp_md5sig_key *md5,
-                                      struct tcp_fastopen_cookie *foc)
+                                      struct tcp_fastopen_cookie *foc,
+                                      enum tcp_synack_type synack_type)
 {
        struct inet_request_sock *ireq = inet_rsk(req);
        unsigned int remaining = MAX_TCP_OPTION_SPACE;
@@ -715,7 +716,8 @@ static unsigned int tcp_synack_options(const struct sock *sk,
                 * rather than TS in order to fit in better with old,
                 * buggy kernels, but that was deemed to be unnecessary.
                 */
-               ireq->tstamp_ok &= !ireq->sack_ok;
+               if (synack_type != TCP_SYNACK_COOKIE)
+                       ireq->tstamp_ok &= !ireq->sack_ok;
        }
 #endif
 
@@ -1064,6 +1066,10 @@ static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
        list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
 }
 
+INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
+INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
+INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb));
+
 /* This routine actually transmits TCP packets queued in by
  * tcp_do_sendmsg().  This is used by both the initial
  * transmission and possible later retransmissions.
@@ -1207,7 +1213,9 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
        }
 #endif
 
-       icsk->icsk_af_ops->send_check(sk, skb);
+       INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check,
+                          tcp_v6_send_check, tcp_v4_send_check,
+                          sk, skb);
 
        if (likely(tcb->tcp_flags & TCPHDR_ACK))
                tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
@@ -1235,7 +1243,9 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
 
        tcp_add_tx_delay(skb, tp);
 
-       err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
+       err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit,
+                                inet6_csk_xmit, ip_queue_xmit,
+                                sk, skb, &inet->cork.fl);
 
        if (unlikely(err > 0)) {
                tcp_enter_cwr(sk);
@@ -3327,6 +3337,8 @@ int tcp_send_synack(struct sock *sk)
  * sk: listener socket
  * dst: dst entry attached to the SYNACK
  * req: request_sock pointer
+ * foc: cookie for tcp fast open
+ * synack_type: Type of synback to prepare
  *
  * Allocate one skb and build a SYNACK packet.
  * @dst is consumed : Caller should not use it again.
@@ -3394,7 +3406,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 #endif
        skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
        tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
-                                            foc) + sizeof(*th);
+                                            foc, synack_type) + sizeof(*th);
 
        skb_push(skb, tcp_header_size);
        skb_reset_transport_header(skb);
index ada046f..0c08c42 100644 (file)
@@ -314,7 +314,7 @@ out:
 
 /**
  *  tcp_delack_timer() - The TCP delayed ACK timeout handler
- *  @data:  Pointer to the current socket. (gets casted to struct sock *)
+ *  @t:  Pointer to the timer. (gets casted to struct sock *)
  *
  *  This function gets (indirectly) called when the kernel timer for a TCP packet
  *  of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
index 50a9a6e..cd50a61 100644 (file)
@@ -7,7 +7,7 @@
  *    "TCP Veno: TCP Enhancement for Transmission over Wireless Access Networks."
  *    IEEE Journal on Selected Areas in Communication,
  *    Feb. 2003.
- *     See http://www.ie.cuhk.edu.hk/fileadmin/staff_upload/soung/Journal/J3.pdf
+ *     See https://www.ie.cuhk.edu.hk/fileadmin/staff_upload/soung/Journal/J3.pdf
  */
 
 #include <linux/mm.h>
index 1b7ebbc..5a6a2f6 100644 (file)
 #include <net/xfrm.h>
 #include <trace/events/udp.h>
 #include <linux/static_key.h>
+#include <linux/btf_ids.h>
 #include <trace/events/skb.h>
 #include <net/busy_poll.h>
 #include "udp_impl.h"
@@ -408,6 +409,25 @@ static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
                              udp_ehash_secret + net_hash_mix(net));
 }
 
+static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk,
+                                           struct sk_buff *skb,
+                                           __be32 saddr, __be16 sport,
+                                           __be32 daddr, unsigned short hnum)
+{
+       struct sock *reuse_sk = NULL;
+       u32 hash;
+
+       if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
+               hash = udp_ehashfn(net, daddr, hnum, saddr, sport);
+               reuse_sk = reuseport_select_sock(sk, hash, skb,
+                                                sizeof(struct udphdr));
+               /* Fall back to scoring if group has connections */
+               if (reuseport_has_conns(sk, false))
+                       return NULL;
+       }
+       return reuse_sk;
+}
+
 /* called with rcu_read_lock() */
 static struct sock *udp4_lib_lookup2(struct net *net,
                                     __be32 saddr, __be16 sport,
@@ -418,7 +438,6 @@ static struct sock *udp4_lib_lookup2(struct net *net,
 {
        struct sock *sk, *result;
        int score, badness;
-       u32 hash = 0;
 
        result = NULL;
        badness = 0;
@@ -426,15 +445,11 @@ static struct sock *udp4_lib_lookup2(struct net *net,
                score = compute_score(sk, net, saddr, sport,
                                      daddr, hnum, dif, sdif);
                if (score > badness) {
-                       if (sk->sk_reuseport &&
-                           sk->sk_state != TCP_ESTABLISHED) {
-                               hash = udp_ehashfn(net, daddr, hnum,
-                                                  saddr, sport);
-                               result = reuseport_select_sock(sk, hash, skb,
-                                                       sizeof(struct udphdr));
-                               if (result && !reuseport_has_conns(sk, false))
-                                       return result;
-                       }
+                       result = lookup_reuseport(net, sk, skb,
+                                                 saddr, sport, daddr, hnum);
+                       if (result)
+                               return result;
+
                        badness = score;
                        result = sk;
                }
@@ -442,6 +457,29 @@ static struct sock *udp4_lib_lookup2(struct net *net,
        return result;
 }
 
+static inline struct sock *udp4_lookup_run_bpf(struct net *net,
+                                              struct udp_table *udptable,
+                                              struct sk_buff *skb,
+                                              __be32 saddr, __be16 sport,
+                                              __be32 daddr, u16 hnum)
+{
+       struct sock *sk, *reuse_sk;
+       bool no_reuseport;
+
+       if (udptable != &udp_table)
+               return NULL; /* only UDP is supported */
+
+       no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP,
+                                           saddr, sport, daddr, hnum, &sk);
+       if (no_reuseport || IS_ERR_OR_NULL(sk))
+               return sk;
+
+       reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
+       if (reuse_sk)
+               sk = reuse_sk;
+       return sk;
+}
+
 /* UDP is nearly always wildcards out the wazoo, it makes no sense to try
  * harder than this. -DaveM
  */
@@ -449,27 +487,45 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
                __be16 sport, __be32 daddr, __be16 dport, int dif,
                int sdif, struct udp_table *udptable, struct sk_buff *skb)
 {
-       struct sock *result;
        unsigned short hnum = ntohs(dport);
        unsigned int hash2, slot2;
        struct udp_hslot *hslot2;
+       struct sock *result, *sk;
 
        hash2 = ipv4_portaddr_hash(net, daddr, hnum);
        slot2 = hash2 & udptable->mask;
        hslot2 = &udptable->hash2[slot2];
 
+       /* Lookup connected or non-wildcard socket */
        result = udp4_lib_lookup2(net, saddr, sport,
                                  daddr, hnum, dif, sdif,
                                  hslot2, skb);
-       if (!result) {
-               hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
-               slot2 = hash2 & udptable->mask;
-               hslot2 = &udptable->hash2[slot2];
-
-               result = udp4_lib_lookup2(net, saddr, sport,
-                                         htonl(INADDR_ANY), hnum, dif, sdif,
-                                         hslot2, skb);
+       if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
+               goto done;
+
+       /* Lookup redirect from BPF */
+       if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
+               sk = udp4_lookup_run_bpf(net, udptable, skb,
+                                        saddr, sport, daddr, hnum);
+               if (sk) {
+                       result = sk;
+                       goto done;
+               }
        }
+
+       /* Got non-wildcard socket or error on first lookup */
+       if (result)
+               goto done;
+
+       /* Lookup wildcard sockets */
+       hash2 = ipv4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
+       slot2 = hash2 & udptable->mask;
+       hslot2 = &udptable->hash2[slot2];
+
+       result = udp4_lib_lookup2(net, saddr, sport,
+                                 htonl(INADDR_ANY), hnum, dif, sdif,
+                                 hslot2, skb);
+done:
        if (IS_ERR(result))
                return NULL;
        return result;
@@ -2532,7 +2588,7 @@ void udp_destroy_sock(struct sock *sk)
  *     Socket option code for UDP
  */
 int udp_lib_setsockopt(struct sock *sk, int level, int optname,
-                      char __user *optval, unsigned int optlen,
+                      sockptr_t optval, unsigned int optlen,
                       int (*push_pending_frames)(struct sock *))
 {
        struct udp_sock *up = udp_sk(sk);
@@ -2543,7 +2599,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
        if (optlen < sizeof(int))
                return -EINVAL;
 
-       if (get_user(val, (int __user *)optval))
+       if (copy_from_sockptr(&val, optval, sizeof(val)))
                return -EFAULT;
 
        valbool = val ? 1 : 0;
@@ -2647,26 +2703,16 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
 }
 EXPORT_SYMBOL(udp_lib_setsockopt);
 
-int udp_setsockopt(struct sock *sk, int level, int optname,
-                  char __user *optval, unsigned int optlen)
+int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+                  unsigned int optlen)
 {
        if (level == SOL_UDP  ||  level == SOL_UDPLITE)
-               return udp_lib_setsockopt(sk, level, optname, optval, optlen,
+               return udp_lib_setsockopt(sk, level, optname,
+                                         optval, optlen,
                                          udp_push_pending_frames);
        return ip_setsockopt(sk, level, optname, optval, optlen);
 }
 
-#ifdef CONFIG_COMPAT
-int compat_udp_setsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, unsigned int optlen)
-{
-       if (level == SOL_UDP  ||  level == SOL_UDPLITE)
-               return udp_lib_setsockopt(sk, level, optname, optval, optlen,
-                                         udp_push_pending_frames);
-       return compat_ip_setsockopt(sk, level, optname, optval, optlen);
-}
-#endif
-
 int udp_lib_getsockopt(struct sock *sk, int level, int optname,
                       char __user *optval, int __user *optlen)
 {
@@ -2732,20 +2778,11 @@ int udp_getsockopt(struct sock *sk, int level, int optname,
        return ip_getsockopt(sk, level, optname, optval, optlen);
 }
 
-#ifdef CONFIG_COMPAT
-int compat_udp_getsockopt(struct sock *sk, int level, int optname,
-                                char __user *optval, int __user *optlen)
-{
-       if (level == SOL_UDP  ||  level == SOL_UDPLITE)
-               return udp_lib_getsockopt(sk, level, optname, optval, optlen);
-       return compat_ip_getsockopt(sk, level, optname, optval, optlen);
-}
-#endif
 /**
  *     udp_poll - wait for a UDP event.
- *     @file - file struct
- *     @sock - socket
- *     @wait - poll table
+ *     @file: - file struct
+ *     @sock: - socket
+ *     @wait: - poll table
  *
  *     This is same as datagram poll, except for the special case of
  *     blocking sockets. If application is using a blocking fd
@@ -2812,10 +2849,6 @@ struct proto udp_prot = {
        .sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
        .obj_size               = sizeof(struct udp_sock),
        .h.udp_table            = &udp_table,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt      = compat_udp_setsockopt,
-       .compat_getsockopt      = compat_udp_getsockopt,
-#endif
        .diag_destroy           = udp_abort,
 };
 EXPORT_SYMBOL(udp_prot);
@@ -2826,10 +2859,15 @@ EXPORT_SYMBOL(udp_prot);
 static struct sock *udp_get_first(struct seq_file *seq, int start)
 {
        struct sock *sk;
-       struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
+       struct udp_seq_afinfo *afinfo;
        struct udp_iter_state *state = seq->private;
        struct net *net = seq_file_net(seq);
 
+       if (state->bpf_seq_afinfo)
+               afinfo = state->bpf_seq_afinfo;
+       else
+               afinfo = PDE_DATA(file_inode(seq->file));
+
        for (state->bucket = start; state->bucket <= afinfo->udp_table->mask;
             ++state->bucket) {
                struct udp_hslot *hslot = &afinfo->udp_table->hash[state->bucket];
@@ -2841,7 +2879,8 @@ static struct sock *udp_get_first(struct seq_file *seq, int start)
                sk_for_each(sk, &hslot->head) {
                        if (!net_eq(sock_net(sk), net))
                                continue;
-                       if (sk->sk_family == afinfo->family)
+                       if (afinfo->family == AF_UNSPEC ||
+                           sk->sk_family == afinfo->family)
                                goto found;
                }
                spin_unlock_bh(&hslot->lock);
@@ -2853,13 +2892,20 @@ found:
 
 static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
 {
-       struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
+       struct udp_seq_afinfo *afinfo;
        struct udp_iter_state *state = seq->private;
        struct net *net = seq_file_net(seq);
 
+       if (state->bpf_seq_afinfo)
+               afinfo = state->bpf_seq_afinfo;
+       else
+               afinfo = PDE_DATA(file_inode(seq->file));
+
        do {
                sk = sk_next(sk);
-       } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != afinfo->family));
+       } while (sk && (!net_eq(sock_net(sk), net) ||
+                       (afinfo->family != AF_UNSPEC &&
+                        sk->sk_family != afinfo->family)));
 
        if (!sk) {
                if (state->bucket <= afinfo->udp_table->mask)
@@ -2904,9 +2950,14 @@ EXPORT_SYMBOL(udp_seq_next);
 
 void udp_seq_stop(struct seq_file *seq, void *v)
 {
-       struct udp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
+       struct udp_seq_afinfo *afinfo;
        struct udp_iter_state *state = seq->private;
 
+       if (state->bpf_seq_afinfo)
+               afinfo = state->bpf_seq_afinfo;
+       else
+               afinfo = PDE_DATA(file_inode(seq->file));
+
        if (state->bucket <= afinfo->udp_table->mask)
                spin_unlock_bh(&afinfo->udp_table->hash[state->bucket].lock);
 }
@@ -2950,6 +3001,67 @@ int udp4_seq_show(struct seq_file *seq, void *v)
        return 0;
 }
 
+#ifdef CONFIG_BPF_SYSCALL
+struct bpf_iter__udp {
+       __bpf_md_ptr(struct bpf_iter_meta *, meta);
+       __bpf_md_ptr(struct udp_sock *, udp_sk);
+       uid_t uid __aligned(8);
+       int bucket __aligned(8);
+};
+
+static int udp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
+                            struct udp_sock *udp_sk, uid_t uid, int bucket)
+{
+       struct bpf_iter__udp ctx;
+
+       meta->seq_num--;  /* skip SEQ_START_TOKEN */
+       ctx.meta = meta;
+       ctx.udp_sk = udp_sk;
+       ctx.uid = uid;
+       ctx.bucket = bucket;
+       return bpf_iter_run_prog(prog, &ctx);
+}
+
+static int bpf_iter_udp_seq_show(struct seq_file *seq, void *v)
+{
+       struct udp_iter_state *state = seq->private;
+       struct bpf_iter_meta meta;
+       struct bpf_prog *prog;
+       struct sock *sk = v;
+       uid_t uid;
+
+       if (v == SEQ_START_TOKEN)
+               return 0;
+
+       uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
+       meta.seq = seq;
+       prog = bpf_iter_get_info(&meta, false);
+       return udp_prog_seq_show(prog, &meta, v, uid, state->bucket);
+}
+
+static void bpf_iter_udp_seq_stop(struct seq_file *seq, void *v)
+{
+       struct bpf_iter_meta meta;
+       struct bpf_prog *prog;
+
+       if (!v) {
+               meta.seq = seq;
+               prog = bpf_iter_get_info(&meta, true);
+               if (prog)
+                       (void)udp_prog_seq_show(prog, &meta, v, 0, 0);
+       }
+
+       udp_seq_stop(seq, v);
+}
+
+static const struct seq_operations bpf_iter_udp_seq_ops = {
+       .start          = udp_seq_start,
+       .next           = udp_seq_next,
+       .stop           = bpf_iter_udp_seq_stop,
+       .show           = bpf_iter_udp_seq_show,
+};
+#endif
+
 const struct seq_operations udp_seq_ops = {
        .start          = udp_seq_start,
        .next           = udp_seq_next,
@@ -3067,6 +3179,58 @@ static struct pernet_operations __net_initdata udp_sysctl_ops = {
        .init   = udp_sysctl_init,
 };
 
+#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
+DEFINE_BPF_ITER_FUNC(udp, struct bpf_iter_meta *meta,
+                    struct udp_sock *udp_sk, uid_t uid, int bucket)
+
+static int bpf_iter_init_udp(void *priv_data)
+{
+       struct udp_iter_state *st = priv_data;
+       struct udp_seq_afinfo *afinfo;
+       int ret;
+
+       afinfo = kmalloc(sizeof(*afinfo), GFP_USER | __GFP_NOWARN);
+       if (!afinfo)
+               return -ENOMEM;
+
+       afinfo->family = AF_UNSPEC;
+       afinfo->udp_table = &udp_table;
+       st->bpf_seq_afinfo = afinfo;
+       ret = bpf_iter_init_seq_net(priv_data);
+       if (ret)
+               kfree(afinfo);
+       return ret;
+}
+
+static void bpf_iter_fini_udp(void *priv_data)
+{
+       struct udp_iter_state *st = priv_data;
+
+       kfree(st->bpf_seq_afinfo);
+       bpf_iter_fini_seq_net(priv_data);
+}
+
+static struct bpf_iter_reg udp_reg_info = {
+       .target                 = "udp",
+       .seq_ops                = &bpf_iter_udp_seq_ops,
+       .init_seq_private       = bpf_iter_init_udp,
+       .fini_seq_private       = bpf_iter_fini_udp,
+       .seq_priv_size          = sizeof(struct udp_iter_state),
+       .ctx_arg_info_size      = 1,
+       .ctx_arg_info           = {
+               { offsetof(struct bpf_iter__udp, udp_sk),
+                 PTR_TO_BTF_ID_OR_NULL },
+       },
+};
+
+static void __init bpf_iter_register(void)
+{
+       udp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UDP];
+       if (bpf_iter_reg_target(&udp_reg_info))
+               pr_warn("Warning: could not register bpf iterator udp\n");
+}
+#endif
+
 void __init udp_init(void)
 {
        unsigned long limit;
@@ -3092,4 +3256,8 @@ void __init udp_init(void)
 
        if (register_pernet_subsys(&udp_sysctl_ops))
                panic("UDP: failed to init sysctl parameters.\n");
+
+#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
+       bpf_iter_register();
+#endif
 }
index 6b2fa77..2878d82 100644 (file)
@@ -12,17 +12,11 @@ int __udp4_lib_err(struct sk_buff *, u32, struct udp_table *);
 int udp_v4_get_port(struct sock *sk, unsigned short snum);
 void udp_v4_rehash(struct sock *sk);
 
-int udp_setsockopt(struct sock *sk, int level, int optname,
-                  char __user *optval, unsigned int optlen);
+int udp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+                  unsigned int optlen);
 int udp_getsockopt(struct sock *sk, int level, int optname,
                   char __user *optval, int __user *optlen);
 
-#ifdef CONFIG_COMPAT
-int compat_udp_setsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, unsigned int optlen);
-int compat_udp_getsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, int __user *optlen);
-#endif
 int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
                int flags, int *addr_len);
 int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c
new file mode 100644 (file)
index 0000000..f0dbd99
--- /dev/null
@@ -0,0 +1,890 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2020 Facebook Inc.
+
+#include <linux/ethtool_netlink.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <net/udp_tunnel.h>
+
+enum udp_tunnel_nic_table_entry_flags {
+       UDP_TUNNEL_NIC_ENTRY_ADD        = BIT(0),
+       UDP_TUNNEL_NIC_ENTRY_DEL        = BIT(1),
+       UDP_TUNNEL_NIC_ENTRY_OP_FAIL    = BIT(2),
+       UDP_TUNNEL_NIC_ENTRY_FROZEN     = BIT(3),
+};
+
+struct udp_tunnel_nic_table_entry {
+       __be16 port;
+       u8 type;
+       u8 use_cnt;
+       u8 flags;
+       u8 hw_priv;
+};
+
+/**
+ * struct udp_tunnel_nic - UDP tunnel port offload state
+ * @work:      async work for talking to hardware from process context
+ * @dev:       netdev pointer
+ * @need_sync: at least one port start changed
+ * @need_replay: space was freed, we need a replay of all ports
+ * @work_pending: @work is currently scheduled
+ * @n_tables:  number of tables under @entries
+ * @missed:    bitmap of tables which overflown
+ * @entries:   table of tables of ports currently offloaded
+ */
+struct udp_tunnel_nic {
+       struct work_struct work;
+
+       struct net_device *dev;
+
+       u8 need_sync:1;
+       u8 need_replay:1;
+       u8 work_pending:1;
+
+       unsigned int n_tables;
+       unsigned long missed;
+       struct udp_tunnel_nic_table_entry **entries;
+};
+
+/* We ensure all work structs are done using driver state, but not the code.
+ * We need a workqueue we can flush before module gets removed.
+ */
+static struct workqueue_struct *udp_tunnel_nic_workqueue;
+
+static const char *udp_tunnel_nic_tunnel_type_name(unsigned int type)
+{
+       switch (type) {
+       case UDP_TUNNEL_TYPE_VXLAN:
+               return "vxlan";
+       case UDP_TUNNEL_TYPE_GENEVE:
+               return "geneve";
+       case UDP_TUNNEL_TYPE_VXLAN_GPE:
+               return "vxlan-gpe";
+       default:
+               return "unknown";
+       }
+}
+
+static bool
+udp_tunnel_nic_entry_is_free(struct udp_tunnel_nic_table_entry *entry)
+{
+       return entry->use_cnt == 0 && !entry->flags;
+}
+
+static bool
+udp_tunnel_nic_entry_is_present(struct udp_tunnel_nic_table_entry *entry)
+{
+       return entry->use_cnt && !(entry->flags & ~UDP_TUNNEL_NIC_ENTRY_FROZEN);
+}
+
+static bool
+udp_tunnel_nic_entry_is_frozen(struct udp_tunnel_nic_table_entry *entry)
+{
+       return entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN;
+}
+
+static void
+udp_tunnel_nic_entry_freeze_used(struct udp_tunnel_nic_table_entry *entry)
+{
+       if (!udp_tunnel_nic_entry_is_free(entry))
+               entry->flags |= UDP_TUNNEL_NIC_ENTRY_FROZEN;
+}
+
+static void
+udp_tunnel_nic_entry_unfreeze(struct udp_tunnel_nic_table_entry *entry)
+{
+       entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_FROZEN;
+}
+
+static bool
+udp_tunnel_nic_entry_is_queued(struct udp_tunnel_nic_table_entry *entry)
+{
+       return entry->flags & (UDP_TUNNEL_NIC_ENTRY_ADD |
+                              UDP_TUNNEL_NIC_ENTRY_DEL);
+}
+
+static void
+udp_tunnel_nic_entry_queue(struct udp_tunnel_nic *utn,
+                          struct udp_tunnel_nic_table_entry *entry,
+                          unsigned int flag)
+{
+       entry->flags |= flag;
+       utn->need_sync = 1;
+}
+
+static void
+udp_tunnel_nic_ti_from_entry(struct udp_tunnel_nic_table_entry *entry,
+                            struct udp_tunnel_info *ti)
+{
+       memset(ti, 0, sizeof(*ti));
+       ti->port = entry->port;
+       ti->type = entry->type;
+       ti->hw_priv = entry->hw_priv;
+}
+
+static bool
+udp_tunnel_nic_is_empty(struct net_device *dev, struct udp_tunnel_nic *utn)
+{
+       const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
+       unsigned int i, j;
+
+       for (i = 0; i < utn->n_tables; i++)
+               for (j = 0; j < info->tables[i].n_entries; j++)
+                       if (!udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
+                               return false;
+       return true;
+}
+
+static bool
+udp_tunnel_nic_should_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
+{
+       const struct udp_tunnel_nic_table_info *table;
+       unsigned int i, j;
+
+       if (!utn->missed)
+               return false;
+
+       for (i = 0; i < utn->n_tables; i++) {
+               table = &dev->udp_tunnel_nic_info->tables[i];
+               if (!test_bit(i, &utn->missed))
+                       continue;
+
+               for (j = 0; j < table->n_entries; j++)
+                       if (udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
+                               return true;
+       }
+
+       return false;
+}
+
+static void
+__udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table,
+                         unsigned int idx, struct udp_tunnel_info *ti)
+{
+       struct udp_tunnel_nic_table_entry *entry;
+       struct udp_tunnel_nic *utn;
+
+       utn = dev->udp_tunnel_nic;
+       entry = &utn->entries[table][idx];
+
+       if (entry->use_cnt)
+               udp_tunnel_nic_ti_from_entry(entry, ti);
+}
+
+static void
+__udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
+                              unsigned int idx, u8 priv)
+{
+       dev->udp_tunnel_nic->entries[table][idx].hw_priv = priv;
+}
+
+static void
+udp_tunnel_nic_entry_update_done(struct udp_tunnel_nic_table_entry *entry,
+                                int err)
+{
+       bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
+
+       WARN_ON_ONCE(entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD &&
+                    entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL);
+
+       if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD &&
+           (!err || (err == -EEXIST && dodgy)))
+               entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_ADD;
+
+       if (entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL &&
+           (!err || (err == -ENOENT && dodgy)))
+               entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_DEL;
+
+       if (!err)
+               entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
+       else
+               entry->flags |= UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
+}
+
+static void
+udp_tunnel_nic_device_sync_one(struct net_device *dev,
+                              struct udp_tunnel_nic *utn,
+                              unsigned int table, unsigned int idx)
+{
+       struct udp_tunnel_nic_table_entry *entry;
+       struct udp_tunnel_info ti;
+       int err;
+
+       entry = &utn->entries[table][idx];
+       if (!udp_tunnel_nic_entry_is_queued(entry))
+               return;
+
+       udp_tunnel_nic_ti_from_entry(entry, &ti);
+       if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD)
+               err = dev->udp_tunnel_nic_info->set_port(dev, table, idx, &ti);
+       else
+               err = dev->udp_tunnel_nic_info->unset_port(dev, table, idx,
+                                                          &ti);
+       udp_tunnel_nic_entry_update_done(entry, err);
+
+       if (err)
+               netdev_warn(dev,
+                           "UDP tunnel port sync failed port %d type %s: %d\n",
+                           be16_to_cpu(entry->port),
+                           udp_tunnel_nic_tunnel_type_name(entry->type),
+                           err);
+}
+
+static void
+udp_tunnel_nic_device_sync_by_port(struct net_device *dev,
+                                  struct udp_tunnel_nic *utn)
+{
+       const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
+       unsigned int i, j;
+
+       for (i = 0; i < utn->n_tables; i++)
+               for (j = 0; j < info->tables[i].n_entries; j++)
+                       udp_tunnel_nic_device_sync_one(dev, utn, i, j);
+}
+
+static void
+udp_tunnel_nic_device_sync_by_table(struct net_device *dev,
+                                   struct udp_tunnel_nic *utn)
+{
+       const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
+       unsigned int i, j;
+       int err;
+
+       for (i = 0; i < utn->n_tables; i++) {
+               /* Find something that needs sync in this table */
+               for (j = 0; j < info->tables[i].n_entries; j++)
+                       if (udp_tunnel_nic_entry_is_queued(&utn->entries[i][j]))
+                               break;
+               if (j == info->tables[i].n_entries)
+                       continue;
+
+               err = info->sync_table(dev, i);
+               if (err)
+                       netdev_warn(dev, "UDP tunnel port sync failed for table %d: %d\n",
+                                   i, err);
+
+               for (j = 0; j < info->tables[i].n_entries; j++) {
+                       struct udp_tunnel_nic_table_entry *entry;
+
+                       entry = &utn->entries[i][j];
+                       if (udp_tunnel_nic_entry_is_queued(entry))
+                               udp_tunnel_nic_entry_update_done(entry, err);
+               }
+       }
+}
+
+static void
+__udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
+{
+       if (!utn->need_sync)
+               return;
+
+       if (dev->udp_tunnel_nic_info->sync_table)
+               udp_tunnel_nic_device_sync_by_table(dev, utn);
+       else
+               udp_tunnel_nic_device_sync_by_port(dev, utn);
+
+       utn->need_sync = 0;
+       /* Can't replay directly here, in case we come from the tunnel driver's
+        * notification - trying to replay may deadlock inside tunnel driver.
+        */
+       utn->need_replay = udp_tunnel_nic_should_replay(dev, utn);
+}
+
+static void
+udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
+{
+       const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
+       bool may_sleep;
+
+       if (!utn->need_sync)
+               return;
+
+       /* Drivers which sleep in the callback need to update from
+        * the workqueue, if we come from the tunnel driver's notification.
+        */
+       may_sleep = info->flags & UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
+       if (!may_sleep)
+               __udp_tunnel_nic_device_sync(dev, utn);
+       if (may_sleep || utn->need_replay) {
+               queue_work(udp_tunnel_nic_workqueue, &utn->work);
+               utn->work_pending = 1;
+       }
+}
+
+static bool
+udp_tunnel_nic_table_is_capable(const struct udp_tunnel_nic_table_info *table,
+                               struct udp_tunnel_info *ti)
+{
+       return table->tunnel_types & ti->type;
+}
+
+/* Check whether any of @dev's port tables could hold @ti.  NICs flagged
+ * IPv4-only reject every non-AF_INET tunnel up front.
+ */
+static bool
+udp_tunnel_nic_is_capable(struct net_device *dev, struct udp_tunnel_nic *utn,
+                         struct udp_tunnel_info *ti)
+{
+       const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
+       unsigned int i;
+
+       /* Special case IPv4-only NICs */
+       if (info->flags & UDP_TUNNEL_NIC_INFO_IPV4_ONLY &&
+           ti->sa_family != AF_INET)
+               return false;
+
+       for (i = 0; i < utn->n_tables; i++)
+               if (udp_tunnel_nic_table_is_capable(&info->tables[i], ti))
+                       return true;
+       return false;
+}
+
+/* Return true if @ti's port is already occupied by an entry of a
+ * *different* tunnel type.  The colliding table is marked in utn->missed
+ * so the port can be re-added later via replay.
+ */
+static int
+udp_tunnel_nic_has_collision(struct net_device *dev, struct udp_tunnel_nic *utn,
+                            struct udp_tunnel_info *ti)
+{
+       const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
+       struct udp_tunnel_nic_table_entry *entry;
+       unsigned int i, j;
+
+       for (i = 0; i < utn->n_tables; i++)
+               for (j = 0; j < info->tables[i].n_entries; j++) {
+                       entry = &utn->entries[i][j];
+
+                       if (!udp_tunnel_nic_entry_is_free(entry) &&
+                           entry->port == ti->port &&
+                           entry->type != ti->type) {
+                               __set_bit(i, &utn->missed);
+                               return true;
+                       }
+               }
+       return false;
+}
+
+/* Adjust an entry's use count by @use_cnt_adj and queue the matching
+ * device op (ADD when the entry becomes used, DEL when it becomes unused).
+ * An opposite op still pending (not yet sent to the device) is cancelled
+ * instead, unless the entry is dodgy (a prior op failed) and must be
+ * retried anyway.
+ */
+static void
+udp_tunnel_nic_entry_adj(struct udp_tunnel_nic *utn,
+                        unsigned int table, unsigned int idx, int use_cnt_adj)
+{
+       struct udp_tunnel_nic_table_entry *entry =  &utn->entries[table][idx];
+       bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
+       unsigned int from, to;
+
+       /* If not going from used to unused or vice versa - all done.
+        * For dodgy entries make sure we try to sync again (queue the entry).
+        */
+       entry->use_cnt += use_cnt_adj;
+       if (!dodgy && !entry->use_cnt == !(entry->use_cnt - use_cnt_adj))
+               return;
+
+       /* Cancel the op before it was sent to the device, if possible,
+        * otherwise we'd need to take special care to issue commands
+        * in the same order the ports arrived.
+        */
+       if (use_cnt_adj < 0) {
+               from = UDP_TUNNEL_NIC_ENTRY_ADD;
+               to = UDP_TUNNEL_NIC_ENTRY_DEL;
+       } else {
+               from = UDP_TUNNEL_NIC_ENTRY_DEL;
+               to = UDP_TUNNEL_NIC_ENTRY_ADD;
+       }
+
+       if (entry->flags & from) {
+               entry->flags &= ~from;
+               if (!dodgy)
+                       return;
+       }
+
+       udp_tunnel_nic_entry_queue(utn, entry, to);
+}
+
+/* If entry (@table, @idx) matches @ti, adjust its use count and report a
+ * match.  Frozen entries (mid-replay) count as matched but are left alone
+ * so the replay does not double-account them.
+ */
+static bool
+udp_tunnel_nic_entry_try_adj(struct udp_tunnel_nic *utn,
+                            unsigned int table, unsigned int idx,
+                            struct udp_tunnel_info *ti, int use_cnt_adj)
+{
+       struct udp_tunnel_nic_table_entry *entry =  &utn->entries[table][idx];
+
+       if (udp_tunnel_nic_entry_is_free(entry) ||
+           entry->port != ti->port ||
+           entry->type != ti->type)
+               return false;
+
+       if (udp_tunnel_nic_entry_is_frozen(entry))
+               return true;
+
+       udp_tunnel_nic_entry_adj(utn, table, idx, use_cnt_adj);
+       return true;
+}
+
+/* Try to find existing matching entry and adjust its use count, instead of
+ * adding a new one. Returns true if entry was found. In case of delete the
+ * entry may have gotten removed in the process, in which case it will be
+ * queued for removal.
+ *
+ * @use_cnt_adj is +1 for an add and -1 for a delete (see the
+ * udp_tunnel_nic_add_existing/del_existing wrappers below).
+ */
+static bool
+udp_tunnel_nic_try_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
+                           struct udp_tunnel_info *ti, int use_cnt_adj)
+{
+       const struct udp_tunnel_nic_table_info *table;
+       unsigned int i, j;
+
+       for (i = 0; i < utn->n_tables; i++) {
+               table = &dev->udp_tunnel_nic_info->tables[i];
+               if (!udp_tunnel_nic_table_is_capable(table, ti))
+                       continue;
+
+               for (j = 0; j < table->n_entries; j++)
+                       if (udp_tunnel_nic_entry_try_adj(utn, i, j, ti,
+                                                        use_cnt_adj))
+                               return true;
+       }
+
+       return false;
+}
+
+/* Take a reference on an already-tracked port; true if one was found. */
+static bool
+udp_tunnel_nic_add_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
+                           struct udp_tunnel_info *ti)
+{
+       return udp_tunnel_nic_try_existing(dev, utn, ti, +1);
+}
+
+/* Drop a reference on a tracked port; true if a matching entry existed. */
+static bool
+udp_tunnel_nic_del_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
+                           struct udp_tunnel_info *ti)
+{
+       return udp_tunnel_nic_try_existing(dev, utn, ti, -1);
+}
+
+/* Claim the first free slot in a capable table for @ti and queue the ADD
+ * op.  Tables that were capable but full are recorded in utn->missed so
+ * the port can be picked up by a later replay.
+ */
+static bool
+udp_tunnel_nic_add_new(struct net_device *dev, struct udp_tunnel_nic *utn,
+                      struct udp_tunnel_info *ti)
+{
+       const struct udp_tunnel_nic_table_info *table;
+       unsigned int i, j;
+
+       for (i = 0; i < utn->n_tables; i++) {
+               table = &dev->udp_tunnel_nic_info->tables[i];
+               if (!udp_tunnel_nic_table_is_capable(table, ti))
+                       continue;
+
+               for (j = 0; j < table->n_entries; j++) {
+                       struct udp_tunnel_nic_table_entry *entry;
+
+                       entry = &utn->entries[i][j];
+                       if (!udp_tunnel_nic_entry_is_free(entry))
+                               continue;
+
+                       entry->port = ti->port;
+                       entry->type = ti->type;
+                       entry->use_cnt = 1;
+                       udp_tunnel_nic_entry_queue(utn, entry,
+                                                  UDP_TUNNEL_NIC_ENTRY_ADD);
+                       return true;
+               }
+
+               /* The different table may still fit this port in, but there
+                * are no devices currently which have multiple tables accepting
+                * the same tunnel type, and false positives are okay.
+                */
+               __set_bit(i, &utn->missed);
+       }
+
+       return false;
+}
+
+/* udp_tunnel_nic_ops->add_port implementation: take a reference on @ti's
+ * port (reusing an existing entry when possible, allocating otherwise)
+ * and sync the result to the device.
+ */
+static void
+__udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
+{
+       const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
+       struct udp_tunnel_nic *utn;
+
+       utn = dev->udp_tunnel_nic;
+       if (!utn)
+               return;
+       /* OPEN_ONLY devices are only programmed while up */
+       if (!netif_running(dev) && info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)
+               return;
+
+       if (!udp_tunnel_nic_is_capable(dev, utn, ti))
+               return;
+
+       /* It may happen that a tunnel of one type is removed and different
+        * tunnel type tries to reuse its port before the device was informed.
+        * Rely on utn->missed to re-add this port later.
+        */
+       if (udp_tunnel_nic_has_collision(dev, utn, ti))
+               return;
+
+       if (!udp_tunnel_nic_add_existing(dev, utn, ti))
+               udp_tunnel_nic_add_new(dev, utn, ti);
+
+       udp_tunnel_nic_device_sync(dev, utn);
+}
+
+/* udp_tunnel_nic_ops->del_port implementation: drop a reference on @ti's
+ * port and sync any resulting removal to the device.
+ */
+static void
+__udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti)
+{
+       struct udp_tunnel_nic *utn;
+
+       utn = dev->udp_tunnel_nic;
+       if (!utn)
+               return;
+
+       if (!udp_tunnel_nic_is_capable(dev, utn, ti))
+               return;
+
+       udp_tunnel_nic_del_existing(dev, utn, ti);
+
+       udp_tunnel_nic_device_sync(dev, utn);
+}
+
+/* udp_tunnel_nic_ops->reset_ntf implementation: the driver lost its port
+ * table (e.g. device reset), so clear stale DEL/OP_FAIL state and re-queue
+ * an ADD for every entry that still has users, then sync inline.
+ */
+static void __udp_tunnel_nic_reset_ntf(struct net_device *dev)
+{
+       const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
+       struct udp_tunnel_nic *utn;
+       unsigned int i, j;
+
+       ASSERT_RTNL();
+
+       utn = dev->udp_tunnel_nic;
+       if (!utn)
+               return;
+
+       utn->need_sync = false;
+       for (i = 0; i < utn->n_tables; i++)
+               for (j = 0; j < info->tables[i].n_entries; j++) {
+                       struct udp_tunnel_nic_table_entry *entry;
+
+                       entry = &utn->entries[i][j];
+
+                       entry->flags &= ~(UDP_TUNNEL_NIC_ENTRY_DEL |
+                                         UDP_TUNNEL_NIC_ENTRY_OP_FAIL);
+                       /* We don't release rtnl across ops */
+                       WARN_ON(entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN);
+                       if (!entry->use_cnt)
+                               continue;
+
+                       udp_tunnel_nic_entry_queue(utn, entry,
+                                                  UDP_TUNNEL_NIC_ENTRY_ADD);
+               }
+
+       __udp_tunnel_nic_device_sync(dev, utn);
+}
+
+/* udp_tunnel_nic_ops->dump_size implementation: netlink size estimate for
+ * dumping all present entries of @table (one nest with port + type each).
+ */
+static size_t
+__udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
+{
+       const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
+       struct udp_tunnel_nic *utn;
+       unsigned int j;
+       size_t size;
+
+       utn = dev->udp_tunnel_nic;
+       if (!utn)
+               return 0;
+
+       size = 0;
+       for (j = 0; j < info->tables[table].n_entries; j++) {
+               if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j]))
+                       continue;
+
+               size += nla_total_size(0) +              /* _TABLE_ENTRY */
+                       nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */
+                       nla_total_size(sizeof(u32));     /* _ENTRY_TYPE */
+       }
+
+       return size;
+}
+
+/* udp_tunnel_nic_ops->dump_write implementation: emit each present entry
+ * of @table as an ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY nest carrying the port
+ * and the tunnel type (as a bit index via ilog2).  Returns 0 or -EMSGSIZE
+ * with the partial nest cancelled.
+ */
+static int
+__udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
+                           struct sk_buff *skb)
+{
+       const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
+       struct udp_tunnel_nic *utn;
+       struct nlattr *nest;
+       unsigned int j;
+
+       utn = dev->udp_tunnel_nic;
+       if (!utn)
+               return 0;
+
+       for (j = 0; j < info->tables[table].n_entries; j++) {
+               if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j]))
+                       continue;
+
+               nest = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY);
+
+               if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT,
+                                utn->entries[table][j].port) ||
+                   nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE,
+                               ilog2(utn->entries[table][j].type)))
+                       goto err_cancel;
+
+               nla_nest_end(skb, nest);
+       }
+
+       return 0;
+
+err_cancel:
+       nla_nest_cancel(skb, nest);
+       return -EMSGSIZE;
+}
+
+/* Ops table handed to the core via udp_tunnel_nic_ops at module init. */
+static const struct udp_tunnel_nic_ops __udp_tunnel_nic_ops = {
+       .get_port       = __udp_tunnel_nic_get_port,
+       .set_port_priv  = __udp_tunnel_nic_set_port_priv,
+       .add_port       = __udp_tunnel_nic_add_port,
+       .del_port       = __udp_tunnel_nic_del_port,
+       .reset_ntf      = __udp_tunnel_nic_reset_ntf,
+       .dump_size      = __udp_tunnel_nic_dump_size,
+       .dump_write     = __udp_tunnel_nic_dump_write,
+};
+
+/* Drop every reference held on every entry, sync the resulting deletes to
+ * the device inline, then wipe the tables clean.
+ */
+static void
+udp_tunnel_nic_flush(struct net_device *dev, struct udp_tunnel_nic *utn)
+{
+       const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
+       unsigned int i, j;
+
+       for (i = 0; i < utn->n_tables; i++)
+               for (j = 0; j < info->tables[i].n_entries; j++) {
+                       /* negate the count to bring use_cnt down to zero */
+                       int adj_cnt = -utn->entries[i][j].use_cnt;
+
+                       if (adj_cnt)
+                               udp_tunnel_nic_entry_adj(utn, i, j, adj_cnt);
+               }
+
+       __udp_tunnel_nic_device_sync(dev, utn);
+
+       for (i = 0; i < utn->n_tables; i++)
+               memset(utn->entries[i], 0, array_size(info->tables[i].n_entries,
+                                                     sizeof(**utn->entries)));
+       WARN_ON(utn->need_sync);
+       utn->need_replay = 0;
+}
+
+/* Re-request all tunnel ports from the stack (udp_tunnel_get_rx_info) so
+ * ports we previously had to skip (utn->missed) get another chance.
+ * Entries already in use are frozen for the duration, see below.
+ */
+static void
+udp_tunnel_nic_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
+{
+       const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
+       unsigned int i, j;
+
+       /* Freeze all the ports we are already tracking so that the replay
+        * does not double up the refcount.
+        */
+       for (i = 0; i < utn->n_tables; i++)
+               for (j = 0; j < info->tables[i].n_entries; j++)
+                       udp_tunnel_nic_entry_freeze_used(&utn->entries[i][j]);
+       utn->missed = 0;
+       utn->need_replay = 0;
+
+       udp_tunnel_get_rx_info(dev);
+
+       for (i = 0; i < utn->n_tables; i++)
+               for (j = 0; j < info->tables[i].n_entries; j++)
+                       udp_tunnel_nic_entry_unfreeze(&utn->entries[i][j]);
+}
+
+/* Workqueue handler: perform the deferred device sync (and replay, if one
+ * was requested) under rtnl.  work_pending is cleared under rtnl before
+ * the sync so unregister can observe completion.
+ */
+static void udp_tunnel_nic_device_sync_work(struct work_struct *work)
+{
+       struct udp_tunnel_nic *utn =
+               container_of(work, struct udp_tunnel_nic, work);
+
+       rtnl_lock();
+       utn->work_pending = 0;
+       __udp_tunnel_nic_device_sync(utn->dev, utn);
+
+       if (utn->need_replay)
+               udp_tunnel_nic_replay(utn->dev, utn);
+       rtnl_unlock();
+}
+
+/* Allocate the per-device state: the utn itself plus one zeroed entry
+ * array per table.  Returns NULL on allocation failure, with everything
+ * allocated so far unwound.
+ */
+static struct udp_tunnel_nic *
+udp_tunnel_nic_alloc(const struct udp_tunnel_nic_info *info,
+                    unsigned int n_tables)
+{
+       struct udp_tunnel_nic *utn;
+       unsigned int i;
+
+       utn = kzalloc(sizeof(*utn), GFP_KERNEL);
+       if (!utn)
+               return NULL;
+       utn->n_tables = n_tables;
+       INIT_WORK(&utn->work, udp_tunnel_nic_device_sync_work);
+
+       utn->entries = kmalloc_array(n_tables, sizeof(void *), GFP_KERNEL);
+       if (!utn->entries)
+               goto err_free_utn;
+
+       for (i = 0; i < n_tables; i++) {
+               utn->entries[i] = kcalloc(info->tables[i].n_entries,
+                                         sizeof(*utn->entries[i]), GFP_KERNEL);
+               if (!utn->entries[i])
+                       goto err_free_prev_entries;
+       }
+
+       return utn;
+
+err_free_prev_entries:
+       /* free only the tables allocated before the failure */
+       while (i--)
+               kfree(utn->entries[i]);
+       kfree(utn->entries);
+err_free_utn:
+       kfree(utn);
+       return NULL;
+}
+
+/* NETDEV_REGISTER handler: validate the driver's info (set_port/unset_port
+ * must come as a pair and be mutually exclusive with sync_table; tables
+ * must be contiguous starting at index 0), allocate state, and — unless
+ * the device is OPEN_ONLY — immediately pull the current tunnel ports.
+ */
+static int udp_tunnel_nic_register(struct net_device *dev)
+{
+       const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
+       struct udp_tunnel_nic *utn;
+       unsigned int n_tables, i;
+
+       BUILD_BUG_ON(sizeof(utn->missed) * BITS_PER_BYTE <
+                    UDP_TUNNEL_NIC_MAX_TABLES);
+
+       if (WARN_ON(!info->set_port != !info->unset_port) ||
+           WARN_ON(!info->set_port == !info->sync_table) ||
+           WARN_ON(!info->tables[0].n_entries))
+               return -EINVAL;
+
+       n_tables = 1;
+       for (i = 1; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
+               if (!info->tables[i].n_entries)
+                       continue;
+
+               n_tables++;
+               /* reject gaps: a populated table after an empty one */
+               if (WARN_ON(!info->tables[i - 1].n_entries))
+                       return -EINVAL;
+       }
+
+       utn = udp_tunnel_nic_alloc(info, n_tables);
+       if (!utn)
+               return -ENOMEM;
+
+       utn->dev = dev;
+       dev_hold(dev);
+       dev->udp_tunnel_nic = utn;
+
+       if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
+               udp_tunnel_get_rx_info(dev);
+
+       return 0;
+}
+
+/* NETDEV_UNREGISTER handler: flush all entries, then free the state and
+ * release the device reference — unless the sync work is still pending,
+ * in which case we return and let netdev core retry (it keeps retrying
+ * until our dev_hold() reference is dropped).
+ */
+static void
+udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
+{
+       unsigned int i;
+
+       /* Flush before we check work, so we don't waste time adding entries
+        * from the work which we will boot immediately.
+        */
+       udp_tunnel_nic_flush(dev, utn);
+
+       /* Wait for the work to be done using the state, netdev core will
+        * retry unregister until we give up our reference on this device.
+        */
+       if (utn->work_pending)
+               return;
+
+       for (i = 0; i < utn->n_tables; i++)
+               kfree(utn->entries[i]);
+       kfree(utn->entries);
+       kfree(utn);
+       dev->udp_tunnel_nic = NULL;
+       dev_put(dev);
+}
+
+/* Netdevice notifier: wires register/unregister to device lifetime, and —
+ * for OPEN_ONLY devices — programs the ports on NETDEV_UP and flushes
+ * them on NETDEV_GOING_DOWN.
+ */
+static int
+udp_tunnel_nic_netdevice_event(struct notifier_block *unused,
+                              unsigned long event, void *ptr)
+{
+       struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       const struct udp_tunnel_nic_info *info;
+       struct udp_tunnel_nic *utn;
+
+       info = dev->udp_tunnel_nic_info;
+       if (!info)
+               return NOTIFY_DONE;
+
+       if (event == NETDEV_REGISTER) {
+               int err;
+
+               err = udp_tunnel_nic_register(dev);
+               if (err)
+                       netdev_WARN(dev, "failed to register for UDP tunnel offloads: %d", err);
+               return notifier_from_errno(err);
+       }
+       /* All other events will need the udp_tunnel_nic state */
+       utn = dev->udp_tunnel_nic;
+       if (!utn)
+               return NOTIFY_DONE;
+
+       if (event == NETDEV_UNREGISTER) {
+               udp_tunnel_nic_unregister(dev, utn);
+               return NOTIFY_OK;
+       }
+
+       /* All other events only matter if NIC has to be programmed open */
+       if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
+               return NOTIFY_DONE;
+
+       if (event == NETDEV_UP) {
+               WARN_ON(!udp_tunnel_nic_is_empty(dev, utn));
+               udp_tunnel_get_rx_info(dev);
+               return NOTIFY_OK;
+       }
+       if (event == NETDEV_GOING_DOWN) {
+               udp_tunnel_nic_flush(dev, utn);
+               return NOTIFY_OK;
+       }
+
+       return NOTIFY_DONE;
+}
+
+/* Registered with the netdevice notifier chain at module init. */
+static struct notifier_block udp_tunnel_nic_notifier_block __read_mostly = {
+       .notifier_call = udp_tunnel_nic_netdevice_event,
+};
+
+/* Module init: create the sync workqueue, publish the ops pointer (under
+ * rtnl, which serializes against readers of udp_tunnel_nic_ops), then
+ * register the netdevice notifier.  All steps are unwound on failure.
+ */
+static int __init udp_tunnel_nic_init_module(void)
+{
+       int err;
+
+       udp_tunnel_nic_workqueue = alloc_workqueue("udp_tunnel_nic", 0, 0);
+       if (!udp_tunnel_nic_workqueue)
+               return -ENOMEM;
+
+       rtnl_lock();
+       udp_tunnel_nic_ops = &__udp_tunnel_nic_ops;
+       rtnl_unlock();
+
+       err = register_netdevice_notifier(&udp_tunnel_nic_notifier_block);
+       if (err)
+               goto err_unset_ops;
+
+       return 0;
+
+err_unset_ops:
+       rtnl_lock();
+       udp_tunnel_nic_ops = NULL;
+       rtnl_unlock();
+       destroy_workqueue(udp_tunnel_nic_workqueue);
+       return err;
+}
+late_initcall(udp_tunnel_nic_init_module);
+
+/* Module exit: tear down in reverse order of init — notifier first, then
+ * clear the ops pointer under rtnl, then destroy the workqueue.
+ */
+static void __exit udp_tunnel_nic_cleanup_module(void)
+{
+       unregister_netdevice_notifier(&udp_tunnel_nic_notifier_block);
+
+       rtnl_lock();
+       udp_tunnel_nic_ops = NULL;
+       rtnl_unlock();
+
+       destroy_workqueue(udp_tunnel_nic_workqueue);
+}
+module_exit(udp_tunnel_nic_cleanup_module);
+
+MODULE_LICENSE("GPL");
diff --git a/net/ipv4/udp_tunnel_stub.c b/net/ipv4/udp_tunnel_stub.c
new file mode 100644 (file)
index 0000000..c4b2888
--- /dev/null
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2020 Facebook Inc.
+
+#include <net/udp_tunnel.h>
+
+/* Default (NULL) definition of the ops pointer; udp_tunnel_nic_init_module
+ * in udp_tunnel_nic.c points it at the real ops when that code is built.
+ * NOTE(review): exact Kconfig wiring between stub and module not visible
+ * here — confirm against net/ipv4/Makefile.
+ */
+const struct udp_tunnel_nic_ops *udp_tunnel_nic_ops;
+EXPORT_SYMBOL_GPL(udp_tunnel_nic_ops);
index 5936d66..bd8773b 100644 (file)
@@ -56,10 +56,6 @@ struct proto         udplite_prot = {
        .sysctl_mem        = sysctl_udp_mem,
        .obj_size          = sizeof(struct udp_sock),
        .h.udp_table       = &udplite_table,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_udp_setsockopt,
-       .compat_getsockopt = compat_udp_getsockopt,
-#endif
 };
 EXPORT_SYMBOL(udplite_prot);
 
index 992cf45..76bff79 100644 (file)
@@ -14,7 +14,7 @@ menuconfig IPV6
          <https://en.wikipedia.org/wiki/IPv6>.
          For specific information about IPv6 under Linux, see
          Documentation/networking/ipv6.rst and read the HOWTO at
-         <http://www.tldp.org/HOWTO/Linux+IPv6-HOWTO/>
+         <https://www.tldp.org/HOWTO/Linux+IPv6-HOWTO/>
 
          To compile this protocol support as a module, choose M here: the
          module will be called ipv6.
@@ -49,29 +49,31 @@ config IPV6_OPTIMISTIC_DAD
 
 config INET6_AH
        tristate "IPv6: AH transformation"
-       select XFRM_ALGO
-       select CRYPTO
-       select CRYPTO_HMAC
-       select CRYPTO_MD5
-       select CRYPTO_SHA1
+       select XFRM_AH
        help
-         Support for IPsec AH.
+         Support for IPsec AH (Authentication Header).
+
+         AH can be used with various authentication algorithms.  Besides
+         enabling AH support itself, this option enables the generic
+         implementations of the algorithms that RFC 8221 lists as MUST be
+         implemented.  If you need any other algorithms, you'll need to enable
+         them in the crypto API.  You should also enable accelerated
+         implementations of any needed algorithms when available.
 
          If unsure, say Y.
 
 config INET6_ESP
        tristate "IPv6: ESP transformation"
-       select XFRM_ALGO
-       select CRYPTO
-       select CRYPTO_AUTHENC
-       select CRYPTO_HMAC
-       select CRYPTO_MD5
-       select CRYPTO_CBC
-       select CRYPTO_SHA1
-       select CRYPTO_DES
-       select CRYPTO_ECHAINIV
+       select XFRM_ESP
        help
-         Support for IPsec ESP.
+         Support for IPsec ESP (Encapsulating Security Payload).
+
+         ESP can be used with various encryption and authentication algorithms.
+         Besides enabling ESP support itself, this option enables the generic
+         implementations of the algorithms that RFC 8221 lists as MUST be
+         implemented.  If you need any other algorithms, you'll need to enable
+         them in the crypto API.  You should also enable accelerated
+         implementations of any needed algorithms when available.
 
          If unsure, say Y.
 
index b304b88..0306509 100644 (file)
@@ -688,8 +688,6 @@ const struct proto_ops inet6_stream_ops = {
        .peek_len          = tcp_peek_len,
 #ifdef CONFIG_COMPAT
        .compat_ioctl      = inet6_compat_ioctl,
-       .compat_setsockopt = compat_sock_common_setsockopt,
-       .compat_getsockopt = compat_sock_common_getsockopt,
 #endif
        .set_rcvlowat      = tcp_set_rcvlowat,
 };
@@ -717,8 +715,6 @@ const struct proto_ops inet6_dgram_ops = {
        .set_peek_off      = sk_set_peek_off,
 #ifdef CONFIG_COMPAT
        .compat_ioctl      = inet6_compat_ioctl,
-       .compat_setsockopt = compat_sock_common_setsockopt,
-       .compat_getsockopt = compat_sock_common_getsockopt,
 #endif
 };
 
index 390bedd..cc8ad7d 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/route.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/icmp.h>
 
 #include <net/ipv6.h>
 #include <net/ndisc.h>
@@ -284,6 +285,17 @@ int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr,
 }
 EXPORT_SYMBOL_GPL(ip6_datagram_connect_v6_only);
 
+static void ipv6_icmp_error_rfc4884(const struct sk_buff *skb,
+                                   struct sock_ee_data_rfc4884 *out)
+{
+       switch (icmp6_hdr(skb)->icmp6_type) {
+       case ICMPV6_TIME_EXCEED:
+       case ICMPV6_DEST_UNREACH:
+               ip_icmp_error_rfc4884(skb, out, sizeof(struct icmp6hdr),
+                                     icmp6_hdr(skb)->icmp6_datagram_len * 8);
+       }
+}
+
 void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
                     __be16 port, u32 info, u8 *payload)
 {
@@ -313,6 +325,10 @@ void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
        serr->port = port;
 
        __skb_pull(skb, payload - skb->data);
+
+       if (inet6_sk(sk)->recverr_rfc4884)
+               ipv6_icmp_error_rfc4884(skb, &serr->ee.ee_rfc4884);
+
        skb_reset_transport_header(skb);
 
        if (sock_queue_err_skb(sk, skb))
index 55addea..1ca516f 100644 (file)
@@ -395,3 +395,4 @@ module_exit(esp6_offload_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
 MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET6, XFRM_PROTO_ESP);
+MODULE_DESCRIPTION("IPV6 GSO/GRO offload support");
index 5a8bbcd..374105e 100644 (file)
@@ -580,7 +580,7 @@ looped_back:
        hdr->segments_left--;
        i = n - hdr->segments_left;
 
-       buf = kzalloc(ipv6_rpl_srh_alloc_size(n + 1) * 2, GFP_ATOMIC);
+       buf = kcalloc(struct_size(hdr, segments.addr, n + 2), 2, GFP_ATOMIC);
        if (unlikely(!buf)) {
                kfree_skb(skb);
                return -1;
@@ -1232,7 +1232,6 @@ static void ipv6_renew_option(int renewtype,
  * @opt: original options
  * @newtype: option type to replace in @opt
  * @newopt: new option of type @newtype to replace (user-mem)
- * @newoptlen: length of @newopt
  *
  * Returns a new set of options which is a copy of @opt with the
  * option type @newtype replaced with @newopt.
index fafe556..6053ef8 100644 (file)
@@ -111,11 +111,13 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
        } else {
                struct rt6_info *rt;
 
-               rt = lookup(net, net->ipv6.fib6_local_tbl, fl6, skb, flags);
+               rt = pol_lookup_func(lookup,
+                            net, net->ipv6.fib6_local_tbl, fl6, skb, flags);
                if (rt != net->ipv6.ip6_null_entry && rt->dst.error != -EAGAIN)
                        return &rt->dst;
                ip6_rt_put_flags(rt, flags);
-               rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, skb, flags);
+               rt = pol_lookup_func(lookup,
+                            net, net->ipv6.fib6_main_tbl, fl6, skb, flags);
                if (rt->dst.error != -EAGAIN)
                        return &rt->dst;
                ip6_rt_put_flags(rt, flags);
@@ -226,7 +228,8 @@ static int __fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
                goto out;
        }
 
-       rt = lookup(net, table, flp6, arg->lookup_data, flags);
+       rt = pol_lookup_func(lookup,
+                            net, table, flp6, arg->lookup_data, flags);
        if (rt != net->ipv6.ip6_null_entry) {
                err = fib6_rule_saddr(net, rule, flags, flp6,
                                      ip6_dst_idev(&rt->dst)->dev);
index 091f941..430518a 100644 (file)
@@ -224,3 +224,4 @@ module_init(fou6_init);
 module_exit(fou6_fini);
 MODULE_AUTHOR("Tom Herbert <therbert@google.com>");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Foo over UDP (IPv6)");
index fc50003..a4e4912 100644 (file)
@@ -439,8 +439,8 @@ static int icmp6_iif(const struct sk_buff *skb)
 /*
  *     Send an ICMP message in response to a packet in error
  */
-static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
-                      const struct in6_addr *force_saddr)
+void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+               const struct in6_addr *force_saddr)
 {
        struct inet6_dev *idev = NULL;
        struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -566,7 +566,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
        fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, NULL);
        security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
 
-       sk->sk_mark = mark;
        np = inet6_sk(sk);
 
        if (!icmpv6_xrlim_allow(sk, type, &fl6))
@@ -583,6 +582,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
                fl6.flowi6_oif = np->ucast_oif;
 
        ipcm6_init_sk(&ipc6, np);
+       ipc6.sockc.mark = mark;
        fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
 
        dst = icmpv6_route_lookup(net, skb, sk, &fl6);
@@ -625,6 +625,7 @@ out:
 out_bh_enable:
        local_bh_enable();
 }
+EXPORT_SYMBOL(icmp6_send);
 
 /* Slightly more convenient version of icmp6_send.
  */
@@ -751,7 +752,6 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        sk = icmpv6_xmit_lock(net);
        if (!sk)
                goto out_bh_enable;
-       sk->sk_mark = mark;
        np = inet6_sk(sk);
 
        if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
@@ -779,6 +779,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
        ipcm6_init_sk(&ipc6, np);
        ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
        ipc6.tclass = ipv6_get_dsfield(ipv6_hdr(skb));
+       ipc6.sockc.mark = mark;
 
        if (ip6_append_data(sk, icmpv6_getfrag, &msg,
                            skb->len + sizeof(struct icmp6hdr),
index 257d2b6..36c58aa 100644 (file)
@@ -120,3 +120,4 @@ module_init(ila_init);
 module_exit(ila_fini);
 MODULE_AUTHOR("Tom Herbert <tom@herbertland.com>");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IPv6: Identifier Locator Addressing (ILA)");
index fbe9d42..2d3add9 100644 (file)
@@ -21,6 +21,8 @@
 #include <net/ip.h>
 #include <net/sock_reuseport.h>
 
+extern struct inet_hashinfo tcp_hashinfo;
+
 u32 inet6_ehashfn(const struct net *net,
                  const struct in6_addr *laddr, const u16 lport,
                  const struct in6_addr *faddr, const __be16 fport)
@@ -111,6 +113,23 @@ static inline int compute_score(struct sock *sk, struct net *net,
        return score;
 }
 
+static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk,
+                                           struct sk_buff *skb, int doff,
+                                           const struct in6_addr *saddr,
+                                           __be16 sport,
+                                           const struct in6_addr *daddr,
+                                           unsigned short hnum)
+{
+       struct sock *reuse_sk = NULL;
+       u32 phash;
+
+       if (sk->sk_reuseport) {
+               phash = inet6_ehashfn(net, daddr, hnum, saddr, sport);
+               reuse_sk = reuseport_select_sock(sk, phash, skb, doff);
+       }
+       return reuse_sk;
+}
+
 /* called with rcu_read_lock() */
 static struct sock *inet6_lhash2_lookup(struct net *net,
                struct inet_listen_hashbucket *ilb2,
@@ -123,21 +142,17 @@ static struct sock *inet6_lhash2_lookup(struct net *net,
        struct inet_connection_sock *icsk;
        struct sock *sk, *result = NULL;
        int score, hiscore = 0;
-       u32 phash = 0;
 
        inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) {
                sk = (struct sock *)icsk;
                score = compute_score(sk, net, hnum, daddr, dif, sdif,
                                      exact_dif);
                if (score > hiscore) {
-                       if (sk->sk_reuseport) {
-                               phash = inet6_ehashfn(net, daddr, hnum,
-                                                     saddr, sport);
-                               result = reuseport_select_sock(sk, phash,
-                                                              skb, doff);
-                               if (result)
-                                       return result;
-                       }
+                       result = lookup_reuseport(net, sk, skb, doff,
+                                                 saddr, sport, daddr, hnum);
+                       if (result)
+                               return result;
+
                        result = sk;
                        hiscore = score;
                }
@@ -146,6 +161,31 @@ static struct sock *inet6_lhash2_lookup(struct net *net,
        return result;
 }
 
+static inline struct sock *inet6_lookup_run_bpf(struct net *net,
+                                               struct inet_hashinfo *hashinfo,
+                                               struct sk_buff *skb, int doff,
+                                               const struct in6_addr *saddr,
+                                               const __be16 sport,
+                                               const struct in6_addr *daddr,
+                                               const u16 hnum)
+{
+       struct sock *sk, *reuse_sk;
+       bool no_reuseport;
+
+       if (hashinfo != &tcp_hashinfo)
+               return NULL; /* only TCP is supported */
+
+       no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP,
+                                           saddr, sport, daddr, hnum, &sk);
+       if (no_reuseport || IS_ERR_OR_NULL(sk))
+               return sk;
+
+       reuse_sk = lookup_reuseport(net, sk, skb, doff, saddr, sport, daddr, hnum);
+       if (reuse_sk)
+               sk = reuse_sk;
+       return sk;
+}
+
 struct sock *inet6_lookup_listener(struct net *net,
                struct inet_hashinfo *hashinfo,
                struct sk_buff *skb, int doff,
@@ -157,6 +197,14 @@ struct sock *inet6_lookup_listener(struct net *net,
        struct sock *result = NULL;
        unsigned int hash2;
 
+       /* Lookup redirect from BPF */
+       if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
+               result = inet6_lookup_run_bpf(net, hashinfo, skb, doff,
+                                             saddr, sport, daddr, hnum);
+               if (result)
+                       goto done;
+       }
+
        hash2 = ipv6_portaddr_hash(net, daddr, hnum);
        ilb2 = inet_lhash2_bucket(hashinfo, hash2);
 
index 49ee89b..25a90f3 100644 (file)
@@ -314,7 +314,8 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
 {
        struct rt6_info *rt;
 
-       rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, skb, flags);
+       rt = pol_lookup_func(lookup,
+                       net, net->ipv6.fib6_main_tbl, fl6, skb, flags);
        if (rt->dst.error == -EAGAIN) {
                ip6_rt_put_flags(rt, flags);
                rt = net->ipv6.ip6_null_entry;
index ce4fbba..215b6f5 100644 (file)
@@ -371,7 +371,7 @@ static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned lo
 
 static struct ip6_flowlabel *
 fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
-         char __user *optval, int optlen, int *err_p)
+         sockptr_t optval, int optlen, int *err_p)
 {
        struct ip6_flowlabel *fl = NULL;
        int olen;
@@ -401,7 +401,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
                memset(fl->opt, 0, sizeof(*fl->opt));
                fl->opt->tot_len = sizeof(*fl->opt) + olen;
                err = -EFAULT;
-               if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
+               sockptr_advance(optval, CMSG_ALIGN(sizeof(*freq)));
+               if (copy_from_sockptr(fl->opt + 1, optval, olen))
                        goto done;
 
                msg.msg_controllen = olen;
@@ -533,187 +534,211 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
        return -ENOENT;
 }
 
-int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
+#define socklist_dereference(__sflp) \
+       rcu_dereference_protected(__sflp, lockdep_is_held(&ip6_sk_fl_lock))
+
+static int ipv6_flowlabel_put(struct sock *sk, struct in6_flowlabel_req *freq)
 {
-       int uninitialized_var(err);
-       struct net *net = sock_net(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
-       struct in6_flowlabel_req freq;
-       struct ipv6_fl_socklist *sfl1 = NULL;
-       struct ipv6_fl_socklist *sfl;
        struct ipv6_fl_socklist __rcu **sflp;
-       struct ip6_flowlabel *fl, *fl1 = NULL;
+       struct ipv6_fl_socklist *sfl;
 
+       if (freq->flr_flags & IPV6_FL_F_REFLECT) {
+               if (sk->sk_protocol != IPPROTO_TCP)
+                       return -ENOPROTOOPT;
+               if (!np->repflow)
+                       return -ESRCH;
+               np->flow_label = 0;
+               np->repflow = 0;
+               return 0;
+       }
 
-       if (optlen < sizeof(freq))
-               return -EINVAL;
+       spin_lock_bh(&ip6_sk_fl_lock);
+       for (sflp = &np->ipv6_fl_list;
+            (sfl = socklist_dereference(*sflp)) != NULL;
+            sflp = &sfl->next) {
+               if (sfl->fl->label == freq->flr_label)
+                       goto found;
+       }
+       spin_unlock_bh(&ip6_sk_fl_lock);
+       return -ESRCH;
+found:
+       if (freq->flr_label == (np->flow_label & IPV6_FLOWLABEL_MASK))
+               np->flow_label &= ~IPV6_FLOWLABEL_MASK;
+       *sflp = sfl->next;
+       spin_unlock_bh(&ip6_sk_fl_lock);
+       fl_release(sfl->fl);
+       kfree_rcu(sfl, rcu);
+       return 0;
+}
 
-       if (copy_from_user(&freq, optval, sizeof(freq)))
-               return -EFAULT;
+static int ipv6_flowlabel_renew(struct sock *sk, struct in6_flowlabel_req *freq)
+{
+       struct ipv6_pinfo *np = inet6_sk(sk);
+       struct net *net = sock_net(sk);
+       struct ipv6_fl_socklist *sfl;
+       int err;
 
-       switch (freq.flr_action) {
-       case IPV6_FL_A_PUT:
-               if (freq.flr_flags & IPV6_FL_F_REFLECT) {
-                       if (sk->sk_protocol != IPPROTO_TCP)
-                               return -ENOPROTOOPT;
-                       if (!np->repflow)
-                               return -ESRCH;
-                       np->flow_label = 0;
-                       np->repflow = 0;
-                       return 0;
-               }
-               spin_lock_bh(&ip6_sk_fl_lock);
-               for (sflp = &np->ipv6_fl_list;
-                    (sfl = rcu_dereference_protected(*sflp,
-                                                     lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
-                    sflp = &sfl->next) {
-                       if (sfl->fl->label == freq.flr_label) {
-                               if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
-                                       np->flow_label &= ~IPV6_FLOWLABEL_MASK;
-                               *sflp = sfl->next;
-                               spin_unlock_bh(&ip6_sk_fl_lock);
-                               fl_release(sfl->fl);
-                               kfree_rcu(sfl, rcu);
-                               return 0;
-                       }
+       rcu_read_lock_bh();
+       for_each_sk_fl_rcu(np, sfl) {
+               if (sfl->fl->label == freq->flr_label) {
+                       err = fl6_renew(sfl->fl, freq->flr_linger,
+                                       freq->flr_expires);
+                       rcu_read_unlock_bh();
+                       return err;
                }
-               spin_unlock_bh(&ip6_sk_fl_lock);
-               return -ESRCH;
+       }
+       rcu_read_unlock_bh();
 
-       case IPV6_FL_A_RENEW:
-               rcu_read_lock_bh();
-               for_each_sk_fl_rcu(np, sfl) {
-                       if (sfl->fl->label == freq.flr_label) {
-                               err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
-                               rcu_read_unlock_bh();
-                               return err;
-                       }
-               }
-               rcu_read_unlock_bh();
+       if (freq->flr_share == IPV6_FL_S_NONE &&
+           ns_capable(net->user_ns, CAP_NET_ADMIN)) {
+               struct ip6_flowlabel *fl = fl_lookup(net, freq->flr_label);
 
-               if (freq.flr_share == IPV6_FL_S_NONE &&
-                   ns_capable(net->user_ns, CAP_NET_ADMIN)) {
-                       fl = fl_lookup(net, freq.flr_label);
-                       if (fl) {
-                               err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
-                               fl_release(fl);
-                               return err;
-                       }
+               if (fl) {
+                       err = fl6_renew(fl, freq->flr_linger,
+                                       freq->flr_expires);
+                       fl_release(fl);
+                       return err;
                }
-               return -ESRCH;
-
-       case IPV6_FL_A_GET:
-               if (freq.flr_flags & IPV6_FL_F_REFLECT) {
-                       struct net *net = sock_net(sk);
-                       if (net->ipv6.sysctl.flowlabel_consistency) {
-                               net_info_ratelimited("Can not set IPV6_FL_F_REFLECT if flowlabel_consistency sysctl is enable\n");
-                               return -EPERM;
-                       }
+       }
+       return -ESRCH;
+}
 
-                       if (sk->sk_protocol != IPPROTO_TCP)
-                               return -ENOPROTOOPT;
+static int ipv6_flowlabel_get(struct sock *sk, struct in6_flowlabel_req *freq,
+               sockptr_t optval, int optlen)
+{
+       struct ipv6_fl_socklist *sfl, *sfl1 = NULL;
+       struct ip6_flowlabel *fl, *fl1 = NULL;
+       struct ipv6_pinfo *np = inet6_sk(sk);
+       struct net *net = sock_net(sk);
+       int uninitialized_var(err);
 
-                       np->repflow = 1;
-                       return 0;
+       if (freq->flr_flags & IPV6_FL_F_REFLECT) {
+               if (net->ipv6.sysctl.flowlabel_consistency) {
+                       net_info_ratelimited("Can not set IPV6_FL_F_REFLECT if flowlabel_consistency sysctl is enable\n");
+                       return -EPERM;
                }
 
-               if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
-                       return -EINVAL;
+               if (sk->sk_protocol != IPPROTO_TCP)
+                       return -ENOPROTOOPT;
+               np->repflow = 1;
+               return 0;
+       }
 
-               if (net->ipv6.sysctl.flowlabel_state_ranges &&
-                   (freq.flr_label & IPV6_FLOWLABEL_STATELESS_FLAG))
-                       return -ERANGE;
+       if (freq->flr_label & ~IPV6_FLOWLABEL_MASK)
+               return -EINVAL;
+       if (net->ipv6.sysctl.flowlabel_state_ranges &&
+           (freq->flr_label & IPV6_FLOWLABEL_STATELESS_FLAG))
+               return -ERANGE;
 
-               fl = fl_create(net, sk, &freq, optval, optlen, &err);
-               if (!fl)
-                       return err;
-               sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
+       fl = fl_create(net, sk, freq, optval, optlen, &err);
+       if (!fl)
+               return err;
 
-               if (freq.flr_label) {
-                       err = -EEXIST;
-                       rcu_read_lock_bh();
-                       for_each_sk_fl_rcu(np, sfl) {
-                               if (sfl->fl->label == freq.flr_label) {
-                                       if (freq.flr_flags&IPV6_FL_F_EXCL) {
-                                               rcu_read_unlock_bh();
-                                               goto done;
-                                       }
-                                       fl1 = sfl->fl;
-                                       if (!atomic_inc_not_zero(&fl1->users))
-                                               fl1 = NULL;
-                                       break;
+       sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);
+
+       if (freq->flr_label) {
+               err = -EEXIST;
+               rcu_read_lock_bh();
+               for_each_sk_fl_rcu(np, sfl) {
+                       if (sfl->fl->label == freq->flr_label) {
+                               if (freq->flr_flags & IPV6_FL_F_EXCL) {
+                                       rcu_read_unlock_bh();
+                                       goto done;
                                }
+                               fl1 = sfl->fl;
+                               if (!atomic_inc_not_zero(&fl1->users))
+                                       fl1 = NULL;
+                               break;
                        }
-                       rcu_read_unlock_bh();
+               }
+               rcu_read_unlock_bh();
 
-                       if (!fl1)
-                               fl1 = fl_lookup(net, freq.flr_label);
-                       if (fl1) {
+               if (!fl1)
+                       fl1 = fl_lookup(net, freq->flr_label);
+               if (fl1) {
 recheck:
-                               err = -EEXIST;
-                               if (freq.flr_flags&IPV6_FL_F_EXCL)
-                                       goto release;
-                               err = -EPERM;
-                               if (fl1->share == IPV6_FL_S_EXCL ||
-                                   fl1->share != fl->share ||
-                                   ((fl1->share == IPV6_FL_S_PROCESS) &&
-                                    (fl1->owner.pid != fl->owner.pid)) ||
-                                   ((fl1->share == IPV6_FL_S_USER) &&
-                                    !uid_eq(fl1->owner.uid, fl->owner.uid)))
-                                       goto release;
-
-                               err = -ENOMEM;
-                               if (!sfl1)
-                                       goto release;
-                               if (fl->linger > fl1->linger)
-                                       fl1->linger = fl->linger;
-                               if ((long)(fl->expires - fl1->expires) > 0)
-                                       fl1->expires = fl->expires;
-                               fl_link(np, sfl1, fl1);
-                               fl_free(fl);
-                               return 0;
+                       err = -EEXIST;
+                       if (freq->flr_flags&IPV6_FL_F_EXCL)
+                               goto release;
+                       err = -EPERM;
+                       if (fl1->share == IPV6_FL_S_EXCL ||
+                           fl1->share != fl->share ||
+                           ((fl1->share == IPV6_FL_S_PROCESS) &&
+                            (fl1->owner.pid != fl->owner.pid)) ||
+                           ((fl1->share == IPV6_FL_S_USER) &&
+                            !uid_eq(fl1->owner.uid, fl->owner.uid)))
+                               goto release;
+
+                       err = -ENOMEM;
+                       if (!sfl1)
+                               goto release;
+                       if (fl->linger > fl1->linger)
+                               fl1->linger = fl->linger;
+                       if ((long)(fl->expires - fl1->expires) > 0)
+                               fl1->expires = fl->expires;
+                       fl_link(np, sfl1, fl1);
+                       fl_free(fl);
+                       return 0;
 
 release:
-                               fl_release(fl1);
-                               goto done;
-                       }
-               }
-               err = -ENOENT;
-               if (!(freq.flr_flags&IPV6_FL_F_CREATE))
+                       fl_release(fl1);
                        goto done;
+               }
+       }
+       err = -ENOENT;
+       if (!(freq->flr_flags & IPV6_FL_F_CREATE))
+               goto done;
 
-               err = -ENOMEM;
-               if (!sfl1)
-                       goto done;
+       err = -ENOMEM;
+       if (!sfl1)
+               goto done;
 
-               err = mem_check(sk);
-               if (err != 0)
-                       goto done;
+       err = mem_check(sk);
+       if (err != 0)
+               goto done;
 
-               fl1 = fl_intern(net, fl, freq.flr_label);
-               if (fl1)
-                       goto recheck;
+       fl1 = fl_intern(net, fl, freq->flr_label);
+       if (fl1)
+               goto recheck;
 
-               if (!freq.flr_label) {
-                       if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
-                                        &fl->label, sizeof(fl->label))) {
-                               /* Intentionally ignore fault. */
-                       }
+       if (!freq->flr_label) {
+               sockptr_advance(optval,
+                               offsetof(struct in6_flowlabel_req, flr_label));
+               if (copy_to_sockptr(optval, &fl->label, sizeof(fl->label))) {
+                       /* Intentionally ignore fault. */
                }
-
-               fl_link(np, sfl1, fl);
-               return 0;
-
-       default:
-               return -EINVAL;
        }
 
+       fl_link(np, sfl1, fl);
+       return 0;
 done:
        fl_free(fl);
        kfree(sfl1);
        return err;
 }
 
+int ipv6_flowlabel_opt(struct sock *sk, sockptr_t optval, int optlen)
+{
+       struct in6_flowlabel_req freq;
+
+       if (optlen < sizeof(freq))
+               return -EINVAL;
+       if (copy_from_sockptr(&freq, optval, sizeof(freq)))
+               return -EFAULT;
+
+       switch (freq.flr_action) {
+       case IPV6_FL_A_PUT:
+               return ipv6_flowlabel_put(sk, &freq);
+       case IPV6_FL_A_RENEW:
+               return ipv6_flowlabel_renew(sk, &freq);
+       case IPV6_FL_A_GET:
+               return ipv6_flowlabel_get(sk, &freq, optval, optlen);
+       default:
+               return -EINVAL;
+       }
+}
+
 #ifdef CONFIG_PROC_FS
 
 struct ip6fl_iter_state {
index 781ca8c..6532bde 100644 (file)
@@ -127,6 +127,7 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
                        gre_proto == htons(ETH_P_ERSPAN2)) ?
                       ARPHRD_ETHER : ARPHRD_IP6GRE;
        int score, cand_score = 4;
+       struct net_device *ndev;
 
        for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
                if (!ipv6_addr_equal(local, &t->parms.laddr) ||
@@ -238,9 +239,9 @@ static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
        if (t && t->dev->flags & IFF_UP)
                return t;
 
-       dev = ign->fb_tunnel_dev;
-       if (dev && dev->flags & IFF_UP)
-               return netdev_priv(dev);
+       ndev = READ_ONCE(ign->fb_tunnel_dev);
+       if (ndev && ndev->flags & IFF_UP)
+               return netdev_priv(ndev);
 
        return NULL;
 }
@@ -413,6 +414,8 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
 
        ip6gre_tunnel_unlink_md(ign, t);
        ip6gre_tunnel_unlink(ign, t);
+       if (ign->fb_tunnel_dev == dev)
+               WRITE_ONCE(ign->fb_tunnel_dev, NULL);
        dst_cache_reset(&t->dst_cache);
        dev_put(dev);
 }
index e008675..70c8c2f 100644 (file)
@@ -9,6 +9,8 @@
 
 #if IS_ENABLED(CONFIG_IPV6)
 
+#if !IS_BUILTIN(CONFIG_IPV6)
+
 static ip6_icmp_send_t __rcu *ip6_icmp_send;
 
 int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
@@ -37,14 +39,12 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 
        rcu_read_lock();
        send = rcu_dereference(ip6_icmp_send);
-
-       if (!send)
-               goto out;
-       send(skb, type, code, info, NULL);
-out:
+       if (send)
+               send(skb, type, code, info, NULL);
        rcu_read_unlock();
 }
 EXPORT_SYMBOL(icmpv6_send);
+#endif
 
 #if IS_ENABLED(CONFIG_NF_NAT)
 #include <net/netfilter/nf_conntrack.h>
index 7fbb447..a80f90b 100644 (file)
@@ -13,6 +13,8 @@
 #include <net/protocol.h>
 #include <net/ipv6.h>
 #include <net/inet_common.h>
+#include <net/tcp.h>
+#include <net/udp.h>
 
 #include "ip6_offload.h"
 
@@ -177,10 +179,6 @@ static int ipv6_exthdrs_len(struct ipv6hdr *iph,
        return len;
 }
 
-INDIRECT_CALLABLE_DECLARE(struct sk_buff *tcp6_gro_receive(struct list_head *,
-                                                          struct sk_buff *));
-INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
-                                                          struct sk_buff *));
 INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
                                                         struct sk_buff *skb)
 {
@@ -319,8 +317,6 @@ static struct sk_buff *ip4ip6_gro_receive(struct list_head *head,
        return inet_gro_receive(head, skb);
 }
 
-INDIRECT_CALLABLE_DECLARE(int tcp6_gro_complete(struct sk_buff *, int));
-INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));
 INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
 {
        const struct net_offload *ops;
index 8a8c2d0..c78e67d 100644 (file)
@@ -1118,6 +1118,7 @@ out_err_release:
 
 /**
  *     ip6_dst_lookup - perform route lookup on flow
+ *     @net: Network namespace to perform lookup in
  *     @sk: socket which provides route info
  *     @dst: pointer to dst_entry * for result
  *     @fl6: flow to lookup
@@ -1136,6 +1137,7 @@ EXPORT_SYMBOL_GPL(ip6_dst_lookup);
 
 /**
  *     ip6_dst_lookup_flow - perform route lookup on flow with ipsec
+ *     @net: Network namespace to perform lookup in
  *     @sk: socket which provides route info
  *     @fl6: flow to lookup
  *     @final_dst: final destination address for ipsec lookup
@@ -1202,11 +1204,11 @@ EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
  *      @skb: Packet for which lookup is done
  *      @dev: Tunnel device
  *      @net: Network namespace of tunnel device
- *      @sk: Socket which provides route info
+ *      @sock: Socket which provides route info
  *      @saddr: Memory to store the src ip address
  *      @info: Tunnel information
  *      @protocol: IP protocol
- *      @use_cahce: Flag to enable cache usage
+ *      @use_cache: Flag to enable cache usage
  *      This function performs a route lookup on a tunnel
  *
  *      It returns a valid dst pointer and stores src address to be used in
index 821d96c..f635914 100644 (file)
@@ -124,8 +124,12 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
        return &dev->stats;
 }
 
+#define for_each_ip6_tunnel_rcu(start) \
+       for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+
 /**
  * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
+ *   @net: network namespace
  *   @link: ifindex of underlying interface
  *   @remote: the address of the tunnel exit-point
  *   @local: the address of the tunnel entry-point
@@ -136,9 +140,6 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
  *   else %NULL
  **/
 
-#define for_each_ip6_tunnel_rcu(start) \
-       for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
-
 static struct ip6_tnl *
 ip6_tnl_lookup(struct net *net, int link,
               const struct in6_addr *remote, const struct in6_addr *local)
@@ -302,8 +303,8 @@ out:
 
 /**
  * ip6_tnl_create - create a new tunnel
+ *   @net: network namespace
  *   @p: tunnel parameters
- *   @pt: pointer to new tunnel
  *
  * Description:
  *   Create tunnel matching given parameters.
@@ -351,6 +352,7 @@ failed:
 
 /**
  * ip6_tnl_locate - find or create tunnel matching given parameters
+ *   @net: network namespace
  *   @p: tunnel parameters
  *   @create: != 0 if allowed to create new tunnel if no match found
  *
@@ -1846,6 +1848,7 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
 static void ip6_tnl_dev_setup(struct net_device *dev)
 {
        dev->netdev_ops = &ip6_tnl_netdev_ops;
+       dev->header_ops = &ip_tunnel_header_ops;
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6_dev_free;
 
index 1147f64..0d96416 100644 (file)
@@ -905,6 +905,7 @@ static const struct net_device_ops vti6_netdev_ops = {
 static void vti6_dev_setup(struct net_device *dev)
 {
        dev->netdev_ops = &vti6_netdev_ops;
+       dev->header_ops = &ip_tunnel_header_ops;
        dev->needs_free_netdev = true;
        dev->priv_destructor = vti6_dev_free;
 
index 1f4d20e..06b0d2c 100644 (file)
@@ -1629,7 +1629,8 @@ EXPORT_SYMBOL(mroute6_is_socket);
  *     MOSPF/PIM router set up we can clean this up.
  */
 
-int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
+int ip6_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval,
+                         unsigned int optlen)
 {
        int ret, parent = 0;
        struct mif6ctl vif;
@@ -1665,7 +1666,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
        case MRT6_ADD_MIF:
                if (optlen < sizeof(vif))
                        return -EINVAL;
-               if (copy_from_user(&vif, optval, sizeof(vif)))
+               if (copy_from_sockptr(&vif, optval, sizeof(vif)))
                        return -EFAULT;
                if (vif.mif6c_mifi >= MAXMIFS)
                        return -ENFILE;
@@ -1678,7 +1679,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
        case MRT6_DEL_MIF:
                if (optlen < sizeof(mifi_t))
                        return -EINVAL;
-               if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
+               if (copy_from_sockptr(&mifi, optval, sizeof(mifi_t)))
                        return -EFAULT;
                rtnl_lock();
                ret = mif6_delete(mrt, mifi, 0, NULL);
@@ -1697,7 +1698,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
        case MRT6_DEL_MFC_PROXY:
                if (optlen < sizeof(mfc))
                        return -EINVAL;
-               if (copy_from_user(&mfc, optval, sizeof(mfc)))
+               if (copy_from_sockptr(&mfc, optval, sizeof(mfc)))
                        return -EFAULT;
                if (parent == 0)
                        parent = mfc.mf6cc_parent;
@@ -1718,7 +1719,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
 
                if (optlen != sizeof(flags))
                        return -EINVAL;
-               if (get_user(flags, (int __user *)optval))
+               if (copy_from_sockptr(&flags, optval, sizeof(flags)))
                        return -EFAULT;
                rtnl_lock();
                mroute_clean_tables(mrt, flags);
@@ -1735,7 +1736,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
 
                if (optlen != sizeof(v))
                        return -EINVAL;
-               if (get_user(v, (int __user *)optval))
+               if (copy_from_sockptr(&v, optval, sizeof(v)))
                        return -EFAULT;
                mrt->mroute_do_assert = v;
                return 0;
@@ -1748,7 +1749,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
 
                if (optlen != sizeof(v))
                        return -EINVAL;
-               if (get_user(v, (int __user *)optval))
+               if (copy_from_sockptr(&v, optval, sizeof(v)))
                        return -EFAULT;
                v = !!v;
                rtnl_lock();
@@ -1769,7 +1770,7 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
 
                if (optlen != sizeof(u32))
                        return -EINVAL;
-               if (get_user(v, (u32 __user *)optval))
+               if (copy_from_sockptr(&v, optval, sizeof(v)))
                        return -EFAULT;
                /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
                if (v != RT_TABLE_DEFAULT && v >= 100000000)
index 20576e8..20c7409 100644 (file)
@@ -136,13 +136,42 @@ static bool setsockopt_needs_rtnl(int optname)
        return false;
 }
 
+static int copy_group_source_from_sockptr(struct group_source_req *greqs,
+               sockptr_t optval, int optlen)
+{
+       if (in_compat_syscall()) {
+               struct compat_group_source_req gr32;
+
+               if (optlen < sizeof(gr32))
+                       return -EINVAL;
+               if (copy_from_sockptr(&gr32, optval, sizeof(gr32)))
+                       return -EFAULT;
+               greqs->gsr_interface = gr32.gsr_interface;
+               greqs->gsr_group = gr32.gsr_group;
+               greqs->gsr_source = gr32.gsr_source;
+       } else {
+               if (optlen < sizeof(*greqs))
+                       return -EINVAL;
+               if (copy_from_sockptr(greqs, optval, sizeof(*greqs)))
+                       return -EFAULT;
+       }
+
+       return 0;
+}
+
 static int do_ipv6_mcast_group_source(struct sock *sk, int optname,
-                                     struct group_source_req *greqs)
+               sockptr_t optval, int optlen)
 {
+       struct group_source_req greqs;
        int omode, add;
+       int ret;
+
+       ret = copy_group_source_from_sockptr(&greqs, optval, optlen);
+       if (ret)
+               return ret;
 
-       if (greqs->gsr_group.ss_family != AF_INET6 ||
-           greqs->gsr_source.ss_family != AF_INET6)
+       if (greqs.gsr_group.ss_family != AF_INET6 ||
+           greqs.gsr_source.ss_family != AF_INET6)
                return -EADDRNOTAVAIL;
 
        if (optname == MCAST_BLOCK_SOURCE) {
@@ -155,8 +184,8 @@ static int do_ipv6_mcast_group_source(struct sock *sk, int optname,
                struct sockaddr_in6 *psin6;
                int retv;
 
-               psin6 = (struct sockaddr_in6 *)&greqs->gsr_group;
-               retv = ipv6_sock_mc_join_ssm(sk, greqs->gsr_interface,
+               psin6 = (struct sockaddr_in6 *)&greqs.gsr_group;
+               retv = ipv6_sock_mc_join_ssm(sk, greqs.gsr_interface,
                                             &psin6->sin6_addr,
                                             MCAST_INCLUDE);
                /* prior join w/ different source is ok */
@@ -168,11 +197,200 @@ static int do_ipv6_mcast_group_source(struct sock *sk, int optname,
                omode = MCAST_INCLUDE;
                add = 0;
        }
-       return ip6_mc_source(add, omode, sk, greqs);
+       return ip6_mc_source(add, omode, sk, &greqs);
+}
+
+static int ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
+               int optlen)
+{
+       struct group_filter *gsf;
+       int ret;
+
+       if (optlen < GROUP_FILTER_SIZE(0))
+               return -EINVAL;
+       if (optlen > sysctl_optmem_max)
+               return -ENOBUFS;
+
+       gsf = memdup_sockptr(optval, optlen);
+       if (IS_ERR(gsf))
+               return PTR_ERR(gsf);
+
+       /* numsrc >= (4G-140)/128 overflow in 32 bits */
+       ret = -ENOBUFS;
+       if (gsf->gf_numsrc >= 0x1ffffffU ||
+           gsf->gf_numsrc > sysctl_mld_max_msf)
+               goto out_free_gsf;
+
+       ret = -EINVAL;
+       if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen)
+               goto out_free_gsf;
+
+       ret = ip6_mc_msfilter(sk, gsf, gsf->gf_slist);
+out_free_gsf:
+       kfree(gsf);
+       return ret;
+}
+
+static int compat_ipv6_set_mcast_msfilter(struct sock *sk, sockptr_t optval,
+               int optlen)
+{
+       const int size0 = offsetof(struct compat_group_filter, gf_slist);
+       struct compat_group_filter *gf32;
+       void *p;
+       int ret;
+       int n;
+
+       if (optlen < size0)
+               return -EINVAL;
+       if (optlen > sysctl_optmem_max - 4)
+               return -ENOBUFS;
+
+       p = kmalloc(optlen + 4, GFP_KERNEL);
+       if (!p)
+               return -ENOMEM;
+
+       gf32 = p + 4; /* we want ->gf_group and ->gf_slist aligned */
+       ret = -EFAULT;
+       if (copy_from_sockptr(gf32, optval, optlen))
+               goto out_free_p;
+
+       /* numsrc >= (4G-140)/128 overflow in 32 bits */
+       ret = -ENOBUFS;
+       n = gf32->gf_numsrc;
+       if (n >= 0x1ffffffU || n > sysctl_mld_max_msf)
+               goto out_free_p;
+
+       ret = -EINVAL;
+       if (offsetof(struct compat_group_filter, gf_slist[n]) > optlen)
+               goto out_free_p;
+
+       ret = ip6_mc_msfilter(sk, &(struct group_filter){
+                       .gf_interface = gf32->gf_interface,
+                       .gf_group = gf32->gf_group,
+                       .gf_fmode = gf32->gf_fmode,
+                       .gf_numsrc = gf32->gf_numsrc}, gf32->gf_slist);
+
+out_free_p:
+       kfree(p);
+       return ret;
+}
+
+static int ipv6_mcast_join_leave(struct sock *sk, int optname,
+               sockptr_t optval, int optlen)
+{
+       struct sockaddr_in6 *psin6;
+       struct group_req greq;
+
+       if (optlen < sizeof(greq))
+               return -EINVAL;
+       if (copy_from_sockptr(&greq, optval, sizeof(greq)))
+               return -EFAULT;
+
+       if (greq.gr_group.ss_family != AF_INET6)
+               return -EADDRNOTAVAIL;
+       psin6 = (struct sockaddr_in6 *)&greq.gr_group;
+       if (optname == MCAST_JOIN_GROUP)
+               return ipv6_sock_mc_join(sk, greq.gr_interface,
+                                        &psin6->sin6_addr);
+       return ipv6_sock_mc_drop(sk, greq.gr_interface, &psin6->sin6_addr);
+}
+
+static int compat_ipv6_mcast_join_leave(struct sock *sk, int optname,
+               sockptr_t optval, int optlen)
+{
+       struct compat_group_req gr32;
+       struct sockaddr_in6 *psin6;
+
+       if (optlen < sizeof(gr32))
+               return -EINVAL;
+       if (copy_from_sockptr(&gr32, optval, sizeof(gr32)))
+               return -EFAULT;
+
+       if (gr32.gr_group.ss_family != AF_INET6)
+               return -EADDRNOTAVAIL;
+       psin6 = (struct sockaddr_in6 *)&gr32.gr_group;
+       if (optname == MCAST_JOIN_GROUP)
+               return ipv6_sock_mc_join(sk, gr32.gr_interface,
+                                       &psin6->sin6_addr);
+       return ipv6_sock_mc_drop(sk, gr32.gr_interface, &psin6->sin6_addr);
+}
+
+static int ipv6_set_opt_hdr(struct sock *sk, int optname, sockptr_t optval,
+               int optlen)
+{
+       struct ipv6_pinfo *np = inet6_sk(sk);
+       struct ipv6_opt_hdr *new = NULL;
+       struct net *net = sock_net(sk);
+       struct ipv6_txoptions *opt;
+       int err;
+
+       /* hop-by-hop / destination options are privileged option */
+       if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
+               return -EPERM;
+
+       /* remove any sticky options header with a zero option
+        * length, per RFC3542.
+        */
+       if (optlen > 0) {
+               if (sockptr_is_null(optval))
+                       return -EINVAL;
+               if (optlen < sizeof(struct ipv6_opt_hdr) ||
+                   optlen & 0x7 ||
+                   optlen > 8 * 255)
+                       return -EINVAL;
+
+               new = memdup_sockptr(optval, optlen);
+               if (IS_ERR(new))
+                       return PTR_ERR(new);
+               if (unlikely(ipv6_optlen(new) > optlen)) {
+                       kfree(new);
+                       return -EINVAL;
+               }
+       }
+
+       opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
+       opt = ipv6_renew_options(sk, opt, optname, new);
+       kfree(new);
+       if (IS_ERR(opt))
+               return PTR_ERR(opt);
+
+       /* routing header option needs extra check */
+       err = -EINVAL;
+       if (optname == IPV6_RTHDR && opt && opt->srcrt) {
+               struct ipv6_rt_hdr *rthdr = opt->srcrt;
+               switch (rthdr->type) {
+#if IS_ENABLED(CONFIG_IPV6_MIP6)
+               case IPV6_SRCRT_TYPE_2:
+                       if (rthdr->hdrlen != 2 || rthdr->segments_left != 1)
+                               goto sticky_done;
+                       break;
+#endif
+               case IPV6_SRCRT_TYPE_4:
+               {
+                       struct ipv6_sr_hdr *srh =
+                               (struct ipv6_sr_hdr *)opt->srcrt;
+
+                       if (!seg6_validate_srh(srh, optlen, false))
+                               goto sticky_done;
+                       break;
+               }
+               default:
+                       goto sticky_done;
+               }
+       }
+
+       err = 0;
+       opt = ipv6_update_options(sk, opt);
+sticky_done:
+       if (opt) {
+               atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
+               txopt_put(opt);
+       }
+       return err;
 }
 
 static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, unsigned int optlen)
+                  sockptr_t optval, unsigned int optlen)
 {
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct net *net = sock_net(sk);
@@ -180,11 +398,11 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
        int retv = -ENOPROTOOPT;
        bool needs_rtnl = setsockopt_needs_rtnl(optname);
 
-       if (!optval)
+       if (sockptr_is_null(optval))
                val = 0;
        else {
                if (optlen >= sizeof(int)) {
-                       if (get_user(val, (int __user *) optval))
+                       if (copy_from_sockptr(&val, optval, sizeof(val)))
                                return -EFAULT;
                } else
                        val = 0;
@@ -435,82 +653,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
        case IPV6_RTHDRDSTOPTS:
        case IPV6_RTHDR:
        case IPV6_DSTOPTS:
-       {
-               struct ipv6_txoptions *opt;
-               struct ipv6_opt_hdr *new = NULL;
-
-               /* hop-by-hop / destination options are privileged option */
-               retv = -EPERM;
-               if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
-                       break;
-
-               /* remove any sticky options header with a zero option
-                * length, per RFC3542.
-                */
-               if (optlen == 0)
-                       optval = NULL;
-               else if (!optval)
-                       goto e_inval;
-               else if (optlen < sizeof(struct ipv6_opt_hdr) ||
-                        optlen & 0x7 || optlen > 8 * 255)
-                       goto e_inval;
-               else {
-                       new = memdup_user(optval, optlen);
-                       if (IS_ERR(new)) {
-                               retv = PTR_ERR(new);
-                               break;
-                       }
-                       if (unlikely(ipv6_optlen(new) > optlen)) {
-                               kfree(new);
-                               goto e_inval;
-                       }
-               }
-
-               opt = rcu_dereference_protected(np->opt,
-                                               lockdep_sock_is_held(sk));
-               opt = ipv6_renew_options(sk, opt, optname, new);
-               kfree(new);
-               if (IS_ERR(opt)) {
-                       retv = PTR_ERR(opt);
-                       break;
-               }
-
-               /* routing header option needs extra check */
-               retv = -EINVAL;
-               if (optname == IPV6_RTHDR && opt && opt->srcrt) {
-                       struct ipv6_rt_hdr *rthdr = opt->srcrt;
-                       switch (rthdr->type) {
-#if IS_ENABLED(CONFIG_IPV6_MIP6)
-                       case IPV6_SRCRT_TYPE_2:
-                               if (rthdr->hdrlen != 2 ||
-                                   rthdr->segments_left != 1)
-                                       goto sticky_done;
-
-                               break;
-#endif
-                       case IPV6_SRCRT_TYPE_4:
-                       {
-                               struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)
-                                                         opt->srcrt;
-
-                               if (!seg6_validate_srh(srh, optlen, false))
-                                       goto sticky_done;
-                               break;
-                       }
-                       default:
-                               goto sticky_done;
-                       }
-               }
-
-               retv = 0;
-               opt = ipv6_update_options(sk, opt);
-sticky_done:
-               if (opt) {
-                       atomic_sub(opt->tot_len, &sk->sk_omem_alloc);
-                       txopt_put(opt);
-               }
+               retv = ipv6_set_opt_hdr(sk, optname, optval, optlen);
                break;
-       }
 
        case IPV6_PKTINFO:
        {
@@ -518,12 +662,13 @@ sticky_done:
 
                if (optlen == 0)
                        goto e_inval;
-               else if (optlen < sizeof(struct in6_pktinfo) || !optval)
+               else if (optlen < sizeof(struct in6_pktinfo) ||
+                        sockptr_is_null(optval))
                        goto e_inval;
 
-               if (copy_from_user(&pkt, optval, sizeof(struct in6_pktinfo))) {
-                               retv = -EFAULT;
-                               break;
+               if (copy_from_sockptr(&pkt, optval, sizeof(pkt))) {
+                       retv = -EFAULT;
+                       break;
                }
                if (!sk_dev_equal_l3scope(sk, pkt.ipi6_ifindex))
                        goto e_inval;
@@ -564,7 +709,7 @@ sticky_done:
                refcount_set(&opt->refcnt, 1);
                opt->tot_len = sizeof(*opt) + optlen;
                retv = -EFAULT;
-               if (copy_from_user(opt+1, optval, optlen))
+               if (copy_from_sockptr(opt + 1, optval, optlen))
                        goto done;
 
                msg.msg_controllen = optlen;
@@ -686,7 +831,7 @@ done:
                        break;
 
                retv = -EFAULT;
-               if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
+               if (copy_from_sockptr(&mreq, optval, sizeof(struct ipv6_mreq)))
                        break;
 
                if (optname == IPV6_ADD_MEMBERSHIP)
@@ -704,7 +849,7 @@ done:
                        goto e_inval;
 
                retv = -EFAULT;
-               if (copy_from_user(&mreq, optval, sizeof(struct ipv6_mreq)))
+               if (copy_from_sockptr(&mreq, optval, sizeof(struct ipv6_mreq)))
                        break;
 
                if (optname == IPV6_JOIN_ANYCAST)
@@ -722,77 +867,26 @@ done:
 
        case MCAST_JOIN_GROUP:
        case MCAST_LEAVE_GROUP:
-       {
-               struct group_req greq;
-               struct sockaddr_in6 *psin6;
-
-               if (optlen < sizeof(struct group_req))
-                       goto e_inval;
-
-               retv = -EFAULT;
-               if (copy_from_user(&greq, optval, sizeof(struct group_req)))
-                       break;
-               if (greq.gr_group.ss_family != AF_INET6) {
-                       retv = -EADDRNOTAVAIL;
-                       break;
-               }
-               psin6 = (struct sockaddr_in6 *)&greq.gr_group;
-               if (optname == MCAST_JOIN_GROUP)
-                       retv = ipv6_sock_mc_join(sk, greq.gr_interface,
-                                                &psin6->sin6_addr);
+               if (in_compat_syscall())
+                       retv = compat_ipv6_mcast_join_leave(sk, optname, optval,
+                                                           optlen);
                else
-                       retv = ipv6_sock_mc_drop(sk, greq.gr_interface,
-                                                &psin6->sin6_addr);
+                       retv = ipv6_mcast_join_leave(sk, optname, optval,
+                                                    optlen);
                break;
-       }
        case MCAST_JOIN_SOURCE_GROUP:
        case MCAST_LEAVE_SOURCE_GROUP:
        case MCAST_BLOCK_SOURCE:
        case MCAST_UNBLOCK_SOURCE:
-       {
-               struct group_source_req greqs;
-
-               if (optlen < sizeof(struct group_source_req))
-                       goto e_inval;
-               if (copy_from_user(&greqs, optval, sizeof(greqs))) {
-                       retv = -EFAULT;
-                       break;
-               }
-               retv = do_ipv6_mcast_group_source(sk, optname, &greqs);
+               retv = do_ipv6_mcast_group_source(sk, optname, optval, optlen);
                break;
-       }
        case MCAST_MSFILTER:
-       {
-               struct group_filter *gsf;
-
-               if (optlen < GROUP_FILTER_SIZE(0))
-                       goto e_inval;
-               if (optlen > sysctl_optmem_max) {
-                       retv = -ENOBUFS;
-                       break;
-               }
-               gsf = memdup_user(optval, optlen);
-               if (IS_ERR(gsf)) {
-                       retv = PTR_ERR(gsf);
-                       break;
-               }
-               /* numsrc >= (4G-140)/128 overflow in 32 bits */
-               if (gsf->gf_numsrc >= 0x1ffffffU ||
-                   gsf->gf_numsrc > sysctl_mld_max_msf) {
-                       kfree(gsf);
-                       retv = -ENOBUFS;
-                       break;
-               }
-               if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
-                       kfree(gsf);
-                       retv = -EINVAL;
-                       break;
-               }
-               retv = ip6_mc_msfilter(sk, gsf, gsf->gf_slist);
-               kfree(gsf);
-
+               if (in_compat_syscall())
+                       retv = compat_ipv6_set_mcast_msfilter(sk, optval,
+                                                             optlen);
+               else
+                       retv = ipv6_set_mcast_msfilter(sk, optval, optlen);
                break;
-       }
        case IPV6_ROUTER_ALERT:
                if (optlen < sizeof(int))
                        goto e_inval;
@@ -871,6 +965,14 @@ done:
                np->rxopt.bits.recvfragsize = valbool;
                retv = 0;
                break;
+       case IPV6_RECVERR_RFC4884:
+               if (optlen < sizeof(int))
+                       goto e_inval;
+               if (val < 0 || val > 1)
+                       goto e_inval;
+               np->recverr_rfc4884 = valbool;
+               retv = 0;
+               break;
        }
 
        release_sock(sk);
@@ -886,8 +988,8 @@ e_inval:
        return -EINVAL;
 }
 
-int ipv6_setsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, unsigned int optlen)
+int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+                   unsigned int optlen)
 {
        int err;
 
@@ -908,140 +1010,6 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname,
 }
 EXPORT_SYMBOL(ipv6_setsockopt);
 
-#ifdef CONFIG_COMPAT
-int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
-                          char __user *optval, unsigned int optlen)
-{
-       int err;
-
-       if (level == SOL_IP && sk->sk_type != SOCK_RAW) {
-               if (udp_prot.compat_setsockopt != NULL)
-                       return udp_prot.compat_setsockopt(sk, level, optname,
-                                                         optval, optlen);
-               return udp_prot.setsockopt(sk, level, optname, optval, optlen);
-       }
-
-       if (level != SOL_IPV6)
-               return -ENOPROTOOPT;
-
-       switch (optname) {
-       case MCAST_JOIN_GROUP:
-       case MCAST_LEAVE_GROUP:
-       {
-               struct compat_group_req __user *gr32 = (void __user *)optval;
-               struct group_req greq;
-               struct sockaddr_in6 *psin6 = (struct sockaddr_in6 *)&greq.gr_group;
-
-               if (optlen < sizeof(struct compat_group_req))
-                       return -EINVAL;
-
-               if (get_user(greq.gr_interface, &gr32->gr_interface) ||
-                   copy_from_user(&greq.gr_group, &gr32->gr_group,
-                               sizeof(greq.gr_group)))
-                       return -EFAULT;
-
-               if (greq.gr_group.ss_family != AF_INET6)
-                       return -EADDRNOTAVAIL;
-
-               rtnl_lock();
-               lock_sock(sk);
-               if (optname == MCAST_JOIN_GROUP)
-                       err = ipv6_sock_mc_join(sk, greq.gr_interface,
-                                                &psin6->sin6_addr);
-               else
-                       err = ipv6_sock_mc_drop(sk, greq.gr_interface,
-                                                &psin6->sin6_addr);
-               release_sock(sk);
-               rtnl_unlock();
-               return err;
-       }
-       case MCAST_JOIN_SOURCE_GROUP:
-       case MCAST_LEAVE_SOURCE_GROUP:
-       case MCAST_BLOCK_SOURCE:
-       case MCAST_UNBLOCK_SOURCE:
-       {
-               struct compat_group_source_req __user *gsr32 = (void __user *)optval;
-               struct group_source_req greqs;
-
-               if (optlen < sizeof(struct compat_group_source_req))
-                       return -EINVAL;
-
-               if (get_user(greqs.gsr_interface, &gsr32->gsr_interface) ||
-                   copy_from_user(&greqs.gsr_group, &gsr32->gsr_group,
-                               sizeof(greqs.gsr_group)) ||
-                   copy_from_user(&greqs.gsr_source, &gsr32->gsr_source,
-                               sizeof(greqs.gsr_source)))
-                       return -EFAULT;
-
-               rtnl_lock();
-               lock_sock(sk);
-               err = do_ipv6_mcast_group_source(sk, optname, &greqs);
-               release_sock(sk);
-               rtnl_unlock();
-               return err;
-       }
-       case MCAST_MSFILTER:
-       {
-               const int size0 = offsetof(struct compat_group_filter, gf_slist);
-               struct compat_group_filter *gf32;
-               void *p;
-               int n;
-
-               if (optlen < size0)
-                       return -EINVAL;
-               if (optlen > sysctl_optmem_max - 4)
-                       return -ENOBUFS;
-
-               p = kmalloc(optlen + 4, GFP_KERNEL);
-               if (!p)
-                       return -ENOMEM;
-
-               gf32 = p + 4; /* we want ->gf_group and ->gf_slist aligned */
-               if (copy_from_user(gf32, optval, optlen)) {
-                       err = -EFAULT;
-                       goto mc_msf_out;
-               }
-
-               n = gf32->gf_numsrc;
-               /* numsrc >= (4G-140)/128 overflow in 32 bits */
-               if (n >= 0x1ffffffU ||
-                   n > sysctl_mld_max_msf) {
-                       err = -ENOBUFS;
-                       goto mc_msf_out;
-               }
-               if (offsetof(struct compat_group_filter, gf_slist[n]) > optlen) {
-                       err = -EINVAL;
-                       goto mc_msf_out;
-               }
-
-               rtnl_lock();
-               lock_sock(sk);
-               err = ip6_mc_msfilter(sk, &(struct group_filter){
-                               .gf_interface = gf32->gf_interface,
-                               .gf_group = gf32->gf_group,
-                               .gf_fmode = gf32->gf_fmode,
-                               .gf_numsrc = gf32->gf_numsrc}, gf32->gf_slist);
-               release_sock(sk);
-               rtnl_unlock();
-mc_msf_out:
-               kfree(p);
-               return err;
-       }
-       }
-
-       err = do_ipv6_setsockopt(sk, level, optname, optval, optlen);
-#ifdef CONFIG_NETFILTER
-       /* we need to exclude all possible ENOPROTOOPTs except default case */
-       if (err == -ENOPROTOOPT && optname != IPV6_IPSEC_POLICY &&
-           optname != IPV6_XFRM_POLICY)
-               err = compat_nf_setsockopt(sk, PF_INET6, optname, optval,
-                                          optlen);
-#endif
-       return err;
-}
-EXPORT_SYMBOL(compat_ipv6_setsockopt);
-#endif
-
 static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
                                  int optname, char __user *optval, int len)
 {
@@ -1076,6 +1044,75 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
        return len;
 }
 
+static int ipv6_get_msfilter(struct sock *sk, void __user *optval,
+               int __user *optlen, int len)
+{
+       const int size0 = offsetof(struct group_filter, gf_slist);
+       struct group_filter __user *p = optval;
+       struct group_filter gsf;
+       int num;
+       int err;
+
+       if (len < size0)
+               return -EINVAL;
+       if (copy_from_user(&gsf, p, size0))
+               return -EFAULT;
+       if (gsf.gf_group.ss_family != AF_INET6)
+               return -EADDRNOTAVAIL;
+       num = gsf.gf_numsrc;
+       lock_sock(sk);
+       err = ip6_mc_msfget(sk, &gsf, p->gf_slist);
+       if (!err) {
+               if (num > gsf.gf_numsrc)
+                       num = gsf.gf_numsrc;
+               if (put_user(GROUP_FILTER_SIZE(num), optlen) ||
+                   copy_to_user(p, &gsf, size0))
+                       err = -EFAULT;
+       }
+       release_sock(sk);
+       return err;
+}
+
+static int compat_ipv6_get_msfilter(struct sock *sk, void __user *optval,
+               int __user *optlen)
+{
+       const int size0 = offsetof(struct compat_group_filter, gf_slist);
+       struct compat_group_filter __user *p = optval;
+       struct compat_group_filter gf32;
+       struct group_filter gf;
+       int len, err;
+       int num;
+
+       if (get_user(len, optlen))
+               return -EFAULT;
+       if (len < size0)
+               return -EINVAL;
+
+       if (copy_from_user(&gf32, p, size0))
+               return -EFAULT;
+       gf.gf_interface = gf32.gf_interface;
+       gf.gf_fmode = gf32.gf_fmode;
+       num = gf.gf_numsrc = gf32.gf_numsrc;
+       gf.gf_group = gf32.gf_group;
+
+       if (gf.gf_group.ss_family != AF_INET6)
+               return -EADDRNOTAVAIL;
+
+       lock_sock(sk);
+       err = ip6_mc_msfget(sk, &gf, p->gf_slist);
+       release_sock(sk);
+       if (err)
+               return err;
+       if (num > gf.gf_numsrc)
+               num = gf.gf_numsrc;
+       len = GROUP_FILTER_SIZE(num) - (sizeof(gf)-sizeof(gf32));
+       if (put_user(len, optlen) ||
+           put_user(gf.gf_fmode, &p->gf_fmode) ||
+           put_user(gf.gf_numsrc, &p->gf_numsrc))
+               return -EFAULT;
+       return 0;
+}
+
 static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                    char __user *optval, int __user *optlen, unsigned int flags)
 {
@@ -1099,33 +1136,9 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                val = sk->sk_family;
                break;
        case MCAST_MSFILTER:
-       {
-               struct group_filter __user *p = (void __user *)optval;
-               struct group_filter gsf;
-               const int size0 = offsetof(struct group_filter, gf_slist);
-               int num;
-               int err;
-
-               if (len < size0)
-                       return -EINVAL;
-               if (copy_from_user(&gsf, p, size0))
-                       return -EFAULT;
-               if (gsf.gf_group.ss_family != AF_INET6)
-                       return -EADDRNOTAVAIL;
-               num = gsf.gf_numsrc;
-               lock_sock(sk);
-               err = ip6_mc_msfget(sk, &gsf, p->gf_slist);
-               if (!err) {
-                       if (num > gsf.gf_numsrc)
-                               num = gsf.gf_numsrc;
-                       if (put_user(GROUP_FILTER_SIZE(num), optlen) ||
-                           copy_to_user(p, &gsf, size0))
-                               err = -EFAULT;
-               }
-               release_sock(sk);
-               return err;
-       }
-
+               if (in_compat_syscall())
+                       return compat_ipv6_get_msfilter(sk, optval, optlen);
+               return ipv6_get_msfilter(sk, optval, optlen, len);
        case IPV6_2292PKTOPTIONS:
        {
                struct msghdr msg;
@@ -1434,6 +1447,10 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                val = np->rtalert_isolate;
                break;
 
+       case IPV6_RECVERR_RFC4884:
+               val = np->recverr_rfc4884;
+               break;
+
        default:
                return -ENOPROTOOPT;
        }
@@ -1473,78 +1490,3 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname,
        return err;
 }
 EXPORT_SYMBOL(ipv6_getsockopt);
-
-#ifdef CONFIG_COMPAT
-int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
-                          char __user *optval, int __user *optlen)
-{
-       int err;
-
-       if (level == SOL_IP && sk->sk_type != SOCK_RAW) {
-               if (udp_prot.compat_getsockopt != NULL)
-                       return udp_prot.compat_getsockopt(sk, level, optname,
-                                                         optval, optlen);
-               return udp_prot.getsockopt(sk, level, optname, optval, optlen);
-       }
-
-       if (level != SOL_IPV6)
-               return -ENOPROTOOPT;
-
-       if (optname == MCAST_MSFILTER) {
-               const int size0 = offsetof(struct compat_group_filter, gf_slist);
-               struct compat_group_filter __user *p = (void __user *)optval;
-               struct compat_group_filter gf32;
-               struct group_filter gf;
-               int ulen, err;
-               int num;
-
-               if (get_user(ulen, optlen))
-                       return -EFAULT;
-
-               if (ulen < size0)
-                       return -EINVAL;
-
-               if (copy_from_user(&gf32, p, size0))
-                       return -EFAULT;
-
-               gf.gf_interface = gf32.gf_interface;
-               gf.gf_fmode = gf32.gf_fmode;
-               num = gf.gf_numsrc = gf32.gf_numsrc;
-               gf.gf_group = gf32.gf_group;
-
-               if (gf.gf_group.ss_family != AF_INET6)
-                       return -EADDRNOTAVAIL;
-               lock_sock(sk);
-               err = ip6_mc_msfget(sk, &gf, p->gf_slist);
-               release_sock(sk);
-               if (err)
-                       return err;
-               if (num > gf.gf_numsrc)
-                       num = gf.gf_numsrc;
-               ulen = GROUP_FILTER_SIZE(num) - (sizeof(gf)-sizeof(gf32));
-               if (put_user(ulen, optlen) ||
-                   put_user(gf.gf_fmode, &p->gf_fmode) ||
-                   put_user(gf.gf_numsrc, &p->gf_numsrc))
-                       return -EFAULT;
-               return 0;
-       }
-
-       err = do_ipv6_getsockopt(sk, level, optname, optval, optlen,
-                                MSG_CMSG_COMPAT);
-#ifdef CONFIG_NETFILTER
-       /* we need to exclude all possible ENOPROTOOPTs except default case */
-       if (err == -ENOPROTOOPT && optname != IPV6_2292PKTOPTIONS) {
-               int len;
-
-               if (get_user(len, optlen))
-                       return -EFAULT;
-
-               err = compat_nf_getsockopt(sk, PF_INET6, optname, optval, &len);
-               if (err >= 0)
-                       err = put_user(len, optlen);
-       }
-#endif
-       return err;
-}
-EXPORT_SYMBOL(compat_ipv6_getsockopt);
-#endif
index 7e12d21..8cd2782 100644 (file)
@@ -2615,6 +2615,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
                idev->mc_list = i->next;
 
                write_unlock_bh(&idev->lock);
+               ip6_mc_clear_src(i);
                ma_put(i);
                write_lock_bh(&idev->lock);
        }
index e273934..1d52957 100644 (file)
@@ -960,8 +960,7 @@ static int compat_table_info(const struct xt_table_info *info,
 }
 #endif
 
-static int get_info(struct net *net, void __user *user,
-                   const int *len, int compat)
+static int get_info(struct net *net, void __user *user, const int *len)
 {
        char name[XT_TABLE_MAXNAMELEN];
        struct xt_table *t;
@@ -975,7 +974,7 @@ static int get_info(struct net *net, void __user *user,
 
        name[XT_TABLE_MAXNAMELEN-1] = '\0';
 #ifdef CONFIG_COMPAT
-       if (compat)
+       if (in_compat_syscall())
                xt_compat_lock(AF_INET6);
 #endif
        t = xt_request_find_table_lock(net, AF_INET6, name);
@@ -985,7 +984,7 @@ static int get_info(struct net *net, void __user *user,
 #ifdef CONFIG_COMPAT
                struct xt_table_info tmp;
 
-               if (compat) {
+               if (in_compat_syscall()) {
                        ret = compat_table_info(private, &tmp);
                        xt_compat_flush_offsets(AF_INET6);
                        private = &tmp;
@@ -1011,7 +1010,7 @@ static int get_info(struct net *net, void __user *user,
        } else
                ret = PTR_ERR(t);
 #ifdef CONFIG_COMPAT
-       if (compat)
+       if (in_compat_syscall())
                xt_compat_unlock(AF_INET6);
 #endif
        return ret;
@@ -1120,7 +1119,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 }
 
 static int
-do_replace(struct net *net, const void __user *user, unsigned int len)
+do_replace(struct net *net, sockptr_t arg, unsigned int len)
 {
        int ret;
        struct ip6t_replace tmp;
@@ -1128,7 +1127,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
        void *loc_cpu_entry;
        struct ip6t_entry *iter;
 
-       if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
+       if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
                return -EFAULT;
 
        /* overflow check */
@@ -1144,8 +1143,8 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
                return -ENOMEM;
 
        loc_cpu_entry = newinfo->entries;
-       if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
-                          tmp.size) != 0) {
+       sockptr_advance(arg, sizeof(tmp));
+       if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) {
                ret = -EFAULT;
                goto free_newinfo;
        }
@@ -1169,8 +1168,7 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
 }
 
 static int
-do_add_counters(struct net *net, const void __user *user, unsigned int len,
-               int compat)
+do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
 {
        unsigned int i;
        struct xt_counters_info tmp;
@@ -1181,7 +1179,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
        struct ip6t_entry *iter;
        unsigned int addend;
 
-       paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
+       paddc = xt_copy_counters(arg, len, &tmp);
        if (IS_ERR(paddc))
                return PTR_ERR(paddc);
        t = xt_find_table_lock(net, AF_INET6, tmp.name);
@@ -1495,7 +1493,7 @@ out_unlock:
 }
 
 static int
-compat_do_replace(struct net *net, void __user *user, unsigned int len)
+compat_do_replace(struct net *net, sockptr_t arg, unsigned int len)
 {
        int ret;
        struct compat_ip6t_replace tmp;
@@ -1503,7 +1501,7 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
        void *loc_cpu_entry;
        struct ip6t_entry *iter;
 
-       if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
+       if (copy_from_sockptr(&tmp, arg, sizeof(tmp)) != 0)
                return -EFAULT;
 
        /* overflow check */
@@ -1519,8 +1517,8 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
                return -ENOMEM;
 
        loc_cpu_entry = newinfo->entries;
-       if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
-                          tmp.size) != 0) {
+       sockptr_advance(arg, sizeof(tmp));
+       if (copy_from_sockptr(loc_cpu_entry, arg, tmp.size) != 0) {
                ret = -EFAULT;
                goto free_newinfo;
        }
@@ -1543,31 +1541,6 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
        return ret;
 }
 
-static int
-compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
-                      unsigned int len)
-{
-       int ret;
-
-       if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
-               return -EPERM;
-
-       switch (cmd) {
-       case IP6T_SO_SET_REPLACE:
-               ret = compat_do_replace(sock_net(sk), user, len);
-               break;
-
-       case IP6T_SO_SET_ADD_COUNTERS:
-               ret = do_add_counters(sock_net(sk), user, len, 1);
-               break;
-
-       default:
-               ret = -EINVAL;
-       }
-
-       return ret;
-}
-
 struct compat_ip6t_get_entries {
        char name[XT_TABLE_MAXNAMELEN];
        compat_uint_t size;
@@ -1643,33 +1616,10 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
        xt_compat_unlock(AF_INET6);
        return ret;
 }
-
-static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
-
-static int
-compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
-{
-       int ret;
-
-       if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
-               return -EPERM;
-
-       switch (cmd) {
-       case IP6T_SO_GET_INFO:
-               ret = get_info(sock_net(sk), user, len, 1);
-               break;
-       case IP6T_SO_GET_ENTRIES:
-               ret = compat_get_entries(sock_net(sk), user, len);
-               break;
-       default:
-               ret = do_ip6t_get_ctl(sk, cmd, user, len);
-       }
-       return ret;
-}
 #endif
 
 static int
-do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
+do_ip6t_set_ctl(struct sock *sk, int cmd, sockptr_t arg, unsigned int len)
 {
        int ret;
 
@@ -1678,11 +1628,16 @@ do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 
        switch (cmd) {
        case IP6T_SO_SET_REPLACE:
-               ret = do_replace(sock_net(sk), user, len);
+#ifdef CONFIG_COMPAT
+               if (in_compat_syscall())
+                       ret = compat_do_replace(sock_net(sk), arg, len);
+               else
+#endif
+                       ret = do_replace(sock_net(sk), arg, len);
                break;
 
        case IP6T_SO_SET_ADD_COUNTERS:
-               ret = do_add_counters(sock_net(sk), user, len, 0);
+               ret = do_add_counters(sock_net(sk), arg, len);
                break;
 
        default:
@@ -1702,11 +1657,16 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
 
        switch (cmd) {
        case IP6T_SO_GET_INFO:
-               ret = get_info(sock_net(sk), user, len, 0);
+               ret = get_info(sock_net(sk), user, len);
                break;
 
        case IP6T_SO_GET_ENTRIES:
-               ret = get_entries(sock_net(sk), user, len);
+#ifdef CONFIG_COMPAT
+               if (in_compat_syscall())
+                       ret = compat_get_entries(sock_net(sk), user, len);
+               else
+#endif
+                       ret = get_entries(sock_net(sk), user, len);
                break;
 
        case IP6T_SO_GET_REVISION_MATCH:
@@ -1807,11 +1767,22 @@ out_free:
        return ret;
 }
 
+void ip6t_unregister_table_pre_exit(struct net *net, struct xt_table *table,
+                                   const struct nf_hook_ops *ops)
+{
+       nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+}
+
+void ip6t_unregister_table_exit(struct net *net, struct xt_table *table)
+{
+       __ip6t_unregister_table(net, table);
+}
+
 void ip6t_unregister_table(struct net *net, struct xt_table *table,
                           const struct nf_hook_ops *ops)
 {
        if (ops)
-               nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
+               ip6t_unregister_table_pre_exit(net, table, ops);
        __ip6t_unregister_table(net, table);
 }
 
@@ -1886,15 +1857,9 @@ static struct nf_sockopt_ops ip6t_sockopts = {
        .set_optmin     = IP6T_BASE_CTL,
        .set_optmax     = IP6T_SO_SET_MAX+1,
        .set            = do_ip6t_set_ctl,
-#ifdef CONFIG_COMPAT
-       .compat_set     = compat_do_ip6t_set_ctl,
-#endif
        .get_optmin     = IP6T_BASE_CTL,
        .get_optmax     = IP6T_SO_GET_MAX+1,
        .get            = do_ip6t_get_ctl,
-#ifdef CONFIG_COMPAT
-       .compat_get     = compat_do_ip6t_get_ctl,
-#endif
        .owner          = THIS_MODULE,
 };
 
@@ -1969,6 +1934,8 @@ static void __exit ip6_tables_fini(void)
 
 EXPORT_SYMBOL(ip6t_register_table);
 EXPORT_SYMBOL(ip6t_unregister_table);
+EXPORT_SYMBOL(ip6t_unregister_table_pre_exit);
+EXPORT_SYMBOL(ip6t_unregister_table_exit);
 EXPORT_SYMBOL(ip6t_do_table);
 
 module_init(ip6_tables_init);
index fd1f52a..d51d0c3 100644 (file)
@@ -121,3 +121,4 @@ module_exit(synproxy_tg6_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Intercept IPv6 TCP connections and establish them using syncookies");
index 32667f5..88337b5 100644 (file)
@@ -73,16 +73,24 @@ static int __net_init ip6table_filter_net_init(struct net *net)
        return 0;
 }
 
+static void __net_exit ip6table_filter_net_pre_exit(struct net *net)
+{
+       if (net->ipv6.ip6table_filter)
+               ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_filter,
+                                              filter_ops);
+}
+
 static void __net_exit ip6table_filter_net_exit(struct net *net)
 {
        if (!net->ipv6.ip6table_filter)
                return;
-       ip6t_unregister_table(net, net->ipv6.ip6table_filter, filter_ops);
+       ip6t_unregister_table_exit(net, net->ipv6.ip6table_filter);
        net->ipv6.ip6table_filter = NULL;
 }
 
 static struct pernet_operations ip6table_filter_net_ops = {
        .init = ip6table_filter_net_init,
+       .pre_exit = ip6table_filter_net_pre_exit,
        .exit = ip6table_filter_net_exit,
 };
 
index 070afb9..1a27486 100644 (file)
@@ -93,16 +93,24 @@ static int __net_init ip6table_mangle_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit ip6table_mangle_net_pre_exit(struct net *net)
+{
+       if (net->ipv6.ip6table_mangle)
+               ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_mangle,
+                                              mangle_ops);
+}
+
 static void __net_exit ip6table_mangle_net_exit(struct net *net)
 {
        if (!net->ipv6.ip6table_mangle)
                return;
 
-       ip6t_unregister_table(net, net->ipv6.ip6table_mangle, mangle_ops);
+       ip6t_unregister_table_exit(net, net->ipv6.ip6table_mangle);
        net->ipv6.ip6table_mangle = NULL;
 }
 
 static struct pernet_operations ip6table_mangle_net_ops = {
+       .pre_exit = ip6table_mangle_net_pre_exit,
        .exit = ip6table_mangle_net_exit,
 };
 
index 0f48759..0a23265 100644 (file)
@@ -114,16 +114,22 @@ static int __net_init ip6table_nat_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit ip6table_nat_net_pre_exit(struct net *net)
+{
+       if (net->ipv6.ip6table_nat)
+               ip6t_nat_unregister_lookups(net);
+}
+
 static void __net_exit ip6table_nat_net_exit(struct net *net)
 {
        if (!net->ipv6.ip6table_nat)
                return;
-       ip6t_nat_unregister_lookups(net);
-       ip6t_unregister_table(net, net->ipv6.ip6table_nat, NULL);
+       ip6t_unregister_table_exit(net, net->ipv6.ip6table_nat);
        net->ipv6.ip6table_nat = NULL;
 }
 
 static struct pernet_operations ip6table_nat_net_ops = {
+       .pre_exit = ip6table_nat_net_pre_exit,
        .exit   = ip6table_nat_net_exit,
 };
 
index a22100b..8f9e742 100644 (file)
@@ -66,15 +66,23 @@ static int __net_init ip6table_raw_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit ip6table_raw_net_pre_exit(struct net *net)
+{
+       if (net->ipv6.ip6table_raw)
+               ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_raw,
+                                              rawtable_ops);
+}
+
 static void __net_exit ip6table_raw_net_exit(struct net *net)
 {
        if (!net->ipv6.ip6table_raw)
                return;
-       ip6t_unregister_table(net, net->ipv6.ip6table_raw, rawtable_ops);
+       ip6t_unregister_table_exit(net, net->ipv6.ip6table_raw);
        net->ipv6.ip6table_raw = NULL;
 }
 
 static struct pernet_operations ip6table_raw_net_ops = {
+       .pre_exit = ip6table_raw_net_pre_exit,
        .exit = ip6table_raw_net_exit,
 };
 
index a74335f..5e8c48f 100644 (file)
@@ -61,15 +61,23 @@ static int __net_init ip6table_security_table_init(struct net *net)
        return ret;
 }
 
+static void __net_exit ip6table_security_net_pre_exit(struct net *net)
+{
+       if (net->ipv6.ip6table_security)
+               ip6t_unregister_table_pre_exit(net, net->ipv6.ip6table_security,
+                                              sectbl_ops);
+}
+
 static void __net_exit ip6table_security_net_exit(struct net *net)
 {
        if (!net->ipv6.ip6table_security)
                return;
-       ip6t_unregister_table(net, net->ipv6.ip6table_security, sectbl_ops);
+       ip6t_unregister_table_exit(net, net->ipv6.ip6table_security);
        net->ipv6.ip6table_security = NULL;
 }
 
 static struct pernet_operations ip6table_security_net_ops = {
+       .pre_exit = ip6table_security_net_pre_exit,
        .exit = ip6table_security_net_exit,
 };
 
index a8566ee..667b8af 100644 (file)
@@ -35,3 +35,4 @@ module_exit(nf_flow_ipv6_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NF_FLOWTABLE(AF_INET6);
+MODULE_DESCRIPTION("Netfilter flow table IPv6 module");
index 5fae66f..4aef6ba 100644 (file)
@@ -126,6 +126,21 @@ void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
 }
 EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_put);
 
+static int nf_reject6_fill_skb_dst(struct sk_buff *skb_in)
+{
+       struct dst_entry *dst = NULL;
+       struct flowi fl;
+
+       memset(&fl, 0, sizeof(struct flowi));
+       fl.u.ip6.daddr = ipv6_hdr(skb_in)->saddr;
+       nf_ip6_route(dev_net(skb_in->dev), &dst, &fl, false);
+       if (!dst)
+               return -1;
+
+       skb_dst_set(skb_in, dst);
+       return 0;
+}
+
 void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
 {
        struct net_device *br_indev __maybe_unused;
@@ -154,6 +169,14 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
        fl6.daddr = oip6h->saddr;
        fl6.fl6_sport = otcph->dest;
        fl6.fl6_dport = otcph->source;
+
+       if (hook == NF_INET_PRE_ROUTING) {
+               nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false);
+               if (!dst)
+                       return;
+               skb_dst_set(oldskb, dst);
+       }
+
        fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
        fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
        security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
@@ -245,6 +268,9 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in,
        if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
                skb_in->dev = net->loopback_dev;
 
+       if (hooknum == NF_INET_PRE_ROUTING && nf_reject6_fill_skb_dst(skb_in))
+               return;
+
        icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
 }
 EXPORT_SYMBOL_GPL(nf_send_unreach6);
index 2af3220..8b5193e 100644 (file)
@@ -105,3 +105,4 @@ module_exit(nft_dup_ipv6_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "dup");
+MODULE_DESCRIPTION("IPv6 nftables packet duplication support");
index 7ece86a..e204163 100644 (file)
@@ -255,3 +255,4 @@ module_exit(nft_fib6_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
 MODULE_ALIAS_NFT_AF_EXPR(10, "fib");
+MODULE_DESCRIPTION("nftables fib / ipv6 route lookup support");
index 680a28c..c1098a1 100644 (file)
@@ -72,3 +72,4 @@ module_exit(nft_reject_ipv6_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "reject");
+MODULE_DESCRIPTION("IPv6 packet rejection for nftables");
index 98ac32b..6caa062 100644 (file)
@@ -114,6 +114,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
        ipcm6_init_sk(&ipc6, np);
+       ipc6.sockc.mark = sk->sk_mark;
        fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel);
 
        dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr, false);
index 8ef5a7b..874f01c 100644 (file)
@@ -972,13 +972,13 @@ do_confirm:
 }
 
 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
-                              char __user *optval, int optlen)
+                              sockptr_t optval, int optlen)
 {
        switch (optname) {
        case ICMPV6_FILTER:
                if (optlen > sizeof(struct icmp6_filter))
                        optlen = sizeof(struct icmp6_filter);
-               if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
+               if (copy_from_sockptr(&raw6_sk(sk)->filter, optval, optlen))
                        return -EFAULT;
                return 0;
        default:
@@ -1015,12 +1015,12 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
 
 
 static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
-                           char __user *optval, unsigned int optlen)
+                              sockptr_t optval, unsigned int optlen)
 {
        struct raw6_sock *rp = raw6_sk(sk);
        int val;
 
-       if (get_user(val, (int __user *)optval))
+       if (copy_from_sockptr(&val, optval, sizeof(val)))
                return -EFAULT;
 
        switch (optname) {
@@ -1062,7 +1062,7 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
 }
 
 static int rawv6_setsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, unsigned int optlen)
+                           sockptr_t optval, unsigned int optlen)
 {
        switch (level) {
        case SOL_RAW:
@@ -1084,30 +1084,6 @@ static int rawv6_setsockopt(struct sock *sk, int level, int optname,
        return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
 }
 
-#ifdef CONFIG_COMPAT
-static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname,
-                                  char __user *optval, unsigned int optlen)
-{
-       switch (level) {
-       case SOL_RAW:
-               break;
-       case SOL_ICMPV6:
-               if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
-                       return -EOPNOTSUPP;
-               return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
-       case SOL_IPV6:
-               if (optname == IPV6_CHECKSUM ||
-                   optname == IPV6_HDRINCL)
-                       break;
-               fallthrough;
-       default:
-               return compat_ipv6_setsockopt(sk, level, optname,
-                                             optval, optlen);
-       }
-       return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
-}
-#endif
-
 static int do_rawv6_getsockopt(struct sock *sk, int level, int optname,
                            char __user *optval, int __user *optlen)
 {
@@ -1169,30 +1145,6 @@ static int rawv6_getsockopt(struct sock *sk, int level, int optname,
        return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
 }
 
-#ifdef CONFIG_COMPAT
-static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname,
-                                  char __user *optval, int __user *optlen)
-{
-       switch (level) {
-       case SOL_RAW:
-               break;
-       case SOL_ICMPV6:
-               if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
-                       return -EOPNOTSUPP;
-               return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
-       case SOL_IPV6:
-               if (optname == IPV6_CHECKSUM ||
-                   optname == IPV6_HDRINCL)
-                       break;
-               fallthrough;
-       default:
-               return compat_ipv6_getsockopt(sk, level, optname,
-                                             optval, optlen);
-       }
-       return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
-}
-#endif
-
 static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
        switch (cmd) {
@@ -1297,8 +1249,6 @@ struct proto rawv6_prot = {
        .usersize          = sizeof_field(struct raw6_sock, filter),
        .h.raw_hash        = &raw_v6_hashinfo,
 #ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_rawv6_setsockopt,
-       .compat_getsockopt = compat_rawv6_getsockopt,
        .compat_ioctl      = compat_rawv6_ioctl,
 #endif
        .diag_destroy      = raw_abort,
@@ -1378,8 +1328,6 @@ const struct proto_ops inet6_sockraw_ops = {
        .sendpage          = sock_no_sendpage,
 #ifdef CONFIG_COMPAT
        .compat_ioctl      = inet6_compat_ioctl,
-       .compat_setsockopt = compat_sock_common_setsockopt,
-       .compat_getsockopt = compat_sock_common_getsockopt,
 #endif
 };
 
index 82cbb46..33f5efb 100644 (file)
@@ -61,6 +61,7 @@
 #include <net/l3mdev.h>
 #include <net/ip.h>
 #include <linux/uaccess.h>
+#include <linux/btf_ids.h>
 
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
@@ -431,9 +432,12 @@ void fib6_select_path(const struct net *net, struct fib6_result *res,
        struct fib6_info *sibling, *next_sibling;
        struct fib6_info *match = res->f6i;
 
-       if ((!match->fib6_nsiblings && !match->nh) || have_oif_match)
+       if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
                goto out;
 
+       if (match->nh && have_oif_match && res->nh)
+               return;
+
        /* We might have already computed the hash for ICMPv6 errors. In such
         * case it will always be non-zero. Otherwise now is the time to do it.
         */
@@ -1207,7 +1211,7 @@ fallback:
        return nrt;
 }
 
-static struct rt6_info *ip6_pol_route_lookup(struct net *net,
+INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_lookup(struct net *net,
                                             struct fib6_table *table,
                                             struct flowi6 *fl6,
                                             const struct sk_buff *skb,
@@ -2274,7 +2278,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(ip6_pol_route);
 
-static struct rt6_info *ip6_pol_route_input(struct net *net,
+INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_input(struct net *net,
                                            struct fib6_table *table,
                                            struct flowi6 *fl6,
                                            const struct sk_buff *skb,
@@ -2465,7 +2469,7 @@ void ip6_route_input(struct sk_buff *skb)
                                                      &fl6, skb, flags));
 }
 
-static struct rt6_info *ip6_pol_route_output(struct net *net,
+INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_output(struct net *net,
                                             struct fib6_table *table,
                                             struct flowi6 *fl6,
                                             const struct sk_buff *skb,
@@ -2912,7 +2916,7 @@ struct ip6rd_flowi {
        struct in6_addr gateway;
 };
 
-static struct rt6_info *__ip6_route_redirect(struct net *net,
+INDIRECT_CALLABLE_SCOPE struct rt6_info *__ip6_route_redirect(struct net *net,
                                             struct fib6_table *table,
                                             struct flowi6 *fl6,
                                             const struct sk_buff *skb,
@@ -3402,7 +3406,7 @@ static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
        if ((flags & RTF_REJECT) ||
            (dev && (dev->flags & IFF_LOOPBACK) &&
             !(addr_type & IPV6_ADDR_LOOPBACK) &&
-            !(flags & RTF_LOCAL)))
+            !(flags & (RTF_ANYCAST | RTF_LOCAL))))
                return true;
 
        return false;
@@ -6420,7 +6424,10 @@ void __init ip6_route_init_special_entries(void)
 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
 DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt)
 
-static const struct bpf_iter_reg ipv6_route_reg_info = {
+BTF_ID_LIST(btf_fib6_info_id)
+BTF_ID(struct, fib6_info)
+
+static struct bpf_iter_reg ipv6_route_reg_info = {
        .target                 = "ipv6_route",
        .seq_ops                = &ipv6_route_seq_ops,
        .init_seq_private       = bpf_iter_init_seq_net,
@@ -6435,6 +6442,7 @@ static const struct bpf_iter_reg ipv6_route_reg_info = {
 
 static int __init bpf_iter_register(void)
 {
+       ipv6_route_reg_info.ctx_arg_info[0].btf_id = *btf_fib6_info_id;
        return bpf_iter_reg_target(&ipv6_route_reg_info);
 }
 
index c3ececd..5fdf3eb 100644 (file)
@@ -136,8 +136,7 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt,
 
        oldhdr = ipv6_hdr(skb);
 
-       buf = kzalloc(ipv6_rpl_srh_alloc_size(srh->segments_left - 1) * 2,
-                     GFP_ATOMIC);
+       buf = kcalloc(struct_size(srh, segments.addr, srh->segments_left), 2, GFP_ATOMIC);
        if (!buf)
                return -ENOMEM;
 
index 1fbb4df..5e2c34c 100644 (file)
@@ -1421,6 +1421,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
        int t_hlen = tunnel->hlen + sizeof(struct iphdr);
 
        dev->netdev_ops         = &ipip6_netdev_ops;
+       dev->header_ops         = &ip_tunnel_header_ops;
        dev->needs_free_netdev  = true;
        dev->priv_destructor    = ipip6_dev_free;
 
index f67d45f..305870a 100644 (file)
@@ -567,7 +567,7 @@ static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
 }
 
 static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
-                                char __user *optval, int optlen)
+                                sockptr_t optval, int optlen)
 {
        struct tcp_md5sig cmd;
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
@@ -577,7 +577,7 @@ static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
        if (optlen < sizeof(cmd))
                return -EINVAL;
 
-       if (copy_from_user(&cmd, optval, sizeof(cmd)))
+       if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
                return -EFAULT;
 
        if (sin6->sin6_family != AF_INET6)
@@ -1811,6 +1811,13 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
        .twsk_destructor = tcp_twsk_destructor,
 };
 
+INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
+{
+       struct ipv6_pinfo *np = inet6_sk(sk);
+
+       __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
+}
+
 const struct inet_connection_sock_af_ops ipv6_specific = {
        .queue_xmit        = inet6_csk_xmit,
        .send_check        = tcp_v6_send_check,
@@ -1824,10 +1831,6 @@ const struct inet_connection_sock_af_ops ipv6_specific = {
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_ipv6_setsockopt,
-       .compat_getsockopt = compat_ipv6_getsockopt,
-#endif
        .mtu_reduced       = tcp_v6_mtu_reduced,
 };
 
@@ -1854,10 +1857,6 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_ipv6_setsockopt,
-       .compat_getsockopt = compat_ipv6_getsockopt,
-#endif
        .mtu_reduced       = tcp_v4_mtu_reduced,
 };
 
@@ -2115,10 +2114,6 @@ struct proto tcpv6_prot = {
        .rsk_prot               = &tcp6_request_sock_ops,
        .h.hashinfo             = &tcp_hashinfo,
        .no_autobind            = true,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt      = compat_tcp_setsockopt,
-       .compat_getsockopt      = compat_tcp_getsockopt,
-#endif
        .diag_destroy           = tcp_abort,
 };
 EXPORT_SYMBOL_GPL(tcpv6_prot);
index 7d41517..15818e1 100644 (file)
@@ -141,6 +141,27 @@ static int compute_score(struct sock *sk, struct net *net,
        return score;
 }
 
+static inline struct sock *lookup_reuseport(struct net *net, struct sock *sk,
+                                           struct sk_buff *skb,
+                                           const struct in6_addr *saddr,
+                                           __be16 sport,
+                                           const struct in6_addr *daddr,
+                                           unsigned int hnum)
+{
+       struct sock *reuse_sk = NULL;
+       u32 hash;
+
+       if (sk->sk_reuseport && sk->sk_state != TCP_ESTABLISHED) {
+               hash = udp6_ehashfn(net, daddr, hnum, saddr, sport);
+               reuse_sk = reuseport_select_sock(sk, hash, skb,
+                                                sizeof(struct udphdr));
+               /* Fall back to scoring if group has connections */
+               if (reuseport_has_conns(sk, false))
+                       return NULL;
+       }
+       return reuse_sk;
+}
+
 /* called with rcu_read_lock() */
 static struct sock *udp6_lib_lookup2(struct net *net,
                const struct in6_addr *saddr, __be16 sport,
@@ -150,7 +171,6 @@ static struct sock *udp6_lib_lookup2(struct net *net,
 {
        struct sock *sk, *result;
        int score, badness;
-       u32 hash = 0;
 
        result = NULL;
        badness = -1;
@@ -158,16 +178,11 @@ static struct sock *udp6_lib_lookup2(struct net *net,
                score = compute_score(sk, net, saddr, sport,
                                      daddr, hnum, dif, sdif);
                if (score > badness) {
-                       if (sk->sk_reuseport &&
-                           sk->sk_state != TCP_ESTABLISHED) {
-                               hash = udp6_ehashfn(net, daddr, hnum,
-                                                   saddr, sport);
-
-                               result = reuseport_select_sock(sk, hash, skb,
-                                                       sizeof(struct udphdr));
-                               if (result && !reuseport_has_conns(sk, false))
-                                       return result;
-                       }
+                       result = lookup_reuseport(net, sk, skb,
+                                                 saddr, sport, daddr, hnum);
+                       if (result)
+                               return result;
+
                        result = sk;
                        badness = score;
                }
@@ -175,6 +190,31 @@ static struct sock *udp6_lib_lookup2(struct net *net,
        return result;
 }
 
+static inline struct sock *udp6_lookup_run_bpf(struct net *net,
+                                              struct udp_table *udptable,
+                                              struct sk_buff *skb,
+                                              const struct in6_addr *saddr,
+                                              __be16 sport,
+                                              const struct in6_addr *daddr,
+                                              u16 hnum)
+{
+       struct sock *sk, *reuse_sk;
+       bool no_reuseport;
+
+       if (udptable != &udp_table)
+               return NULL; /* only UDP is supported */
+
+       no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP,
+                                           saddr, sport, daddr, hnum, &sk);
+       if (no_reuseport || IS_ERR_OR_NULL(sk))
+               return sk;
+
+       reuse_sk = lookup_reuseport(net, sk, skb, saddr, sport, daddr, hnum);
+       if (reuse_sk)
+               sk = reuse_sk;
+       return sk;
+}
+
 /* rcu_read_lock() must be held */
 struct sock *__udp6_lib_lookup(struct net *net,
                               const struct in6_addr *saddr, __be16 sport,
@@ -185,25 +225,42 @@ struct sock *__udp6_lib_lookup(struct net *net,
        unsigned short hnum = ntohs(dport);
        unsigned int hash2, slot2;
        struct udp_hslot *hslot2;
-       struct sock *result;
+       struct sock *result, *sk;
 
        hash2 = ipv6_portaddr_hash(net, daddr, hnum);
        slot2 = hash2 & udptable->mask;
        hslot2 = &udptable->hash2[slot2];
 
+       /* Lookup connected or non-wildcard sockets */
        result = udp6_lib_lookup2(net, saddr, sport,
                                  daddr, hnum, dif, sdif,
                                  hslot2, skb);
-       if (!result) {
-               hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
-               slot2 = hash2 & udptable->mask;
+       if (!IS_ERR_OR_NULL(result) && result->sk_state == TCP_ESTABLISHED)
+               goto done;
+
+       /* Lookup redirect from BPF */
+       if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
+               sk = udp6_lookup_run_bpf(net, udptable, skb,
+                                        saddr, sport, daddr, hnum);
+               if (sk) {
+                       result = sk;
+                       goto done;
+               }
+       }
 
-               hslot2 = &udptable->hash2[slot2];
+       /* Got non-wildcard socket or error on first lookup */
+       if (result)
+               goto done;
 
-               result = udp6_lib_lookup2(net, saddr, sport,
-                                         &in6addr_any, hnum, dif, sdif,
-                                         hslot2, skb);
-       }
+       /* Lookup wildcard sockets */
+       hash2 = ipv6_portaddr_hash(net, &in6addr_any, hnum);
+       slot2 = hash2 & udptable->mask;
+       hslot2 = &udptable->hash2[slot2];
+
+       result = udp6_lib_lookup2(net, saddr, sport,
+                                 &in6addr_any, hnum, dif, sdif,
+                                 hslot2, skb);
+done:
        if (IS_ERR(result))
                return NULL;
        return result;
@@ -1059,6 +1116,9 @@ static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
  *     @sk:    socket we are sending on
  *     @skb:   sk_buff containing the filled-in UDP header
  *             (checksum field must be zeroed out)
+ *     @saddr: source address
+ *     @daddr: destination address
+ *     @len:   length of packet
  */
 static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
                                 const struct in6_addr *saddr,
@@ -1558,26 +1618,16 @@ void udpv6_destroy_sock(struct sock *sk)
 /*
  *     Socket option code for UDP
  */
-int udpv6_setsockopt(struct sock *sk, int level, int optname,
-                    char __user *optval, unsigned int optlen)
+int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+                    unsigned int optlen)
 {
        if (level == SOL_UDP  ||  level == SOL_UDPLITE)
-               return udp_lib_setsockopt(sk, level, optname, optval, optlen,
+               return udp_lib_setsockopt(sk, level, optname,
+                                         optval, optlen,
                                          udp_v6_push_pending_frames);
        return ipv6_setsockopt(sk, level, optname, optval, optlen);
 }
 
-#ifdef CONFIG_COMPAT
-int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
-                           char __user *optval, unsigned int optlen)
-{
-       if (level == SOL_UDP  ||  level == SOL_UDPLITE)
-               return udp_lib_setsockopt(sk, level, optname, optval, optlen,
-                                         udp_v6_push_pending_frames);
-       return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
-}
-#endif
-
 int udpv6_getsockopt(struct sock *sk, int level, int optname,
                     char __user *optval, int __user *optlen)
 {
@@ -1586,16 +1636,6 @@ int udpv6_getsockopt(struct sock *sk, int level, int optname,
        return ipv6_getsockopt(sk, level, optname, optval, optlen);
 }
 
-#ifdef CONFIG_COMPAT
-int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
-                           char __user *optval, int __user *optlen)
-{
-       if (level == SOL_UDP  ||  level == SOL_UDPLITE)
-               return udp_lib_getsockopt(sk, level, optname, optval, optlen);
-       return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
-}
-#endif
-
 /* thinking of making this const? Don't.
  * early_demux can change based on sysctl.
  */
@@ -1678,10 +1718,6 @@ struct proto udpv6_prot = {
        .sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
        .obj_size               = sizeof(struct udp6_sock),
        .h.udp_table            = &udp_table,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt      = compat_udpv6_setsockopt,
-       .compat_getsockopt      = compat_udpv6_getsockopt,
-#endif
        .diag_destroy           = udp_abort,
 };
 
index 20e324b..b2fcc46 100644 (file)
@@ -17,14 +17,8 @@ void udp_v6_rehash(struct sock *sk);
 
 int udpv6_getsockopt(struct sock *sk, int level, int optname,
                     char __user *optval, int __user *optlen);
-int udpv6_setsockopt(struct sock *sk, int level, int optname,
-                    char __user *optval, unsigned int optlen);
-#ifdef CONFIG_COMPAT
-int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
-                           char __user *optval, unsigned int optlen);
-int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
-                           char __user *optval, int __user *optlen);
-#endif
+int udpv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
+                    unsigned int optlen);
 int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
 int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
                  int flags, int *addr_len);
index bf7a7ac..fbb700d 100644 (file)
@@ -52,10 +52,6 @@ struct proto udplitev6_prot = {
        .sysctl_mem        = sysctl_udp_mem,
        .obj_size          = sizeof(struct udp6_sock),
        .h.udp_table       = &udplite_table,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_udpv6_setsockopt,
-       .compat_getsockopt = compat_udpv6_getsockopt,
-#endif
 };
 
 static struct inet_protosw udplite6_protosw = {
index ee0add1..6ee9851 100644 (file)
@@ -1494,7 +1494,7 @@ static int iucv_sock_release(struct socket *sock)
 
 /* getsockopt and setsockopt */
 static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
-                               char __user *optval, unsigned int optlen)
+                               sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
@@ -1507,7 +1507,7 @@ static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
        if (optlen < sizeof(int))
                return -EINVAL;
 
-       if (get_user(val, (int __user *) optval))
+       if (copy_from_sockptr(&val, optval, sizeof(int)))
                return -EFAULT;
 
        rc = 0;
index 56fac24..56dad95 100644 (file)
@@ -1265,7 +1265,7 @@ static void kcm_recv_enable(struct kcm_sock *kcm)
 }
 
 static int kcm_setsockopt(struct socket *sock, int level, int optname,
-                         char __user *optval, unsigned int optlen)
+                         sockptr_t optval, unsigned int optlen)
 {
        struct kcm_sock *kcm = kcm_sk(sock->sk);
        int val, valbool;
@@ -1277,8 +1277,8 @@ static int kcm_setsockopt(struct socket *sock, int level, int optname,
        if (optlen < sizeof(int))
                return -EINVAL;
 
-       if (get_user(val, (int __user *)optval))
-               return -EINVAL;
+       if (copy_from_sockptr(&val, optval, sizeof(int)))
+               return -EFAULT;
 
        valbool = val ? 1 : 0;
 
index b67ed3a..f13626c 100644 (file)
@@ -3734,8 +3734,6 @@ static const struct proto_ops pfkey_ops = {
        .ioctl          =       sock_no_ioctl,
        .listen         =       sock_no_listen,
        .shutdown       =       sock_no_shutdown,
-       .setsockopt     =       sock_no_setsockopt,
-       .getsockopt     =       sock_no_getsockopt,
        .mmap           =       sock_no_mmap,
        .sendpage       =       sock_no_sendpage,
 
index 6d7ef78..e723828 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/*
- * L2TP core.
+/* L2TP core.
  *
  * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
  *
@@ -94,7 +93,7 @@ struct l2tp_skb_cb {
        unsigned long           expires;
 };
 
-#define L2TP_SKB_CB(skb)       ((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
+#define L2TP_SKB_CB(skb)       ((struct l2tp_skb_cb *)&(skb)->cb[sizeof(struct inet_skb_parm)])
 
 static struct workqueue_struct *l2tp_wq;
 
@@ -102,8 +101,10 @@ static struct workqueue_struct *l2tp_wq;
 static unsigned int l2tp_net_id;
 struct l2tp_net {
        struct list_head l2tp_tunnel_list;
+       /* Lock for write access to l2tp_tunnel_list */
        spinlock_t l2tp_tunnel_list_lock;
        struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
+       /* Lock for write access to l2tp_session_hlist */
        spinlock_t l2tp_session_hlist_lock;
 };
 
@@ -134,7 +135,6 @@ static inline struct hlist_head *
 l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
 {
        return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
-
 }
 
 /* Session hash list.
@@ -412,7 +412,7 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *
        }
 
        /* call private receive handler */
-       if (session->recv_skb != NULL)
+       if (session->recv_skb)
                (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
        else
                kfree_skb(skb);
@@ -621,8 +621,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
                      int length)
 {
        struct l2tp_tunnel *tunnel = session->tunnel;
+       u32 ns = 0, nr = 0;
        int offset;
-       u32 ns, nr;
 
        /* Parse and check optional cookie */
        if (session->peer_cookie_len > 0) {
@@ -644,13 +644,12 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
         * the control of the LNS.  If no sequence numbers present but
         * we were expecting them, discard frame.
         */
-       ns = nr = 0;
        L2TP_SKB_CB(skb)->has_seq = 0;
        if (tunnel->version == L2TP_HDR_VER_2) {
                if (hdrflags & L2TP_HDRFLAG_S) {
-                       ns = ntohs(*(__be16 *) ptr);
+                       ns = ntohs(*(__be16 *)ptr);
                        ptr += 2;
-                       nr = ntohs(*(__be16 *) ptr);
+                       nr = ntohs(*(__be16 *)ptr);
                        ptr += 2;
 
                        /* Store L2TP info in the skb */
@@ -662,7 +661,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
                                 session->name, ns, nr, session->nr);
                }
        } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
-               u32 l2h = ntohl(*(__be32 *) ptr);
+               u32 l2h = ntohl(*(__be32 *)ptr);
 
                if (l2h & 0x40000000) {
                        ns = l2h & 0x00ffffff;
@@ -679,11 +678,11 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
        }
 
        if (L2TP_SKB_CB(skb)->has_seq) {
-               /* Received a packet with sequence numbers. If we're the LNS,
+               /* Received a packet with sequence numbers. If we're the LAC,
                 * check if we sre sending sequence numbers and if not,
                 * configure it so.
                 */
-               if ((!session->lns_mode) && (!session->send_seq)) {
+               if (!session->lns_mode && !session->send_seq) {
                        l2tp_info(session, L2TP_MSG_SEQ,
                                  "%s: requested to enable seq numbers by LNS\n",
                                  session->name);
@@ -707,7 +706,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
                 * If we're the LNS and we're sending sequence numbers, the
                 * LAC is broken. Discard the frame.
                 */
-               if ((!session->lns_mode) && (session->send_seq)) {
+               if (!session->lns_mode && session->send_seq) {
                        l2tp_info(session, L2TP_MSG_SEQ,
                                  "%s: requested to disable seq numbers by LNS\n",
                                  session->name);
@@ -774,16 +773,17 @@ EXPORT_SYMBOL(l2tp_recv_common);
 
 /* Drop skbs from the session's reorder_q
  */
-static int l2tp_session_queue_purge(struct l2tp_session *session)
+static void l2tp_session_queue_purge(struct l2tp_session *session)
 {
        struct sk_buff *skb = NULL;
-       BUG_ON(!session);
-       BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+
+       if (WARN_ON(session->magic != L2TP_SESSION_MAGIC))
+               return;
+
        while ((skb = skb_dequeue(&session->reorder_q))) {
                atomic_long_inc(&session->stats.rx_errors);
                kfree_skb(skb);
        }
-       return 0;
 }
 
 /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
@@ -825,10 +825,11 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
        }
 
        /* Point to L2TP header */
-       optr = ptr = skb->data;
+       optr = skb->data;
+       ptr = skb->data;
 
        /* Get L2TP header flags */
-       hdrflags = ntohs(*(__be16 *) ptr);
+       hdrflags = ntohs(*(__be16 *)ptr);
 
        /* Check protocol version */
        version = hdrflags & L2TP_HDR_VER_MASK;
@@ -859,14 +860,14 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
                        ptr += 2;
 
                /* Extract tunnel and session ID */
-               tunnel_id = ntohs(*(__be16 *) ptr);
+               tunnel_id = ntohs(*(__be16 *)ptr);
                ptr += 2;
-               session_id = ntohs(*(__be16 *) ptr);
+               session_id = ntohs(*(__be16 *)ptr);
                ptr += 2;
        } else {
                ptr += 2;       /* skip reserved bits */
                tunnel_id = tunnel->tunnel_id;
-               session_id = ntohl(*(__be32 *) ptr);
+               session_id = ntohl(*(__be32 *)ptr);
                ptr += 4;
        }
 
@@ -910,7 +911,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
        struct l2tp_tunnel *tunnel;
 
        tunnel = rcu_dereference_sk_user_data(sk);
-       if (tunnel == NULL)
+       if (!tunnel)
                goto pass_up;
 
        l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n",
@@ -971,13 +972,13 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
         */
        if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
                u16 flags = L2TP_HDR_VER_3;
-               *((__be16 *) bufp) = htons(flags);
+               *((__be16 *)bufp) = htons(flags);
                bufp += 2;
-               *((__be16 *) bufp) = 0;
+               *((__be16 *)bufp) = 0;
                bufp += 2;
        }
 
-       *((__be32 *) bufp) = htonl(session->peer_session_id);
+       *((__be32 *)bufp) = htonl(session->peer_session_id);
        bufp += 4;
        if (session->cookie_len) {
                memcpy(bufp, &session->cookie[0], session->cookie_len);
@@ -1028,6 +1029,7 @@ static void l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
 
        /* Queue the packet to IP for output */
        skb->ignore_df = 1;
+       skb_dst_drop(skb);
 #if IS_ENABLED(CONFIG_IPV6)
        if (l2tp_sk_is_v6(tunnel->sock))
                error = inet6_csk_xmit(tunnel->sock, skb, NULL);
@@ -1099,10 +1101,6 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
                goto out_unlock;
        }
 
-       /* Get routing info from the tunnel socket */
-       skb_dst_drop(skb);
-       skb_dst_set(skb, sk_dst_check(sk, 0));
-
        inet = inet_sk(sk);
        fl = &inet->cork.fl;
        switch (tunnel->encap) {
@@ -1124,8 +1122,8 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
                                      &sk->sk_v6_daddr, udp_len);
                else
 #endif
-               udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
-                            inet->inet_daddr, udp_len);
+                       udp_set_csum(sk->sk_no_check_tx, skb, inet->inet_saddr,
+                                    inet->inet_daddr, udp_len);
                break;
 
        case L2TP_ENCAPTYPE_IP:
@@ -1152,7 +1150,7 @@ static void l2tp_tunnel_destruct(struct sock *sk)
 {
        struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
 
-       if (tunnel == NULL)
+       if (!tunnel)
                goto end;
 
        l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
@@ -1191,8 +1189,6 @@ static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
        struct hlist_node *tmp;
        struct l2tp_session *session;
 
-       BUG_ON(tunnel == NULL);
-
        l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n",
                  tunnel->name);
 
@@ -1216,7 +1212,7 @@ again:
                        __l2tp_session_unhash(session);
                        l2tp_session_queue_purge(session);
 
-                       if (session->session_close != NULL)
+                       if (session->session_close)
                                (*session->session_close)(session);
 
                        l2tp_session_dec_refcount(session);
@@ -1287,10 +1283,10 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
  * exit hook.
  */
 static int l2tp_tunnel_sock_create(struct net *net,
-                               u32 tunnel_id,
-                               u32 peer_tunnel_id,
-                               struct l2tp_tunnel_cfg *cfg,
-                               struct socket **sockp)
+                                  u32 tunnel_id,
+                                  u32 peer_tunnel_id,
+                                  struct l2tp_tunnel_cfg *cfg,
+                                  struct socket **sockp)
 {
        int err = -EINVAL;
        struct socket *sock = NULL;
@@ -1308,9 +1304,9 @@ static int l2tp_tunnel_sock_create(struct net *net,
                        memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
                               sizeof(udp_conf.peer_ip6));
                        udp_conf.use_udp6_tx_checksums =
-                         ! cfg->udp6_zero_tx_checksums;
+                         !cfg->udp6_zero_tx_checksums;
                        udp_conf.use_udp6_rx_checksums =
-                         ! cfg->udp6_zero_rx_checksums;
+                         !cfg->udp6_zero_rx_checksums;
                } else
 #endif
                {
@@ -1335,7 +1331,7 @@ static int l2tp_tunnel_sock_create(struct net *net,
                        struct sockaddr_l2tpip6 ip6_addr = {0};
 
                        err = sock_create_kern(net, AF_INET6, SOCK_DGRAM,
-                                         IPPROTO_L2TP, &sock);
+                                              IPPROTO_L2TP, &sock);
                        if (err < 0)
                                goto out;
 
@@ -1343,7 +1339,7 @@ static int l2tp_tunnel_sock_create(struct net *net,
                        memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
                               sizeof(ip6_addr.l2tp_addr));
                        ip6_addr.l2tp_conn_id = tunnel_id;
-                       err = kernel_bind(sock, (struct sockaddr *) &ip6_addr,
+                       err = kernel_bind(sock, (struct sockaddr *)&ip6_addr,
                                          sizeof(ip6_addr));
                        if (err < 0)
                                goto out;
@@ -1353,7 +1349,7 @@ static int l2tp_tunnel_sock_create(struct net *net,
                               sizeof(ip6_addr.l2tp_addr));
                        ip6_addr.l2tp_conn_id = peer_tunnel_id;
                        err = kernel_connect(sock,
-                                            (struct sockaddr *) &ip6_addr,
+                                            (struct sockaddr *)&ip6_addr,
                                             sizeof(ip6_addr), 0);
                        if (err < 0)
                                goto out;
@@ -1363,14 +1359,14 @@ static int l2tp_tunnel_sock_create(struct net *net,
                        struct sockaddr_l2tpip ip_addr = {0};
 
                        err = sock_create_kern(net, AF_INET, SOCK_DGRAM,
-                                         IPPROTO_L2TP, &sock);
+                                              IPPROTO_L2TP, &sock);
                        if (err < 0)
                                goto out;
 
                        ip_addr.l2tp_family = AF_INET;
                        ip_addr.l2tp_addr = cfg->local_ip;
                        ip_addr.l2tp_conn_id = tunnel_id;
-                       err = kernel_bind(sock, (struct sockaddr *) &ip_addr,
+                       err = kernel_bind(sock, (struct sockaddr *)&ip_addr,
                                          sizeof(ip_addr));
                        if (err < 0)
                                goto out;
@@ -1378,7 +1374,7 @@ static int l2tp_tunnel_sock_create(struct net *net,
                        ip_addr.l2tp_family = AF_INET;
                        ip_addr.l2tp_addr = cfg->peer_ip;
                        ip_addr.l2tp_conn_id = peer_tunnel_id;
-                       err = kernel_connect(sock, (struct sockaddr *) &ip_addr,
+                       err = kernel_connect(sock, (struct sockaddr *)&ip_addr,
                                             sizeof(ip_addr), 0);
                        if (err < 0)
                                goto out;
@@ -1391,7 +1387,7 @@ static int l2tp_tunnel_sock_create(struct net *net,
 
 out:
        *sockp = sock;
-       if ((err < 0) && sock) {
+       if (err < 0 && sock) {
                kernel_sock_shutdown(sock, SHUT_RDWR);
                sock_release(sock);
                *sockp = NULL;
@@ -1402,17 +1398,18 @@ out:
 
 static struct lock_class_key l2tp_socket_class;
 
-int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
+int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
+                      struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
 {
        struct l2tp_tunnel *tunnel = NULL;
        int err;
        enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
 
-       if (cfg != NULL)
+       if (cfg)
                encap = cfg->encap;
 
-       tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
-       if (tunnel == NULL) {
+       tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
+       if (!tunnel) {
                err = -ENOMEM;
                goto err;
        }
@@ -1427,7 +1424,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
        rwlock_init(&tunnel->hlist_lock);
        tunnel->acpt_newsess = true;
 
-       if (cfg != NULL)
+       if (cfg)
                tunnel->debug = cfg->debug;
 
        tunnel->encap = encap;
@@ -1566,13 +1563,13 @@ void l2tp_session_free(struct l2tp_session *session)
 {
        struct l2tp_tunnel *tunnel = session->tunnel;
 
-       BUG_ON(refcount_read(&session->ref_count) != 0);
-
        if (tunnel) {
-               BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
+               if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
+                       goto out;
                l2tp_tunnel_dec_refcount(tunnel);
        }
 
+out:
        kfree(session);
 }
 EXPORT_SYMBOL_GPL(l2tp_session_free);
@@ -1596,6 +1593,7 @@ void __l2tp_session_unhash(struct l2tp_session *session)
                /* For L2TPv3 we have a per-net hash: remove from there, too */
                if (tunnel->version != L2TP_HDR_VER_2) {
                        struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
+
                        spin_lock_bh(&pn->l2tp_session_hlist_lock);
                        hlist_del_init_rcu(&session->global_hlist);
                        spin_unlock_bh(&pn->l2tp_session_hlist_lock);
@@ -1606,7 +1604,7 @@ void __l2tp_session_unhash(struct l2tp_session *session)
 EXPORT_SYMBOL_GPL(__l2tp_session_unhash);
 
 /* This function is used by the netlink SESSION_DELETE command and by
  * pseudowire modules.
  */
 int l2tp_session_delete(struct l2tp_session *session)
 {
@@ -1615,7 +1613,7 @@ int l2tp_session_delete(struct l2tp_session *session)
 
        __l2tp_session_unhash(session);
        l2tp_session_queue_purge(session);
-       if (session->session_close != NULL)
+       if (session->session_close)
                (*session->session_close)(session);
 
        l2tp_session_dec_refcount(session);
@@ -1639,16 +1637,16 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
                if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
                        session->hdr_len += 4;
        }
-
 }
 EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
 
-struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
+struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id,
+                                        u32 peer_session_id, struct l2tp_session_cfg *cfg)
 {
        struct l2tp_session *session;
 
-       session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
-       if (session != NULL) {
+       session = kzalloc(sizeof(*session) + priv_size, GFP_KERNEL);
+       if (session) {
                session->magic = L2TP_SESSION_MAGIC;
                session->tunnel = tunnel;
 
index 10cf7c3..2d2dd21 100644 (file)
@@ -1,6 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * L2TP internal definitions.
+/* L2TP internal definitions.
  *
  * Copyright (c) 2008,2009 Katalix Systems Ltd
  */
 
 /* Per tunnel, session hash table size */
 #define L2TP_HASH_BITS 4
-#define L2TP_HASH_SIZE (1 << L2TP_HASH_BITS)
+#define L2TP_HASH_SIZE BIT(L2TP_HASH_BITS)
 
 /* System-wide, session hash table size */
 #define L2TP_HASH_BITS_2       8
-#define L2TP_HASH_SIZE_2       (1 << L2TP_HASH_BITS_2)
+#define L2TP_HASH_SIZE_2       BIT(L2TP_HASH_BITS_2)
 
 struct sk_buff;
 
@@ -49,32 +48,26 @@ struct l2tp_tunnel;
  */
 struct l2tp_session_cfg {
        enum l2tp_pwtype        pw_type;
-       unsigned int            recv_seq:1;     /* expect receive packets with
-                                                * sequence numbers? */
-       unsigned int            send_seq:1;     /* send packets with sequence
-                                                * numbers? */
-       unsigned int            lns_mode:1;     /* behave as LNS? LAC enables
-                                                * sequence numbers under
-                                                * control of LNS. */
-       int                     debug;          /* bitmask of debug message
-                                                * categories */
+       unsigned int            recv_seq:1;     /* expect receive packets with sequence numbers? */
+       unsigned int            send_seq:1;     /* send packets with sequence numbers? */
+       unsigned int            lns_mode:1;     /* behave as LNS?
+                                                * LAC enables sequence numbers under LNS control.
+                                                */
+       int                     debug;          /* bitmask of debug message categories */
        u16                     l2specific_type; /* Layer 2 specific type */
        u8                      cookie[8];      /* optional cookie */
        int                     cookie_len;     /* 0, 4 or 8 bytes */
        u8                      peer_cookie[8]; /* peer's cookie */
        int                     peer_cookie_len; /* 0, 4 or 8 bytes */
-       int                     reorder_timeout; /* configured reorder timeout
-                                                 * (in jiffies) */
+       int                     reorder_timeout; /* configured reorder timeout (in jiffies) */
        char                    *ifname;
 };
 
 struct l2tp_session {
-       int                     magic;          /* should be
-                                                * L2TP_SESSION_MAGIC */
+       int                     magic;          /* should be L2TP_SESSION_MAGIC */
        long                    dead;
 
-       struct l2tp_tunnel      *tunnel;        /* back pointer to tunnel
-                                                * context */
+       struct l2tp_tunnel      *tunnel;        /* back pointer to tunnel context */
        u32                     session_id;
        u32                     peer_session_id;
        u8                      cookie[8];
@@ -89,42 +82,37 @@ struct l2tp_session {
        u32                     nr_max;         /* max NR. Depends on tunnel */
        u32                     nr_window_size; /* NR window size */
        u32                     nr_oos;         /* NR of last OOS packet */
-       int                     nr_oos_count;   /* For OOS recovery */
+       int                     nr_oos_count;   /* for OOS recovery */
        int                     nr_oos_count_max;
-       struct hlist_node       hlist;          /* Hash list node */
+       struct hlist_node       hlist;          /* hash list node */
        refcount_t              ref_count;
 
        char                    name[32];       /* for logging */
        char                    ifname[IFNAMSIZ];
-       unsigned int            recv_seq:1;     /* expect receive packets with
-                                                * sequence numbers? */
-       unsigned int            send_seq:1;     /* send packets with sequence
-                                                * numbers? */
-       unsigned int            lns_mode:1;     /* behave as LNS? LAC enables
-                                                * sequence numbers under
-                                                * control of LNS. */
-       int                     debug;          /* bitmask of debug message
-                                                * categories */
-       int                     reorder_timeout; /* configured reorder timeout
-                                                 * (in jiffies) */
+       unsigned int            recv_seq:1;     /* expect receive packets with sequence numbers? */
+       unsigned int            send_seq:1;     /* send packets with sequence numbers? */
+       unsigned int            lns_mode:1;     /* behave as LNS?
+                                                * LAC enables sequence numbers under LNS control.
+                                                */
+       int                     debug;          /* bitmask of debug message categories */
+       int                     reorder_timeout; /* configured reorder timeout (in jiffies) */
        int                     reorder_skip;   /* set if skip to next nr */
        enum l2tp_pwtype        pwtype;
        struct l2tp_stats       stats;
-       struct hlist_node       global_hlist;   /* Global hash list node */
+       struct hlist_node       global_hlist;   /* global hash list node */
 
        int (*build_header)(struct l2tp_session *session, void *buf);
        void (*recv_skb)(struct l2tp_session *session, struct sk_buff *skb, int data_len);
        void (*session_close)(struct l2tp_session *session);
        void (*show)(struct seq_file *m, void *priv);
-       u8                      priv[]; /* private data */
+       u8                      priv[];         /* private data */
 };
 
 /* Describes the tunnel. It contains info to track all the associated
  * sessions so incoming packets can be sorted out
  */
 struct l2tp_tunnel_cfg {
-       int                     debug;          /* bitmask of debug message
-                                                * categories */
+       int                     debug;          /* bitmask of debug message categories */
        enum l2tp_encap_type    encap;
 
        /* Used only for kernel-created sockets */
@@ -148,31 +136,29 @@ struct l2tp_tunnel {
 
        struct rcu_head rcu;
        rwlock_t                hlist_lock;     /* protect session_hlist */
-       bool                    acpt_newsess;   /* Indicates whether this
-                                                * tunnel accepts new sessions.
-                                                * Protected by hlist_lock.
+       bool                    acpt_newsess;   /* indicates whether this tunnel accepts
+                                                * new sessions. Protected by hlist_lock.
                                                 */
        struct hlist_head       session_hlist[L2TP_HASH_SIZE];
-                                               /* hashed list of sessions,
-                                                * hashed by id */
+                                               /* hashed list of sessions, hashed by id */
        u32                     tunnel_id;
        u32                     peer_tunnel_id;
        int                     version;        /* 2=>L2TPv2, 3=>L2TPv3 */
 
        char                    name[20];       /* for logging */
-       int                     debug;          /* bitmask of debug message
-                                                * categories */
+       int                     debug;          /* bitmask of debug message categories */
        enum l2tp_encap_type    encap;
        struct l2tp_stats       stats;
 
-       struct list_head        list;           /* Keep a list of all tunnels */
+       struct list_head        list;           /* list node on per-namespace list of tunnels */
        struct net              *l2tp_net;      /* the net we belong to */
 
        refcount_t              ref_count;
-       void (*old_sk_destruct)(struct sock *);
-       struct sock             *sock;          /* Parent socket */
-       int                     fd;             /* Parent fd, if tunnel socket
-                                                * was created by userspace */
+       void (*old_sk_destruct)(struct sock *sk);
+       struct sock             *sock;          /* parent socket */
+       int                     fd;             /* parent fd, if tunnel socket was created
+                                                * by userspace
+                                                */
 
        struct work_struct      del_work;
 };
index 35bb4f3..96cb960 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * L2TP subsystem debugfs
+/* L2TP subsystem debugfs
  *
  * Copyright (c) 2010 Katalix Systems Ltd
  */
@@ -59,11 +58,10 @@ static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
        pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx);
        pd->session_idx++;
 
-       if (pd->session == NULL) {
+       if (!pd->session) {
                pd->session_idx = 0;
                l2tp_dfs_next_tunnel(pd);
        }
-
 }
 
 static void *l2tp_dfs_seq_start(struct seq_file *m, loff_t *offs)
@@ -74,23 +72,25 @@ static void *l2tp_dfs_seq_start(struct seq_file *m, loff_t *offs)
        if (!pos)
                goto out;
 
-       BUG_ON(m->private == NULL);
+       if (WARN_ON(!m->private)) {
+               pd = NULL;
+               goto out;
+       }
        pd = m->private;
 
-       if (pd->tunnel == NULL)
+       if (!pd->tunnel)
                l2tp_dfs_next_tunnel(pd);
        else
                l2tp_dfs_next_session(pd);
 
        /* NULL tunnel and session indicates end of list */
-       if ((pd->tunnel == NULL) && (pd->session == NULL))
+       if (!pd->tunnel && !pd->session)
                pd = NULL;
 
 out:
        return pd;
 }
 
-
 static void *l2tp_dfs_seq_next(struct seq_file *m, void *v, loff_t *pos)
 {
        (*pos)++;
@@ -148,11 +148,13 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
                        const struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
 
                        seq_printf(m, " from %pI6c to %pI6c\n",
-                               &np->saddr, &tunnel->sock->sk_v6_daddr);
-               } else
+                                  &np->saddr, &tunnel->sock->sk_v6_daddr);
+               }
 #endif
-               seq_printf(m, " from %pI4 to %pI4\n",
-                          &inet->inet_saddr, &inet->inet_daddr);
+               if (tunnel->sock->sk_family == AF_INET)
+                       seq_printf(m, " from %pI4 to %pI4\n",
+                                  &inet->inet_saddr, &inet->inet_daddr);
+
                if (tunnel->encap == L2TP_ENCAPTYPE_UDP)
                        seq_printf(m, " source port %hu, dest port %hu\n",
                                   ntohs(inet->inet_sport), ntohs(inet->inet_dport));
@@ -202,7 +204,7 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
                        seq_printf(m, "%02x%02x%02x%02x",
                                   session->cookie[4], session->cookie[5],
                                   session->cookie[6], session->cookie[7]);
-               seq_printf(m, "\n");
+               seq_puts(m, "\n");
        }
        if (session->peer_cookie_len) {
                seq_printf(m, "   peer cookie %02x%02x%02x%02x",
@@ -212,7 +214,7 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
                        seq_printf(m, "%02x%02x%02x%02x",
                                   session->peer_cookie[4], session->peer_cookie[5],
                                   session->peer_cookie[6], session->peer_cookie[7]);
-               seq_printf(m, "\n");
+               seq_puts(m, "\n");
        }
 
        seq_printf(m, "   %hu/%hu tx %ld/%ld/%ld rx %ld/%ld/%ld\n",
@@ -224,7 +226,7 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
                   atomic_long_read(&session->stats.rx_bytes),
                   atomic_long_read(&session->stats.rx_errors));
 
-       if (session->show != NULL)
+       if (session->show)
                session->show(m, session);
 }
 
@@ -271,7 +273,7 @@ static int l2tp_dfs_seq_open(struct inode *inode, struct file *file)
        int rc = -ENOMEM;
 
        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
-       if (pd == NULL)
+       if (!pd)
                goto out;
 
        /* Derive the network namespace from the pid opening the
index fd5ac27..7ed2b4e 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * L2TPv3 ethernet pseudowire driver
+/* L2TPv3 ethernet pseudowire driver
  *
  * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
  */
@@ -51,7 +50,6 @@ struct l2tp_eth_sess {
        struct net_device __rcu *dev;
 };
 
-
 static int l2tp_eth_dev_init(struct net_device *dev)
 {
        eth_hw_addr_random(dev);
@@ -73,7 +71,7 @@ static void l2tp_eth_dev_uninit(struct net_device *dev)
         */
 }
 
-static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct l2tp_eth *priv = netdev_priv(dev);
        struct l2tp_session *session = priv->session;
@@ -94,13 +92,12 @@ static void l2tp_eth_get_stats64(struct net_device *dev,
 {
        struct l2tp_eth *priv = netdev_priv(dev);
 
-       stats->tx_bytes   = (unsigned long) atomic_long_read(&priv->tx_bytes);
-       stats->tx_packets = (unsigned long) atomic_long_read(&priv->tx_packets);
-       stats->tx_dropped = (unsigned long) atomic_long_read(&priv->tx_dropped);
-       stats->rx_bytes   = (unsigned long) atomic_long_read(&priv->rx_bytes);
-       stats->rx_packets = (unsigned long) atomic_long_read(&priv->rx_packets);
-       stats->rx_errors  = (unsigned long) atomic_long_read(&priv->rx_errors);
-
+       stats->tx_bytes   = (unsigned long)atomic_long_read(&priv->tx_bytes);
+       stats->tx_packets = (unsigned long)atomic_long_read(&priv->tx_packets);
+       stats->tx_dropped = (unsigned long)atomic_long_read(&priv->tx_dropped);
+       stats->rx_bytes   = (unsigned long)atomic_long_read(&priv->rx_bytes);
+       stats->rx_packets = (unsigned long)atomic_long_read(&priv->rx_packets);
+       stats->rx_errors  = (unsigned long)atomic_long_read(&priv->rx_errors);
 }
 
 static const struct net_device_ops l2tp_eth_netdev_ops = {
@@ -348,13 +345,11 @@ err:
        return rc;
 }
 
-
 static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = {
        .session_create = l2tp_eth_create,
        .session_delete = l2tp_session_delete,
 };
 
-
 static int __init l2tp_eth_init(void)
 {
        int err = 0;
index 955662a..a159cb2 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * L2TPv3 IP encapsulation support
+/* L2TPv3 IP encapsulation support
  *
  * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
  */
@@ -125,8 +124,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
                goto discard;
 
        /* Point to L2TP header */
-       optr = ptr = skb->data;
-       session_id = ntohl(*((__be32 *) ptr));
+       optr = skb->data;
+       ptr = skb->data;
+       session_id = ntohl(*((__be32 *)ptr));
        ptr += 4;
 
        /* RFC3931: L2TP/IP packets have the first 4 bytes containing
@@ -154,7 +154,8 @@ static int l2tp_ip_recv(struct sk_buff *skb)
                        goto discard_sess;
 
                /* Point to L2TP header */
-               optr = ptr = skb->data;
+               optr = skb->data;
+               ptr = skb->data;
                ptr += 4;
                pr_debug("%s: ip recv\n", tunnel->name);
                print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
@@ -176,7 +177,7 @@ pass_up:
        if ((skb->data[0] & 0xc0) != 0xc0)
                goto discard;
 
-       tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
+       tunnel_id = ntohl(*(__be32 *)&skb->data[4]);
        iph = (struct iphdr *)skb_network_header(skb);
 
        read_lock_bh(&l2tp_ip_lock);
@@ -260,7 +261,7 @@ static void l2tp_ip_destroy_sock(struct sock *sk)
 static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
        struct inet_sock *inet = inet_sk(sk);
-       struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
+       struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *)uaddr;
        struct net *net = sock_net(sk);
        int ret;
        int chk_addr_ret;
@@ -285,8 +286,10 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
            chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
                goto out;
 
-       if (addr->l2tp_addr.s_addr)
-               inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
+       if (addr->l2tp_addr.s_addr) {
+               inet->inet_rcv_saddr = addr->l2tp_addr.s_addr;
+               inet->inet_saddr = addr->l2tp_addr.s_addr;
+       }
        if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
                inet->inet_saddr = 0;  /* Use device */
 
@@ -316,7 +319,7 @@ out:
 
 static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
-       struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
+       struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
        int rc;
 
        if (addr_len < sizeof(*lsa))
@@ -375,6 +378,7 @@ static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
                lsa->l2tp_addr.s_addr = inet->inet_daddr;
        } else {
                __be32 addr = inet->inet_rcv_saddr;
+
                if (!addr)
                        addr = inet->inet_saddr;
                lsa->l2tp_conn_id = lsk->conn_id;
@@ -422,6 +426,7 @@ static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        /* Get and verify the address. */
        if (msg->msg_name) {
                DECLARE_SOCKADDR(struct sockaddr_l2tpip *, lip, msg->msg_name);
+
                rc = -EINVAL;
                if (msg->msg_namelen < sizeof(*lip))
                        goto out;
@@ -456,7 +461,7 @@ static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        skb_reset_transport_header(skb);
 
        /* Insert 0 session_id */
-       *((__be32 *) skb_put(skb, 4)) = 0;
+       *((__be32 *)skb_put(skb, 4)) = 0;
 
        /* Copy user data into skb */
        rc = memcpy_from_msg(skb_put(skb, len), msg, len);
@@ -467,10 +472,10 @@ static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
        fl4 = &inet->cork.fl.u.ip4;
        if (connected)
-               rt = (struct rtable *) __sk_dst_check(sk, 0);
+               rt = (struct rtable *)__sk_dst_check(sk, 0);
 
        rcu_read_lock();
-       if (rt == NULL) {
+       if (!rt) {
                const struct ip_options_rcu *inet_opt;
 
                inet_opt = rcu_dereference(inet->inet_opt);
@@ -612,10 +617,6 @@ static struct proto l2tp_ip_prot = {
        .hash              = l2tp_ip_hash,
        .unhash            = l2tp_ip_unhash,
        .obj_size          = sizeof(struct l2tp_ip_sock),
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_ip_setsockopt,
-       .compat_getsockopt = compat_ip_getsockopt,
-#endif
 };
 
 static const struct proto_ops l2tp_ip_ops = {
@@ -638,10 +639,6 @@ static const struct proto_ops l2tp_ip_ops = {
        .recvmsg           = sock_common_recvmsg,
        .mmap              = sock_no_mmap,
        .sendpage          = sock_no_sendpage,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_sock_common_setsockopt,
-       .compat_getsockopt = compat_sock_common_getsockopt,
-#endif
 };
 
 static struct inet_protosw l2tp_ip_protosw = {
index 526ed2c..bc757bc 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * L2TPv3 IP encapsulation support for IPv6
+/* L2TPv3 IP encapsulation support for IPv6
  *
  * Copyright (c) 2012 Katalix Systems Ltd
  */
@@ -38,7 +37,8 @@ struct l2tp_ip6_sock {
        u32                     peer_conn_id;
 
        /* ipv6_pinfo has to be the last member of l2tp_ip6_sock, see
-          inet6_sk_generic */
+        * inet6_sk_generic
+        */
        struct ipv6_pinfo       inet6;
 };
 
@@ -137,8 +137,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
                goto discard;
 
        /* Point to L2TP header */
-       optr = ptr = skb->data;
-       session_id = ntohl(*((__be32 *) ptr));
+       optr = skb->data;
+       ptr = skb->data;
+       session_id = ntohl(*((__be32 *)ptr));
        ptr += 4;
 
        /* RFC3931: L2TP/IP packets have the first 4 bytes containing
@@ -166,7 +167,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
                        goto discard_sess;
 
                /* Point to L2TP header */
-               optr = ptr = skb->data;
+               optr = skb->data;
+               ptr = skb->data;
                ptr += 4;
                pr_debug("%s: ip recv\n", tunnel->name);
                print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
@@ -188,7 +190,7 @@ pass_up:
        if ((skb->data[0] & 0xc0) != 0xc0)
                goto discard;
 
-       tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
+       tunnel_id = ntohl(*(__be32 *)&skb->data[4]);
        iph = ipv6_hdr(skb);
 
        read_lock_bh(&l2tp_ip6_lock);
@@ -276,7 +278,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
        struct inet_sock *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
-       struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
+       struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *)uaddr;
        struct net *net = sock_net(sk);
        __be32 v4addr = 0;
        int bound_dev_if;
@@ -375,8 +377,8 @@ out_unlock:
 static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
                            int addr_len)
 {
-       struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *) uaddr;
-       struct sockaddr_in6     *usin = (struct sockaddr_in6 *) uaddr;
+       struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr;
+       struct sockaddr_in6     *usin = (struct sockaddr_in6 *)uaddr;
        struct in6_addr *daddr;
        int     addr_type;
        int rc;
@@ -486,7 +488,7 @@ static int l2tp_ip6_push_pending_frames(struct sock *sk)
        int err = 0;
 
        skb = skb_peek(&sk->sk_write_queue);
-       if (skb == NULL)
+       if (!skb)
                goto out;
 
        transhdr = (__be32 *)skb_transport_header(skb);
@@ -519,7 +521,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        int err;
 
        /* Rough check on arithmetic overflow,
-          better check is made in ip6_append_data().
+        * better check is made in ip6_append_data().
         */
        if (len > INT_MAX)
                return -EMSGSIZE;
@@ -528,9 +530,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;
 
-       /*
-        *      Get and verify the address.
-        */
+       /* Get and verify the address */
        memset(&fl6, 0, sizeof(fl6));
 
        fl6.flowi6_mark = sk->sk_mark;
@@ -548,15 +548,14 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                daddr = &lsa->l2tp_addr;
                if (np->sndflow) {
                        fl6.flowlabel = lsa->l2tp_flowinfo & IPV6_FLOWINFO_MASK;
-                       if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
+                       if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) {
                                flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
                                if (IS_ERR(flowlabel))
                                        return -EINVAL;
                        }
                }
 
-               /*
-                * Otherwise it will be difficult to maintain
+               /* Otherwise it will be difficult to maintain
                 * sk->sk_dst_cache.
                 */
                if (sk->sk_state == TCP_ESTABLISHED &&
@@ -594,7 +593,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                        if (IS_ERR(flowlabel))
                                return -EINVAL;
                }
-               if (!(opt->opt_nflen|opt->opt_flen))
+               if (!(opt->opt_nflen | opt->opt_flen))
                        opt = NULL;
        }
 
@@ -745,10 +744,6 @@ static struct proto l2tp_ip6_prot = {
        .hash              = l2tp_ip6_hash,
        .unhash            = l2tp_ip6_unhash,
        .obj_size          = sizeof(struct l2tp_ip6_sock),
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_ipv6_setsockopt,
-       .compat_getsockopt = compat_ipv6_getsockopt,
-#endif
 };
 
 static const struct proto_ops l2tp_ip6_ops = {
@@ -773,8 +768,6 @@ static const struct proto_ops l2tp_ip6_ops = {
        .sendpage          = sock_no_sendpage,
 #ifdef CONFIG_COMPAT
        .compat_ioctl      = inet6_compat_ioctl,
-       .compat_setsockopt = compat_sock_common_setsockopt,
-       .compat_getsockopt = compat_sock_common_getsockopt,
 #endif
 };
 
index ebb381c..35716a6 100644 (file)
@@ -1,6 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
-/*
- * L2TP netlink layer, for management
+/* L2TP netlink layer, for management
  *
  * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
  *
@@ -27,7 +26,6 @@
 
 #include "l2tp_core.h"
 
-
 static struct genl_family l2tp_nl_family;
 
 static const struct genl_multicast_group l2tp_multicast_group[] = {
@@ -157,81 +155,82 @@ static int l2tp_session_notify(struct genl_family *family,
        return ret;
 }
 
+static int l2tp_nl_cmd_tunnel_create_get_addr(struct nlattr **attrs, struct l2tp_tunnel_cfg *cfg)
+{
+       if (attrs[L2TP_ATTR_UDP_SPORT])
+               cfg->local_udp_port = nla_get_u16(attrs[L2TP_ATTR_UDP_SPORT]);
+       if (attrs[L2TP_ATTR_UDP_DPORT])
+               cfg->peer_udp_port = nla_get_u16(attrs[L2TP_ATTR_UDP_DPORT]);
+       cfg->use_udp_checksums = nla_get_flag(attrs[L2TP_ATTR_UDP_CSUM]);
+
+       /* Must have either AF_INET or AF_INET6 address for source and destination */
+#if IS_ENABLED(CONFIG_IPV6)
+       if (attrs[L2TP_ATTR_IP6_SADDR] && attrs[L2TP_ATTR_IP6_DADDR]) {
+               cfg->local_ip6 = nla_data(attrs[L2TP_ATTR_IP6_SADDR]);
+               cfg->peer_ip6 = nla_data(attrs[L2TP_ATTR_IP6_DADDR]);
+               cfg->udp6_zero_tx_checksums = nla_get_flag(attrs[L2TP_ATTR_UDP_ZERO_CSUM6_TX]);
+               cfg->udp6_zero_rx_checksums = nla_get_flag(attrs[L2TP_ATTR_UDP_ZERO_CSUM6_RX]);
+               return 0;
+       }
+#endif
+       if (attrs[L2TP_ATTR_IP_SADDR] && attrs[L2TP_ATTR_IP_DADDR]) {
+               cfg->local_ip.s_addr = nla_get_in_addr(attrs[L2TP_ATTR_IP_SADDR]);
+               cfg->peer_ip.s_addr = nla_get_in_addr(attrs[L2TP_ATTR_IP_DADDR]);
+               return 0;
+       }
+       return -EINVAL;
+}
+
 static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info)
 {
        u32 tunnel_id;
        u32 peer_tunnel_id;
        int proto_version;
-       int fd;
+       int fd = -1;
        int ret = 0;
        struct l2tp_tunnel_cfg cfg = { 0, };
        struct l2tp_tunnel *tunnel;
        struct net *net = genl_info_net(info);
+       struct nlattr **attrs = info->attrs;
 
-       if (!info->attrs[L2TP_ATTR_CONN_ID]) {
+       if (!attrs[L2TP_ATTR_CONN_ID]) {
                ret = -EINVAL;
                goto out;
        }
-       tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
+       tunnel_id = nla_get_u32(attrs[L2TP_ATTR_CONN_ID]);
 
-       if (!info->attrs[L2TP_ATTR_PEER_CONN_ID]) {
+       if (!attrs[L2TP_ATTR_PEER_CONN_ID]) {
                ret = -EINVAL;
                goto out;
        }
-       peer_tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_CONN_ID]);
+       peer_tunnel_id = nla_get_u32(attrs[L2TP_ATTR_PEER_CONN_ID]);
 
-       if (!info->attrs[L2TP_ATTR_PROTO_VERSION]) {
+       if (!attrs[L2TP_ATTR_PROTO_VERSION]) {
                ret = -EINVAL;
                goto out;
        }
-       proto_version = nla_get_u8(info->attrs[L2TP_ATTR_PROTO_VERSION]);
+       proto_version = nla_get_u8(attrs[L2TP_ATTR_PROTO_VERSION]);
 
-       if (!info->attrs[L2TP_ATTR_ENCAP_TYPE]) {
+       if (!attrs[L2TP_ATTR_ENCAP_TYPE]) {
                ret = -EINVAL;
                goto out;
        }
-       cfg.encap = nla_get_u16(info->attrs[L2TP_ATTR_ENCAP_TYPE]);
-
-       fd = -1;
-       if (info->attrs[L2TP_ATTR_FD]) {
-               fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]);
+       cfg.encap = nla_get_u16(attrs[L2TP_ATTR_ENCAP_TYPE]);
+
+       /* Managed tunnels take the tunnel socket from userspace.
+        * Unmanaged tunnels must call out the source and destination addresses
+        * for the kernel to create the tunnel socket itself.
+        */
+       if (attrs[L2TP_ATTR_FD]) {
+               fd = nla_get_u32(attrs[L2TP_ATTR_FD]);
        } else {
-#if IS_ENABLED(CONFIG_IPV6)
-               if (info->attrs[L2TP_ATTR_IP6_SADDR] &&
-                   info->attrs[L2TP_ATTR_IP6_DADDR]) {
-                       cfg.local_ip6 = nla_data(
-                               info->attrs[L2TP_ATTR_IP6_SADDR]);
-                       cfg.peer_ip6 = nla_data(
-                               info->attrs[L2TP_ATTR_IP6_DADDR]);
-               } else
-#endif
-               if (info->attrs[L2TP_ATTR_IP_SADDR] &&
-                   info->attrs[L2TP_ATTR_IP_DADDR]) {
-                       cfg.local_ip.s_addr = nla_get_in_addr(
-                               info->attrs[L2TP_ATTR_IP_SADDR]);
-                       cfg.peer_ip.s_addr = nla_get_in_addr(
-                               info->attrs[L2TP_ATTR_IP_DADDR]);
-               } else {
-                       ret = -EINVAL;
+               ret = l2tp_nl_cmd_tunnel_create_get_addr(attrs, &cfg);
+               if (ret < 0)
                        goto out;
-               }
-               if (info->attrs[L2TP_ATTR_UDP_SPORT])
-                       cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]);
-               if (info->attrs[L2TP_ATTR_UDP_DPORT])
-                       cfg.peer_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_DPORT]);
-               cfg.use_udp_checksums = nla_get_flag(
-                       info->attrs[L2TP_ATTR_UDP_CSUM]);
-
-#if IS_ENABLED(CONFIG_IPV6)
-               cfg.udp6_zero_tx_checksums = nla_get_flag(
-                       info->attrs[L2TP_ATTR_UDP_ZERO_CSUM6_TX]);
-               cfg.udp6_zero_rx_checksums = nla_get_flag(
-                       info->attrs[L2TP_ATTR_UDP_ZERO_CSUM6_RX]);
-#endif
        }
 
-       if (info->attrs[L2TP_ATTR_DEBUG])
-               cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
+       if (attrs[L2TP_ATTR_DEBUG])
+               cfg.debug = nla_get_u32(attrs[L2TP_ATTR_DEBUG]);
 
        ret = -EINVAL;
        switch (cfg.encap) {
@@ -320,16 +319,79 @@ out:
        return ret;
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
+static int l2tp_nl_tunnel_send_addr6(struct sk_buff *skb, struct sock *sk,
+                                    enum l2tp_encap_type encap)
+{
+       struct inet_sock *inet = inet_sk(sk);
+       struct ipv6_pinfo *np = inet6_sk(sk);
+
+       switch (encap) {
+       case L2TP_ENCAPTYPE_UDP:
+               if (udp_get_no_check6_tx(sk) &&
+                   nla_put_flag(skb, L2TP_ATTR_UDP_ZERO_CSUM6_TX))
+                       return -1;
+               if (udp_get_no_check6_rx(sk) &&
+                   nla_put_flag(skb, L2TP_ATTR_UDP_ZERO_CSUM6_RX))
+                       return -1;
+               if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) ||
+                   nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)))
+                       return -1;
+               fallthrough;
+       case L2TP_ENCAPTYPE_IP:
+               if (nla_put_in6_addr(skb, L2TP_ATTR_IP6_SADDR, &np->saddr) ||
+                   nla_put_in6_addr(skb, L2TP_ATTR_IP6_DADDR, &sk->sk_v6_daddr))
+                       return -1;
+               break;
+       }
+       return 0;
+}
+#endif
+
+static int l2tp_nl_tunnel_send_addr4(struct sk_buff *skb, struct sock *sk,
+                                    enum l2tp_encap_type encap)
+{
+       struct inet_sock *inet = inet_sk(sk);
+
+       switch (encap) {
+       case L2TP_ENCAPTYPE_UDP:
+               if (nla_put_u8(skb, L2TP_ATTR_UDP_CSUM, !sk->sk_no_check_tx) ||
+                   nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) ||
+                   nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)))
+                       return -1;
+               fallthrough;
+       case L2TP_ENCAPTYPE_IP:
+               if (nla_put_in_addr(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr) ||
+                   nla_put_in_addr(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr))
+                       return -1;
+               break;
+       }
+
+       return 0;
+}
+
+/* Append attributes for the tunnel address, handling the different attribute types
+ * used for different tunnel encapsulation and AF_INET v.s. AF_INET6.
+ */
+static int l2tp_nl_tunnel_send_addr(struct sk_buff *skb, struct l2tp_tunnel *tunnel)
+{
+       struct sock *sk = tunnel->sock;
+
+       if (!sk)
+               return 0;
+
+#if IS_ENABLED(CONFIG_IPV6)
+       if (sk->sk_family == AF_INET6)
+               return l2tp_nl_tunnel_send_addr6(skb, sk, tunnel->encap);
+#endif
+       return l2tp_nl_tunnel_send_addr4(skb, sk, tunnel->encap);
+}
+
 static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int flags,
                               struct l2tp_tunnel *tunnel, u8 cmd)
 {
        void *hdr;
        struct nlattr *nest;
-       struct sock *sk = NULL;
-       struct inet_sock *inet;
-#if IS_ENABLED(CONFIG_IPV6)
-       struct ipv6_pinfo *np = NULL;
-#endif
 
        hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags, cmd);
        if (!hdr)
@@ -343,7 +405,7 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
                goto nla_put_failure;
 
        nest = nla_nest_start_noflag(skb, L2TP_ATTR_STATS);
-       if (nest == NULL)
+       if (!nest)
                goto nla_put_failure;
 
        if (nla_put_u64_64bit(skb, L2TP_ATTR_TX_PACKETS,
@@ -373,58 +435,9 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
                goto nla_put_failure;
        nla_nest_end(skb, nest);
 
-       sk = tunnel->sock;
-       if (!sk)
-               goto out;
-
-#if IS_ENABLED(CONFIG_IPV6)
-       if (sk->sk_family == AF_INET6)
-               np = inet6_sk(sk);
-#endif
-
-       inet = inet_sk(sk);
-
-       switch (tunnel->encap) {
-       case L2TP_ENCAPTYPE_UDP:
-               switch (sk->sk_family) {
-               case AF_INET:
-                       if (nla_put_u8(skb, L2TP_ATTR_UDP_CSUM, !sk->sk_no_check_tx))
-                               goto nla_put_failure;
-                       break;
-#if IS_ENABLED(CONFIG_IPV6)
-               case AF_INET6:
-                       if (udp_get_no_check6_tx(sk) &&
-                           nla_put_flag(skb, L2TP_ATTR_UDP_ZERO_CSUM6_TX))
-                               goto nla_put_failure;
-                       if (udp_get_no_check6_rx(sk) &&
-                           nla_put_flag(skb, L2TP_ATTR_UDP_ZERO_CSUM6_RX))
-                               goto nla_put_failure;
-                       break;
-#endif
-               }
-               if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) ||
-                   nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)))
-                       goto nla_put_failure;
-               /* fall through  */
-       case L2TP_ENCAPTYPE_IP:
-#if IS_ENABLED(CONFIG_IPV6)
-               if (np) {
-                       if (nla_put_in6_addr(skb, L2TP_ATTR_IP6_SADDR,
-                                            &np->saddr) ||
-                           nla_put_in6_addr(skb, L2TP_ATTR_IP6_DADDR,
-                                            &sk->sk_v6_daddr))
-                               goto nla_put_failure;
-               } else
-#endif
-               if (nla_put_in_addr(skb, L2TP_ATTR_IP_SADDR,
-                                   inet->inet_saddr) ||
-                   nla_put_in_addr(skb, L2TP_ATTR_IP_DADDR,
-                                   inet->inet_daddr))
-                       goto nla_put_failure;
-               break;
-       }
+       if (l2tp_nl_tunnel_send_addr(skb, tunnel))
+               goto nla_put_failure;
 
-out:
        genlmsg_end(skb, hdr);
        return 0;
 
@@ -485,7 +498,7 @@ static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback
 
        for (;;) {
                tunnel = l2tp_tunnel_get_nth(net, ti);
-               if (tunnel == NULL)
+               if (!tunnel)
                        goto out;
 
                if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid,
@@ -570,6 +583,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
 
                if (info->attrs[L2TP_ATTR_COOKIE]) {
                        u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]);
+
                        if (len > 8) {
                                ret = -EINVAL;
                                goto out_tunnel;
@@ -579,6 +593,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
                }
                if (info->attrs[L2TP_ATTR_PEER_COOKIE]) {
                        u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]);
+
                        if (len > 8) {
                                ret = -EINVAL;
                                goto out_tunnel;
@@ -606,14 +621,13 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
                cfg.reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]);
 
 #ifdef CONFIG_MODULES
-       if (l2tp_nl_cmd_ops[cfg.pw_type] == NULL) {
+       if (!l2tp_nl_cmd_ops[cfg.pw_type]) {
                genl_unlock();
                request_module("net-l2tp-type-%u", cfg.pw_type);
                genl_lock();
        }
 #endif
-       if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) ||
-           (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) {
+       if (!l2tp_nl_cmd_ops[cfg.pw_type] || !l2tp_nl_cmd_ops[cfg.pw_type]->session_create) {
                ret = -EPROTONOSUPPORT;
                goto out_tunnel;
        }
@@ -645,7 +659,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
        u16 pw_type;
 
        session = l2tp_nl_session_get(info);
-       if (session == NULL) {
+       if (!session) {
                ret = -ENODEV;
                goto out;
        }
@@ -670,7 +684,7 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
        struct l2tp_session *session;
 
        session = l2tp_nl_session_get(info);
-       if (session == NULL) {
+       if (!session) {
                ret = -ENODEV;
                goto out;
        }
@@ -715,8 +729,7 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
        if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
            nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) ||
            nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
-           nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID,
-                       session->peer_session_id) ||
+           nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id) ||
            nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) ||
            nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype))
                goto nla_put_failure;
@@ -724,11 +737,9 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
        if ((session->ifname[0] &&
             nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
            (session->cookie_len &&
-            nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
-                    &session->cookie[0])) ||
+            nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len, session->cookie)) ||
            (session->peer_cookie_len &&
-            nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len,
-                    &session->peer_cookie[0])) ||
+            nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, session->peer_cookie)) ||
            nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq) ||
            nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq) ||
            nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode) ||
@@ -740,7 +751,7 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
                goto nla_put_failure;
 
        nest = nla_nest_start_noflag(skb, L2TP_ATTR_STATS);
-       if (nest == NULL)
+       if (!nest)
                goto nla_put_failure;
 
        if (nla_put_u64_64bit(skb, L2TP_ATTR_TX_PACKETS,
@@ -785,7 +796,7 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
        int ret;
 
        session = l2tp_nl_session_get(info);
-       if (session == NULL) {
+       if (!session) {
                ret = -ENODEV;
                goto err;
        }
@@ -824,14 +835,14 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback
        int si = cb->args[1];
 
        for (;;) {
-               if (tunnel == NULL) {
+               if (!tunnel) {
                        tunnel = l2tp_tunnel_get_nth(net, ti);
-                       if (tunnel == NULL)
+                       if (!tunnel)
                                goto out;
                }
 
                session = l2tp_session_get_nth(tunnel, si);
-               if (session == NULL) {
+               if (!session) {
                        ti++;
                        l2tp_tunnel_dec_refcount(tunnel);
                        tunnel = NULL;
index c54cb59..13c3153 100644 (file)
@@ -117,8 +117,7 @@ struct pppol2tp_session {
        int                     owner;          /* pid that opened the socket */
 
        struct mutex            sk_lock;        /* Protects .sk */
-       struct sock __rcu       *sk;            /* Pointer to the session
-                                                * PPPoX socket */
+       struct sock __rcu       *sk;            /* Pointer to the session PPPoX socket */
        struct sock             *__sk;          /* Copy of .sk, for cleanup */
        struct rcu_head         rcu;            /* For asynchronous release */
 };
@@ -155,17 +154,20 @@ static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk)
 {
        struct l2tp_session *session;
 
-       if (sk == NULL)
+       if (!sk)
                return NULL;
 
        sock_hold(sk);
        session = (struct l2tp_session *)(sk->sk_user_data);
-       if (session == NULL) {
+       if (!session) {
+               sock_put(sk);
+               goto out;
+       }
+       if (WARN_ON(session->magic != L2TP_SESSION_MAGIC)) {
+               session = NULL;
                sock_put(sk);
                goto out;
        }
-
-       BUG_ON(session->magic != L2TP_SESSION_MAGIC);
 
 out:
        return session;
@@ -218,7 +220,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
         */
        rcu_read_lock();
        sk = rcu_dereference(ps->sk);
-       if (sk == NULL)
+       if (!sk)
                goto no_sock;
 
        /* If the first two bytes are 0xFF03, consider that it is the PPP's
@@ -286,7 +288,7 @@ static int pppol2tp_sendmsg(struct socket *sock, struct msghdr *m,
        /* Get session and tunnel contexts */
        error = -EBADF;
        session = pppol2tp_sock_to_session(sk);
-       if (session == NULL)
+       if (!session)
                goto error;
 
        tunnel = session->tunnel;
@@ -351,7 +353,7 @@ error:
  */
 static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 {
-       struct sock *sk = (struct sock *) chan->private;
+       struct sock *sk = (struct sock *)chan->private;
        struct l2tp_session *session;
        struct l2tp_tunnel *tunnel;
        int uhlen, headroom;
@@ -361,7 +363,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
 
        /* Get session and tunnel contexts from the socket */
        session = pppol2tp_sock_to_session(sk);
-       if (session == NULL)
+       if (!session)
                goto abort;
 
        tunnel = session->tunnel;
@@ -420,7 +422,8 @@ static void pppol2tp_session_destruct(struct sock *sk)
 
        if (session) {
                sk->sk_user_data = NULL;
-               BUG_ON(session->magic != L2TP_SESSION_MAGIC);
+               if (WARN_ON(session->magic != L2TP_SESSION_MAGIC))
+                       return;
                l2tp_session_dec_refcount(session);
        }
 }
@@ -704,7 +707,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
         * tunnel id.
         */
        if (!info.session_id && !info.peer_session_id) {
-               if (tunnel == NULL) {
+               if (!tunnel) {
                        struct l2tp_tunnel_cfg tcfg = {
                                .encap = L2TP_ENCAPTYPE_UDP,
                                .debug = 0,
@@ -739,11 +742,11 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
        } else {
                /* Error if we can't find the tunnel */
                error = -ENOENT;
-               if (tunnel == NULL)
+               if (!tunnel)
                        goto end;
 
                /* Error if socket is not prepped */
-               if (tunnel->sock == NULL)
+               if (!tunnel->sock)
                        goto end;
        }
 
@@ -803,8 +806,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
         * the internal context for use by ioctl() and sockopt()
         * handlers.
         */
-       if ((session->session_id == 0) &&
-           (session->peer_session_id == 0)) {
+       if (session->session_id == 0 && session->peer_session_id == 0) {
                error = 0;
                goto out_no_ppp;
        }
@@ -912,22 +914,23 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
        struct pppol2tp_session *pls;
 
        error = -ENOTCONN;
-       if (sk == NULL)
+       if (!sk)
                goto end;
        if (!(sk->sk_state & PPPOX_CONNECTED))
                goto end;
 
        error = -EBADF;
        session = pppol2tp_sock_to_session(sk);
-       if (session == NULL)
+       if (!session)
                goto end;
 
        pls = l2tp_session_priv(session);
        tunnel = session->tunnel;
 
        inet = inet_sk(tunnel->sock);
-       if ((tunnel->version == 2) && (tunnel->sock->sk_family == AF_INET)) {
+       if (tunnel->version == 2 && tunnel->sock->sk_family == AF_INET) {
                struct sockaddr_pppol2tp sp;
+
                len = sizeof(sp);
                memset(&sp, 0, len);
                sp.sa_family    = AF_PPPOX;
@@ -943,8 +946,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
                sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr;
                memcpy(uaddr, &sp, len);
 #if IS_ENABLED(CONFIG_IPV6)
-       } else if ((tunnel->version == 2) &&
-                  (tunnel->sock->sk_family == AF_INET6)) {
+       } else if (tunnel->version == 2 && tunnel->sock->sk_family == AF_INET6) {
                struct sockaddr_pppol2tpin6 sp;
 
                len = sizeof(sp);
@@ -962,8 +964,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
                memcpy(&sp.pppol2tp.addr.sin6_addr, &tunnel->sock->sk_v6_daddr,
                       sizeof(tunnel->sock->sk_v6_daddr));
                memcpy(uaddr, &sp, len);
-       } else if ((tunnel->version == 3) &&
-                  (tunnel->sock->sk_family == AF_INET6)) {
+       } else if (tunnel->version == 3 && tunnel->sock->sk_family == AF_INET6) {
                struct sockaddr_pppol2tpv3in6 sp;
 
                len = sizeof(sp);
@@ -984,6 +985,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
 #endif
        } else if (tunnel->version == 3) {
                struct sockaddr_pppol2tpv3 sp;
+
                len = sizeof(sp);
                memset(&sp, 0, len);
                sp.sa_family    = AF_PPPOX;
@@ -1178,7 +1180,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
 
        switch (optname) {
        case PPPOL2TP_SO_RECVSEQ:
-               if ((val != 0) && (val != 1)) {
+               if (val != 0 && val != 1) {
                        err = -EINVAL;
                        break;
                }
@@ -1189,7 +1191,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
                break;
 
        case PPPOL2TP_SO_SENDSEQ:
-               if ((val != 0) && (val != 1)) {
+               if (val != 0 && val != 1) {
                        err = -EINVAL;
                        break;
                }
@@ -1207,7 +1209,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
                break;
 
        case PPPOL2TP_SO_LNSMODE:
-               if ((val != 0) && (val != 1)) {
+               if (val != 0 && val != 1) {
                        err = -EINVAL;
                        break;
                }
@@ -1244,7 +1246,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
  * session or the special tunnel type.
  */
 static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
-                              char __user *optval, unsigned int optlen)
+                              sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct l2tp_session *session;
@@ -1258,23 +1260,22 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
        if (optlen < sizeof(int))
                return -EINVAL;
 
-       if (get_user(val, (int __user *)optval))
+       if (copy_from_sockptr(&val, optval, sizeof(int)))
                return -EFAULT;
 
        err = -ENOTCONN;
-       if (sk->sk_user_data == NULL)
+       if (!sk->sk_user_data)
                goto end;
 
        /* Get session context from the socket */
        err = -EBADF;
        session = pppol2tp_sock_to_session(sk);
-       if (session == NULL)
+       if (!session)
                goto end;
 
        /* Special case: if session_id == 0x0000, treat as operation on tunnel
         */
-       if ((session->session_id == 0) &&
-           (session->peer_session_id == 0)) {
+       if (session->session_id == 0 && session->peer_session_id == 0) {
                tunnel = session->tunnel;
                err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
        } else {
@@ -1343,7 +1344,7 @@ static int pppol2tp_session_getsockopt(struct sock *sk,
                break;
 
        case PPPOL2TP_SO_REORDERTO:
-               *val = (int) jiffies_to_msecs(session->reorder_timeout);
+               *val = (int)jiffies_to_msecs(session->reorder_timeout);
                l2tp_info(session, L2TP_MSG_CONTROL,
                          "%s: get reorder_timeout=%d\n", session->name, *val);
                break;
@@ -1381,18 +1382,17 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
                return -EINVAL;
 
        err = -ENOTCONN;
-       if (sk->sk_user_data == NULL)
+       if (!sk->sk_user_data)
                goto end;
 
        /* Get the session context */
        err = -EBADF;
        session = pppol2tp_sock_to_session(sk);
-       if (session == NULL)
+       if (!session)
                goto end;
 
        /* Special case: if session_id == 0x0000, treat as operation on tunnel */
-       if ((session->session_id == 0) &&
-           (session->peer_session_id == 0)) {
+       if (session->session_id == 0 && session->peer_session_id == 0) {
                tunnel = session->tunnel;
                err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
                if (err)
@@ -1407,7 +1407,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
        if (put_user(len, optlen))
                goto end_put_sess;
 
-       if (copy_to_user((void __user *) optval, &val, len))
+       if (copy_to_user((void __user *)optval, &val, len))
                goto end_put_sess;
 
        err = 0;
@@ -1463,7 +1463,7 @@ static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
        pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx);
        pd->session_idx++;
 
-       if (pd->session == NULL) {
+       if (!pd->session) {
                pd->session_idx = 0;
                pppol2tp_next_tunnel(net, pd);
        }
@@ -1478,17 +1478,21 @@ static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
        if (!pos)
                goto out;
 
-       BUG_ON(m->private == NULL);
+       if (WARN_ON(!m->private)) {
+               pd = NULL;
+               goto out;
+       }
+
        pd = m->private;
        net = seq_file_net(m);
 
-       if (pd->tunnel == NULL)
+       if (!pd->tunnel)
                pppol2tp_next_tunnel(net, pd);
        else
                pppol2tp_next_session(net, pd);
 
        /* NULL tunnel and session indicates end of list */
-       if ((pd->tunnel == NULL) && (pd->session == NULL))
+       if (!pd->tunnel && !pd->session)
                pd = NULL;
 
 out:
@@ -1551,6 +1555,7 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
 
        if (tunnel->sock) {
                struct inet_sock *inet = inet_sk(tunnel->sock);
+
                ip = ntohl(inet->inet_saddr);
                port = ntohs(inet->inet_sport);
        }
@@ -1564,8 +1569,7 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
                user_data_ok = 'N';
        }
 
-       seq_printf(m, "  SESSION '%s' %08X/%d %04X/%04X -> "
-                  "%04X/%04X %d %c\n",
+       seq_printf(m, "  SESSION '%s' %08X/%d %04X/%04X -> %04X/%04X %d %c\n",
                   session->name, ip, port,
                   tunnel->tunnel_id,
                   session->session_id,
@@ -1604,8 +1608,7 @@ static int pppol2tp_seq_show(struct seq_file *m, void *v)
                seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n");
                seq_puts(m, "TUNNEL name, user-data-ok session-count\n");
                seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
-               seq_puts(m, "  SESSION name, addr/port src-tid/sid "
-                        "dest-tid/sid state user-data-ok\n");
+               seq_puts(m, "  SESSION name, addr/port src-tid/sid dest-tid/sid state user-data-ok\n");
                seq_puts(m, "   mtu/mru/rcvseq/sendseq/lns debug reorderto\n");
                seq_puts(m, "   nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
                goto out;
@@ -1638,7 +1641,7 @@ static __net_init int pppol2tp_init_net(struct net *net)
        int err = 0;
 
        pde = proc_create_net("pppol2tp", 0444, net->proc_net,
-                       &pppol2tp_seq_ops, sizeof(struct pppol2tp_seq_data));
+                             &pppol2tp_seq_ops, sizeof(struct pppol2tp_seq_data));
        if (!pde) {
                err = -ENOMEM;
                goto out;
index f35899d..e71ca5a 100644 (file)
@@ -9,6 +9,99 @@
 #include <net/fib_rules.h>
 #include <net/l3mdev.h>
 
+static DEFINE_SPINLOCK(l3mdev_lock);
+
+struct l3mdev_handler {
+       lookup_by_table_id_t dev_lookup;
+};
+
+static struct l3mdev_handler l3mdev_handlers[L3MDEV_TYPE_MAX + 1];
+
+static int l3mdev_check_type(enum l3mdev_type l3type)
+{
+       if (l3type <= L3MDEV_TYPE_UNSPEC || l3type > L3MDEV_TYPE_MAX)
+               return -EINVAL;
+
+       return 0;
+}
+
+int l3mdev_table_lookup_register(enum l3mdev_type l3type,
+                                lookup_by_table_id_t fn)
+{
+       struct l3mdev_handler *hdlr;
+       int res;
+
+       res = l3mdev_check_type(l3type);
+       if (res)
+               return res;
+
+       hdlr = &l3mdev_handlers[l3type];
+
+       spin_lock(&l3mdev_lock);
+
+       if (hdlr->dev_lookup) {
+               res = -EBUSY;
+               goto unlock;
+       }
+
+       hdlr->dev_lookup = fn;
+       res = 0;
+
+unlock:
+       spin_unlock(&l3mdev_lock);
+
+       return res;
+}
+EXPORT_SYMBOL_GPL(l3mdev_table_lookup_register);
+
+void l3mdev_table_lookup_unregister(enum l3mdev_type l3type,
+                                   lookup_by_table_id_t fn)
+{
+       struct l3mdev_handler *hdlr;
+
+       if (l3mdev_check_type(l3type))
+               return;
+
+       hdlr = &l3mdev_handlers[l3type];
+
+       spin_lock(&l3mdev_lock);
+
+       if (hdlr->dev_lookup == fn)
+               hdlr->dev_lookup = NULL;
+
+       spin_unlock(&l3mdev_lock);
+}
+EXPORT_SYMBOL_GPL(l3mdev_table_lookup_unregister);
+
+int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type,
+                                     struct net *net, u32 table_id)
+{
+       lookup_by_table_id_t lookup;
+       struct l3mdev_handler *hdlr;
+       int ifindex = -EINVAL;
+       int res;
+
+       res = l3mdev_check_type(l3type);
+       if (res)
+               return res;
+
+       hdlr = &l3mdev_handlers[l3type];
+
+       spin_lock(&l3mdev_lock);
+
+       lookup = hdlr->dev_lookup;
+       if (!lookup)
+               goto unlock;
+
+       ifindex = lookup(net, table_id);
+
+unlock:
+       spin_unlock(&l3mdev_lock);
+
+       return ifindex;
+}
+EXPORT_SYMBOL_GPL(l3mdev_ifindex_lookup_by_table_id);
+
 /**
  *     l3mdev_master_ifindex - get index of L3 master device
  *     @dev: targeted interface
index 54fb8d4..7180979 100644 (file)
@@ -273,6 +273,10 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
 
        if (!sock_flag(sk, SOCK_ZAPPED))
                goto out;
+       if (!addr->sllc_arphrd)
+               addr->sllc_arphrd = ARPHRD_ETHER;
+       if (addr->sllc_arphrd != ARPHRD_ETHER)
+               goto out;
        rc = -ENODEV;
        if (sk->sk_bound_dev_if) {
                llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
@@ -328,7 +332,9 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
        if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
                goto out;
        rc = -EAFNOSUPPORT;
-       if (unlikely(addr->sllc_family != AF_LLC))
+       if (!addr->sllc_arphrd)
+               addr->sllc_arphrd = ARPHRD_ETHER;
+       if (unlikely(addr->sllc_family != AF_LLC || addr->sllc_arphrd != ARPHRD_ETHER))
                goto out;
        dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
        rc = -ENODEV;
@@ -336,8 +342,6 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
        if (sk->sk_bound_dev_if) {
                llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
                if (llc->dev) {
-                       if (!addr->sllc_arphrd)
-                               addr->sllc_arphrd = llc->dev->type;
                        if (is_zero_ether_addr(addr->sllc_mac))
                                memcpy(addr->sllc_mac, llc->dev->dev_addr,
                                       IFHWADDRLEN);
@@ -980,7 +984,6 @@ out:
  *     llc_ui_getname - return the address info of a socket
  *     @sock: Socket to get address of.
  *     @uaddr: Address structure to return information.
- *     @uaddrlen: Length of address structure.
  *     @peer: Does user want local or remote address information.
  *
  *     Return the address information of a socket.
@@ -1050,7 +1053,7 @@ static int llc_ui_ioctl(struct socket *sock, unsigned int cmd,
  *     Set various connection specific parameters.
  */
 static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
-                            char __user *optval, unsigned int optlen)
+                            sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct llc_sock *llc = llc_sk(sk);
@@ -1060,7 +1063,7 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
        lock_sock(sk);
        if (unlikely(level != SOL_LLC || optlen != sizeof(int)))
                goto out;
-       rc = get_user(opt, (int __user *)optval);
+       rc = copy_from_sockptr(&opt, optval, sizeof(opt));
        if (rc)
                goto out;
        rc = -EINVAL;
index 7b620ac..1144cda 100644 (file)
@@ -284,8 +284,8 @@ out:;
 /**
  *     llc_conn_remove_acked_pdus - Removes acknowledged pdus from tx queue
  *     @sk: active connection
- *     nr: NR
- *     how_many_unacked: size of pdu_unack_q after removing acked pdus
+ *     @nr: NR
+ *     @how_many_unacked: size of pdu_unack_q after removing acked pdus
  *
  *     Removes acknowledged pdus from transmit queue (pdu_unack_q). Returns
  *     the number of pdus that removed from queue.
@@ -906,6 +906,7 @@ static void llc_sk_init(struct sock *sk)
 
 /**
  *     llc_sk_alloc - Allocates LLC sock
+ *     @net: network namespace
  *     @family: upper layer protocol family
  *     @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
  *
@@ -951,7 +952,7 @@ void llc_sk_stop_all_timers(struct sock *sk, bool sync)
 
 /**
  *     llc_sk_free - Frees a LLC socket
- *     @sk - socket to free
+ *     @sk: - socket to free
  *
  *     Frees a LLC socket
  */
index 82cb93f..c309b72 100644 (file)
@@ -144,6 +144,7 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
  *     @skb: received pdu
  *     @dev: device that receive pdu
  *     @pt: packet type
+ *     @orig_dev: the original receive net device
  *
  *     When the system receives a 802.2 frame this function is called. It
  *     checks SAP and connection of received pdu and passes frame to
index 2e6cb79..792d195 100644 (file)
@@ -25,7 +25,7 @@ void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 pdu_type)
 
 /**
  *     pdu_set_pf_bit - sets poll/final bit in LLC header
- *     @pdu_frame: input frame that p/f bit must be set into it.
+ *     @skb: Frame to set bit in
  *     @bit_value: poll/final bit (0 or 1).
  *
  *     This function sets poll/final bit in LLC header (based on type of PDU).
index be41906..6805ce4 100644 (file)
@@ -37,6 +37,7 @@ static int llc_mac_header_len(unsigned short devtype)
 
 /**
  *     llc_alloc_frame - allocates sk_buff for frame
+ *     @sk:  socket to allocate frame to
  *     @dev: network device this skb will be sent over
  *     @type: pdu type to allocate
  *     @data_size: data size to allocate
@@ -273,6 +274,7 @@ void llc_build_and_send_xid_pkt(struct llc_sap *sap, struct sk_buff *skb,
  *     llc_sap_rcv - sends received pdus to the sap state machine
  *     @sap: current sap component structure.
  *     @skb: received frame.
+ *     @sk:  socket to associate to frame
  *
  *     Sends received pdus to the sap state machine.
  */
@@ -379,6 +381,7 @@ static void llc_do_mcast(struct llc_sap *sap, struct sk_buff *skb,
  *     llc_sap_mcast - Deliver multicast PDU's to all matching datagram sockets.
  *     @sap: SAP
  *     @laddr: address of local LLC (MAC + SAP)
+ *     @skb: PDU to deliver
  *
  *     Search socket list of the SAP and finds connections with same sap.
  *     Deliver clone to each.
index aa51509..02cde0f 100644 (file)
@@ -1105,11 +1105,8 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
                               ttl, lifetime, 0, ifmsh->preq_id++, sdata);
 
        spin_lock_bh(&mpath->state_lock);
-       if (mpath->flags & MESH_PATH_DELETED) {
-               spin_unlock_bh(&mpath->state_lock);
-               goto enddiscovery;
-       }
-       mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
+       if (!(mpath->flags & MESH_PATH_DELETED))
+               mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);
        spin_unlock_bh(&mpath->state_lock);
 
 enddiscovery:
index 117519b..fe4e853 100644 (file)
@@ -72,7 +72,6 @@ static void mesh_table_free(struct mesh_table *tbl)
 }
 
 /**
- *
  * mesh_path_assign_nexthop - update mesh path next hop
  *
  * @mpath: mesh path to update
@@ -140,7 +139,6 @@ static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
 }
 
 /**
- *
  * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
  *
  * This function is used to transfer or copy frames from an unresolved mpath to
@@ -152,7 +150,7 @@ static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
  *
  * The gate mpath must be an active mpath with a valid mpath->next_hop.
  *
- * @mpath: An active mpath the frames will be sent to (i.e. the gate)
+ * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
  * @from_mpath: The failed mpath
  * @copy: When true, copy all the frames to the new mpath queue.  When false,
  * move them.
index a88ab6f..5c5af4b 100644 (file)
@@ -2396,6 +2396,7 @@ static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
 
 static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
 {
+       struct ieee80211_hdr *hdr = (void *)rx->skb->data;
        struct sk_buff *skb = rx->skb;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 
@@ -2406,6 +2407,31 @@ static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
        if (status->flag & RX_FLAG_DECRYPTED)
                return 0;
 
+       /* check mesh EAPOL frames first */
+       if (unlikely(rx->sta && ieee80211_vif_is_mesh(&rx->sdata->vif) &&
+                    ieee80211_is_data(fc))) {
+               struct ieee80211s_hdr *mesh_hdr;
+               u16 hdr_len = ieee80211_hdrlen(fc);
+               u16 ethertype_offset;
+               __be16 ethertype;
+
+               if (!ether_addr_equal(hdr->addr1, rx->sdata->vif.addr))
+                       goto drop_check;
+
+               /* make sure fixed part of mesh header is there, also checks skb len */
+               if (!pskb_may_pull(rx->skb, hdr_len + 6))
+                       goto drop_check;
+
+               mesh_hdr = (struct ieee80211s_hdr *)(skb->data + hdr_len);
+               ethertype_offset = hdr_len + ieee80211_get_mesh_hdrlen(mesh_hdr) +
+                                  sizeof(rfc1042_header);
+
+               if (skb_copy_bits(rx->skb, ethertype_offset, &ethertype, 2) == 0 &&
+                   ethertype == rx->sdata->control_port_protocol)
+                       return 0;
+       }
+
+drop_check:
        /* Drop unencrypted frames if key is set. */
        if (unlikely(!ieee80211_has_protected(fc) &&
                     !ieee80211_is_any_nullfunc(fc) &&
index 7b1baca..cbc40b3 100644 (file)
@@ -639,11 +639,23 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
                u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
                struct ieee80211_sub_if_data *sdata;
                struct ieee80211_hdr *hdr = (void *)skb->data;
+               __be16 ethertype = 0;
+
+               if (skb->len >= ETH_HLEN && skb->protocol == cpu_to_be16(ETH_P_802_3))
+                       skb_copy_bits(skb, 2 * ETH_ALEN, &ethertype, ETH_TLEN);
 
                rcu_read_lock();
                sdata = ieee80211_sdata_from_skb(local, skb);
                if (sdata) {
-                       if (ieee80211_is_any_nullfunc(hdr->frame_control))
+                       if (ethertype == sdata->control_port_protocol ||
+                           ethertype == cpu_to_be16(ETH_P_PREAUTH))
+                               cfg80211_control_port_tx_status(&sdata->wdev,
+                                                               cookie,
+                                                               skb->data,
+                                                               skb->len,
+                                                               acked,
+                                                               GFP_ATOMIC);
+                       else if (ieee80211_is_any_nullfunc(hdr->frame_control))
                                cfg80211_probe_status(sdata->dev, hdr->addr1,
                                                      cookie, acked,
                                                      info->status.ack_signal,
@@ -654,12 +666,8 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
                                                        skb->data, skb->len,
                                                        acked, GFP_ATOMIC);
                        else
-                               cfg80211_control_port_tx_status(&sdata->wdev,
-                                                               cookie,
-                                                               skb->data,
-                                                               skb->len,
-                                                               acked,
-                                                               GFP_ATOMIC);
+                               pr_warn("Unknown status report in ack skb\n");
+
                }
                rcu_read_unlock();
 
index e9ce658..1a2941e 100644 (file)
@@ -3996,6 +3996,9 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
        skb_list_walk_safe(skb, skb, next) {
                skb_mark_not_on_list(skb);
 
+               if (skb->protocol == sdata->control_port_protocol)
+                       ctrl_flags |= IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
+
                skb = ieee80211_build_hdr(sdata, skb, info_flags,
                                          sta, ctrl_flags, cookie);
                if (IS_ERR(skb)) {
@@ -4206,7 +4209,7 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
            (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER)))
                ra = sdata->u.mgd.bssid;
 
-       if (!is_valid_ether_addr(ra))
+       if (is_zero_ether_addr(ra))
                goto out_free;
 
        multicast = is_multicast_ether_addr(ra);
@@ -5371,7 +5374,8 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
                return -EINVAL;
 
        if (proto == sdata->control_port_protocol)
-               ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+               ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO |
+                             IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
 
        if (unencrypted)
                flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
index a9ed3bf..698bc35 100644 (file)
@@ -13,17 +13,29 @@ config MPTCP
 
 if MPTCP
 
+config INET_MPTCP_DIAG
+       depends on INET_DIAG
+       def_tristate INET_DIAG
+
 config MPTCP_IPV6
        bool "MPTCP: IPv6 support for Multipath TCP"
        select IPV6
        default y
 
-config MPTCP_HMAC_TEST
-       bool "Tests for MPTCP HMAC implementation"
+endif
+
+config MPTCP_KUNIT_TESTS
+       tristate "This builds the MPTCP KUnit tests" if !KUNIT_ALL_TESTS
+       select MPTCP
+       depends on KUNIT
+       default KUNIT_ALL_TESTS
        help
-         This option enable boot time self-test for the HMAC implementation
-         used by the MPTCP code
+         Currently covers the MPTCP crypto and token helpers.
+         Only useful for kernel devs running KUnit test harness and are not
+         for inclusion into a production build.
 
-         Say N if you are unsure.
+         For more information on KUnit and unit tests in general please refer
+         to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+         If unsure, say N.
 
-endif
index baa0640..2360cbd 100644 (file)
@@ -3,3 +3,9 @@ obj-$(CONFIG_MPTCP) += mptcp.o
 
 mptcp-y := protocol.o subflow.o options.o token.o crypto.o ctrl.o pm.o diag.o \
           mib.o pm_netlink.o
+
+obj-$(CONFIG_INET_MPTCP_DIAG) += mptcp_diag.o
+
+mptcp_crypto_test-objs := crypto_test.o
+mptcp_token_test-objs := token_test.o
+obj-$(CONFIG_MPTCP_KUNIT_TESTS) += mptcp_crypto_test.o mptcp_token_test.o
index 3d98071..6c4ea97 100644 (file)
@@ -87,65 +87,6 @@ void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac)
        sha256_final(&state, (u8 *)hmac);
 }
 
-#ifdef CONFIG_MPTCP_HMAC_TEST
-struct test_cast {
-       char *key;
-       char *msg;
-       char *result;
-};
-
-/* we can't reuse RFC 4231 test vectors, as we have constraint on the
- * input and key size.
- */
-static struct test_cast tests[] = {
-       {
-               .key = "0b0b0b0b0b0b0b0b",
-               .msg = "48692054",
-               .result = "8385e24fb4235ac37556b6b886db106284a1da671699f46db1f235ec622dcafa",
-       },
-       {
-               .key = "aaaaaaaaaaaaaaaa",
-               .msg = "dddddddd",
-               .result = "2c5e219164ff1dca1c4a92318d847bb6b9d44492984e1eb71aff9022f71046e9",
-       },
-       {
-               .key = "0102030405060708",
-               .msg = "cdcdcdcd",
-               .result = "e73b9ba9969969cefb04aa0d6df18ec2fcc075b6f23b4d8c4da736a5dbbc6e7d",
-       },
-};
-
-static int __init test_mptcp_crypto(void)
-{
-       char hmac[32], hmac_hex[65];
-       u32 nonce1, nonce2;
-       u64 key1, key2;
-       u8 msg[8];
-       int i, j;
-
-       for (i = 0; i < ARRAY_SIZE(tests); ++i) {
-               /* mptcp hmap will convert to be before computing the hmac */
-               key1 = be64_to_cpu(*((__be64 *)&tests[i].key[0]));
-               key2 = be64_to_cpu(*((__be64 *)&tests[i].key[8]));
-               nonce1 = be32_to_cpu(*((__be32 *)&tests[i].msg[0]));
-               nonce2 = be32_to_cpu(*((__be32 *)&tests[i].msg[4]));
-
-               put_unaligned_be32(nonce1, &msg[0]);
-               put_unaligned_be32(nonce2, &msg[4]);
-
-               mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
-               for (j = 0; j < 32; ++j)
-                       sprintf(&hmac_hex[j << 1], "%02x", hmac[j] & 0xff);
-               hmac_hex[64] = 0;
-
-               if (memcmp(hmac_hex, tests[i].result, 64))
-                       pr_err("test %d failed, got %s expected %s", i,
-                              hmac_hex, tests[i].result);
-               else
-                       pr_info("test %d [ ok ]", i);
-       }
-       return 0;
-}
-
-late_initcall(test_mptcp_crypto);
+#if IS_MODULE(CONFIG_MPTCP_KUNIT_TESTS)
+EXPORT_SYMBOL_GPL(mptcp_crypto_hmac_sha);
 #endif
diff --git a/net/mptcp/crypto_test.c b/net/mptcp/crypto_test.c
new file mode 100644 (file)
index 0000000..017248d
--- /dev/null
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <kunit/test.h>
+
+#include "protocol.h"
+
+struct test_case {
+       char *key;
+       char *msg;
+       char *result;
+};
+
+/* we can't reuse RFC 4231 test vectors, as we have constraint on the
+ * input and key size.
+ */
+static struct test_case tests[] = {
+       {
+               .key = "0b0b0b0b0b0b0b0b",
+               .msg = "48692054",
+               .result = "8385e24fb4235ac37556b6b886db106284a1da671699f46db1f235ec622dcafa",
+       },
+       {
+               .key = "aaaaaaaaaaaaaaaa",
+               .msg = "dddddddd",
+               .result = "2c5e219164ff1dca1c4a92318d847bb6b9d44492984e1eb71aff9022f71046e9",
+       },
+       {
+               .key = "0102030405060708",
+               .msg = "cdcdcdcd",
+               .result = "e73b9ba9969969cefb04aa0d6df18ec2fcc075b6f23b4d8c4da736a5dbbc6e7d",
+       },
+};
+
+static void mptcp_crypto_test_basic(struct kunit *test)
+{
+       char hmac[32], hmac_hex[65];
+       u32 nonce1, nonce2;
+       u64 key1, key2;
+       u8 msg[8];
+       int i, j;
+
+       for (i = 0; i < ARRAY_SIZE(tests); ++i) {
+               /* mptcp hmap will convert to be before computing the hmac */
+               key1 = be64_to_cpu(*((__be64 *)&tests[i].key[0]));
+               key2 = be64_to_cpu(*((__be64 *)&tests[i].key[8]));
+               nonce1 = be32_to_cpu(*((__be32 *)&tests[i].msg[0]));
+               nonce2 = be32_to_cpu(*((__be32 *)&tests[i].msg[4]));
+
+               put_unaligned_be32(nonce1, &msg[0]);
+               put_unaligned_be32(nonce2, &msg[4]);
+
+               mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
+               for (j = 0; j < 32; ++j)
+                       sprintf(&hmac_hex[j << 1], "%02x", hmac[j] & 0xff);
+               hmac_hex[64] = 0;
+
+               KUNIT_EXPECT_STREQ(test, &hmac_hex[0], tests[i].result);
+       }
+}
+
+static struct kunit_case mptcp_crypto_test_cases[] = {
+       KUNIT_CASE(mptcp_crypto_test_basic),
+       {}
+};
+
+static struct kunit_suite mptcp_crypto_suite = {
+       .name = "mptcp-crypto",
+       .test_cases = mptcp_crypto_test_cases,
+};
+
+kunit_test_suite(mptcp_crypto_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c
new file mode 100644 (file)
index 0000000..5f390a9
--- /dev/null
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+/* MPTCP socket monitoring support
+ *
+ * Copyright (c) 2020 Red Hat
+ *
+ * Author: Paolo Abeni <pabeni@redhat.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/net.h>
+#include <linux/inet_diag.h>
+#include <net/netlink.h>
+#include <uapi/linux/mptcp.h>
+#include "protocol.h"
+
+static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
+                       struct netlink_callback *cb,
+                       const struct inet_diag_req_v2 *req,
+                       struct nlattr *bc, bool net_admin)
+{
+       if (!inet_diag_bc_sk(bc, sk))
+               return 0;
+
+       return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, req, NLM_F_MULTI,
+                                net_admin);
+}
+
+static int mptcp_diag_dump_one(struct netlink_callback *cb,
+                              const struct inet_diag_req_v2 *req)
+{
+       struct sk_buff *in_skb = cb->skb;
+       struct mptcp_sock *msk = NULL;
+       struct sk_buff *rep;
+       int err = -ENOENT;
+       struct net *net;
+       struct sock *sk;
+
+       net = sock_net(in_skb->sk);
+       msk = mptcp_token_get_sock(req->id.idiag_cookie[0]);
+       if (!msk)
+               goto out_nosk;
+
+       err = -ENOMEM;
+       sk = (struct sock *)msk;
+       rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) +
+                       inet_diag_msg_attrs_size() +
+                       nla_total_size(sizeof(struct mptcp_info)) +
+                       nla_total_size(sizeof(struct inet_diag_meminfo)) + 64,
+                       GFP_KERNEL);
+       if (!rep)
+               goto out;
+
+       err = inet_sk_diag_fill(sk, inet_csk(sk), rep, cb, req, 0,
+                               netlink_net_capable(in_skb, CAP_NET_ADMIN));
+       if (err < 0) {
+               WARN_ON(err == -EMSGSIZE);
+               kfree_skb(rep);
+               goto out;
+       }
+       err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).portid,
+                             MSG_DONTWAIT);
+       if (err > 0)
+               err = 0;
+out:
+       sock_put(sk);
+
+out_nosk:
+       return err;
+}
+
+static void mptcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+                           const struct inet_diag_req_v2 *r)
+{
+       bool net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
+       struct net *net = sock_net(skb->sk);
+       struct inet_diag_dump_data *cb_data;
+       struct mptcp_sock *msk;
+       struct nlattr *bc;
+
+       cb_data = cb->data;
+       bc = cb_data->inet_diag_nla_bc;
+
+       while ((msk = mptcp_token_iter_next(net, &cb->args[0], &cb->args[1])) !=
+              NULL) {
+               struct inet_sock *inet = (struct inet_sock *)msk;
+               struct sock *sk = (struct sock *)msk;
+               int ret = 0;
+
+               if (!(r->idiag_states & (1 << sk->sk_state)))
+                       goto next;
+               if (r->sdiag_family != AF_UNSPEC &&
+                   sk->sk_family != r->sdiag_family)
+                       goto next;
+               if (r->id.idiag_sport != inet->inet_sport &&
+                   r->id.idiag_sport)
+                       goto next;
+               if (r->id.idiag_dport != inet->inet_dport &&
+                   r->id.idiag_dport)
+                       goto next;
+
+               ret = sk_diag_dump(sk, skb, cb, r, bc, net_admin);
+next:
+               sock_put(sk);
+               if (ret < 0) {
+                       /* will retry on the same position */
+                       cb->args[1]--;
+                       break;
+               }
+               cond_resched();
+       }
+}
+
+static void mptcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
+                               void *_info)
+{
+       struct mptcp_sock *msk = mptcp_sk(sk);
+       struct mptcp_info *info = _info;
+       u32 flags = 0;
+       bool slow;
+       u8 val;
+
+       r->idiag_rqueue = sk_rmem_alloc_get(sk);
+       r->idiag_wqueue = sk_wmem_alloc_get(sk);
+       if (!info)
+               return;
+
+       slow = lock_sock_fast(sk);
+       info->mptcpi_subflows = READ_ONCE(msk->pm.subflows);
+       info->mptcpi_add_addr_signal = READ_ONCE(msk->pm.add_addr_signaled);
+       info->mptcpi_add_addr_accepted = READ_ONCE(msk->pm.add_addr_accepted);
+       info->mptcpi_subflows_max = READ_ONCE(msk->pm.subflows_max);
+       val = READ_ONCE(msk->pm.add_addr_signal_max);
+       info->mptcpi_add_addr_signal_max = val;
+       val = READ_ONCE(msk->pm.add_addr_accept_max);
+       info->mptcpi_add_addr_accepted_max = val;
+       if (test_bit(MPTCP_FALLBACK_DONE, &msk->flags))
+               flags |= MPTCP_INFO_FLAG_FALLBACK;
+       if (READ_ONCE(msk->can_ack))
+               flags |= MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED;
+       info->mptcpi_flags = flags;
+       info->mptcpi_token = READ_ONCE(msk->token);
+       info->mptcpi_write_seq = READ_ONCE(msk->write_seq);
+       info->mptcpi_snd_una = atomic64_read(&msk->snd_una);
+       info->mptcpi_rcv_nxt = READ_ONCE(msk->ack_seq);
+       unlock_sock_fast(sk, slow);
+}
+
+static const struct inet_diag_handler mptcp_diag_handler = {
+       .dump            = mptcp_diag_dump,
+       .dump_one        = mptcp_diag_dump_one,
+       .idiag_get_info  = mptcp_diag_get_info,
+       .idiag_type      = IPPROTO_MPTCP,
+       .idiag_info_size = sizeof(struct mptcp_info),
+};
+
+static int __init mptcp_diag_init(void)
+{
+       return inet_diag_register(&mptcp_diag_handler);
+}
+
+static void __exit mptcp_diag_exit(void)
+{
+       inet_diag_unregister(&mptcp_diag_handler);
+}
+
+module_init(mptcp_diag_init);
+module_exit(mptcp_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 2-262 /* AF_INET - IPPROTO_MPTCP */);
index 490b925..3bc56eb 100644 (file)
@@ -336,9 +336,7 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
         */
        subflow->snd_isn = TCP_SKB_CB(skb)->end_seq;
        if (subflow->request_mptcp) {
-               pr_debug("local_key=%llu", subflow->local_key);
                opts->suboptions = OPTION_MPTCP_MPC_SYN;
-               opts->sndr_key = subflow->local_key;
                *size = TCPOLEN_MPTCP_MPC_SYN;
                return true;
        } else if (subflow->request_join) {
@@ -451,9 +449,9 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
 }
 
 static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow,
-                                struct mptcp_ext *ext)
+                                struct sk_buff *skb, struct mptcp_ext *ext)
 {
-       if (!ext->use_map) {
+       if (!ext->use_map || !skb->len) {
                /* RFC6824 requires a DSS mapping with specific values
                 * if DATA_FIN is set but no data payload is mapped
                 */
@@ -505,7 +503,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
                        opts->ext_copy = *mpext;
 
                if (skb && tcp_fin && subflow->data_fin_tx_enable)
-                       mptcp_write_data_fin(subflow, &opts->ext_copy);
+                       mptcp_write_data_fin(subflow, skb, &opts->ext_copy);
                ret = true;
        }
 
@@ -626,6 +624,9 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
 
        opts->suboptions = 0;
 
+       if (unlikely(mptcp_check_fallback(sk)))
+               return false;
+
        if (mptcp_established_options_mp(sk, skb, &opt_size, remaining, opts))
                ret = true;
        else if (mptcp_established_options_dss(sk, skb, &opt_size, remaining,
@@ -708,6 +709,7 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
                 * additional ack.
                 */
                subflow->fully_established = 1;
+               WRITE_ONCE(msk->fully_established, true);
                goto fully_established;
        }
 
@@ -716,15 +718,14 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
         */
        if (!mp_opt->mp_capable) {
                subflow->mp_capable = 0;
-               tcp_sk(sk)->is_mptcp = 0;
+               pr_fallback(msk);
+               __mptcp_do_fallback(msk);
                return false;
        }
 
        if (unlikely(!READ_ONCE(msk->pm.server_side)))
                pr_warn_once("bogus mpc option on established client sk");
-       subflow->fully_established = 1;
-       subflow->remote_key = mp_opt->sndr_key;
-       subflow->can_ack = 1;
+       mptcp_subflow_fully_established(subflow, mp_opt);
 
 fully_established:
        if (likely(subflow->pm_notified))
@@ -816,6 +817,9 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
        struct mptcp_options_received mp_opt;
        struct mptcp_ext *mpext;
 
+       if (__mptcp_check_fallback(msk))
+               return;
+
        mptcp_get_options(skb, &mp_opt);
        if (!check_fully_established(msk, sk, subflow, skb, &mp_opt))
                return;
index 977d9c8..a8ad205 100644 (file)
@@ -10,8 +10,6 @@
 #include <net/mptcp.h>
 #include "protocol.h"
 
-static struct workqueue_struct *pm_wq;
-
 /* path manager command handlers */
 
 int mptcp_pm_announce_addr(struct mptcp_sock *msk,
@@ -78,7 +76,7 @@ static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
                return false;
 
        msk->pm.status |= BIT(new_status);
-       if (queue_work(pm_wq, &msk->pm.work))
+       if (schedule_work(&msk->work))
                sock_hold((struct sock *)msk);
        return true;
 }
@@ -181,35 +179,6 @@ int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
        return mptcp_pm_nl_get_local_id(msk, skc);
 }
 
-static void pm_worker(struct work_struct *work)
-{
-       struct mptcp_pm_data *pm = container_of(work, struct mptcp_pm_data,
-                                               work);
-       struct mptcp_sock *msk = container_of(pm, struct mptcp_sock, pm);
-       struct sock *sk = (struct sock *)msk;
-
-       lock_sock(sk);
-       spin_lock_bh(&msk->pm.lock);
-
-       pr_debug("msk=%p status=%x", msk, pm->status);
-       if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
-               pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
-               mptcp_pm_nl_add_addr_received(msk);
-       }
-       if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
-               pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
-               mptcp_pm_nl_fully_established(msk);
-       }
-       if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
-               pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
-               mptcp_pm_nl_subflow_established(msk);
-       }
-
-       spin_unlock_bh(&msk->pm.lock);
-       release_sock(sk);
-       sock_put(sk);
-}
-
 void mptcp_pm_data_init(struct mptcp_sock *msk)
 {
        msk->pm.add_addr_signaled = 0;
@@ -223,22 +192,11 @@ void mptcp_pm_data_init(struct mptcp_sock *msk)
        msk->pm.status = 0;
 
        spin_lock_init(&msk->pm.lock);
-       INIT_WORK(&msk->pm.work, pm_worker);
 
        mptcp_pm_nl_data_init(msk);
 }
 
-void mptcp_pm_close(struct mptcp_sock *msk)
-{
-       if (cancel_work_sync(&msk->pm.work))
-               sock_put((struct sock *)msk);
-}
-
-void mptcp_pm_init(void)
+void __init mptcp_pm_init(void)
 {
-       pm_wq = alloc_workqueue("pm_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 8);
-       if (!pm_wq)
-               panic("Failed to allocate workqueue");
-
        mptcp_pm_nl_init();
 }
index b78edf2..c8820c4 100644 (file)
@@ -851,7 +851,7 @@ static struct pernet_operations mptcp_pm_pernet_ops = {
        .size = sizeof(struct pm_nl_pernet),
 };
 
-void mptcp_pm_nl_init(void)
+void __init mptcp_pm_nl_init(void)
 {
        if (register_pernet_subsys(&mptcp_pm_pernet_ops) < 0)
                panic("Failed to register MPTCP PM pernet subsystem.\n");
index 3980fbb..2891ae8 100644 (file)
@@ -52,18 +52,10 @@ static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
        return msk->subflow;
 }
 
-static bool __mptcp_needs_tcp_fallback(const struct mptcp_sock *msk)
-{
-       return msk->first && !sk_is_mptcp(msk->first);
-}
-
-static struct socket *mptcp_is_tcpsk(struct sock *sk)
+static bool mptcp_is_tcpsk(struct sock *sk)
 {
        struct socket *sock = sk->sk_socket;
 
-       if (sock->sk != sk)
-               return NULL;
-
        if (unlikely(sk->sk_prot == &tcp_prot)) {
                /* we are being invoked after mptcp_accept() has
                 * accepted a non-mp-capable flow: sk is a tcp_sk,
@@ -73,59 +65,37 @@ static struct socket *mptcp_is_tcpsk(struct sock *sk)
                 * bypass mptcp.
                 */
                sock->ops = &inet_stream_ops;
-               return sock;
+               return true;
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
        } else if (unlikely(sk->sk_prot == &tcpv6_prot)) {
                sock->ops = &inet6_stream_ops;
-               return sock;
+               return true;
 #endif
        }
 
-       return NULL;
+       return false;
 }
 
-static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk)
+static struct sock *__mptcp_tcp_fallback(struct mptcp_sock *msk)
 {
-       struct socket *sock;
-
        sock_owned_by_me((const struct sock *)msk);
 
-       sock = mptcp_is_tcpsk((struct sock *)msk);
-       if (unlikely(sock))
-               return sock;
-
-       if (likely(!__mptcp_needs_tcp_fallback(msk)))
+       if (likely(!__mptcp_check_fallback(msk)))
                return NULL;
 
-       return msk->subflow;
-}
-
-static bool __mptcp_can_create_subflow(const struct mptcp_sock *msk)
-{
-       return !msk->first;
+       return msk->first;
 }
 
-static struct socket *__mptcp_socket_create(struct mptcp_sock *msk, int state)
+static int __mptcp_socket_create(struct mptcp_sock *msk)
 {
        struct mptcp_subflow_context *subflow;
        struct sock *sk = (struct sock *)msk;
        struct socket *ssock;
        int err;
 
-       ssock = __mptcp_tcp_fallback(msk);
-       if (unlikely(ssock))
-               return ssock;
-
-       ssock = __mptcp_nmpc_socket(msk);
-       if (ssock)
-               goto set_state;
-
-       if (!__mptcp_can_create_subflow(msk))
-               return ERR_PTR(-EINVAL);
-
        err = mptcp_subflow_create_socket(sk, &ssock);
        if (err)
-               return ERR_PTR(err);
+               return err;
 
        msk->first = ssock->sk;
        msk->subflow = ssock;
@@ -133,10 +103,12 @@ static struct socket *__mptcp_socket_create(struct mptcp_sock *msk, int state)
        list_add(&subflow->node, &msk->conn_list);
        subflow->request_mptcp = 1;
 
-set_state:
-       if (state != MPTCP_SAME_STATE)
-               inet_sk_state_store(sk, state);
-       return ssock;
+       /* accept() will wait on first subflow sk_wq, and we always wakes up
+        * via msk->sk_socket
+        */
+       RCU_INIT_POINTER(msk->first->sk_wq, &sk->sk_socket->wq);
+
+       return 0;
 }
 
 static void __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
@@ -207,13 +179,6 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
                return false;
        }
 
-       if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
-               int rcvbuf = max(ssk->sk_rcvbuf, sk->sk_rcvbuf);
-
-               if (rcvbuf > sk->sk_rcvbuf)
-                       sk->sk_rcvbuf = rcvbuf;
-       }
-
        tp = tcp_sk(ssk);
        do {
                u32 map_remaining, offset;
@@ -229,6 +194,15 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
                if (!skb)
                        break;
 
+               if (__mptcp_check_fallback(msk)) {
+                       /* if we are running under the workqueue, TCP could have
+                        * collapsed skbs between dummy map creation and now
+                        * be sure to adjust the size
+                        */
+                       map_remaining = skb->len;
+                       subflow->map_data_len = skb->len;
+               }
+
                offset = seq - TCP_SKB_CB(skb)->seq;
                fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
                if (fin) {
@@ -466,8 +440,15 @@ static void mptcp_clean_una(struct sock *sk)
 {
        struct mptcp_sock *msk = mptcp_sk(sk);
        struct mptcp_data_frag *dtmp, *dfrag;
-       u64 snd_una = atomic64_read(&msk->snd_una);
        bool cleaned = false;
+       u64 snd_una;
+
+       /* on fallback we just need to ignore snd_una, as this is really
+        * plain TCP
+        */
+       if (__mptcp_check_fallback(msk))
+               atomic64_set(&msk->snd_una, msk->write_seq);
+       snd_una = atomic64_read(&msk->snd_una);
 
        list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
                if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
@@ -479,15 +460,20 @@ static void mptcp_clean_una(struct sock *sk)
 
        dfrag = mptcp_rtx_head(sk);
        if (dfrag && after64(snd_una, dfrag->data_seq)) {
-               u64 delta = dfrag->data_seq + dfrag->data_len - snd_una;
+               u64 delta = snd_una - dfrag->data_seq;
+
+               if (WARN_ON_ONCE(delta > dfrag->data_len))
+                       goto out;
 
                dfrag->data_seq += delta;
+               dfrag->offset += delta;
                dfrag->data_len -= delta;
 
                dfrag_uncharge(sk, delta);
                cleaned = true;
        }
 
+out:
        if (cleaned) {
                sk_mem_reclaim_partial(sk);
 
@@ -740,7 +726,6 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        int mss_now = 0, size_goal = 0, ret = 0;
        struct mptcp_sock *msk = mptcp_sk(sk);
        struct page_frag *pfrag;
-       struct socket *ssock;
        size_t copied = 0;
        struct sock *ssk;
        bool tx_ok;
@@ -759,15 +744,6 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                        goto out;
        }
 
-fallback:
-       ssock = __mptcp_tcp_fallback(msk);
-       if (unlikely(ssock)) {
-               release_sock(sk);
-               pr_debug("fallback passthrough");
-               ret = sock_sendmsg(ssock, msg);
-               return ret >= 0 ? ret + copied : (copied ? copied : ret);
-       }
-
        pfrag = sk_page_frag(sk);
 restart:
        mptcp_clean_una(sk);
@@ -819,17 +795,6 @@ wait_for_sndbuf:
                        }
                        break;
                }
-               if (ret == 0 && unlikely(__mptcp_needs_tcp_fallback(msk))) {
-                       /* Can happen for passive sockets:
-                        * 3WHS negotiated MPTCP, but first packet after is
-                        * plain TCP (e.g. due to middlebox filtering unknown
-                        * options).
-                        *
-                        * Fall back to TCP.
-                        */
-                       release_sock(ssk);
-                       goto fallback;
-               }
 
                copied += ret;
 
@@ -949,6 +914,100 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
        return copied;
 }
 
+/* receive buffer autotuning.  See tcp_rcv_space_adjust for more information.
+ *
+ * Only difference: Use highest rtt estimate of the subflows in use.
+ */
+static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
+{
+       struct mptcp_subflow_context *subflow;
+       struct sock *sk = (struct sock *)msk;
+       u32 time, advmss = 1;
+       u64 rtt_us, mstamp;
+
+       sock_owned_by_me(sk);
+
+       if (copied <= 0)
+               return;
+
+       msk->rcvq_space.copied += copied;
+
+       mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC);
+       time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time);
+
+       rtt_us = msk->rcvq_space.rtt_us;
+       if (rtt_us && time < (rtt_us >> 3))
+               return;
+
+       rtt_us = 0;
+       mptcp_for_each_subflow(msk, subflow) {
+               const struct tcp_sock *tp;
+               u64 sf_rtt_us;
+               u32 sf_advmss;
+
+               tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));
+
+               sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us);
+               sf_advmss = READ_ONCE(tp->advmss);
+
+               rtt_us = max(sf_rtt_us, rtt_us);
+               advmss = max(sf_advmss, advmss);
+       }
+
+       msk->rcvq_space.rtt_us = rtt_us;
+       if (time < (rtt_us >> 3) || rtt_us == 0)
+               return;
+
+       if (msk->rcvq_space.copied <= msk->rcvq_space.space)
+               goto new_measure;
+
+       if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
+           !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
+               int rcvmem, rcvbuf;
+               u64 rcvwin, grow;
+
+               rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss;
+
+               grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space);
+
+               do_div(grow, msk->rcvq_space.space);
+               rcvwin += (grow << 1);
+
+               rcvmem = SKB_TRUESIZE(advmss + MAX_TCP_HEADER);
+               while (tcp_win_from_space(sk, rcvmem) < advmss)
+                       rcvmem += 128;
+
+               do_div(rcvwin, advmss);
+               rcvbuf = min_t(u64, rcvwin * rcvmem,
+                              sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
+
+               if (rcvbuf > sk->sk_rcvbuf) {
+                       u32 window_clamp;
+
+                       window_clamp = tcp_win_from_space(sk, rcvbuf);
+                       WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
+
+                       /* Make subflows follow along.  If we do not do this, we
+                        * get drops at subflow level if skbs can't be moved to
+                        * the mptcp rx queue fast enough (announced rcv_win can
+                        * exceed ssk->sk_rcvbuf).
+                        */
+                       mptcp_for_each_subflow(msk, subflow) {
+                               struct sock *ssk;
+
+                               ssk = mptcp_subflow_tcp_sock(subflow);
+                               WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
+                               tcp_sk(ssk)->window_clamp = window_clamp;
+                       }
+               }
+       }
+
+       msk->rcvq_space.space = msk->rcvq_space.copied;
+new_measure:
+       msk->rcvq_space.copied = 0;
+       msk->rcvq_space.time = mstamp;
+}
+
 static bool __mptcp_move_skbs(struct mptcp_sock *msk)
 {
        unsigned int moved = 0;
@@ -972,7 +1031,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                         int nonblock, int flags, int *addr_len)
 {
        struct mptcp_sock *msk = mptcp_sk(sk);
-       struct socket *ssock;
        int copied = 0;
        int target;
        long timeo;
@@ -981,16 +1039,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                return -EOPNOTSUPP;
 
        lock_sock(sk);
-       ssock = __mptcp_tcp_fallback(msk);
-       if (unlikely(ssock)) {
-fallback:
-               release_sock(sk);
-               pr_debug("fallback-read subflow=%p",
-                        mptcp_subflow_ctx(ssock->sk));
-               copied = sock_recvmsg(ssock, msg, flags);
-               return copied;
-       }
-
        timeo = sock_rcvtimeo(sk, nonblock);
 
        len = min_t(size_t, len, INT_MAX);
@@ -1056,9 +1104,6 @@ fallback:
 
                pr_debug("block timeout %ld", timeo);
                mptcp_wait_data(sk, &timeo);
-               ssock = __mptcp_tcp_fallback(msk);
-               if (unlikely(ssock))
-                       goto fallback;
        }
 
        if (skb_queue_empty(&sk->sk_receive_queue)) {
@@ -1075,6 +1120,8 @@ fallback:
                set_bit(MPTCP_DATA_READY, &msk->flags);
        }
 out_err:
+       mptcp_rcv_space_adjust(msk, copied);
+
        release_sock(sk);
        return copied;
 }
@@ -1172,6 +1219,29 @@ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
        return 0;
 }
 
+static void pm_work(struct mptcp_sock *msk)
+{
+       struct mptcp_pm_data *pm = &msk->pm;
+
+       spin_lock_bh(&msk->pm.lock);
+
+       pr_debug("msk=%p status=%x", msk, pm->status);
+       if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
+               pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
+               mptcp_pm_nl_add_addr_received(msk);
+       }
+       if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
+               pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
+               mptcp_pm_nl_fully_established(msk);
+       }
+       if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
+               pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
+               mptcp_pm_nl_subflow_established(msk);
+       }
+
+       spin_unlock_bh(&msk->pm.lock);
+}
+
 static void mptcp_worker(struct work_struct *work)
 {
        struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
@@ -1188,6 +1258,9 @@ static void mptcp_worker(struct work_struct *work)
        __mptcp_flush_join_list(msk);
        __mptcp_move_skbs(msk);
 
+       if (msk->pm.status)
+               pm_work(msk);
+
        if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
                mptcp_check_for_eof(msk);
 
@@ -1283,7 +1356,12 @@ static int mptcp_init_sock(struct sock *sk)
        if (ret)
                return ret;
 
+       ret = __mptcp_socket_create(mptcp_sk(sk));
+       if (ret)
+               return ret;
+
        sk_sockets_allocated_inc(sk);
+       sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
        sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[2];
 
        return 0;
@@ -1335,8 +1413,6 @@ static void mptcp_subflow_shutdown(struct sock *ssk, int how,
                break;
        }
 
-       /* Wake up anyone sleeping in poll. */
-       ssk->sk_state_change(ssk);
        release_sock(ssk);
 }
 
@@ -1375,7 +1451,6 @@ static void mptcp_close(struct sock *sk, long timeout)
        }
 
        mptcp_cancel_work(sk);
-       mptcp_pm_close(msk);
 
        __skb_queue_purge(&sk->sk_receive_queue);
 
@@ -1447,20 +1522,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
        msk->local_key = subflow_req->local_key;
        msk->token = subflow_req->token;
        msk->subflow = NULL;
-
-       if (unlikely(mptcp_token_new_accept(subflow_req->token, nsk))) {
-               nsk->sk_state = TCP_CLOSE;
-               bh_unlock_sock(nsk);
-
-               /* we can't call into mptcp_close() here - possible BH context
-                * free the sock directly.
-                * sk_clone_lock() sets nsk refcnt to two, hence call sk_free()
-                * too.
-                */
-               sk_common_release(nsk);
-               sk_free(nsk);
-               return NULL;
-       }
+       WRITE_ONCE(msk->fully_established, false);
 
        msk->write_seq = subflow_req->idsn + 1;
        atomic64_set(&msk->snd_una, msk->write_seq);
@@ -1482,6 +1544,22 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
        return nsk;
 }
 
+void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
+{
+       const struct tcp_sock *tp = tcp_sk(ssk);
+
+       msk->rcvq_space.copied = 0;
+       msk->rcvq_space.rtt_us = 0;
+
+       msk->rcvq_space.time = tp->tcp_mstamp;
+
+       /* initial rcv_space offering made to peer */
+       msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
+                                     TCP_INIT_CWND * tp->advmss);
+       if (msk->rcvq_space.space == 0)
+               msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
+}
+
 static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
                                 bool kern)
 {
@@ -1501,7 +1579,6 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
                return NULL;
 
        pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));
-
        if (sk_is_mptcp(newsk)) {
                struct mptcp_subflow_context *subflow;
                struct sock *new_mptcp_sock;
@@ -1529,8 +1606,8 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
                newsk = new_mptcp_sock;
                mptcp_copy_inaddrs(newsk, ssk);
                list_add(&subflow->node, &msk->conn_list);
-               inet_sk_state_store(newsk, TCP_ESTABLISHED);
 
+               mptcp_rcv_space_init(msk, ssk);
                bh_unlock_sock(new_mptcp_sock);
 
                __MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
@@ -1547,21 +1624,82 @@ static void mptcp_destroy(struct sock *sk)
 {
        struct mptcp_sock *msk = mptcp_sk(sk);
 
-       mptcp_token_destroy(msk->token);
+       mptcp_token_destroy(msk);
        if (msk->cached_ext)
                __skb_ext_put(msk->cached_ext);
 
        sk_sockets_allocated_dec(sk);
 }
 
+static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
+                                      sockptr_t optval, unsigned int optlen)
+{
+       struct sock *sk = (struct sock *)msk;
+       struct socket *ssock;
+       int ret;
+
+       switch (optname) {
+       case SO_REUSEPORT:
+       case SO_REUSEADDR:
+               lock_sock(sk);
+               ssock = __mptcp_nmpc_socket(msk);
+               if (!ssock) {
+                       release_sock(sk);
+                       return -EINVAL;
+               }
+
+               ret = sock_setsockopt(ssock, SOL_SOCKET, optname, optval, optlen);
+               if (ret == 0) {
+                       if (optname == SO_REUSEPORT)
+                               sk->sk_reuseport = ssock->sk->sk_reuseport;
+                       else if (optname == SO_REUSEADDR)
+                               sk->sk_reuse = ssock->sk->sk_reuse;
+               }
+               release_sock(sk);
+               return ret;
+       }
+
+       return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen);
+}
+
+static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
+                              sockptr_t optval, unsigned int optlen)
+{
+       struct sock *sk = (struct sock *)msk;
+       int ret = -EOPNOTSUPP;
+       struct socket *ssock;
+
+       switch (optname) {
+       case IPV6_V6ONLY:
+               lock_sock(sk);
+               ssock = __mptcp_nmpc_socket(msk);
+               if (!ssock) {
+                       release_sock(sk);
+                       return -EINVAL;
+               }
+
+               ret = tcp_setsockopt(ssock->sk, SOL_IPV6, optname, optval, optlen);
+               if (ret == 0)
+                       sk->sk_ipv6only = ssock->sk->sk_ipv6only;
+
+               release_sock(sk);
+               break;
+       }
+
+       return ret;
+}
+
 static int mptcp_setsockopt(struct sock *sk, int level, int optname,
-                           char __user *optval, unsigned int optlen)
+                           sockptr_t optval, unsigned int optlen)
 {
        struct mptcp_sock *msk = mptcp_sk(sk);
-       struct socket *ssock;
+       struct sock *ssk;
 
        pr_debug("msk=%p", msk);
 
+       if (level == SOL_SOCKET)
+               return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
+
        /* @@ the meaning of setsockopt() when the socket is connected and
         * there are multiple subflows is not yet defined. It is up to the
         * MPTCP-level socket to configure the subflows until the subflow
@@ -1569,11 +1707,13 @@ static int mptcp_setsockopt(struct sock *sk, int level, int optname,
         * to the one remaining subflow.
         */
        lock_sock(sk);
-       ssock = __mptcp_tcp_fallback(msk);
+       ssk = __mptcp_tcp_fallback(msk);
        release_sock(sk);
-       if (ssock)
-               return tcp_setsockopt(ssock->sk, level, optname, optval,
-                                     optlen);
+       if (ssk)
+               return tcp_setsockopt(ssk, level, optname, optval, optlen);
+
+       if (level == SOL_IPV6)
+               return mptcp_setsockopt_v6(msk, optname, optval, optlen);
 
        return -EOPNOTSUPP;
 }
@@ -1582,7 +1722,7 @@ static int mptcp_getsockopt(struct sock *sk, int level, int optname,
                            char __user *optval, int __user *option)
 {
        struct mptcp_sock *msk = mptcp_sk(sk);
-       struct socket *ssock;
+       struct sock *ssk;
 
        pr_debug("msk=%p", msk);
 
@@ -1593,11 +1733,10 @@ static int mptcp_getsockopt(struct sock *sk, int level, int optname,
         * to the one remaining subflow.
         */
        lock_sock(sk);
-       ssock = __mptcp_tcp_fallback(msk);
+       ssk = __mptcp_tcp_fallback(msk);
        release_sock(sk);
-       if (ssock)
-               return tcp_getsockopt(ssock->sk, level, optname, optval,
-                                     option);
+       if (ssk)
+               return tcp_getsockopt(ssk, level, optname, optval, option);
 
        return -EOPNOTSUPP;
 }
@@ -1636,6 +1775,20 @@ static void mptcp_release_cb(struct sock *sk)
        }
 }
 
+static int mptcp_hash(struct sock *sk)
+{
+       /* should never be called,
+        * we hash the TCP subflows not the master socket
+        */
+       WARN_ON_ONCE(1);
+       return 0;
+}
+
+static void mptcp_unhash(struct sock *sk)
+{
+       /* called from sk_common_release(), but nothing to do here */
+}
+
 static int mptcp_get_port(struct sock *sk, unsigned short snum)
 {
        struct mptcp_sock *msk = mptcp_sk(sk);
@@ -1660,32 +1813,26 @@ void mptcp_finish_connect(struct sock *ssk)
        sk = subflow->conn;
        msk = mptcp_sk(sk);
 
-       if (!subflow->mp_capable) {
-               MPTCP_INC_STATS(sock_net(sk),
-                               MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
-               return;
-       }
-
        pr_debug("msk=%p, token=%u", sk, subflow->token);
 
        mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
        ack_seq++;
        subflow->map_seq = ack_seq;
        subflow->map_subflow_seq = 1;
-       subflow->rel_write_seq = 1;
 
        /* the socket is not connected yet, no msk/subflow ops can access/race
         * accessing the field below
         */
        WRITE_ONCE(msk->remote_key, subflow->remote_key);
        WRITE_ONCE(msk->local_key, subflow->local_key);
-       WRITE_ONCE(msk->token, subflow->token);
        WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
        WRITE_ONCE(msk->ack_seq, ack_seq);
        WRITE_ONCE(msk->can_ack, 1);
        atomic64_set(&msk->snd_una, msk->write_seq);
 
        mptcp_pm_new_connection(msk, 0);
+
+       mptcp_rcv_space_init(msk, ssk);
 }
 
 static void mptcp_sock_graft(struct sock *sk, struct socket *parent)
@@ -1708,7 +1855,7 @@ bool mptcp_finish_join(struct sock *sk)
        pr_debug("msk=%p, subflow=%p", msk, subflow);
 
        /* mptcp socket already closing? */
-       if (inet_sk_state_load(parent) != TCP_ESTABLISHED)
+       if (!mptcp_is_fully_established(parent))
                return false;
 
        if (!msk->pm.server_side)
@@ -1761,8 +1908,8 @@ static struct proto mptcp_prot = {
        .sendmsg        = mptcp_sendmsg,
        .recvmsg        = mptcp_recvmsg,
        .release_cb     = mptcp_release_cb,
-       .hash           = inet_hash,
-       .unhash         = inet_unhash,
+       .hash           = mptcp_hash,
+       .unhash         = mptcp_unhash,
        .get_port       = mptcp_get_port,
        .sockets_allocated      = &mptcp_sockets_allocated,
        .memory_allocated       = &tcp_memory_allocated,
@@ -1771,6 +1918,7 @@ static struct proto mptcp_prot = {
        .sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_tcp_wmem),
        .sysctl_mem     = sysctl_tcp_mem,
        .obj_size       = sizeof(struct mptcp_sock),
+       .slab_flags     = SLAB_TYPESAFE_BY_RCU,
        .no_autobind    = true,
 };
 
@@ -1781,9 +1929,9 @@ static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        int err;
 
        lock_sock(sock->sk);
-       ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
-       if (IS_ERR(ssock)) {
-               err = PTR_ERR(ssock);
+       ssock = __mptcp_nmpc_socket(msk);
+       if (!ssock) {
+               err = -EINVAL;
                goto unlock;
        }
 
@@ -1796,10 +1944,18 @@ unlock:
        return err;
 }
 
+static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
+                                        struct mptcp_subflow_context *subflow)
+{
+       subflow->request_mptcp = 0;
+       __mptcp_do_fallback(msk);
+}
+
 static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
                                int addr_len, int flags)
 {
        struct mptcp_sock *msk = mptcp_sk(sock->sk);
+       struct mptcp_subflow_context *subflow;
        struct socket *ssock;
        int err;
 
@@ -1812,19 +1968,24 @@ static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
                goto do_connect;
        }
 
-       ssock = __mptcp_socket_create(msk, TCP_SYN_SENT);
-       if (IS_ERR(ssock)) {
-               err = PTR_ERR(ssock);
+       ssock = __mptcp_nmpc_socket(msk);
+       if (!ssock) {
+               err = -EINVAL;
                goto unlock;
        }
 
+       mptcp_token_destroy(msk);
+       inet_sk_state_store(sock->sk, TCP_SYN_SENT);
+       subflow = mptcp_subflow_ctx(ssock->sk);
 #ifdef CONFIG_TCP_MD5SIG
        /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
         * TCP option space.
         */
        if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info))
-               mptcp_subflow_ctx(ssock->sk)->request_mptcp = 0;
+               mptcp_subflow_early_fallback(msk, subflow);
 #endif
+       if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk))
+               mptcp_subflow_early_fallback(msk, subflow);
 
 do_connect:
        err = ssock->ops->connect(ssock, uaddr, addr_len, flags);
@@ -1843,42 +2004,6 @@ unlock:
        return err;
 }
 
-static int mptcp_v4_getname(struct socket *sock, struct sockaddr *uaddr,
-                           int peer)
-{
-       if (sock->sk->sk_prot == &tcp_prot) {
-               /* we are being invoked from __sys_accept4, after
-                * mptcp_accept() has just accepted a non-mp-capable
-                * flow: sk is a tcp_sk, not an mptcp one.
-                *
-                * Hand the socket over to tcp so all further socket ops
-                * bypass mptcp.
-                */
-               sock->ops = &inet_stream_ops;
-       }
-
-       return inet_getname(sock, uaddr, peer);
-}
-
-#if IS_ENABLED(CONFIG_MPTCP_IPV6)
-static int mptcp_v6_getname(struct socket *sock, struct sockaddr *uaddr,
-                           int peer)
-{
-       if (sock->sk->sk_prot == &tcpv6_prot) {
-               /* we are being invoked from __sys_accept4 after
-                * mptcp_accept() has accepted a non-mp-capable
-                * subflow: sk is a tcp_sk, not mptcp.
-                *
-                * Hand the socket over to tcp so all further
-                * socket ops bypass mptcp.
-                */
-               sock->ops = &inet6_stream_ops;
-       }
-
-       return inet6_getname(sock, uaddr, peer);
-}
-#endif
-
 static int mptcp_listen(struct socket *sock, int backlog)
 {
        struct mptcp_sock *msk = mptcp_sk(sock->sk);
@@ -1888,12 +2013,14 @@ static int mptcp_listen(struct socket *sock, int backlog)
        pr_debug("msk=%p", msk);
 
        lock_sock(sock->sk);
-       ssock = __mptcp_socket_create(msk, TCP_LISTEN);
-       if (IS_ERR(ssock)) {
-               err = PTR_ERR(ssock);
+       ssock = __mptcp_nmpc_socket(msk);
+       if (!ssock) {
+               err = -EINVAL;
                goto unlock;
        }
 
+       mptcp_token_destroy(msk);
+       inet_sk_state_store(sock->sk, TCP_LISTEN);
        sock_set_flag(sock->sk, SOCK_RCU_FREE);
 
        err = ssock->ops->listen(ssock, backlog);
@@ -1906,15 +2033,6 @@ unlock:
        return err;
 }
 
-static bool is_tcp_proto(const struct proto *p)
-{
-#if IS_ENABLED(CONFIG_MPTCP_IPV6)
-       return p == &tcp_prot || p == &tcpv6_prot;
-#else
-       return p == &tcp_prot;
-#endif
-}
-
 static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
                               int flags, bool kern)
 {
@@ -1932,11 +2050,12 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
        if (!ssock)
                goto unlock_fail;
 
+       clear_bit(MPTCP_DATA_READY, &msk->flags);
        sock_hold(ssock->sk);
        release_sock(sock->sk);
 
        err = ssock->ops->accept(sock, newsock, flags, kern);
-       if (err == 0 && !is_tcp_proto(newsock->sk->sk_prot)) {
+       if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) {
                struct mptcp_sock *msk = mptcp_sk(newsock->sk);
                struct mptcp_subflow_context *subflow;
 
@@ -1952,6 +2071,8 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
                }
        }
 
+       if (inet_csk_listen_poll(ssock->sk))
+               set_bit(MPTCP_DATA_READY, &msk->flags);
        sock_put(ssock->sk);
        return err;
 
@@ -1960,39 +2081,36 @@ unlock_fail:
        return -EINVAL;
 }
 
+static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
+{
+       return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM :
+              0;
+}
+
 static __poll_t mptcp_poll(struct file *file, struct socket *sock,
                           struct poll_table_struct *wait)
 {
        struct sock *sk = sock->sk;
        struct mptcp_sock *msk;
-       struct socket *ssock;
        __poll_t mask = 0;
+       int state;
 
        msk = mptcp_sk(sk);
-       lock_sock(sk);
-       ssock = __mptcp_tcp_fallback(msk);
-       if (!ssock)
-               ssock = __mptcp_nmpc_socket(msk);
-       if (ssock) {
-               mask = ssock->ops->poll(file, ssock, wait);
-               release_sock(sk);
-               return mask;
-       }
-
-       release_sock(sk);
        sock_poll_wait(file, sock, wait);
-       lock_sock(sk);
 
-       if (test_bit(MPTCP_DATA_READY, &msk->flags))
-               mask = EPOLLIN | EPOLLRDNORM;
-       if (sk_stream_is_writeable(sk) &&
-           test_bit(MPTCP_SEND_SPACE, &msk->flags))
-               mask |= EPOLLOUT | EPOLLWRNORM;
+       state = inet_sk_state_load(sk);
+       if (state == TCP_LISTEN)
+               return mptcp_check_readable(msk);
+
+       if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
+               mask |= mptcp_check_readable(msk);
+               if (sk_stream_is_writeable(sk) &&
+                   test_bit(MPTCP_SEND_SPACE, &msk->flags))
+                       mask |= EPOLLOUT | EPOLLWRNORM;
+       }
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
 
-       release_sock(sk);
-
        return mask;
 }
 
@@ -2000,18 +2118,11 @@ static int mptcp_shutdown(struct socket *sock, int how)
 {
        struct mptcp_sock *msk = mptcp_sk(sock->sk);
        struct mptcp_subflow_context *subflow;
-       struct socket *ssock;
        int ret = 0;
 
        pr_debug("sk=%p, how=%d", msk, how);
 
        lock_sock(sock->sk);
-       ssock = __mptcp_tcp_fallback(msk);
-       if (ssock) {
-               release_sock(sock->sk);
-               return inet_shutdown(ssock, how);
-       }
-
        if (how == SHUT_WR || how == SHUT_RDWR)
                inet_sk_state_store(sock->sk, TCP_FIN_WAIT1);
 
@@ -2037,6 +2148,9 @@ static int mptcp_shutdown(struct socket *sock, int how)
                mptcp_subflow_shutdown(tcp_sk, how, 1, msk->write_seq);
        }
 
+       /* Wake up anyone sleeping in poll. */
+       sock->sk->sk_state_change(sock->sk);
+
 out_unlock:
        release_sock(sock->sk);
 
@@ -2051,7 +2165,7 @@ static const struct proto_ops mptcp_stream_ops = {
        .connect           = mptcp_stream_connect,
        .socketpair        = sock_no_socketpair,
        .accept            = mptcp_stream_accept,
-       .getname           = mptcp_v4_getname,
+       .getname           = inet_getname,
        .poll              = mptcp_poll,
        .ioctl             = inet_ioctl,
        .gettstamp         = sock_gettstamp,
@@ -2063,10 +2177,6 @@ static const struct proto_ops mptcp_stream_ops = {
        .recvmsg           = inet_recvmsg,
        .mmap              = sock_no_mmap,
        .sendpage          = inet_sendpage,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_sock_common_setsockopt,
-       .compat_getsockopt = compat_sock_common_getsockopt,
-#endif
 };
 
 static struct inet_protosw mptcp_protosw = {
@@ -2077,7 +2187,7 @@ static struct inet_protosw mptcp_protosw = {
        .flags          = INET_PROTOSW_ICSK,
 };
 
-void mptcp_proto_init(void)
+void __init mptcp_proto_init(void)
 {
        mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo;
 
@@ -2086,6 +2196,7 @@ void mptcp_proto_init(void)
 
        mptcp_subflow_init();
        mptcp_pm_init();
+       mptcp_token_init();
 
        if (proto_register(&mptcp_prot, 1) != 0)
                panic("Failed to register MPTCP proto.\n");
@@ -2104,7 +2215,7 @@ static const struct proto_ops mptcp_v6_stream_ops = {
        .connect           = mptcp_stream_connect,
        .socketpair        = sock_no_socketpair,
        .accept            = mptcp_stream_accept,
-       .getname           = mptcp_v6_getname,
+       .getname           = inet6_getname,
        .poll              = mptcp_poll,
        .ioctl             = inet6_ioctl,
        .gettstamp         = sock_gettstamp,
@@ -2118,8 +2229,6 @@ static const struct proto_ops mptcp_v6_stream_ops = {
        .sendpage          = inet_sendpage,
 #ifdef CONFIG_COMPAT
        .compat_ioctl      = inet6_compat_ioctl,
-       .compat_setsockopt = compat_sock_common_setsockopt,
-       .compat_getsockopt = compat_sock_common_getsockopt,
 #endif
 };
 
@@ -2139,7 +2248,7 @@ static struct inet_protosw mptcp_v6_protosw = {
        .flags          = INET_PROTOSW_ICSK,
 };
 
-int mptcp_proto_v6_init(void)
+int __init mptcp_proto_v6_init(void)
 {
        int err;
 
index 809687d..67634b5 100644 (file)
@@ -89,6 +89,7 @@
 #define MPTCP_SEND_SPACE       1
 #define MPTCP_WORK_RTX         2
 #define MPTCP_WORK_EOF         3
+#define MPTCP_FALLBACK_DONE    4
 
 struct mptcp_options_received {
        u64     sndr_key;
@@ -135,8 +136,6 @@ static inline __be32 mptcp_option(u8 subopt, u8 len, u8 nib, u8 field)
                     ((nib & 0xF) << 8) | field);
 }
 
-#define MPTCP_PM_MAX_ADDR      4
-
 struct mptcp_addr_info {
        sa_family_t             family;
        __be16                  port;
@@ -175,8 +174,6 @@ struct mptcp_pm_data {
        u8              local_addr_max;
        u8              subflows_max;
        u8              status;
-
-       struct          work_struct work;
 };
 
 struct mptcp_data_frag {
@@ -201,6 +198,7 @@ struct mptcp_sock {
        u32             token;
        unsigned long   flags;
        bool            can_ack;
+       bool            fully_established;
        spinlock_t      join_list_lock;
        struct work_struct work;
        struct list_head conn_list;
@@ -210,6 +208,12 @@ struct mptcp_sock {
        struct socket   *subflow; /* outgoing connect/listener/!mp_capable */
        struct sock     *first;
        struct mptcp_pm_data    pm;
+       struct {
+               u32     space;  /* bytes copied in last measurement window */
+               u32     copied; /* bytes copied in this measurement window */
+               u64     time;   /* start time of measurement window */
+               u64     rtt_us; /* last maximum rtt of subflows */
+       } rcvq_space;
 };
 
 #define mptcp_for_each_subflow(__msk, __subflow)                       \
@@ -234,10 +238,7 @@ static inline struct mptcp_data_frag *mptcp_rtx_head(const struct sock *sk)
 {
        struct mptcp_sock *msk = mptcp_sk(sk);
 
-       if (list_empty(&msk->rtx_queue))
-               return NULL;
-
-       return list_first_entry(&msk->rtx_queue, struct mptcp_data_frag, list);
+       return list_first_entry_or_null(&msk->rtx_queue, struct mptcp_data_frag, list);
 }
 
 struct mptcp_subflow_request_sock {
@@ -254,6 +255,8 @@ struct mptcp_subflow_request_sock {
        u64     thmac;
        u32     local_nonce;
        u32     remote_nonce;
+       struct mptcp_sock       *msk;
+       struct hlist_nulls_node token_node;
 };
 
 static inline struct mptcp_subflow_request_sock *
@@ -340,8 +343,10 @@ mptcp_subflow_get_mapped_dsn(const struct mptcp_subflow_context *subflow)
 }
 
 int mptcp_is_enabled(struct net *net);
+void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
+                                    struct mptcp_options_received *mp_opt);
 bool mptcp_subflow_data_available(struct sock *sk);
-void mptcp_subflow_init(void);
+void __init mptcp_subflow_init(void);
 
 /* called with sk socket lock held */
 int __mptcp_subflow_connect(struct sock *sk, int ifindex,
@@ -359,14 +364,9 @@ static inline void mptcp_subflow_tcp_fallback(struct sock *sk,
        inet_csk(sk)->icsk_af_ops = ctx->icsk_af_ops;
 }
 
-extern const struct inet_connection_sock_af_ops ipv4_specific;
+void __init mptcp_proto_init(void);
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
-extern const struct inet_connection_sock_af_ops ipv6_specific;
-#endif
-
-void mptcp_proto_init(void);
-#if IS_ENABLED(CONFIG_MPTCP_IPV6)
-int mptcp_proto_v6_init(void);
+int __init mptcp_proto_v6_init(void);
 #endif
 
 struct sock *mptcp_sk_clone(const struct sock *sk,
@@ -376,36 +376,39 @@ void mptcp_get_options(const struct sk_buff *skb,
                       struct mptcp_options_received *mp_opt);
 
 void mptcp_finish_connect(struct sock *sk);
+static inline bool mptcp_is_fully_established(struct sock *sk)
+{
+       return inet_sk_state_load(sk) == TCP_ESTABLISHED &&
+              READ_ONCE(mptcp_sk(sk)->fully_established);
+}
+void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk);
 void mptcp_data_ready(struct sock *sk, struct sock *ssk);
 bool mptcp_finish_join(struct sock *sk);
 void mptcp_data_acked(struct sock *sk);
 void mptcp_subflow_eof(struct sock *sk);
 
+void __init mptcp_token_init(void);
+static inline void mptcp_token_init_request(struct request_sock *req)
+{
+       mptcp_subflow_rsk(req)->token_node.pprev = NULL;
+}
+
 int mptcp_token_new_request(struct request_sock *req);
-void mptcp_token_destroy_request(u32 token);
+void mptcp_token_destroy_request(struct request_sock *req);
 int mptcp_token_new_connect(struct sock *sk);
-int mptcp_token_new_accept(u32 token, struct sock *conn);
+void mptcp_token_accept(struct mptcp_subflow_request_sock *r,
+                       struct mptcp_sock *msk);
 struct mptcp_sock *mptcp_token_get_sock(u32 token);
-void mptcp_token_destroy(u32 token);
+struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot,
+                                        long *s_num);
+void mptcp_token_destroy(struct mptcp_sock *msk);
 
 void mptcp_crypto_key_sha(u64 key, u32 *token, u64 *idsn);
-static inline void mptcp_crypto_key_gen_sha(u64 *key, u32 *token, u64 *idsn)
-{
-       /* we might consider a faster version that computes the key as a
-        * hash of some information available in the MPTCP socket. Use
-        * random data at the moment, as it's probably the safest option
-        * in case multiple sockets are opened in different namespaces at
-        * the same time.
-        */
-       get_random_bytes(key, sizeof(u64));
-       mptcp_crypto_key_sha(*key, token, idsn);
-}
 
 void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac);
 
-void mptcp_pm_init(void);
+void __init mptcp_pm_init(void);
 void mptcp_pm_data_init(struct mptcp_sock *msk);
-void mptcp_pm_close(struct mptcp_sock *msk);
 void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side);
 void mptcp_pm_fully_established(struct mptcp_sock *msk);
 bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk);
@@ -437,7 +440,7 @@ bool mptcp_pm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
                          struct mptcp_addr_info *saddr);
 int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
 
-void mptcp_pm_nl_init(void);
+void __init mptcp_pm_nl_init(void);
 void mptcp_pm_nl_data_init(struct mptcp_sock *msk);
 void mptcp_pm_nl_fully_established(struct mptcp_sock *msk);
 void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk);
@@ -458,4 +461,46 @@ static inline bool before64(__u64 seq1, __u64 seq2)
 
 void mptcp_diag_subflow_init(struct tcp_ulp_ops *ops);
 
+static inline bool __mptcp_check_fallback(struct mptcp_sock *msk)
+{
+       return test_bit(MPTCP_FALLBACK_DONE, &msk->flags);
+}
+
+static inline bool mptcp_check_fallback(struct sock *sk)
+{
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+       struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+
+       return __mptcp_check_fallback(msk);
+}
+
+static inline void __mptcp_do_fallback(struct mptcp_sock *msk)
+{
+       if (test_bit(MPTCP_FALLBACK_DONE, &msk->flags)) {
+               pr_debug("TCP fallback already done (msk=%p)", msk);
+               return;
+       }
+       set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
+}
+
+static inline void mptcp_do_fallback(struct sock *sk)
+{
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+       struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+
+       __mptcp_do_fallback(msk);
+}
+
+#define pr_fallback(a) pr_debug("%s:fallback to TCP (msk=%p)", __func__, a)
+
+static inline bool subflow_simultaneous_connect(struct sock *sk)
+{
+       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+       struct sock *parent = subflow->conn;
+
+       return sk->sk_state == TCP_ESTABLISHED &&
+              !mptcp_sk(parent)->pm.server_side &&
+              !subflow->conn_finished;
+}
+
 #endif /* __MPTCP_PROTOCOL_H */
index bf13257..e645483 100644 (file)
@@ -29,48 +29,16 @@ static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
        MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
 }
 
-static int subflow_rebuild_header(struct sock *sk)
-{
-       struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
-       int local_id, err = 0;
-
-       if (subflow->request_mptcp && !subflow->token) {
-               pr_debug("subflow=%p", sk);
-               err = mptcp_token_new_connect(sk);
-       } else if (subflow->request_join && !subflow->local_nonce) {
-               struct mptcp_sock *msk = (struct mptcp_sock *)subflow->conn;
-
-               pr_debug("subflow=%p", sk);
-
-               do {
-                       get_random_bytes(&subflow->local_nonce, sizeof(u32));
-               } while (!subflow->local_nonce);
-
-               if (subflow->local_id)
-                       goto out;
-
-               local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
-               if (local_id < 0)
-                       return -EINVAL;
-
-               subflow->local_id = local_id;
-       }
-
-out:
-       if (err)
-               return err;
-
-       return subflow->icsk_af_ops->rebuild_header(sk);
-}
-
 static void subflow_req_destructor(struct request_sock *req)
 {
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
 
        pr_debug("subflow_req=%p", subflow_req);
 
-       if (subflow_req->mp_capable)
-               mptcp_token_destroy_request(subflow_req->token);
+       if (subflow_req->msk)
+               sock_put((struct sock *)subflow_req->msk);
+
+       mptcp_token_destroy_request(req);
        tcp_request_sock_ops.destructor(req);
 }
 
@@ -85,9 +53,15 @@ static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
        mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
 }
 
+static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
+{
+       return mptcp_is_fully_established((void *)msk) &&
+              READ_ONCE(msk->pm.accept_subflow);
+}
+
 /* validate received token and create truncated hmac and nonce for SYN-ACK */
-static bool subflow_token_join_request(struct request_sock *req,
-                                      const struct sk_buff *skb)
+static struct mptcp_sock *subflow_token_join_request(struct request_sock *req,
+                                                    const struct sk_buff *skb)
 {
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        u8 hmac[SHA256_DIGEST_SIZE];
@@ -97,13 +71,13 @@ static bool subflow_token_join_request(struct request_sock *req,
        msk = mptcp_token_get_sock(subflow_req->token);
        if (!msk) {
                SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
-               return false;
+               return NULL;
        }
 
        local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
        if (local_id < 0) {
                sock_put((struct sock *)msk);
-               return false;
+               return NULL;
        }
        subflow_req->local_id = local_id;
 
@@ -114,9 +88,7 @@ static bool subflow_token_join_request(struct request_sock *req,
                              subflow_req->remote_nonce, hmac);
 
        subflow_req->thmac = get_unaligned_be64(hmac);
-
-       sock_put((struct sock *)msk);
-       return true;
+       return msk;
 }
 
 static void subflow_init_req(struct request_sock *req,
@@ -133,6 +105,8 @@ static void subflow_init_req(struct request_sock *req,
 
        subflow_req->mp_capable = 0;
        subflow_req->mp_join = 0;
+       subflow_req->msk = NULL;
+       mptcp_token_init_request(req);
 
 #ifdef CONFIG_TCP_MD5SIG
        /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
@@ -166,12 +140,9 @@ static void subflow_init_req(struct request_sock *req,
                subflow_req->remote_id = mp_opt.join_id;
                subflow_req->token = mp_opt.token;
                subflow_req->remote_nonce = mp_opt.nonce;
-               pr_debug("token=%u, remote_nonce=%u", subflow_req->token,
-                        subflow_req->remote_nonce);
-               if (!subflow_token_join_request(req, skb)) {
-                       subflow_req->mp_join = 0;
-                       // @@ need to trigger RST
-               }
+               subflow_req->msk = subflow_token_join_request(req, skb);
+               pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
+                        subflow_req->remote_nonce, subflow_req->msk);
        }
 }
 
@@ -223,7 +194,6 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
        struct mptcp_options_received mp_opt;
        struct sock *parent = subflow->conn;
-       struct tcp_sock *tp = tcp_sk(sk);
 
        subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);
 
@@ -236,46 +206,40 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
        if (subflow->conn_finished)
                return;
 
+       subflow->rel_write_seq = 1;
        subflow->conn_finished = 1;
+       subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
+       pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
 
        mptcp_get_options(skb, &mp_opt);
-       if (subflow->request_mptcp && mp_opt.mp_capable) {
+       if (subflow->request_mptcp) {
+               if (!mp_opt.mp_capable) {
+                       MPTCP_INC_STATS(sock_net(sk),
+                                       MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
+                       mptcp_do_fallback(sk);
+                       pr_fallback(mptcp_sk(subflow->conn));
+                       goto fallback;
+               }
+
                subflow->mp_capable = 1;
                subflow->can_ack = 1;
                subflow->remote_key = mp_opt.sndr_key;
                pr_debug("subflow=%p, remote_key=%llu", subflow,
                         subflow->remote_key);
-       } else if (subflow->request_join && mp_opt.mp_join) {
-               subflow->mp_join = 1;
+               mptcp_finish_connect(sk);
+       } else if (subflow->request_join) {
+               u8 hmac[SHA256_DIGEST_SIZE];
+
+               if (!mp_opt.mp_join)
+                       goto do_reset;
+
                subflow->thmac = mp_opt.thmac;
                subflow->remote_nonce = mp_opt.nonce;
                pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", subflow,
                         subflow->thmac, subflow->remote_nonce);
-       } else if (subflow->request_mptcp) {
-               tp->is_mptcp = 0;
-       }
-
-       if (!tp->is_mptcp)
-               return;
 
-       if (subflow->mp_capable) {
-               pr_debug("subflow=%p, remote_key=%llu", mptcp_subflow_ctx(sk),
-                        subflow->remote_key);
-               mptcp_finish_connect(sk);
-
-               if (skb) {
-                       pr_debug("synack seq=%u", TCP_SKB_CB(skb)->seq);
-                       subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
-               }
-       } else if (subflow->mp_join) {
-               u8 hmac[SHA256_DIGEST_SIZE];
-
-               pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u",
-                        subflow, subflow->thmac,
-                        subflow->remote_nonce);
                if (!subflow_thmac_valid(subflow)) {
                        MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
-                       subflow->mp_join = 0;
                        goto do_reset;
                }
 
@@ -283,21 +247,22 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
                                      subflow->local_nonce,
                                      subflow->remote_nonce,
                                      hmac);
-
                memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);
 
-               if (skb)
-                       subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
-
                if (!mptcp_finish_join(sk))
                        goto do_reset;
 
+               subflow->mp_join = 1;
                MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
-       } else {
-do_reset:
-               tcp_send_active_reset(sk, GFP_ATOMIC);
-               tcp_done(sk);
+       } else if (mptcp_check_fallback(sk)) {
+fallback:
+               mptcp_rcv_space_init(mptcp_sk(parent), sk);
        }
+       return;
+
+do_reset:
+       tcp_send_active_reset(sk, GFP_ATOMIC);
+       tcp_done(sk);
 }
 
 static struct request_sock_ops subflow_request_sock_ops;
@@ -354,10 +319,9 @@ static bool subflow_hmac_valid(const struct request_sock *req,
        const struct mptcp_subflow_request_sock *subflow_req;
        u8 hmac[SHA256_DIGEST_SIZE];
        struct mptcp_sock *msk;
-       bool ret;
 
        subflow_req = mptcp_subflow_rsk(req);
-       msk = mptcp_token_get_sock(subflow_req->token);
+       msk = subflow_req->msk;
        if (!msk)
                return false;
 
@@ -365,12 +329,7 @@ static bool subflow_hmac_valid(const struct request_sock *req,
                              subflow_req->remote_nonce,
                              subflow_req->local_nonce, hmac);
 
-       ret = true;
-       if (crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN))
-               ret = false;
-
-       sock_put((struct sock *)msk);
-       return ret;
+       return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
 }
 
 static void mptcp_sock_destruct(struct sock *sk)
@@ -393,7 +352,7 @@ static void mptcp_sock_destruct(struct sock *sk)
                sock_orphan(sk);
        }
 
-       mptcp_token_destroy(mptcp_sk(sk)->token);
+       mptcp_token_destroy(mptcp_sk(sk));
        inet_sock_destruct(sk);
 }
 
@@ -428,6 +387,17 @@ static void subflow_drop_ctx(struct sock *ssk)
        kfree_rcu(ctx, rcu);
 }
 
+void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
+                                    struct mptcp_options_received *mp_opt)
+{
+       struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+
+       subflow->remote_key = mp_opt->sndr_key;
+       subflow->fully_established = 1;
+       subflow->can_ack = 1;
+       WRITE_ONCE(msk->fully_established, true);
+}
+
 static struct sock *subflow_syn_recv_sock(const struct sock *sk,
                                          struct sk_buff *skb,
                                          struct request_sock *req,
@@ -438,22 +408,25 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
        struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
        struct mptcp_subflow_request_sock *subflow_req;
        struct mptcp_options_received mp_opt;
-       bool fallback_is_fatal = false;
+       bool fallback, fallback_is_fatal;
        struct sock *new_msk = NULL;
-       bool fallback = false;
        struct sock *child;
 
        pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
 
-       /* we need later a valid 'mp_capable' value even when options are not
-        * parsed
+       /* After child creation we must look for 'mp_capable' even when options
+        * are not parsed
         */
        mp_opt.mp_capable = 0;
-       if (tcp_rsk(req)->is_mptcp == 0)
+
+       /* hopefully temporary handling for MP_JOIN+syncookie */
+       subflow_req = mptcp_subflow_rsk(req);
+       fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
+       fallback = !tcp_rsk(req)->is_mptcp;
+       if (fallback)
                goto create_child;
 
        /* if the sk is MP_CAPABLE, we try to fetch the client key */
-       subflow_req = mptcp_subflow_rsk(req);
        if (subflow_req->mp_capable) {
                if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) {
                        /* here we can receive and accept an in-window,
@@ -474,12 +447,12 @@ create_msk:
                if (!new_msk)
                        fallback = true;
        } else if (subflow_req->mp_join) {
-               fallback_is_fatal = true;
                mptcp_get_options(skb, &mp_opt);
                if (!mp_opt.mp_join ||
+                   !mptcp_can_accept_new_subflow(subflow_req->msk) ||
                    !subflow_hmac_valid(req, &mp_opt)) {
                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
-                       return NULL;
+                       fallback = true;
                }
        }
 
@@ -505,27 +478,34 @@ create_child:
                }
 
                if (ctx->mp_capable) {
+                       /* this can't race with mptcp_close(), as the msk is
+                        * not yet exposted to user-space
+                        */
+                       inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);
+
                        /* new mpc subflow takes ownership of the newly
                         * created mptcp socket
                         */
                        new_msk->sk_destruct = mptcp_sock_destruct;
                        mptcp_pm_new_connection(mptcp_sk(new_msk), 1);
+                       mptcp_token_accept(subflow_req, mptcp_sk(new_msk));
                        ctx->conn = new_msk;
                        new_msk = NULL;
 
                        /* with OoO packets we can reach here without ingress
                         * mpc option
                         */
-                       ctx->remote_key = mp_opt.sndr_key;
-                       ctx->fully_established = mp_opt.mp_capable;
-                       ctx->can_ack = mp_opt.mp_capable;
+                       if (mp_opt.mp_capable)
+                               mptcp_subflow_fully_established(ctx, &mp_opt);
                } else if (ctx->mp_join) {
                        struct mptcp_sock *owner;
 
-                       owner = mptcp_token_get_sock(ctx->token);
+                       owner = subflow_req->msk;
                        if (!owner)
                                goto dispose_child;
 
+                       /* move the msk reference ownership to the subflow */
+                       subflow_req->msk = NULL;
                        ctx->conn = (struct sock *)owner;
                        if (!mptcp_finish_join(child))
                                goto dispose_child;
@@ -551,9 +531,9 @@ out:
 dispose_child:
        subflow_drop_ctx(child);
        tcp_rsk(req)->drop_req = true;
-       tcp_send_active_reset(child, GFP_ATOMIC);
        inet_csk_prepare_for_destroy_sock(child);
        tcp_done(child);
+       req->rsk_ops->send_reset(sk, skb);
 
        /* The last child reference will be released by the caller */
        return child;
@@ -565,7 +545,8 @@ enum mapping_status {
        MAPPING_OK,
        MAPPING_INVALID,
        MAPPING_EMPTY,
-       MAPPING_DATA_FIN
+       MAPPING_DATA_FIN,
+       MAPPING_DUMMY
 };
 
 static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
@@ -629,6 +610,9 @@ static enum mapping_status get_mapping_status(struct sock *ssk)
        if (!skb)
                return MAPPING_EMPTY;
 
+       if (mptcp_check_fallback(ssk))
+               return MAPPING_DUMMY;
+
        mpext = mptcp_get_ext(skb);
        if (!mpext || !mpext->use_map) {
                if (!subflow->map_valid && !skb->len) {
@@ -770,6 +754,16 @@ static bool subflow_check_data_avail(struct sock *ssk)
                        ssk->sk_err = EBADMSG;
                        goto fatal;
                }
+               if (status == MAPPING_DUMMY) {
+                       __mptcp_do_fallback(msk);
+                       skb = skb_peek(&ssk->sk_receive_queue);
+                       subflow->map_valid = 1;
+                       subflow->map_seq = READ_ONCE(msk->ack_seq);
+                       subflow->map_data_len = skb->len;
+                       subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
+                                                  subflow->ssn_offset;
+                       return true;
+               }
 
                if (status != MAPPING_OK)
                        return false;
@@ -892,15 +886,20 @@ void mptcp_space(const struct sock *ssk, int *space, int *full_space)
 static void subflow_data_ready(struct sock *sk)
 {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+       u16 state = 1 << inet_sk_state_load(sk);
        struct sock *parent = subflow->conn;
+       struct mptcp_sock *msk;
 
-       if (!subflow->mp_capable && !subflow->mp_join) {
-               subflow->tcp_data_ready(sk);
-
+       msk = mptcp_sk(parent);
+       if (state & TCPF_LISTEN) {
+               set_bit(MPTCP_DATA_READY, &msk->flags);
                parent->sk_data_ready(parent);
                return;
        }
 
+       WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
+                    !subflow->mp_join && !(state & TCPF_CLOSE));
+
        if (mptcp_subflow_data_available(sk))
                mptcp_data_ready(parent, sk);
 }
@@ -977,19 +976,34 @@ int __mptcp_subflow_connect(struct sock *sk, int ifindex,
        struct mptcp_sock *msk = mptcp_sk(sk);
        struct mptcp_subflow_context *subflow;
        struct sockaddr_storage addr;
+       int local_id = loc->id;
        struct socket *sf;
+       struct sock *ssk;
        u32 remote_token;
        int addrlen;
        int err;
 
-       if (sk->sk_state != TCP_ESTABLISHED)
+       if (!mptcp_is_fully_established(sk))
                return -ENOTCONN;
 
        err = mptcp_subflow_create_socket(sk, &sf);
        if (err)
                return err;
 
-       subflow = mptcp_subflow_ctx(sf->sk);
+       ssk = sf->sk;
+       subflow = mptcp_subflow_ctx(ssk);
+       do {
+               get_random_bytes(&subflow->local_nonce, sizeof(u32));
+       } while (!subflow->local_nonce);
+
+       if (!local_id) {
+               err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk);
+               if (err < 0)
+                       goto failed;
+
+               local_id = err;
+       }
+
        subflow->remote_key = msk->remote_key;
        subflow->local_key = msk->local_key;
        subflow->token = msk->token;
@@ -1000,15 +1014,16 @@ int __mptcp_subflow_connect(struct sock *sk, int ifindex,
        if (loc->family == AF_INET6)
                addrlen = sizeof(struct sockaddr_in6);
 #endif
-       sf->sk->sk_bound_dev_if = ifindex;
+       ssk->sk_bound_dev_if = ifindex;
        err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
        if (err)
                goto failed;
 
        mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
-       pr_debug("msk=%p remote_token=%u", msk, remote_token);
+       pr_debug("msk=%p remote_token=%u local_id=%d", msk, remote_token,
+                local_id);
        subflow->remote_token = remote_token;
-       subflow->local_id = loc->id;
+       subflow->local_id = local_id;
        subflow->request_join = 1;
        subflow->request_bkup = 1;
        mptcp_info2sockaddr(remote, &addr);
@@ -1053,8 +1068,10 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
        err = tcp_set_ulp(sf->sk, "mptcp");
        release_sock(sf->sk);
 
-       if (err)
+       if (err) {
+               sock_release(sf);
                return err;
+       }
 
        /* the newly created socket really belongs to the owning MPTCP master
         * socket, even if for additional subflows the allocation is performed
@@ -1119,11 +1136,22 @@ static void subflow_state_change(struct sock *sk)
 
        __subflow_state_change(sk);
 
+       if (subflow_simultaneous_connect(sk)) {
+               mptcp_do_fallback(sk);
+               mptcp_rcv_space_init(mptcp_sk(parent), sk);
+               pr_fallback(mptcp_sk(parent));
+               subflow->conn_finished = 1;
+               if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
+                       inet_sk_state_store(parent, TCP_ESTABLISHED);
+                       parent->sk_state_change(parent);
+               }
+       }
+
        /* as recvmsg() does not acquire the subflow socket for ssk selection
         * a fin packet carrying a DSS can be unnoticed if we don't trigger
         * the data available machinery here.
         */
-       if (subflow->mp_capable && mptcp_subflow_data_available(sk))
+       if (mptcp_subflow_data_available(sk))
                mptcp_data_ready(parent, sk);
 
        if (!(parent->sk_shutdown & RCV_SHUTDOWN) &&
@@ -1256,7 +1284,7 @@ static int subflow_ops_init(struct request_sock_ops *subflow_ops)
        return 0;
 }
 
-void mptcp_subflow_init(void)
+void __init mptcp_subflow_init(void)
 {
        subflow_request_sock_ops = tcp_request_sock_ops;
        if (subflow_ops_init(&subflow_request_sock_ops) != 0)
@@ -1269,7 +1297,6 @@ void mptcp_subflow_init(void)
        subflow_specific.conn_request = subflow_v4_conn_request;
        subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
        subflow_specific.sk_rx_dst_set = subflow_finish_connect;
-       subflow_specific.rebuild_header = subflow_rebuild_header;
 
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
        subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
@@ -1279,7 +1306,6 @@ void mptcp_subflow_init(void)
        subflow_v6_specific.conn_request = subflow_v6_conn_request;
        subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
        subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
-       subflow_v6_specific.rebuild_header = subflow_rebuild_header;
 
        subflow_v6m_specific = subflow_v6_specific;
        subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
index 33352dd..97cfc45 100644 (file)
@@ -24,7 +24,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/radix-tree.h>
+#include <linux/memblock.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <net/sock.h>
 #include <net/mptcp.h>
 #include "protocol.h"
 
-static RADIX_TREE(token_tree, GFP_ATOMIC);
-static RADIX_TREE(token_req_tree, GFP_ATOMIC);
-static DEFINE_SPINLOCK(token_tree_lock);
-static int token_used __read_mostly;
+#define TOKEN_MAX_RETRIES      4
+#define TOKEN_MAX_CHAIN_LEN    4
+
+struct token_bucket {
+       spinlock_t              lock;
+       int                     chain_len;
+       struct hlist_nulls_head req_chain;
+       struct hlist_nulls_head msk_chain;
+};
+
+static struct token_bucket *token_hash __read_mostly;
+static unsigned int token_mask __read_mostly;
+
+static struct token_bucket *token_bucket(u32 token)
+{
+       return &token_hash[token & token_mask];
+}
+
+/* called with bucket lock held */
+static struct mptcp_subflow_request_sock *
+__token_lookup_req(struct token_bucket *t, u32 token)
+{
+       struct mptcp_subflow_request_sock *req;
+       struct hlist_nulls_node *pos;
+
+       hlist_nulls_for_each_entry_rcu(req, pos, &t->req_chain, token_node)
+               if (req->token == token)
+                       return req;
+       return NULL;
+}
+
+/* called with bucket lock held */
+static struct mptcp_sock *
+__token_lookup_msk(struct token_bucket *t, u32 token)
+{
+       struct hlist_nulls_node *pos;
+       struct sock *sk;
+
+       sk_nulls_for_each_rcu(sk, pos, &t->msk_chain)
+               if (mptcp_sk(sk)->token == token)
+                       return mptcp_sk(sk);
+       return NULL;
+}
+
+static bool __token_bucket_busy(struct token_bucket *t, u32 token)
+{
+       return !token || t->chain_len >= TOKEN_MAX_CHAIN_LEN ||
+              __token_lookup_req(t, token) || __token_lookup_msk(t, token);
+}
+
+static void mptcp_crypto_key_gen_sha(u64 *key, u32 *token, u64 *idsn)
+{
+       /* we might consider a faster version that computes the key as a
+        * hash of some information available in the MPTCP socket. Use
+        * random data at the moment, as it's probably the safest option
+        * in case multiple sockets are opened in different namespaces at
+        * the same time.
+        */
+       get_random_bytes(key, sizeof(u64));
+       mptcp_crypto_key_sha(*key, token, idsn);
+}
 
 /**
  * mptcp_token_new_request - create new key/idsn/token for subflow_request
@@ -52,30 +109,32 @@ static int token_used __read_mostly;
 int mptcp_token_new_request(struct request_sock *req)
 {
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
-       int err;
-
-       while (1) {
-               u32 token;
-
-               mptcp_crypto_key_gen_sha(&subflow_req->local_key,
-                                        &subflow_req->token,
-                                        &subflow_req->idsn);
-               pr_debug("req=%p local_key=%llu, token=%u, idsn=%llu\n",
-                        req, subflow_req->local_key, subflow_req->token,
-                        subflow_req->idsn);
-
-               token = subflow_req->token;
-               spin_lock_bh(&token_tree_lock);
-               if (!radix_tree_lookup(&token_req_tree, token) &&
-                   !radix_tree_lookup(&token_tree, token))
-                       break;
-               spin_unlock_bh(&token_tree_lock);
+       int retries = TOKEN_MAX_RETRIES;
+       struct token_bucket *bucket;
+       u32 token;
+
+again:
+       mptcp_crypto_key_gen_sha(&subflow_req->local_key,
+                                &subflow_req->token,
+                                &subflow_req->idsn);
+       pr_debug("req=%p local_key=%llu, token=%u, idsn=%llu\n",
+                req, subflow_req->local_key, subflow_req->token,
+                subflow_req->idsn);
+
+       token = subflow_req->token;
+       bucket = token_bucket(token);
+       spin_lock_bh(&bucket->lock);
+       if (__token_bucket_busy(bucket, token)) {
+               spin_unlock_bh(&bucket->lock);
+               if (!--retries)
+                       return -EBUSY;
+               goto again;
        }
 
-       err = radix_tree_insert(&token_req_tree,
-                               subflow_req->token, &token_used);
-       spin_unlock_bh(&token_tree_lock);
-       return err;
+       hlist_nulls_add_head_rcu(&subflow_req->token_node, &bucket->req_chain);
+       bucket->chain_len++;
+       spin_unlock_bh(&bucket->lock);
+       return 0;
 }
 
 /**
@@ -97,48 +156,56 @@ int mptcp_token_new_request(struct request_sock *req)
 int mptcp_token_new_connect(struct sock *sk)
 {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
-       struct sock *mptcp_sock = subflow->conn;
-       int err;
+       struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+       int retries = TOKEN_MAX_RETRIES;
+       struct token_bucket *bucket;
 
-       while (1) {
-               u32 token;
+       pr_debug("ssk=%p, local_key=%llu, token=%u, idsn=%llu\n",
+                sk, subflow->local_key, subflow->token, subflow->idsn);
 
-               mptcp_crypto_key_gen_sha(&subflow->local_key, &subflow->token,
-                                        &subflow->idsn);
+again:
+       mptcp_crypto_key_gen_sha(&subflow->local_key, &subflow->token,
+                                &subflow->idsn);
 
-               pr_debug("ssk=%p, local_key=%llu, token=%u, idsn=%llu\n",
-                        sk, subflow->local_key, subflow->token, subflow->idsn);
-
-               token = subflow->token;
-               spin_lock_bh(&token_tree_lock);
-               if (!radix_tree_lookup(&token_req_tree, token) &&
-                   !radix_tree_lookup(&token_tree, token))
-                       break;
-               spin_unlock_bh(&token_tree_lock);
+       bucket = token_bucket(subflow->token);
+       spin_lock_bh(&bucket->lock);
+       if (__token_bucket_busy(bucket, subflow->token)) {
+               spin_unlock_bh(&bucket->lock);
+               if (!--retries)
+                       return -EBUSY;
+               goto again;
        }
-       err = radix_tree_insert(&token_tree, subflow->token, mptcp_sock);
-       spin_unlock_bh(&token_tree_lock);
 
-       return err;
+       WRITE_ONCE(msk->token, subflow->token);
+       __sk_nulls_add_node_rcu((struct sock *)msk, &bucket->msk_chain);
+       bucket->chain_len++;
+       spin_unlock_bh(&bucket->lock);
+       return 0;
 }
 
 /**
- * mptcp_token_new_accept - insert token for later processing
- * @token: the token to insert to the tree
- * @conn: the just cloned socket linked to the new connection
+ * mptcp_token_accept - replace a req sk with full sock in token hash
+ * @req: the request socket to be removed
+ * @msk: the just cloned socket linked to the new connection
  *
  * Called when a SYN packet creates a new logical connection, i.e.
  * is not a join request.
  */
-int mptcp_token_new_accept(u32 token, struct sock *conn)
+void mptcp_token_accept(struct mptcp_subflow_request_sock *req,
+                       struct mptcp_sock *msk)
 {
-       int err;
+       struct mptcp_subflow_request_sock *pos;
+       struct token_bucket *bucket;
 
-       spin_lock_bh(&token_tree_lock);
-       err = radix_tree_insert(&token_tree, token, conn);
-       spin_unlock_bh(&token_tree_lock);
+       bucket = token_bucket(req->token);
+       spin_lock_bh(&bucket->lock);
 
-       return err;
+       /* pedantic lookup check for the moved token */
+       pos = __token_lookup_req(bucket, req->token);
+       if (!WARN_ON_ONCE(pos != req))
+               hlist_nulls_del_init_rcu(&req->token_node);
+       __sk_nulls_add_node_rcu((struct sock *)msk, &bucket->msk_chain);
+       spin_unlock_bh(&bucket->lock);
 }
 
 /**
@@ -152,45 +219,171 @@ int mptcp_token_new_accept(u32 token, struct sock *conn)
  */
 struct mptcp_sock *mptcp_token_get_sock(u32 token)
 {
-       struct sock *conn;
-
-       spin_lock_bh(&token_tree_lock);
-       conn = radix_tree_lookup(&token_tree, token);
-       if (conn) {
-               /* token still reserved? */
-               if (conn == (struct sock *)&token_used)
-                       conn = NULL;
-               else
-                       sock_hold(conn);
+       struct hlist_nulls_node *pos;
+       struct token_bucket *bucket;
+       struct mptcp_sock *msk;
+       struct sock *sk;
+
+       rcu_read_lock();
+       bucket = token_bucket(token);
+
+again:
+       sk_nulls_for_each_rcu(sk, pos, &bucket->msk_chain) {
+               msk = mptcp_sk(sk);
+               if (READ_ONCE(msk->token) != token)
+                       continue;
+               if (!refcount_inc_not_zero(&sk->sk_refcnt))
+                       goto not_found;
+               if (READ_ONCE(msk->token) != token) {
+                       sock_put(sk);
+                       goto again;
+               }
+               goto found;
        }
-       spin_unlock_bh(&token_tree_lock);
+       if (get_nulls_value(pos) != (token & token_mask))
+               goto again;
+
+not_found:
+       msk = NULL;
 
-       return mptcp_sk(conn);
+found:
+       rcu_read_unlock();
+       return msk;
 }
+EXPORT_SYMBOL_GPL(mptcp_token_get_sock);
+
+/**
+ * mptcp_token_iter_next - iterate over the token container from given pos
+ * @net: namespace to be iterated
+ * @s_slot: start slot number
+ * @s_num: start number inside the given lock
+ *
+ * This function returns the first mptcp connection structure found inside the
+ * token container starting from the specified position, or NULL.
+ *
+ * On successful iteration, the iterator is move to the next position and the
+ * the acquires a reference to the returned socket.
+ */
+struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot,
+                                        long *s_num)
+{
+       struct mptcp_sock *ret = NULL;
+       struct hlist_nulls_node *pos;
+       int slot, num;
+
+       for (slot = *s_slot; slot <= token_mask; *s_num = 0, slot++) {
+               struct token_bucket *bucket = &token_hash[slot];
+               struct sock *sk;
+
+               num = 0;
+
+               if (hlist_nulls_empty(&bucket->msk_chain))
+                       continue;
+
+               rcu_read_lock();
+               sk_nulls_for_each_rcu(sk, pos, &bucket->msk_chain) {
+                       ++num;
+                       if (!net_eq(sock_net(sk), net))
+                               continue;
+
+                       if (num <= *s_num)
+                               continue;
+
+                       if (!refcount_inc_not_zero(&sk->sk_refcnt))
+                               continue;
+
+                       if (!net_eq(sock_net(sk), net)) {
+                               sock_put(sk);
+                               continue;
+                       }
+
+                       ret = mptcp_sk(sk);
+                       rcu_read_unlock();
+                       goto out;
+               }
+               rcu_read_unlock();
+       }
+
+out:
+       *s_slot = slot;
+       *s_num = num;
+       return ret;
+}
+EXPORT_SYMBOL_GPL(mptcp_token_iter_next);
 
 /**
  * mptcp_token_destroy_request - remove mptcp connection/token
- * @token: token of mptcp connection to remove
+ * @req: mptcp request socket dropping the token
  *
- * Remove not-yet-fully-established incoming connection identified
- * by @token.
+ * Remove the token associated to @req.
  */
-void mptcp_token_destroy_request(u32 token)
+void mptcp_token_destroy_request(struct request_sock *req)
 {
-       spin_lock_bh(&token_tree_lock);
-       radix_tree_delete(&token_req_tree, token);
-       spin_unlock_bh(&token_tree_lock);
+       struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
+       struct mptcp_subflow_request_sock *pos;
+       struct token_bucket *bucket;
+
+       if (hlist_nulls_unhashed(&subflow_req->token_node))
+               return;
+
+       bucket = token_bucket(subflow_req->token);
+       spin_lock_bh(&bucket->lock);
+       pos = __token_lookup_req(bucket, subflow_req->token);
+       if (!WARN_ON_ONCE(pos != subflow_req)) {
+               hlist_nulls_del_init_rcu(&pos->token_node);
+               bucket->chain_len--;
+       }
+       spin_unlock_bh(&bucket->lock);
 }
 
 /**
  * mptcp_token_destroy - remove mptcp connection/token
- * @token: token of mptcp connection to remove
+ * @msk: mptcp connection dropping the token
  *
- * Remove the connection identified by @token.
+ * Remove the token associated to @msk
  */
-void mptcp_token_destroy(u32 token)
+void mptcp_token_destroy(struct mptcp_sock *msk)
 {
-       spin_lock_bh(&token_tree_lock);
-       radix_tree_delete(&token_tree, token);
-       spin_unlock_bh(&token_tree_lock);
+       struct token_bucket *bucket;
+       struct mptcp_sock *pos;
+
+       if (sk_unhashed((struct sock *)msk))
+               return;
+
+       bucket = token_bucket(msk->token);
+       spin_lock_bh(&bucket->lock);
+       pos = __token_lookup_msk(bucket, msk->token);
+       if (!WARN_ON_ONCE(pos != msk)) {
+               __sk_nulls_del_node_init_rcu((struct sock *)pos);
+               bucket->chain_len--;
+       }
+       spin_unlock_bh(&bucket->lock);
 }
+
+void __init mptcp_token_init(void)
+{
+       int i;
+
+       token_hash = alloc_large_system_hash("MPTCP token",
+                                            sizeof(struct token_bucket),
+                                            0,
+                                            20,/* one slot per 1MB of memory */
+                                            HASH_ZERO,
+                                            NULL,
+                                            &token_mask,
+                                            0,
+                                            64 * 1024);
+       for (i = 0; i < token_mask + 1; ++i) {
+               INIT_HLIST_NULLS_HEAD(&token_hash[i].req_chain, i);
+               INIT_HLIST_NULLS_HEAD(&token_hash[i].msk_chain, i);
+               spin_lock_init(&token_hash[i].lock);
+       }
+}
+
+#if IS_MODULE(CONFIG_MPTCP_KUNIT_TESTS)
+EXPORT_SYMBOL_GPL(mptcp_token_new_request);
+EXPORT_SYMBOL_GPL(mptcp_token_new_connect);
+EXPORT_SYMBOL_GPL(mptcp_token_accept);
+EXPORT_SYMBOL_GPL(mptcp_token_destroy_request);
+EXPORT_SYMBOL_GPL(mptcp_token_destroy);
+#endif
diff --git a/net/mptcp/token_test.c b/net/mptcp/token_test.c
new file mode 100644 (file)
index 0000000..e1bd6f0
--- /dev/null
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <kunit/test.h>
+
+#include "protocol.h"
+
+static struct mptcp_subflow_request_sock *build_req_sock(struct kunit *test)
+{
+       struct mptcp_subflow_request_sock *req;
+
+       req = kunit_kzalloc(test, sizeof(struct mptcp_subflow_request_sock),
+                           GFP_USER);
+       KUNIT_EXPECT_NOT_ERR_OR_NULL(test, req);
+       mptcp_token_init_request((struct request_sock *)req);
+       return req;
+}
+
+static void mptcp_token_test_req_basic(struct kunit *test)
+{
+       struct mptcp_subflow_request_sock *req = build_req_sock(test);
+       struct mptcp_sock *null_msk = NULL;
+
+       KUNIT_ASSERT_EQ(test, 0,
+                       mptcp_token_new_request((struct request_sock *)req));
+       KUNIT_EXPECT_NE(test, 0, (int)req->token);
+       KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(req->token));
+
+       /* cleanup */
+       mptcp_token_destroy_request((struct request_sock *)req);
+}
+
+static struct inet_connection_sock *build_icsk(struct kunit *test)
+{
+       struct inet_connection_sock *icsk;
+
+       icsk = kunit_kzalloc(test, sizeof(struct inet_connection_sock),
+                            GFP_USER);
+       KUNIT_EXPECT_NOT_ERR_OR_NULL(test, icsk);
+       return icsk;
+}
+
+static struct mptcp_subflow_context *build_ctx(struct kunit *test)
+{
+       struct mptcp_subflow_context *ctx;
+
+       ctx = kunit_kzalloc(test, sizeof(struct mptcp_subflow_context),
+                           GFP_USER);
+       KUNIT_EXPECT_NOT_ERR_OR_NULL(test, ctx);
+       return ctx;
+}
+
+static struct mptcp_sock *build_msk(struct kunit *test)
+{
+       struct mptcp_sock *msk;
+
+       msk = kunit_kzalloc(test, sizeof(struct mptcp_sock), GFP_USER);
+       KUNIT_EXPECT_NOT_ERR_OR_NULL(test, msk);
+       refcount_set(&((struct sock *)msk)->sk_refcnt, 1);
+       return msk;
+}
+
+static void mptcp_token_test_msk_basic(struct kunit *test)
+{
+       struct inet_connection_sock *icsk = build_icsk(test);
+       struct mptcp_subflow_context *ctx = build_ctx(test);
+       struct mptcp_sock *msk = build_msk(test);
+       struct mptcp_sock *null_msk = NULL;
+       struct sock *sk;
+
+       rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
+       ctx->conn = (struct sock *)msk;
+       sk = (struct sock *)msk;
+
+       KUNIT_ASSERT_EQ(test, 0,
+                       mptcp_token_new_connect((struct sock *)icsk));
+       KUNIT_EXPECT_NE(test, 0, (int)ctx->token);
+       KUNIT_EXPECT_EQ(test, ctx->token, msk->token);
+       KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(ctx->token));
+       KUNIT_EXPECT_EQ(test, 2, (int)refcount_read(&sk->sk_refcnt));
+
+       mptcp_token_destroy(msk);
+       KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(ctx->token));
+}
+
+static void mptcp_token_test_accept(struct kunit *test)
+{
+       struct mptcp_subflow_request_sock *req = build_req_sock(test);
+       struct mptcp_sock *msk = build_msk(test);
+
+       KUNIT_ASSERT_EQ(test, 0,
+                       mptcp_token_new_request((struct request_sock *)req));
+       msk->token = req->token;
+       mptcp_token_accept(req, msk);
+       KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(msk->token));
+
+       /* this is now a no-op */
+       mptcp_token_destroy_request((struct request_sock *)req);
+       KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(msk->token));
+
+       /* cleanup */
+       mptcp_token_destroy(msk);
+}
+
+static void mptcp_token_test_destroyed(struct kunit *test)
+{
+       struct mptcp_subflow_request_sock *req = build_req_sock(test);
+       struct mptcp_sock *msk = build_msk(test);
+       struct mptcp_sock *null_msk = NULL;
+       struct sock *sk;
+
+       sk = (struct sock *)msk;
+
+       KUNIT_ASSERT_EQ(test, 0,
+                       mptcp_token_new_request((struct request_sock *)req));
+       msk->token = req->token;
+       mptcp_token_accept(req, msk);
+
+       /* simulate race on removal */
+       refcount_set(&sk->sk_refcnt, 0);
+       KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(msk->token));
+
+       /* cleanup */
+       mptcp_token_destroy(msk);
+}
+
+static struct kunit_case mptcp_token_test_cases[] = {
+       KUNIT_CASE(mptcp_token_test_req_basic),
+       KUNIT_CASE(mptcp_token_test_msk_basic),
+       KUNIT_CASE(mptcp_token_test_accept),
+       KUNIT_CASE(mptcp_token_test_destroyed),
+       {}
+};
+
+static struct kunit_suite mptcp_token_suite = {
+       .name = "mptcp-token",
+       .test_cases = mptcp_token_test_cases,
+};
+
+kunit_test_suite(mptcp_token_suite);
+
+MODULE_LICENSE("GPL");
index a94bb59..5b1f4ec 100644 (file)
@@ -471,7 +471,7 @@ static int ncsi_rsp_handler_sma(struct ncsi_request *nr)
                memcpy(&ncf->addrs[index], cmd->mac, ETH_ALEN);
        } else {
                clear_bit(cmd->index - 1, bitmap);
-               memset(&ncf->addrs[index], 0, ETH_ALEN);
+               eth_zero_addr(&ncf->addrs[index]);
        }
        spin_unlock_irqrestore(&nc->lock, flags);
 
index 486959f..a8ce04a 100644 (file)
@@ -326,7 +326,7 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
        set->variant = &bitmap_ip;
        if (!init_map_ip(set, map, first_ip, last_ip,
                         elements, hosts, netmask)) {
-               kfree(map);
+               ip_set_free(map);
                return -ENOMEM;
        }
        if (tb[IPSET_ATTR_TIMEOUT]) {
index 2310a31..2c625e0 100644 (file)
@@ -363,7 +363,7 @@ bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
        map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
        set->variant = &bitmap_ipmac;
        if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
-               kfree(map);
+               ip_set_free(map);
                return -ENOMEM;
        }
        if (tb[IPSET_ATTR_TIMEOUT]) {
index e56ced6..7138e08 100644 (file)
@@ -274,7 +274,7 @@ bitmap_port_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
        map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long);
        set->variant = &bitmap_port;
        if (!init_map_port(set, map, first_port, last_port)) {
-               kfree(map);
+               ip_set_free(map);
                return -ENOMEM;
        }
        if (tb[IPSET_ATTR_TIMEOUT]) {
index 340cb95..56621d6 100644 (file)
@@ -460,6 +460,8 @@ ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
        for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
                if (!add_extension(id, cadt_flags, tb))
                        continue;
+               if (align < ip_set_extensions[id].align)
+                       align = ip_set_extensions[id].align;
                len = ALIGN(len, ip_set_extensions[id].align);
                set->offset[id] = len;
                set->extensions |= ip_set_extensions[id].type;
index 1ee4375..521e970 100644 (file)
@@ -682,7 +682,7 @@ retry:
        }
        t->hregion = ip_set_alloc(ahash_sizeof_regions(htable_bits));
        if (!t->hregion) {
-               kfree(t);
+               ip_set_free(t);
                ret = -ENOMEM;
                goto out;
        }
@@ -1533,7 +1533,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
        }
        t->hregion = ip_set_alloc(ahash_sizeof_regions(hbits));
        if (!t->hregion) {
-               kfree(t);
+               ip_set_free(t);
                kfree(h);
                return -ENOMEM;
        }
index 02f2f63..b3921ae 100644 (file)
@@ -807,6 +807,31 @@ static void ip_vs_conn_rcu_free(struct rcu_head *head)
        kmem_cache_free(ip_vs_conn_cachep, cp);
 }
 
+/* Try to delete connection while not holding reference */
+static void ip_vs_conn_del(struct ip_vs_conn *cp)
+{
+       if (del_timer(&cp->timer)) {
+               /* Drop cp->control chain too */
+               if (cp->control)
+                       cp->timeout = 0;
+               ip_vs_conn_expire(&cp->timer);
+       }
+}
+
+/* Try to delete connection while holding reference */
+static void ip_vs_conn_del_put(struct ip_vs_conn *cp)
+{
+       if (del_timer(&cp->timer)) {
+               /* Drop cp->control chain too */
+               if (cp->control)
+                       cp->timeout = 0;
+               __ip_vs_conn_put(cp);
+               ip_vs_conn_expire(&cp->timer);
+       } else {
+               __ip_vs_conn_put(cp);
+       }
+}
+
 static void ip_vs_conn_expire(struct timer_list *t)
 {
        struct ip_vs_conn *cp = from_timer(cp, t, timer);
@@ -827,14 +852,17 @@ static void ip_vs_conn_expire(struct timer_list *t)
 
                /* does anybody control me? */
                if (ct) {
+                       bool has_ref = !cp->timeout && __ip_vs_conn_get(ct);
+
                        ip_vs_control_del(cp);
                        /* Drop CTL or non-assured TPL if not used anymore */
-                       if (!cp->timeout && !atomic_read(&ct->n_control) &&
+                       if (has_ref && !atomic_read(&ct->n_control) &&
                            (!(ct->flags & IP_VS_CONN_F_TEMPLATE) ||
                             !(ct->state & IP_VS_CTPL_S_ASSURED))) {
                                IP_VS_DBG(4, "drop controlling connection\n");
-                               ct->timeout = 0;
-                               ip_vs_conn_expire_now(ct);
+                               ip_vs_conn_del_put(ct);
+                       } else if (has_ref) {
+                               __ip_vs_conn_put(ct);
                        }
                }
 
@@ -1317,8 +1345,7 @@ try_drop:
 
 drop:
                        IP_VS_DBG(4, "drop connection\n");
-                       cp->timeout = 0;
-                       ip_vs_conn_expire_now(cp);
+                       ip_vs_conn_del(cp);
                }
                cond_resched_rcu();
        }
@@ -1341,19 +1368,15 @@ flush_again:
                hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
                        if (cp->ipvs != ipvs)
                                continue;
-                       /* As timers are expired in LIFO order, restart
-                        * the timer of controlling connection first, so
-                        * that it is expired after us.
-                        */
+                       if (atomic_read(&cp->n_control))
+                               continue;
                        cp_c = cp->control;
-                       /* cp->control is valid only with reference to cp */
-                       if (cp_c && __ip_vs_conn_get(cp)) {
+                       IP_VS_DBG(4, "del connection\n");
+                       ip_vs_conn_del(cp);
+                       if (cp_c && !atomic_read(&cp_c->n_control)) {
                                IP_VS_DBG(4, "del controlling connection\n");
-                               ip_vs_conn_expire_now(cp_c);
-                               __ip_vs_conn_put(cp);
+                               ip_vs_conn_del(cp_c);
                        }
-                       IP_VS_DBG(4, "del connection\n");
-                       ip_vs_conn_expire_now(cp);
                }
                cond_resched_rcu();
        }
index aa6a603..b4a6b76 100644 (file)
@@ -2066,14 +2066,14 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
 
        conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
        if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
-               bool uses_ct = false, resched = false;
+               bool old_ct = false, resched = false;
 
                if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
                    unlikely(!atomic_read(&cp->dest->weight))) {
                        resched = true;
-                       uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
+                       old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
                } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
-                       uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
+                       old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
                        if (!atomic_read(&cp->n_control)) {
                                resched = true;
                        } else {
@@ -2081,15 +2081,17 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
                                 * that uses conntrack while it is still
                                 * referenced by controlled connection(s).
                                 */
-                               resched = !uses_ct;
+                               resched = !old_ct;
                        }
                }
 
                if (resched) {
+                       if (!old_ct)
+                               cp->flags &= ~IP_VS_CONN_F_NFCT;
                        if (!atomic_read(&cp->n_control))
                                ip_vs_conn_expire_now(cp);
                        __ip_vs_conn_put(cp);
-                       if (uses_ct)
+                       if (old_ct)
                                return NF_DROP;
                        cp = NULL;
                }
@@ -2256,7 +2258,7 @@ ip_vs_forward_icmp_v6(void *priv, struct sk_buff *skb,
 #endif
 
 
-static const struct nf_hook_ops ip_vs_ops[] = {
+static const struct nf_hook_ops ip_vs_ops4[] = {
        /* After packet filtering, change source only for VS/NAT */
        {
                .hook           = ip_vs_reply4,
@@ -2302,7 +2304,10 @@ static const struct nf_hook_ops ip_vs_ops[] = {
                .hooknum        = NF_INET_FORWARD,
                .priority       = 100,
        },
+};
+
 #ifdef CONFIG_IP_VS_IPV6
+static const struct nf_hook_ops ip_vs_ops6[] = {
        /* After packet filtering, change source only for VS/NAT */
        {
                .hook           = ip_vs_reply6,
@@ -2348,8 +2353,64 @@ static const struct nf_hook_ops ip_vs_ops[] = {
                .hooknum        = NF_INET_FORWARD,
                .priority       = 100,
        },
-#endif
 };
+#endif
+
+int ip_vs_register_hooks(struct netns_ipvs *ipvs, unsigned int af)
+{
+       const struct nf_hook_ops *ops;
+       unsigned int count;
+       unsigned int afmask;
+       int ret = 0;
+
+       if (af == AF_INET6) {
+#ifdef CONFIG_IP_VS_IPV6
+               ops = ip_vs_ops6;
+               count = ARRAY_SIZE(ip_vs_ops6);
+               afmask = 2;
+#else
+               return -EINVAL;
+#endif
+       } else {
+               ops = ip_vs_ops4;
+               count = ARRAY_SIZE(ip_vs_ops4);
+               afmask = 1;
+       }
+
+       if (!(ipvs->hooks_afmask & afmask)) {
+               ret = nf_register_net_hooks(ipvs->net, ops, count);
+               if (ret >= 0)
+                       ipvs->hooks_afmask |= afmask;
+       }
+       return ret;
+}
+
+void ip_vs_unregister_hooks(struct netns_ipvs *ipvs, unsigned int af)
+{
+       const struct nf_hook_ops *ops;
+       unsigned int count;
+       unsigned int afmask;
+
+       if (af == AF_INET6) {
+#ifdef CONFIG_IP_VS_IPV6
+               ops = ip_vs_ops6;
+               count = ARRAY_SIZE(ip_vs_ops6);
+               afmask = 2;
+#else
+               return;
+#endif
+       } else {
+               ops = ip_vs_ops4;
+               count = ARRAY_SIZE(ip_vs_ops4);
+               afmask = 1;
+       }
+
+       if (ipvs->hooks_afmask & afmask) {
+               nf_unregister_net_hooks(ipvs->net, ops, count);
+               ipvs->hooks_afmask &= ~afmask;
+       }
+}
+
 /*
  *     Initialize IP Virtual Server netns mem.
  */
@@ -2425,19 +2486,6 @@ static void __net_exit __ip_vs_cleanup_batch(struct list_head *net_list)
        }
 }
 
-static int __net_init __ip_vs_dev_init(struct net *net)
-{
-       int ret;
-
-       ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
-       if (ret < 0)
-               goto hook_fail;
-       return 0;
-
-hook_fail:
-       return ret;
-}
-
 static void __net_exit __ip_vs_dev_cleanup_batch(struct list_head *net_list)
 {
        struct netns_ipvs *ipvs;
@@ -2446,7 +2494,8 @@ static void __net_exit __ip_vs_dev_cleanup_batch(struct list_head *net_list)
        EnterFunction(2);
        list_for_each_entry(net, net_list, exit_list) {
                ipvs = net_ipvs(net);
-               nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
+               ip_vs_unregister_hooks(ipvs, AF_INET);
+               ip_vs_unregister_hooks(ipvs, AF_INET6);
                ipvs->enable = 0;       /* Disable packet reception */
                smp_wmb();
                ip_vs_sync_net_cleanup(ipvs);
@@ -2462,7 +2511,6 @@ static struct pernet_operations ipvs_core_ops = {
 };
 
 static struct pernet_operations ipvs_core_dev_ops = {
-       .init = __ip_vs_dev_init,
        .exit_batch = __ip_vs_dev_cleanup_batch,
 };
 
index 412656c..bcac316 100644 (file)
@@ -224,7 +224,8 @@ static void defense_work_handler(struct work_struct *work)
        update_defense_level(ipvs);
        if (atomic_read(&ipvs->dropentry))
                ip_vs_random_dropentry(ipvs);
-       schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
+       queue_delayed_work(system_long_wq, &ipvs->defense_work,
+                          DEFENSE_TIMER_PERIOD);
 }
 #endif
 
@@ -1272,6 +1273,7 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
        struct ip_vs_scheduler *sched = NULL;
        struct ip_vs_pe *pe = NULL;
        struct ip_vs_service *svc = NULL;
+       int ret_hooks = -1;
 
        /* increase the module use count */
        if (!ip_vs_use_count_inc())
@@ -1313,6 +1315,14 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
        }
 #endif
 
+       if ((u->af == AF_INET && !ipvs->num_services) ||
+           (u->af == AF_INET6 && !ipvs->num_services6)) {
+               ret = ip_vs_register_hooks(ipvs, u->af);
+               if (ret < 0)
+                       goto out_err;
+               ret_hooks = ret;
+       }
+
        svc = kzalloc(sizeof(struct ip_vs_service), GFP_KERNEL);
        if (svc == NULL) {
                IP_VS_DBG(1, "%s(): no memory\n", __func__);
@@ -1374,6 +1384,8 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
        /* Count only IPv4 services for old get/setsockopt interface */
        if (svc->af == AF_INET)
                ipvs->num_services++;
+       else if (svc->af == AF_INET6)
+               ipvs->num_services6++;
 
        /* Hash the service into the service table */
        ip_vs_svc_hash(svc);
@@ -1385,6 +1397,8 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
 
 
  out_err:
+       if (ret_hooks >= 0)
+               ip_vs_unregister_hooks(ipvs, u->af);
        if (svc != NULL) {
                ip_vs_unbind_scheduler(svc, sched);
                ip_vs_service_free(svc);
@@ -1500,9 +1514,15 @@ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup)
        struct ip_vs_pe *old_pe;
        struct netns_ipvs *ipvs = svc->ipvs;
 
-       /* Count only IPv4 services for old get/setsockopt interface */
-       if (svc->af == AF_INET)
+       if (svc->af == AF_INET) {
                ipvs->num_services--;
+               if (!ipvs->num_services)
+                       ip_vs_unregister_hooks(ipvs, svc->af);
+       } else if (svc->af == AF_INET6) {
+               ipvs->num_services6--;
+               if (!ipvs->num_services6)
+                       ip_vs_unregister_hooks(ipvs, svc->af);
+       }
 
        ip_vs_stop_estimator(svc->ipvs, &svc->stats);
 
@@ -2414,7 +2434,7 @@ static void ip_vs_copy_udest_compat(struct ip_vs_dest_user_kern *udest,
 }
 
 static int
-do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
+do_ip_vs_set_ctl(struct sock *sk, int cmd, sockptr_t ptr, unsigned int len)
 {
        struct net *net = sock_net(sk);
        int ret;
@@ -2438,7 +2458,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
                return -EINVAL;
        }
 
-       if (copy_from_user(arg, user, len) != 0)
+       if (copy_from_sockptr(arg, ptr, len) != 0)
                return -EFAULT;
 
        /* Handle daemons since they have another lock */
@@ -4063,7 +4083,8 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
        ipvs->sysctl_tbl = tbl;
        /* Schedule defense work */
        INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler);
-       schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
+       queue_delayed_work(system_long_wq, &ipvs->defense_work,
+                          DEFENSE_TIMER_PERIOD);
 
        return 0;
 }
index 79cd9dd..358108f 100644 (file)
@@ -1006,7 +1006,7 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
  *
  * @skb: skb that causes the clash
  * @h: tuplehash of the clashing entry already in table
- * @hash_reply: hash slot for reply direction
+ * @reply_hash: hash slot for reply direction
  *
  * A conntrack entry can be inserted to the connection tracking table
  * if there is no existing entry with an identical tuple.
@@ -2158,6 +2158,8 @@ static int nf_conntrack_update(struct net *net, struct sk_buff *skb)
                err = __nf_conntrack_update(net, skb, ct, ctinfo);
                if (err < 0)
                        return err;
+
+               ct = nf_ct_get(skb, &ctinfo);
        }
 
        return nf_confirm_cthelper(skb, ct, ctinfo);
index d7bd8b1..832eabe 100644 (file)
@@ -939,7 +939,8 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
                        filter->mark.mask = 0xffffffff;
                }
        } else if (cda[CTA_MARK_MASK]) {
-               return ERR_PTR(-EINVAL);
+               err = -EINVAL;
+               goto err_filter;
        }
 #endif
        if (!cda[CTA_FILTER])
@@ -947,15 +948,17 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
 
        err = ctnetlink_parse_zone(cda[CTA_ZONE], &filter->zone);
        if (err < 0)
-               return ERR_PTR(err);
+               goto err_filter;
 
        err = ctnetlink_parse_filter(cda[CTA_FILTER], filter);
        if (err < 0)
-               return ERR_PTR(err);
+               goto err_filter;
 
        if (filter->orig_flags) {
-               if (!cda[CTA_TUPLE_ORIG])
-                       return ERR_PTR(-EINVAL);
+               if (!cda[CTA_TUPLE_ORIG]) {
+                       err = -EINVAL;
+                       goto err_filter;
+               }
 
                err = ctnetlink_parse_tuple_filter(cda, &filter->orig,
                                                   CTA_TUPLE_ORIG,
@@ -963,23 +966,32 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
                                                   &filter->zone,
                                                   filter->orig_flags);
                if (err < 0)
-                       return ERR_PTR(err);
+                       goto err_filter;
        }
 
        if (filter->reply_flags) {
-               if (!cda[CTA_TUPLE_REPLY])
-                       return ERR_PTR(-EINVAL);
+               if (!cda[CTA_TUPLE_REPLY]) {
+                       err = -EINVAL;
+                       goto err_filter;
+               }
 
                err = ctnetlink_parse_tuple_filter(cda, &filter->reply,
                                                   CTA_TUPLE_REPLY,
                                                   filter->family,
                                                   &filter->zone,
                                                   filter->orig_flags);
-               if (err < 0)
-                       return ERR_PTR(err);
+               if (err < 0) {
+                       err = -EINVAL;
+                       goto err_filter;
+               }
        }
 
        return filter;
+
+err_filter:
+       kfree(filter);
+
+       return ERR_PTR(err);
 }
 
 static bool ctnetlink_needs_filter(u8 family, const struct nlattr * const *cda)
index f108a76..2b01a15 100644 (file)
@@ -73,3 +73,4 @@ EXPORT_SYMBOL_GPL(nft_fwd_dup_netdev_offload);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("Netfilter packet duplication support");
index 6a3034f..b1eb527 100644 (file)
@@ -387,51 +387,6 @@ static void nf_flow_offload_work_gc(struct work_struct *work)
        queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
 }
 
-int nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
-                                flow_setup_cb_t *cb, void *cb_priv)
-{
-       struct flow_block *block = &flow_table->flow_block;
-       struct flow_block_cb *block_cb;
-       int err = 0;
-
-       down_write(&flow_table->flow_block_lock);
-       block_cb = flow_block_cb_lookup(block, cb, cb_priv);
-       if (block_cb) {
-               err = -EEXIST;
-               goto unlock;
-       }
-
-       block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
-       if (IS_ERR(block_cb)) {
-               err = PTR_ERR(block_cb);
-               goto unlock;
-       }
-
-       list_add_tail(&block_cb->list, &block->cb_list);
-
-unlock:
-       up_write(&flow_table->flow_block_lock);
-       return err;
-}
-EXPORT_SYMBOL_GPL(nf_flow_table_offload_add_cb);
-
-void nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
-                                 flow_setup_cb_t *cb, void *cb_priv)
-{
-       struct flow_block *block = &flow_table->flow_block;
-       struct flow_block_cb *block_cb;
-
-       down_write(&flow_table->flow_block_lock);
-       block_cb = flow_block_cb_lookup(block, cb, cb_priv);
-       if (block_cb) {
-               list_del(&block_cb->list);
-               flow_block_cb_free(block_cb);
-       } else {
-               WARN_ON(true);
-       }
-       up_write(&flow_table->flow_block_lock);
-}
-EXPORT_SYMBOL_GPL(nf_flow_table_offload_del_cb);
 
 static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
                                __be16 port, __be16 new_port)
@@ -639,3 +594,4 @@ module_exit(nf_flow_table_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("Netfilter flow table module");
index 88bedf1..bc4126d 100644 (file)
@@ -72,3 +72,4 @@ module_exit(nf_flow_inet_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NF_FLOWTABLE(1); /* NFPROTO_INET */
+MODULE_DESCRIPTION("Netfilter flow table mixed IPv4/IPv6 module");
index 62651e6..2a6993f 100644 (file)
@@ -950,6 +950,7 @@ static void nf_flow_table_indr_cleanup(struct flow_block_cb *block_cb)
        nf_flow_table_gc_cleanup(flowtable, dev);
        down_write(&flowtable->flow_block_lock);
        list_del(&block_cb->list);
+       list_del(&block_cb->driver_list);
        flow_block_cb_free(block_cb);
        up_write(&flowtable->flow_block_lock);
 }
@@ -963,7 +964,7 @@ static int nf_flow_table_indr_offload_cmd(struct flow_block_offload *bo,
        nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
                                         extack);
 
-       return flow_indr_dev_setup_offload(dev, TC_SETUP_FT, flowtable, bo,
+       return flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_FT, flowtable, bo,
                                           nf_flow_table_indr_cleanup);
 }
 
index 46cb378..34afcd0 100644 (file)
@@ -89,78 +89,32 @@ out:
        return ops;
 }
 
-/* Call get/setsockopt() */
-static int nf_sockopt(struct sock *sk, u_int8_t pf, int val,
-                     char __user *opt, int *len, int get)
+int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, sockptr_t opt,
+                 unsigned int len)
 {
        struct nf_sockopt_ops *ops;
        int ret;
 
-       ops = nf_sockopt_find(sk, pf, val, get);
+       ops = nf_sockopt_find(sk, pf, val, 0);
        if (IS_ERR(ops))
                return PTR_ERR(ops);
-
-       if (get)
-               ret = ops->get(sk, val, opt, len);
-       else
-               ret = ops->set(sk, val, opt, *len);
-
+       ret = ops->set(sk, val, opt, len);
        module_put(ops->owner);
        return ret;
 }
-
-int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt,
-                 unsigned int len)
-{
-       return nf_sockopt(sk, pf, val, opt, &len, 0);
-}
 EXPORT_SYMBOL(nf_setsockopt);
 
 int nf_getsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt,
                  int *len)
 {
-       return nf_sockopt(sk, pf, val, opt, len, 1);
-}
-EXPORT_SYMBOL(nf_getsockopt);
-
-#ifdef CONFIG_COMPAT
-static int compat_nf_sockopt(struct sock *sk, u_int8_t pf, int val,
-                            char __user *opt, int *len, int get)
-{
        struct nf_sockopt_ops *ops;
        int ret;
 
-       ops = nf_sockopt_find(sk, pf, val, get);
+       ops = nf_sockopt_find(sk, pf, val, 1);
        if (IS_ERR(ops))
                return PTR_ERR(ops);
-
-       if (get) {
-               if (ops->compat_get)
-                       ret = ops->compat_get(sk, val, opt, len);
-               else
-                       ret = ops->get(sk, val, opt, len);
-       } else {
-               if (ops->compat_set)
-                       ret = ops->compat_set(sk, val, opt, *len);
-               else
-                       ret = ops->set(sk, val, opt, *len);
-       }
-
+       ret = ops->get(sk, val, opt, len);
        module_put(ops->owner);
        return ret;
 }
-
-int compat_nf_setsockopt(struct sock *sk, u_int8_t pf,
-               int val, char __user *opt, unsigned int len)
-{
-       return compat_nf_sockopt(sk, pf, val, opt, &len, 0);
-}
-EXPORT_SYMBOL(compat_nf_setsockopt);
-
-int compat_nf_getsockopt(struct sock *sk, u_int8_t pf,
-               int val, char __user *opt, int *len)
-{
-       return compat_nf_sockopt(sk, pf, val, opt, len, 1);
-}
-EXPORT_SYMBOL(compat_nf_getsockopt);
-#endif
+EXPORT_SYMBOL(nf_getsockopt);
index b9cbe1e..ebcdc8e 100644 (file)
@@ -1237,3 +1237,4 @@ EXPORT_SYMBOL_GPL(nf_synproxy_ipv6_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("nftables SYNPROXY expression support");
index 073aa10..6708a4f 100644 (file)
@@ -280,9 +280,15 @@ static struct nft_trans *nft_trans_chain_add(struct nft_ctx *ctx, int msg_type)
        if (trans == NULL)
                return ERR_PTR(-ENOMEM);
 
-       if (msg_type == NFT_MSG_NEWCHAIN)
+       if (msg_type == NFT_MSG_NEWCHAIN) {
                nft_activate_next(ctx->net, ctx->chain);
 
+               if (ctx->nla[NFTA_CHAIN_ID]) {
+                       nft_trans_chain_id(trans) =
+                               ntohl(nla_get_be32(ctx->nla[NFTA_CHAIN_ID]));
+               }
+       }
+
        list_add_tail(&trans->list, &ctx->net->nft.commit_list);
        return trans;
 }
@@ -1050,6 +1056,9 @@ static int nft_flush_table(struct nft_ctx *ctx)
                if (!nft_is_active_next(ctx->net, chain))
                        continue;
 
+               if (nft_chain_is_bound(chain))
+                       continue;
+
                ctx->chain = chain;
 
                err = nft_delrule_by_chain(ctx);
@@ -1092,6 +1101,9 @@ static int nft_flush_table(struct nft_ctx *ctx)
                if (!nft_is_active_next(ctx->net, chain))
                        continue;
 
+               if (nft_chain_is_bound(chain))
+                       continue;
+
                ctx->chain = chain;
 
                err = nft_delchain(ctx);
@@ -1274,6 +1286,7 @@ static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
                                    .len = NFT_MODULE_AUTOLOAD_LIMIT },
        [NFTA_CHAIN_COUNTERS]   = { .type = NLA_NESTED },
        [NFTA_CHAIN_FLAGS]      = { .type = NLA_U32 },
+       [NFTA_CHAIN_ID]         = { .type = NLA_U32 },
 };
 
 static const struct nla_policy nft_hook_policy[NFTA_HOOK_MAX + 1] = {
@@ -1406,13 +1419,12 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
                                              lockdep_commit_lock_is_held(net));
                if (nft_dump_stats(skb, stats))
                        goto nla_put_failure;
-
-               if ((chain->flags & NFT_CHAIN_HW_OFFLOAD) &&
-                   nla_put_be32(skb, NFTA_CHAIN_FLAGS,
-                                htonl(NFT_CHAIN_HW_OFFLOAD)))
-                       goto nla_put_failure;
        }
 
+       if (chain->flags &&
+           nla_put_be32(skb, NFTA_CHAIN_FLAGS, htonl(chain->flags)))
+               goto nla_put_failure;
+
        if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use)))
                goto nla_put_failure;
 
@@ -1614,7 +1626,7 @@ static void nf_tables_chain_free_chain_rules(struct nft_chain *chain)
        kvfree(chain->rules_next);
 }
 
-static void nf_tables_chain_destroy(struct nft_ctx *ctx)
+void nf_tables_chain_destroy(struct nft_ctx *ctx)
 {
        struct nft_chain *chain = ctx->chain;
        struct nft_hook *hook, *next;
@@ -1896,7 +1908,7 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family,
                nft_basechain_hook_init(&basechain->ops, family, hook, chain);
        }
 
-       chain->flags |= NFT_BASE_CHAIN | flags;
+       chain->flags |= NFT_CHAIN_BASE | flags;
        basechain->policy = NF_ACCEPT;
        if (chain->flags & NFT_CHAIN_HW_OFFLOAD &&
            nft_chain_offload_priority(basechain) < 0)
@@ -1907,6 +1919,22 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family,
        return 0;
 }
 
+static int nft_chain_add(struct nft_table *table, struct nft_chain *chain)
+{
+       int err;
+
+       err = rhltable_insert_key(&table->chains_ht, chain->name,
+                                 &chain->rhlhead, nft_chain_ht_params);
+       if (err)
+               return err;
+
+       list_add_tail_rcu(&chain->list, &table->chains);
+
+       return 0;
+}
+
+static u64 chain_id;
+
 static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
                              u8 policy, u32 flags)
 {
@@ -1915,6 +1943,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
        struct nft_base_chain *basechain;
        struct nft_stats __percpu *stats;
        struct net *net = ctx->net;
+       char name[NFT_NAME_MAXLEN];
        struct nft_trans *trans;
        struct nft_chain *chain;
        struct nft_rule **rules;
@@ -1926,6 +1955,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
        if (nla[NFTA_CHAIN_HOOK]) {
                struct nft_chain_hook hook;
 
+               if (flags & NFT_CHAIN_BINDING)
+                       return -EOPNOTSUPP;
+
                err = nft_chain_parse_hook(net, nla, &hook, family, true);
                if (err < 0)
                        return err;
@@ -1955,16 +1987,33 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
                        return err;
                }
        } else {
+               if (flags & NFT_CHAIN_BASE)
+                       return -EINVAL;
+               if (flags & NFT_CHAIN_HW_OFFLOAD)
+                       return -EOPNOTSUPP;
+
                chain = kzalloc(sizeof(*chain), GFP_KERNEL);
                if (chain == NULL)
                        return -ENOMEM;
+
+               chain->flags = flags;
        }
        ctx->chain = chain;
 
        INIT_LIST_HEAD(&chain->rules);
        chain->handle = nf_tables_alloc_handle(table);
        chain->table = table;
-       chain->name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL);
+
+       if (nla[NFTA_CHAIN_NAME]) {
+               chain->name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL);
+       } else {
+               if (!(flags & NFT_CHAIN_BINDING))
+                       return -EINVAL;
+
+               snprintf(name, sizeof(name), "__chain%llu", ++chain_id);
+               chain->name = kstrdup(name, GFP_KERNEL);
+       }
+
        if (!chain->name) {
                err = -ENOMEM;
                goto err1;
@@ -1984,16 +2033,9 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
        if (err < 0)
                goto err1;
 
-       err = rhltable_insert_key(&table->chains_ht, chain->name,
-                                 &chain->rhlhead, nft_chain_ht_params);
-       if (err)
-               goto err2;
-
        trans = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN);
        if (IS_ERR(trans)) {
                err = PTR_ERR(trans);
-               rhltable_remove(&table->chains_ht, &chain->rhlhead,
-                               nft_chain_ht_params);
                goto err2;
        }
 
@@ -2001,8 +2043,13 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
        if (nft_is_base_chain(chain))
                nft_trans_chain_policy(trans) = policy;
 
+       err = nft_chain_add(table, chain);
+       if (err < 0) {
+               nft_trans_destroy(trans);
+               goto err2;
+       }
+
        table->use++;
-       list_add_tail_rcu(&chain->list, &table->chains);
 
        return 0;
 err2:
@@ -2146,6 +2193,22 @@ err:
        return err;
 }
 
+static struct nft_chain *nft_chain_lookup_byid(const struct net *net,
+                                              const struct nlattr *nla)
+{
+       u32 id = ntohl(nla_get_be32(nla));
+       struct nft_trans *trans;
+
+       list_for_each_entry(trans, &net->nft.commit_list, list) {
+               struct nft_chain *chain = trans->ctx.chain;
+
+               if (trans->msg_type == NFT_MSG_NEWCHAIN &&
+                   id == nft_trans_chain_id(trans))
+                       return chain;
+       }
+       return ERR_PTR(-ENOENT);
+}
+
 static int nf_tables_newchain(struct net *net, struct sock *nlsk,
                              struct sk_buff *skb, const struct nlmsghdr *nlh,
                              const struct nlattr * const nla[],
@@ -2154,9 +2217,9 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
        const struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u8 genmask = nft_genmask_next(net);
        int family = nfmsg->nfgen_family;
+       struct nft_chain *chain = NULL;
        const struct nlattr *attr;
        struct nft_table *table;
-       struct nft_chain *chain;
        u8 policy = NF_ACCEPT;
        struct nft_ctx ctx;
        u64 handle = 0;
@@ -2181,7 +2244,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
                        return PTR_ERR(chain);
                }
                attr = nla[NFTA_CHAIN_HANDLE];
-       } else {
+       } else if (nla[NFTA_CHAIN_NAME]) {
                chain = nft_chain_lookup(net, table, attr, genmask);
                if (IS_ERR(chain)) {
                        if (PTR_ERR(chain) != -ENOENT) {
@@ -2190,6 +2253,8 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
                        }
                        chain = NULL;
                }
+       } else if (!nla[NFTA_CHAIN_ID]) {
+               return -EINVAL;
        }
 
        if (nla[NFTA_CHAIN_POLICY]) {
@@ -2220,6 +2285,9 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
        else if (chain)
                flags = chain->flags;
 
+       if (flags & ~NFT_CHAIN_FLAGS)
+               return -EOPNOTSUPP;
+
        nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
 
        if (chain != NULL) {
@@ -2230,7 +2298,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
                if (nlh->nlmsg_flags & NLM_F_REPLACE)
                        return -EOPNOTSUPP;
 
-               flags |= chain->flags & NFT_BASE_CHAIN;
+               flags |= chain->flags & NFT_CHAIN_BASE;
                return nf_tables_updchain(&ctx, genmask, policy, flags);
        }
 
@@ -2307,7 +2375,7 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk,
 
 /**
  *     nft_register_expr - register nf_tables expr type
- *     @ops: expr type
+ *     @type: expr type
  *
  *     Registers the expr type for use with nf_tables. Returns zero on
  *     success or a negative errno code otherwise.
@@ -2326,7 +2394,7 @@ EXPORT_SYMBOL_GPL(nft_register_expr);
 
 /**
  *     nft_unregister_expr - unregister nf_tables expr type
- *     @ops: expr type
+ *     @type: expr type
  *
  *     Unregisters the expr typefor use with nf_tables.
  */
@@ -2624,6 +2692,7 @@ static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
                                    .len = NFT_USERDATA_MAXLEN },
        [NFTA_RULE_ID]          = { .type = NLA_U32 },
        [NFTA_RULE_POSITION_ID] = { .type = NLA_U32 },
+       [NFTA_RULE_CHAIN_ID]    = { .type = NLA_U32 },
 };
 
 static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
@@ -2938,8 +3007,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
        kfree(rule);
 }
 
-static void nf_tables_rule_release(const struct nft_ctx *ctx,
-                                  struct nft_rule *rule)
+void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule)
 {
        nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE);
        nf_tables_rule_destroy(ctx, rule);
@@ -3030,10 +3098,24 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
                return PTR_ERR(table);
        }
 
-       chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN], genmask);
-       if (IS_ERR(chain)) {
-               NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
-               return PTR_ERR(chain);
+       if (nla[NFTA_RULE_CHAIN]) {
+               chain = nft_chain_lookup(net, table, nla[NFTA_RULE_CHAIN],
+                                        genmask);
+               if (IS_ERR(chain)) {
+                       NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
+                       return PTR_ERR(chain);
+               }
+               if (nft_chain_is_bound(chain))
+                       return -EOPNOTSUPP;
+
+       } else if (nla[NFTA_RULE_CHAIN_ID]) {
+               chain = nft_chain_lookup_byid(net, nla[NFTA_RULE_CHAIN_ID]);
+               if (IS_ERR(chain)) {
+                       NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN_ID]);
+                       return PTR_ERR(chain);
+               }
+       } else {
+               return -EINVAL;
        }
 
        if (nla[NFTA_RULE_HANDLE]) {
@@ -3245,6 +3327,8 @@ static int nf_tables_delrule(struct net *net, struct sock *nlsk,
                        NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]);
                        return PTR_ERR(chain);
                }
+               if (nft_chain_is_bound(chain))
+                       return -EOPNOTSUPP;
        }
 
        nft_ctx_init(&ctx, net, skb, nlh, family, table, chain, nla);
@@ -5281,11 +5365,24 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
  */
 void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
 {
+       struct nft_chain *chain;
+       struct nft_rule *rule;
+
        if (type == NFT_DATA_VERDICT) {
                switch (data->verdict.code) {
                case NFT_JUMP:
                case NFT_GOTO:
-                       data->verdict.chain->use++;
+                       chain = data->verdict.chain;
+                       chain->use++;
+
+                       if (!nft_chain_is_bound(chain))
+                               break;
+
+                       chain->table->use++;
+                       list_for_each_entry(rule, &chain->rules, list)
+                               chain->use++;
+
+                       nft_chain_add(chain->table, chain);
                        break;
                }
        }
@@ -5498,7 +5595,7 @@ struct nft_set_gc_batch *nft_set_gc_batch_alloc(const struct nft_set *set,
 
 /**
  *     nft_register_obj- register nf_tables stateful object type
- *     @obj: object type
+ *     @obj_type: object type
  *
  *     Registers the object type for use with nf_tables. Returns zero on
  *     success or a negative errno code otherwise.
@@ -5517,7 +5614,7 @@ EXPORT_SYMBOL_GPL(nft_register_obj);
 
 /**
  *     nft_unregister_obj - unregister nf_tables object type
- *     @obj: object type
+ *     @obj_type: object type
  *
  *     Unregisters the object type for use with nf_tables.
  */
@@ -6550,12 +6647,22 @@ err1:
        return err;
 }
 
+static void nft_flowtable_hook_release(struct nft_flowtable_hook *flowtable_hook)
+{
+       struct nft_hook *this, *next;
+
+       list_for_each_entry_safe(this, next, &flowtable_hook->list, list) {
+               list_del(&this->list);
+               kfree(this);
+       }
+}
+
 static int nft_delflowtable_hook(struct nft_ctx *ctx,
                                 struct nft_flowtable *flowtable)
 {
        const struct nlattr * const *nla = ctx->nla;
        struct nft_flowtable_hook flowtable_hook;
-       struct nft_hook *this, *next, *hook;
+       struct nft_hook *this, *hook;
        struct nft_trans *trans;
        int err;
 
@@ -6564,33 +6671,40 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx,
        if (err < 0)
                return err;
 
-       list_for_each_entry_safe(this, next, &flowtable_hook.list, list) {
+       list_for_each_entry(this, &flowtable_hook.list, list) {
                hook = nft_hook_list_find(&flowtable->hook_list, this);
                if (!hook) {
                        err = -ENOENT;
                        goto err_flowtable_del_hook;
                }
                hook->inactive = true;
-               list_del(&this->list);
-               kfree(this);
        }
 
        trans = nft_trans_alloc(ctx, NFT_MSG_DELFLOWTABLE,
                                sizeof(struct nft_trans_flowtable));
-       if (!trans)
-               return -ENOMEM;
+       if (!trans) {
+               err = -ENOMEM;
+               goto err_flowtable_del_hook;
+       }
 
        nft_trans_flowtable(trans) = flowtable;
        nft_trans_flowtable_update(trans) = true;
        INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans));
+       nft_flowtable_hook_release(&flowtable_hook);
 
        list_add_tail(&trans->list, &ctx->net->nft.commit_list);
 
        return 0;
 
 err_flowtable_del_hook:
-       list_for_each_entry(hook, &flowtable_hook.list, list)
+       list_for_each_entry(this, &flowtable_hook.list, list) {
+               hook = nft_hook_list_find(&flowtable->hook_list, this);
+               if (!hook)
+                       break;
+
                hook->inactive = false;
+       }
+       nft_flowtable_hook_release(&flowtable_hook);
 
        return err;
 }
@@ -7408,7 +7522,7 @@ static void nft_obj_del(struct nft_object *obj)
        list_del_rcu(&obj->list);
 }
 
-static void nft_chain_del(struct nft_chain *chain)
+void nft_chain_del(struct nft_chain *chain)
 {
        struct nft_table *table = chain->table;
 
@@ -7759,6 +7873,10 @@ static int __nf_tables_abort(struct net *net, bool autoload)
                                kfree(nft_trans_chain_name(trans));
                                nft_trans_destroy(trans);
                        } else {
+                               if (nft_chain_is_bound(trans->ctx.chain)) {
+                                       nft_trans_destroy(trans);
+                                       break;
+                               }
                                trans->ctx.table->use--;
                                nft_chain_del(trans->ctx.chain);
                                nf_tables_unregister_hook(trans->ctx.net,
@@ -8188,6 +8306,7 @@ static const struct nla_policy nft_verdict_policy[NFTA_VERDICT_MAX + 1] = {
        [NFTA_VERDICT_CODE]     = { .type = NLA_U32 },
        [NFTA_VERDICT_CHAIN]    = { .type = NLA_STRING,
                                    .len = NFT_CHAIN_MAXNAMELEN - 1 },
+       [NFTA_VERDICT_CHAIN_ID] = { .type = NLA_U32 },
 };
 
 static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
@@ -8224,10 +8343,19 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
                break;
        case NFT_JUMP:
        case NFT_GOTO:
-               if (!tb[NFTA_VERDICT_CHAIN])
+               if (tb[NFTA_VERDICT_CHAIN]) {
+                       chain = nft_chain_lookup(ctx->net, ctx->table,
+                                                tb[NFTA_VERDICT_CHAIN],
+                                                genmask);
+               } else if (tb[NFTA_VERDICT_CHAIN_ID]) {
+                       chain = nft_chain_lookup_byid(ctx->net,
+                                                     tb[NFTA_VERDICT_CHAIN_ID]);
+                       if (IS_ERR(chain))
+                               return PTR_ERR(chain);
+               } else {
                        return -EINVAL;
-               chain = nft_chain_lookup(ctx->net, ctx->table,
-                                        tb[NFTA_VERDICT_CHAIN], genmask);
+               }
+
                if (IS_ERR(chain))
                        return PTR_ERR(chain);
                if (nft_is_base_chain(chain))
@@ -8245,10 +8373,23 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
 
 static void nft_verdict_uninit(const struct nft_data *data)
 {
+       struct nft_chain *chain;
+       struct nft_rule *rule;
+
        switch (data->verdict.code) {
        case NFT_JUMP:
        case NFT_GOTO:
-               data->verdict.chain->use--;
+               chain = data->verdict.chain;
+               chain->use--;
+
+               if (!nft_chain_is_bound(chain))
+                       break;
+
+               chain->table->use--;
+               list_for_each_entry(rule, &chain->rules, list)
+                       chain->use--;
+
+               nft_chain_del(chain);
                break;
        }
 }
index 185fc82..9ef37c1 100644 (file)
@@ -296,6 +296,7 @@ static void nft_indr_block_cleanup(struct flow_block_cb *block_cb)
        nft_flow_block_offload_init(&bo, dev_net(dev), FLOW_BLOCK_UNBIND,
                                    basechain, &extack);
        mutex_lock(&net->nft.commit_mutex);
+       list_del(&block_cb->driver_list);
        list_move(&block_cb->list, &bo.cb_list);
        nft_flow_offload_unbind(&bo, basechain);
        mutex_unlock(&net->nft.commit_mutex);
@@ -311,7 +312,7 @@ static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain,
 
        nft_flow_block_offload_init(&bo, dev_net(dev), cmd, basechain, &extack);
 
-       err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, basechain, &bo,
+       err = flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_BLOCK, basechain, &bo,
                                          nft_indr_block_cleanup);
        if (err < 0)
                return err;
index 99127e2..5f24edf 100644 (file)
@@ -33,6 +33,7 @@
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
+MODULE_DESCRIPTION("Netfilter messages via netlink socket");
 
 #define nfnl_dereference_protected(id) \
        rcu_dereference_protected(table[(id)].subsys, \
index f9adca6..aa1a066 100644 (file)
@@ -902,3 +902,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_EXPR("match");
 MODULE_ALIAS_NFT_EXPR("target");
+MODULE_DESCRIPTION("x_tables over nftables support");
index 69d6173..7d0761f 100644 (file)
@@ -280,3 +280,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso");
 MODULE_ALIAS_NFT_EXPR("connlimit");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CONNLIMIT);
+MODULE_DESCRIPTION("nftables connlimit rule support");
index f6d4d0f..85ed461 100644 (file)
@@ -303,3 +303,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_ALIAS_NFT_EXPR("counter");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_COUNTER);
+MODULE_DESCRIPTION("nftables counter rule support");
index faea72c..77258af 100644 (file)
@@ -1345,3 +1345,4 @@ MODULE_ALIAS_NFT_EXPR("notrack");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_HELPER);
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_TIMEOUT);
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_EXPECT);
+MODULE_DESCRIPTION("Netfilter nf_tables conntrack module");
index c2e78c1..40788b3 100644 (file)
@@ -102,3 +102,4 @@ module_exit(nft_dup_netdev_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_AF_EXPR(5, "dup");
+MODULE_DESCRIPTION("nftables netdev packet duplication support");
index 465432e..a88d44e 100644 (file)
@@ -76,3 +76,4 @@ module_exit(nft_fib_inet_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
 MODULE_ALIAS_NFT_AF_EXPR(1, "fib");
+MODULE_DESCRIPTION("nftables fib inet support");
index a2e726a..3f3478a 100644 (file)
@@ -85,3 +85,4 @@ module_exit(nft_fib_netdev_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo M. Bermudo Garay <pablombg@gmail.com>");
 MODULE_ALIAS_NFT_AF_EXPR(5, "fib");
+MODULE_DESCRIPTION("nftables netdev fib lookups support");
index b70b489..3b9b97a 100644 (file)
@@ -286,3 +286,4 @@ module_exit(nft_flow_offload_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_EXPR("flow_offload");
+MODULE_DESCRIPTION("nftables hardware flow offload module");
index b836d55..96371d8 100644 (file)
@@ -248,3 +248,4 @@ module_exit(nft_hash_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Laura Garcia <nevola@gmail.com>");
 MODULE_ALIAS_NFT_EXPR("hash");
+MODULE_DESCRIPTION("Netfilter nftables hash module");
index c7f0ef7..9e55663 100644 (file)
@@ -54,6 +54,23 @@ static int nft_immediate_init(const struct nft_ctx *ctx,
        if (err < 0)
                goto err1;
 
+       if (priv->dreg == NFT_REG_VERDICT) {
+               struct nft_chain *chain = priv->data.verdict.chain;
+
+               switch (priv->data.verdict.code) {
+               case NFT_JUMP:
+               case NFT_GOTO:
+                       if (nft_chain_is_bound(chain)) {
+                               err = -EBUSY;
+                               goto err1;
+                       }
+                       chain->bound = true;
+                       break;
+               default:
+                       break;
+               }
+       }
+
        return 0;
 
 err1:
@@ -81,6 +98,39 @@ static void nft_immediate_deactivate(const struct nft_ctx *ctx,
        return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg));
 }
 
+static void nft_immediate_destroy(const struct nft_ctx *ctx,
+                                 const struct nft_expr *expr)
+{
+       const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+       const struct nft_data *data = &priv->data;
+       struct nft_ctx chain_ctx;
+       struct nft_chain *chain;
+       struct nft_rule *rule;
+
+       if (priv->dreg != NFT_REG_VERDICT)
+               return;
+
+       switch (data->verdict.code) {
+       case NFT_JUMP:
+       case NFT_GOTO:
+               chain = data->verdict.chain;
+
+               if (!nft_chain_is_bound(chain))
+                       break;
+
+               chain_ctx = *ctx;
+               chain_ctx.chain = chain;
+
+               list_for_each_entry(rule, &chain->rules, list)
+                       nf_tables_rule_release(&chain_ctx, rule);
+
+               nf_tables_chain_destroy(&chain_ctx);
+               break;
+       default:
+               break;
+       }
+}
+
 static int nft_immediate_dump(struct sk_buff *skb, const struct nft_expr *expr)
 {
        const struct nft_immediate_expr *priv = nft_expr_priv(expr);
@@ -170,6 +220,7 @@ static const struct nft_expr_ops nft_imm_ops = {
        .init           = nft_immediate_init,
        .activate       = nft_immediate_activate,
        .deactivate     = nft_immediate_deactivate,
+       .destroy        = nft_immediate_destroy,
        .dump           = nft_immediate_dump,
        .validate       = nft_immediate_validate,
        .offload        = nft_immediate_offload,
index 35b67d7..0e2c315 100644 (file)
@@ -372,3 +372,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_ALIAS_NFT_EXPR("limit");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_LIMIT);
+MODULE_DESCRIPTION("nftables limit expression support");
index fe4831f..5789945 100644 (file)
@@ -298,3 +298,4 @@ module_exit(nft_log_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_ALIAS_NFT_EXPR("log");
+MODULE_DESCRIPTION("Netfilter nf_tables log module");
index bc9fd98..71390b7 100644 (file)
@@ -305,3 +305,4 @@ module_exit(nft_masq_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
 MODULE_ALIAS_NFT_EXPR("masq");
+MODULE_DESCRIPTION("Netfilter nftables masquerade expression support");
index 23a7bfd..4bcf33b 100644 (file)
@@ -402,3 +402,4 @@ module_exit(nft_nat_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Tomasz Bursztyka <tomasz.bursztyka@linux.intel.com>");
 MODULE_ALIAS_NFT_EXPR("nat");
+MODULE_DESCRIPTION("Network Address Translation support");
index 48edb9d..f1fc824 100644 (file)
@@ -217,3 +217,4 @@ module_exit(nft_ng_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Laura Garcia <nevola@gmail.com>");
 MODULE_ALIAS_NFT_EXPR("numgen");
+MODULE_DESCRIPTION("nftables number generator module");
index bfd18d2..5f9207a 100644 (file)
@@ -252,3 +252,4 @@ module_exit(nft_objref_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_EXPR("objref");
+MODULE_DESCRIPTION("nftables stateful object reference module");
index b42247a..c261d57 100644 (file)
@@ -149,3 +149,4 @@ module_exit(nft_osf_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Fernando Fernandez <ffmancera@riseup.net>");
 MODULE_ALIAS_NFT_EXPR("osf");
+MODULE_DESCRIPTION("nftables passive OS fingerprint support");
index 5ece0a6..23265d7 100644 (file)
@@ -216,3 +216,4 @@ module_exit(nft_queue_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Eric Leblond <eric@regit.org>");
 MODULE_ALIAS_NFT_EXPR("queue");
+MODULE_DESCRIPTION("Netfilter nftables queue module");
index 4413690..0363f53 100644 (file)
@@ -254,3 +254,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_EXPR("quota");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_QUOTA);
+MODULE_DESCRIPTION("Netfilter nftables quota module");
index 5b77917..2056051 100644 (file)
@@ -292,3 +292,4 @@ module_exit(nft_redir_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
 MODULE_ALIAS_NFT_EXPR("redir");
+MODULE_DESCRIPTION("Netfilter nftables redirect support");
index 00f865f..61fb7e8 100644 (file)
@@ -30,7 +30,8 @@ int nft_reject_validate(const struct nft_ctx *ctx,
        return nft_chain_validate_hooks(ctx->chain,
                                        (1 << NF_INET_LOCAL_IN) |
                                        (1 << NF_INET_FORWARD) |
-                                       (1 << NF_INET_LOCAL_OUT));
+                                       (1 << NF_INET_LOCAL_OUT) |
+                                       (1 << NF_INET_PRE_ROUTING));
 }
 EXPORT_SYMBOL_GPL(nft_reject_validate);
 
@@ -119,3 +120,4 @@ EXPORT_SYMBOL_GPL(nft_reject_icmpv6_code);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Netfilter x_tables over nftables module");
index f41f414..cf8f264 100644 (file)
@@ -149,3 +149,4 @@ module_exit(nft_reject_inet_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
 MODULE_ALIAS_NFT_AF_EXPR(1, "reject");
+MODULE_DESCRIPTION("Netfilter nftables reject inet support");
index 8b5acc6..cc6082a 100644 (file)
@@ -401,7 +401,7 @@ int pipapo_refill(unsigned long *map, int len, int rules, unsigned long *dst,
  * nft_pipapo_lookup() - Lookup function
  * @net:       Network namespace
  * @set:       nftables API set representation
- * @elem:      nftables API element representation containing key data
+ * @key:       nftables API element representation containing key data
  * @ext:       nftables API extension pointer, filled with matching reference
  *
  * For more details, see DOC: Theory of Operation.
@@ -1075,7 +1075,7 @@ out:
  * @m:         Matching data, including mapping table
  * @map:       Table of rule maps: array of first rule and amount of rules
  *             in next field a given rule maps to, for each field
- * @ext:       For last field, nft_set_ext pointer matching rules map to
+ * @e:         For last field, nft_set_ext pointer matching rules map to
  */
 static void pipapo_map(struct nft_pipapo_match *m,
                       union nft_pipapo_map_bucket map[NFT_PIPAPO_MAX_FIELDS],
@@ -1099,7 +1099,7 @@ static void pipapo_map(struct nft_pipapo_match *m,
 /**
  * pipapo_realloc_scratch() - Reallocate scratch maps for partial match results
  * @clone:     Copy of matching data with pending insertions and deletions
- * @bsize_max  Maximum bucket size, scratch maps cover two buckets
+ * @bsize_max: Maximum bucket size, scratch maps cover two buckets
  *
  * Return: 0 on success, -ENOMEM on failure.
  */
@@ -1242,14 +1242,16 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
                end += NFT_PIPAPO_GROUPS_PADDED_SIZE(f);
        }
 
-       if (!*this_cpu_ptr(m->scratch) || bsize_max > m->bsize_max) {
+       if (!*get_cpu_ptr(m->scratch) || bsize_max > m->bsize_max) {
+               put_cpu_ptr(m->scratch);
+
                err = pipapo_realloc_scratch(m, bsize_max);
                if (err)
                        return err;
 
-               this_cpu_write(nft_pipapo_scratch_index, false);
-
                m->bsize_max = bsize_max;
+       } else {
+               put_cpu_ptr(m->scratch);
        }
 
        *ext2 = &e->ext;
@@ -1445,7 +1447,7 @@ static void pipapo_unmap(union nft_pipapo_map_bucket *mt, int rules,
 /**
  * pipapo_drop() - Delete entry from lookup and mapping tables, given rule map
  * @m:         Matching data
- * @rulemap    Table of rule maps, arrays of first rule and amount of rules
+ * @rulemap:   Table of rule maps, arrays of first rule and amount of rules
  *             in next field a given entry maps to, for each field
  *
  * For each rule in lookup table buckets mapping to this set of rules, drop
index 62f416b..b6aad3f 100644 (file)
@@ -271,12 +271,14 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 
                        if (nft_rbtree_interval_start(new)) {
                                if (nft_rbtree_interval_end(rbe) &&
-                                   nft_set_elem_active(&rbe->ext, genmask))
+                                   nft_set_elem_active(&rbe->ext, genmask) &&
+                                   !nft_set_elem_expired(&rbe->ext))
                                        overlap = false;
                        } else {
                                overlap = nft_rbtree_interval_end(rbe) &&
                                          nft_set_elem_active(&rbe->ext,
-                                                             genmask);
+                                                             genmask) &&
+                                         !nft_set_elem_expired(&rbe->ext);
                        }
                } else if (d > 0) {
                        p = &parent->rb_right;
@@ -284,9 +286,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                        if (nft_rbtree_interval_end(new)) {
                                overlap = nft_rbtree_interval_end(rbe) &&
                                          nft_set_elem_active(&rbe->ext,
-                                                             genmask);
+                                                             genmask) &&
+                                         !nft_set_elem_expired(&rbe->ext);
                        } else if (nft_rbtree_interval_end(rbe) &&
-                                  nft_set_elem_active(&rbe->ext, genmask)) {
+                                  nft_set_elem_active(&rbe->ext, genmask) &&
+                                  !nft_set_elem_expired(&rbe->ext)) {
                                overlap = true;
                        }
                } else {
@@ -294,15 +298,18 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                            nft_rbtree_interval_start(new)) {
                                p = &parent->rb_left;
 
-                               if (nft_set_elem_active(&rbe->ext, genmask))
+                               if (nft_set_elem_active(&rbe->ext, genmask) &&
+                                   !nft_set_elem_expired(&rbe->ext))
                                        overlap = false;
                        } else if (nft_rbtree_interval_start(rbe) &&
                                   nft_rbtree_interval_end(new)) {
                                p = &parent->rb_right;
 
-                               if (nft_set_elem_active(&rbe->ext, genmask))
+                               if (nft_set_elem_active(&rbe->ext, genmask) &&
+                                   !nft_set_elem_expired(&rbe->ext))
                                        overlap = false;
-                       } else if (nft_set_elem_active(&rbe->ext, genmask)) {
+                       } else if (nft_set_elem_active(&rbe->ext, genmask) &&
+                                  !nft_set_elem_expired(&rbe->ext)) {
                                *ext = &rbe->ext;
                                return -EEXIST;
                        } else {
index e2c1fc6..4fda8b3 100644 (file)
@@ -388,3 +388,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Fernando Fernandez <ffmancera@riseup.net>");
 MODULE_ALIAS_NFT_EXPR("synproxy");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_SYNPROXY);
+MODULE_DESCRIPTION("nftables SYNPROXY expression support");
index 30be578..d3eb953 100644 (file)
@@ -719,3 +719,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
 MODULE_ALIAS_NFT_EXPR("tunnel");
 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
+MODULE_DESCRIPTION("nftables tunnel expression support");
index 99a468b..b97eb4b 100644 (file)
@@ -1028,34 +1028,33 @@ int xt_check_target(struct xt_tgchk_param *par,
 EXPORT_SYMBOL_GPL(xt_check_target);
 
 /**
- * xt_copy_counters_from_user - copy counters and metadata from userspace
+ * xt_copy_counters - copy counters and metadata from a sockptr_t
  *
- * @user: src pointer to userspace memory
+ * @arg: src sockptr
  * @len: alleged size of userspace memory
  * @info: where to store the xt_counters_info metadata
- * @compat: true if we setsockopt call is done by 32bit task on 64bit kernel
  *
  * Copies counter meta data from @user and stores it in @info.
  *
  * vmallocs memory to hold the counters, then copies the counter data
  * from @user to the new memory and returns a pointer to it.
  *
- * If @compat is true, @info gets converted automatically to the 64bit
- * representation.
+ * If called from a compat syscall, @info gets converted automatically to the
+ * 64bit representation.
  *
  * The metadata associated with the counters is stored in @info.
  *
  * Return: returns pointer that caller has to test via IS_ERR().
  * If IS_ERR is false, caller has to vfree the pointer.
  */
-void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
-                                struct xt_counters_info *info, bool compat)
+void *xt_copy_counters(sockptr_t arg, unsigned int len,
+                      struct xt_counters_info *info)
 {
        void *mem;
        u64 size;
 
 #ifdef CONFIG_COMPAT
-       if (compat) {
+       if (in_compat_syscall()) {
                /* structures only differ in size due to alignment */
                struct compat_xt_counters_info compat_tmp;
 
@@ -1063,12 +1062,12 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
                        return ERR_PTR(-EINVAL);
 
                len -= sizeof(compat_tmp);
-               if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
+               if (copy_from_sockptr(&compat_tmp, arg, sizeof(compat_tmp)) != 0)
                        return ERR_PTR(-EFAULT);
 
                memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
                info->num_counters = compat_tmp.num_counters;
-               user += sizeof(compat_tmp);
+               sockptr_advance(arg, sizeof(compat_tmp));
        } else
 #endif
        {
@@ -1076,10 +1075,10 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
                        return ERR_PTR(-EINVAL);
 
                len -= sizeof(*info);
-               if (copy_from_user(info, user, sizeof(*info)) != 0)
+               if (copy_from_sockptr(info, arg, sizeof(*info)) != 0)
                        return ERR_PTR(-EFAULT);
 
-               user += sizeof(*info);
+               sockptr_advance(arg, sizeof(*info));
        }
        info->name[sizeof(info->name) - 1] = '\0';
 
@@ -1093,13 +1092,13 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
        if (!mem)
                return ERR_PTR(-ENOMEM);
 
-       if (copy_from_user(mem, user, len) == 0)
+       if (copy_from_sockptr(mem, arg, len) == 0)
                return mem;
 
        vfree(mem);
        return ERR_PTR(-EFAULT);
 }
-EXPORT_SYMBOL_GPL(xt_copy_counters_from_user);
+EXPORT_SYMBOL_GPL(xt_copy_counters);
 
 #ifdef CONFIG_COMPAT
 int xt_compat_target_offset(const struct xt_target *target)
index a8e5f6c..b4f7bbc 100644 (file)
@@ -244,3 +244,4 @@ MODULE_ALIAS("ipt_SNAT");
 MODULE_ALIAS("ipt_DNAT");
 MODULE_ALIAS("ip6t_SNAT");
 MODULE_ALIAS("ip6t_DNAT");
+MODULE_DESCRIPTION("SNAT and DNAT targets support");
index a1f2320..d07de2c 100644 (file)
@@ -92,7 +92,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
 
 /**
  * netlbl_domhsh_hash - Hashing function for the domain hash table
- * @domain: the domain name to hash
+ * @key: the domain name to hash
  *
  * Description:
  * This is the hashing function for the domain hash table, it returns the
index 4f2c3b1..d8921b8 100644 (file)
@@ -60,6 +60,7 @@
 #include <linux/genetlink.h>
 #include <linux/net_namespace.h>
 #include <linux/nospec.h>
+#include <linux/btf_ids.h>
 
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
@@ -1620,7 +1621,7 @@ static void netlink_update_socket_mc(struct netlink_sock *nlk,
 }
 
 static int netlink_setsockopt(struct socket *sock, int level, int optname,
-                             char __user *optval, unsigned int optlen)
+                             sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
@@ -1631,7 +1632,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
                return -ENOPROTOOPT;
 
        if (optlen >= sizeof(int) &&
-           get_user(val, (unsigned int __user *)optval))
+           copy_from_sockptr(&val, optval, sizeof(val)))
                return -EFAULT;
 
        switch (optname) {
@@ -2803,7 +2804,10 @@ static const struct rhashtable_params netlink_rhashtable_params = {
 };
 
 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
-static const struct bpf_iter_reg netlink_reg_info = {
+BTF_ID_LIST(btf_netlink_sock_id)
+BTF_ID(struct, netlink_sock)
+
+static struct bpf_iter_reg netlink_reg_info = {
        .target                 = "netlink",
        .seq_ops                = &netlink_seq_ops,
        .init_seq_private       = bpf_iter_init_seq_net,
@@ -2818,6 +2822,7 @@ static const struct bpf_iter_reg netlink_reg_info = {
 
 static int __init bpf_iter_register(void)
 {
+       netlink_reg_info.ctx_arg_info[0].btf_id = *btf_netlink_sock_id;
        return bpf_iter_reg_target(&netlink_reg_info);
 }
 #endif
index 55ee680..9395ee8 100644 (file)
@@ -351,22 +351,11 @@ int genl_register_family(struct genl_family *family)
                start = end = GENL_ID_VFS_DQUOT;
        }
 
-       if (family->maxattr && !family->parallel_ops) {
-               family->attrbuf = kmalloc_array(family->maxattr + 1,
-                                               sizeof(struct nlattr *),
-                                               GFP_KERNEL);
-               if (family->attrbuf == NULL) {
-                       err = -ENOMEM;
-                       goto errout_locked;
-               }
-       } else
-               family->attrbuf = NULL;
-
        family->id = idr_alloc_cyclic(&genl_fam_idr, family,
                                      start, end + 1, GFP_KERNEL);
        if (family->id < 0) {
                err = family->id;
-               goto errout_free;
+               goto errout_locked;
        }
 
        err = genl_validate_assign_mc_groups(family);
@@ -385,8 +374,6 @@ int genl_register_family(struct genl_family *family)
 
 errout_remove:
        idr_remove(&genl_fam_idr, family->id);
-errout_free:
-       kfree(family->attrbuf);
 errout_locked:
        genl_unlock_all();
        return err;
@@ -419,8 +406,6 @@ int genl_unregister_family(const struct genl_family *family)
                   atomic_read(&genl_sk_destructing_cnt) == 0);
        genl_unlock();
 
-       kfree(family->attrbuf);
-
        genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
 
        return 0;
@@ -485,30 +470,23 @@ genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
        if (!family->maxattr)
                return NULL;
 
-       if (family->parallel_ops) {
-               attrbuf = kmalloc_array(family->maxattr + 1,
-                                       sizeof(struct nlattr *), GFP_KERNEL);
-               if (!attrbuf)
-                       return ERR_PTR(-ENOMEM);
-       } else {
-               attrbuf = family->attrbuf;
-       }
+       attrbuf = kmalloc_array(family->maxattr + 1,
+                               sizeof(struct nlattr *), GFP_KERNEL);
+       if (!attrbuf)
+               return ERR_PTR(-ENOMEM);
 
        err = __nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
                            family->policy, validate, extack);
        if (err) {
-               if (family->parallel_ops)
-                       kfree(attrbuf);
+               kfree(attrbuf);
                return ERR_PTR(err);
        }
        return attrbuf;
 }
 
-static void genl_family_rcv_msg_attrs_free(const struct genl_family *family,
-                                          struct nlattr **attrbuf)
+static void genl_family_rcv_msg_attrs_free(struct nlattr **attrbuf)
 {
-       if (family->parallel_ops)
-               kfree(attrbuf);
+       kfree(attrbuf);
 }
 
 struct genl_start_context {
@@ -542,7 +520,7 @@ static int genl_start(struct netlink_callback *cb)
 no_attrs:
        info = genl_dumpit_info_alloc();
        if (!info) {
-               genl_family_rcv_msg_attrs_free(ctx->family, attrs);
+               genl_family_rcv_msg_attrs_free(attrs);
                return -ENOMEM;
        }
        info->family = ctx->family;
@@ -559,7 +537,7 @@ no_attrs:
        }
 
        if (rc) {
-               genl_family_rcv_msg_attrs_free(info->family, info->attrs);
+               genl_family_rcv_msg_attrs_free(info->attrs);
                genl_dumpit_info_free(info);
                cb->data = NULL;
        }
@@ -588,7 +566,7 @@ static int genl_lock_done(struct netlink_callback *cb)
                rc = ops->done(cb);
                genl_unlock();
        }
-       genl_family_rcv_msg_attrs_free(info->family, info->attrs);
+       genl_family_rcv_msg_attrs_free(info->attrs);
        genl_dumpit_info_free(info);
        return rc;
 }
@@ -601,7 +579,7 @@ static int genl_parallel_done(struct netlink_callback *cb)
 
        if (ops->done)
                rc = ops->done(cb);
-       genl_family_rcv_msg_attrs_free(info->family, info->attrs);
+       genl_family_rcv_msg_attrs_free(info->attrs);
        genl_dumpit_info_free(info);
        return rc;
 }
@@ -694,7 +672,7 @@ static int genl_family_rcv_msg_doit(const struct genl_family *family,
                family->post_doit(ops, skb, &info);
 
 out:
-       genl_family_rcv_msg_attrs_free(family, attrbuf);
+       genl_family_rcv_msg_attrs_free(attrbuf);
 
        return err;
 }
@@ -1166,60 +1144,11 @@ static struct genl_family genl_ctrl __ro_after_init = {
        .netnsok = true,
 };
 
-static int genl_bind(struct net *net, int group)
-{
-       struct genl_family *f;
-       int err = -ENOENT;
-       unsigned int id;
-
-       down_read(&cb_lock);
-
-       idr_for_each_entry(&genl_fam_idr, f, id) {
-               if (group >= f->mcgrp_offset &&
-                   group < f->mcgrp_offset + f->n_mcgrps) {
-                       int fam_grp = group - f->mcgrp_offset;
-
-                       if (!f->netnsok && net != &init_net)
-                               err = -ENOENT;
-                       else if (f->mcast_bind)
-                               err = f->mcast_bind(net, fam_grp);
-                       else
-                               err = 0;
-                       break;
-               }
-       }
-       up_read(&cb_lock);
-
-       return err;
-}
-
-static void genl_unbind(struct net *net, int group)
-{
-       struct genl_family *f;
-       unsigned int id;
-
-       down_read(&cb_lock);
-
-       idr_for_each_entry(&genl_fam_idr, f, id) {
-               if (group >= f->mcgrp_offset &&
-                   group < f->mcgrp_offset + f->n_mcgrps) {
-                       int fam_grp = group - f->mcgrp_offset;
-
-                       if (f->mcast_unbind)
-                               f->mcast_unbind(net, fam_grp);
-                       break;
-               }
-       }
-       up_read(&cb_lock);
-}
-
 static int __net_init genl_pernet_init(struct net *net)
 {
        struct netlink_kernel_cfg cfg = {
                .input          = genl_rcv,
                .flags          = NL_CFG_F_NONROOT_RECV,
-               .bind           = genl_bind,
-               .unbind         = genl_unbind,
        };
 
        /* we'll bump the group number right afterwards */
index f90ef69..6d16e1a 100644 (file)
@@ -294,7 +294,7 @@ void nr_destroy_socket(struct sock *sk)
  */
 
 static int nr_setsockopt(struct socket *sock, int level, int optname,
-       char __user *optval, unsigned int optlen)
+               sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct nr_sock *nr = nr_sk(sk);
@@ -306,7 +306,7 @@ static int nr_setsockopt(struct socket *sock, int level, int optname,
        if (optlen < sizeof(unsigned int))
                return -EINVAL;
 
-       if (get_user(opt, (unsigned int __user *)optval))
+       if (copy_from_sockptr(&opt, optval, sizeof(unsigned int)))
                return -EFAULT;
 
        switch (optname) {
index c5f9c3e..eb377f8 100644 (file)
@@ -704,7 +704,6 @@ EXPORT_SYMBOL(nfc_tm_deactivated);
  * nfc_alloc_send_skb - allocate a skb for data exchange responses
  *
  * @size: size to allocate
- * @gfp: gfp flags
  */
 struct sk_buff *nfc_alloc_send_skb(struct nfc_dev *dev, struct sock *sk,
                                   unsigned int flags, unsigned int size,
@@ -749,7 +748,7 @@ EXPORT_SYMBOL(nfc_alloc_recv_skb);
  *
  * @dev: The nfc device that found the targets
  * @targets: array of nfc targets found
- * @ntargets: targets array size
+ * @n_targets: targets array size
  *
  * The device driver must call this function when one or many nfc targets
  * are found. After calling this function, the device driver must stop
index 2860441..d257ed3 100644 (file)
@@ -218,7 +218,7 @@ error:
 }
 
 static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname,
-                              char __user *optval, unsigned int optlen)
+                              sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
@@ -241,7 +241,7 @@ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname,
                        break;
                }
 
-               if (get_user(opt, (u32 __user *) optval)) {
+               if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
                        err = -EFAULT;
                        break;
                }
@@ -263,7 +263,7 @@ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname,
                        break;
                }
 
-               if (get_user(opt, (u32 __user *) optval)) {
+               if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
                        err = -EFAULT;
                        break;
                }
@@ -921,8 +921,6 @@ static const struct proto_ops llcp_rawsock_ops = {
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
-       .setsockopt     = sock_no_setsockopt,
-       .getsockopt     = sock_no_getsockopt,
        .sendmsg        = sock_no_sendmsg,
        .recvmsg        = llcp_sock_recvmsg,
        .mmap           = sock_no_mmap,
index 7cd5248..f7b7dc5 100644 (file)
@@ -1182,7 +1182,7 @@ EXPORT_SYMBOL(nci_free_device);
 /**
  * nci_register_device - register a nci device in the nfc subsystem
  *
- * @dev: The nci device to register
+ * @ndev: The nci device to register
  */
 int nci_register_device(struct nci_dev *ndev)
 {
@@ -1246,7 +1246,7 @@ EXPORT_SYMBOL(nci_register_device);
 /**
  * nci_unregister_device - unregister a nci device in the nfc subsystem
  *
- * @dev: The nci device to unregister
+ * @ndev: The nci device to unregister
  */
 void nci_unregister_device(struct nci_dev *ndev)
 {
index ba5ffd3..b2061b6 100644 (file)
@@ -276,8 +276,6 @@ static const struct proto_ops rawsock_ops = {
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
-       .setsockopt     = sock_no_setsockopt,
-       .getsockopt     = sock_no_getsockopt,
        .sendmsg        = rawsock_sendmsg,
        .recvmsg        = rawsock_recvmsg,
        .mmap           = sock_no_mmap,
@@ -296,8 +294,6 @@ static const struct proto_ops rawsock_raw_ops = {
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
-       .setsockopt     = sock_no_setsockopt,
-       .getsockopt     = sock_no_getsockopt,
        .sendmsg        = sock_no_sendmsg,
        .recvmsg        = rawsock_recvmsg,
        .mmap           = sock_no_mmap,
index fc0efd8..2611657 100644 (file)
@@ -1169,9 +1169,10 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
                                 struct sw_flow_key *key,
                                 const struct nlattr *attr, bool last)
 {
+       struct ovs_skb_cb *ovs_cb = OVS_CB(skb);
        const struct nlattr *actions, *cpl_arg;
+       int len, max_len, rem = nla_len(attr);
        const struct check_pkt_len_arg *arg;
-       int rem = nla_len(attr);
        bool clone_flow_key;
 
        /* The first netlink attribute in 'attr' is always
@@ -1180,7 +1181,11 @@ static int execute_check_pkt_len(struct datapath *dp, struct sk_buff *skb,
        cpl_arg = nla_data(attr);
        arg = nla_data(cpl_arg);
 
-       if (skb->len <= arg->pkt_len) {
+       len = ovs_cb->mru ? ovs_cb->mru + skb->mac_len : skb->len;
+       max_len = arg->pkt_len;
+
+       if ((skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_len)) ||
+           len <= max_len) {
                /* Second netlink attribute in 'attr' is always
                 * 'OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL'.
                 */
index 94b0245..6b6822f 100644 (file)
@@ -130,6 +130,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
                                  const struct dp_upcall_info *,
                                  uint32_t cutlen);
 
+static void ovs_dp_masks_rebalance(struct work_struct *work);
+
 /* Must be called with rcu_read_lock or ovs_mutex. */
 const char *ovs_dp_name(const struct datapath *dp)
 {
@@ -2338,6 +2340,23 @@ out:
        return skb->len;
 }
 
+static void ovs_dp_masks_rebalance(struct work_struct *work)
+{
+       struct ovs_net *ovs_net = container_of(work, struct ovs_net,
+                                              masks_rebalance.work);
+       struct datapath *dp;
+
+       ovs_lock();
+
+       list_for_each_entry(dp, &ovs_net->dps, list_node)
+               ovs_flow_masks_rebalance(&dp->table);
+
+       ovs_unlock();
+
+       schedule_delayed_work(&ovs_net->masks_rebalance,
+                             msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL));
+}
+
 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
        [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
        [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
@@ -2432,6 +2451,9 @@ static int __net_init ovs_init_net(struct net *net)
 
        INIT_LIST_HEAD(&ovs_net->dps);
        INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
+       INIT_DELAYED_WORK(&ovs_net->masks_rebalance, ovs_dp_masks_rebalance);
+       schedule_delayed_work(&ovs_net->masks_rebalance,
+                             msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL));
        return ovs_ct_init(net);
 }
 
@@ -2486,6 +2508,7 @@ static void __net_exit ovs_exit_net(struct net *dnet)
 
        ovs_unlock();
 
+       cancel_delayed_work_sync(&ovs_net->masks_rebalance);
        cancel_work_sync(&ovs_net->dp_notify_work);
 }
 
index 2016dd1..24fcec2 100644 (file)
@@ -20,8 +20,9 @@
 #include "meter.h"
 #include "vport-internal_dev.h"
 
-#define DP_MAX_PORTS           USHRT_MAX
-#define DP_VPORT_HASH_BUCKETS  1024
+#define DP_MAX_PORTS                USHRT_MAX
+#define DP_VPORT_HASH_BUCKETS       1024
+#define DP_MASKS_REBALANCE_INTERVAL 4000
 
 /**
  * struct dp_stats_percpu - per-cpu packet processing statistics for a given
@@ -131,6 +132,7 @@ struct dp_upcall_info {
 struct ovs_net {
        struct list_head dps;
        struct work_struct dp_notify_work;
+       struct delayed_work masks_rebalance;
 #if    IS_ENABLED(CONFIG_NETFILTER_CONNCOUNT)
        struct ovs_ct_limit_info *ct_limit_info;
 #endif
index 79252d4..9d3e50c 100644 (file)
@@ -1763,11 +1763,11 @@ static void mask_set_nlattr(struct nlattr *attr, u8 val)
  * does not include any don't care bit.
  * @net: Used to determine per-namespace field support.
  * @match: receives the extracted flow match information.
- * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
+ * @nla_key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
  * sequence. The fields should of the packet that triggered the creation
  * of this flow.
- * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink
- * attribute specifies the mask field of the wildcarded flow.
+ * @nla_mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_*
+ * Netlink attribute specifies the mask field of the wildcarded flow.
  * @log: Boolean to allow kernel error logging.  Normally true, but when
  * probing for feature compatibility this should be passed in as false to
  * suppress unnecessary error logging.
index 2398d72..af22c9e 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/icmp.h>
 #include <linux/icmpv6.h>
 #include <linux/rculist.h>
+#include <linux/sort.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/ndisc.h>
@@ -169,16 +170,70 @@ static struct table_instance *table_instance_alloc(int new_size)
        return ti;
 }
 
+static void __mask_array_destroy(struct mask_array *ma)
+{
+       free_percpu(ma->masks_usage_cntr);
+       kfree(ma);
+}
+
+static void mask_array_rcu_cb(struct rcu_head *rcu)
+{
+       struct mask_array *ma = container_of(rcu, struct mask_array, rcu);
+
+       __mask_array_destroy(ma);
+}
+
+static void tbl_mask_array_reset_counters(struct mask_array *ma)
+{
+       int i, cpu;
+
+       /* As the per CPU counters are not atomic we can not go ahead and
+        * reset them from another CPU. To be able to still have an approximate
+        * zero based counter we store the value at reset, and subtract it
+        * later when processing.
+        */
+       for (i = 0; i < ma->max; i++)  {
+               ma->masks_usage_zero_cntr[i] = 0;
+
+               for_each_possible_cpu(cpu) {
+                       u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
+                                                         cpu);
+                       unsigned int start;
+                       u64 counter;
+
+                       do {
+                               start = u64_stats_fetch_begin_irq(&ma->syncp);
+                               counter = usage_counters[i];
+                       } while (u64_stats_fetch_retry_irq(&ma->syncp, start));
+
+                       ma->masks_usage_zero_cntr[i] += counter;
+               }
+       }
+}
+
 static struct mask_array *tbl_mask_array_alloc(int size)
 {
        struct mask_array *new;
 
        size = max(MASK_ARRAY_SIZE_MIN, size);
        new = kzalloc(sizeof(struct mask_array) +
-                     sizeof(struct sw_flow_mask *) * size, GFP_KERNEL);
+                     sizeof(struct sw_flow_mask *) * size +
+                     sizeof(u64) * size, GFP_KERNEL);
        if (!new)
                return NULL;
 
+       new->masks_usage_zero_cntr = (u64 *)((u8 *)new +
+                                            sizeof(struct mask_array) +
+                                            sizeof(struct sw_flow_mask *) *
+                                            size);
+
+       new->masks_usage_cntr = __alloc_percpu(sizeof(u64) * size,
+                                              __alignof__(u64));
+       if (!new->masks_usage_cntr) {
+               kfree(new);
+               return NULL;
+       }
+
        new->count = 0;
        new->max = size;
 
@@ -202,10 +257,10 @@ static int tbl_mask_array_realloc(struct flow_table *tbl, int size)
                        if (ovsl_dereference(old->masks[i]))
                                new->masks[new->count++] = old->masks[i];
                }
+               call_rcu(&old->rcu, mask_array_rcu_cb);
        }
 
        rcu_assign_pointer(tbl->mask_array, new);
-       kfree_rcu(old, rcu);
 
        return 0;
 }
@@ -223,6 +278,11 @@ static int tbl_mask_array_add_mask(struct flow_table *tbl,
                        return err;
 
                ma = ovsl_dereference(tbl->mask_array);
+       } else {
+               /* On every add or delete we need to reset the counters so
+                * every new mask gets a fair chance of being prioritized.
+                */
+               tbl_mask_array_reset_counters(ma);
        }
 
        BUG_ON(ovsl_dereference(ma->masks[ma_count]));
@@ -260,6 +320,9 @@ found:
        if (ma->max >= (MASK_ARRAY_SIZE_MIN * 2) &&
            ma_count <= (ma->max / 3))
                tbl_mask_array_realloc(tbl, ma->max / 2);
+       else
+               tbl_mask_array_reset_counters(ma);
+
 }
 
 /* Remove 'mask' from the mask list, if it is not needed any more. */
@@ -312,7 +375,7 @@ int ovs_flow_tbl_init(struct flow_table *table)
 free_ti:
        __table_instance_destroy(ti);
 free_mask_array:
-       kfree(ma);
+       __mask_array_destroy(ma);
 free_mask_cache:
        free_percpu(table->mask_cache);
        return -ENOMEM;
@@ -392,7 +455,7 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
        struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti);
 
        free_percpu(table->mask_cache);
-       kfree_rcu(rcu_dereference_raw(table->mask_array), rcu);
+       call_rcu(&table->mask_array->rcu, mask_array_rcu_cb);
        table_instance_destroy(table, ti, ufid_ti, false);
 }
 
@@ -606,6 +669,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
                                   u32 *n_mask_hit,
                                   u32 *index)
 {
+       u64 *usage_counters = this_cpu_ptr(ma->masks_usage_cntr);
        struct sw_flow *flow;
        struct sw_flow_mask *mask;
        int i;
@@ -614,8 +678,12 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
                mask = rcu_dereference_ovsl(ma->masks[*index]);
                if (mask) {
                        flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
-                       if (flow)
+                       if (flow) {
+                               u64_stats_update_begin(&ma->syncp);
+                               usage_counters[*index]++;
+                               u64_stats_update_end(&ma->syncp);
                                return flow;
+                       }
                }
        }
 
@@ -631,6 +699,9 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
                flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
                if (flow) { /* Found */
                        *index = i;
+                       u64_stats_update_begin(&ma->syncp);
+                       usage_counters[*index]++;
+                       u64_stats_update_end(&ma->syncp);
                        return flow;
                }
        }
@@ -934,6 +1005,98 @@ int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
        return 0;
 }
 
+static int compare_mask_and_count(const void *a, const void *b)
+{
+       const struct mask_count *mc_a = a;
+       const struct mask_count *mc_b = b;
+
+       return (s64)mc_b->counter - (s64)mc_a->counter;
+}
+
+/* Must be called with OVS mutex held. */
+void ovs_flow_masks_rebalance(struct flow_table *table)
+{
+       struct mask_array *ma = rcu_dereference_ovsl(table->mask_array);
+       struct mask_count *masks_and_count;
+       struct mask_array *new;
+       int masks_entries = 0;
+       int i;
+
+       /* Build array of all current entries with use counters. */
+       masks_and_count = kmalloc_array(ma->max, sizeof(*masks_and_count),
+                                       GFP_KERNEL);
+       if (!masks_and_count)
+               return;
+
+       for (i = 0; i < ma->max; i++)  {
+               struct sw_flow_mask *mask;
+               unsigned int start;
+               int cpu;
+
+               mask = rcu_dereference_ovsl(ma->masks[i]);
+               if (unlikely(!mask))
+                       break;
+
+               masks_and_count[i].index = i;
+               masks_and_count[i].counter = 0;
+
+               for_each_possible_cpu(cpu) {
+                       u64 *usage_counters = per_cpu_ptr(ma->masks_usage_cntr,
+                                                         cpu);
+                       u64 counter;
+
+                       do {
+                               start = u64_stats_fetch_begin_irq(&ma->syncp);
+                               counter = usage_counters[i];
+                       } while (u64_stats_fetch_retry_irq(&ma->syncp, start));
+
+                       masks_and_count[i].counter += counter;
+               }
+
+               /* Subtract the zero count value. */
+               masks_and_count[i].counter -= ma->masks_usage_zero_cntr[i];
+
+               /* Rather than calling tbl_mask_array_reset_counters()
+                * below when no change is needed, do it inline here.
+                */
+               ma->masks_usage_zero_cntr[i] += masks_and_count[i].counter;
+       }
+
+       if (i == 0)
+               goto free_mask_entries;
+
+       /* Sort the entries */
+       masks_entries = i;
+       sort(masks_and_count, masks_entries, sizeof(*masks_and_count),
+            compare_mask_and_count, NULL);
+
+       /* If the order is the same, nothing to do... */
+       for (i = 0; i < masks_entries; i++) {
+               if (i != masks_and_count[i].index)
+                       break;
+       }
+       if (i == masks_entries)
+               goto free_mask_entries;
+
+       /* Rebuilt the new list in order of usage. */
+       new = tbl_mask_array_alloc(ma->max);
+       if (!new)
+               goto free_mask_entries;
+
+       for (i = 0; i < masks_entries; i++) {
+               int index = masks_and_count[i].index;
+
+               new->masks[new->count++] =
+                       rcu_dereference_ovsl(ma->masks[index]);
+       }
+
+       rcu_assign_pointer(table->mask_array, new);
+       call_rcu(&ma->rcu, mask_array_rcu_cb);
+
+free_mask_entries:
+       kfree(masks_and_count);
+}
+
 /* Initializes the flow module.
  * Returns zero if successful or a negative error code. */
 int ovs_flow_init(void)
index 8a5cea6..1f664b0 100644 (file)
@@ -27,9 +27,17 @@ struct mask_cache_entry {
        u32 mask_index;
 };
 
+struct mask_count {
+       int index;
+       u64 counter;
+};
+
 struct mask_array {
        struct rcu_head rcu;
        int count, max;
+       u64 __percpu *masks_usage_cntr;
+       u64 *masks_usage_zero_cntr;
+       struct u64_stats_sync syncp;
        struct sw_flow_mask __rcu *masks[];
 };
 
@@ -86,4 +94,7 @@ bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *);
 
 void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
                       bool full, const struct sw_flow_mask *mask);
+
+void ovs_flow_masks_rebalance(struct flow_table *table);
+
 #endif /* flow_table.h */
index 47febb4..0d44c5c 100644 (file)
@@ -87,6 +87,7 @@ EXPORT_SYMBOL_GPL(ovs_vport_ops_unregister);
 /**
  *     ovs_vport_locate - find a port that has already been created
  *
+ * @net: network namespace
  * @name: name of port to find
  *
  * Must be called with ovs or RCU read lock.
@@ -418,7 +419,7 @@ u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
  *
  * @vport: vport that received the packet
  * @skb: skb that was received
- * @tun_key: tunnel (if any) that carried packet
+ * @tun_info: tunnel (if any) that carried packet
  *
  * Must be called with rcu_read_lock.  The packet cannot be shared and
  * skb->data should point to the Ethernet header.
index 29bd405..0b8160d 100644 (file)
@@ -593,6 +593,7 @@ static void init_prb_bdqc(struct packet_sock *po,
                                                req_u->req3.tp_block_size);
        p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
        p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
+       rwlock_init(&p1->blk_fill_in_prog_lock);
 
        p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
        prb_init_ft_ops(p1, req_u);
@@ -659,10 +660,9 @@ static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
         *
         */
        if (BLOCK_NUM_PKTS(pbd)) {
-               while (atomic_read(&pkc->blk_fill_in_prog)) {
-                       /* Waiting for skb_copy_bits to finish... */
-                       cpu_relax();
-               }
+               /* Waiting for skb_copy_bits to finish... */
+               write_lock(&pkc->blk_fill_in_prog_lock);
+               write_unlock(&pkc->blk_fill_in_prog_lock);
        }
 
        if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
@@ -921,10 +921,9 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
                 * the timer-handler already handled this case.
                 */
                if (!(status & TP_STATUS_BLK_TMO)) {
-                       while (atomic_read(&pkc->blk_fill_in_prog)) {
-                               /* Waiting for skb_copy_bits to finish... */
-                               cpu_relax();
-                       }
+                       /* Waiting for skb_copy_bits to finish... */
+                       write_lock(&pkc->blk_fill_in_prog_lock);
+                       write_unlock(&pkc->blk_fill_in_prog_lock);
                }
                prb_close_block(pkc, pbd, po, status);
                return;
@@ -944,7 +943,8 @@ static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
 static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
 {
        struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
-       atomic_dec(&pkc->blk_fill_in_prog);
+
+       read_unlock(&pkc->blk_fill_in_prog_lock);
 }
 
 static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
@@ -998,7 +998,7 @@ static void prb_fill_curr_block(char *curr,
        pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
        BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
        BLOCK_NUM_PKTS(pbd) += 1;
-       atomic_inc(&pkc->blk_fill_in_prog);
+       read_lock(&pkc->blk_fill_in_prog_lock);
        prb_run_all_ft_ops(pkc, ppd);
 }
 
@@ -1536,7 +1536,7 @@ static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
        }
 }
 
-static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
+static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
                                unsigned int len)
 {
        struct bpf_prog *new;
@@ -1545,10 +1545,10 @@ static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
 
        if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
                return -EPERM;
-       if (len != sizeof(fprog))
-               return -EINVAL;
-       if (copy_from_user(&fprog, data, len))
-               return -EFAULT;
+
+       ret = copy_bpf_fprog_from_user(&fprog, data, len);
+       if (ret)
+               return ret;
 
        ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
        if (ret)
@@ -1558,7 +1558,7 @@ static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
        return 0;
 }
 
-static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
+static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
                                unsigned int len)
 {
        struct bpf_prog *new;
@@ -1568,7 +1568,7 @@ static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
                return -EPERM;
        if (len != sizeof(fd))
                return -EINVAL;
-       if (copy_from_user(&fd, data, len))
+       if (copy_from_sockptr(&fd, data, len))
                return -EFAULT;
 
        new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
@@ -1579,7 +1579,7 @@ static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
        return 0;
 }
 
-static int fanout_set_data(struct packet_sock *po, char __user *data,
+static int fanout_set_data(struct packet_sock *po, sockptr_t data,
                           unsigned int len)
 {
        switch (po->fanout->type) {
@@ -3652,7 +3652,8 @@ static void packet_flush_mclist(struct sock *sk)
 }
 
 static int
-packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
+packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
+                 unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct packet_sock *po = pkt_sk(sk);
@@ -3672,7 +3673,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
                        return -EINVAL;
                if (len > sizeof(mreq))
                        len = sizeof(mreq);
-               if (copy_from_user(&mreq, optval, len))
+               if (copy_from_sockptr(&mreq, optval, len))
                        return -EFAULT;
                if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
                        return -EINVAL;
@@ -3703,7 +3704,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
                if (optlen < len) {
                        ret = -EINVAL;
                } else {
-                       if (copy_from_user(&req_u.req, optval, len))
+                       if (copy_from_sockptr(&req_u.req, optval, len))
                                ret = -EFAULT;
                        else
                                ret = packet_set_ring(sk, &req_u, 0,
@@ -3718,7 +3719,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
                if (optlen != sizeof(val))
                        return -EINVAL;
-               if (copy_from_user(&val, optval, sizeof(val)))
+               if (copy_from_sockptr(&val, optval, sizeof(val)))
                        return -EFAULT;
 
                pkt_sk(sk)->copy_thresh = val;
@@ -3730,7 +3731,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
                if (optlen != sizeof(val))
                        return -EINVAL;
-               if (copy_from_user(&val, optval, sizeof(val)))
+               if (copy_from_sockptr(&val, optval, sizeof(val)))
                        return -EFAULT;
                switch (val) {
                case TPACKET_V1:
@@ -3756,7 +3757,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
                if (optlen != sizeof(val))
                        return -EINVAL;
-               if (copy_from_user(&val, optval, sizeof(val)))
+               if (copy_from_sockptr(&val, optval, sizeof(val)))
                        return -EFAULT;
                if (val > INT_MAX)
                        return -EINVAL;
@@ -3776,7 +3777,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
                if (optlen != sizeof(val))
                        return -EINVAL;
-               if (copy_from_user(&val, optval, sizeof(val)))
+               if (copy_from_sockptr(&val, optval, sizeof(val)))
                        return -EFAULT;
 
                lock_sock(sk);
@@ -3795,7 +3796,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
                if (optlen < sizeof(val))
                        return -EINVAL;
-               if (copy_from_user(&val, optval, sizeof(val)))
+               if (copy_from_sockptr(&val, optval, sizeof(val)))
                        return -EFAULT;
 
                lock_sock(sk);
@@ -3809,7 +3810,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
                if (optlen < sizeof(val))
                        return -EINVAL;
-               if (copy_from_user(&val, optval, sizeof(val)))
+               if (copy_from_sockptr(&val, optval, sizeof(val)))
                        return -EFAULT;
 
                lock_sock(sk);
@@ -3825,7 +3826,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
                        return -EINVAL;
                if (optlen < sizeof(val))
                        return -EINVAL;
-               if (copy_from_user(&val, optval, sizeof(val)))
+               if (copy_from_sockptr(&val, optval, sizeof(val)))
                        return -EFAULT;
 
                lock_sock(sk);
@@ -3844,7 +3845,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
                if (optlen != sizeof(val))
                        return -EINVAL;
-               if (copy_from_user(&val, optval, sizeof(val)))
+               if (copy_from_sockptr(&val, optval, sizeof(val)))
                        return -EFAULT;
 
                po->tp_tstamp = val;
@@ -3856,7 +3857,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
                if (optlen != sizeof(val))
                        return -EINVAL;
-               if (copy_from_user(&val, optval, sizeof(val)))
+               if (copy_from_sockptr(&val, optval, sizeof(val)))
                        return -EFAULT;
 
                return fanout_add(sk, val & 0xffff, val >> 16);
@@ -3874,7 +3875,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
                if (optlen != sizeof(val))
                        return -EINVAL;
-               if (copy_from_user(&val, optval, sizeof(val)))
+               if (copy_from_sockptr(&val, optval, sizeof(val)))
                        return -EFAULT;
                if (val < 0 || val > 1)
                        return -EINVAL;
@@ -3888,7 +3889,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
                if (optlen != sizeof(val))
                        return -EINVAL;
-               if (copy_from_user(&val, optval, sizeof(val)))
+               if (copy_from_sockptr(&val, optval, sizeof(val)))
                        return -EFAULT;
 
                lock_sock(sk);
@@ -3907,7 +3908,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
 
                if (optlen != sizeof(val))
                        return -EINVAL;
-               if (copy_from_user(&val, optval, sizeof(val)))
+               if (copy_from_sockptr(&val, optval, sizeof(val)))
                        return -EFAULT;
 
                po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
@@ -4040,28 +4041,6 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
        return 0;
 }
 
-
-#ifdef CONFIG_COMPAT
-static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
-                                   char __user *optval, unsigned int optlen)
-{
-       struct packet_sock *po = pkt_sk(sock->sk);
-
-       if (level != SOL_PACKET)
-               return -ENOPROTOOPT;
-
-       if (optname == PACKET_FANOUT_DATA &&
-           po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
-               optval = (char __user *)get_compat_bpf_fprog(optval);
-               if (!optval)
-                       return -EFAULT;
-               optlen = sizeof(struct sock_fprog);
-       }
-
-       return packet_setsockopt(sock, level, optname, optval, optlen);
-}
-#endif
-
 static int packet_notifier(struct notifier_block *this,
                           unsigned long msg, void *ptr)
 {
@@ -4293,7 +4272,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
        struct packet_ring_buffer *rb;
        struct sk_buff_head *rb_queue;
        __be16 num;
-       int err = -EINVAL;
+       int err;
        /* Added to avoid minimal code churn */
        struct tpacket_req *req = &req_u->req;
 
@@ -4525,8 +4504,6 @@ static const struct proto_ops packet_ops_spkt = {
        .gettstamp =    sock_gettstamp,
        .listen =       sock_no_listen,
        .shutdown =     sock_no_shutdown,
-       .setsockopt =   sock_no_setsockopt,
-       .getsockopt =   sock_no_getsockopt,
        .sendmsg =      packet_sendmsg_spkt,
        .recvmsg =      packet_recvmsg,
        .mmap =         sock_no_mmap,
@@ -4549,9 +4526,6 @@ static const struct proto_ops packet_ops = {
        .shutdown =     sock_no_shutdown,
        .setsockopt =   packet_setsockopt,
        .getsockopt =   packet_getsockopt,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_packet_setsockopt,
-#endif
        .sendmsg =      packet_sendmsg,
        .recvmsg =      packet_recvmsg,
        .mmap =         packet_mmap,
index 907f4cd..fd41ecb 100644 (file)
@@ -39,7 +39,7 @@ struct tpacket_kbdq_core {
        char            *nxt_offset;
        struct sk_buff  *skb;
 
-       atomic_t        blk_fill_in_prog;
+       rwlock_t        blk_fill_in_prog_lock;
 
        /* Default is set to 8ms */
 #define DEFAULT_PRB_RETIRE_TOV (8)
index 4577e43..e47d09a 100644 (file)
@@ -975,7 +975,7 @@ static int pep_init(struct sock *sk)
 }
 
 static int pep_setsockopt(struct sock *sk, int level, int optname,
-                               char __user *optval, unsigned int optlen)
+                         sockptr_t optval, unsigned int optlen)
 {
        struct pep_sock *pn = pep_sk(sk);
        int val = 0, err = 0;
@@ -983,7 +983,7 @@ static int pep_setsockopt(struct sock *sk, int level, int optname,
        if (level != SOL_PNPIPE)
                return -ENOPROTOOPT;
        if (optlen >= sizeof(int)) {
-               if (get_user(val, (int __user *) optval))
+               if (copy_from_sockptr(&val, optval, sizeof(int)))
                        return -EFAULT;
        }
 
index 76d499f..2599235 100644 (file)
@@ -439,12 +439,6 @@ const struct proto_ops phonet_dgram_ops = {
        .ioctl          = pn_socket_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
-       .setsockopt     = sock_no_setsockopt,
-       .getsockopt     = sock_no_getsockopt,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = sock_no_setsockopt,
-       .compat_getsockopt = sock_no_getsockopt,
-#endif
        .sendmsg        = pn_socket_sendmsg,
        .recvmsg        = sock_common_recvmsg,
        .mmap           = sock_no_mmap,
@@ -466,10 +460,6 @@ const struct proto_ops phonet_stream_ops = {
        .shutdown       = sock_no_shutdown,
        .setsockopt     = sock_common_setsockopt,
        .getsockopt     = sock_common_getsockopt,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_sock_common_setsockopt,
-       .compat_getsockopt = compat_sock_common_getsockopt,
-#endif
        .sendmsg        = pn_socket_sendmsg,
        .recvmsg        = sock_common_recvmsg,
        .mmap           = sock_no_mmap,
index 2d8d613..0cb4adf 100644 (file)
@@ -166,6 +166,7 @@ static void __qrtr_node_release(struct kref *kref)
 {
        struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
        struct radix_tree_iter iter;
+       struct qrtr_tx_flow *flow;
        unsigned long flags;
        void __rcu **slot;
 
@@ -181,8 +182,9 @@ static void __qrtr_node_release(struct kref *kref)
 
        /* Free tx flow counters */
        radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
+               flow = *slot;
                radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot);
-               kfree(*slot);
+               kfree(flow);
        }
        kfree(node);
 }
@@ -427,7 +429,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
        unsigned int ver;
        size_t hdrlen;
 
-       if (len & 3)
+       if (len == 0 || len & 3)
                return -EINVAL;
 
        skb = netdev_alloc_skb(NULL, len);
@@ -441,6 +443,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
 
        switch (ver) {
        case QRTR_PROTO_VER_1:
+               if (len < sizeof(*v1))
+                       goto err;
                v1 = data;
                hdrlen = sizeof(*v1);
 
@@ -454,6 +458,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
                size = le32_to_cpu(v1->size);
                break;
        case QRTR_PROTO_VER_2:
+               if (len < sizeof(*v2))
+                       goto err;
                v2 = data;
                hdrlen = sizeof(*v2) + v2->optlen;
 
@@ -1202,8 +1208,6 @@ static const struct proto_ops qrtr_proto_ops = {
        .gettstamp      = sock_gettstamp,
        .poll           = datagram_poll,
        .shutdown       = sock_no_shutdown,
-       .setsockopt     = sock_no_setsockopt,
-       .getsockopt     = sock_no_getsockopt,
        .release        = qrtr_release,
        .mmap           = sock_no_mmap,
        .sendpage       = sock_no_sendpage,
index 1a5bf3f..b239120 100644 (file)
@@ -290,8 +290,7 @@ static int rds_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
        return 0;
 }
 
-static int rds_cancel_sent_to(struct rds_sock *rs, char __user *optval,
-                             int len)
+static int rds_cancel_sent_to(struct rds_sock *rs, sockptr_t optval, int len)
 {
        struct sockaddr_in6 sin6;
        struct sockaddr_in sin;
@@ -308,14 +307,15 @@ static int rds_cancel_sent_to(struct rds_sock *rs, char __user *optval,
                goto out;
        } else if (len < sizeof(struct sockaddr_in6)) {
                /* Assume IPv4 */
-               if (copy_from_user(&sin, optval, sizeof(struct sockaddr_in))) {
+               if (copy_from_sockptr(&sin, optval,
+                               sizeof(struct sockaddr_in))) {
                        ret = -EFAULT;
                        goto out;
                }
                ipv6_addr_set_v4mapped(sin.sin_addr.s_addr, &sin6.sin6_addr);
                sin6.sin6_port = sin.sin_port;
        } else {
-               if (copy_from_user(&sin6, optval,
+               if (copy_from_sockptr(&sin6, optval,
                                   sizeof(struct sockaddr_in6))) {
                        ret = -EFAULT;
                        goto out;
@@ -327,21 +327,20 @@ out:
        return ret;
 }
 
-static int rds_set_bool_option(unsigned char *optvar, char __user *optval,
+static int rds_set_bool_option(unsigned char *optvar, sockptr_t optval,
                               int optlen)
 {
        int value;
 
        if (optlen < sizeof(int))
                return -EINVAL;
-       if (get_user(value, (int __user *) optval))
+       if (copy_from_sockptr(&value, optval, sizeof(int)))
                return -EFAULT;
        *optvar = !!value;
        return 0;
 }
 
-static int rds_cong_monitor(struct rds_sock *rs, char __user *optval,
-                           int optlen)
+static int rds_cong_monitor(struct rds_sock *rs, sockptr_t optval, int optlen)
 {
        int ret;
 
@@ -358,8 +357,7 @@ static int rds_cong_monitor(struct rds_sock *rs, char __user *optval,
        return ret;
 }
 
-static int rds_set_transport(struct rds_sock *rs, char __user *optval,
-                            int optlen)
+static int rds_set_transport(struct rds_sock *rs, sockptr_t optval, int optlen)
 {
        int t_type;
 
@@ -369,7 +367,7 @@ static int rds_set_transport(struct rds_sock *rs, char __user *optval,
        if (optlen != sizeof(int))
                return -EINVAL;
 
-       if (copy_from_user(&t_type, (int __user *)optval, sizeof(t_type)))
+       if (copy_from_sockptr(&t_type, optval, sizeof(t_type)))
                return -EFAULT;
 
        if (t_type < 0 || t_type >= RDS_TRANS_COUNT)
@@ -380,7 +378,7 @@ static int rds_set_transport(struct rds_sock *rs, char __user *optval,
        return rs->rs_transport ? 0 : -ENOPROTOOPT;
 }
 
-static int rds_enable_recvtstamp(struct sock *sk, char __user *optval,
+static int rds_enable_recvtstamp(struct sock *sk, sockptr_t optval,
                                 int optlen, int optname)
 {
        int val, valbool;
@@ -388,7 +386,7 @@ static int rds_enable_recvtstamp(struct sock *sk, char __user *optval,
        if (optlen != sizeof(int))
                return -EFAULT;
 
-       if (get_user(val, (int __user *)optval))
+       if (copy_from_sockptr(&val, optval, sizeof(int)))
                return -EFAULT;
 
        valbool = val ? 1 : 0;
@@ -404,7 +402,7 @@ static int rds_enable_recvtstamp(struct sock *sk, char __user *optval,
        return 0;
 }
 
-static int rds_recv_track_latency(struct rds_sock *rs, char __user *optval,
+static int rds_recv_track_latency(struct rds_sock *rs, sockptr_t optval,
                                  int optlen)
 {
        struct rds_rx_trace_so trace;
@@ -413,7 +411,7 @@ static int rds_recv_track_latency(struct rds_sock *rs, char __user *optval,
        if (optlen != sizeof(struct rds_rx_trace_so))
                return -EFAULT;
 
-       if (copy_from_user(&trace, optval, sizeof(trace)))
+       if (copy_from_sockptr(&trace, optval, sizeof(trace)))
                return -EFAULT;
 
        if (trace.rx_traces > RDS_MSG_RX_DGRAM_TRACE_MAX)
@@ -432,7 +430,7 @@ static int rds_recv_track_latency(struct rds_sock *rs, char __user *optval,
 }
 
 static int rds_setsockopt(struct socket *sock, int level, int optname,
-                         char __user *optval, unsigned int optlen)
+                         sockptr_t optval, unsigned int optlen)
 {
        struct rds_sock *rs = rds_sk_to_rs(sock->sk);
        int ret;
index ed7f213..f2fcab1 100644 (file)
@@ -905,6 +905,17 @@ void rds_conn_path_connect_if_down(struct rds_conn_path *cp)
 }
 EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down);
 
+/* Check connectivity of all paths
+ */
+void rds_check_all_paths(struct rds_connection *conn)
+{
+       int i = 0;
+
+       do {
+               rds_conn_path_connect_if_down(&conn->c_path[i]);
+       } while (++i < conn->c_npaths);
+}
+
 void rds_conn_connect_if_down(struct rds_connection *conn)
 {
        WARN_ON(conn->c_trans->t_mp_capable);
index 5ae069d..8dfff43 100644 (file)
@@ -264,7 +264,13 @@ struct rds_ib_device {
        int                     *vector_load;
 };
 
-#define ibdev_to_node(ibdev) dev_to_node((ibdev)->dev.parent)
+static inline int ibdev_to_node(struct ib_device *ibdev)
+{
+       struct device *parent;
+
+       parent = ibdev->dev.parent;
+       return parent ? dev_to_node(parent) : NUMA_NO_NODE;
+}
 #define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
 
 /* bits for i_ack_flags */
index a7ae118..ccdd304 100644 (file)
@@ -353,21 +353,20 @@ out:
        return ret;
 }
 
-int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
+int rds_get_mr(struct rds_sock *rs, sockptr_t optval, int optlen)
 {
        struct rds_get_mr_args args;
 
        if (optlen != sizeof(struct rds_get_mr_args))
                return -EINVAL;
 
-       if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
-                          sizeof(struct rds_get_mr_args)))
+       if (copy_from_sockptr(&args, optval, sizeof(struct rds_get_mr_args)))
                return -EFAULT;
 
        return __rds_rdma_map(rs, &args, NULL, NULL, NULL);
 }
 
-int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
+int rds_get_mr_for_dest(struct rds_sock *rs, sockptr_t optval, int optlen)
 {
        struct rds_get_mr_for_dest_args args;
        struct rds_get_mr_args new_args;
@@ -375,7 +374,7 @@ int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
        if (optlen != sizeof(struct rds_get_mr_for_dest_args))
                return -EINVAL;
 
-       if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
+       if (copy_from_sockptr(&args, optval,
                           sizeof(struct rds_get_mr_for_dest_args)))
                return -EFAULT;
 
@@ -394,7 +393,7 @@ int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
 /*
  * Free the MR indicated by the given R_Key
  */
-int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
+int rds_free_mr(struct rds_sock *rs, sockptr_t optval, int optlen)
 {
        struct rds_free_mr_args args;
        struct rds_mr *mr;
@@ -403,8 +402,7 @@ int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
        if (optlen != sizeof(struct rds_free_mr_args))
                return -EINVAL;
 
-       if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
-                          sizeof(struct rds_free_mr_args)))
+       if (copy_from_sockptr(&args, optval, sizeof(struct rds_free_mr_args)))
                return -EFAULT;
 
        /* Special case - a null cookie means flush all unused MRs */
index bfafd4a..ca4c3a6 100644 (file)
@@ -13,7 +13,7 @@
 
 /* Below reject reason is for legacy interoperability issue with non-linux
  * RDS endpoints where older version incompatibility is conveyed via value 1.
- * For future version(s), proper encoded reject reason should be be used.
+ * For future version(s), proper encoded reject reason should be used.
  */
 #define RDS_RDMA_REJ_INCOMPAT          1
 
index 6019b0c..d35d1fc 100644 (file)
@@ -778,6 +778,7 @@ void rds_conn_drop(struct rds_connection *conn);
 void rds_conn_path_drop(struct rds_conn_path *cpath, bool destroy);
 void rds_conn_connect_if_down(struct rds_connection *conn);
 void rds_conn_path_connect_if_down(struct rds_conn_path *cp);
+void rds_check_all_paths(struct rds_connection *conn);
 void rds_for_each_conn_info(struct socket *sock, unsigned int len,
                          struct rds_info_iterator *iter,
                          struct rds_info_lengths *lens,
@@ -823,6 +824,12 @@ rds_conn_path_up(struct rds_conn_path *cp)
 }
 
 static inline int
+rds_conn_path_down(struct rds_conn_path *cp)
+{
+       return atomic_read(&cp->cp_state) == RDS_CONN_DOWN;
+}
+
+static inline int
 rds_conn_up(struct rds_connection *conn)
 {
        WARN_ON(conn->c_trans->t_mp_capable);
@@ -917,9 +924,9 @@ int rds_send_pong(struct rds_conn_path *cp, __be16 dport);
 
 /* rdma.c */
 void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
-int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
-int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
-int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
+int rds_get_mr(struct rds_sock *rs, sockptr_t optval, int optlen);
+int rds_get_mr_for_dest(struct rds_sock *rs, sockptr_t optval, int optlen);
+int rds_free_mr(struct rds_sock *rs, sockptr_t optval, int optlen);
 void rds_rdma_drop_keys(struct rds_sock *rs);
 int rds_rdma_extra_size(struct rds_rdma_args *args,
                        struct rds_iov_vector *iov);
index 68e2bdb..9a529a0 100644 (file)
@@ -1340,7 +1340,8 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
                goto out;
        }
 
-       rds_conn_path_connect_if_down(cpath);
+       if (rds_conn_path_down(cpath))
+               rds_check_all_paths(conn);
 
        ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
        if (ret) {
index 46f709a..f8001ec 100644 (file)
 #include "rds.h"
 #include "loop.h"
 
+static char * const rds_trans_modules[] = {
+       [RDS_TRANS_IB] = "rds_rdma",
+       [RDS_TRANS_GAP] = NULL,
+       [RDS_TRANS_TCP] = "rds_tcp",
+};
+
 static struct rds_transport *transports[RDS_TRANS_COUNT];
 static DECLARE_RWSEM(rds_trans_sem);
 
@@ -110,18 +116,20 @@ struct rds_transport *rds_trans_get(int t_type)
 {
        struct rds_transport *ret = NULL;
        struct rds_transport *trans;
-       unsigned int i;
 
        down_read(&rds_trans_sem);
-       for (i = 0; i < RDS_TRANS_COUNT; i++) {
-               trans = transports[i];
-
-               if (trans && trans->t_type == t_type &&
-                   (!trans->t_owner || try_module_get(trans->t_owner))) {
-                       ret = trans;
-                       break;
-               }
+       trans = transports[t_type];
+       if (!trans) {
+               up_read(&rds_trans_sem);
+               if (rds_trans_modules[t_type])
+                       request_module(rds_trans_modules[t_type]);
+               down_read(&rds_trans_sem);
+               trans = transports[t_type];
        }
+       if (trans && trans->t_type == t_type &&
+           (!trans->t_owner || try_module_get(trans->t_owner)))
+               ret = trans;
+
        up_read(&rds_trans_sem);
 
        return ret;
index ce85656..cf7d974 100644 (file)
@@ -365,7 +365,7 @@ void rose_destroy_socket(struct sock *sk)
  */
 
 static int rose_setsockopt(struct socket *sock, int level, int optname,
-       char __user *optval, unsigned int optlen)
+               sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct rose_sock *rose = rose_sk(sk);
@@ -377,7 +377,7 @@ static int rose_setsockopt(struct socket *sock, int level, int optname,
        if (optlen < sizeof(int))
                return -EINVAL;
 
-       if (get_user(opt, (int __user *)optval))
+       if (copy_from_sockptr(&opt, optval, sizeof(int)))
                return -EFAULT;
 
        switch (optname) {
index 394189b..e6725a6 100644 (file)
@@ -267,7 +267,7 @@ static int rxrpc_listen(struct socket *sock, int backlog)
  * @gfp: The allocation constraints
  * @notify_rx: Where to send notifications instead of socket queue
  * @upgrade: Request service upgrade for call
- * @intr: The call is interruptible
+ * @interruptibility: The call is interruptible, or can be canceled.
  * @debug_id: The debug ID for tracing to be assigned to the call
  *
  * Allow a kernel service to begin a call on the nominated socket.  This just
@@ -588,7 +588,7 @@ EXPORT_SYMBOL(rxrpc_sock_set_min_security_level);
  * set RxRPC socket options
  */
 static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
-                           char __user *optval, unsigned int optlen)
+                           sockptr_t optval, unsigned int optlen)
 {
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        unsigned int min_sec_level;
@@ -639,8 +639,8 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
                        ret = -EISCONN;
                        if (rx->sk.sk_state != RXRPC_UNBOUND)
                                goto error;
-                       ret = get_user(min_sec_level,
-                                      (unsigned int __user *) optval);
+                       ret = copy_from_sockptr(&min_sec_level, optval,
+                                      sizeof(unsigned int));
                        if (ret < 0)
                                goto error;
                        ret = -EINVAL;
@@ -658,7 +658,7 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
                        if (rx->sk.sk_state != RXRPC_SERVER_BOUND2)
                                goto error;
                        ret = -EFAULT;
-                       if (copy_from_user(service_upgrade, optval,
+                       if (copy_from_sockptr(service_upgrade, optval,
                                           sizeof(service_upgrade)) != 0)
                                goto error;
                        ret = -EINVAL;
index 9a2139e..6d29a36 100644 (file)
@@ -909,8 +909,8 @@ extern const struct rxrpc_security rxrpc_no_security;
 extern struct key_type key_type_rxrpc;
 extern struct key_type key_type_rxrpc_s;
 
-int rxrpc_request_key(struct rxrpc_sock *, char __user *, int);
-int rxrpc_server_keyring(struct rxrpc_sock *, char __user *, int);
+int rxrpc_request_key(struct rxrpc_sock *, sockptr_t , int);
+int rxrpc_server_keyring(struct rxrpc_sock *, sockptr_t, int);
 int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time64_t,
                              u32);
 
index b7611cc..032ed76 100644 (file)
 #include <net/ip.h>
 #include "ar-internal.h"
 
+static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
+                              unsigned long user_call_ID)
+{
+}
+
 /*
  * Preallocate a single service call, connection and peer and, if possible,
  * give them a user ID and attach the user's side of the ID to them.
@@ -228,6 +233,8 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
                if (rx->discard_new_call) {
                        _debug("discard %lx", call->user_call_ID);
                        rx->discard_new_call(call, call->user_call_ID);
+                       if (call->notify_rx)
+                               call->notify_rx = rxrpc_dummy_notify;
                        rxrpc_put_call(call, rxrpc_call_put_kernel);
                }
                rxrpc_call_completed(call);
index aa1c8ee..6be2672 100644 (file)
@@ -253,7 +253,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
                 * confuse things
                 */
                annotation &= ~RXRPC_TX_ANNO_MASK;
-               annotation |= RXRPC_TX_ANNO_RESENT;
+               annotation |= RXRPC_TX_ANNO_UNACK | RXRPC_TX_ANNO_RESENT;
                call->rxtx_annotations[ix] = annotation;
 
                skb = call->rxtx_buffer[ix];
index 299ac98..7675793 100644 (file)
@@ -722,13 +722,12 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
               ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
               rwind, ntohl(ackinfo->jumbo_max));
 
+       if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
+               rwind = RXRPC_RXTX_BUFF_SIZE - 1;
        if (call->tx_winsize != rwind) {
-               if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
-                       rwind = RXRPC_RXTX_BUFF_SIZE - 1;
                if (rwind > call->tx_winsize)
                        wake = true;
-               trace_rxrpc_rx_rwind_change(call, sp->hdr.serial,
-                                           ntohl(ackinfo->rwind), wake);
+               trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake);
                call->tx_winsize = rwind;
        }
 
index 0c98313..94c3df3 100644 (file)
@@ -896,7 +896,7 @@ static void rxrpc_describe(const struct key *key, struct seq_file *m)
 /*
  * grab the security key for a socket
  */
-int rxrpc_request_key(struct rxrpc_sock *rx, char __user *optval, int optlen)
+int rxrpc_request_key(struct rxrpc_sock *rx, sockptr_t optval, int optlen)
 {
        struct key *key;
        char *description;
@@ -906,7 +906,7 @@ int rxrpc_request_key(struct rxrpc_sock *rx, char __user *optval, int optlen)
        if (optlen <= 0 || optlen > PAGE_SIZE - 1)
                return -EINVAL;
 
-       description = memdup_user_nul(optval, optlen);
+       description = memdup_sockptr_nul(optval, optlen);
        if (IS_ERR(description))
                return PTR_ERR(description);
 
@@ -926,8 +926,7 @@ int rxrpc_request_key(struct rxrpc_sock *rx, char __user *optval, int optlen)
 /*
  * grab the security keyring for a server socket
  */
-int rxrpc_server_keyring(struct rxrpc_sock *rx, char __user *optval,
-                        int optlen)
+int rxrpc_server_keyring(struct rxrpc_sock *rx, sockptr_t optval, int optlen)
 {
        struct key *key;
        char *description;
@@ -937,7 +936,7 @@ int rxrpc_server_keyring(struct rxrpc_sock *rx, char __user *optval,
        if (optlen <= 0 || optlen > PAGE_SIZE - 1)
                return -EINVAL;
 
-       description = memdup_user_nul(optval, optlen);
+       description = memdup_sockptr_nul(optval, optlen);
        if (IS_ERR(description))
                return PTR_ERR(description);
 
index 84badf0..a3b37d8 100644 (file)
@@ -468,6 +468,9 @@ choice
        config DEFAULT_FQ_CODEL
                bool "Fair Queue Controlled Delay" if NET_SCH_FQ_CODEL
 
+       config DEFAULT_FQ_PIE
+               bool "Flow Queue Proportional Integral controller Enhanced" if NET_SCH_FQ_PIE
+
        config DEFAULT_SFQ
                bool "Stochastic Fair Queue" if NET_SCH_SFQ
 
@@ -480,6 +483,7 @@ config DEFAULT_NET_SCH
        default "pfifo_fast" if DEFAULT_PFIFO_FAST
        default "fq" if DEFAULT_FQ
        default "fq_codel" if DEFAULT_FQ_CODEL
+       default "fq_pie" if DEFAULT_FQ_PIE
        default "sfq" if DEFAULT_SFQ
        default "pfifo_fast"
 endif
index 8ac7eb0..063d8aa 100644 (file)
@@ -1059,14 +1059,13 @@ err:
        return err;
 }
 
-void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets,
-                            bool drop, bool hw)
+void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
+                            u64 drops, bool hw)
 {
        if (a->cpu_bstats) {
                _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
 
-               if (drop)
-                       this_cpu_ptr(a->cpu_qstats)->drops += packets;
+               this_cpu_ptr(a->cpu_qstats)->drops += drops;
 
                if (hw)
                        _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
@@ -1075,8 +1074,7 @@ void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets,
        }
 
        _bstats_update(&a->tcfa_bstats, bytes, packets);
-       if (drop)
-               a->tcfa_qstats.drops += packets;
+       a->tcfa_qstats.drops += drops;
        if (hw)
                _bstats_update(&a->tcfa_bstats_hw, bytes, packets);
 }
@@ -1475,7 +1473,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
 {
        struct net *net = sock_net(skb->sk);
        struct nlattr *tca[TCA_ROOT_MAX + 1];
-       u32 portid = skb ? NETLINK_CB(skb).portid : 0;
+       u32 portid = NETLINK_CB(skb).portid;
        int ret = 0, ovr = 0;
 
        if ((n->nlmsg_type != RTM_GETACTION) &&
index 43a2430..f901421 100644 (file)
@@ -43,17 +43,20 @@ static int tcf_connmark_act(struct sk_buff *skb, const struct tc_action *a,
        tcf_lastuse_update(&ca->tcf_tm);
        bstats_update(&ca->tcf_bstats, skb);
 
-       if (skb->protocol == htons(ETH_P_IP)) {
+       switch (skb_protocol(skb, true)) {
+       case htons(ETH_P_IP):
                if (skb->len < sizeof(struct iphdr))
                        goto out;
 
                proto = NFPROTO_IPV4;
-       } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               break;
+       case htons(ETH_P_IPV6):
                if (skb->len < sizeof(struct ipv6hdr))
                        goto out;
 
                proto = NFPROTO_IPV6;
-       } else {
+               break;
+       default:
                goto out;
        }
 
index cb8608f..f5826e4 100644 (file)
@@ -587,7 +587,7 @@ static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
                goto drop;
 
        update_flags = params->update_flags;
-       protocol = tc_skb_protocol(skb);
+       protocol = skb_protocol(skb, false);
 again:
        switch (protocol) {
        case cpu_to_be16(ETH_P_IP):
@@ -598,7 +598,8 @@ again:
                if (!tcf_csum_ipv6(skb, update_flags))
                        goto drop;
                break;
-       case cpu_to_be16(ETH_P_8021AD): /* fall through */
+       case cpu_to_be16(ETH_P_8021AD):
+               fallthrough;
        case cpu_to_be16(ETH_P_8021Q):
                if (skb_vlan_tag_present(skb) && !orig_vlan_tag_present) {
                        protocol = skb->protocol;
index e29f0f4..3893e03 100644 (file)
@@ -624,7 +624,7 @@ static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
 {
        u8 family = NFPROTO_UNSPEC;
 
-       switch (skb->protocol) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                family = NFPROTO_IPV4;
                break;
@@ -748,6 +748,7 @@ static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
                          const struct nf_nat_range2 *range,
                          enum nf_nat_manip_type maniptype)
 {
+       __be16 proto = skb_protocol(skb, true);
        int hooknum, err = NF_ACCEPT;
 
        /* See HOOK2MANIP(). */
@@ -759,14 +760,13 @@ static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
        switch (ctinfo) {
        case IP_CT_RELATED:
        case IP_CT_RELATED_REPLY:
-               if (skb->protocol == htons(ETH_P_IP) &&
+               if (proto == htons(ETH_P_IP) &&
                    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
                        if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
                                                           hooknum))
                                err = NF_DROP;
                        goto out;
-               } else if (IS_ENABLED(CONFIG_IPV6) &&
-                          skb->protocol == htons(ETH_P_IPV6)) {
+               } else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
                        __be16 frag_off;
                        u8 nexthdr = ipv6_hdr(skb)->nexthdr;
                        int hdrlen = ipv6_skip_exthdr(skb,
@@ -783,7 +783,7 @@ static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
                        }
                }
                /* Non-ICMP, fall thru to initialize if needed. */
-               /* fall through */
+               fallthrough;
        case IP_CT_NEW:
                /* Seen it before?  This can happen for loopback, retrans,
                 * or local packets.
@@ -925,6 +925,8 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
        force = p->ct_action & TCA_CT_ACT_FORCE;
        tmpl = p->tmpl;
 
+       tcf_lastuse_update(&c->tcf_tm);
+
        if (clear) {
                ct = nf_ct_get(skb, &ctinfo);
                if (ct) {
@@ -1450,12 +1452,12 @@ static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
        return tcf_idr_search(tn, a, index);
 }
 
-static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
-                            u64 lastuse, bool hw)
+static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
+                            u64 drops, u64 lastuse, bool hw)
 {
        struct tcf_ct *c = to_ct(a);
 
-       tcf_action_update_stats(a, bytes, packets, false, hw);
+       tcf_action_update_stats(a, bytes, packets, drops, hw);
        c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
 }
 
@@ -1543,17 +1545,6 @@ static void __exit ct_cleanup_module(void)
        destroy_workqueue(act_ct_wq);
 }
 
-void tcf_ct_flow_table_restore_skb(struct sk_buff *skb, unsigned long cookie)
-{
-       enum ip_conntrack_info ctinfo = cookie & NFCT_INFOMASK;
-       struct nf_conn *ct;
-
-       ct = (struct nf_conn *)(cookie & NFCT_PTRMASK);
-       nf_conntrack_get(&ct->ct_general);
-       nf_ct_set(skb, ct, ctinfo);
-}
-EXPORT_SYMBOL_GPL(tcf_ct_flow_table_restore_skb);
-
 module_init(ct_init_module);
 module_exit(ct_cleanup_module);
 MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
@@ -1561,4 +1552,3 @@ MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
 MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
 MODULE_DESCRIPTION("Connection tracking action");
 MODULE_LICENSE("GPL v2");
-
index 1964962..b5042f3 100644 (file)
@@ -96,19 +96,22 @@ static int tcf_ctinfo_act(struct sk_buff *skb, const struct tc_action *a,
        action = READ_ONCE(ca->tcf_action);
 
        wlen = skb_network_offset(skb);
-       if (tc_skb_protocol(skb) == htons(ETH_P_IP)) {
+       switch (skb_protocol(skb, true)) {
+       case htons(ETH_P_IP):
                wlen += sizeof(struct iphdr);
                if (!pskb_may_pull(skb, wlen))
                        goto out;
 
                proto = NFPROTO_IPV4;
-       } else if (tc_skb_protocol(skb) == htons(ETH_P_IPV6)) {
+               break;
+       case htons(ETH_P_IPV6):
                wlen += sizeof(struct ipv6hdr);
                if (!pskb_may_pull(skb, wlen))
                        goto out;
 
                proto = NFPROTO_IPV6;
-       } else {
+               break;
+       default:
                goto out;
        }
 
index 4160657..410e3bb 100644 (file)
@@ -171,14 +171,15 @@ static int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a,
        return action;
 }
 
-static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u32 packets,
-                                 u64 lastuse, bool hw)
+static void tcf_gact_stats_update(struct tc_action *a, u64 bytes, u64 packets,
+                                 u64 drops, u64 lastuse, bool hw)
 {
        struct tcf_gact *gact = to_gact(a);
        int action = READ_ONCE(gact->tcf_action);
        struct tcf_t *tm = &gact->tcf_tm;
 
-       tcf_action_update_stats(a, bytes, packets, action == TC_ACT_SHOT, hw);
+       tcf_action_update_stats(a, bytes, packets,
+                               action == TC_ACT_SHOT ? packets : drops, hw);
        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
 }
 
index 9c62859..1fb8d42 100644 (file)
@@ -32,7 +32,7 @@ static ktime_t gate_get_time(struct tcf_gate *gact)
        return KTIME_MAX;
 }
 
-static int gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
+static void gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
 {
        struct tcf_gate_params *param = &gact->param;
        ktime_t now, base, cycle;
@@ -43,18 +43,13 @@ static int gate_get_start_time(struct tcf_gate *gact, ktime_t *start)
 
        if (ktime_after(base, now)) {
                *start = base;
-               return 0;
+               return;
        }
 
        cycle = param->tcfg_cycletime;
 
-       /* cycle time should not be zero */
-       if (!cycle)
-               return -EFAULT;
-
        n = div64_u64(ktime_sub_ns(now, base), cycle);
        *start = ktime_add_ns(base, (n + 1) * cycle);
-       return 0;
 }
 
 static void gate_start_timer(struct tcf_gate *gact, ktime_t start)
@@ -277,6 +272,27 @@ release_list:
        return err;
 }
 
+static void gate_setup_timer(struct tcf_gate *gact, u64 basetime,
+                            enum tk_offsets tko, s32 clockid,
+                            bool do_init)
+{
+       if (!do_init) {
+               if (basetime == gact->param.tcfg_basetime &&
+                   tko == gact->tk_offset &&
+                   clockid == gact->param.tcfg_clockid)
+                       return;
+
+               spin_unlock_bh(&gact->tcf_lock);
+               hrtimer_cancel(&gact->hitimer);
+               spin_lock_bh(&gact->tcf_lock);
+       }
+       gact->param.tcfg_basetime = basetime;
+       gact->param.tcfg_clockid = clockid;
+       gact->tk_offset = tko;
+       hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
+       gact->hitimer.function = gate_timer_func;
+}
+
 static int tcf_gate_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
                         int ovr, int bind, bool rtnl_held,
@@ -287,12 +303,12 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
        enum tk_offsets tk_offset = TK_OFFS_TAI;
        struct nlattr *tb[TCA_GATE_MAX + 1];
        struct tcf_chain *goto_ch = NULL;
+       u64 cycletime = 0, basetime = 0;
        struct tcf_gate_params *p;
        s32 clockid = CLOCK_TAI;
        struct tcf_gate *gact;
        struct tc_gate *parm;
        int ret = 0, err;
-       u64 basetime = 0;
        u32 gflags = 0;
        s32 prio = -1;
        ktime_t start;
@@ -308,6 +324,27 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
        if (!tb[TCA_GATE_PARMS])
                return -EINVAL;
 
+       if (tb[TCA_GATE_CLOCKID]) {
+               clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
+               switch (clockid) {
+               case CLOCK_REALTIME:
+                       tk_offset = TK_OFFS_REAL;
+                       break;
+               case CLOCK_MONOTONIC:
+                       tk_offset = TK_OFFS_MAX;
+                       break;
+               case CLOCK_BOOTTIME:
+                       tk_offset = TK_OFFS_BOOT;
+                       break;
+               case CLOCK_TAI:
+                       tk_offset = TK_OFFS_TAI;
+                       break;
+               default:
+                       NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
+                       return -EINVAL;
+               }
+       }
+
        parm = nla_data(tb[TCA_GATE_PARMS]);
        index = parm->index;
 
@@ -331,10 +368,6 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }
-       if (ret == ACT_P_CREATED) {
-               to_gate(*a)->param.tcfg_clockid = -1;
-               INIT_LIST_HEAD(&(to_gate(*a)->param.entries));
-       }
 
        if (tb[TCA_GATE_PRIORITY])
                prio = nla_get_s32(tb[TCA_GATE_PRIORITY]);
@@ -345,41 +378,19 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
        if (tb[TCA_GATE_FLAGS])
                gflags = nla_get_u32(tb[TCA_GATE_FLAGS]);
 
-       if (tb[TCA_GATE_CLOCKID]) {
-               clockid = nla_get_s32(tb[TCA_GATE_CLOCKID]);
-               switch (clockid) {
-               case CLOCK_REALTIME:
-                       tk_offset = TK_OFFS_REAL;
-                       break;
-               case CLOCK_MONOTONIC:
-                       tk_offset = TK_OFFS_MAX;
-                       break;
-               case CLOCK_BOOTTIME:
-                       tk_offset = TK_OFFS_BOOT;
-                       break;
-               case CLOCK_TAI:
-                       tk_offset = TK_OFFS_TAI;
-                       break;
-               default:
-                       NL_SET_ERR_MSG(extack, "Invalid 'clockid'");
-                       goto release_idr;
-               }
-       }
+       gact = to_gate(*a);
+       if (ret == ACT_P_CREATED)
+               INIT_LIST_HEAD(&gact->param.entries);
 
        err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
        if (err < 0)
                goto release_idr;
 
-       gact = to_gate(*a);
-
        spin_lock_bh(&gact->tcf_lock);
        p = &gact->param;
 
-       if (tb[TCA_GATE_CYCLE_TIME]) {
-               p->tcfg_cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);
-               if (!p->tcfg_cycletime_ext)
-                       goto chain_put;
-       }
+       if (tb[TCA_GATE_CYCLE_TIME])
+               cycletime = nla_get_u64(tb[TCA_GATE_CYCLE_TIME]);
 
        if (tb[TCA_GATE_ENTRY_LIST]) {
                err = parse_gate_list(tb[TCA_GATE_ENTRY_LIST], p, extack);
@@ -387,35 +398,29 @@ static int tcf_gate_init(struct net *net, struct nlattr *nla,
                        goto chain_put;
        }
 
-       if (!p->tcfg_cycletime) {
+       if (!cycletime) {
                struct tcfg_gate_entry *entry;
                ktime_t cycle = 0;
 
                list_for_each_entry(entry, &p->entries, list)
                        cycle = ktime_add_ns(cycle, entry->interval);
-               p->tcfg_cycletime = cycle;
+               cycletime = cycle;
+               if (!cycletime) {
+                       err = -EINVAL;
+                       goto chain_put;
+               }
        }
+       p->tcfg_cycletime = cycletime;
 
        if (tb[TCA_GATE_CYCLE_TIME_EXT])
                p->tcfg_cycletime_ext =
                        nla_get_u64(tb[TCA_GATE_CYCLE_TIME_EXT]);
 
+       gate_setup_timer(gact, basetime, tk_offset, clockid,
+                        ret == ACT_P_CREATED);
        p->tcfg_priority = prio;
-       p->tcfg_basetime = basetime;
-       p->tcfg_clockid = clockid;
        p->tcfg_flags = gflags;
-
-       gact->tk_offset = tk_offset;
-       hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
-       gact->hitimer.function = gate_timer_func;
-
-       err = gate_get_start_time(gact, &start);
-       if (err < 0) {
-               NL_SET_ERR_MSG(extack,
-                              "Internal error: failed get start time");
-               release_entry_list(&p->entries);
-               goto chain_put;
-       }
+       gate_get_start_time(gact, &start);
 
        gact->current_close_time = start;
        gact->current_gate_status = GATE_ACT_GATE_OPEN | GATE_ACT_PENDING;
@@ -443,6 +448,13 @@ chain_put:
        if (goto_ch)
                tcf_chain_put_by_act(goto_ch);
 release_idr:
+       /* action is not inserted in any list: it's safe to init hitimer
+        * without taking tcf_lock.
+        */
+       if (ret == ACT_P_CREATED)
+               gate_setup_timer(gact, gact->param.tcfg_basetime,
+                                gact->tk_offset, gact->param.tcfg_clockid,
+                                true);
        tcf_idr_release(*a, bind);
        return err;
 }
@@ -453,9 +465,7 @@ static void tcf_gate_cleanup(struct tc_action *a)
        struct tcf_gate_params *p;
 
        p = &gact->param;
-       if (p->tcfg_clockid != -1)
-               hrtimer_cancel(&gact->hitimer);
-
+       hrtimer_cancel(&gact->hitimer);
        release_entry_list(&p->entries);
 }
 
@@ -568,13 +578,13 @@ static int tcf_gate_walker(struct net *net, struct sk_buff *skb,
        return tcf_generic_walker(tn, skb, cb, type, ops, extack);
 }
 
-static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u32 packets,
-                                 u64 lastuse, bool hw)
+static void tcf_gate_stats_update(struct tc_action *a, u64 bytes, u64 packets,
+                                 u64 drops, u64 lastuse, bool hw)
 {
        struct tcf_gate *gact = to_gate(a);
        struct tcf_t *tm = &gact->tcf_tm;
 
-       tcf_action_update_stats(a, bytes, packets, false, hw);
+       tcf_action_update_stats(a, bytes, packets, drops, hw);
        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
 }
 
index 83dd82f..b270531 100644 (file)
@@ -312,13 +312,13 @@ out:
        return retval;
 }
 
-static void tcf_stats_update(struct tc_action *a, u64 bytes, u32 packets,
-                            u64 lastuse, bool hw)
+static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
+                            u64 drops, u64 lastuse, bool hw)
 {
        struct tcf_mirred *m = to_mirred(a);
        struct tcf_t *tm = &m->tcf_tm;
 
-       tcf_action_update_stats(a, bytes, packets, false, hw);
+       tcf_action_update_stats(a, bytes, packets, drops, hw);
        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
 }
 
index be3f215..8118e26 100644 (file)
@@ -82,7 +82,7 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
                        goto drop;
                break;
        case TCA_MPLS_ACT_PUSH:
-               new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol));
+               new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb_protocol(skb, true)));
                if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len,
                                  skb->dev && skb->dev->type == ARPHRD_ETHER))
                        goto drop;
index d41d620..66986db 100644 (file)
@@ -409,13 +409,13 @@ done:
        return p->tcf_action;
 }
 
-static void tcf_pedit_stats_update(struct tc_action *a, u64 bytes, u32 packets,
-                                  u64 lastuse, bool hw)
+static void tcf_pedit_stats_update(struct tc_action *a, u64 bytes, u64 packets,
+                                  u64 drops, u64 lastuse, bool hw)
 {
        struct tcf_pedit *d = to_pedit(a);
        struct tcf_t *tm = &d->tcf_tm;
 
-       tcf_action_update_stats(a, bytes, packets, false, hw);
+       tcf_action_update_stats(a, bytes, packets, drops, hw);
        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
 }
 
index 8b7a0ac..0b431d4 100644 (file)
@@ -288,13 +288,13 @@ static void tcf_police_cleanup(struct tc_action *a)
 }
 
 static void tcf_police_stats_update(struct tc_action *a,
-                                   u64 bytes, u32 packets,
+                                   u64 bytes, u64 packets, u64 drops,
                                    u64 lastuse, bool hw)
 {
        struct tcf_police *police = to_police(a);
        struct tcf_t *tm = &police->tcf_tm;
 
-       tcf_action_update_stats(a, bytes, packets, false, hw);
+       tcf_action_update_stats(a, bytes, packets, drops, hw);
        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
 }
 
index b125b2b..d065238 100644 (file)
@@ -41,7 +41,7 @@ static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
        if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
                int wlen = skb_network_offset(skb);
 
-               switch (tc_skb_protocol(skb)) {
+               switch (skb_protocol(skb, true)) {
                case htons(ETH_P_IP):
                        wlen += sizeof(struct iphdr);
                        if (!pskb_may_pull(skb, wlen))
@@ -74,12 +74,13 @@ err:
 }
 
 static void tcf_skbedit_stats_update(struct tc_action *a, u64 bytes,
-                                    u32 packets, u64 lastuse, bool hw)
+                                    u64 packets, u64 drops,
+                                    u64 lastuse, bool hw)
 {
        struct tcf_skbedit *d = to_skbedit(a);
        struct tcf_t *tm = &d->tcf_tm;
 
-       tcf_action_update_stats(a, bytes, packets, false, hw);
+       tcf_action_update_stats(a, bytes, packets, drops, hw);
        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
 }
 
index c91d395..a5ff9f6 100644 (file)
@@ -302,13 +302,13 @@ static int tcf_vlan_walker(struct net *net, struct sk_buff *skb,
        return tcf_generic_walker(tn, skb, cb, type, ops, extack);
 }
 
-static void tcf_vlan_stats_update(struct tc_action *a, u64 bytes, u32 packets,
-                                 u64 lastuse, bool hw)
+static void tcf_vlan_stats_update(struct tc_action *a, u64 bytes, u64 packets,
+                                 u64 drops, u64 lastuse, bool hw)
 {
        struct tcf_vlan *v = to_vlan(a);
        struct tcf_t *tm = &v->tcf_tm;
 
-       tcf_action_update_stats(a, bytes, packets, false, hw);
+       tcf_action_update_stats(a, bytes, packets, drops, hw);
        tm->lastuse = max_t(u64, tm->lastuse, lastuse);
 }
 
index a00a203..b2b7440 100644 (file)
@@ -622,7 +622,7 @@ static int tcf_block_setup(struct tcf_block *block,
                           struct flow_block_offload *bo);
 
 static void tcf_block_offload_init(struct flow_block_offload *bo,
-                                  struct net_device *dev,
+                                  struct net_device *dev, struct Qdisc *sch,
                                   enum flow_block_command command,
                                   enum flow_block_binder_type binder_type,
                                   struct flow_block *flow_block,
@@ -634,6 +634,7 @@ static void tcf_block_offload_init(struct flow_block_offload *bo,
        bo->block = flow_block;
        bo->block_shared = shared;
        bo->extack = extack;
+       bo->sch = sch;
        INIT_LIST_HEAD(&bo->cb_list);
 }
 
@@ -644,14 +645,16 @@ static void tc_block_indr_cleanup(struct flow_block_cb *block_cb)
 {
        struct tcf_block *block = block_cb->indr.data;
        struct net_device *dev = block_cb->indr.dev;
+       struct Qdisc *sch = block_cb->indr.sch;
        struct netlink_ext_ack extack = {};
        struct flow_block_offload bo;
 
-       tcf_block_offload_init(&bo, dev, FLOW_BLOCK_UNBIND,
+       tcf_block_offload_init(&bo, dev, sch, FLOW_BLOCK_UNBIND,
                               block_cb->indr.binder_type,
                               &block->flow_block, tcf_block_shared(block),
                               &extack);
        down_write(&block->cb_lock);
+       list_del(&block_cb->driver_list);
        list_move(&block_cb->list, &bo.cb_list);
        up_write(&block->cb_lock);
        rtnl_lock();
@@ -665,31 +668,35 @@ static bool tcf_block_offload_in_use(struct tcf_block *block)
 }
 
 static int tcf_block_offload_cmd(struct tcf_block *block,
-                                struct net_device *dev,
+                                struct net_device *dev, struct Qdisc *sch,
                                 struct tcf_block_ext_info *ei,
                                 enum flow_block_command command,
                                 struct netlink_ext_ack *extack)
 {
        struct flow_block_offload bo = {};
-       int err;
 
-       tcf_block_offload_init(&bo, dev, command, ei->binder_type,
+       tcf_block_offload_init(&bo, dev, sch, command, ei->binder_type,
                               &block->flow_block, tcf_block_shared(block),
                               extack);
 
-       if (dev->netdev_ops->ndo_setup_tc)
+       if (dev->netdev_ops->ndo_setup_tc) {
+               int err;
+
                err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
-       else
-               err = flow_indr_dev_setup_offload(dev, TC_SETUP_BLOCK, block,
-                                                 &bo, tc_block_indr_cleanup);
+               if (err < 0) {
+                       if (err != -EOPNOTSUPP)
+                               NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
+                       return err;
+               }
 
-       if (err < 0) {
-               if (err != -EOPNOTSUPP)
-                       NL_SET_ERR_MSG(extack, "Driver ndo_setup_tc failed");
-               return err;
+               return tcf_block_setup(block, &bo);
        }
 
-       return tcf_block_setup(block, &bo);
+       flow_indr_dev_setup_offload(dev, sch, TC_SETUP_BLOCK, block, &bo,
+                                   tc_block_indr_cleanup);
+       tcf_block_setup(block, &bo);
+
+       return -EOPNOTSUPP;
 }
 
 static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
@@ -712,7 +719,7 @@ static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
                goto err_unlock;
        }
 
-       err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_BIND, extack);
+       err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_BIND, extack);
        if (err == -EOPNOTSUPP)
                goto no_offload_dev_inc;
        if (err)
@@ -739,7 +746,7 @@ static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
        int err;
 
        down_write(&block->cb_lock);
-       err = tcf_block_offload_cmd(block, dev, ei, FLOW_BLOCK_UNBIND, NULL);
+       err = tcf_block_offload_cmd(block, dev, q, ei, FLOW_BLOCK_UNBIND, NULL);
        if (err == -EOPNOTSUPP)
                goto no_offload_dev_dec;
        up_write(&block->cb_lock);
@@ -1533,7 +1540,7 @@ static inline int __tcf_classify(struct sk_buff *skb,
 reclassify:
 #endif
        for (; tp; tp = rcu_dereference_bh(tp->next)) {
-               __be16 protocol = tc_skb_protocol(skb);
+               __be16 protocol = skb_protocol(skb, false);
                int err;
 
                if (tp->protocol != protocol &&
@@ -3655,9 +3662,11 @@ int tc_setup_flow_action(struct flow_action *flow_action,
                        tcf_sample_get_group(entry, act);
                } else if (is_tcf_police(act)) {
                        entry->id = FLOW_ACTION_POLICE;
-                       entry->police.burst = tcf_police_tcfp_burst(act);
+                       entry->police.burst = tcf_police_burst(act);
                        entry->police.rate_bytes_ps =
                                tcf_police_rate_bytes_ps(act);
+                       entry->police.mtu = tcf_police_tcfp_mtu(act);
+                       entry->police.index = act->tcfa_index;
                } else if (is_tcf_ct(act)) {
                        entry->id = FLOW_ACTION_CT;
                        entry->ct.action = tcf_ct_action(act);
@@ -3741,6 +3750,119 @@ unsigned int tcf_exts_num_actions(struct tcf_exts *exts)
 }
 EXPORT_SYMBOL(tcf_exts_num_actions);
 
+#ifdef CONFIG_NET_CLS_ACT
+static int tcf_qevent_parse_block_index(struct nlattr *block_index_attr,
+                                       u32 *p_block_index,
+                                       struct netlink_ext_ack *extack)
+{
+       *p_block_index = nla_get_u32(block_index_attr);
+       if (!*p_block_index) {
+               NL_SET_ERR_MSG(extack, "Block number may not be zero");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
+                   enum flow_block_binder_type binder_type,
+                   struct nlattr *block_index_attr,
+                   struct netlink_ext_ack *extack)
+{
+       u32 block_index;
+       int err;
+
+       if (!block_index_attr)
+               return 0;
+
+       err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
+       if (err)
+               return err;
+
+       if (!block_index)
+               return 0;
+
+       qe->info.binder_type = binder_type;
+       qe->info.chain_head_change = tcf_chain_head_change_dflt;
+       qe->info.chain_head_change_priv = &qe->filter_chain;
+       qe->info.block_index = block_index;
+
+       return tcf_block_get_ext(&qe->block, sch, &qe->info, extack);
+}
+EXPORT_SYMBOL(tcf_qevent_init);
+
+void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
+{
+       if (qe->info.block_index)
+               tcf_block_put_ext(qe->block, sch, &qe->info);
+}
+EXPORT_SYMBOL(tcf_qevent_destroy);
+
+int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
+                              struct netlink_ext_ack *extack)
+{
+       u32 block_index;
+       int err;
+
+       if (!block_index_attr)
+               return 0;
+
+       err = tcf_qevent_parse_block_index(block_index_attr, &block_index, extack);
+       if (err)
+               return err;
+
+       /* Bounce newly-configured block or change in block. */
+       if (block_index != qe->info.block_index) {
+               NL_SET_ERR_MSG(extack, "Change of blocks is not supported");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(tcf_qevent_validate_change);
+
+struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
+                                 struct sk_buff **to_free, int *ret)
+{
+       struct tcf_result cl_res;
+       struct tcf_proto *fl;
+
+       if (!qe->info.block_index)
+               return skb;
+
+       fl = rcu_dereference_bh(qe->filter_chain);
+
+       switch (tcf_classify(skb, fl, &cl_res, false)) {
+       case TC_ACT_SHOT:
+               qdisc_qstats_drop(sch);
+               __qdisc_drop(skb, to_free);
+               *ret = __NET_XMIT_BYPASS;
+               return NULL;
+       case TC_ACT_STOLEN:
+       case TC_ACT_QUEUED:
+       case TC_ACT_TRAP:
+               __qdisc_drop(skb, to_free);
+               *ret = __NET_XMIT_STOLEN;
+               return NULL;
+       case TC_ACT_REDIRECT:
+               skb_do_redirect(skb);
+               *ret = __NET_XMIT_STOLEN;
+               return NULL;
+       }
+
+       return skb;
+}
+EXPORT_SYMBOL(tcf_qevent_handle);
+
+int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
+{
+       if (!qe->info.block_index)
+               return 0;
+       return nla_put_u32(skb, attr_name, qe->info.block_index);
+}
+EXPORT_SYMBOL(tcf_qevent_dump);
+#endif
+
 static __net_init int tcf_net_init(struct net *net)
 {
        struct tcf_net *tn = net_generic(net, tcf_net_id);
index 80ae7b9..ab53a93 100644 (file)
@@ -80,7 +80,7 @@ static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
        if (dst)
                return ntohl(dst);
 
-       return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
+       return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
 }
 
 static u32 flow_get_proto(const struct sk_buff *skb,
@@ -104,7 +104,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb,
        if (flow->ports.ports)
                return ntohs(flow->ports.dst);
 
-       return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
+       return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
 }
 
 static u32 flow_get_iif(const struct sk_buff *skb)
@@ -151,7 +151,7 @@ static u32 flow_get_nfct(const struct sk_buff *skb)
 static u32 flow_get_nfct_src(const struct sk_buff *skb,
                             const struct flow_keys *flow)
 {
-       switch (tc_skb_protocol(skb)) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                return ntohl(CTTUPLE(skb, src.u3.ip));
        case htons(ETH_P_IPV6):
@@ -164,7 +164,7 @@ fallback:
 static u32 flow_get_nfct_dst(const struct sk_buff *skb,
                             const struct flow_keys *flow)
 {
-       switch (tc_skb_protocol(skb)) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                return ntohl(CTTUPLE(skb, dst.u3.ip));
        case htons(ETH_P_IPV6):
index b2da372..a4f7ef1 100644 (file)
@@ -64,6 +64,7 @@ struct fl_flow_key {
                };
        } tp_range;
        struct flow_dissector_key_ct ct;
+       struct flow_dissector_key_hash hash;
 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
 
 struct fl_flow_mask_range {
@@ -313,11 +314,12 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                /* skb_flow_dissect() does not set n_proto in case an unknown
                 * protocol, so do it rather here.
                 */
-               skb_key.basic.n_proto = skb->protocol;
+               skb_key.basic.n_proto = skb_protocol(skb, false);
                skb_flow_dissect_tunnel_info(skb, &mask->dissector, &skb_key);
                skb_flow_dissect_ct(skb, &mask->dissector, &skb_key,
                                    fl_ct_info_to_flower_map,
                                    ARRAY_SIZE(fl_ct_info_to_flower_map));
+               skb_flow_dissect_hash(skb, &mask->dissector, &skb_key);
                skb_flow_dissect(skb, &mask->dissector, &skb_key, 0);
 
                f = fl_mask_lookup(mask, &skb_key);
@@ -491,6 +493,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f,
 
        tcf_exts_stats_update(&f->exts, cls_flower.stats.bytes,
                              cls_flower.stats.pkts,
+                             cls_flower.stats.drops,
                              cls_flower.stats.lastused,
                              cls_flower.stats.used_hw_stats,
                              cls_flower.stats.used_hw_stats_valid);
@@ -694,6 +697,9 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
        [TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY,
                                            .len = 128 / BITS_PER_BYTE },
        [TCA_FLOWER_FLAGS]              = { .type = NLA_U32 },
+       [TCA_FLOWER_KEY_HASH]           = { .type = NLA_U32 },
+       [TCA_FLOWER_KEY_HASH_MASK]      = { .type = NLA_U32 },
+
 };
 
 static const struct nla_policy
@@ -1625,6 +1631,10 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
 
        fl_set_key_ip(tb, true, &key->enc_ip, &mask->enc_ip);
 
+       fl_set_key_val(tb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
+                      &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
+                      sizeof(key->hash.hash));
+
        if (tb[TCA_FLOWER_KEY_ENC_OPTS]) {
                ret = fl_set_enc_opt(tb, key, mask, extack);
                if (ret)
@@ -1739,6 +1749,8 @@ static void fl_init_dissector(struct flow_dissector *dissector,
                             FLOW_DISSECTOR_KEY_ENC_OPTS, enc_opts);
        FL_KEY_SET_IF_MASKED(mask, keys, cnt,
                             FLOW_DISSECTOR_KEY_CT, ct);
+       FL_KEY_SET_IF_MASKED(mask, keys, cnt,
+                            FLOW_DISSECTOR_KEY_HASH, hash);
 
        skb_flow_dissector_init(dissector, keys, cnt);
 }
@@ -2959,6 +2971,11 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
        if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags))
                goto nla_put_failure;
 
+       if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH,
+                            &mask->hash.hash, TCA_FLOWER_KEY_HASH_MASK,
+                            sizeof(key->hash.hash)))
+               goto nla_put_failure;
+
        return 0;
 
 nla_put_failure:
index 8d39dbc..cafb844 100644 (file)
@@ -338,7 +338,8 @@ static void mall_stats_hw_filter(struct tcf_proto *tp,
        tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true);
 
        tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes,
-                             cls_mall.stats.pkts, cls_mall.stats.lastused,
+                             cls_mall.stats.pkts, cls_mall.stats.drops,
+                             cls_mall.stats.lastused,
                              cls_mall.stats.used_hw_stats,
                              cls_mall.stats.used_hw_stats_valid);
 }
index 61e9502..78bec34 100644 (file)
@@ -533,7 +533,7 @@ tcindex_change(struct net *net, struct sk_buff *in_skb,
 
        pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
            "p %p,r %p,*arg %p\n",
-           tp, handle, tca, arg, opt, p, r, arg ? *arg : NULL);
+           tp, handle, tca, arg, opt, p, r, *arg);
 
        if (!opt)
                return 0;
index e15ff33..771b068 100644 (file)
@@ -796,9 +796,7 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
        struct tc_u32_sel *s = &n->sel;
        struct tc_u_knode *new;
 
-       new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
-                     GFP_KERNEL);
-
+       new = kzalloc(struct_size(new, sel.keys, s->nkeys), GFP_KERNEL);
        if (!new)
                return NULL;
 
index b9a94fd..5ea84de 100644 (file)
@@ -40,6 +40,7 @@ struct canid_match {
 
 /**
  * em_canid_get_id() - Extracts Can ID out of the sk_buff structure.
+ * @skb: buffer to extract Can ID from
  */
 static canid_t em_canid_get_id(struct sk_buff *skb)
 {
index df00566..c95cf86 100644 (file)
@@ -59,7 +59,7 @@ static int em_ipset_match(struct sk_buff *skb, struct tcf_ematch *em,
        };
        int ret, network_offset;
 
-       switch (tc_skb_protocol(skb)) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                state.pf = NFPROTO_IPV4;
                if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
index 18755d2..3650117 100644 (file)
@@ -212,7 +212,7 @@ static int em_ipt_match(struct sk_buff *skb, struct tcf_ematch *em,
        struct nf_hook_state state;
        int ret;
 
-       switch (tc_skb_protocol(skb)) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
                        return 0;
index d99966a..4625496 100644 (file)
@@ -195,7 +195,7 @@ META_COLLECTOR(int_priority)
 META_COLLECTOR(int_protocol)
 {
        /* Let userspace take care of the byte ordering */
-       dst->value = tc_skb_protocol(skb);
+       dst->value = skb_protocol(skb, false);
 }
 
 META_COLLECTOR(int_pkttype)
index dd3b8c1..f885bea 100644 (file)
@@ -389,7 +389,6 @@ EXPORT_SYMBOL(tcf_em_tree_validate);
 /**
  * tcf_em_tree_destroy - destroy an ematch tree
  *
- * @tp: classifier kind handle
  * @tree: ematch tree to be deleted
  *
  * This functions destroys an ematch tree previously created by
@@ -425,7 +424,7 @@ EXPORT_SYMBOL(tcf_em_tree_destroy);
  * tcf_em_tree_dump - dump ematch tree into a rtnl message
  *
  * @skb: skb holding the rtnl message
- * @t: ematch tree to be dumped
+ * @tree: ematch tree to be dumped
  * @tlv: TLV type to be used to encapsulate the tree
  *
  * This function dumps a ematch tree into a rtnl message. It is valid to
index 9a3449b..2a76a2f 100644 (file)
@@ -267,7 +267,8 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
            root->handle == handle)
                return root;
 
-       hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) {
+       hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle,
+                                  lockdep_rtnl_is_held()) {
                if (q->handle == handle)
                        return q;
        }
@@ -1093,8 +1094,7 @@ skip:
                int err;
 
                /* Only support running class lockless if parent is lockless */
-               if (new && (new->flags & TCQ_F_NOLOCK) &&
-                   parent && !(parent->flags & TCQ_F_NOLOCK))
+               if (new && (new->flags & TCQ_F_NOLOCK) && !(parent->flags & TCQ_F_NOLOCK))
                        qdisc_clear_nolock(new);
 
                if (!cops || !cops->graft)
index ee12ca9..1c281cc 100644 (file)
@@ -553,16 +553,16 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
        if (!p->link.q)
                p->link.q = &noop_qdisc;
        pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
+       p->link.vcc = NULL;
+       p->link.sock = NULL;
+       p->link.common.classid = sch->handle;
+       p->link.ref = 1;
 
        err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
                            extack);
        if (err)
                return err;
 
-       p->link.vcc = NULL;
-       p->link.sock = NULL;
-       p->link.common.classid = sch->handle;
-       p->link.ref = 1;
        tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
        return 0;
 }
index 60f8ae5..561d20c 100644 (file)
@@ -312,8 +312,8 @@ static const u8 precedence[] = {
 };
 
 static const u8 diffserv8[] = {
-       2, 5, 1, 2, 4, 2, 2, 2,
-       0, 2, 1, 2, 1, 2, 1, 2,
+       2, 0, 1, 2, 4, 2, 2, 2,
+       1, 2, 1, 2, 1, 2, 1, 2,
        5, 2, 4, 2, 4, 2, 4, 2,
        3, 2, 3, 2, 3, 2, 3, 2,
        6, 2, 3, 2, 3, 2, 3, 2,
@@ -323,7 +323,7 @@ static const u8 diffserv8[] = {
 };
 
 static const u8 diffserv4[] = {
-       0, 2, 0, 0, 2, 0, 0, 0,
+       0, 1, 0, 0, 2, 0, 0, 0,
        1, 0, 0, 0, 0, 0, 0, 0,
        2, 0, 2, 0, 2, 0, 2, 0,
        2, 0, 2, 0, 2, 0, 2, 0,
@@ -334,7 +334,7 @@ static const u8 diffserv4[] = {
 };
 
 static const u8 diffserv3[] = {
-       0, 0, 0, 0, 2, 0, 0, 0,
+       0, 1, 0, 0, 2, 0, 0, 0,
        1, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
@@ -592,7 +592,7 @@ static bool cake_update_flowkeys(struct flow_keys *keys,
        bool rev = !skb->_nfct, upd = false;
        __be32 ip;
 
-       if (tc_skb_protocol(skb) != htons(ETH_P_IP))
+       if (skb_protocol(skb, true) != htons(ETH_P_IP))
                return false;
 
        if (!nf_ct_get_tuple_skb(&tuple, skb))
@@ -1551,32 +1551,51 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
        return idx + (tin << 16);
 }
 
-static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
+static u8 cake_handle_diffserv(struct sk_buff *skb, bool wash)
 {
-       int wlen = skb_network_offset(skb);
+       const int offset = skb_network_offset(skb);
+       u16 *buf, buf_;
        u8 dscp;
 
-       switch (tc_skb_protocol(skb)) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
-               wlen += sizeof(struct iphdr);
-               if (!pskb_may_pull(skb, wlen) ||
-                   skb_try_make_writable(skb, wlen))
+               buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
+               if (unlikely(!buf))
                        return 0;
 
-               dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
-               if (wash && dscp)
+               /* ToS is in the second byte of iphdr */
+               dscp = ipv4_get_dsfield((struct iphdr *)buf) >> 2;
+
+               if (wash && dscp) {
+                       const int wlen = offset + sizeof(struct iphdr);
+
+                       if (!pskb_may_pull(skb, wlen) ||
+                           skb_try_make_writable(skb, wlen))
+                               return 0;
+
                        ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
+               }
+
                return dscp;
 
        case htons(ETH_P_IPV6):
-               wlen += sizeof(struct ipv6hdr);
-               if (!pskb_may_pull(skb, wlen) ||
-                   skb_try_make_writable(skb, wlen))
+               buf = skb_header_pointer(skb, offset, sizeof(buf_), &buf_);
+               if (unlikely(!buf))
                        return 0;
 
-               dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
-               if (wash && dscp)
+               /* Traffic class is in the first and second bytes of ipv6hdr */
+               dscp = ipv6_get_dsfield((struct ipv6hdr *)buf) >> 2;
+
+               if (wash && dscp) {
+                       const int wlen = offset + sizeof(struct ipv6hdr);
+
+                       if (!pskb_may_pull(skb, wlen) ||
+                           skb_try_make_writable(skb, wlen))
+                               return 0;
+
                        ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
+               }
+
                return dscp;
 
        case htons(ETH_P_ARP):
@@ -1593,14 +1612,17 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
 {
        struct cake_sched_data *q = qdisc_priv(sch);
        u32 tin, mark;
+       bool wash;
        u8 dscp;
 
        /* Tin selection: Default to diffserv-based selection, allow overriding
-        * using firewall marks or skb->priority.
+        * using firewall marks or skb->priority. Call DSCP parsing early if
+        * wash is enabled, otherwise defer to below to skip unneeded parsing.
         */
-       dscp = cake_handle_diffserv(skb,
-                                   q->rate_flags & CAKE_FLAG_WASH);
        mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
+       wash = !!(q->rate_flags & CAKE_FLAG_WASH);
+       if (wash)
+               dscp = cake_handle_diffserv(skb, wash);
 
        if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
                tin = 0;
@@ -1614,6 +1636,8 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
                tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
 
        else {
+               if (!wash)
+                       dscp = cake_handle_diffserv(skb, wash);
                tin = q->tin_index[dscp];
 
                if (unlikely(tin >= q->tin_cnt))
@@ -2691,7 +2715,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
        qdisc_watchdog_init(&q->watchdog, sch);
 
        if (opt) {
-               int err = cake_change(sch, opt, extack);
+               err = cake_change(sch, opt, extack);
 
                if (err)
                        return err;
@@ -3008,7 +3032,7 @@ static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                        PUT_STAT_S32(BLUE_TIMER_US,
                                     ktime_to_us(
                                             ktime_sub(now,
-                                                    flow->cvars.blue_timer)));
+                                                      flow->cvars.blue_timer)));
                }
                if (flow->cvars.dropping) {
                        PUT_STAT_S32(DROP_NEXT_US,
index 39b427d..b2130df 100644 (file)
@@ -250,7 +250,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
                case TC_ACT_STOLEN:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-                       /* fall through */
+                       fallthrough;
                case TC_ACT_SHOT:
                        return NULL;
                case TC_ACT_RECLASSIFY:
index 07a2b0b..dde5646 100644 (file)
@@ -324,7 +324,7 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
                case TC_ACT_STOLEN:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-                       /* fall through */
+                       fallthrough;
                case TC_ACT_SHOT:
                        return NULL;
                }
index 05605b3..2b88710 100644 (file)
@@ -210,7 +210,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        if (p->set_tc_index) {
                int wlen = skb_network_offset(skb);
 
-               switch (tc_skb_protocol(skb)) {
+               switch (skb_protocol(skb, true)) {
                case htons(ETH_P_IP):
                        wlen += sizeof(struct iphdr);
                        if (!pskb_may_pull(skb, wlen) ||
@@ -303,7 +303,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
        index = skb->tc_index & (p->indices - 1);
        pr_debug("index %d->%d\n", skb->tc_index, index);
 
-       switch (tc_skb_protocol(skb)) {
+       switch (skb_protocol(skb, true)) {
        case htons(ETH_P_IP):
                ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
                                    p->mv[index].value);
@@ -320,7 +320,7 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
                 */
                if (p->mv[index].mask != 0xff || p->mv[index].value)
                        pr_warn("%s: unsupported protocol %d\n",
-                               __func__, ntohs(tc_skb_protocol(skb)));
+                               __func__, ntohs(skb_protocol(skb, true)));
                break;
        }
 
index a87e915..c1e84d1 100644 (file)
@@ -397,7 +397,7 @@ static struct ets_class *ets_classify(struct sk_buff *skb, struct Qdisc *sch,
                case TC_ACT_QUEUED:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-                       /* fall through */
+                       fallthrough;
                case TC_ACT_SHOT:
                        return NULL;
                }
index 8f06a80..2fb76fc 100644 (file)
@@ -1075,3 +1075,4 @@ module_init(fq_module_init)
 module_exit(fq_module_exit)
 MODULE_AUTHOR("Eric Dumazet");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Fair Queue Packet Scheduler");
index 436160b..3106653 100644 (file)
@@ -99,7 +99,7 @@ static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
                case TC_ACT_QUEUED:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-                       /* fall through */
+                       fallthrough;
                case TC_ACT_SHOT:
                        return 0;
                }
@@ -721,3 +721,4 @@ module_init(fq_codel_module_init)
 module_exit(fq_codel_module_exit)
 MODULE_AUTHOR("Eric Dumazet");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Fair Queue CoDel discipline");
index fb760ce..f98c740 100644 (file)
@@ -102,7 +102,7 @@ static unsigned int fq_pie_classify(struct sk_buff *skb, struct Qdisc *sch,
                case TC_ACT_QUEUED:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-                       /* fall through */
+                       fallthrough;
                case TC_ACT_SHOT:
                        return 0;
                }
index 433f219..0f5f121 100644 (file)
@@ -1136,7 +1136,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
                case TC_ACT_STOLEN:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-                       /* fall through */
+                       fallthrough;
                case TC_ACT_SHOT:
                        return NULL;
                }
index be35f03..420ede8 100644 (file)
@@ -721,3 +721,4 @@ module_exit(hhf_module_exit)
 MODULE_AUTHOR("Terry Lam");
 MODULE_AUTHOR("Nandita Dukkipati");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Heavy-Hitter Filter (HHF)");
index 8184c87..ba37def 100644 (file)
@@ -239,7 +239,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
                case TC_ACT_STOLEN:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-                       /* fall through */
+                       fallthrough;
                case TC_ACT_SHOT:
                        return NULL;
                }
index 1330ad2..5c27b42 100644 (file)
@@ -43,7 +43,7 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
        case TC_ACT_QUEUED:
        case TC_ACT_TRAP:
                *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-               /* fall through */
+               fallthrough;
        case TC_ACT_SHOT:
                return NULL;
        }
index 6479417..3eabb87 100644 (file)
@@ -46,7 +46,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
                case TC_ACT_QUEUED:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-                       /* fall through */
+                       fallthrough;
                case TC_ACT_SHOT:
                        return NULL;
                }
index 0b05ac7..6335230 100644 (file)
@@ -699,7 +699,7 @@ static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
                case TC_ACT_STOLEN:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-                       /* fall through */
+                       fallthrough;
                case TC_ACT_SHOT:
                        return NULL;
                }
index 555a1b9..4cc0ad0 100644 (file)
@@ -46,6 +46,8 @@ struct red_sched_data {
        struct red_vars         vars;
        struct red_stats        stats;
        struct Qdisc            *qdisc;
+       struct tcf_qevent       qe_early_drop;
+       struct tcf_qevent       qe_mark;
 };
 
 #define TC_RED_SUPPORTED_FLAGS (TC_RED_HISTORIC_FLAGS | TC_RED_NODROP)
@@ -92,6 +94,9 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
                if (INET_ECN_set_ce(skb)) {
                        q->stats.prob_mark++;
+                       skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
+                       if (!skb)
+                               return NET_XMIT_CN | ret;
                } else if (!red_use_nodrop(q)) {
                        q->stats.prob_drop++;
                        goto congestion_drop;
@@ -109,6 +114,9 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 
                if (INET_ECN_set_ce(skb)) {
                        q->stats.forced_mark++;
+                       skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
+                       if (!skb)
+                               return NET_XMIT_CN | ret;
                } else if (!red_use_nodrop(q)) {
                        q->stats.forced_drop++;
                        goto congestion_drop;
@@ -129,6 +137,10 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        return ret;
 
 congestion_drop:
+       skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
+       if (!skb)
+               return NET_XMIT_CN | ret;
+
        qdisc_drop(skb, sch, to_free);
        return NET_XMIT_CN;
 }
@@ -202,6 +214,8 @@ static void red_destroy(struct Qdisc *sch)
 {
        struct red_sched_data *q = qdisc_priv(sch);
 
+       tcf_qevent_destroy(&q->qe_mark, sch);
+       tcf_qevent_destroy(&q->qe_early_drop, sch);
        del_timer_sync(&q->adapt_timer);
        red_offload(sch, false);
        qdisc_put(q->qdisc);
@@ -213,14 +227,15 @@ static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
        [TCA_RED_STAB]  = { .len = RED_STAB_SIZE },
        [TCA_RED_MAX_P] = { .type = NLA_U32 },
        [TCA_RED_FLAGS] = NLA_POLICY_BITFIELD32(TC_RED_SUPPORTED_FLAGS),
+       [TCA_RED_EARLY_DROP_BLOCK] = { .type = NLA_U32 },
+       [TCA_RED_MARK_BLOCK] = { .type = NLA_U32 },
 };
 
-static int red_change(struct Qdisc *sch, struct nlattr *opt,
-                     struct netlink_ext_ack *extack)
+static int __red_change(struct Qdisc *sch, struct nlattr **tb,
+                       struct netlink_ext_ack *extack)
 {
        struct Qdisc *old_child = NULL, *child = NULL;
        struct red_sched_data *q = qdisc_priv(sch);
-       struct nlattr *tb[TCA_RED_MAX + 1];
        struct nla_bitfield32 flags_bf;
        struct tc_red_qopt *ctl;
        unsigned char userbits;
@@ -228,14 +243,6 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
        int err;
        u32 max_P;
 
-       if (opt == NULL)
-               return -EINVAL;
-
-       err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
-                                         NULL);
-       if (err < 0)
-               return err;
-
        if (tb[TCA_RED_PARMS] == NULL ||
            tb[TCA_RED_STAB] == NULL)
                return -EINVAL;
@@ -323,11 +330,74 @@ static int red_init(struct Qdisc *sch, struct nlattr *opt,
                    struct netlink_ext_ack *extack)
 {
        struct red_sched_data *q = qdisc_priv(sch);
+       struct nlattr *tb[TCA_RED_MAX + 1];
+       int err;
+
+       if (!opt)
+               return -EINVAL;
+
+       err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
+                                         extack);
+       if (err < 0)
+               return err;
 
        q->qdisc = &noop_qdisc;
        q->sch = sch;
        timer_setup(&q->adapt_timer, red_adaptative_timer, 0);
-       return red_change(sch, opt, extack);
+
+       err = __red_change(sch, tb, extack);
+       if (err)
+               return err;
+
+       err = tcf_qevent_init(&q->qe_early_drop, sch,
+                             FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
+                             tb[TCA_RED_EARLY_DROP_BLOCK], extack);
+       if (err)
+               goto err_early_drop_init;
+
+       err = tcf_qevent_init(&q->qe_mark, sch,
+                             FLOW_BLOCK_BINDER_TYPE_RED_MARK,
+                             tb[TCA_RED_MARK_BLOCK], extack);
+       if (err)
+               goto err_mark_init;
+
+       return 0;
+
+err_mark_init:
+       tcf_qevent_destroy(&q->qe_early_drop, sch);
+err_early_drop_init:
+       del_timer_sync(&q->adapt_timer);
+       red_offload(sch, false);
+       qdisc_put(q->qdisc);
+       return err;
+}
+
+static int red_change(struct Qdisc *sch, struct nlattr *opt,
+                     struct netlink_ext_ack *extack)
+{
+       struct red_sched_data *q = qdisc_priv(sch);
+       struct nlattr *tb[TCA_RED_MAX + 1];
+       int err;
+
+       if (!opt)
+               return -EINVAL;
+
+       err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
+                                         extack);
+       if (err < 0)
+               return err;
+
+       err = tcf_qevent_validate_change(&q->qe_early_drop,
+                                        tb[TCA_RED_EARLY_DROP_BLOCK], extack);
+       if (err)
+               return err;
+
+       err = tcf_qevent_validate_change(&q->qe_mark,
+                                        tb[TCA_RED_MARK_BLOCK], extack);
+       if (err)
+               return err;
+
+       return __red_change(sch, tb, extack);
 }
 
 static int red_dump_offload_stats(struct Qdisc *sch)
@@ -371,7 +441,9 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
        if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
            nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P) ||
            nla_put_bitfield32(skb, TCA_RED_FLAGS,
-                              q->flags, TC_RED_SUPPORTED_FLAGS))
+                              q->flags, TC_RED_SUPPORTED_FLAGS) ||
+           tcf_qevent_dump(skb, TCA_RED_MARK_BLOCK, &q->qe_mark) ||
+           tcf_qevent_dump(skb, TCA_RED_EARLY_DROP_BLOCK, &q->qe_early_drop))
                goto nla_put_failure;
        return nla_nest_end(skb, opts);
 
index 4074c50..da047a3 100644 (file)
@@ -265,7 +265,7 @@ static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
                case TC_ACT_QUEUED:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-                       /* fall through */
+                       fallthrough;
                case TC_ACT_SHOT:
                        return false;
                }
index 5a6def5..cae5dbb 100644 (file)
@@ -186,7 +186,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
                case TC_ACT_QUEUED:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-                       /* fall through */
+                       fallthrough;
                case TC_ACT_SHOT:
                        return 0;
                }
index b1eb12d..e981992 100644 (file)
@@ -1108,11 +1108,10 @@ static void setup_txtime(struct taprio_sched *q,
 
 static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries)
 {
-       size_t size = sizeof(struct tc_taprio_sched_entry) * num_entries +
-                     sizeof(struct __tc_taprio_qopt_offload);
        struct __tc_taprio_qopt_offload *__offload;
 
-       __offload = kzalloc(size, GFP_KERNEL);
+       __offload = kzalloc(struct_size(__offload, offload.entries, num_entries),
+                           GFP_KERNEL);
        if (!__offload)
                return NULL;
 
index 689ef6f..2f1f0a3 100644 (file)
@@ -239,7 +239,7 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
                char haddr[MAX_ADDR_LEN];
 
                neigh_ha_snapshot(haddr, n, dev);
-               err = dev_hard_header(skb, dev, ntohs(tc_skb_protocol(skb)),
+               err = dev_hard_header(skb, dev, ntohs(skb_protocol(skb, false)),
                                      haddr, NULL, skb->len);
 
                if (err < 0)
index 7231513..8d73546 100644 (file)
@@ -1565,12 +1565,15 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
 int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
                                     enum sctp_scope scope, gfp_t gfp)
 {
+       struct sock *sk = asoc->base.sk;
        int flags;
 
        /* Use scoping rules to determine the subset of addresses from
         * the endpoint.
         */
-       flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
+       flags = (PF_INET6 == sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
+       if (!inet_v6_ipv6only(sk))
+               flags |= SCTP_ADDR4_ALLOWED;
        if (asoc->peer.ipv4_address)
                flags |= SCTP_ADDR4_PEERSUPP;
        if (asoc->peer.ipv6_address)
index 53bc615..701c5a4 100644 (file)
@@ -461,6 +461,7 @@ static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest,
                 * well as the remote peer.
                 */
                if ((((AF_INET == addr->sa.sa_family) &&
+                     (flags & SCTP_ADDR4_ALLOWED) &&
                      (flags & SCTP_ADDR4_PEERSUPP))) ||
                    (((AF_INET6 == addr->sa.sa_family) &&
                      (flags & SCTP_ADDR6_ALLOWED) &&
index ccfa0ab..aea2a98 100644 (file)
@@ -1033,8 +1033,6 @@ static const struct proto_ops inet6_seqpacket_ops = {
        .mmap              = sock_no_mmap,
 #ifdef CONFIG_COMPAT
        .compat_ioctl      = inet6_compat_ioctl,
-       .compat_setsockopt = compat_sock_common_setsockopt,
-       .compat_getsockopt = compat_sock_common_getsockopt,
 #endif
 };
 
@@ -1089,10 +1087,6 @@ static struct sctp_af sctp_af_inet6 = {
        .net_header_len    = sizeof(struct ipv6hdr),
        .sockaddr_len      = sizeof(struct sockaddr_in6),
        .ip_options_len    = sctp_v6_ip_options_len,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_ipv6_setsockopt,
-       .compat_getsockopt = compat_ipv6_getsockopt,
-#endif
 };
 
 static struct sctp_pf sctp_pf_inet6 = {
index 092d1af..d19db22 100644 (file)
@@ -148,7 +148,8 @@ int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
                 * sock as well as the remote peer.
                 */
                if (addr->a.sa.sa_family == AF_INET &&
-                   !(copy_flags & SCTP_ADDR4_PEERSUPP))
+                   (!(copy_flags & SCTP_ADDR4_ALLOWED) ||
+                    !(copy_flags & SCTP_ADDR4_PEERSUPP)))
                        continue;
                if (addr->a.sa.sa_family == AF_INET6 &&
                    (!(copy_flags & SCTP_ADDR6_ALLOWED) ||
@@ -1035,10 +1036,6 @@ static const struct proto_ops inet_seqpacket_ops = {
        .recvmsg           = inet_recvmsg,
        .mmap              = sock_no_mmap,
        .sendpage          = sock_no_sendpage,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_sock_common_setsockopt,
-       .compat_getsockopt = compat_sock_common_getsockopt,
-#endif
 };
 
 /* Registration with AF_INET family.  */
@@ -1092,10 +1089,6 @@ static struct sctp_af sctp_af_inet = {
        .net_header_len    = sizeof(struct iphdr),
        .sockaddr_len      = sizeof(struct sockaddr_in),
        .ip_options_len    = sctp_v4_ip_options_len,
-#ifdef CONFIG_COMPAT
-       .compat_setsockopt = compat_ip_setsockopt,
-       .compat_getsockopt = compat_ip_getsockopt,
-#endif
 };
 
 struct sctp_pf *sctp_get_pf_specific(sa_family_t family)
@@ -1374,15 +1367,15 @@ static struct pernet_operations sctp_ctrlsock_ops = {
 /* Initialize the universe into something sensible.  */
 static __init int sctp_init(void)
 {
-       int i;
-       int status = -EINVAL;
-       unsigned long goal;
-       unsigned long limit;
        unsigned long nr_pages = totalram_pages();
+       unsigned long limit;
+       unsigned long goal;
+       int max_entry_order;
+       int num_entries;
        int max_share;
+       int status;
        int order;
-       int num_entries;
-       int max_entry_order;
+       int i;
 
        sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));
 
index d57e1a0..ec1fba1 100644 (file)
@@ -979,9 +979,8 @@ int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
  *
  * Returns 0 if ok, <0 errno code on error.
  */
-static int sctp_setsockopt_bindx_kernel(struct sock *sk,
-                                       struct sockaddr *addrs, int addrs_size,
-                                       int op)
+static int sctp_setsockopt_bindx(struct sock *sk, struct sockaddr *addrs,
+                                int addrs_size, int op)
 {
        int err;
        int addrcnt = 0;
@@ -991,7 +990,7 @@ static int sctp_setsockopt_bindx_kernel(struct sock *sk,
        struct sctp_af *af;
 
        pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n",
-                __func__, sk, addrs, addrs_size, op);
+                __func__, sk, addr_buf, addrs_size, op);
 
        if (unlikely(addrs_size <= 0))
                return -EINVAL;
@@ -1037,29 +1036,13 @@ static int sctp_setsockopt_bindx_kernel(struct sock *sk,
        }
 }
 
-static int sctp_setsockopt_bindx(struct sock *sk,
-                                struct sockaddr __user *addrs,
-                                int addrs_size, int op)
-{
-       struct sockaddr *kaddrs;
-       int err;
-
-       kaddrs = memdup_user(addrs, addrs_size);
-       if (IS_ERR(kaddrs))
-               return PTR_ERR(kaddrs);
-       err = sctp_setsockopt_bindx_kernel(sk, kaddrs, addrs_size, op);
-       kfree(kaddrs);
-       return err;
-}
-
 static int sctp_bind_add(struct sock *sk, struct sockaddr *addrs,
                int addrlen)
 {
        int err;
 
        lock_sock(sk);
-       err = sctp_setsockopt_bindx_kernel(sk, addrs, addrlen,
-                                          SCTP_BINDX_ADD_ADDR);
+       err = sctp_setsockopt_bindx(sk, addrs, addrlen, SCTP_BINDX_ADD_ADDR);
        release_sock(sk);
        return err;
 }
@@ -1303,36 +1286,29 @@ out_free:
  * it.
  *
  * sk        The sk of the socket
- * addrs     The pointer to the addresses in user land
+ * addrs     The pointer to the addresses
  * addrssize Size of the addrs buffer
  *
  * Returns >=0 if ok, <0 errno code on error.
  */
-static int __sctp_setsockopt_connectx(struct sock *sk,
-                                     struct sockaddr __user *addrs,
-                                     int addrs_size,
-                                     sctp_assoc_t *assoc_id)
+static int __sctp_setsockopt_connectx(struct sock *sk, struct sockaddr *kaddrs,
+                                     int addrs_size, sctp_assoc_t *assoc_id)
 {
-       struct sockaddr *kaddrs;
        int err = 0, flags = 0;
 
        pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
-                __func__, sk, addrs, addrs_size);
+                __func__, sk, kaddrs, addrs_size);
 
        /* make sure the 1st addr's sa_family is accessible later */
        if (unlikely(addrs_size < sizeof(sa_family_t)))
                return -EINVAL;
 
-       kaddrs = memdup_user(addrs, addrs_size);
-       if (IS_ERR(kaddrs))
-               return PTR_ERR(kaddrs);
-
        /* Allow security module to validate connectx addresses. */
        err = security_sctp_bind_connect(sk, SCTP_SOCKOPT_CONNECTX,
                                         (struct sockaddr *)kaddrs,
                                          addrs_size);
        if (err)
-               goto out_free;
+               return err;
 
        /* in-kernel sockets don't generally have a file allocated to them
         * if all they do is call sock_create_kern().
@@ -1340,12 +1316,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
        if (sk->sk_socket->file)
                flags = sk->sk_socket->file->f_flags;
 
-       err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
-
-out_free:
-       kfree(kaddrs);
-
-       return err;
+       return __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
 }
 
 /*
@@ -1353,10 +1324,10 @@ out_free:
  * to the option that doesn't provide association id.
  */
 static int sctp_setsockopt_connectx_old(struct sock *sk,
-                                       struct sockaddr __user *addrs,
+                                       struct sockaddr *kaddrs,
                                        int addrs_size)
 {
-       return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
+       return __sctp_setsockopt_connectx(sk, kaddrs, addrs_size, NULL);
 }
 
 /*
@@ -1366,13 +1337,13 @@ static int sctp_setsockopt_connectx_old(struct sock *sk,
  * always positive.
  */
 static int sctp_setsockopt_connectx(struct sock *sk,
-                                   struct sockaddr __user *addrs,
+                                   struct sockaddr *kaddrs,
                                    int addrs_size)
 {
        sctp_assoc_t assoc_id = 0;
        int err = 0;
 
-       err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);
+       err = __sctp_setsockopt_connectx(sk, kaddrs, addrs_size, &assoc_id);
 
        if (err)
                return err;
@@ -1402,6 +1373,7 @@ static int sctp_getsockopt_connectx3(struct sock *sk, int len,
 {
        struct sctp_getaddrs_old param;
        sctp_assoc_t assoc_id = 0;
+       struct sockaddr *kaddrs;
        int err = 0;
 
 #ifdef CONFIG_COMPAT
@@ -1425,9 +1397,12 @@ static int sctp_getsockopt_connectx3(struct sock *sk, int len,
                        return -EFAULT;
        }
 
-       err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
-                                        param.addrs, param.addr_num,
-                                        &assoc_id);
+       kaddrs = memdup_user(param.addrs, param.addr_num);
+       if (IS_ERR(kaddrs))
+               return PTR_ERR(kaddrs);
+
+       err = __sctp_setsockopt_connectx(sk, kaddrs, param.addr_num, &assoc_id);
+       kfree(kaddrs);
        if (err == 0 || err == -EINPROGRESS) {
                if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
                        return -EFAULT;
@@ -2209,28 +2184,18 @@ out:
  * exceeds the current PMTU size, the message will NOT be sent and
  * instead a error will be indicated to the user.
  */
-static int sctp_setsockopt_disable_fragments(struct sock *sk,
-                                            char __user *optval,
+static int sctp_setsockopt_disable_fragments(struct sock *sk, int *val,
                                             unsigned int optlen)
 {
-       int val;
-
        if (optlen < sizeof(int))
                return -EINVAL;
-
-       if (get_user(val, (int __user *)optval))
-               return -EFAULT;
-
-       sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1;
-
+       sctp_sk(sk)->disable_fragments = (*val == 0) ? 0 : 1;
        return 0;
 }
 
-static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
+static int sctp_setsockopt_events(struct sock *sk, __u8 *sn_type,
                                  unsigned int optlen)
 {
-       struct sctp_event_subscribe subscribe;
-       __u8 *sn_type = (__u8 *)&subscribe;
        struct sctp_sock *sp = sctp_sk(sk);
        struct sctp_association *asoc;
        int i;
@@ -2238,9 +2203,6 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
        if (optlen > sizeof(struct sctp_event_subscribe))
                return -EINVAL;
 
-       if (copy_from_user(&subscribe, optval, optlen))
-               return -EFAULT;
-
        for (i = 0; i < optlen; i++)
                sctp_ulpevent_type_set(&sp->subscribe, SCTP_SN_TYPE_BASE + i,
                                       sn_type[i]);
@@ -2280,7 +2242,7 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
  * integer defining the number of seconds of idle time before an
  * association is closed.
  */
-static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
+static int sctp_setsockopt_autoclose(struct sock *sk, u32 *optval,
                                     unsigned int optlen)
 {
        struct sctp_sock *sp = sctp_sk(sk);
@@ -2291,9 +2253,8 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
                return -EOPNOTSUPP;
        if (optlen != sizeof(int))
                return -EINVAL;
-       if (copy_from_user(&sp->autoclose, optval, optlen))
-               return -EFAULT;
 
+       sp->autoclose = *optval;
        if (sp->autoclose > net->sctp.max_autoclose)
                sp->autoclose = net->sctp.max_autoclose;
 
@@ -2628,48 +2589,42 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
 }
 
 static int sctp_setsockopt_peer_addr_params(struct sock *sk,
-                                           char __user *optval,
+                                           struct sctp_paddrparams *params,
                                            unsigned int optlen)
 {
-       struct sctp_paddrparams  params;
        struct sctp_transport   *trans = NULL;
        struct sctp_association *asoc = NULL;
        struct sctp_sock        *sp = sctp_sk(sk);
        int error;
        int hb_change, pmtud_change, sackdelay_change;
 
-       if (optlen == sizeof(params)) {
-               if (copy_from_user(&params, optval, optlen))
-                       return -EFAULT;
-       } else if (optlen == ALIGN(offsetof(struct sctp_paddrparams,
+       if (optlen == ALIGN(offsetof(struct sctp_paddrparams,
                                            spp_ipv6_flowlabel), 4)) {
-               if (copy_from_user(&params, optval, optlen))
-                       return -EFAULT;
-               if (params.spp_flags & (SPP_DSCP | SPP_IPV6_FLOWLABEL))
+               if (params->spp_flags & (SPP_DSCP | SPP_IPV6_FLOWLABEL))
                        return -EINVAL;
-       } else {
+       } else if (optlen != sizeof(*params)) {
                return -EINVAL;
        }
 
        /* Validate flags and value parameters. */
-       hb_change        = params.spp_flags & SPP_HB;
-       pmtud_change     = params.spp_flags & SPP_PMTUD;
-       sackdelay_change = params.spp_flags & SPP_SACKDELAY;
+       hb_change        = params->spp_flags & SPP_HB;
+       pmtud_change     = params->spp_flags & SPP_PMTUD;
+       sackdelay_change = params->spp_flags & SPP_SACKDELAY;
 
        if (hb_change        == SPP_HB ||
            pmtud_change     == SPP_PMTUD ||
            sackdelay_change == SPP_SACKDELAY ||
-           params.spp_sackdelay > 500 ||
-           (params.spp_pathmtu &&
-            params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
+           params->spp_sackdelay > 500 ||
+           (params->spp_pathmtu &&
+            params->spp_pathmtu < SCTP_DEFAULT_MINSEGMENT))
                return -EINVAL;
 
        /* If an address other than INADDR_ANY is specified, and
         * no transport is found, then the request is invalid.
         */
-       if (!sctp_is_any(sk, (union sctp_addr *)&params.spp_address)) {
-               trans = sctp_addr_id2transport(sk, &params.spp_address,
-                                              params.spp_assoc_id);
+       if (!sctp_is_any(sk, (union sctp_addr *)&params->spp_address)) {
+               trans = sctp_addr_id2transport(sk, &params->spp_address,
+                                              params->spp_assoc_id);
                if (!trans)
                        return -EINVAL;
        }
@@ -2678,19 +2633,19 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
         * socket is a one to many style socket, and an association
         * was not found, then the id was invalid.
         */
-       asoc = sctp_id2assoc(sk, params.spp_assoc_id);
-       if (!asoc && params.spp_assoc_id != SCTP_FUTURE_ASSOC &&
+       asoc = sctp_id2assoc(sk, params->spp_assoc_id);
+       if (!asoc && params->spp_assoc_id != SCTP_FUTURE_ASSOC &&
            sctp_style(sk, UDP))
                return -EINVAL;
 
        /* Heartbeat demand can only be sent on a transport or
         * association, but not a socket.
         */
-       if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc)
+       if (params->spp_flags & SPP_HB_DEMAND && !trans && !asoc)
                return -EINVAL;
 
        /* Process parameters. */
-       error = sctp_apply_peer_addr_params(&params, trans, asoc, sp,
+       error = sctp_apply_peer_addr_params(params, trans, asoc, sp,
                                            hb_change, pmtud_change,
                                            sackdelay_change);
 
@@ -2703,7 +2658,7 @@ static int sctp_setsockopt_peer_addr_params(struct sock *sk,
        if (!trans && asoc) {
                list_for_each_entry(trans, &asoc->peer.transport_addr_list,
                                transports) {
-                       sctp_apply_peer_addr_params(&params, trans, asoc, sp,
+                       sctp_apply_peer_addr_params(params, trans, asoc, sp,
                                                    hb_change, pmtud_change,
                                                    sackdelay_change);
                }
@@ -2794,83 +2749,86 @@ static void sctp_apply_asoc_delayed_ack(struct sctp_sack_info *params,
  *    timer to expire.  The default value for this is 2, setting this
  *    value to 1 will disable the delayed sack algorithm.
  */
-
-static int sctp_setsockopt_delayed_ack(struct sock *sk,
-                                      char __user *optval, unsigned int optlen)
+static int __sctp_setsockopt_delayed_ack(struct sock *sk,
+                                        struct sctp_sack_info *params)
 {
        struct sctp_sock *sp = sctp_sk(sk);
        struct sctp_association *asoc;
-       struct sctp_sack_info params;
-
-       if (optlen == sizeof(struct sctp_sack_info)) {
-               if (copy_from_user(&params, optval, optlen))
-                       return -EFAULT;
-
-               if (params.sack_delay == 0 && params.sack_freq == 0)
-                       return 0;
-       } else if (optlen == sizeof(struct sctp_assoc_value)) {
-               pr_warn_ratelimited(DEPRECATED
-                                   "%s (pid %d) "
-                                   "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
-                                   "Use struct sctp_sack_info instead\n",
-                                   current->comm, task_pid_nr(current));
-               if (copy_from_user(&params, optval, optlen))
-                       return -EFAULT;
-
-               if (params.sack_delay == 0)
-                       params.sack_freq = 1;
-               else
-                       params.sack_freq = 0;
-       } else
-               return -EINVAL;
 
        /* Validate value parameter. */
-       if (params.sack_delay > 500)
+       if (params->sack_delay > 500)
                return -EINVAL;
 
        /* Get association, if sack_assoc_id != SCTP_FUTURE_ASSOC and the
         * socket is a one to many style socket, and an association
         * was not found, then the id was invalid.
         */
-       asoc = sctp_id2assoc(sk, params.sack_assoc_id);
-       if (!asoc && params.sack_assoc_id > SCTP_ALL_ASSOC &&
+       asoc = sctp_id2assoc(sk, params->sack_assoc_id);
+       if (!asoc && params->sack_assoc_id > SCTP_ALL_ASSOC &&
            sctp_style(sk, UDP))
                return -EINVAL;
 
        if (asoc) {
-               sctp_apply_asoc_delayed_ack(&params, asoc);
+               sctp_apply_asoc_delayed_ack(params, asoc);
 
                return 0;
        }
 
        if (sctp_style(sk, TCP))
-               params.sack_assoc_id = SCTP_FUTURE_ASSOC;
+               params->sack_assoc_id = SCTP_FUTURE_ASSOC;
 
-       if (params.sack_assoc_id == SCTP_FUTURE_ASSOC ||
-           params.sack_assoc_id == SCTP_ALL_ASSOC) {
-               if (params.sack_delay) {
-                       sp->sackdelay = params.sack_delay;
+       if (params->sack_assoc_id == SCTP_FUTURE_ASSOC ||
+           params->sack_assoc_id == SCTP_ALL_ASSOC) {
+               if (params->sack_delay) {
+                       sp->sackdelay = params->sack_delay;
                        sp->param_flags =
                                sctp_spp_sackdelay_enable(sp->param_flags);
                }
-               if (params.sack_freq == 1) {
+               if (params->sack_freq == 1) {
                        sp->param_flags =
                                sctp_spp_sackdelay_disable(sp->param_flags);
-               } else if (params.sack_freq > 1) {
-                       sp->sackfreq = params.sack_freq;
+               } else if (params->sack_freq > 1) {
+                       sp->sackfreq = params->sack_freq;
                        sp->param_flags =
                                sctp_spp_sackdelay_enable(sp->param_flags);
                }
        }
 
-       if (params.sack_assoc_id == SCTP_CURRENT_ASSOC ||
-           params.sack_assoc_id == SCTP_ALL_ASSOC)
+       if (params->sack_assoc_id == SCTP_CURRENT_ASSOC ||
+           params->sack_assoc_id == SCTP_ALL_ASSOC)
                list_for_each_entry(asoc, &sp->ep->asocs, asocs)
-                       sctp_apply_asoc_delayed_ack(&params, asoc);
+                       sctp_apply_asoc_delayed_ack(params, asoc);
 
        return 0;
 }
 
+static int sctp_setsockopt_delayed_ack(struct sock *sk,
+                                      struct sctp_sack_info *params,
+                                      unsigned int optlen)
+{
+       if (optlen == sizeof(struct sctp_assoc_value)) {
+               struct sctp_assoc_value *v = (struct sctp_assoc_value *)params;
+               struct sctp_sack_info p;
+
+               pr_warn_ratelimited(DEPRECATED
+                                   "%s (pid %d) "
+                                   "Use of struct sctp_assoc_value in delayed_ack socket option.\n"
+                                   "Use struct sctp_sack_info instead\n",
+                                   current->comm, task_pid_nr(current));
+
+               p.sack_assoc_id = v->assoc_id;
+               p.sack_delay = v->assoc_value;
+               p.sack_freq = v->assoc_value ? 0 : 1;
+               return __sctp_setsockopt_delayed_ack(sk, &p);
+       }
+
+       if (optlen != sizeof(struct sctp_sack_info))
+               return -EINVAL;
+       if (params->sack_delay == 0 && params->sack_freq == 0)
+               return 0;
+       return __sctp_setsockopt_delayed_ack(sk, params);
+}
+
 /* 7.1.3 Initialization Parameters (SCTP_INITMSG)
  *
  * Applications can specify protocol parameters for the default association
@@ -2882,24 +2840,22 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk,
  * by the change).  With TCP-style sockets, this option is inherited by
  * sockets derived from a listener socket.
  */
-static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen)
+static int sctp_setsockopt_initmsg(struct sock *sk, struct sctp_initmsg *sinit,
+                                  unsigned int optlen)
 {
-       struct sctp_initmsg sinit;
        struct sctp_sock *sp = sctp_sk(sk);
 
        if (optlen != sizeof(struct sctp_initmsg))
                return -EINVAL;
-       if (copy_from_user(&sinit, optval, optlen))
-               return -EFAULT;
 
-       if (sinit.sinit_num_ostreams)
-               sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams;
-       if (sinit.sinit_max_instreams)
-               sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams;
-       if (sinit.sinit_max_attempts)
-               sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts;
-       if (sinit.sinit_max_init_timeo)
-               sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo;
+       if (sinit->sinit_num_ostreams)
+               sp->initmsg.sinit_num_ostreams = sinit->sinit_num_ostreams;
+       if (sinit->sinit_max_instreams)
+               sp->initmsg.sinit_max_instreams = sinit->sinit_max_instreams;
+       if (sinit->sinit_max_attempts)
+               sp->initmsg.sinit_max_attempts = sinit->sinit_max_attempts;
+       if (sinit->sinit_max_init_timeo)
+               sp->initmsg.sinit_max_init_timeo = sinit->sinit_max_init_timeo;
 
        return 0;
 }
@@ -2919,57 +2875,54 @@ static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigne
  *   to this call if the caller is using the UDP model.
  */
 static int sctp_setsockopt_default_send_param(struct sock *sk,
-                                             char __user *optval,
+                                             struct sctp_sndrcvinfo *info,
                                              unsigned int optlen)
 {
        struct sctp_sock *sp = sctp_sk(sk);
        struct sctp_association *asoc;
-       struct sctp_sndrcvinfo info;
 
-       if (optlen != sizeof(info))
+       if (optlen != sizeof(*info))
                return -EINVAL;
-       if (copy_from_user(&info, optval, optlen))
-               return -EFAULT;
-       if (info.sinfo_flags &
+       if (info->sinfo_flags &
            ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
              SCTP_ABORT | SCTP_EOF))
                return -EINVAL;
 
-       asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
-       if (!asoc && info.sinfo_assoc_id > SCTP_ALL_ASSOC &&
+       asoc = sctp_id2assoc(sk, info->sinfo_assoc_id);
+       if (!asoc && info->sinfo_assoc_id > SCTP_ALL_ASSOC &&
            sctp_style(sk, UDP))
                return -EINVAL;
 
        if (asoc) {
-               asoc->default_stream = info.sinfo_stream;
-               asoc->default_flags = info.sinfo_flags;
-               asoc->default_ppid = info.sinfo_ppid;
-               asoc->default_context = info.sinfo_context;
-               asoc->default_timetolive = info.sinfo_timetolive;
+               asoc->default_stream = info->sinfo_stream;
+               asoc->default_flags = info->sinfo_flags;
+               asoc->default_ppid = info->sinfo_ppid;
+               asoc->default_context = info->sinfo_context;
+               asoc->default_timetolive = info->sinfo_timetolive;
 
                return 0;
        }
 
        if (sctp_style(sk, TCP))
-               info.sinfo_assoc_id = SCTP_FUTURE_ASSOC;
+               info->sinfo_assoc_id = SCTP_FUTURE_ASSOC;
 
-       if (info.sinfo_assoc_id == SCTP_FUTURE_ASSOC ||
-           info.sinfo_assoc_id == SCTP_ALL_ASSOC) {
-               sp->default_stream = info.sinfo_stream;
-               sp->default_flags = info.sinfo_flags;
-               sp->default_ppid = info.sinfo_ppid;
-               sp->default_context = info.sinfo_context;
-               sp->default_timetolive = info.sinfo_timetolive;
+       if (info->sinfo_assoc_id == SCTP_FUTURE_ASSOC ||
+           info->sinfo_assoc_id == SCTP_ALL_ASSOC) {
+               sp->default_stream = info->sinfo_stream;
+               sp->default_flags = info->sinfo_flags;
+               sp->default_ppid = info->sinfo_ppid;
+               sp->default_context = info->sinfo_context;
+               sp->default_timetolive = info->sinfo_timetolive;
        }
 
-       if (info.sinfo_assoc_id == SCTP_CURRENT_ASSOC ||
-           info.sinfo_assoc_id == SCTP_ALL_ASSOC) {
+       if (info->sinfo_assoc_id == SCTP_CURRENT_ASSOC ||
+           info->sinfo_assoc_id == SCTP_ALL_ASSOC) {
                list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
-                       asoc->default_stream = info.sinfo_stream;
-                       asoc->default_flags = info.sinfo_flags;
-                       asoc->default_ppid = info.sinfo_ppid;
-                       asoc->default_context = info.sinfo_context;
-                       asoc->default_timetolive = info.sinfo_timetolive;
+                       asoc->default_stream = info->sinfo_stream;
+                       asoc->default_flags = info->sinfo_flags;
+                       asoc->default_ppid = info->sinfo_ppid;
+                       asoc->default_context = info->sinfo_context;
+                       asoc->default_timetolive = info->sinfo_timetolive;
                }
        }
 
@@ -2980,54 +2933,51 @@ static int sctp_setsockopt_default_send_param(struct sock *sk,
  * (SCTP_DEFAULT_SNDINFO)
  */
 static int sctp_setsockopt_default_sndinfo(struct sock *sk,
-                                          char __user *optval,
+                                          struct sctp_sndinfo *info,
                                           unsigned int optlen)
 {
        struct sctp_sock *sp = sctp_sk(sk);
        struct sctp_association *asoc;
-       struct sctp_sndinfo info;
 
-       if (optlen != sizeof(info))
+       if (optlen != sizeof(*info))
                return -EINVAL;
-       if (copy_from_user(&info, optval, optlen))
-               return -EFAULT;
-       if (info.snd_flags &
+       if (info->snd_flags &
            ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
              SCTP_ABORT | SCTP_EOF))
                return -EINVAL;
 
-       asoc = sctp_id2assoc(sk, info.snd_assoc_id);
-       if (!asoc && info.snd_assoc_id > SCTP_ALL_ASSOC &&
+       asoc = sctp_id2assoc(sk, info->snd_assoc_id);
+       if (!asoc && info->snd_assoc_id > SCTP_ALL_ASSOC &&
            sctp_style(sk, UDP))
                return -EINVAL;
 
        if (asoc) {
-               asoc->default_stream = info.snd_sid;
-               asoc->default_flags = info.snd_flags;
-               asoc->default_ppid = info.snd_ppid;
-               asoc->default_context = info.snd_context;
+               asoc->default_stream = info->snd_sid;
+               asoc->default_flags = info->snd_flags;
+               asoc->default_ppid = info->snd_ppid;
+               asoc->default_context = info->snd_context;
 
                return 0;
        }
 
        if (sctp_style(sk, TCP))
-               info.snd_assoc_id = SCTP_FUTURE_ASSOC;
+               info->snd_assoc_id = SCTP_FUTURE_ASSOC;
 
-       if (info.snd_assoc_id == SCTP_FUTURE_ASSOC ||
-           info.snd_assoc_id == SCTP_ALL_ASSOC) {
-               sp->default_stream = info.snd_sid;
-               sp->default_flags = info.snd_flags;
-               sp->default_ppid = info.snd_ppid;
-               sp->default_context = info.snd_context;
+       if (info->snd_assoc_id == SCTP_FUTURE_ASSOC ||
+           info->snd_assoc_id == SCTP_ALL_ASSOC) {
+               sp->default_stream = info->snd_sid;
+               sp->default_flags = info->snd_flags;
+               sp->default_ppid = info->snd_ppid;
+               sp->default_context = info->snd_context;
        }
 
-       if (info.snd_assoc_id == SCTP_CURRENT_ASSOC ||
-           info.snd_assoc_id == SCTP_ALL_ASSOC) {
+       if (info->snd_assoc_id == SCTP_CURRENT_ASSOC ||
+           info->snd_assoc_id == SCTP_ALL_ASSOC) {
                list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
-                       asoc->default_stream = info.snd_sid;
-                       asoc->default_flags = info.snd_flags;
-                       asoc->default_ppid = info.snd_ppid;
-                       asoc->default_context = info.snd_context;
+                       asoc->default_stream = info->snd_sid;
+                       asoc->default_flags = info->snd_flags;
+                       asoc->default_ppid = info->snd_ppid;
+                       asoc->default_context = info->snd_context;
                }
        }
 
@@ -3040,10 +2990,9 @@ static int sctp_setsockopt_default_sndinfo(struct sock *sk,
  * the association primary.  The enclosed address must be one of the
  * association peer's addresses.
  */
-static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
+static int sctp_setsockopt_primary_addr(struct sock *sk, struct sctp_prim *prim,
                                        unsigned int optlen)
 {
-       struct sctp_prim prim;
        struct sctp_transport *trans;
        struct sctp_af *af;
        int err;
@@ -3051,21 +3000,18 @@ static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
        if (optlen != sizeof(struct sctp_prim))
                return -EINVAL;
 
-       if (copy_from_user(&prim, optval, sizeof(struct sctp_prim)))
-               return -EFAULT;
-
        /* Allow security module to validate address but need address len. */
-       af = sctp_get_af_specific(prim.ssp_addr.ss_family);
+       af = sctp_get_af_specific(prim->ssp_addr.ss_family);
        if (!af)
                return -EINVAL;
 
        err = security_sctp_bind_connect(sk, SCTP_PRIMARY_ADDR,
-                                        (struct sockaddr *)&prim.ssp_addr,
+                                        (struct sockaddr *)&prim->ssp_addr,
                                         af->sockaddr_len);
        if (err)
                return err;
 
-       trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id);
+       trans = sctp_addr_id2transport(sk, &prim->ssp_addr, prim->ssp_assoc_id);
        if (!trans)
                return -EINVAL;
 
@@ -3082,17 +3028,12 @@ static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval,
  * introduced, at the cost of more packets in the network.  Expects an
  *  integer boolean flag.
  */
-static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
+static int sctp_setsockopt_nodelay(struct sock *sk, int *val,
                                   unsigned int optlen)
 {
-       int val;
-
        if (optlen < sizeof(int))
                return -EINVAL;
-       if (get_user(val, (int __user *)optval))
-               return -EFAULT;
-
-       sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1;
+       sctp_sk(sk)->nodelay = (*val == 0) ? 0 : 1;
        return 0;
 }
 
@@ -3108,9 +3049,10 @@ static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval,
  * be changed.
  *
  */
-static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen)
+static int sctp_setsockopt_rtoinfo(struct sock *sk,
+                                  struct sctp_rtoinfo *rtoinfo,
+                                  unsigned int optlen)
 {
-       struct sctp_rtoinfo rtoinfo;
        struct sctp_association *asoc;
        unsigned long rto_min, rto_max;
        struct sctp_sock *sp = sctp_sk(sk);
@@ -3118,18 +3060,15 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigne
        if (optlen != sizeof (struct sctp_rtoinfo))
                return -EINVAL;
 
-       if (copy_from_user(&rtoinfo, optval, optlen))
-               return -EFAULT;
-
-       asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
+       asoc = sctp_id2assoc(sk, rtoinfo->srto_assoc_id);
 
        /* Set the values to the specific association */
-       if (!asoc && rtoinfo.srto_assoc_id != SCTP_FUTURE_ASSOC &&
+       if (!asoc && rtoinfo->srto_assoc_id != SCTP_FUTURE_ASSOC &&
            sctp_style(sk, UDP))
                return -EINVAL;
 
-       rto_max = rtoinfo.srto_max;
-       rto_min = rtoinfo.srto_min;
+       rto_max = rtoinfo->srto_max;
+       rto_min = rtoinfo->srto_min;
 
        if (rto_max)
                rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max;
@@ -3145,17 +3084,17 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigne
                return -EINVAL;
 
        if (asoc) {
-               if (rtoinfo.srto_initial != 0)
+               if (rtoinfo->srto_initial != 0)
                        asoc->rto_initial =
-                               msecs_to_jiffies(rtoinfo.srto_initial);
+                               msecs_to_jiffies(rtoinfo->srto_initial);
                asoc->rto_max = rto_max;
                asoc->rto_min = rto_min;
        } else {
                /* If there is no association or the association-id = 0
                 * set the values to the endpoint.
                 */
-               if (rtoinfo.srto_initial != 0)
-                       sp->rtoinfo.srto_initial = rtoinfo.srto_initial;
+               if (rtoinfo->srto_initial != 0)
+                       sp->rtoinfo.srto_initial = rtoinfo->srto_initial;
                sp->rtoinfo.srto_max = rto_max;
                sp->rtoinfo.srto_min = rto_min;
        }
@@ -3174,26 +3113,25 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigne
  * See [SCTP] for more information.
  *
  */
-static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen)
+static int sctp_setsockopt_associnfo(struct sock *sk,
+                                    struct sctp_assocparams *assocparams,
+                                    unsigned int optlen)
 {
 
-       struct sctp_assocparams assocparams;
        struct sctp_association *asoc;
 
        if (optlen != sizeof(struct sctp_assocparams))
                return -EINVAL;
-       if (copy_from_user(&assocparams, optval, optlen))
-               return -EFAULT;
 
-       asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
+       asoc = sctp_id2assoc(sk, assocparams->sasoc_assoc_id);
 
-       if (!asoc && assocparams.sasoc_assoc_id != SCTP_FUTURE_ASSOC &&
+       if (!asoc && assocparams->sasoc_assoc_id != SCTP_FUTURE_ASSOC &&
            sctp_style(sk, UDP))
                return -EINVAL;
 
        /* Set the values to the specific association */
        if (asoc) {
-               if (assocparams.sasoc_asocmaxrxt != 0) {
+               if (assocparams->sasoc_asocmaxrxt != 0) {
                        __u32 path_sum = 0;
                        int   paths = 0;
                        struct sctp_transport *peer_addr;
@@ -3210,24 +3148,25 @@ static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsig
                         * then one path.
                         */
                        if (paths > 1 &&
-                           assocparams.sasoc_asocmaxrxt > path_sum)
+                           assocparams->sasoc_asocmaxrxt > path_sum)
                                return -EINVAL;
 
-                       asoc->max_retrans = assocparams.sasoc_asocmaxrxt;
+                       asoc->max_retrans = assocparams->sasoc_asocmaxrxt;
                }
 
-               if (assocparams.sasoc_cookie_life != 0)
-                       asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life);
+               if (assocparams->sasoc_cookie_life != 0)
+                       asoc->cookie_life =
+                               ms_to_ktime(assocparams->sasoc_cookie_life);
        } else {
                /* Set the values to the endpoint */
                struct sctp_sock *sp = sctp_sk(sk);
 
-               if (assocparams.sasoc_asocmaxrxt != 0)
+               if (assocparams->sasoc_asocmaxrxt != 0)
                        sp->assocparams.sasoc_asocmaxrxt =
-                                               assocparams.sasoc_asocmaxrxt;
-               if (assocparams.sasoc_cookie_life != 0)
+                                               assocparams->sasoc_asocmaxrxt;
+               if (assocparams->sasoc_cookie_life != 0)
                        sp->assocparams.sasoc_cookie_life =
-                                               assocparams.sasoc_cookie_life;
+                                               assocparams->sasoc_cookie_life;
        }
        return 0;
 }
@@ -3242,16 +3181,14 @@ static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsig
  * addresses and a user will receive both PF_INET6 and PF_INET type
  * addresses on the socket.
  */
-static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen)
+static int sctp_setsockopt_mappedv4(struct sock *sk, int *val,
+                                   unsigned int optlen)
 {
-       int val;
        struct sctp_sock *sp = sctp_sk(sk);
 
        if (optlen < sizeof(int))
                return -EINVAL;
-       if (get_user(val, (int __user *)optval))
-               return -EFAULT;
-       if (val)
+       if (*val)
                sp->v4mapped = 1;
        else
                sp->v4mapped = 0;
@@ -3286,11 +3223,13 @@ static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsign
  *    changed (effecting future associations only).
  * assoc_value:  This parameter specifies the maximum size in bytes.
  */
-static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen)
+static int sctp_setsockopt_maxseg(struct sock *sk,
+                                 struct sctp_assoc_value *params,
+                                 unsigned int optlen)
 {
        struct sctp_sock *sp = sctp_sk(sk);
-       struct sctp_assoc_value params;
        struct sctp_association *asoc;
+       sctp_assoc_t assoc_id;
        int val;
 
        if (optlen == sizeof(int)) {
@@ -3299,19 +3238,17 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
                                    "Use of int in maxseg socket option.\n"
                                    "Use struct sctp_assoc_value instead\n",
                                    current->comm, task_pid_nr(current));
-               if (copy_from_user(&val, optval, optlen))
-                       return -EFAULT;
-               params.assoc_id = SCTP_FUTURE_ASSOC;
+               assoc_id = SCTP_FUTURE_ASSOC;
+               val = *(int *)params;
        } else if (optlen == sizeof(struct sctp_assoc_value)) {
-               if (copy_from_user(&params, optval, optlen))
-                       return -EFAULT;
-               val = params.assoc_value;
+               assoc_id = params->assoc_id;
+               val = params->assoc_value;
        } else {
                return -EINVAL;
        }
 
-       asoc = sctp_id2assoc(sk, params.assoc_id);
-       if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+       asoc = sctp_id2assoc(sk, assoc_id);
+       if (!asoc && assoc_id != SCTP_FUTURE_ASSOC &&
            sctp_style(sk, UDP))
                return -EINVAL;
 
@@ -3346,12 +3283,12 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
  *   locally bound addresses. The following structure is used to make a
  *   set primary request:
  */
-static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval,
+static int sctp_setsockopt_peer_primary_addr(struct sock *sk,
+                                            struct sctp_setpeerprim *prim,
                                             unsigned int optlen)
 {
        struct sctp_sock        *sp;
        struct sctp_association *asoc = NULL;
-       struct sctp_setpeerprim prim;
        struct sctp_chunk       *chunk;
        struct sctp_af          *af;
        int                     err;
@@ -3364,10 +3301,7 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
        if (optlen != sizeof(struct sctp_setpeerprim))
                return -EINVAL;
 
-       if (copy_from_user(&prim, optval, optlen))
-               return -EFAULT;
-
-       asoc = sctp_id2assoc(sk, prim.sspp_assoc_id);
+       asoc = sctp_id2assoc(sk, prim->sspp_assoc_id);
        if (!asoc)
                return -EINVAL;
 
@@ -3380,26 +3314,26 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
        if (!sctp_state(asoc, ESTABLISHED))
                return -ENOTCONN;
 
-       af = sctp_get_af_specific(prim.sspp_addr.ss_family);
+       af = sctp_get_af_specific(prim->sspp_addr.ss_family);
        if (!af)
                return -EINVAL;
 
-       if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL))
+       if (!af->addr_valid((union sctp_addr *)&prim->sspp_addr, sp, NULL))
                return -EADDRNOTAVAIL;
 
-       if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr))
+       if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim->sspp_addr))
                return -EADDRNOTAVAIL;
 
        /* Allow security module to validate address. */
        err = security_sctp_bind_connect(sk, SCTP_SET_PEER_PRIMARY_ADDR,
-                                        (struct sockaddr *)&prim.sspp_addr,
+                                        (struct sockaddr *)&prim->sspp_addr,
                                         af->sockaddr_len);
        if (err)
                return err;
 
        /* Create an ASCONF chunk with SET_PRIMARY parameter    */
        chunk = sctp_make_asconf_set_prim(asoc,
-                                         (union sctp_addr *)&prim.sspp_addr);
+                                         (union sctp_addr *)&prim->sspp_addr);
        if (!chunk)
                return -ENOMEM;
 
@@ -3410,17 +3344,14 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva
        return err;
 }
 
-static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval,
+static int sctp_setsockopt_adaptation_layer(struct sock *sk,
+                                           struct sctp_setadaptation *adapt,
                                            unsigned int optlen)
 {
-       struct sctp_setadaptation adaptation;
-
        if (optlen != sizeof(struct sctp_setadaptation))
                return -EINVAL;
-       if (copy_from_user(&adaptation, optval, optlen))
-               return -EFAULT;
 
-       sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind;
+       sctp_sk(sk)->adaptation_ind = adapt->ssb_adaptation_ind;
 
        return 0;
 }
@@ -3439,40 +3370,38 @@ static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval
  * received messages from the peer and does not effect the value that is
  * saved with outbound messages.
  */
-static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
+static int sctp_setsockopt_context(struct sock *sk,
+                                  struct sctp_assoc_value *params,
                                   unsigned int optlen)
 {
        struct sctp_sock *sp = sctp_sk(sk);
-       struct sctp_assoc_value params;
        struct sctp_association *asoc;
 
        if (optlen != sizeof(struct sctp_assoc_value))
                return -EINVAL;
-       if (copy_from_user(&params, optval, optlen))
-               return -EFAULT;
 
-       asoc = sctp_id2assoc(sk, params.assoc_id);
-       if (!asoc && params.assoc_id > SCTP_ALL_ASSOC &&
+       asoc = sctp_id2assoc(sk, params->assoc_id);
+       if (!asoc && params->assoc_id > SCTP_ALL_ASSOC &&
            sctp_style(sk, UDP))
                return -EINVAL;
 
        if (asoc) {
-               asoc->default_rcv_context = params.assoc_value;
+               asoc->default_rcv_context = params->assoc_value;
 
                return 0;
        }
 
        if (sctp_style(sk, TCP))
-               params.assoc_id = SCTP_FUTURE_ASSOC;
+               params->assoc_id = SCTP_FUTURE_ASSOC;
 
-       if (params.assoc_id == SCTP_FUTURE_ASSOC ||
-           params.assoc_id == SCTP_ALL_ASSOC)
-               sp->default_rcv_context = params.assoc_value;
+       if (params->assoc_id == SCTP_FUTURE_ASSOC ||
+           params->assoc_id == SCTP_ALL_ASSOC)
+               sp->default_rcv_context = params->assoc_value;
 
-       if (params.assoc_id == SCTP_CURRENT_ASSOC ||
-           params.assoc_id == SCTP_ALL_ASSOC)
+       if (params->assoc_id == SCTP_CURRENT_ASSOC ||
+           params->assoc_id == SCTP_ALL_ASSOC)
                list_for_each_entry(asoc, &sp->ep->asocs, asocs)
-                       asoc->default_rcv_context = params.assoc_value;
+                       asoc->default_rcv_context = params->assoc_value;
 
        return 0;
 }
@@ -3501,18 +3430,13 @@ static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
  * application using the one to many model may become confused and act
  * incorrectly.
  */
-static int sctp_setsockopt_fragment_interleave(struct sock *sk,
-                                              char __user *optval,
+static int sctp_setsockopt_fragment_interleave(struct sock *sk, int *val,
                                               unsigned int optlen)
 {
-       int val;
-
        if (optlen != sizeof(int))
                return -EINVAL;
-       if (get_user(val, (int __user *)optval))
-               return -EFAULT;
 
-       sctp_sk(sk)->frag_interleave = !!val;
+       sctp_sk(sk)->frag_interleave = !!*val;
 
        if (!sctp_sk(sk)->frag_interleave)
                sctp_sk(sk)->ep->intl_enable = 0;
@@ -3537,24 +3461,19 @@ static int sctp_setsockopt_fragment_interleave(struct sock *sk,
  * call as long as the user provided buffer is large enough to hold the
  * message.
  */
-static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
-                                                 char __user *optval,
+static int sctp_setsockopt_partial_delivery_point(struct sock *sk, u32 *val,
                                                  unsigned int optlen)
 {
-       u32 val;
-
        if (optlen != sizeof(u32))
                return -EINVAL;
-       if (get_user(val, (int __user *)optval))
-               return -EFAULT;
 
        /* Note: We double the receive buffer from what the user sets
         * it to be, also initial rwnd is based on rcvbuf/2.
         */
-       if (val > (sk->sk_rcvbuf >> 1))
+       if (*val > (sk->sk_rcvbuf >> 1))
                return -EINVAL;
 
-       sctp_sk(sk)->pd_point = val;
+       sctp_sk(sk)->pd_point = *val;
 
        return 0; /* is this the right error code? */
 }
@@ -3571,12 +3490,13 @@ static int sctp_setsockopt_partial_delivery_point(struct sock *sk,
  * future associations inheriting the socket value.
  */
 static int sctp_setsockopt_maxburst(struct sock *sk,
-                                   char __user *optval,
+                                   struct sctp_assoc_value *params,
                                    unsigned int optlen)
 {
        struct sctp_sock *sp = sctp_sk(sk);
-       struct sctp_assoc_value params;
        struct sctp_association *asoc;
+       sctp_assoc_t assoc_id;
+       u32 assoc_value;
 
        if (optlen == sizeof(int)) {
                pr_warn_ratelimited(DEPRECATED
@@ -3584,37 +3504,33 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
                                    "Use of int in max_burst socket option deprecated.\n"
                                    "Use struct sctp_assoc_value instead\n",
                                    current->comm, task_pid_nr(current));
-               if (copy_from_user(&params.assoc_value, optval, optlen))
-                       return -EFAULT;
-               params.assoc_id = SCTP_FUTURE_ASSOC;
+               assoc_id = SCTP_FUTURE_ASSOC;
+               assoc_value = *((int *)params);
        } else if (optlen == sizeof(struct sctp_assoc_value)) {
-               if (copy_from_user(&params, optval, optlen))
-                       return -EFAULT;
+               assoc_id = params->assoc_id;
+               assoc_value = params->assoc_value;
        } else
                return -EINVAL;
 
-       asoc = sctp_id2assoc(sk, params.assoc_id);
-       if (!asoc && params.assoc_id > SCTP_ALL_ASSOC &&
-           sctp_style(sk, UDP))
+       asoc = sctp_id2assoc(sk, assoc_id);
+       if (!asoc && assoc_id > SCTP_ALL_ASSOC && sctp_style(sk, UDP))
                return -EINVAL;
 
        if (asoc) {
-               asoc->max_burst = params.assoc_value;
+               asoc->max_burst = assoc_value;
 
                return 0;
        }
 
        if (sctp_style(sk, TCP))
-               params.assoc_id = SCTP_FUTURE_ASSOC;
+               assoc_id = SCTP_FUTURE_ASSOC;
 
-       if (params.assoc_id == SCTP_FUTURE_ASSOC ||
-           params.assoc_id == SCTP_ALL_ASSOC)
-               sp->max_burst = params.assoc_value;
+       if (assoc_id == SCTP_FUTURE_ASSOC || assoc_id == SCTP_ALL_ASSOC)
+               sp->max_burst = assoc_value;
 
-       if (params.assoc_id == SCTP_CURRENT_ASSOC ||
-           params.assoc_id == SCTP_ALL_ASSOC)
+       if (assoc_id == SCTP_CURRENT_ASSOC || assoc_id == SCTP_ALL_ASSOC)
                list_for_each_entry(asoc, &sp->ep->asocs, asocs)
-                       asoc->max_burst = params.assoc_value;
+                       asoc->max_burst = assoc_value;
 
        return 0;
 }
@@ -3627,21 +3543,18 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
  * will only effect future associations on the socket.
  */
 static int sctp_setsockopt_auth_chunk(struct sock *sk,
-                                     char __user *optval,
+                                     struct sctp_authchunk *val,
                                      unsigned int optlen)
 {
        struct sctp_endpoint *ep = sctp_sk(sk)->ep;
-       struct sctp_authchunk val;
 
        if (!ep->auth_enable)
                return -EACCES;
 
        if (optlen != sizeof(struct sctp_authchunk))
                return -EINVAL;
-       if (copy_from_user(&val, optval, optlen))
-               return -EFAULT;
 
-       switch (val.sauth_chunk) {
+       switch (val->sauth_chunk) {
        case SCTP_CID_INIT:
        case SCTP_CID_INIT_ACK:
        case SCTP_CID_SHUTDOWN_COMPLETE:
@@ -3650,7 +3563,7 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
        }
 
        /* add this chunk id to the endpoint */
-       return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk);
+       return sctp_auth_ep_add_chunkid(ep, val->sauth_chunk);
 }
 
 /*
@@ -3660,13 +3573,11 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
  * endpoint requires the peer to use.
  */
 static int sctp_setsockopt_hmac_ident(struct sock *sk,
-                                     char __user *optval,
+                                     struct sctp_hmacalgo *hmacs,
                                      unsigned int optlen)
 {
        struct sctp_endpoint *ep = sctp_sk(sk)->ep;
-       struct sctp_hmacalgo *hmacs;
        u32 idents;
-       int err;
 
        if (!ep->auth_enable)
                return -EACCES;
@@ -3676,21 +3587,12 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
        optlen = min_t(unsigned int, optlen, sizeof(struct sctp_hmacalgo) +
                                             SCTP_AUTH_NUM_HMACS * sizeof(u16));
 
-       hmacs = memdup_user(optval, optlen);
-       if (IS_ERR(hmacs))
-               return PTR_ERR(hmacs);
-
        idents = hmacs->shmac_num_idents;
        if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
-           (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) {
-               err = -EINVAL;
-               goto out;
-       }
+           (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo)))
+               return -EINVAL;
 
-       err = sctp_auth_ep_set_hmacs(ep, hmacs);
-out:
-       kfree(hmacs);
-       return err;
+       return sctp_auth_ep_set_hmacs(ep, hmacs);
 }
 
 /*
@@ -3700,11 +3602,10 @@ out:
  * association shared key.
  */
 static int sctp_setsockopt_auth_key(struct sock *sk,
-                                   char __user *optval,
+                                   struct sctp_authkey *authkey,
                                    unsigned int optlen)
 {
        struct sctp_endpoint *ep = sctp_sk(sk)->ep;
-       struct sctp_authkey *authkey;
        struct sctp_association *asoc;
        int ret = -EINVAL;
 
@@ -3715,10 +3616,6 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
         */
        optlen = min_t(unsigned int, optlen, USHRT_MAX + sizeof(*authkey));
 
-       authkey = memdup_user(optval, optlen);
-       if (IS_ERR(authkey))
-               return PTR_ERR(authkey);
-
        if (authkey->sca_keylength > optlen - sizeof(*authkey))
                goto out;
 
@@ -3755,7 +3652,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
        }
 
 out:
-       kzfree(authkey);
+       memzero_explicit(authkey, optlen);
        return ret;
 }
 
@@ -3766,42 +3663,39 @@ out:
  * the association shared key.
  */
 static int sctp_setsockopt_active_key(struct sock *sk,
-                                     char __user *optval,
+                                     struct sctp_authkeyid *val,
                                      unsigned int optlen)
 {
        struct sctp_endpoint *ep = sctp_sk(sk)->ep;
        struct sctp_association *asoc;
-       struct sctp_authkeyid val;
        int ret = 0;
 
        if (optlen != sizeof(struct sctp_authkeyid))
                return -EINVAL;
-       if (copy_from_user(&val, optval, optlen))
-               return -EFAULT;
 
-       asoc = sctp_id2assoc(sk, val.scact_assoc_id);
-       if (!asoc && val.scact_assoc_id > SCTP_ALL_ASSOC &&
+       asoc = sctp_id2assoc(sk, val->scact_assoc_id);
+       if (!asoc && val->scact_assoc_id > SCTP_ALL_ASSOC &&
            sctp_style(sk, UDP))
                return -EINVAL;
 
        if (asoc)
-               return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
+               return sctp_auth_set_active_key(ep, asoc, val->scact_keynumber);
 
        if (sctp_style(sk, TCP))
-               val.scact_assoc_id = SCTP_FUTURE_ASSOC;
+               val->scact_assoc_id = SCTP_FUTURE_ASSOC;
 
-       if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
-           val.scact_assoc_id == SCTP_ALL_ASSOC) {
-               ret = sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
+       if (val->scact_assoc_id == SCTP_FUTURE_ASSOC ||
+           val->scact_assoc_id == SCTP_ALL_ASSOC) {
+               ret = sctp_auth_set_active_key(ep, asoc, val->scact_keynumber);
                if (ret)
                        return ret;
        }
 
-       if (val.scact_assoc_id == SCTP_CURRENT_ASSOC ||
-           val.scact_assoc_id == SCTP_ALL_ASSOC) {
+       if (val->scact_assoc_id == SCTP_CURRENT_ASSOC ||
+           val->scact_assoc_id == SCTP_ALL_ASSOC) {
                list_for_each_entry(asoc, &ep->asocs, asocs) {
                        int res = sctp_auth_set_active_key(ep, asoc,
-                                                          val.scact_keynumber);
+                                                          val->scact_keynumber);
 
                        if (res && !ret)
                                ret = res;
@@ -3817,42 +3711,39 @@ static int sctp_setsockopt_active_key(struct sock *sk,
  * This set option will delete a shared secret key from use.
  */
 static int sctp_setsockopt_del_key(struct sock *sk,
-                                  char __user *optval,
+                                  struct sctp_authkeyid *val,
                                   unsigned int optlen)
 {
        struct sctp_endpoint *ep = sctp_sk(sk)->ep;
        struct sctp_association *asoc;
-       struct sctp_authkeyid val;
        int ret = 0;
 
        if (optlen != sizeof(struct sctp_authkeyid))
                return -EINVAL;
-       if (copy_from_user(&val, optval, optlen))
-               return -EFAULT;
 
-       asoc = sctp_id2assoc(sk, val.scact_assoc_id);
-       if (!asoc && val.scact_assoc_id > SCTP_ALL_ASSOC &&
+       asoc = sctp_id2assoc(sk, val->scact_assoc_id);
+       if (!asoc && val->scact_assoc_id > SCTP_ALL_ASSOC &&
            sctp_style(sk, UDP))
                return -EINVAL;
 
        if (asoc)
-               return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
+               return sctp_auth_del_key_id(ep, asoc, val->scact_keynumber);
 
        if (sctp_style(sk, TCP))
-               val.scact_assoc_id = SCTP_FUTURE_ASSOC;
+               val->scact_assoc_id = SCTP_FUTURE_ASSOC;
 
-       if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
-           val.scact_assoc_id == SCTP_ALL_ASSOC) {
-               ret = sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
+       if (val->scact_assoc_id == SCTP_FUTURE_ASSOC ||
+           val->scact_assoc_id == SCTP_ALL_ASSOC) {
+               ret = sctp_auth_del_key_id(ep, asoc, val->scact_keynumber);
                if (ret)
                        return ret;
        }
 
-       if (val.scact_assoc_id == SCTP_CURRENT_ASSOC ||
-           val.scact_assoc_id == SCTP_ALL_ASSOC) {
+       if (val->scact_assoc_id == SCTP_CURRENT_ASSOC ||
+           val->scact_assoc_id == SCTP_ALL_ASSOC) {
                list_for_each_entry(asoc, &ep->asocs, asocs) {
                        int res = sctp_auth_del_key_id(ep, asoc,
-                                                      val.scact_keynumber);
+                                                      val->scact_keynumber);
 
                        if (res && !ret)
                                ret = res;
@@ -3867,42 +3758,40 @@ static int sctp_setsockopt_del_key(struct sock *sk,
  *
  * This set option will deactivate a shared secret key.
  */
-static int sctp_setsockopt_deactivate_key(struct sock *sk, char __user *optval,
+static int sctp_setsockopt_deactivate_key(struct sock *sk,
+                                         struct sctp_authkeyid *val,
                                          unsigned int optlen)
 {
        struct sctp_endpoint *ep = sctp_sk(sk)->ep;
        struct sctp_association *asoc;
-       struct sctp_authkeyid val;
        int ret = 0;
 
        if (optlen != sizeof(struct sctp_authkeyid))
                return -EINVAL;
-       if (copy_from_user(&val, optval, optlen))
-               return -EFAULT;
 
-       asoc = sctp_id2assoc(sk, val.scact_assoc_id);
-       if (!asoc && val.scact_assoc_id > SCTP_ALL_ASSOC &&
+       asoc = sctp_id2assoc(sk, val->scact_assoc_id);
+       if (!asoc && val->scact_assoc_id > SCTP_ALL_ASSOC &&
            sctp_style(sk, UDP))
                return -EINVAL;
 
        if (asoc)
-               return sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);
+               return sctp_auth_deact_key_id(ep, asoc, val->scact_keynumber);
 
        if (sctp_style(sk, TCP))
-               val.scact_assoc_id = SCTP_FUTURE_ASSOC;
+               val->scact_assoc_id = SCTP_FUTURE_ASSOC;
 
-       if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
-           val.scact_assoc_id == SCTP_ALL_ASSOC) {
-               ret = sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);
+       if (val->scact_assoc_id == SCTP_FUTURE_ASSOC ||
+           val->scact_assoc_id == SCTP_ALL_ASSOC) {
+               ret = sctp_auth_deact_key_id(ep, asoc, val->scact_keynumber);
                if (ret)
                        return ret;
        }
 
-       if (val.scact_assoc_id == SCTP_CURRENT_ASSOC ||
-           val.scact_assoc_id == SCTP_ALL_ASSOC) {
+       if (val->scact_assoc_id == SCTP_CURRENT_ASSOC ||
+           val->scact_assoc_id == SCTP_ALL_ASSOC) {
                list_for_each_entry(asoc, &ep->asocs, asocs) {
                        int res = sctp_auth_deact_key_id(ep, asoc,
-                                                        val.scact_keynumber);
+                                                        val->scact_keynumber);
 
                        if (res && !ret)
                                ret = res;
@@ -3926,26 +3815,23 @@ static int sctp_setsockopt_deactivate_key(struct sock *sk, char __user *optval,
  * Note. In this implementation, socket operation overrides default parameter
  * being set by sysctl as well as FreeBSD implementation
  */
-static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
+static int sctp_setsockopt_auto_asconf(struct sock *sk, int *val,
                                        unsigned int optlen)
 {
-       int val;
        struct sctp_sock *sp = sctp_sk(sk);
 
        if (optlen < sizeof(int))
                return -EINVAL;
-       if (get_user(val, (int __user *)optval))
-               return -EFAULT;
-       if (!sctp_is_ep_boundall(sk) && val)
+       if (!sctp_is_ep_boundall(sk) && *val)
                return -EINVAL;
-       if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
+       if ((*val && sp->do_auto_asconf) || (!*val && !sp->do_auto_asconf))
                return 0;
 
        spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
-       if (val == 0 && sp->do_auto_asconf) {
+       if (*val == 0 && sp->do_auto_asconf) {
                list_del(&sp->auto_asconf_list);
                sp->do_auto_asconf = 0;
-       } else if (val && !sp->do_auto_asconf) {
+       } else if (*val && !sp->do_auto_asconf) {
                list_add_tail(&sp->auto_asconf_list,
                    &sock_net(sk)->sctp.auto_asconf_splist);
                sp->do_auto_asconf = 1;
@@ -3962,176 +3848,154 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
  * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
  */
 static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
-                                           char __user *optval,
+                                           struct sctp_paddrthlds_v2 *val,
                                            unsigned int optlen, bool v2)
 {
-       struct sctp_paddrthlds_v2 val;
        struct sctp_transport *trans;
        struct sctp_association *asoc;
        int len;
 
-       len = v2 ? sizeof(val) : sizeof(struct sctp_paddrthlds);
+       len = v2 ? sizeof(*val) : sizeof(struct sctp_paddrthlds);
        if (optlen < len)
                return -EINVAL;
-       if (copy_from_user(&val, optval, len))
-               return -EFAULT;
 
-       if (v2 && val.spt_pathpfthld > val.spt_pathcpthld)
+       if (v2 && val->spt_pathpfthld > val->spt_pathcpthld)
                return -EINVAL;
 
-       if (!sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
-               trans = sctp_addr_id2transport(sk, &val.spt_address,
-                                              val.spt_assoc_id);
+       if (!sctp_is_any(sk, (const union sctp_addr *)&val->spt_address)) {
+               trans = sctp_addr_id2transport(sk, &val->spt_address,
+                                              val->spt_assoc_id);
                if (!trans)
                        return -ENOENT;
 
-               if (val.spt_pathmaxrxt)
-                       trans->pathmaxrxt = val.spt_pathmaxrxt;
+               if (val->spt_pathmaxrxt)
+                       trans->pathmaxrxt = val->spt_pathmaxrxt;
                if (v2)
-                       trans->ps_retrans = val.spt_pathcpthld;
-               trans->pf_retrans = val.spt_pathpfthld;
+                       trans->ps_retrans = val->spt_pathcpthld;
+               trans->pf_retrans = val->spt_pathpfthld;
 
                return 0;
        }
 
-       asoc = sctp_id2assoc(sk, val.spt_assoc_id);
-       if (!asoc && val.spt_assoc_id != SCTP_FUTURE_ASSOC &&
+       asoc = sctp_id2assoc(sk, val->spt_assoc_id);
+       if (!asoc && val->spt_assoc_id != SCTP_FUTURE_ASSOC &&
            sctp_style(sk, UDP))
                return -EINVAL;
 
        if (asoc) {
                list_for_each_entry(trans, &asoc->peer.transport_addr_list,
                                    transports) {
-                       if (val.spt_pathmaxrxt)
-                               trans->pathmaxrxt = val.spt_pathmaxrxt;
+                       if (val->spt_pathmaxrxt)
+                               trans->pathmaxrxt = val->spt_pathmaxrxt;
                        if (v2)
-                               trans->ps_retrans = val.spt_pathcpthld;
-                       trans->pf_retrans = val.spt_pathpfthld;
+                               trans->ps_retrans = val->spt_pathcpthld;
+                       trans->pf_retrans = val->spt_pathpfthld;
                }
 
-               if (val.spt_pathmaxrxt)
-                       asoc->pathmaxrxt = val.spt_pathmaxrxt;
+               if (val->spt_pathmaxrxt)
+                       asoc->pathmaxrxt = val->spt_pathmaxrxt;
                if (v2)
-                       asoc->ps_retrans = val.spt_pathcpthld;
-               asoc->pf_retrans = val.spt_pathpfthld;
+                       asoc->ps_retrans = val->spt_pathcpthld;
+               asoc->pf_retrans = val->spt_pathpfthld;
        } else {
                struct sctp_sock *sp = sctp_sk(sk);
 
-               if (val.spt_pathmaxrxt)
-                       sp->pathmaxrxt = val.spt_pathmaxrxt;
+               if (val->spt_pathmaxrxt)
+                       sp->pathmaxrxt = val->spt_pathmaxrxt;
                if (v2)
-                       sp->ps_retrans = val.spt_pathcpthld;
-               sp->pf_retrans = val.spt_pathpfthld;
+                       sp->ps_retrans = val->spt_pathcpthld;
+               sp->pf_retrans = val->spt_pathpfthld;
        }
 
        return 0;
 }
 
-static int sctp_setsockopt_recvrcvinfo(struct sock *sk,
-                                      char __user *optval,
+static int sctp_setsockopt_recvrcvinfo(struct sock *sk, int *val,
                                       unsigned int optlen)
 {
-       int val;
-
        if (optlen < sizeof(int))
                return -EINVAL;
-       if (get_user(val, (int __user *) optval))
-               return -EFAULT;
 
-       sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1;
+       sctp_sk(sk)->recvrcvinfo = (*val == 0) ? 0 : 1;
 
        return 0;
 }
 
-static int sctp_setsockopt_recvnxtinfo(struct sock *sk,
-                                      char __user *optval,
+static int sctp_setsockopt_recvnxtinfo(struct sock *sk, int *val,
                                       unsigned int optlen)
 {
-       int val;
-
        if (optlen < sizeof(int))
                return -EINVAL;
-       if (get_user(val, (int __user *) optval))
-               return -EFAULT;
 
-       sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1;
+       sctp_sk(sk)->recvnxtinfo = (*val == 0) ? 0 : 1;
 
        return 0;
 }
 
 static int sctp_setsockopt_pr_supported(struct sock *sk,
-                                       char __user *optval,
+                                       struct sctp_assoc_value *params,
                                        unsigned int optlen)
 {
-       struct sctp_assoc_value params;
        struct sctp_association *asoc;
 
-       if (optlen != sizeof(params))
+       if (optlen != sizeof(*params))
                return -EINVAL;
 
-       if (copy_from_user(&params, optval, optlen))
-               return -EFAULT;
-
-       asoc = sctp_id2assoc(sk, params.assoc_id);
-       if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+       asoc = sctp_id2assoc(sk, params->assoc_id);
+       if (!asoc && params->assoc_id != SCTP_FUTURE_ASSOC &&
            sctp_style(sk, UDP))
                return -EINVAL;
 
-       sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value;
+       sctp_sk(sk)->ep->prsctp_enable = !!params->assoc_value;
 
        return 0;
 }
 
 static int sctp_setsockopt_default_prinfo(struct sock *sk,
-                                         char __user *optval,
+                                         struct sctp_default_prinfo *info,
                                          unsigned int optlen)
 {
        struct sctp_sock *sp = sctp_sk(sk);
-       struct sctp_default_prinfo info;
        struct sctp_association *asoc;
        int retval = -EINVAL;
 
-       if (optlen != sizeof(info))
+       if (optlen != sizeof(*info))
                goto out;
 
-       if (copy_from_user(&info, optval, sizeof(info))) {
-               retval = -EFAULT;
+       if (info->pr_policy & ~SCTP_PR_SCTP_MASK)
                goto out;
-       }
 
-       if (info.pr_policy & ~SCTP_PR_SCTP_MASK)
-               goto out;
+       if (info->pr_policy == SCTP_PR_SCTP_NONE)
+               info->pr_value = 0;
 
-       if (info.pr_policy == SCTP_PR_SCTP_NONE)
-               info.pr_value = 0;
-
-       asoc = sctp_id2assoc(sk, info.pr_assoc_id);
-       if (!asoc && info.pr_assoc_id > SCTP_ALL_ASSOC &&
+       asoc = sctp_id2assoc(sk, info->pr_assoc_id);
+       if (!asoc && info->pr_assoc_id > SCTP_ALL_ASSOC &&
            sctp_style(sk, UDP))
                goto out;
 
        retval = 0;
 
        if (asoc) {
-               SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy);
-               asoc->default_timetolive = info.pr_value;
+               SCTP_PR_SET_POLICY(asoc->default_flags, info->pr_policy);
+               asoc->default_timetolive = info->pr_value;
                goto out;
        }
 
        if (sctp_style(sk, TCP))
-               info.pr_assoc_id = SCTP_FUTURE_ASSOC;
+               info->pr_assoc_id = SCTP_FUTURE_ASSOC;
 
-       if (info.pr_assoc_id == SCTP_FUTURE_ASSOC ||
-           info.pr_assoc_id == SCTP_ALL_ASSOC) {
-               SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy);
-               sp->default_timetolive = info.pr_value;
+       if (info->pr_assoc_id == SCTP_FUTURE_ASSOC ||
+           info->pr_assoc_id == SCTP_ALL_ASSOC) {
+               SCTP_PR_SET_POLICY(sp->default_flags, info->pr_policy);
+               sp->default_timetolive = info->pr_value;
        }
 
-       if (info.pr_assoc_id == SCTP_CURRENT_ASSOC ||
-           info.pr_assoc_id == SCTP_ALL_ASSOC) {
+       if (info->pr_assoc_id == SCTP_CURRENT_ASSOC ||
+           info->pr_assoc_id == SCTP_ALL_ASSOC) {
                list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
-                       SCTP_PR_SET_POLICY(asoc->default_flags, info.pr_policy);
-                       asoc->default_timetolive = info.pr_value;
+                       SCTP_PR_SET_POLICY(asoc->default_flags,
+                                          info->pr_policy);
+                       asoc->default_timetolive = info->pr_value;
                }
        }
 
@@ -4140,27 +4004,21 @@ out:
 }
 
 static int sctp_setsockopt_reconfig_supported(struct sock *sk,
-                                             char __user *optval,
+                                             struct sctp_assoc_value *params,
                                              unsigned int optlen)
 {
-       struct sctp_assoc_value params;
        struct sctp_association *asoc;
        int retval = -EINVAL;
 
-       if (optlen != sizeof(params))
-               goto out;
-
-       if (copy_from_user(&params, optval, optlen)) {
-               retval = -EFAULT;
+       if (optlen != sizeof(*params))
                goto out;
-       }
 
-       asoc = sctp_id2assoc(sk, params.assoc_id);
-       if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+       asoc = sctp_id2assoc(sk, params->assoc_id);
+       if (!asoc && params->assoc_id != SCTP_FUTURE_ASSOC &&
            sctp_style(sk, UDP))
                goto out;
 
-       sctp_sk(sk)->ep->reconf_enable = !!params.assoc_value;
+       sctp_sk(sk)->ep->reconf_enable = !!params->assoc_value;
 
        retval = 0;
 
@@ -4169,60 +4027,52 @@ out:
 }
 
 static int sctp_setsockopt_enable_strreset(struct sock *sk,
-                                          char __user *optval,
+                                          struct sctp_assoc_value *params,
                                           unsigned int optlen)
 {
        struct sctp_endpoint *ep = sctp_sk(sk)->ep;
-       struct sctp_assoc_value params;
        struct sctp_association *asoc;
        int retval = -EINVAL;
 
-       if (optlen != sizeof(params))
+       if (optlen != sizeof(*params))
                goto out;
 
-       if (copy_from_user(&params, optval, optlen)) {
-               retval = -EFAULT;
-               goto out;
-       }
-
-       if (params.assoc_value & (~SCTP_ENABLE_STRRESET_MASK))
+       if (params->assoc_value & (~SCTP_ENABLE_STRRESET_MASK))
                goto out;
 
-       asoc = sctp_id2assoc(sk, params.assoc_id);
-       if (!asoc && params.assoc_id > SCTP_ALL_ASSOC &&
+       asoc = sctp_id2assoc(sk, params->assoc_id);
+       if (!asoc && params->assoc_id > SCTP_ALL_ASSOC &&
            sctp_style(sk, UDP))
                goto out;
 
        retval = 0;
 
        if (asoc) {
-               asoc->strreset_enable = params.assoc_value;
+               asoc->strreset_enable = params->assoc_value;
                goto out;
        }
 
        if (sctp_style(sk, TCP))
-               params.assoc_id = SCTP_FUTURE_ASSOC;
+               params->assoc_id = SCTP_FUTURE_ASSOC;
 
-       if (params.assoc_id == SCTP_FUTURE_ASSOC ||
-           params.assoc_id == SCTP_ALL_ASSOC)
-               ep->strreset_enable = params.assoc_value;
+       if (params->assoc_id == SCTP_FUTURE_ASSOC ||
+           params->assoc_id == SCTP_ALL_ASSOC)
+               ep->strreset_enable = params->assoc_value;
 
-       if (params.assoc_id == SCTP_CURRENT_ASSOC ||
-           params.assoc_id == SCTP_ALL_ASSOC)
+       if (params->assoc_id == SCTP_CURRENT_ASSOC ||
+           params->assoc_id == SCTP_ALL_ASSOC)
                list_for_each_entry(asoc, &ep->asocs, asocs)
-                       asoc->strreset_enable = params.assoc_value;
+                       asoc->strreset_enable = params->assoc_value;
 
 out:
        return retval;
 }
 
 static int sctp_setsockopt_reset_streams(struct sock *sk,
-                                        char __user *optval,
+                                        struct sctp_reset_streams *params,
                                         unsigned int optlen)
 {
-       struct sctp_reset_streams *params;
        struct sctp_association *asoc;
-       int retval = -EINVAL;
 
        if (optlen < sizeof(*params))
                return -EINVAL;
@@ -4230,116 +4080,82 @@ static int sctp_setsockopt_reset_streams(struct sock *sk,
        optlen = min_t(unsigned int, optlen, USHRT_MAX +
                                             sizeof(__u16) * sizeof(*params));
 
-       params = memdup_user(optval, optlen);
-       if (IS_ERR(params))
-               return PTR_ERR(params);
-
        if (params->srs_number_streams * sizeof(__u16) >
            optlen - sizeof(*params))
-               goto out;
+               return -EINVAL;
 
        asoc = sctp_id2assoc(sk, params->srs_assoc_id);
        if (!asoc)
-               goto out;
-
-       retval = sctp_send_reset_streams(asoc, params);
+               return -EINVAL;
 
-out:
-       kfree(params);
-       return retval;
+       return sctp_send_reset_streams(asoc, params);
 }
 
-static int sctp_setsockopt_reset_assoc(struct sock *sk,
-                                      char __user *optval,
+static int sctp_setsockopt_reset_assoc(struct sock *sk, sctp_assoc_t *associd,
                                       unsigned int optlen)
 {
        struct sctp_association *asoc;
-       sctp_assoc_t associd;
-       int retval = -EINVAL;
 
-       if (optlen != sizeof(associd))
-               goto out;
-
-       if (copy_from_user(&associd, optval, optlen)) {
-               retval = -EFAULT;
-               goto out;
-       }
+       if (optlen != sizeof(*associd))
+               return -EINVAL;
 
-       asoc = sctp_id2assoc(sk, associd);
+       asoc = sctp_id2assoc(sk, *associd);
        if (!asoc)
-               goto out;
-
-       retval = sctp_send_reset_assoc(asoc);
+               return -EINVAL;
 
-out:
-       return retval;
+       return sctp_send_reset_assoc(asoc);
 }
 
 static int sctp_setsockopt_add_streams(struct sock *sk,
-                                      char __user *optval,
+                                      struct sctp_add_streams *params,
                                       unsigned int optlen)
 {
        struct sctp_association *asoc;
-       struct sctp_add_streams params;
-       int retval = -EINVAL;
 
-       if (optlen != sizeof(params))
-               goto out;
-
-       if (copy_from_user(&params, optval, optlen)) {
-               retval = -EFAULT;
-               goto out;
-       }
+       if (optlen != sizeof(*params))
+               return -EINVAL;
 
-       asoc = sctp_id2assoc(sk, params.sas_assoc_id);
+       asoc = sctp_id2assoc(sk, params->sas_assoc_id);
        if (!asoc)
-               goto out;
-
-       retval = sctp_send_add_streams(asoc, &params);
+               return -EINVAL;
 
-out:
-       return retval;
+       return sctp_send_add_streams(asoc, params);
 }
 
 static int sctp_setsockopt_scheduler(struct sock *sk,
-                                    char __user *optval,
+                                    struct sctp_assoc_value *params,
                                     unsigned int optlen)
 {
        struct sctp_sock *sp = sctp_sk(sk);
        struct sctp_association *asoc;
-       struct sctp_assoc_value params;
        int retval = 0;
 
-       if (optlen < sizeof(params))
+       if (optlen < sizeof(*params))
                return -EINVAL;
 
-       optlen = sizeof(params);
-       if (copy_from_user(&params, optval, optlen))
-               return -EFAULT;
-
-       if (params.assoc_value > SCTP_SS_MAX)
+       if (params->assoc_value > SCTP_SS_MAX)
                return -EINVAL;
 
-       asoc = sctp_id2assoc(sk, params.assoc_id);
-       if (!asoc && params.assoc_id > SCTP_ALL_ASSOC &&
+       asoc = sctp_id2assoc(sk, params->assoc_id);
+       if (!asoc && params->assoc_id > SCTP_ALL_ASSOC &&
            sctp_style(sk, UDP))
                return -EINVAL;
 
        if (asoc)
-               return sctp_sched_set_sched(asoc, params.assoc_value);
+               return sctp_sched_set_sched(asoc, params->assoc_value);
 
        if (sctp_style(sk, TCP))
-               params.assoc_id = SCTP_FUTURE_ASSOC;
+               params->assoc_id = SCTP_FUTURE_ASSOC;
 
-       if (params.assoc_id == SCTP_FUTURE_ASSOC ||
-           params.assoc_id == SCTP_ALL_ASSOC)
-               sp->default_ss = params.assoc_value;
+       if (params->assoc_id == SCTP_FUTURE_ASSOC ||
+           params->assoc_id == SCTP_ALL_ASSOC)
+               sp->default_ss = params->assoc_value;
 
-       if (params.assoc_id == SCTP_CURRENT_ASSOC ||
-           params.assoc_id == SCTP_ALL_ASSOC) {
+       if (params->assoc_id == SCTP_CURRENT_ASSOC ||
+           params->assoc_id == SCTP_ALL_ASSOC) {
                list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
                        int ret = sctp_sched_set_sched(asoc,
-                                                      params.assoc_value);
+                                                      params->assoc_value);
 
                        if (ret && !retval)
                                retval = ret;
@@ -4350,38 +4166,32 @@ static int sctp_setsockopt_scheduler(struct sock *sk,
 }
 
 static int sctp_setsockopt_scheduler_value(struct sock *sk,
-                                          char __user *optval,
+                                          struct sctp_stream_value *params,
                                           unsigned int optlen)
 {
-       struct sctp_stream_value params;
        struct sctp_association *asoc;
        int retval = -EINVAL;
 
-       if (optlen < sizeof(params))
-               goto out;
-
-       optlen = sizeof(params);
-       if (copy_from_user(&params, optval, optlen)) {
-               retval = -EFAULT;
+       if (optlen < sizeof(*params))
                goto out;
-       }
 
-       asoc = sctp_id2assoc(sk, params.assoc_id);
-       if (!asoc && params.assoc_id != SCTP_CURRENT_ASSOC &&
+       asoc = sctp_id2assoc(sk, params->assoc_id);
+       if (!asoc && params->assoc_id != SCTP_CURRENT_ASSOC &&
            sctp_style(sk, UDP))
                goto out;
 
        if (asoc) {
-               retval = sctp_sched_set_value(asoc, params.stream_id,
-                                             params.stream_value, GFP_KERNEL);
+               retval = sctp_sched_set_value(asoc, params->stream_id,
+                                             params->stream_value, GFP_KERNEL);
                goto out;
        }
 
        retval = 0;
 
        list_for_each_entry(asoc, &sctp_sk(sk)->ep->asocs, asocs) {
-               int ret = sctp_sched_set_value(asoc, params.stream_id,
-                                              params.stream_value, GFP_KERNEL);
+               int ret = sctp_sched_set_value(asoc, params->stream_id,
+                                              params->stream_value,
+                                              GFP_KERNEL);
                if (ret && !retval) /* try to return the 1st error. */
                        retval = ret;
        }
@@ -4391,46 +4201,30 @@ out:
 }
 
 static int sctp_setsockopt_interleaving_supported(struct sock *sk,
-                                                 char __user *optval,
+                                                 struct sctp_assoc_value *p,
                                                  unsigned int optlen)
 {
        struct sctp_sock *sp = sctp_sk(sk);
-       struct sctp_assoc_value params;
        struct sctp_association *asoc;
-       int retval = -EINVAL;
 
-       if (optlen < sizeof(params))
-               goto out;
-
-       optlen = sizeof(params);
-       if (copy_from_user(&params, optval, optlen)) {
-               retval = -EFAULT;
-               goto out;
-       }
+       if (optlen < sizeof(*p))
+               return -EINVAL;
 
-       asoc = sctp_id2assoc(sk, params.assoc_id);
-       if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
-           sctp_style(sk, UDP))
-               goto out;
+       asoc = sctp_id2assoc(sk, p->assoc_id);
+       if (!asoc && p->assoc_id != SCTP_FUTURE_ASSOC && sctp_style(sk, UDP))
+               return -EINVAL;
 
        if (!sock_net(sk)->sctp.intl_enable || !sp->frag_interleave) {
-               retval = -EPERM;
-               goto out;
+               return -EPERM;
        }
 
-       sp->ep->intl_enable = !!params.assoc_value;
-
-       retval = 0;
-
-out:
-       return retval;
+       sp->ep->intl_enable = !!p->assoc_value;
+       return 0;
 }
 
-static int sctp_setsockopt_reuse_port(struct sock *sk, char __user *optval,
+static int sctp_setsockopt_reuse_port(struct sock *sk, int *val,
                                      unsigned int optlen)
 {
-       int val;
-
        if (!sctp_style(sk, TCP))
                return -EOPNOTSUPP;
 
@@ -4440,10 +4234,7 @@ static int sctp_setsockopt_reuse_port(struct sock *sk, char __user *optval,
        if (optlen < sizeof(int))
                return -EINVAL;
 
-       if (get_user(val, (int __user *)optval))
-               return -EFAULT;
-
-       sctp_sk(sk)->reuse = !!val;
+       sctp_sk(sk)->reuse = !!*val;
 
        return 0;
 }
@@ -4469,45 +4260,40 @@ static int sctp_assoc_ulpevent_type_set(struct sctp_event *param,
        return 0;
 }
 
-static int sctp_setsockopt_event(struct sock *sk, char __user *optval,
+static int sctp_setsockopt_event(struct sock *sk, struct sctp_event *param,
                                 unsigned int optlen)
 {
        struct sctp_sock *sp = sctp_sk(sk);
        struct sctp_association *asoc;
-       struct sctp_event param;
        int retval = 0;
 
-       if (optlen < sizeof(param))
+       if (optlen < sizeof(*param))
                return -EINVAL;
 
-       optlen = sizeof(param);
-       if (copy_from_user(&param, optval, optlen))
-               return -EFAULT;
-
-       if (param.se_type < SCTP_SN_TYPE_BASE ||
-           param.se_type > SCTP_SN_TYPE_MAX)
+       if (param->se_type < SCTP_SN_TYPE_BASE ||
+           param->se_type > SCTP_SN_TYPE_MAX)
                return -EINVAL;
 
-       asoc = sctp_id2assoc(sk, param.se_assoc_id);
-       if (!asoc && param.se_assoc_id > SCTP_ALL_ASSOC &&
+       asoc = sctp_id2assoc(sk, param->se_assoc_id);
+       if (!asoc && param->se_assoc_id > SCTP_ALL_ASSOC &&
            sctp_style(sk, UDP))
                return -EINVAL;
 
        if (asoc)
-               return sctp_assoc_ulpevent_type_set(&param, asoc);
+               return sctp_assoc_ulpevent_type_set(param, asoc);
 
        if (sctp_style(sk, TCP))
-               param.se_assoc_id = SCTP_FUTURE_ASSOC;
+               param->se_assoc_id = SCTP_FUTURE_ASSOC;
 
-       if (param.se_assoc_id == SCTP_FUTURE_ASSOC ||
-           param.se_assoc_id == SCTP_ALL_ASSOC)
+       if (param->se_assoc_id == SCTP_FUTURE_ASSOC ||
+           param->se_assoc_id == SCTP_ALL_ASSOC)
                sctp_ulpevent_type_set(&sp->subscribe,
-                                      param.se_type, param.se_on);
+                                      param->se_type, param->se_on);
 
-       if (param.se_assoc_id == SCTP_CURRENT_ASSOC ||
-           param.se_assoc_id == SCTP_ALL_ASSOC) {
+       if (param->se_assoc_id == SCTP_CURRENT_ASSOC ||
+           param->se_assoc_id == SCTP_ALL_ASSOC) {
                list_for_each_entry(asoc, &sp->ep->asocs, asocs) {
-                       int ret = sctp_assoc_ulpevent_type_set(&param, asoc);
+                       int ret = sctp_assoc_ulpevent_type_set(param, asoc);
 
                        if (ret && !retval)
                                retval = ret;
@@ -4518,29 +4304,23 @@ static int sctp_setsockopt_event(struct sock *sk, char __user *optval,
 }
 
 static int sctp_setsockopt_asconf_supported(struct sock *sk,
-                                           char __user *optval,
+                                           struct sctp_assoc_value *params,
                                            unsigned int optlen)
 {
-       struct sctp_assoc_value params;
        struct sctp_association *asoc;
        struct sctp_endpoint *ep;
        int retval = -EINVAL;
 
-       if (optlen != sizeof(params))
+       if (optlen != sizeof(*params))
                goto out;
 
-       if (copy_from_user(&params, optval, optlen)) {
-               retval = -EFAULT;
-               goto out;
-       }
-
-       asoc = sctp_id2assoc(sk, params.assoc_id);
-       if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+       asoc = sctp_id2assoc(sk, params->assoc_id);
+       if (!asoc && params->assoc_id != SCTP_FUTURE_ASSOC &&
            sctp_style(sk, UDP))
                goto out;
 
        ep = sctp_sk(sk)->ep;
-       ep->asconf_enable = !!params.assoc_value;
+       ep->asconf_enable = !!params->assoc_value;
 
        if (ep->asconf_enable && ep->auth_enable) {
                sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF);
@@ -4554,29 +4334,23 @@ out:
 }
 
 static int sctp_setsockopt_auth_supported(struct sock *sk,
-                                         char __user *optval,
+                                         struct sctp_assoc_value *params,
                                          unsigned int optlen)
 {
-       struct sctp_assoc_value params;
        struct sctp_association *asoc;
        struct sctp_endpoint *ep;
        int retval = -EINVAL;
 
-       if (optlen != sizeof(params))
-               goto out;
-
-       if (copy_from_user(&params, optval, optlen)) {
-               retval = -EFAULT;
+       if (optlen != sizeof(*params))
                goto out;
-       }
 
-       asoc = sctp_id2assoc(sk, params.assoc_id);
-       if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+       asoc = sctp_id2assoc(sk, params->assoc_id);
+       if (!asoc && params->assoc_id != SCTP_FUTURE_ASSOC &&
            sctp_style(sk, UDP))
                goto out;
 
        ep = sctp_sk(sk)->ep;
-       if (params.assoc_value) {
+       if (params->assoc_value) {
                retval = sctp_auth_init(ep, GFP_KERNEL);
                if (retval)
                        goto out;
@@ -4586,7 +4360,7 @@ static int sctp_setsockopt_auth_supported(struct sock *sk,
                }
        }
 
-       ep->auth_enable = !!params.assoc_value;
+       ep->auth_enable = !!params->assoc_value;
        retval = 0;
 
 out:
@@ -4594,27 +4368,21 @@ out:
 }
 
 static int sctp_setsockopt_ecn_supported(struct sock *sk,
-                                        char __user *optval,
+                                        struct sctp_assoc_value *params,
                                         unsigned int optlen)
 {
-       struct sctp_assoc_value params;
        struct sctp_association *asoc;
        int retval = -EINVAL;
 
-       if (optlen != sizeof(params))
-               goto out;
-
-       if (copy_from_user(&params, optval, optlen)) {
-               retval = -EFAULT;
+       if (optlen != sizeof(*params))
                goto out;
-       }
 
-       asoc = sctp_id2assoc(sk, params.assoc_id);
-       if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+       asoc = sctp_id2assoc(sk, params->assoc_id);
+       if (!asoc && params->assoc_id != SCTP_FUTURE_ASSOC &&
            sctp_style(sk, UDP))
                goto out;
 
-       sctp_sk(sk)->ep->ecn_enable = !!params.assoc_value;
+       sctp_sk(sk)->ep->ecn_enable = !!params->assoc_value;
        retval = 0;
 
 out:
@@ -4622,33 +4390,27 @@ out:
 }
 
 static int sctp_setsockopt_pf_expose(struct sock *sk,
-                                    char __user *optval,
+                                    struct sctp_assoc_value *params,
                                     unsigned int optlen)
 {
-       struct sctp_assoc_value params;
        struct sctp_association *asoc;
        int retval = -EINVAL;
 
-       if (optlen != sizeof(params))
+       if (optlen != sizeof(*params))
                goto out;
 
-       if (copy_from_user(&params, optval, optlen)) {
-               retval = -EFAULT;
-               goto out;
-       }
-
-       if (params.assoc_value > SCTP_PF_EXPOSE_MAX)
+       if (params->assoc_value > SCTP_PF_EXPOSE_MAX)
                goto out;
 
-       asoc = sctp_id2assoc(sk, params.assoc_id);
-       if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC &&
+       asoc = sctp_id2assoc(sk, params->assoc_id);
+       if (!asoc && params->assoc_id != SCTP_FUTURE_ASSOC &&
            sctp_style(sk, UDP))
                goto out;
 
        if (asoc)
-               asoc->pf_expose = params.assoc_value;
+               asoc->pf_expose = params->assoc_value;
        else
-               sctp_sk(sk)->pf_expose = params.assoc_value;
+               sctp_sk(sk)->pf_expose = params->assoc_value;
        retval = 0;
 
 out:
@@ -4675,8 +4437,9 @@ out:
  *   optlen  - the size of the buffer.
  */
 static int sctp_setsockopt(struct sock *sk, int level, int optname,
-                          char __user *optval, unsigned int optlen)
+                          sockptr_t optval, unsigned int optlen)
 {
+       void *kopt = NULL;
        int retval = 0;
 
        pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname);
@@ -4689,8 +4452,14 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
         */
        if (level != SOL_SCTP) {
                struct sctp_af *af = sctp_sk(sk)->pf->af;
-               retval = af->setsockopt(sk, level, optname, optval, optlen);
-               goto out_nounlock;
+
+               return af->setsockopt(sk, level, optname, optval, optlen);
+       }
+
+       if (optlen > 0) {
+               kopt = memdup_sockptr(optval, optlen);
+               if (IS_ERR(kopt))
+                       return PTR_ERR(kopt);
        }
 
        lock_sock(sk);
@@ -4698,179 +4467,174 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
        switch (optname) {
        case SCTP_SOCKOPT_BINDX_ADD:
                /* 'optlen' is the size of the addresses buffer. */
-               retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
-                                              optlen, SCTP_BINDX_ADD_ADDR);
+               retval = sctp_setsockopt_bindx(sk, kopt, optlen,
+                                              SCTP_BINDX_ADD_ADDR);
                break;
 
        case SCTP_SOCKOPT_BINDX_REM:
                /* 'optlen' is the size of the addresses buffer. */
-               retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval,
-                                              optlen, SCTP_BINDX_REM_ADDR);
+               retval = sctp_setsockopt_bindx(sk, kopt, optlen,
+                                              SCTP_BINDX_REM_ADDR);
                break;
 
        case SCTP_SOCKOPT_CONNECTX_OLD:
                /* 'optlen' is the size of the addresses buffer. */
-               retval = sctp_setsockopt_connectx_old(sk,
-                                           (struct sockaddr __user *)optval,
-                                           optlen);
+               retval = sctp_setsockopt_connectx_old(sk, kopt, optlen);
                break;
 
        case SCTP_SOCKOPT_CONNECTX:
                /* 'optlen' is the size of the addresses buffer. */
-               retval = sctp_setsockopt_connectx(sk,
-                                           (struct sockaddr __user *)optval,
-                                           optlen);
+               retval = sctp_setsockopt_connectx(sk, kopt, optlen);
                break;
 
        case SCTP_DISABLE_FRAGMENTS:
-               retval = sctp_setsockopt_disable_fragments(sk, optval, optlen);
+               retval = sctp_setsockopt_disable_fragments(sk, kopt, optlen);
                break;
 
        case SCTP_EVENTS:
-               retval = sctp_setsockopt_events(sk, optval, optlen);
+               retval = sctp_setsockopt_events(sk, kopt, optlen);
                break;
 
        case SCTP_AUTOCLOSE:
-               retval = sctp_setsockopt_autoclose(sk, optval, optlen);
+               retval = sctp_setsockopt_autoclose(sk, kopt, optlen);
                break;
 
        case SCTP_PEER_ADDR_PARAMS:
-               retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen);
+               retval = sctp_setsockopt_peer_addr_params(sk, kopt, optlen);
                break;
 
        case SCTP_DELAYED_SACK:
-               retval = sctp_setsockopt_delayed_ack(sk, optval, optlen);
+               retval = sctp_setsockopt_delayed_ack(sk, kopt, optlen);
                break;
        case SCTP_PARTIAL_DELIVERY_POINT:
-               retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen);
+               retval = sctp_setsockopt_partial_delivery_point(sk, kopt, optlen);
                break;
 
        case SCTP_INITMSG:
-               retval = sctp_setsockopt_initmsg(sk, optval, optlen);
+               retval = sctp_setsockopt_initmsg(sk, kopt, optlen);
                break;
        case SCTP_DEFAULT_SEND_PARAM:
-               retval = sctp_setsockopt_default_send_param(sk, optval,
-                                                           optlen);
+               retval = sctp_setsockopt_default_send_param(sk, kopt, optlen);
                break;
        case SCTP_DEFAULT_SNDINFO:
-               retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen);
+               retval = sctp_setsockopt_default_sndinfo(sk, kopt, optlen);
                break;
        case SCTP_PRIMARY_ADDR:
-               retval = sctp_setsockopt_primary_addr(sk, optval, optlen);
+               retval = sctp_setsockopt_primary_addr(sk, kopt, optlen);
                break;
        case SCTP_SET_PEER_PRIMARY_ADDR:
-               retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen);
+               retval = sctp_setsockopt_peer_primary_addr(sk, kopt, optlen);
                break;
        case SCTP_NODELAY:
-               retval = sctp_setsockopt_nodelay(sk, optval, optlen);
+               retval = sctp_setsockopt_nodelay(sk, kopt, optlen);
                break;
        case SCTP_RTOINFO:
-               retval = sctp_setsockopt_rtoinfo(sk, optval, optlen);
+               retval = sctp_setsockopt_rtoinfo(sk, kopt, optlen);
                break;
        case SCTP_ASSOCINFO:
-               retval = sctp_setsockopt_associnfo(sk, optval, optlen);
+               retval = sctp_setsockopt_associnfo(sk, kopt, optlen);
                break;
        case SCTP_I_WANT_MAPPED_V4_ADDR:
-               retval = sctp_setsockopt_mappedv4(sk, optval, optlen);
+               retval = sctp_setsockopt_mappedv4(sk, kopt, optlen);
                break;
        case SCTP_MAXSEG:
-               retval = sctp_setsockopt_maxseg(sk, optval, optlen);
+               retval = sctp_setsockopt_maxseg(sk, kopt, optlen);
                break;
        case SCTP_ADAPTATION_LAYER:
-               retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen);
+               retval = sctp_setsockopt_adaptation_layer(sk, kopt, optlen);
                break;
        case SCTP_CONTEXT:
-               retval = sctp_setsockopt_context(sk, optval, optlen);
+               retval = sctp_setsockopt_context(sk, kopt, optlen);
                break;
        case SCTP_FRAGMENT_INTERLEAVE:
-               retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen);
+               retval = sctp_setsockopt_fragment_interleave(sk, kopt, optlen);
                break;
        case SCTP_MAX_BURST:
-               retval = sctp_setsockopt_maxburst(sk, optval, optlen);
+               retval = sctp_setsockopt_maxburst(sk, kopt, optlen);
                break;
        case SCTP_AUTH_CHUNK:
-               retval = sctp_setsockopt_auth_chunk(sk, optval, optlen);
+               retval = sctp_setsockopt_auth_chunk(sk, kopt, optlen);
                break;
        case SCTP_HMAC_IDENT:
-               retval = sctp_setsockopt_hmac_ident(sk, optval, optlen);
+               retval = sctp_setsockopt_hmac_ident(sk, kopt, optlen);
                break;
        case SCTP_AUTH_KEY:
-               retval = sctp_setsockopt_auth_key(sk, optval, optlen);
+               retval = sctp_setsockopt_auth_key(sk, kopt, optlen);
                break;
        case SCTP_AUTH_ACTIVE_KEY:
-               retval = sctp_setsockopt_active_key(sk, optval, optlen);
+               retval = sctp_setsockopt_active_key(sk, kopt, optlen);
                break;
        case SCTP_AUTH_DELETE_KEY:
-               retval = sctp_setsockopt_del_key(sk, optval, optlen);
+               retval = sctp_setsockopt_del_key(sk, kopt, optlen);
                break;
        case SCTP_AUTH_DEACTIVATE_KEY:
-               retval = sctp_setsockopt_deactivate_key(sk, optval, optlen);
+               retval = sctp_setsockopt_deactivate_key(sk, kopt, optlen);
                break;
        case SCTP_AUTO_ASCONF:
-               retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
+               retval = sctp_setsockopt_auto_asconf(sk, kopt, optlen);
                break;
        case SCTP_PEER_ADDR_THLDS:
-               retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen,
+               retval = sctp_setsockopt_paddr_thresholds(sk, kopt, optlen,
                                                          false);
                break;
        case SCTP_PEER_ADDR_THLDS_V2:
-               retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen,
+               retval = sctp_setsockopt_paddr_thresholds(sk, kopt, optlen,
                                                          true);
                break;
        case SCTP_RECVRCVINFO:
-               retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen);
+               retval = sctp_setsockopt_recvrcvinfo(sk, kopt, optlen);
                break;
        case SCTP_RECVNXTINFO:
-               retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen);
+               retval = sctp_setsockopt_recvnxtinfo(sk, kopt, optlen);
                break;
        case SCTP_PR_SUPPORTED:
-               retval = sctp_setsockopt_pr_supported(sk, optval, optlen);
+               retval = sctp_setsockopt_pr_supported(sk, kopt, optlen);
                break;
        case SCTP_DEFAULT_PRINFO:
-               retval = sctp_setsockopt_default_prinfo(sk, optval, optlen);
+               retval = sctp_setsockopt_default_prinfo(sk, kopt, optlen);
                break;
        case SCTP_RECONFIG_SUPPORTED:
-               retval = sctp_setsockopt_reconfig_supported(sk, optval, optlen);
+               retval = sctp_setsockopt_reconfig_supported(sk, kopt, optlen);
                break;
        case SCTP_ENABLE_STREAM_RESET:
-               retval = sctp_setsockopt_enable_strreset(sk, optval, optlen);
+               retval = sctp_setsockopt_enable_strreset(sk, kopt, optlen);
                break;
        case SCTP_RESET_STREAMS:
-               retval = sctp_setsockopt_reset_streams(sk, optval, optlen);
+               retval = sctp_setsockopt_reset_streams(sk, kopt, optlen);
                break;
        case SCTP_RESET_ASSOC:
-               retval = sctp_setsockopt_reset_assoc(sk, optval, optlen);
+               retval = sctp_setsockopt_reset_assoc(sk, kopt, optlen);
                break;
        case SCTP_ADD_STREAMS:
-               retval = sctp_setsockopt_add_streams(sk, optval, optlen);
+               retval = sctp_setsockopt_add_streams(sk, kopt, optlen);
                break;
        case SCTP_STREAM_SCHEDULER:
-               retval = sctp_setsockopt_scheduler(sk, optval, optlen);
+               retval = sctp_setsockopt_scheduler(sk, kopt, optlen);
                break;
        case SCTP_STREAM_SCHEDULER_VALUE:
-               retval = sctp_setsockopt_scheduler_value(sk, optval, optlen);
+               retval = sctp_setsockopt_scheduler_value(sk, kopt, optlen);
                break;
        case SCTP_INTERLEAVING_SUPPORTED:
-               retval = sctp_setsockopt_interleaving_supported(sk, optval,
+               retval = sctp_setsockopt_interleaving_supported(sk, kopt,
                                                                optlen);
                break;
        case SCTP_REUSE_PORT:
-               retval = sctp_setsockopt_reuse_port(sk, optval, optlen);
+               retval = sctp_setsockopt_reuse_port(sk, kopt, optlen);
                break;
        case SCTP_EVENT:
-               retval = sctp_setsockopt_event(sk, optval, optlen);
+               retval = sctp_setsockopt_event(sk, kopt, optlen);
                break;
        case SCTP_ASCONF_SUPPORTED:
-               retval = sctp_setsockopt_asconf_supported(sk, optval, optlen);
+               retval = sctp_setsockopt_asconf_supported(sk, kopt, optlen);
                break;
        case SCTP_AUTH_SUPPORTED:
-               retval = sctp_setsockopt_auth_supported(sk, optval, optlen);
+               retval = sctp_setsockopt_auth_supported(sk, kopt, optlen);
                break;
        case SCTP_ECN_SUPPORTED:
-               retval = sctp_setsockopt_ecn_supported(sk, optval, optlen);
+               retval = sctp_setsockopt_ecn_supported(sk, kopt, optlen);
                break;
        case SCTP_EXPOSE_POTENTIALLY_FAILED_STATE:
-               retval = sctp_setsockopt_pf_expose(sk, optval, optlen);
+               retval = sctp_setsockopt_pf_expose(sk, kopt, optlen);
                break;
        default:
                retval = -ENOPROTOOPT;
@@ -4878,8 +4642,7 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
        }
 
        release_sock(sk);
-
-out_nounlock:
+       kfree(kopt);
        return retval;
 }
 
index 9033215..4ac1d4d 100644 (file)
@@ -1731,7 +1731,7 @@ out:
 }
 
 static int smc_setsockopt(struct socket *sock, int level, int optname,
-                         char __user *optval, unsigned int optlen)
+                         sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct smc_sock *smc;
@@ -1742,8 +1742,11 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
        /* generic setsockopts reaching us here always apply to the
         * CLC socket
         */
-       rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
-                                          optval, optlen);
+       if (unlikely(!smc->clcsock->ops->setsockopt))
+               rc = -EOPNOTSUPP;
+       else
+               rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
+                                                  optval, optlen);
        if (smc->clcsock->sk->sk_err) {
                sk->sk_err = smc->clcsock->sk->sk_err;
                sk->sk_error_report(sk);
@@ -1751,7 +1754,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
 
        if (optlen < sizeof(int))
                return -EINVAL;
-       if (get_user(val, (int __user *)optval))
+       if (copy_from_sockptr(&val, optval, sizeof(int)))
                return -EFAULT;
 
        lock_sock(sk);
@@ -1808,6 +1811,8 @@ static int smc_getsockopt(struct socket *sock, int level, int optname,
 
        smc = smc_sk(sock->sk);
        /* socket options apply to the CLC socket */
+       if (unlikely(!smc->clcsock->ops->getsockopt))
+               return -EOPNOTSUPP;
        return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
                                             optval, optlen);
 }
index d5627df..779f414 100644 (file)
@@ -27,6 +27,7 @@
 
 #define SMCR_CLC_ACCEPT_CONFIRM_LEN 68
 #define SMCD_CLC_ACCEPT_CONFIRM_LEN 48
+#define SMC_CLC_RECV_BUF_LEN   100
 
 /* eye catcher "SMCR" EBCDIC for CLC messages */
 static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'};
@@ -36,7 +37,7 @@ static const char SMCD_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xc4'};
 /* check if received message has a correct header length and contains valid
  * heading and trailing eyecatchers
  */
-static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
+static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm, bool check_trl)
 {
        struct smc_clc_msg_proposal_prefix *pclc_prfx;
        struct smc_clc_msg_accept_confirm *clc;
@@ -49,12 +50,9 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
                return false;
        switch (clcm->type) {
        case SMC_CLC_PROPOSAL:
-               if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
-                   clcm->path != SMC_TYPE_B)
-                       return false;
                pclc = (struct smc_clc_msg_proposal *)clcm;
                pclc_prfx = smc_clc_proposal_get_prefix(pclc);
-               if (ntohs(pclc->hdr.length) !=
+               if (ntohs(pclc->hdr.length) <
                        sizeof(*pclc) + ntohs(pclc->iparea_offset) +
                        sizeof(*pclc_prfx) +
                        pclc_prfx->ipv6_prefixes_cnt *
@@ -86,7 +84,8 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
        default:
                return false;
        }
-       if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
+       if (check_trl &&
+           memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
            memcmp(trl->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
                return false;
        return true;
@@ -276,7 +275,8 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
        struct msghdr msg = {NULL, 0};
        int reason_code = 0;
        struct kvec vec = {buf, buflen};
-       int len, datlen;
+       int len, datlen, recvlen;
+       bool check_trl = true;
        int krflags;
 
        /* peek the first few bytes to determine length of data to receive
@@ -320,10 +320,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
        }
        datlen = ntohs(clcm->length);
        if ((len < sizeof(struct smc_clc_msg_hdr)) ||
-           (datlen > buflen) ||
-           (clcm->version != SMC_CLC_V1) ||
-           (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
-            clcm->path != SMC_TYPE_B) ||
+           (clcm->version < SMC_CLC_V1) ||
            ((clcm->type != SMC_CLC_DECLINE) &&
             (clcm->type != expected_type))) {
                smc->sk.sk_err = EPROTO;
@@ -331,16 +328,38 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
                goto out;
        }
 
+       if (clcm->type == SMC_CLC_PROPOSAL && clcm->path == SMC_TYPE_N)
+               reason_code = SMC_CLC_DECL_VERSMISMAT; /* just V2 offered */
+
        /* receive the complete CLC message */
        memset(&msg, 0, sizeof(struct msghdr));
-       iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, datlen);
+       if (datlen > buflen) {
+               check_trl = false;
+               recvlen = buflen;
+       } else {
+               recvlen = datlen;
+       }
+       iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen);
        krflags = MSG_WAITALL;
        len = sock_recvmsg(smc->clcsock, &msg, krflags);
-       if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
+       if (len < recvlen || !smc_clc_msg_hdr_valid(clcm, check_trl)) {
                smc->sk.sk_err = EPROTO;
                reason_code = -EPROTO;
                goto out;
        }
+       datlen -= len;
+       while (datlen) {
+               u8 tmp[SMC_CLC_RECV_BUF_LEN];
+
+               vec.iov_base = &tmp;
+               vec.iov_len = SMC_CLC_RECV_BUF_LEN;
+               /* receive remaining proposal message */
+               recvlen = datlen > SMC_CLC_RECV_BUF_LEN ?
+                                               SMC_CLC_RECV_BUF_LEN : datlen;
+               iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, recvlen);
+               len = sock_recvmsg(smc->clcsock, &msg, krflags);
+               datlen -= len;
+       }
        if (clcm->type == SMC_CLC_DECLINE) {
                struct smc_clc_msg_decline *dclc;
 
index 4658767..76c2b15 100644 (file)
@@ -25,6 +25,7 @@
 #define SMC_CLC_V1             0x1             /* SMC version                */
 #define SMC_TYPE_R             0               /* SMC-R only                 */
 #define SMC_TYPE_D             1               /* SMC-D only                 */
+#define SMC_TYPE_N             2               /* neither SMC-R nor SMC-D    */
 #define SMC_TYPE_B             3               /* SMC-R and SMC-D            */
 #define CLC_WAIT_TIME          (6 * HZ)        /* max. wait time on clcsock  */
 #define CLC_WAIT_TIME_SHORT    HZ              /* short wait time on clcsock */
@@ -46,6 +47,7 @@
 #define SMC_CLC_DECL_ISMVLANERR        0x03090000  /* err to reg vlan id on ism dev  */
 #define SMC_CLC_DECL_NOACTLINK 0x030a0000  /* no active smc-r link in lgr    */
 #define SMC_CLC_DECL_NOSRVLINK 0x030b0000  /* SMC-R link from srv not found  */
+#define SMC_CLC_DECL_VERSMISMAT        0x030c0000  /* SMC version mismatch           */
 #define SMC_CLC_DECL_SYNCERR   0x04000000  /* synchronization error          */
 #define SMC_CLC_DECL_PEERDECL  0x05000000  /* peer declined during handshake */
 #define SMC_CLC_DECL_INTERR    0x09990000  /* internal error                 */
index 7964a21..f69d205 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/workqueue.h>
 #include <linux/wait.h>
 #include <linux/reboot.h>
+#include <linux/mutex.h>
 #include <net/tcp.h>
 #include <net/sock.h>
 #include <rdma/ib_verbs.h>
@@ -247,7 +248,8 @@ static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
                if (smc_link_usable(lnk))
                        lnk->state = SMC_LNK_INACTIVE;
        }
-       wake_up_interruptible_all(&lgr->llc_waiter);
+       wake_up_all(&lgr->llc_msg_waiter);
+       wake_up_all(&lgr->llc_flow_waiter);
 }
 
 static void smc_lgr_free(struct smc_link_group *lgr);
@@ -1130,18 +1132,19 @@ static void smcr_link_up(struct smc_link_group *lgr,
                        return;
                if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
                        /* some other llc task is ongoing */
-                       wait_event_interruptible_timeout(lgr->llc_waiter,
-                               (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
+                       wait_event_timeout(lgr->llc_flow_waiter,
+                               (list_empty(&lgr->list) ||
+                                lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
                                SMC_LLC_WAIT_TIME);
                }
-               if (list_empty(&lgr->list) ||
-                   !smc_ib_port_active(smcibdev, ibport))
-                       return; /* lgr or device no longer active */
-               link = smc_llc_usable_link(lgr);
-               if (!link)
-                       return;
-               smc_llc_send_add_link(link, smcibdev->mac[ibport - 1], gid,
-                                     NULL, SMC_LLC_REQ);
+               /* lgr or device no longer active? */
+               if (!list_empty(&lgr->list) &&
+                   smc_ib_port_active(smcibdev, ibport))
+                       link = smc_llc_usable_link(lgr);
+               if (link)
+                       smc_llc_send_add_link(link, smcibdev->mac[ibport - 1],
+                                             gid, NULL, SMC_LLC_REQ);
+               wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */
        }
 }
 
@@ -1195,13 +1198,17 @@ static void smcr_link_down(struct smc_link *lnk)
                if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
                        /* another llc task is ongoing */
                        mutex_unlock(&lgr->llc_conf_mutex);
-                       wait_event_interruptible_timeout(lgr->llc_waiter,
-                               (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
+                       wait_event_timeout(lgr->llc_flow_waiter,
+                               (list_empty(&lgr->list) ||
+                                lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
                                SMC_LLC_WAIT_TIME);
                        mutex_lock(&lgr->llc_conf_mutex);
                }
-               smc_llc_send_delete_link(to_lnk, del_link_id, SMC_LLC_REQ, true,
-                                        SMC_LLC_DEL_LOST_PATH);
+               if (!list_empty(&lgr->list))
+                       smc_llc_send_delete_link(to_lnk, del_link_id,
+                                                SMC_LLC_REQ, true,
+                                                SMC_LLC_DEL_LOST_PATH);
+               wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */
        }
 }
 
@@ -1262,7 +1269,7 @@ static void smc_link_down_work(struct work_struct *work)
 
        if (list_empty(&lgr->list))
                return;
-       wake_up_interruptible_all(&lgr->llc_waiter);
+       wake_up_all(&lgr->llc_msg_waiter);
        mutex_lock(&lgr->llc_conf_mutex);
        smcr_link_down(link);
        mutex_unlock(&lgr->llc_conf_mutex);
@@ -1955,20 +1962,20 @@ static void smc_core_going_away(void)
        struct smc_ib_device *smcibdev;
        struct smcd_dev *smcd;
 
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
                int i;
 
                for (i = 0; i < SMC_MAX_PORTS; i++)
                        set_bit(i, smcibdev->ports_going_away);
        }
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
 
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_for_each_entry(smcd, &smcd_dev_list.list, list) {
                smcd->going_away = 1;
        }
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
 }
 
 /* Clean up all SMC link groups */
@@ -1980,10 +1987,10 @@ static void smc_lgrs_shutdown(void)
 
        smc_smcr_terminate_all(NULL);
 
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_for_each_entry(smcd, &smcd_dev_list.list, list)
                smc_smcd_terminate_all(smcd);
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
 }
 
 static int smc_core_reboot_event(struct notifier_block *this,
index 86d160f..c3ff512 100644 (file)
@@ -262,8 +262,10 @@ struct smc_link_group {
                        struct work_struct      llc_del_link_work;
                        struct work_struct      llc_event_work;
                                                /* llc event worker */
-                       wait_queue_head_t       llc_waiter;
+                       wait_queue_head_t       llc_flow_waiter;
                                                /* w4 next llc event */
+                       wait_queue_head_t       llc_msg_waiter;
+                                               /* w4 next llc msg */
                        struct smc_llc_flow     llc_flow_lcl;
                                                /* llc local control field */
                        struct smc_llc_flow     llc_flow_rmt;
index 562a52d..7637fde 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/workqueue.h>
 #include <linux/scatterlist.h>
 #include <linux/wait.h>
+#include <linux/mutex.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_cache.h>
 
@@ -33,7 +34,7 @@
 #define SMC_QP_RNR_RETRY                       7 /* 7: infinite */
 
 struct smc_ib_devices smc_ib_devices = {       /* smc-registered ib devices */
-       .lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock),
+       .mutex = __MUTEX_INITIALIZER(smc_ib_devices.mutex),
        .list = LIST_HEAD_INIT(smc_ib_devices.list),
 };
 
@@ -565,9 +566,9 @@ static int smc_ib_add_dev(struct ib_device *ibdev)
        INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);
        atomic_set(&smcibdev->lnk_cnt, 0);
        init_waitqueue_head(&smcibdev->lnks_deleted);
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_add_tail(&smcibdev->list, &smc_ib_devices.list);
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
        ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
        INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
                              smc_ib_global_event_handler);
@@ -602,9 +603,9 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
 {
        struct smc_ib_device *smcibdev = client_data;
 
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
        pr_warn_ratelimited("smc: removing ib device %s\n",
                            smcibdev->ibdev->name);
        smc_smcr_terminate_all(smcibdev);
index e6a696a..ae6776e 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/if_ether.h>
+#include <linux/mutex.h>
 #include <linux/wait.h>
 #include <rdma/ib_verbs.h>
 #include <net/smc.h>
@@ -25,7 +26,7 @@
 
 struct smc_ib_devices {                        /* list of smc ib devices definition */
        struct list_head        list;
-       spinlock_t              lock;   /* protects list of smc ib devices */
+       struct mutex            mutex;  /* protects list of smc ib devices */
 };
 
 extern struct smc_ib_devices   smc_ib_devices; /* list of smc ib devices */
index 91f85fc..998c525 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/slab.h>
 #include <asm/page.h>
 
@@ -17,7 +18,7 @@
 
 struct smcd_dev_list smcd_dev_list = {
        .list = LIST_HEAD_INIT(smcd_dev_list.list),
-       .lock = __SPIN_LOCK_UNLOCKED(smcd_dev_list.lock)
+       .mutex = __MUTEX_INITIALIZER(smcd_dev_list.mutex)
 };
 
 /* Test if an ISM communication is possible. */
@@ -317,9 +318,9 @@ EXPORT_SYMBOL_GPL(smcd_alloc_dev);
 
 int smcd_register_dev(struct smcd_dev *smcd)
 {
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_add_tail(&smcd->list, &smcd_dev_list.list);
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
 
        pr_warn_ratelimited("smc: adding smcd device %s with pnetid %.16s%s\n",
                            dev_name(&smcd->dev), smcd->pnetid,
@@ -333,9 +334,9 @@ void smcd_unregister_dev(struct smcd_dev *smcd)
 {
        pr_warn_ratelimited("smc: removing smcd device %s\n",
                            dev_name(&smcd->dev));
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_del_init(&smcd->list);
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
        smcd->going_away = 1;
        smc_smcd_terminate_all(smcd);
        flush_workqueue(smcd->event_wq);
index 4da946c..81cc453 100644 (file)
 #define SMCD_ISM_H
 
 #include <linux/uio.h>
+#include <linux/mutex.h>
 
 #include "smc.h"
 
 struct smcd_dev_list { /* List of SMCD devices */
        struct list_head list;
-       spinlock_t lock;        /* Protects list of devices */
+       struct mutex mutex;     /* Protects list of devices */
 };
 
 extern struct smcd_dev_list    smcd_dev_list; /* list of smcd devices */
index 391237b..c1a0386 100644 (file)
@@ -186,6 +186,26 @@ static inline void smc_llc_flow_qentry_set(struct smc_llc_flow *flow,
        flow->qentry = qentry;
 }
 
+static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type,
+                                 struct smc_llc_qentry *qentry)
+{
+       u8 msg_type = qentry->msg.raw.hdr.common.type;
+
+       if ((msg_type == SMC_LLC_ADD_LINK || msg_type == SMC_LLC_DELETE_LINK) &&
+           flow_type != msg_type && !lgr->delayed_event) {
+               lgr->delayed_event = qentry;
+               return;
+       }
+       /* drop parallel or already-in-progress llc requests */
+       if (flow_type != msg_type)
+               pr_warn_once("smc: SMC-R lg %*phN dropped parallel "
+                            "LLC msg: msg %d flow %d role %d\n",
+                            SMC_LGR_ID_SIZE, &lgr->id,
+                            qentry->msg.raw.hdr.common.type,
+                            flow_type, lgr->role);
+       kfree(qentry);
+}
+
 /* try to start a new llc flow, initiated by an incoming llc msg */
 static bool smc_llc_flow_start(struct smc_llc_flow *flow,
                               struct smc_llc_qentry *qentry)
@@ -195,14 +215,7 @@ static bool smc_llc_flow_start(struct smc_llc_flow *flow,
        spin_lock_bh(&lgr->llc_flow_lock);
        if (flow->type) {
                /* a flow is already active */
-               if ((qentry->msg.raw.hdr.common.type == SMC_LLC_ADD_LINK ||
-                    qentry->msg.raw.hdr.common.type == SMC_LLC_DELETE_LINK) &&
-                   !lgr->delayed_event) {
-                       lgr->delayed_event = qentry;
-               } else {
-                       /* forget this llc request */
-                       kfree(qentry);
-               }
+               smc_llc_flow_parallel(lgr, flow->type, qentry);
                spin_unlock_bh(&lgr->llc_flow_lock);
                return false;
        }
@@ -222,8 +235,8 @@ static bool smc_llc_flow_start(struct smc_llc_flow *flow,
        }
        if (qentry == lgr->delayed_event)
                lgr->delayed_event = NULL;
-       spin_unlock_bh(&lgr->llc_flow_lock);
        smc_llc_flow_qentry_set(flow, qentry);
+       spin_unlock_bh(&lgr->llc_flow_lock);
        return true;
 }
 
@@ -251,11 +264,11 @@ again:
                return 0;
        }
        spin_unlock_bh(&lgr->llc_flow_lock);
-       rc = wait_event_interruptible_timeout(lgr->llc_waiter,
-                       (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
-                        (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
-                         lgr->llc_flow_rmt.type == allowed_remote)),
-                       SMC_LLC_WAIT_TIME);
+       rc = wait_event_timeout(lgr->llc_flow_waiter, (list_empty(&lgr->list) ||
+                               (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
+                                (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
+                                 lgr->llc_flow_rmt.type == allowed_remote))),
+                               SMC_LLC_WAIT_TIME * 10);
        if (!rc)
                return -ETIMEDOUT;
        goto again;
@@ -272,7 +285,7 @@ void smc_llc_flow_stop(struct smc_link_group *lgr, struct smc_llc_flow *flow)
            flow == &lgr->llc_flow_lcl)
                schedule_work(&lgr->llc_event_work);
        else
-               wake_up_interruptible(&lgr->llc_waiter);
+               wake_up(&lgr->llc_flow_waiter);
 }
 
 /* lnk is optional and used for early wakeup when link goes down, useful in
@@ -283,26 +296,32 @@ struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr,
                                    int time_out, u8 exp_msg)
 {
        struct smc_llc_flow *flow = &lgr->llc_flow_lcl;
+       u8 rcv_msg;
 
-       wait_event_interruptible_timeout(lgr->llc_waiter,
-                                        (flow->qentry ||
-                                         (lnk && !smc_link_usable(lnk)) ||
-                                         list_empty(&lgr->list)),
-                                        time_out);
+       wait_event_timeout(lgr->llc_msg_waiter,
+                          (flow->qentry ||
+                           (lnk && !smc_link_usable(lnk)) ||
+                           list_empty(&lgr->list)),
+                          time_out);
        if (!flow->qentry ||
            (lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)) {
                smc_llc_flow_qentry_del(flow);
                goto out;
        }
-       if (exp_msg && flow->qentry->msg.raw.hdr.common.type != exp_msg) {
+       rcv_msg = flow->qentry->msg.raw.hdr.common.type;
+       if (exp_msg && rcv_msg != exp_msg) {
                if (exp_msg == SMC_LLC_ADD_LINK &&
-                   flow->qentry->msg.raw.hdr.common.type ==
-                   SMC_LLC_DELETE_LINK) {
+                   rcv_msg == SMC_LLC_DELETE_LINK) {
                        /* flow_start will delay the unexpected msg */
                        smc_llc_flow_start(&lgr->llc_flow_lcl,
                                           smc_llc_flow_qentry_clr(flow));
                        return NULL;
                }
+               pr_warn_once("smc: SMC-R lg %*phN dropped unexpected LLC msg: "
+                            "msg %d exp %d flow %d role %d flags %x\n",
+                            SMC_LGR_ID_SIZE, &lgr->id, rcv_msg, exp_msg,
+                            flow->type, lgr->role,
+                            flow->qentry->msg.raw.hdr.flags);
                smc_llc_flow_qentry_del(flow);
        }
 out:
@@ -1222,8 +1241,8 @@ static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
        smc_llc_send_message(lnk, &qentry->msg); /* response */
 
        if (smc_link_downing(&lnk_del->state)) {
-               smc_switch_conns(lgr, lnk_del, false);
-               smc_wr_tx_wait_no_pending_sends(lnk_del);
+               if (smc_switch_conns(lgr, lnk_del, false))
+                       smc_wr_tx_wait_no_pending_sends(lnk_del);
        }
        smcr_link_clear(lnk_del, true);
 
@@ -1297,8 +1316,8 @@ static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr)
                goto out; /* asymmetric link already deleted */
 
        if (smc_link_downing(&lnk_del->state)) {
-               smc_switch_conns(lgr, lnk_del, false);
-               smc_wr_tx_wait_no_pending_sends(lnk_del);
+               if (smc_switch_conns(lgr, lnk_del, false))
+                       smc_wr_tx_wait_no_pending_sends(lnk_del);
        }
        if (!list_empty(&lgr->list)) {
                /* qentry is either a request from peer (send it back to
@@ -1459,7 +1478,7 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
                                /* a flow is waiting for this message */
                                smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
                                                        qentry);
-                               wake_up_interruptible(&lgr->llc_waiter);
+                               wake_up(&lgr->llc_msg_waiter);
                        } else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
                                                      qentry)) {
                                schedule_work(&lgr->llc_add_link_work);
@@ -1474,7 +1493,7 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
                if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
                        /* a flow is waiting for this message */
                        smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
-                       wake_up_interruptible(&lgr->llc_waiter);
+                       wake_up(&lgr->llc_msg_waiter);
                        return;
                }
                break;
@@ -1485,7 +1504,7 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
                                /* DEL LINK REQ during ADD LINK SEQ */
                                smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
                                                        qentry);
-                               wake_up_interruptible(&lgr->llc_waiter);
+                               wake_up(&lgr->llc_msg_waiter);
                        } else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
                                                      qentry)) {
                                schedule_work(&lgr->llc_del_link_work);
@@ -1496,7 +1515,7 @@ static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
                                /* DEL LINK REQ during ADD LINK SEQ */
                                smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
                                                        qentry);
-                               wake_up_interruptible(&lgr->llc_waiter);
+                               wake_up(&lgr->llc_msg_waiter);
                        } else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
                                                      qentry)) {
                                schedule_work(&lgr->llc_del_link_work);
@@ -1581,7 +1600,7 @@ static void smc_llc_rx_response(struct smc_link *link,
        case SMC_LLC_DELETE_RKEY:
                /* assign responses to the local flow, we requested them */
                smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry);
-               wake_up_interruptible(&link->lgr->llc_waiter);
+               wake_up(&link->lgr->llc_msg_waiter);
                return;
        case SMC_LLC_CONFIRM_RKEY_CONT:
                /* not used because max links is 3 */
@@ -1616,7 +1635,7 @@ static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc)
        spin_lock_irqsave(&lgr->llc_event_q_lock, flags);
        list_add_tail(&qentry->list, &lgr->llc_event_q);
        spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags);
-       schedule_work(&link->lgr->llc_event_work);
+       schedule_work(&lgr->llc_event_work);
 }
 
 /* copy received msg and add it to the event queue */
@@ -1677,7 +1696,8 @@ void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
        INIT_LIST_HEAD(&lgr->llc_event_q);
        spin_lock_init(&lgr->llc_event_q_lock);
        spin_lock_init(&lgr->llc_flow_lock);
-       init_waitqueue_head(&lgr->llc_waiter);
+       init_waitqueue_head(&lgr->llc_flow_waiter);
+       init_waitqueue_head(&lgr->llc_msg_waiter);
        mutex_init(&lgr->llc_conf_mutex);
        lgr->llc_testlink_time = net->ipv4.sysctl_tcp_keepalive_time;
 }
@@ -1686,7 +1706,8 @@ void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
 void smc_llc_lgr_clear(struct smc_link_group *lgr)
 {
        smc_llc_event_flush(lgr);
-       wake_up_interruptible_all(&lgr->llc_waiter);
+       wake_up_all(&lgr->llc_flow_waiter);
+       wake_up_all(&lgr->llc_msg_waiter);
        cancel_work_sync(&lgr->llc_event_work);
        cancel_work_sync(&lgr->llc_add_link_work);
        cancel_work_sync(&lgr->llc_del_link_work);
index 014d91b..30e5fac 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/list.h>
 #include <linux/ctype.h>
+#include <linux/mutex.h>
 #include <net/netlink.h>
 #include <net/genetlink.h>
 
@@ -129,7 +130,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
                return rc;
 
        /* remove ib devices */
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
                for (ibport = 0; ibport < SMC_MAX_PORTS; ibport++) {
                        if (ibdev->pnetid_by_user[ibport] &&
@@ -149,9 +150,9 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
                        }
                }
        }
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
        /* remove smcd devices */
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) {
                if (smcd_dev->pnetid_by_user &&
                    (!pnet_name ||
@@ -165,7 +166,7 @@ static int smc_pnet_remove_by_pnetid(struct net *net, char *pnet_name)
                        rc = 0;
                }
        }
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
        return rc;
 }
 
@@ -240,14 +241,14 @@ static bool smc_pnet_apply_ib(struct smc_ib_device *ib_dev, u8 ib_port,
        u8 pnet_null[SMC_MAX_PNETID_LEN] = {0};
        bool applied = false;
 
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        if (smc_pnet_match(ib_dev->pnetid[ib_port - 1], pnet_null)) {
                memcpy(ib_dev->pnetid[ib_port - 1], pnet_name,
                       SMC_MAX_PNETID_LEN);
                ib_dev->pnetid_by_user[ib_port - 1] = true;
                applied = true;
        }
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
        return applied;
 }
 
@@ -258,13 +259,13 @@ static bool smc_pnet_apply_smcd(struct smcd_dev *smcd_dev, char *pnet_name)
        u8 pnet_null[SMC_MAX_PNETID_LEN] = {0};
        bool applied = false;
 
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        if (smc_pnet_match(smcd_dev->pnetid, pnet_null)) {
                memcpy(smcd_dev->pnetid, pnet_name, SMC_MAX_PNETID_LEN);
                smcd_dev->pnetid_by_user = true;
                applied = true;
        }
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
        return applied;
 }
 
@@ -300,7 +301,7 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
 {
        struct smc_ib_device *ibdev;
 
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
                if (!strncmp(ibdev->ibdev->name, ib_name,
                             sizeof(ibdev->ibdev->name)) ||
@@ -311,7 +312,7 @@ static struct smc_ib_device *smc_pnet_find_ib(char *ib_name)
        }
        ibdev = NULL;
 out:
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
        return ibdev;
 }
 
@@ -320,7 +321,7 @@ static struct smcd_dev *smc_pnet_find_smcd(char *smcd_name)
 {
        struct smcd_dev *smcd_dev;
 
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_for_each_entry(smcd_dev, &smcd_dev_list.list, list) {
                if (!strncmp(dev_name(&smcd_dev->dev), smcd_name,
                             IB_DEVICE_NAME_MAX - 1))
@@ -328,7 +329,7 @@ static struct smcd_dev *smc_pnet_find_smcd(char *smcd_name)
        }
        smcd_dev = NULL;
 out:
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
        return smcd_dev;
 }
 
@@ -825,7 +826,7 @@ static void _smc_pnet_find_roce_by_pnetid(u8 *pnet_id,
        int i;
 
        ini->ib_dev = NULL;
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
                if (ibdev == known_dev)
                        continue;
@@ -844,7 +845,7 @@ static void _smc_pnet_find_roce_by_pnetid(u8 *pnet_id,
                }
        }
 out:
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
 }
 
 /* find alternate roce device with same pnet_id and vlan_id */
@@ -863,7 +864,7 @@ static void smc_pnet_find_rdma_dev(struct net_device *netdev,
 {
        struct smc_ib_device *ibdev;
 
-       spin_lock(&smc_ib_devices.lock);
+       mutex_lock(&smc_ib_devices.mutex);
        list_for_each_entry(ibdev, &smc_ib_devices.list, list) {
                struct net_device *ndev;
                int i;
@@ -888,7 +889,7 @@ static void smc_pnet_find_rdma_dev(struct net_device *netdev,
                        }
                }
        }
-       spin_unlock(&smc_ib_devices.lock);
+       mutex_unlock(&smc_ib_devices.mutex);
 }
 
 /* Determine the corresponding IB device port based on the hardware PNETID.
@@ -924,7 +925,7 @@ static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
            smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid))
                return; /* pnetid could not be determined */
 
-       spin_lock(&smcd_dev_list.lock);
+       mutex_lock(&smcd_dev_list.mutex);
        list_for_each_entry(ismdev, &smcd_dev_list.list, list) {
                if (smc_pnet_match(ismdev->pnetid, ndev_pnetid) &&
                    !ismdev->going_away) {
@@ -932,7 +933,7 @@ static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
                        break;
                }
        }
-       spin_unlock(&smcd_dev_list.lock);
+       mutex_unlock(&smcd_dev_list.mutex);
 }
 
 /* PNET table analysis for a given sock:
index 7239ba9..1e23cdd 100644 (file)
@@ -169,6 +169,8 @@ void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
 static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
 {
        *idx = link->wr_tx_cnt;
+       if (!smc_link_usable(link))
+               return -ENOLINK;
        for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
                if (!test_and_set_bit(*idx, link->wr_tx_mask))
                        return 0;
@@ -560,15 +562,15 @@ void smc_wr_free_link(struct smc_link *lnk)
 {
        struct ib_device *ibdev;
 
+       if (!lnk->smcibdev)
+               return;
+       ibdev = lnk->smcibdev->ibdev;
+
        if (smc_wr_tx_wait_no_pending_sends(lnk))
                memset(lnk->wr_tx_mask, 0,
                       BITS_TO_LONGS(SMC_WR_BUF_CNT) *
                                                sizeof(*lnk->wr_tx_mask));
 
-       if (!lnk->smcibdev)
-               return;
-       ibdev = lnk->smcibdev->ibdev;
-
        if (lnk->wr_rx_dma_addr) {
                ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
                                    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
index 976426d..94ca454 100644 (file)
@@ -586,15 +586,6 @@ struct socket *sock_alloc(void)
 }
 EXPORT_SYMBOL(sock_alloc);
 
-/**
- *     sock_release - close a socket
- *     @sock: socket to close
- *
- *     The socket is released from the protocol stack if it has a release
- *     callback, and the inode is then released if the socket is bound to
- *     an inode not a file.
- */
-
 static void __sock_release(struct socket *sock, struct inode *inode)
 {
        if (sock->ops) {
@@ -620,6 +611,14 @@ static void __sock_release(struct socket *sock, struct inode *inode)
        sock->file = NULL;
 }
 
+/**
+ *     sock_release - close a socket
+ *     @sock: socket to close
+ *
+ *     The socket is released from the protocol stack if it has a release
+ *     callback, and the inode is then released if the socket is bound to
+ *     an inode not a file.
+ */
 void sock_release(struct socket *sock)
 {
        __sock_release(sock, NULL);
@@ -2080,15 +2079,25 @@ SYSCALL_DEFINE4(recv, int, fd, void __user *, ubuf, size_t, size,
        return __sys_recvfrom(fd, ubuf, size, flags, NULL, NULL);
 }
 
+static bool sock_use_custom_sol_socket(const struct socket *sock)
+{
+       const struct sock *sk = sock->sk;
+
+       /* Use sock->ops->setsockopt() for MPTCP */
+       return IS_ENABLED(CONFIG_MPTCP) &&
+              sk->sk_protocol == IPPROTO_MPTCP &&
+              sk->sk_type == SOCK_STREAM &&
+              (sk->sk_family == AF_INET || sk->sk_family == AF_INET6);
+}
+
 /*
  *     Set a socket option. Because we don't know the option lengths we have
  *     to pass the user mode parameter for the protocols to sort out.
  */
-
-static int __sys_setsockopt(int fd, int level, int optname,
-                           char __user *optval, int optlen)
+int __sys_setsockopt(int fd, int level, int optname, char __user *user_optval,
+               int optlen)
 {
-       mm_segment_t oldfs = get_fs();
+       sockptr_t optval;
        char *kernel_optval = NULL;
        int err, fput_needed;
        struct socket *sock;
@@ -2096,44 +2105,41 @@ static int __sys_setsockopt(int fd, int level, int optname,
        if (optlen < 0)
                return -EINVAL;
 
-       sock = sockfd_lookup_light(fd, &err, &fput_needed);
-       if (sock != NULL) {
-               err = security_socket_setsockopt(sock, level, optname);
-               if (err)
-                       goto out_put;
+       err = init_user_sockptr(&optval, user_optval);
+       if (err)
+               return err;
 
-               err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock->sk, &level,
-                                                    &optname, optval, &optlen,
-                                                    &kernel_optval);
+       sock = sockfd_lookup_light(fd, &err, &fput_needed);
+       if (!sock)
+               return err;
 
-               if (err < 0) {
-                       goto out_put;
-               } else if (err > 0) {
-                       err = 0;
-                       goto out_put;
-               }
+       err = security_socket_setsockopt(sock, level, optname);
+       if (err)
+               goto out_put;
 
-               if (kernel_optval) {
-                       set_fs(KERNEL_DS);
-                       optval = (char __user __force *)kernel_optval;
-               }
+       if (!in_compat_syscall())
+               err = BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock->sk, &level, &optname,
+                                                    user_optval, &optlen,
+                                                    &kernel_optval);
+       if (err < 0)
+               goto out_put;
+       if (err > 0) {
+               err = 0;
+               goto out_put;
+       }
 
-               if (level == SOL_SOCKET)
-                       err =
-                           sock_setsockopt(sock, level, optname, optval,
+       if (kernel_optval)
+               optval = KERNEL_SOCKPTR(kernel_optval);
+       if (level == SOL_SOCKET && !sock_use_custom_sol_socket(sock))
+               err = sock_setsockopt(sock, level, optname, optval, optlen);
+       else if (unlikely(!sock->ops->setsockopt))
+               err = -EOPNOTSUPP;
+       else
+               err = sock->ops->setsockopt(sock, level, optname, optval,
                                            optlen);
-               else
-                       err =
-                           sock->ops->setsockopt(sock, level, optname, optval,
-                                                 optlen);
-
-               if (kernel_optval) {
-                       set_fs(oldfs);
-                       kfree(kernel_optval);
-               }
+       kfree(kernel_optval);
 out_put:
-               fput_light(sock->file, fput_needed);
-       }
+       fput_light(sock->file, fput_needed);
        return err;
 }
 
@@ -2147,37 +2153,38 @@ SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
  *     Get a socket option. Because we don't know the option lengths we have
  *     to pass a user mode parameter for the protocols to sort out.
  */
-
-static int __sys_getsockopt(int fd, int level, int optname,
-                           char __user *optval, int __user *optlen)
+int __sys_getsockopt(int fd, int level, int optname, char __user *optval,
+               int __user *optlen)
 {
        int err, fput_needed;
        struct socket *sock;
        int max_optlen;
 
        sock = sockfd_lookup_light(fd, &err, &fput_needed);
-       if (sock != NULL) {
-               err = security_socket_getsockopt(sock, level, optname);
-               if (err)
-                       goto out_put;
+       if (!sock)
+               return err;
 
+       err = security_socket_getsockopt(sock, level, optname);
+       if (err)
+               goto out_put;
+
+       if (!in_compat_syscall())
                max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen);
 
-               if (level == SOL_SOCKET)
-                       err =
-                           sock_getsockopt(sock, level, optname, optval,
+       if (level == SOL_SOCKET)
+               err = sock_getsockopt(sock, level, optname, optval, optlen);
+       else if (unlikely(!sock->ops->getsockopt))
+               err = -EOPNOTSUPP;
+       else
+               err = sock->ops->getsockopt(sock, level, optname, optval,
                                            optlen);
-               else
-                       err =
-                           sock->ops->getsockopt(sock, level, optname, optval,
-                                                 optlen);
 
+       if (!in_compat_syscall())
                err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock->sk, level, optname,
-                                                    optval, optlen,
-                                                    max_optlen, err);
+                                                    optval, optlen, max_optlen,
+                                                    err);
 out_put:
-               fput_light(sock->file, fput_needed);
-       }
+       fput_light(sock->file, fput_needed);
        return err;
 }
 
index 39e14d5..e9d0953 100644 (file)
@@ -1317,6 +1317,7 @@ rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data)
        q.len = strlen(gssd_dummy_clnt_dir[0].name);
        clnt_dentry = d_hash_and_lookup(gssd_dentry, &q);
        if (!clnt_dentry) {
+               __rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1);
                pipe_dentry = ERR_PTR(-ENOENT);
                goto out;
        }
index 5c4ec93..c537272 100644 (file)
@@ -44,6 +44,7 @@
 #include <net/tcp.h>
 #include <net/tcp_states.h>
 #include <linux/uaccess.h>
+#include <linux/highmem.h>
 #include <asm/ioctls.h>
 
 #include <linux/sunrpc/types.h>
index 6f7d82f..be11d67 100644 (file)
@@ -1118,6 +1118,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
                base = 0;
        } else {
                base -= buf->head[0].iov_len;
+               subbuf->head[0].iov_base = buf->head[0].iov_base;
                subbuf->head[0].iov_len = 0;
        }
 
@@ -1130,6 +1131,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
                base = 0;
        } else {
                base -= buf->page_len;
+               subbuf->pages = buf->pages;
+               subbuf->page_base = 0;
                subbuf->page_len = 0;
        }
 
@@ -1141,6 +1144,7 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
                base = 0;
        } else {
                base -= buf->tail[0].iov_len;
+               subbuf->tail[0].iov_base = buf->tail[0].iov_base;
                subbuf->tail[0].iov_len = 0;
        }
 
index ef99788..b647562 100644 (file)
@@ -367,7 +367,7 @@ static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
        trace_xprtrdma_wc_fastreg(wc, frwr);
        /* The MR will get recycled when the associated req is retransmitted */
 
-       rpcrdma_flush_disconnect(cq, wc);
+       rpcrdma_flush_disconnect(cq->cq_context, wc);
 }
 
 /**
@@ -452,7 +452,7 @@ static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
        trace_xprtrdma_wc_li(wc, frwr);
        __frwr_release_mr(wc, mr);
 
-       rpcrdma_flush_disconnect(cq, wc);
+       rpcrdma_flush_disconnect(cq->cq_context, wc);
 }
 
 /**
@@ -474,7 +474,7 @@ static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
        __frwr_release_mr(wc, mr);
        complete(&frwr->fr_linv_done);
 
-       rpcrdma_flush_disconnect(cq, wc);
+       rpcrdma_flush_disconnect(cq->cq_context, wc);
 }
 
 /**
@@ -582,7 +582,7 @@ static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
        smp_rmb();
        rpcrdma_complete_rqst(rep);
 
-       rpcrdma_flush_disconnect(cq, wc);
+       rpcrdma_flush_disconnect(cq->cq_context, wc);
 }
 
 /**
index 2081c8f..935bbef 100644 (file)
@@ -1349,8 +1349,7 @@ rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
                        be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
        }
 
-       r_xprt->rx_stats.bad_reply_count++;
-       return -EREMOTEIO;
+       return -EIO;
 }
 
 /* Perform XID lookup, reconstruction of the RPC reply, and
@@ -1387,13 +1386,11 @@ out:
        spin_unlock(&xprt->queue_lock);
        return;
 
-/* If the incoming reply terminated a pending RPC, the next
- * RPC call will post a replacement receive buffer as it is
- * being marshaled.
- */
 out_badheader:
        trace_xprtrdma_reply_hdr(rep);
        r_xprt->rx_stats.bad_reply_count++;
+       rqst->rq_task->tk_status = status;
+       status = 0;
        goto out;
 }
 
index 0c4af7f..14165b6 100644 (file)
@@ -242,7 +242,7 @@ xprt_rdma_connect_worker(struct work_struct *work)
 
        rc = rpcrdma_xprt_connect(r_xprt);
        xprt_clear_connecting(xprt);
-       if (r_xprt->rx_ep && r_xprt->rx_ep->re_connect_status > 0) {
+       if (!rc) {
                xprt->connect_cookie++;
                xprt->stat.connect_count++;
                xprt->stat.connect_time += (long)jiffies -
index 2ae3483..2198c8e 100644 (file)
@@ -84,7 +84,8 @@ static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep);
 static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt);
 static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
 static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
-static int rpcrdma_ep_destroy(struct rpcrdma_ep *ep);
+static void rpcrdma_ep_get(struct rpcrdma_ep *ep);
+static int rpcrdma_ep_put(struct rpcrdma_ep *ep);
 static struct rpcrdma_regbuf *
 rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
                     gfp_t flags);
@@ -97,7 +98,8 @@ static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);
  */
 static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
 {
-       struct rdma_cm_id *id = r_xprt->rx_ep->re_id;
+       struct rpcrdma_ep *ep = r_xprt->rx_ep;
+       struct rdma_cm_id *id = ep->re_id;
 
        /* Flush Receives, then wait for deferred Reply work
         * to complete.
@@ -108,6 +110,8 @@ static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
         * local invalidations.
         */
        ib_drain_sq(id->qp);
+
+       rpcrdma_ep_put(ep);
 }
 
 /**
@@ -126,23 +130,27 @@ static void rpcrdma_qp_event_handler(struct ib_event *event, void *context)
        trace_xprtrdma_qp_event(ep, event);
 }
 
+/* Ensure xprt_force_disconnect() is invoked exactly once when a
+ * connection is closed or lost. (The important thing is it needs
+ * to be invoked "at least" once).
+ */
+static void rpcrdma_force_disconnect(struct rpcrdma_ep *ep)
+{
+       if (atomic_add_unless(&ep->re_force_disconnect, 1, 1))
+               xprt_force_disconnect(ep->re_xprt);
+}
+
 /**
  * rpcrdma_flush_disconnect - Disconnect on flushed completion
- * @cq: completion queue
+ * @r_xprt: transport to disconnect
  * @wc: work completion entry
  *
  * Must be called in process context.
  */
-void rpcrdma_flush_disconnect(struct ib_cq *cq, struct ib_wc *wc)
+void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc)
 {
-       struct rpcrdma_xprt *r_xprt = cq->cq_context;
-       struct rpc_xprt *xprt = &r_xprt->rx_xprt;
-
-       if (wc->status != IB_WC_SUCCESS &&
-           r_xprt->rx_ep->re_connect_status == 1) {
-               r_xprt->rx_ep->re_connect_status = -ECONNABORTED;
-               xprt_force_disconnect(xprt);
-       }
+       if (wc->status != IB_WC_SUCCESS)
+               rpcrdma_force_disconnect(r_xprt->rx_ep);
 }
 
 /**
@@ -156,11 +164,12 @@ static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_sendctx *sc =
                container_of(cqe, struct rpcrdma_sendctx, sc_cqe);
+       struct rpcrdma_xprt *r_xprt = cq->cq_context;
 
        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_send(sc, wc);
-       rpcrdma_sendctx_put_locked((struct rpcrdma_xprt *)cq->cq_context, sc);
-       rpcrdma_flush_disconnect(cq, wc);
+       rpcrdma_sendctx_put_locked(r_xprt, sc);
+       rpcrdma_flush_disconnect(r_xprt, wc);
 }
 
 /**
@@ -195,7 +204,7 @@ static void rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
        return;
 
 out_flushed:
-       rpcrdma_flush_disconnect(cq, wc);
+       rpcrdma_flush_disconnect(r_xprt, wc);
        rpcrdma_rep_destroy(rep);
 }
 
@@ -239,7 +248,6 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
 {
        struct sockaddr *sap = (struct sockaddr *)&id->route.addr.dst_addr;
        struct rpcrdma_ep *ep = id->context;
-       struct rpc_xprt *xprt = ep->re_xprt;
 
        might_sleep();
 
@@ -263,10 +271,9 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
                /* fall through */
        case RDMA_CM_EVENT_ADDR_CHANGE:
                ep->re_connect_status = -ENODEV;
-               xprt_force_disconnect(xprt);
                goto disconnected;
        case RDMA_CM_EVENT_ESTABLISHED:
-               kref_get(&ep->re_kref);
+               rpcrdma_ep_get(ep);
                ep->re_connect_status = 1;
                rpcrdma_update_cm_private(ep, &event->param.conn);
                trace_xprtrdma_inline_thresh(ep);
@@ -288,8 +295,8 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
        case RDMA_CM_EVENT_DISCONNECTED:
                ep->re_connect_status = -ECONNABORTED;
 disconnected:
-               xprt_force_disconnect(xprt);
-               return rpcrdma_ep_destroy(ep);
+               rpcrdma_force_disconnect(ep);
+               return rpcrdma_ep_put(ep);
        default:
                break;
        }
@@ -345,7 +352,7 @@ out:
        return ERR_PTR(rc);
 }
 
-static void rpcrdma_ep_put(struct kref *kref)
+static void rpcrdma_ep_destroy(struct kref *kref)
 {
        struct rpcrdma_ep *ep = container_of(kref, struct rpcrdma_ep, re_kref);
 
@@ -369,13 +376,18 @@ static void rpcrdma_ep_put(struct kref *kref)
        module_put(THIS_MODULE);
 }
 
+static noinline void rpcrdma_ep_get(struct rpcrdma_ep *ep)
+{
+       kref_get(&ep->re_kref);
+}
+
 /* Returns:
  *     %0 if @ep still has a positive kref count, or
  *     %1 if @ep was destroyed successfully.
  */
-static int rpcrdma_ep_destroy(struct rpcrdma_ep *ep)
+static noinline int rpcrdma_ep_put(struct rpcrdma_ep *ep)
 {
-       return kref_put(&ep->re_kref, rpcrdma_ep_put);
+       return kref_put(&ep->re_kref, rpcrdma_ep_destroy);
 }
 
 static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
@@ -492,7 +504,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
        return 0;
 
 out_destroy:
-       rpcrdma_ep_destroy(ep);
+       rpcrdma_ep_put(ep);
        rdma_destroy_id(id);
 out_free:
        kfree(ep);
@@ -519,10 +531,13 @@ retry:
                return rc;
        ep = r_xprt->rx_ep;
 
-       ep->re_connect_status = 0;
        xprt_clear_connected(xprt);
-
        rpcrdma_reset_cwnd(r_xprt);
+
+       /* Bump the ep's reference count while there are
+        * outstanding Receives.
+        */
+       rpcrdma_ep_get(ep);
        rpcrdma_post_recvs(r_xprt, true);
 
        rc = rpcrdma_sendctxs_create(r_xprt);
@@ -552,8 +567,6 @@ retry:
        rpcrdma_mrs_create(r_xprt);
 
 out:
-       if (rc)
-               ep->re_connect_status = rc;
        trace_xprtrdma_connect(r_xprt, rc);
        return rc;
 }
@@ -587,7 +600,7 @@ void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt)
        rpcrdma_mrs_destroy(r_xprt);
        rpcrdma_sendctxs_destroy(r_xprt);
 
-       if (rpcrdma_ep_destroy(ep))
+       if (rpcrdma_ep_put(ep))
                rdma_destroy_id(id);
 
        r_xprt->rx_ep = NULL;
index 0a16fdb..43974ef 100644 (file)
@@ -82,6 +82,7 @@ struct rpcrdma_ep {
        unsigned int            re_max_inline_recv;
        int                     re_async_rc;
        int                     re_connect_status;
+       atomic_t                re_force_disconnect;
        struct ib_qp_init_attr  re_attr;
        wait_queue_head_t       re_connect_wait;
        struct rpc_xprt         *re_xprt;
@@ -446,7 +447,7 @@ extern unsigned int xprt_rdma_memreg_strategy;
 /*
  * Endpoint calls - xprtrdma/verbs.c
  */
-void rpcrdma_flush_disconnect(struct ib_cq *cq, struct ib_wc *wc);
+void rpcrdma_flush_disconnect(struct rpcrdma_xprt *r_xprt, struct ib_wc *wc);
 int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt);
 void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt);
 
index f25604d..865f3e0 100644 (file)
@@ -304,8 +304,8 @@ static int switchdev_port_obj_add_defer(struct net_device *dev,
  *     switchdev_port_obj_add - Add port object
  *
  *     @dev: port device
- *     @id: object ID
  *     @obj: object to add
+ *     @extack: netlink extended ack
  *
  *     Use a 2-phase prepare-commit transaction model to ensure
  *     system is not left in a partially updated state due to
@@ -357,7 +357,6 @@ static int switchdev_port_obj_del_defer(struct net_device *dev,
  *     switchdev_port_obj_del - Delete port object
  *
  *     @dev: port device
- *     @id: object ID
  *     @obj: object to delete
  *
  *     rtnl_lock must be held and must not be in atomic section,
index 383f87b..940d176 100644 (file)
@@ -250,8 +250,8 @@ static void tipc_bcast_select_xmit_method(struct net *net, int dests,
  * Consumes the buffer chain.
  * Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE
  */
-static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
-                          u16 *cong_link_cnt)
+int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
+                   u16 *cong_link_cnt)
 {
        struct tipc_link *l = tipc_bc_sndlink(net);
        struct sk_buff_head xmitq;
@@ -752,7 +752,7 @@ void tipc_nlist_purge(struct tipc_nlist *nl)
        nl->local = false;
 }
 
-u32 tipc_bcast_get_broadcast_mode(struct net *net)
+u32 tipc_bcast_get_mode(struct net *net)
 {
        struct tipc_bc_base *bb = tipc_bc_base(net);
 
index 4240c95..2d9352d 100644 (file)
@@ -90,6 +90,8 @@ void tipc_bcast_toggle_rcast(struct net *net, bool supp);
 int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
                    struct tipc_mc_method *method, struct tipc_nlist *dests,
                    u16 *cong_link_cnt);
+int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
+                   u16 *cong_link_cnt);
 int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb);
 void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
                        struct tipc_msg *hdr);
@@ -101,7 +103,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg,
 int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]);
 int tipc_bclink_reset_stats(struct net *net, struct tipc_link *l);
 
-u32 tipc_bcast_get_broadcast_mode(struct net *net);
+u32 tipc_bcast_get_mode(struct net *net);
 u32 tipc_bcast_get_broadcast_ratio(struct net *net);
 
 void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head *defq,
index e366ec9..808b147 100644 (file)
@@ -595,7 +595,7 @@ void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
 
 /**
  * tipc_l2_rcv_msg - handle incoming TIPC message from an interface
- * @buf: the received packet
+ * @skb: the received message
  * @dev: the net device that the packet was received on
  * @pt: the packet_type structure which was used to register this handler
  * @orig_dev: the original receive net device in case the device is a bond
index bfe43da..d4ecacd 100644 (file)
@@ -74,7 +74,7 @@ struct tipc_discoverer {
 /**
  * tipc_disc_init_msg - initialize a link setup message
  * @net: the applicable net namespace
- * @type: message type (request or response)
+ * @mtyp: message type (request or response)
  * @b: ptr to bearer issuing message
  */
 static void tipc_disc_init_msg(struct net *net, struct sk_buff *skb,
@@ -339,7 +339,7 @@ exit:
  * @net: the applicable net namespace
  * @b: ptr to bearer issuing requests
  * @dest: destination address for request messages
- * @dest_domain: network domain to which links can be established
+ * @skb: pointer to created frame
  *
  * Returns 0 if successful, otherwise -errno.
  */
@@ -393,7 +393,6 @@ void tipc_disc_delete(struct tipc_discoverer *d)
  * tipc_disc_reset - reset object to send periodic link setup requests
  * @net: the applicable net namespace
  * @b: ptr to bearer issuing requests
- * @dest_domain: network domain to which links can be established
  */
 void tipc_disc_reset(struct net *net, struct tipc_bearer *b)
 {
index ee3b8d0..d46d4ee 100644 (file)
@@ -445,7 +445,7 @@ u32 tipc_link_state(struct tipc_link *l)
 
 /**
  * tipc_link_create - create a new link
- * @n: pointer to associated node
+ * @net: pointer to associated network namespace
  * @if_name: associated interface name
  * @bearer_id: id (index) of associated bearer
  * @tolerance: link tolerance to be used by link
@@ -530,7 +530,7 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
 
 /**
  * tipc_link_bc_create - create new link to be used for broadcast
- * @n: pointer to associated node
+ * @net: pointer to associated network namespace
  * @mtu: mtu to be used initially if no peers
  * @window: send window to be used
  * @inputq: queue to put messages ready for delivery
@@ -921,6 +921,21 @@ static void link_prepare_wakeup(struct tipc_link *l)
 
 }
 
+/**
+ * tipc_link_set_skb_retransmit_time - set the time at which retransmission of
+ *                                     the given skb should be next attempted
+ * @skb: skb to set a future retransmission time for
+ * @l: link the skb will be transmitted on
+ */
+static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb,
+                                             struct tipc_link *l)
+{
+       if (link_is_bc_sndlink(l))
+               TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
+       else
+               TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
+}
+
 void tipc_link_reset(struct tipc_link *l)
 {
        struct sk_buff_head list;
@@ -974,7 +989,7 @@ void tipc_link_reset(struct tipc_link *l)
 
 /**
  * tipc_link_xmit(): enqueue buffer list according to queue situation
- * @link: link to use
+ * @l: link to use
  * @list: chain of buffers containing message
  * @xmitq: returned list of packets to be sent by caller
  *
@@ -1036,9 +1051,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                                return -ENOBUFS;
                        }
                        __skb_queue_tail(transmq, skb);
-                       /* next retransmit attempt */
-                       if (link_is_bc_sndlink(l))
-                               TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
+                       tipc_link_set_skb_retransmit_time(skb, l);
                        __skb_queue_tail(xmitq, _skb);
                        TIPC_SKB_CB(skb)->ackers = l->ackers;
                        l->rcv_unacked = 0;
@@ -1139,9 +1152,7 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
                if (unlikely(skb == l->backlog[imp].target_bskb))
                        l->backlog[imp].target_bskb = NULL;
                __skb_queue_tail(&l->transmq, skb);
-               /* next retransmit attempt */
-               if (link_is_bc_sndlink(l))
-                       TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
+               tipc_link_set_skb_retransmit_time(skb, l);
 
                __skb_queue_tail(xmitq, _skb);
                TIPC_SKB_CB(skb)->ackers = l->ackers;
@@ -1385,12 +1396,12 @@ u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
                p = (struct tipc_gap_ack_blks *)msg_data(hdr);
                sz = ntohs(p->len);
                /* Sanity check */
-               if (sz == tipc_gap_ack_blks_sz(p->ugack_cnt + p->bgack_cnt)) {
+               if (sz == struct_size(p, gacks, p->ugack_cnt + p->bgack_cnt)) {
                        /* Good, check if the desired type exists */
                        if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
                                goto ok;
                /* Backward compatible: peer might not support bc, but uc? */
-               } else if (uc && sz == tipc_gap_ack_blks_sz(p->ugack_cnt)) {
+               } else if (uc && sz == struct_size(p, gacks, p->ugack_cnt)) {
                        if (p->ugack_cnt) {
                                p->bgack_cnt = 0;
                                goto ok;
@@ -1472,7 +1483,7 @@ static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
                        __tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;
 
        /* Total len */
-       len = tipc_gap_ack_blks_sz(ga->bgack_cnt + ga->ugack_cnt);
+       len = struct_size(ga, gacks, ga->bgack_cnt + ga->ugack_cnt);
        ga->len = htons(len);
        return len;
 }
@@ -1521,7 +1532,7 @@ static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
                gacks = &ga->gacks[ga->bgack_cnt];
        } else if (ga) {
                /* Copy the Gap ACKs, bc part, for later renewal if needed */
-               this_ga = kmemdup(ga, tipc_gap_ack_blks_sz(ga->bgack_cnt),
+               this_ga = kmemdup(ga, struct_size(ga, gacks, ga->bgack_cnt),
                                  GFP_ATOMIC);
                if (likely(this_ga)) {
                        this_ga->start_index = 0;
@@ -1584,8 +1595,7 @@ release:
                        /* retransmit skb if unrestricted*/
                        if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
                                continue;
-                       TIPC_SKB_CB(skb)->nxt_retr = (is_uc) ?
-                                       TIPC_UC_RETR_TIME : TIPC_BC_RETR_LIM;
+                       tipc_link_set_skb_retransmit_time(skb, l);
                        _skb = pskb_copy(skb, GFP_ATOMIC);
                        if (!_skb)
                                continue;
@@ -2745,7 +2755,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg,
        void *hdr;
        struct nlattr *attrs;
        struct nlattr *prop;
-       u32 bc_mode = tipc_bcast_get_broadcast_mode(net);
+       u32 bc_mode = tipc_bcast_get_mode(net);
        u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);
 
        if (!bcl)
index 01b6486..848fae6 100644 (file)
@@ -202,7 +202,7 @@ err:
 
 /**
  * tipc_msg_append(): Append data to tail of an existing buffer queue
- * @hdr: header to be used
+ * @_hdr: header to be used
  * @m: the data to be appended
  * @mss: max allowable size of buffer
  * @dlen: size of data to be appended
index 58660d5..1016e96 100644 (file)
@@ -189,11 +189,9 @@ struct tipc_gap_ack_blks {
        struct tipc_gap_ack gacks[];
 };
 
-#define tipc_gap_ack_blks_sz(n) (sizeof(struct tipc_gap_ack_blks) + \
-                                sizeof(struct tipc_gap_ack) * (n))
-
 #define MAX_GAP_ACK_BLKS       128
-#define MAX_GAP_ACK_BLKS_SZ    tipc_gap_ack_blks_sz(MAX_GAP_ACK_BLKS)
+#define MAX_GAP_ACK_BLKS_SZ    (sizeof(struct tipc_gap_ack_blks) + \
+                                sizeof(struct tipc_gap_ack) * MAX_GAP_ACK_BLKS)
 
 static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
 {
@@ -438,6 +436,36 @@ static inline void msg_set_errcode(struct tipc_msg *m, u32 err)
        msg_set_bits(m, 1, 25, 0xf, err);
 }
 
+static inline void msg_set_bulk(struct tipc_msg *m)
+{
+       msg_set_bits(m, 1, 28, 0x1, 1);
+}
+
+static inline u32 msg_is_bulk(struct tipc_msg *m)
+{
+       return msg_bits(m, 1, 28, 0x1);
+}
+
+static inline void msg_set_last_bulk(struct tipc_msg *m)
+{
+       msg_set_bits(m, 1, 27, 0x1, 1);
+}
+
+static inline u32 msg_is_last_bulk(struct tipc_msg *m)
+{
+       return msg_bits(m, 1, 27, 0x1);
+}
+
+static inline void msg_set_non_legacy(struct tipc_msg *m)
+{
+       msg_set_bits(m, 1, 26, 0x1, 1);
+}
+
+static inline u32 msg_is_legacy(struct tipc_msg *m)
+{
+       return !msg_bits(m, 1, 26, 0x1);
+}
+
 static inline u32 msg_reroute_cnt(struct tipc_msg *m)
 {
        return msg_bits(m, 1, 21, 0xf);
@@ -567,6 +595,16 @@ static inline void msg_set_origport(struct tipc_msg *m, u32 p)
        msg_set_word(m, 4, p);
 }
 
+static inline u16 msg_named_seqno(struct tipc_msg *m)
+{
+       return msg_bits(m, 4, 0, 0xffff);
+}
+
+static inline void msg_set_named_seqno(struct tipc_msg *m, u16 n)
+{
+       msg_set_bits(m, 4, 0, 0xffff, n);
+}
+
 static inline u32 msg_destport(struct tipc_msg *m)
 {
        return msg_word(m, 5);
index 5feaf3b..2f9c148 100644 (file)
@@ -102,7 +102,8 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
                pr_warn("Publication distribution failure\n");
                return NULL;
        }
-
+       msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
+       msg_set_non_legacy(buf_msg(skb));
        item = (struct distr_item *)msg_data(buf_msg(skb));
        publ_to_item(item, publ);
        return skb;
@@ -114,8 +115,8 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
 struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
 {
        struct name_table *nt = tipc_name_table(net);
-       struct sk_buff *buf;
        struct distr_item *item;
+       struct sk_buff *skb;
 
        write_lock_bh(&nt->cluster_scope_lock);
        list_del(&publ->binding_node);
@@ -123,15 +124,16 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
        if (publ->scope == TIPC_NODE_SCOPE)
                return NULL;
 
-       buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
-       if (!buf) {
+       skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
+       if (!skb) {
                pr_warn("Withdrawal distribution failure\n");
                return NULL;
        }
-
-       item = (struct distr_item *)msg_data(buf_msg(buf));
+       msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++);
+       msg_set_non_legacy(buf_msg(skb));
+       item = (struct distr_item *)msg_data(buf_msg(skb));
        publ_to_item(item, publ);
-       return buf;
+       return skb;
 }
 
 /**
@@ -141,7 +143,7 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
  * @pls: linked list of publication items to be packed into buffer chain
  */
 static void named_distribute(struct net *net, struct sk_buff_head *list,
-                            u32 dnode, struct list_head *pls)
+                            u32 dnode, struct list_head *pls, u16 seqno)
 {
        struct publication *publ;
        struct sk_buff *skb = NULL;
@@ -149,6 +151,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
        u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) /
                        ITEM_SIZE) * ITEM_SIZE;
        u32 msg_rem = msg_dsz;
+       struct tipc_msg *hdr;
 
        list_for_each_entry(publ, pls, binding_node) {
                /* Prepare next buffer: */
@@ -159,8 +162,11 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
                                pr_warn("Bulk publication failure\n");
                                return;
                        }
-                       msg_set_bc_ack_invalid(buf_msg(skb), true);
-                       item = (struct distr_item *)msg_data(buf_msg(skb));
+                       hdr = buf_msg(skb);
+                       msg_set_bc_ack_invalid(hdr, true);
+                       msg_set_bulk(hdr);
+                       msg_set_non_legacy(hdr);
+                       item = (struct distr_item *)msg_data(hdr);
                }
 
                /* Pack publication into message: */
@@ -176,24 +182,35 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
                }
        }
        if (skb) {
-               msg_set_size(buf_msg(skb), INT_H_SIZE + (msg_dsz - msg_rem));
+               hdr = buf_msg(skb);
+               msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem));
                skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem));
                __skb_queue_tail(list, skb);
        }
+       hdr = buf_msg(skb_peek_tail(list));
+       msg_set_last_bulk(hdr);
+       msg_set_named_seqno(hdr, seqno);
 }
 
 /**
  * tipc_named_node_up - tell specified node about all publications by this node
  */
-void tipc_named_node_up(struct net *net, u32 dnode)
+void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities)
 {
        struct name_table *nt = tipc_name_table(net);
+       struct tipc_net *tn = tipc_net(net);
        struct sk_buff_head head;
+       u16 seqno;
 
        __skb_queue_head_init(&head);
+       spin_lock_bh(&tn->nametbl_lock);
+       if (!(capabilities & TIPC_NAMED_BCAST))
+               nt->rc_dests++;
+       seqno = nt->snd_nxt;
+       spin_unlock_bh(&tn->nametbl_lock);
 
        read_lock_bh(&nt->cluster_scope_lock);
-       named_distribute(net, &head, dnode, &nt->cluster_scope);
+       named_distribute(net, &head, dnode, &nt->cluster_scope, seqno);
        tipc_node_xmit(net, &head, dnode, 0);
        read_unlock_bh(&nt->cluster_scope_lock);
 }
@@ -245,13 +262,21 @@ static void tipc_dist_queue_purge(struct net *net, u32 addr)
        spin_unlock_bh(&tn->nametbl_lock);
 }
 
-void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr)
+void tipc_publ_notify(struct net *net, struct list_head *nsub_list,
+                     u32 addr, u16 capabilities)
 {
+       struct name_table *nt = tipc_name_table(net);
+       struct tipc_net *tn = tipc_net(net);
+
        struct publication *publ, *tmp;
 
        list_for_each_entry_safe(publ, tmp, nsub_list, binding_node)
                tipc_publ_purge(net, publ, addr);
        tipc_dist_queue_purge(net, addr);
+       spin_lock_bh(&tn->nametbl_lock);
+       if (!(capabilities & TIPC_NAMED_BCAST))
+               nt->rc_dests--;
+       spin_unlock_bh(&tn->nametbl_lock);
 }
 
 /**
@@ -295,29 +320,62 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
        return false;
 }
 
+static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
+                                         u16 *rcv_nxt, bool *open)
+{
+       struct sk_buff *skb, *tmp;
+       struct tipc_msg *hdr;
+       u16 seqno;
+
+       skb_queue_walk_safe(namedq, skb, tmp) {
+               skb_linearize(skb);
+               hdr = buf_msg(skb);
+               seqno = msg_named_seqno(hdr);
+               if (msg_is_last_bulk(hdr)) {
+                       *rcv_nxt = seqno;
+                       *open = true;
+               }
+
+               if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) {
+                       __skb_unlink(skb, namedq);
+                       return skb;
+               }
+
+               if (*open && (*rcv_nxt == seqno)) {
+                       (*rcv_nxt)++;
+                       __skb_unlink(skb, namedq);
+                       return skb;
+               }
+
+               if (less(seqno, *rcv_nxt)) {
+                       __skb_unlink(skb, namedq);
+                       kfree_skb(skb);
+                       continue;
+               }
+       }
+       return NULL;
+}
+
 /**
  * tipc_named_rcv - process name table update messages sent by another node
  */
-void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
+void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq,
+                   u16 *rcv_nxt, bool *open)
 {
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct tipc_msg *msg;
+       struct tipc_net *tn = tipc_net(net);
        struct distr_item *item;
-       uint count;
-       u32 node;
+       struct tipc_msg *hdr;
        struct sk_buff *skb;
-       int mtype;
+       u32 count, node;
 
        spin_lock_bh(&tn->nametbl_lock);
-       for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) {
-               skb_linearize(skb);
-               msg = buf_msg(skb);
-               mtype = msg_type(msg);
-               item = (struct distr_item *)msg_data(msg);
-               count = msg_data_sz(msg) / ITEM_SIZE;
-               node = msg_orignode(msg);
+       while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) {
+               hdr = buf_msg(skb);
+               node = msg_orignode(hdr);
+               item = (struct distr_item *)msg_data(hdr);
+               count = msg_data_sz(hdr) / ITEM_SIZE;
                while (count--) {
-                       tipc_update_nametbl(net, item, node, mtype);
+                       tipc_update_nametbl(net, item, node, msg_type(hdr));
                        item++;
                }
                kfree_skb(skb);
@@ -345,6 +403,6 @@ void tipc_named_reinit(struct net *net)
                publ->node = self;
        list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node)
                publ->node = self;
-
+       nt->rc_dests = 0;
        spin_unlock_bh(&tn->nametbl_lock);
 }
index 63fc73e..0923231 100644 (file)
@@ -67,11 +67,14 @@ struct distr_item {
        __be32 key;
 };
 
+void tipc_named_bcast(struct net *net, struct sk_buff *skb);
 struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ);
 struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ);
-void tipc_named_node_up(struct net *net, u32 dnode);
-void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue);
+void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities);
+void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq,
+                   u16 *rcv_nxt, bool *open);
 void tipc_named_reinit(struct net *net);
-void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr);
+void tipc_publ_notify(struct net *net, struct list_head *nsub_list,
+                     u32 addr, u16 capabilities);
 
 #endif
index 359b2bc..2ac33d3 100644 (file)
@@ -729,6 +729,7 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
        struct tipc_net *tn = tipc_net(net);
        struct publication *p = NULL;
        struct sk_buff *skb = NULL;
+       u32 rc_dests;
 
        spin_lock_bh(&tn->nametbl_lock);
 
@@ -743,12 +744,14 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
                nt->local_publ_count++;
                skb = tipc_named_publish(net, p);
        }
+       rc_dests = nt->rc_dests;
 exit:
        spin_unlock_bh(&tn->nametbl_lock);
 
        if (skb)
-               tipc_node_broadcast(net, skb);
+               tipc_node_broadcast(net, skb, rc_dests);
        return p;
+
 }
 
 /**
@@ -762,6 +765,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower,
        u32 self = tipc_own_addr(net);
        struct sk_buff *skb = NULL;
        struct publication *p;
+       u32 rc_dests;
 
        spin_lock_bh(&tn->nametbl_lock);
 
@@ -775,10 +779,11 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower,
                pr_err("Failed to remove local publication {%u,%u,%u}/%u\n",
                       type, lower, upper, key);
        }
+       rc_dests = nt->rc_dests;
        spin_unlock_bh(&tn->nametbl_lock);
 
        if (skb) {
-               tipc_node_broadcast(net, skb);
+               tipc_node_broadcast(net, skb, rc_dests);
                return 1;
        }
        return 0;
index 728bc70..8064e19 100644 (file)
@@ -106,6 +106,8 @@ struct name_table {
        struct list_head cluster_scope;
        rwlock_t cluster_scope_lock;
        u32 local_publ_count;
+       u32 rc_dests;
+       u32 snd_nxt;
 };
 
 int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb);
index a4c2816..4edcee3 100644 (file)
@@ -75,6 +75,8 @@ struct tipc_bclink_entry {
        struct sk_buff_head arrvq;
        struct sk_buff_head inputq2;
        struct sk_buff_head namedq;
+       u16 named_rcv_nxt;
+       bool named_open;
 };
 
 /**
@@ -396,10 +398,10 @@ static void tipc_node_write_unlock(struct tipc_node *n)
        write_unlock_bh(&n->lock);
 
        if (flags & TIPC_NOTIFY_NODE_DOWN)
-               tipc_publ_notify(net, publ_list, addr);
+               tipc_publ_notify(net, publ_list, addr, n->capabilities);
 
        if (flags & TIPC_NOTIFY_NODE_UP)
-               tipc_named_node_up(net, addr);
+               tipc_named_node_up(net, addr, n->capabilities);
 
        if (flags & TIPC_NOTIFY_LINK_UP) {
                tipc_mon_peer_up(net, addr, bearer_id);
@@ -1483,6 +1485,7 @@ static void node_lost_contact(struct tipc_node *n,
 
        /* Clean up broadcast state */
        tipc_bcast_remove_peer(n->net, n->bc_entry.link);
+       __skb_queue_purge(&n->bc_entry.namedq);
 
        /* Abort any ongoing link failover */
        for (i = 0; i < MAX_BEARERS; i++) {
@@ -1512,7 +1515,7 @@ static void node_lost_contact(struct tipc_node *n,
  * tipc_node_get_linkname - get the name of a link
  *
  * @bearer_id: id of the bearer
- * @node: peer node address
+ * @addr: peer node address
  * @linkname: link name output buffer
  *
  * Returns 0 on success
@@ -1729,12 +1732,23 @@ int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
        return 0;
 }
 
-void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
+void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests)
 {
+       struct sk_buff_head xmitq;
        struct sk_buff *txskb;
        struct tipc_node *n;
+       u16 dummy;
        u32 dst;
 
+       /* Use broadcast if all nodes support it */
+       if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) {
+               __skb_queue_head_init(&xmitq);
+               __skb_queue_tail(&xmitq, skb);
+               tipc_bcast_xmit(net, &xmitq, &dummy);
+               return;
+       }
+
+       /* Otherwise use legacy replicast method */
        rcu_read_lock();
        list_for_each_entry_rcu(n, tipc_nodes(net), list) {
                dst = n->addr;
@@ -1749,7 +1763,6 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
                tipc_node_xmit_skb(net, txskb, dst, 0);
        }
        rcu_read_unlock();
-
        kfree_skb(skb);
 }
 
@@ -1844,7 +1857,9 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
 
        /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
        if (!skb_queue_empty(&n->bc_entry.namedq))
-               tipc_named_rcv(net, &n->bc_entry.namedq);
+               tipc_named_rcv(net, &n->bc_entry.namedq,
+                              &n->bc_entry.named_rcv_nxt,
+                              &n->bc_entry.named_open);
 
        /* If reassembly or retransmission failure => reset all links to peer */
        if (rc & TIPC_LINK_DOWN_EVT)
@@ -2007,7 +2022,7 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
  * tipc_rcv - process TIPC packets/messages arriving from off-node
  * @net: the applicable net namespace
  * @skb: TIPC packet
- * @bearer: pointer to bearer message arrived on
+ * @b: pointer to bearer message arrived on
  *
  * Invoked with no locks held. Bearer pointer must point to a valid bearer
  * structure (i.e. cannot be NULL), but bearer can be inactive.
@@ -2114,7 +2129,9 @@ rcv:
                tipc_node_link_down(n, bearer_id, false);
 
        if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
-               tipc_named_rcv(net, &n->bc_entry.namedq);
+               tipc_named_rcv(net, &n->bc_entry.namedq,
+                              &n->bc_entry.named_rcv_nxt,
+                              &n->bc_entry.named_open);
 
        if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
                tipc_node_mcast_rcv(n);
index a6803b4..9f6f13f 100644 (file)
@@ -55,7 +55,8 @@ enum {
        TIPC_MCAST_RBCTL      = (1 << 7),
        TIPC_GAP_ACK_BLOCK    = (1 << 8),
        TIPC_TUNNEL_ENHANCED  = (1 << 9),
-       TIPC_NAGLE            = (1 << 10)
+       TIPC_NAGLE            = (1 << 10),
+       TIPC_NAMED_BCAST      = (1 << 11)
 };
 
 #define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT           |  \
@@ -68,7 +69,8 @@ enum {
                                TIPC_MCAST_RBCTL       |   \
                                TIPC_GAP_ACK_BLOCK     |   \
                                TIPC_TUNNEL_ENHANCED   |   \
-                               TIPC_NAGLE)
+                               TIPC_NAGLE             |   \
+                               TIPC_NAMED_BCAST)
 
 #define INVALID_BEARER_ID -1
 
@@ -101,7 +103,7 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
                       u32 selector);
 void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr);
 void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr);
-void tipc_node_broadcast(struct net *net, struct sk_buff *skb);
+void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests);
 int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
 int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected);
index a94f383..07419f3 100644 (file)
@@ -711,7 +711,6 @@ exit:
  * tipc_getname - get port ID of socket or peer socket
  * @sock: socket structure
  * @uaddr: area for returned socket address
- * @uaddr_len: area for returned length of socket address
  * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
  *
  * Returns 0 on success, errno otherwise
@@ -1053,7 +1052,7 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
 
 /**
  * tipc_send_group_bcast - send message to all members in communication group
- * @sk: socket structure
+ * @sock: socket structure
  * @m: message to send
  * @dlen: total length of message data
  * @timeout: timeout to wait for wakeup
@@ -1673,7 +1672,7 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
 /**
  * tipc_sk_set_orig_addr - capture sender's address for received message
  * @m: descriptor for message info
- * @hdr: received message header
+ * @skb: received message
  *
  * Note: Address is not captured if not requested by receiver.
  */
@@ -2095,7 +2094,6 @@ static void tipc_write_space(struct sock *sk)
 /**
  * tipc_data_ready - wake up threads to indicate messages have been received
  * @sk: socket
- * @len: the length of messages
  */
 static void tipc_data_ready(struct sock *sk)
 {
@@ -2677,7 +2675,7 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
 /**
  * tipc_accept - wait for connection request
  * @sock: listening socket
- * @newsock: new socket that is to be connected
+ * @new_sock: new socket that is to be connected
  * @flags: file-related flags associated with socket
  *
  * Returns 0 on success, errno otherwise
@@ -3105,7 +3103,7 @@ static int tipc_sk_leave(struct tipc_sock *tsk)
  * Returns 0 on success, errno otherwise
  */
 static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
-                          char __user *ov, unsigned int ol)
+                          sockptr_t ov, unsigned int ol)
 {
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
@@ -3126,17 +3124,17 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
        case TIPC_NODELAY:
                if (ol < sizeof(value))
                        return -EINVAL;
-               if (get_user(value, (u32 __user *)ov))
+               if (copy_from_sockptr(&value, ov, sizeof(u32)))
                        return -EFAULT;
                break;
        case TIPC_GROUP_JOIN:
                if (ol < sizeof(mreq))
                        return -EINVAL;
-               if (copy_from_user(&mreq, ov, sizeof(mreq)))
+               if (copy_from_sockptr(&mreq, ov, sizeof(mreq)))
                        return -EFAULT;
                break;
        default:
-               if (ov || ol)
+               if (!sockptr_is_null(ov) || ol)
                        return -EINVAL;
        }
 
index 28a283f..d91b7c5 100644 (file)
@@ -565,7 +565,7 @@ msg_full:
 
 /**
  * tipc_parse_udp_addr - build udp media address from netlink data
- * @nlattr:    netlink attribute containing sockaddr storage aligned address
+ * @nla:       netlink attribute containing sockaddr storage aligned address
  * @addr:      tipc media address to fill with address, port and protocol type
  * @scope_id:  IPv6 scope id pointer, not NULL indicates it's required
  */
index 0e55f83..18fa606 100644 (file)
@@ -690,15 +690,55 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx,
        TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC);
 }
 
+static bool
+tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
+                          s64 resync_req, u32 *seq)
+{
+       u32 is_async = resync_req & RESYNC_REQ_ASYNC;
+       u32 req_seq = resync_req >> 32;
+       u32 req_end = req_seq + ((resync_req >> 16) & 0xffff);
+
+       if (is_async) {
+               /* asynchronous stage: log all headers seq such that
+                * req_seq <= seq <= end_seq, and wait for real resync request
+                */
+               if (between(*seq, req_seq, req_end) &&
+                   resync_async->loglen < TLS_DEVICE_RESYNC_ASYNC_LOGMAX)
+                       resync_async->log[resync_async->loglen++] = *seq;
+
+               return false;
+       }
+
+       /* synchronous stage: check against the logged entries and
+        * proceed to check the next entries if no match was found
+        */
+       while (resync_async->loglen) {
+               if (req_seq == resync_async->log[resync_async->loglen - 1] &&
+                   atomic64_try_cmpxchg(&resync_async->req,
+                                        &resync_req, 0)) {
+                       resync_async->loglen = 0;
+                       *seq = req_seq;
+                       return true;
+               }
+               resync_async->loglen--;
+       }
+
+       if (req_seq == *seq &&
+           atomic64_try_cmpxchg(&resync_async->req,
+                                &resync_req, 0))
+               return true;
+
+       return false;
+}
+
 void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
 {
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_offload_context_rx *rx_ctx;
-       bool is_req_pending, is_force_resync;
        u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
+       u32 sock_data, is_req_pending;
        struct tls_prot_info *prot;
        s64 resync_req;
-       u32 sock_data;
        u32 req_seq;
 
        if (tls_ctx->rx_conf != TLS_HW)
@@ -713,11 +753,9 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
                resync_req = atomic64_read(&rx_ctx->resync_req);
                req_seq = resync_req >> 32;
                seq += TLS_HEADER_SIZE - 1;
-               is_req_pending = resync_req & RESYNC_REQ;
-               is_force_resync = resync_req & RESYNC_REQ_FORCE;
+               is_req_pending = resync_req;
 
-               if (likely(!is_req_pending) ||
-                   (!is_force_resync && req_seq != seq) ||
+               if (likely(!is_req_pending) || req_seq != seq ||
                    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
                        return;
                break;
@@ -739,6 +777,16 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
                seq += rcd_len;
                tls_bigint_increment(rcd_sn, prot->rec_seq_size);
                break;
+       case TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC:
+               resync_req = atomic64_read(&rx_ctx->resync_async->req);
+               is_req_pending = resync_req;
+               if (likely(!is_req_pending))
+                       return;
+
+               if (!tls_device_rx_resync_async(rx_ctx->resync_async,
+                                               resync_req, &seq))
+                       return;
+               break;
        }
 
        tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
index ec10041..d77f7d8 100644 (file)
@@ -450,7 +450,7 @@ static int tls_getsockopt(struct sock *sk, int level, int optname,
        return do_tls_getsockopt(sk, optname, optval, optlen);
 }
 
-static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
+static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
                                  unsigned int optlen, int tx)
 {
        struct tls_crypto_info *crypto_info;
@@ -460,7 +460,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
        int rc = 0;
        int conf;
 
-       if (!optval || (optlen < sizeof(*crypto_info))) {
+       if (sockptr_is_null(optval) || (optlen < sizeof(*crypto_info))) {
                rc = -EINVAL;
                goto out;
        }
@@ -479,7 +479,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
                goto out;
        }
 
-       rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
+       rc = copy_from_sockptr(crypto_info, optval, sizeof(*crypto_info));
        if (rc) {
                rc = -EFAULT;
                goto err_crypto_info;
@@ -522,8 +522,9 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
                goto err_crypto_info;
        }
 
-       rc = copy_from_user(crypto_info + 1, optval + sizeof(*crypto_info),
-                           optlen - sizeof(*crypto_info));
+       sockptr_advance(optval, sizeof(*crypto_info));
+       rc = copy_from_sockptr(crypto_info + 1, optval,
+                              optlen - sizeof(*crypto_info));
        if (rc) {
                rc = -EFAULT;
                goto err_crypto_info;
@@ -579,8 +580,8 @@ out:
        return rc;
 }
 
-static int do_tls_setsockopt(struct sock *sk, int optname,
-                            char __user *optval, unsigned int optlen)
+static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval,
+                            unsigned int optlen)
 {
        int rc = 0;
 
@@ -600,7 +601,7 @@ static int do_tls_setsockopt(struct sock *sk, int optname,
 }
 
 static int tls_setsockopt(struct sock *sk, int level, int optname,
-                         char __user *optval, unsigned int optlen)
+                         sockptr_t optval, unsigned int optlen)
 {
        struct tls_context *ctx = tls_get_ctx(sk);
 
index 3385a7a..181ea6f 100644 (file)
@@ -714,8 +714,6 @@ static const struct proto_ops unix_stream_ops = {
 #endif
        .listen =       unix_listen,
        .shutdown =     unix_shutdown,
-       .setsockopt =   sock_no_setsockopt,
-       .getsockopt =   sock_no_getsockopt,
        .sendmsg =      unix_stream_sendmsg,
        .recvmsg =      unix_stream_recvmsg,
        .mmap =         sock_no_mmap,
@@ -741,8 +739,6 @@ static const struct proto_ops unix_dgram_ops = {
 #endif
        .listen =       sock_no_listen,
        .shutdown =     unix_shutdown,
-       .setsockopt =   sock_no_setsockopt,
-       .getsockopt =   sock_no_getsockopt,
        .sendmsg =      unix_dgram_sendmsg,
        .recvmsg =      unix_dgram_recvmsg,
        .mmap =         sock_no_mmap,
@@ -767,8 +763,6 @@ static const struct proto_ops unix_seqpacket_ops = {
 #endif
        .listen =       unix_listen,
        .shutdown =     unix_shutdown,
-       .setsockopt =   sock_no_setsockopt,
-       .getsockopt =   sock_no_getsockopt,
        .sendmsg =      unix_seqpacket_sendmsg,
        .recvmsg =      unix_seqpacket_recvmsg,
        .mmap =         sock_no_mmap,
index 626bf90..27bbcfa 100644 (file)
@@ -1202,8 +1202,6 @@ static const struct proto_ops vsock_dgram_ops = {
        .ioctl = sock_no_ioctl,
        .listen = sock_no_listen,
        .shutdown = vsock_shutdown,
-       .setsockopt = sock_no_setsockopt,
-       .getsockopt = sock_no_getsockopt,
        .sendmsg = vsock_dgram_sendmsg,
        .recvmsg = vsock_dgram_recvmsg,
        .mmap = sock_no_mmap,
@@ -1519,7 +1517,7 @@ static void vsock_update_buffer_size(struct vsock_sock *vsk,
 static int vsock_stream_setsockopt(struct socket *sock,
                                   int level,
                                   int optname,
-                                  char __user *optval,
+                                  sockptr_t optval,
                                   unsigned int optlen)
 {
        int err;
@@ -1537,7 +1535,7 @@ static int vsock_stream_setsockopt(struct socket *sock,
                        err = -EINVAL;                    \
                        goto exit;                        \
                }                                         \
-               if (copy_from_user(&_v, optval, sizeof(_v)) != 0) {     \
+               if (copy_from_sockptr(&_v, optval, sizeof(_v)) != 0) {  \
                        err = -EFAULT;                                  \
                        goto exit;                                      \
                }                                                       \
index 263ae39..0e07fb8 100644 (file)
@@ -5016,7 +5016,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
                err = nl80211_parse_he_obss_pd(
                                        info->attrs[NL80211_ATTR_HE_OBSS_PD],
                                        &params.he_obss_pd);
-               goto out;
+               if (err)
+                       goto out;
        }
 
        if (info->attrs[NL80211_ATTR_HE_BSS_COLOR]) {
@@ -5024,7 +5025,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
                                        info->attrs[NL80211_ATTR_HE_BSS_COLOR],
                                        &params.he_bss_color);
                if (err)
-                       return err;
+                       goto out;
        }
 
        nl80211_calculate_ap_params(&params);
index 0d74a31..35b8847 100644 (file)
@@ -2384,7 +2384,7 @@ static void reg_set_request_processed(void)
 
 /**
  * reg_process_hint_core - process core regulatory requests
- * @pending_request: a pending core regulatory request
+ * @core_request: a pending core regulatory request
  *
  * The wireless subsystem can use this function to process
  * a regulatory request issued by the regulatory core.
@@ -2493,6 +2493,7 @@ __reg_process_hint_driver(struct regulatory_request *driver_request)
 
 /**
  * reg_process_hint_driver - process driver regulatory requests
+ * @wiphy: the wireless device for the regulatory request
  * @driver_request: a pending driver regulatory request
  *
  * The wireless subsystem can use this function to process
@@ -2593,6 +2594,7 @@ __reg_process_hint_country_ie(struct wiphy *wiphy,
 
 /**
  * reg_process_hint_country_ie - process regulatory requests from country IEs
+ * @wiphy: the wireless device for the regulatory request
  * @country_ie_request: a regulatory request from a country IE
  *
  * The wireless subsystem can use this function to process
index cac9e28..aa918d7 100644 (file)
@@ -220,7 +220,6 @@ EXPORT_WEXT_HANDLER(cfg80211_wext_giwrange);
 
 /**
  * cfg80211_wext_freq - get wext frequency for non-"auto"
- * @dev: the net device
  * @freq: the wext freq encoding
  *
  * Returns a frequency, or a negative error code, or 0 for auto.
index e3ed232..68729aa 100644 (file)
@@ -17,7 +17,7 @@ config X25
          if you want that) and the lower level data link layer protocol LAPB
          (say Y to "LAPB Data Link Driver" below if you want that).
 
-         You can read more about X.25 at <http://www.sangoma.com/tutorials/x25/> and
+         You can read more about X.25 at <https://www.sangoma.com/tutorials/x25/> and
          <http://docwiki.cisco.com/wiki/X.25>.
          Information about X.25 for Linux is contained in the files
          <file:Documentation/networking/x25.rst> and
index d5b09bb..0bbb283 100644 (file)
@@ -431,7 +431,7 @@ void x25_destroy_socket_from_timer(struct sock *sk)
  */
 
 static int x25_setsockopt(struct socket *sock, int level, int optname,
-                         char __user *optval, unsigned int optlen)
+                         sockptr_t optval, unsigned int optlen)
 {
        int opt;
        struct sock *sk = sock->sk;
@@ -445,7 +445,7 @@ static int x25_setsockopt(struct socket *sock, int level, int optname,
                goto out;
 
        rc = -EFAULT;
-       if (get_user(opt, (int __user *)optval))
+       if (copy_from_sockptr(&opt, optval, sizeof(int)))
                goto out;
 
        if (opt)
index 7d02532..fdae054 100644 (file)
@@ -270,7 +270,7 @@ void x25_link_device_up(struct net_device *dev)
 
 /**
  *     __x25_remove_neigh - remove neighbour from x25_neigh_list
- *     @nb - neigh to remove
+ *     @nb: - neigh to remove
  *
  *     Remove neighbour from x25_neigh_list. If it was there.
  *     Caller must hold x25_neigh_list_lock.
index b8e94d5..00e46c9 100644 (file)
@@ -142,7 +142,7 @@ struct net_device *x25_dev_get(char *devname)
 
 /**
  *     x25_get_route - Find a route given an X.25 address.
- *     @addr - address to find a route for
+ *     @addr: - address to find a route for
  *
  *     Find a route given an X.25 address.
  */
index 3700266..2e94a7e 100644 (file)
@@ -123,7 +123,7 @@ static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
        addr = xp_get_handle(xskb);
        err = xskq_prod_reserve_desc(xs->rx, addr, len);
        if (err) {
-               xs->rx_dropped++;
+               xs->rx_queue_full++;
                return err;
        }
 
@@ -274,8 +274,10 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
 
        rcu_read_lock();
        list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
-               if (!xskq_cons_peek_desc(xs->tx, desc, umem))
+               if (!xskq_cons_peek_desc(xs->tx, desc, umem)) {
+                       xs->tx->queue_empty_descs++;
                        continue;
+               }
 
                /* This is the backpressure mechanism for the Tx path.
                 * Reserve space in the completion queue and only proceed
@@ -387,6 +389,8 @@ static int xsk_generic_xmit(struct sock *sk)
                sent_frame = true;
        }
 
+       xs->tx->queue_empty_descs++;
+
 out:
        if (sent_frame)
                sk->sk_write_space(sk);
@@ -698,7 +702,7 @@ struct xdp_umem_reg_v1 {
 };
 
 static int xsk_setsockopt(struct socket *sock, int level, int optname,
-                         char __user *optval, unsigned int optlen)
+                         sockptr_t optval, unsigned int optlen)
 {
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
@@ -716,7 +720,7 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
 
                if (optlen < sizeof(entries))
                        return -EINVAL;
-               if (copy_from_user(&entries, optval, sizeof(entries)))
+               if (copy_from_sockptr(&entries, optval, sizeof(entries)))
                        return -EFAULT;
 
                mutex_lock(&xs->mutex);
@@ -743,7 +747,7 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
                else if (optlen < sizeof(mr))
                        mr_size = sizeof(struct xdp_umem_reg_v1);
 
-               if (copy_from_user(&mr, optval, mr_size))
+               if (copy_from_sockptr(&mr, optval, mr_size))
                        return -EFAULT;
 
                mutex_lock(&xs->mutex);
@@ -770,7 +774,7 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
                struct xsk_queue **q;
                int entries;
 
-               if (copy_from_user(&entries, optval, sizeof(entries)))
+               if (copy_from_sockptr(&entries, optval, sizeof(entries)))
                        return -EFAULT;
 
                mutex_lock(&xs->mutex);
@@ -812,6 +816,12 @@ static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
        ring->desc = offsetof(struct xdp_umem_ring, desc);
 }
 
+struct xdp_statistics_v1 {
+       __u64 rx_dropped;
+       __u64 rx_invalid_descs;
+       __u64 tx_invalid_descs;
+};
+
 static int xsk_getsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, int __user *optlen)
 {
@@ -831,19 +841,35 @@ static int xsk_getsockopt(struct socket *sock, int level, int optname,
        case XDP_STATISTICS:
        {
                struct xdp_statistics stats;
+               bool extra_stats = true;
+               size_t stats_size;
 
-               if (len < sizeof(stats))
+               if (len < sizeof(struct xdp_statistics_v1)) {
                        return -EINVAL;
+               } else if (len < sizeof(stats)) {
+                       extra_stats = false;
+                       stats_size = sizeof(struct xdp_statistics_v1);
+               } else {
+                       stats_size = sizeof(stats);
+               }
 
                mutex_lock(&xs->mutex);
                stats.rx_dropped = xs->rx_dropped;
+               if (extra_stats) {
+                       stats.rx_ring_full = xs->rx_queue_full;
+                       stats.rx_fill_ring_empty_descs =
+                               xs->umem ? xskq_nb_queue_empty_descs(xs->umem->fq) : 0;
+                       stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
+               } else {
+                       stats.rx_dropped += xs->rx_queue_full;
+               }
                stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
                stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
                mutex_unlock(&xs->mutex);
 
-               if (copy_to_user(optval, &stats, sizeof(stats)))
+               if (copy_to_user(optval, &stats, stats_size))
                        return -EFAULT;
-               if (put_user(sizeof(stats), optlen))
+               if (put_user(stats_size, optlen))
                        return -EFAULT;
 
                return 0;
index 540ed75..a2044c2 100644 (file)
@@ -2,9 +2,6 @@
 
 #include <net/xsk_buff_pool.h>
 #include <net/xdp_sock.h>
-#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/swiotlb.h>
 
 #include "xsk_queue.h"
 
@@ -55,7 +52,6 @@ struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
        pool->free_heads_cnt = chunks;
        pool->headroom = headroom;
        pool->chunk_size = chunk_size;
-       pool->cheap_dma = true;
        pool->unaligned = unaligned;
        pool->frame_len = chunk_size - headroom - XDP_PACKET_HEADROOM;
        INIT_LIST_HEAD(&pool->free_list);
@@ -125,48 +121,6 @@ static void xp_check_dma_contiguity(struct xsk_buff_pool *pool)
        }
 }
 
-static bool __maybe_unused xp_check_swiotlb_dma(struct xsk_buff_pool *pool)
-{
-#if defined(CONFIG_SWIOTLB)
-       phys_addr_t paddr;
-       u32 i;
-
-       for (i = 0; i < pool->dma_pages_cnt; i++) {
-               paddr = dma_to_phys(pool->dev, pool->dma_pages[i]);
-               if (is_swiotlb_buffer(paddr))
-                       return false;
-       }
-#endif
-       return true;
-}
-
-static bool xp_check_cheap_dma(struct xsk_buff_pool *pool)
-{
-#if defined(CONFIG_HAS_DMA)
-       const struct dma_map_ops *ops = get_dma_ops(pool->dev);
-
-       if (ops) {
-               return !ops->sync_single_for_cpu &&
-                       !ops->sync_single_for_device;
-       }
-
-       if (!dma_is_direct(ops))
-               return false;
-
-       if (!xp_check_swiotlb_dma(pool))
-               return false;
-
-       if (!dev_is_dma_coherent(pool->dev)) {
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) ||               \
-       defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) ||        \
-       defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
-               return false;
-#endif
-       }
-#endif
-       return true;
-}
-
 int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
               unsigned long attrs, struct page **pages, u32 nr_pages)
 {
@@ -180,6 +134,7 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 
        pool->dev = dev;
        pool->dma_pages_cnt = nr_pages;
+       pool->dma_need_sync = false;
 
        for (i = 0; i < pool->dma_pages_cnt; i++) {
                dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
@@ -188,14 +143,13 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
                        xp_dma_unmap(pool, attrs);
                        return -ENOMEM;
                }
+               if (dma_need_sync(dev, dma))
+                       pool->dma_need_sync = true;
                pool->dma_pages[i] = dma;
        }
 
        if (pool->unaligned)
                xp_check_dma_contiguity(pool);
-
-       pool->dev = dev;
-       pool->cheap_dma = xp_check_cheap_dma(pool);
        return 0;
 }
 EXPORT_SYMBOL(xp_dma_map);
@@ -235,6 +189,7 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
 
        for (;;) {
                if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
+                       pool->fq->queue_empty_descs++;
                        xp_release(xskb);
                        return NULL;
                }
@@ -280,7 +235,7 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
        xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
        xskb->xdp.data_meta = xskb->xdp.data;
 
-       if (!pool->cheap_dma) {
+       if (pool->dma_need_sync) {
                dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
                                                 pool->frame_len,
                                                 DMA_BIDIRECTIONAL);
index 0163b26..21e9c2d 100644 (file)
@@ -76,6 +76,19 @@ static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
        return err;
 }
 
+static int xsk_diag_put_stats(const struct xdp_sock *xs, struct sk_buff *nlskb)
+{
+       struct xdp_diag_stats du = {};
+
+       du.n_rx_dropped = xs->rx_dropped;
+       du.n_rx_invalid = xskq_nb_invalid_descs(xs->rx);
+       du.n_rx_full = xs->rx_queue_full;
+       du.n_fill_ring_empty = xs->umem ? xskq_nb_queue_empty_descs(xs->umem->fq) : 0;
+       du.n_tx_invalid = xskq_nb_invalid_descs(xs->tx);
+       du.n_tx_ring_empty = xskq_nb_queue_empty_descs(xs->tx);
+       return nla_put(nlskb, XDP_DIAG_STATS, sizeof(du), &du);
+}
+
 static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
                         struct xdp_diag_req *req,
                         struct user_namespace *user_ns,
@@ -118,6 +131,10 @@ static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
            sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO))
                goto out_nlmsg_trim;
 
+       if ((req->xdiag_show & XDP_SHOW_STATS) &&
+           xsk_diag_put_stats(xs, nlskb))
+               goto out_nlmsg_trim;
+
        mutex_unlock(&xs->mutex);
        nlmsg_end(nlskb, nlh);
        return 0;
index 5b5d24d..bf42cfd 100644 (file)
@@ -38,6 +38,7 @@ struct xsk_queue {
        u32 cached_cons;
        struct xdp_ring *ring;
        u64 invalid_descs;
+       u64 queue_empty_descs;
 };
 
 /* The structure of the shared state of the rings are the same as the
@@ -354,6 +355,11 @@ static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
        return q ? q->invalid_descs : 0;
 }
 
+static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
+{
+       return q ? q->queue_empty_descs : 0;
+}
+
 struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
 void xskq_destroy(struct xsk_queue *q_ops);
 
index 1dc7208..8367adb 100644 (file)
@@ -254,6 +254,7 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
        spin_unlock_bh(&map->lock);
 }
 
+static int xsk_map_btf_id;
 const struct bpf_map_ops xsk_map_ops = {
        .map_alloc = xsk_map_alloc,
        .map_free = xsk_map_free,
@@ -264,4 +265,6 @@ const struct bpf_map_ops xsk_map_ops = {
        .map_update_elem = xsk_map_update_elem,
        .map_delete_elem = xsk_map_delete_elem,
        .map_check_btf = map_check_no_btf,
+       .map_btf_name = "xsk_map",
+       .map_btf_id = &xsk_map_btf_id,
 };
index b5d4a1e..5b9a5ab 100644 (file)
@@ -67,6 +67,30 @@ config XFRM_STATISTICS
 
          If unsure, say N.
 
+# This option selects XFRM_ALGO along with the AH authentication algorithms that
+# RFC 8221 lists as MUST be implemented.
+config XFRM_AH
+       tristate
+       select XFRM_ALGO
+       select CRYPTO
+       select CRYPTO_HMAC
+       select CRYPTO_SHA256
+
+# This option selects XFRM_ALGO along with the ESP encryption and authentication
+# algorithms that RFC 8221 lists as MUST be implemented.
+config XFRM_ESP
+       tristate
+       select XFRM_ALGO
+       select CRYPTO
+       select CRYPTO_AES
+       select CRYPTO_AUTHENC
+       select CRYPTO_CBC
+       select CRYPTO_ECHAINIV
+       select CRYPTO_GCM
+       select CRYPTO_HMAC
+       select CRYPTO_SEQIV
+       select CRYPTO_SHA256
+
 config XFRM_IPCOMP
        tristate
        select XFRM_ALGO
index f50d1f9..edf1189 100644 (file)
@@ -106,9 +106,10 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
        struct sk_buff *skb2, *nskb, *pskb = NULL;
        netdev_features_t esp_features = features;
        struct xfrm_offload *xo = xfrm_offload(skb);
+       struct net_device *dev = skb->dev;
        struct sec_path *sp;
 
-       if (!xo)
+       if (!xo || (xo->flags & XFRM_XMIT))
                return skb;
 
        if (!(features & NETIF_F_HW_ESP))
@@ -119,6 +120,10 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
        if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
                return skb;
 
+       /* This skb was already validated on the upper/virtual dev */
+       if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
+               return skb;
+
        local_irq_save(flags);
        sd = this_cpu_ptr(&softnet_data);
        err = !skb_queue_empty(&sd->xfrm_backlog);
@@ -129,25 +134,22 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
                return skb;
        }
 
-       if (skb_is_gso(skb)) {
-               struct net_device *dev = skb->dev;
+       xo->flags |= XFRM_XMIT;
 
-               if (unlikely(x->xso.dev != dev)) {
-                       struct sk_buff *segs;
+       if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) {
+               struct sk_buff *segs;
 
-                       /* Packet got rerouted, fixup features and segment it. */
-                       esp_features = esp_features & ~(NETIF_F_HW_ESP
-                                                       | NETIF_F_GSO_ESP);
+               /* Packet got rerouted, fixup features and segment it. */
+               esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP);
 
-                       segs = skb_gso_segment(skb, esp_features);
-                       if (IS_ERR(segs)) {
-                               kfree_skb(skb);
-                               atomic_long_inc(&dev->tx_dropped);
-                               return NULL;
-                       } else {
-                               consume_skb(skb);
-                               skb = segs;
-                       }
+               segs = skb_gso_segment(skb, esp_features);
+               if (IS_ERR(segs)) {
+                       kfree_skb(skb);
+                       atomic_long_inc(&dev->tx_dropped);
+                       return NULL;
+               } else {
+                       consume_skb(skb);
+                       skb = segs;
                }
        }
 
@@ -259,6 +261,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
        }
 
        xso->dev = dev;
+       xso->real_dev = dev;
        xso->num_exthdrs = 1;
        xso->flags = xuo->flags;
 
index c407ecb..b615729 100644 (file)
@@ -37,6 +37,7 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/ip6_route.h>
+#include <net/ip_tunnels.h>
 #include <net/addrconf.h>
 #include <net/xfrm.h>
 #include <net/net_namespace.h>
@@ -581,6 +582,7 @@ static const struct net_device_ops xfrmi_netdev_ops = {
 static void xfrmi_dev_setup(struct net_device *dev)
 {
        dev->netdev_ops         = &xfrmi_netdev_ops;
+       dev->header_ops         = &ip_tunnel_header_ops;
        dev->type               = ARPHRD_NONE;
        dev->mtu                = ETH_DATA_LEN;
        dev->min_mtu            = ETH_MIN_MTU;
index e4c23f6..a7ab193 100644 (file)
@@ -574,16 +574,12 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
        switch (x->outer_mode.family) {
        case AF_INET:
                memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
-#ifdef CONFIG_NETFILTER
                IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
-#endif
                break;
        case AF_INET6:
                memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
 
-#ifdef CONFIG_NETFILTER
                IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
-#endif
                break;
        }
 
index 8be2d92..69520ad 100644 (file)
@@ -2264,7 +2264,7 @@ static bool km_is_alive(const struct km_event *c)
        return is_alive;
 }
 
-int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
+int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, int optlen)
 {
        int err;
        u8 *data;
@@ -2274,7 +2274,7 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
        if (in_compat_syscall())
                return -EOPNOTSUPP;
 
-       if (!optval && !optlen) {
+       if (sockptr_is_null(optval) && !optlen) {
                xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
                xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
                __sk_dst_reset(sk);
@@ -2284,7 +2284,7 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
        if (optlen <= 0 || optlen > PAGE_SIZE)
                return -EMSGSIZE;
 
-       data = memdup_user(optval, optlen);
+       data = memdup_sockptr(optval, optlen);
        if (IS_ERR(data))
                return PTR_ERR(data);
 
index f3ac549..0ed6e4d 100644 (file)
@@ -211,7 +211,7 @@ config SAMPLE_WATCHDOG
 
 config SAMPLE_WATCH_QUEUE
        bool "Build example /dev/watch_queue notification consumer"
-       depends on HEADERS_INSTALL
+       depends on CC_CAN_LINK && HEADERS_INSTALL
        help
          Build example userspace program to use the new mount_notify(),
          sb_notify() syscalls and the KEYCTL_WATCH_KEY keyctl() function.
index 8403e47..f87ee02 100644 (file)
@@ -93,7 +93,7 @@ sampleip-objs := sampleip_user.o $(TRACE_HELPERS)
 tc_l2_redirect-objs := bpf_load.o tc_l2_redirect_user.o
 lwt_len_hist-objs := bpf_load.o lwt_len_hist_user.o
 xdp_tx_iptunnel-objs := xdp_tx_iptunnel_user.o
-test_map_in_map-objs := bpf_load.o test_map_in_map_user.o
+test_map_in_map-objs := test_map_in_map_user.o
 per_socket_stats_example-objs := cookie_uid_helper_example.o
 xdp_redirect-objs := xdp_redirect_user.o
 xdp_redirect_map-objs := xdp_redirect_map_user.o
index d5992f7..59f45fe 100644 (file)
@@ -30,6 +30,8 @@
 #define BPF_M_MAP      1
 #define BPF_M_PROG     2
 
+char bpf_log_buf[BPF_LOG_BUF_SIZE];
+
 static void usage(void)
 {
        printf("Usage: fds_example [...]\n");
@@ -57,7 +59,6 @@ static int bpf_prog_create(const char *object)
                BPF_EXIT_INSN(),
        };
        size_t insns_cnt = sizeof(insns) / sizeof(struct bpf_insn);
-       char bpf_log_buf[BPF_LOG_BUF_SIZE];
        struct bpf_object *obj;
        int prog_fd;
 
index 12e91ae..8773f22 100644 (file)
 #include <linux/version.h>
 #include <uapi/linux/bpf.h>
 #include <bpf/bpf_helpers.h>
-#include "bpf_legacy.h"
 #include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "trace_common.h"
 
 #define MAX_ENTRIES 1000
 #define MAX_NR_CPUS 1024
 
-struct bpf_map_def_legacy SEC("maps") hash_map = {
-       .type = BPF_MAP_TYPE_HASH,
-       .key_size = sizeof(u32),
-       .value_size = sizeof(long),
-       .max_entries = MAX_ENTRIES,
+struct {
+       __uint(type, BPF_MAP_TYPE_HASH);
+       __type(key, u32);
+       __type(value, long);
+       __uint(max_entries, MAX_ENTRIES);
+} hash_map SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_LRU_HASH);
+       __type(key, u32);
+       __type(value, long);
+       __uint(max_entries, 10000);
+} lru_hash_map SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_LRU_HASH);
+       __type(key, u32);
+       __type(value, long);
+       __uint(max_entries, 10000);
+       __uint(map_flags, BPF_F_NO_COMMON_LRU);
+} nocommon_lru_hash_map SEC(".maps");
+
+struct inner_lru {
+       __uint(type, BPF_MAP_TYPE_LRU_HASH);
+       __type(key, u32);
+       __type(value, long);
+       __uint(max_entries, MAX_ENTRIES);
+       __uint(map_flags, BPF_F_NUMA_NODE);
+       __uint(numa_node, 0);
+} inner_lru_hash_map SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+       __uint(max_entries, MAX_NR_CPUS);
+       __uint(key_size, sizeof(u32));
+       __array(values, struct inner_lru); /* use inner_lru as inner map */
+} array_of_lru_hashs SEC(".maps") = {
+       /* statically initialize the first element */
+       .values = { &inner_lru_hash_map },
 };
 
-struct bpf_map_def_legacy SEC("maps") lru_hash_map = {
-       .type = BPF_MAP_TYPE_LRU_HASH,
-       .key_size = sizeof(u32),
-       .value_size = sizeof(long),
-       .max_entries = 10000,
-};
-
-struct bpf_map_def_legacy SEC("maps") nocommon_lru_hash_map = {
-       .type = BPF_MAP_TYPE_LRU_HASH,
-       .key_size = sizeof(u32),
-       .value_size = sizeof(long),
-       .max_entries = 10000,
-       .map_flags = BPF_F_NO_COMMON_LRU,
-};
-
-struct bpf_map_def_legacy SEC("maps") inner_lru_hash_map = {
-       .type = BPF_MAP_TYPE_LRU_HASH,
-       .key_size = sizeof(u32),
-       .value_size = sizeof(long),
-       .max_entries = MAX_ENTRIES,
-       .map_flags = BPF_F_NUMA_NODE,
-       .numa_node = 0,
-};
-
-struct bpf_map_def_legacy SEC("maps") array_of_lru_hashs = {
-       .type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
-       .key_size = sizeof(u32),
-       .max_entries = MAX_NR_CPUS,
-};
-
-struct bpf_map_def_legacy SEC("maps") percpu_hash_map = {
-       .type = BPF_MAP_TYPE_PERCPU_HASH,
-       .key_size = sizeof(u32),
-       .value_size = sizeof(long),
-       .max_entries = MAX_ENTRIES,
-};
-
-struct bpf_map_def_legacy SEC("maps") hash_map_alloc = {
-       .type = BPF_MAP_TYPE_HASH,
-       .key_size = sizeof(u32),
-       .value_size = sizeof(long),
-       .max_entries = MAX_ENTRIES,
-       .map_flags = BPF_F_NO_PREALLOC,
-};
-
-struct bpf_map_def_legacy SEC("maps") percpu_hash_map_alloc = {
-       .type = BPF_MAP_TYPE_PERCPU_HASH,
-       .key_size = sizeof(u32),
-       .value_size = sizeof(long),
-       .max_entries = MAX_ENTRIES,
-       .map_flags = BPF_F_NO_PREALLOC,
-};
-
-struct bpf_map_def_legacy SEC("maps") lpm_trie_map_alloc = {
-       .type = BPF_MAP_TYPE_LPM_TRIE,
-       .key_size = 8,
-       .value_size = sizeof(long),
-       .max_entries = 10000,
-       .map_flags = BPF_F_NO_PREALLOC,
-};
-
-struct bpf_map_def_legacy SEC("maps") array_map = {
-       .type = BPF_MAP_TYPE_ARRAY,
-       .key_size = sizeof(u32),
-       .value_size = sizeof(long),
-       .max_entries = MAX_ENTRIES,
-};
-
-struct bpf_map_def_legacy SEC("maps") lru_hash_lookup_map = {
-       .type = BPF_MAP_TYPE_LRU_HASH,
-       .key_size = sizeof(u32),
-       .value_size = sizeof(long),
-       .max_entries = MAX_ENTRIES,
-};
-
-SEC("kprobe/sys_getuid")
+struct {
+       __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+       __uint(key_size, sizeof(u32));
+       __uint(value_size, sizeof(long));
+       __uint(max_entries, MAX_ENTRIES);
+} percpu_hash_map SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_HASH);
+       __type(key, u32);
+       __type(value, long);
+       __uint(max_entries, MAX_ENTRIES);
+       __uint(map_flags, BPF_F_NO_PREALLOC);
+} hash_map_alloc SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+       __uint(key_size, sizeof(u32));
+       __uint(value_size, sizeof(long));
+       __uint(max_entries, MAX_ENTRIES);
+       __uint(map_flags, BPF_F_NO_PREALLOC);
+} percpu_hash_map_alloc SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_LPM_TRIE);
+       __uint(key_size, 8);
+       __uint(value_size, sizeof(long));
+       __uint(max_entries, 10000);
+       __uint(map_flags, BPF_F_NO_PREALLOC);
+} lpm_trie_map_alloc SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_ARRAY);
+       __type(key, u32);
+       __type(value, long);
+       __uint(max_entries, MAX_ENTRIES);
+} array_map SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_LRU_HASH);
+       __type(key, u32);
+       __type(value, long);
+       __uint(max_entries, MAX_ENTRIES);
+} lru_hash_lookup_map SEC(".maps");
+
+SEC("kprobe/" SYSCALL(sys_getuid))
 int stress_hmap(struct pt_regs *ctx)
 {
        u32 key = bpf_get_current_pid_tgid();
@@ -112,7 +117,7 @@ int stress_hmap(struct pt_regs *ctx)
        return 0;
 }
 
-SEC("kprobe/sys_geteuid")
+SEC("kprobe/" SYSCALL(sys_geteuid))
 int stress_percpu_hmap(struct pt_regs *ctx)
 {
        u32 key = bpf_get_current_pid_tgid();
@@ -126,7 +131,7 @@ int stress_percpu_hmap(struct pt_regs *ctx)
        return 0;
 }
 
-SEC("kprobe/sys_getgid")
+SEC("kprobe/" SYSCALL(sys_getgid))
 int stress_hmap_alloc(struct pt_regs *ctx)
 {
        u32 key = bpf_get_current_pid_tgid();
@@ -140,7 +145,7 @@ int stress_hmap_alloc(struct pt_regs *ctx)
        return 0;
 }
 
-SEC("kprobe/sys_getegid")
+SEC("kprobe/" SYSCALL(sys_getegid))
 int stress_percpu_hmap_alloc(struct pt_regs *ctx)
 {
        u32 key = bpf_get_current_pid_tgid();
@@ -154,9 +159,10 @@ int stress_percpu_hmap_alloc(struct pt_regs *ctx)
        return 0;
 }
 
-SEC("kprobe/sys_connect")
+SEC("kprobe/" SYSCALL(sys_connect))
 int stress_lru_hmap_alloc(struct pt_regs *ctx)
 {
+       struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
        char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%dn";
        union {
                u16 dst6[8];
@@ -175,8 +181,8 @@ int stress_lru_hmap_alloc(struct pt_regs *ctx)
        long val = 1;
        u32 key = 0;
 
-       in6 = (struct sockaddr_in6 *)PT_REGS_PARM2(ctx);
-       addrlen = (int)PT_REGS_PARM3(ctx);
+       in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs);
+       addrlen = (int)PT_REGS_PARM3_CORE(real_regs);
 
        if (addrlen != sizeof(*in6))
                return 0;
@@ -233,7 +239,7 @@ done:
        return 0;
 }
 
-SEC("kprobe/sys_gettid")
+SEC("kprobe/" SYSCALL(sys_gettid))
 int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
 {
        union {
@@ -255,7 +261,7 @@ int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
        return 0;
 }
 
-SEC("kprobe/sys_getpgid")
+SEC("kprobe/" SYSCALL(sys_getpgid))
 int stress_hash_map_lookup(struct pt_regs *ctx)
 {
        u32 key = 1, i;
@@ -268,7 +274,7 @@ int stress_hash_map_lookup(struct pt_regs *ctx)
        return 0;
 }
 
-SEC("kprobe/sys_getppid")
+SEC("kprobe/" SYSCALL(sys_getppid))
 int stress_array_map_lookup(struct pt_regs *ctx)
 {
        u32 key = 1, i;
index fe5564b..8b13230 100644 (file)
@@ -11,7 +11,6 @@
 #include <sys/wait.h>
 #include <stdlib.h>
 #include <signal.h>
-#include <linux/bpf.h>
 #include <string.h>
 #include <time.h>
 #include <sys/resource.h>
@@ -19,7 +18,7 @@
 #include <errno.h>
 
 #include <bpf/bpf.h>
-#include "bpf_load.h"
+#include <bpf/libbpf.h>
 
 #define TEST_BIT(t) (1U << (t))
 #define MAX_NR_CPUS 1024
@@ -61,12 +60,18 @@ const char *test_map_names[NR_TESTS] = {
        [LRU_HASH_LOOKUP] = "lru_hash_lookup_map",
 };
 
+enum map_idx {
+       array_of_lru_hashs_idx,
+       hash_map_alloc_idx,
+       lru_hash_lookup_idx,
+       NR_IDXES,
+};
+
+static int map_fd[NR_IDXES];
+
 static int test_flags = ~0;
 static uint32_t num_map_entries;
 static uint32_t inner_lru_hash_size;
-static int inner_lru_hash_idx = -1;
-static int array_of_lru_hashs_idx = -1;
-static int lru_hash_lookup_idx = -1;
 static int lru_hash_lookup_test_entries = 32;
 static uint32_t max_cnt = 1000000;
 
@@ -122,30 +127,30 @@ static void do_test_lru(enum test_type test, int cpu)
        __u64 start_time;
        int i, ret;
 
-       if (test == INNER_LRU_HASH_PREALLOC) {
+       if (test == INNER_LRU_HASH_PREALLOC && cpu) {
+               /* If CPU is not 0, create inner_lru hash map and insert the fd
+                * value into the array_of_lru_hash map. In case of CPU 0,
+                * 'inner_lru_hash_map' was statically inserted on the map init
+                */
                int outer_fd = map_fd[array_of_lru_hashs_idx];
                unsigned int mycpu, mynode;
 
                assert(cpu < MAX_NR_CPUS);
 
-               if (cpu) {
-                       ret = syscall(__NR_getcpu, &mycpu, &mynode, NULL);
-                       assert(!ret);
-
-                       inner_lru_map_fds[cpu] =
-                               bpf_create_map_node(BPF_MAP_TYPE_LRU_HASH,
-                                                   test_map_names[INNER_LRU_HASH_PREALLOC],
-                                                   sizeof(uint32_t),
-                                                   sizeof(long),
-                                                   inner_lru_hash_size, 0,
-                                                   mynode);
-                       if (inner_lru_map_fds[cpu] == -1) {
-                               printf("cannot create BPF_MAP_TYPE_LRU_HASH %s(%d)\n",
-                                      strerror(errno), errno);
-                               exit(1);
-                       }
-               } else {
-                       inner_lru_map_fds[cpu] = map_fd[inner_lru_hash_idx];
+               ret = syscall(__NR_getcpu, &mycpu, &mynode, NULL);
+               assert(!ret);
+
+               inner_lru_map_fds[cpu] =
+                       bpf_create_map_node(BPF_MAP_TYPE_LRU_HASH,
+                                           test_map_names[INNER_LRU_HASH_PREALLOC],
+                                           sizeof(uint32_t),
+                                           sizeof(long),
+                                           inner_lru_hash_size, 0,
+                                           mynode);
+               if (inner_lru_map_fds[cpu] == -1) {
+                       printf("cannot create BPF_MAP_TYPE_LRU_HASH %s(%d)\n",
+                              strerror(errno), errno);
+                       exit(1);
                }
 
                ret = bpf_map_update_elem(outer_fd, &cpu,
@@ -377,7 +382,8 @@ static void fill_lpm_trie(void)
                key->data[1] = rand() & 0xff;
                key->data[2] = rand() & 0xff;
                key->data[3] = rand() & 0xff;
-               r = bpf_map_update_elem(map_fd[6], key, &value, 0);
+               r = bpf_map_update_elem(map_fd[hash_map_alloc_idx],
+                                       key, &value, 0);
                assert(!r);
        }
 
@@ -388,59 +394,52 @@ static void fill_lpm_trie(void)
        key->data[3] = 1;
        value = 128;
 
-       r = bpf_map_update_elem(map_fd[6], key, &value, 0);
+       r = bpf_map_update_elem(map_fd[hash_map_alloc_idx], key, &value, 0);
        assert(!r);
 }
 
-static void fixup_map(struct bpf_map_data *map, int idx)
+static void fixup_map(struct bpf_object *obj)
 {
+       struct bpf_map *map;
        int i;
 
-       if (!strcmp("inner_lru_hash_map", map->name)) {
-               inner_lru_hash_idx = idx;
-               inner_lru_hash_size = map->def.max_entries;
-       }
+       bpf_object__for_each_map(map, obj) {
+               const char *name = bpf_map__name(map);
 
-       if (!strcmp("array_of_lru_hashs", map->name)) {
-               if (inner_lru_hash_idx == -1) {
-                       printf("inner_lru_hash_map must be defined before array_of_lru_hashs\n");
-                       exit(1);
+               /* Only change the max_entries for the enabled test(s) */
+               for (i = 0; i < NR_TESTS; i++) {
+                       if (!strcmp(test_map_names[i], name) &&
+                           (check_test_flags(i))) {
+                               bpf_map__resize(map, num_map_entries);
+                               continue;
+                       }
                }
-               map->def.inner_map_idx = inner_lru_hash_idx;
-               array_of_lru_hashs_idx = idx;
        }
 
-       if (!strcmp("lru_hash_lookup_map", map->name))
-               lru_hash_lookup_idx = idx;
-
-       if (num_map_entries <= 0)
-               return;
-
        inner_lru_hash_size = num_map_entries;
-
-       /* Only change the max_entries for the enabled test(s) */
-       for (i = 0; i < NR_TESTS; i++) {
-               if (!strcmp(test_map_names[i], map->name) &&
-                   (check_test_flags(i))) {
-                       map->def.max_entries = num_map_entries;
-               }
-       }
 }
 
 int main(int argc, char **argv)
 {
        struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+       int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+       struct bpf_link *links[8];
+       struct bpf_program *prog;
+       struct bpf_object *obj;
+       struct bpf_map *map;
        char filename[256];
-       int num_cpu = 8;
+       int i = 0;
 
-       snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
-       setrlimit(RLIMIT_MEMLOCK, &r);
+       if (setrlimit(RLIMIT_MEMLOCK, &r)) {
+               perror("setrlimit(RLIMIT_MEMLOCK)");
+               return 1;
+       }
 
        if (argc > 1)
                test_flags = atoi(argv[1]) ? : test_flags;
 
        if (argc > 2)
-               num_cpu = atoi(argv[2]) ? : num_cpu;
+               nr_cpus = atoi(argv[2]) ? : nr_cpus;
 
        if (argc > 3)
                num_map_entries = atoi(argv[3]);
@@ -448,14 +447,61 @@ int main(int argc, char **argv)
        if (argc > 4)
                max_cnt = atoi(argv[4]);
 
-       if (load_bpf_file_fixup_map(filename, fixup_map)) {
-               printf("%s", bpf_log_buf);
-               return 1;
+       snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+       obj = bpf_object__open_file(filename, NULL);
+       if (libbpf_get_error(obj)) {
+               fprintf(stderr, "ERROR: opening BPF object file failed\n");
+               return 0;
+       }
+
+       map = bpf_object__find_map_by_name(obj, "inner_lru_hash_map");
+       if (libbpf_get_error(map)) {
+               fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+               goto cleanup;
+       }
+
+       inner_lru_hash_size = bpf_map__max_entries(map);
+       if (!inner_lru_hash_size) {
+               fprintf(stderr, "ERROR: failed to get map attribute\n");
+               goto cleanup;
+       }
+
+       /* resize BPF map prior to loading */
+       if (num_map_entries > 0)
+               fixup_map(obj);
+
+       /* load BPF program */
+       if (bpf_object__load(obj)) {
+               fprintf(stderr, "ERROR: loading BPF object file failed\n");
+               goto cleanup;
+       }
+
+       map_fd[0] = bpf_object__find_map_fd_by_name(obj, "array_of_lru_hashs");
+       map_fd[1] = bpf_object__find_map_fd_by_name(obj, "hash_map_alloc");
+       map_fd[2] = bpf_object__find_map_fd_by_name(obj, "lru_hash_lookup_map");
+       if (map_fd[0] < 0 || map_fd[1] < 0 || map_fd[2] < 0) {
+               fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+               goto cleanup;
+       }
+
+       bpf_object__for_each_program(prog, obj) {
+               links[i] = bpf_program__attach(prog);
+               if (libbpf_get_error(links[i])) {
+                       fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+                       links[i] = NULL;
+                       goto cleanup;
+               }
+               i++;
        }
 
        fill_lpm_trie();
 
-       run_perf_test(num_cpu);
+       run_perf_test(nr_cpus);
+
+cleanup:
+       for (i--; i >= 0; i--)
+               bpf_link__destroy(links[i]);
 
+       bpf_object__close(obj);
        return 0;
 }
index d459f73..e74ee1c 100644 (file)
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
-#define _(P) ({typeof(P) val; bpf_probe_read(&val, sizeof(val), &P); val;})
+#define _(P)                                                                   \
+       ({                                                                     \
+               typeof(P) val;                                                 \
+               bpf_probe_read_kernel(&val, sizeof(val), &(P));                \
+               val;                                                           \
+       })
 
 #define MINBLOCK_US    1
 
index 6cee61e..8def45c 100644 (file)
 #include <uapi/linux/bpf.h>
 #include <uapi/linux/in6.h>
 #include <bpf/bpf_helpers.h>
-#include "bpf_legacy.h"
 #include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "trace_common.h"
 
 #define MAX_NR_PORTS 65536
 
 /* map #0 */
-struct bpf_map_def_legacy SEC("maps") port_a = {
-       .type = BPF_MAP_TYPE_ARRAY,
-       .key_size = sizeof(u32),
-       .value_size = sizeof(int),
-       .max_entries = MAX_NR_PORTS,
-};
+struct inner_a {
+       __uint(type, BPF_MAP_TYPE_ARRAY);
+       __type(key, u32);
+       __type(value, int);
+       __uint(max_entries, MAX_NR_PORTS);
+} port_a SEC(".maps");
 
 /* map #1 */
-struct bpf_map_def_legacy SEC("maps") port_h = {
-       .type = BPF_MAP_TYPE_HASH,
-       .key_size = sizeof(u32),
-       .value_size = sizeof(int),
-       .max_entries = 1,
-};
+struct inner_h {
+       __uint(type, BPF_MAP_TYPE_HASH);
+       __type(key, u32);
+       __type(value, int);
+       __uint(max_entries, 1);
+} port_h SEC(".maps");
 
 /* map #2 */
-struct bpf_map_def_legacy SEC("maps") reg_result_h = {
-       .type = BPF_MAP_TYPE_HASH,
-       .key_size = sizeof(u32),
-       .value_size = sizeof(int),
-       .max_entries = 1,
-};
+struct {
+       __uint(type, BPF_MAP_TYPE_HASH);
+       __type(key, u32);
+       __type(value, int);
+       __uint(max_entries, 1);
+} reg_result_h SEC(".maps");
 
 /* map #3 */
-struct bpf_map_def_legacy SEC("maps") inline_result_h = {
-       .type = BPF_MAP_TYPE_HASH,
-       .key_size = sizeof(u32),
-       .value_size = sizeof(int),
-       .max_entries = 1,
-};
+struct {
+       __uint(type, BPF_MAP_TYPE_HASH);
+       __type(key, u32);
+       __type(value, int);
+       __uint(max_entries, 1);
+} inline_result_h SEC(".maps");
 
 /* map #4 */ /* Test case #0 */
-struct bpf_map_def_legacy SEC("maps") a_of_port_a = {
-       .type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
-       .key_size = sizeof(u32),
-       .inner_map_idx = 0, /* map_fd[0] is port_a */
-       .max_entries = MAX_NR_PORTS,
-};
+struct {
+       __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+       __uint(max_entries, MAX_NR_PORTS);
+       __uint(key_size, sizeof(u32));
+       __array(values, struct inner_a); /* use inner_a as inner map */
+} a_of_port_a SEC(".maps");
 
 /* map #5 */ /* Test case #1 */
-struct bpf_map_def_legacy SEC("maps") h_of_port_a = {
-       .type = BPF_MAP_TYPE_HASH_OF_MAPS,
-       .key_size = sizeof(u32),
-       .inner_map_idx = 0, /* map_fd[0] is port_a */
-       .max_entries = 1,
-};
+struct {
+       __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+       __uint(max_entries, 1);
+       __uint(key_size, sizeof(u32));
+       __array(values, struct inner_a); /* use inner_a as inner map */
+} h_of_port_a SEC(".maps");
 
 /* map #6 */ /* Test case #2 */
-struct bpf_map_def_legacy SEC("maps") h_of_port_h = {
-       .type = BPF_MAP_TYPE_HASH_OF_MAPS,
-       .key_size = sizeof(u32),
-       .inner_map_idx = 1, /* map_fd[1] is port_h */
-       .max_entries = 1,
-};
+struct {
+       __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+       __uint(max_entries, 1);
+       __uint(key_size, sizeof(u32));
+       __array(values, struct inner_h); /* use inner_h as inner map */
+} h_of_port_h SEC(".maps");
 
 static __always_inline int do_reg_lookup(void *inner_map, u32 port)
 {
@@ -102,9 +103,10 @@ static __always_inline int do_inline_hash_lookup(void *inner_map, u32 port)
        return result ? *result : -ENOENT;
 }
 
-SEC("kprobe/sys_connect")
+SEC("kprobe/" SYSCALL(sys_connect))
 int trace_sys_connect(struct pt_regs *ctx)
 {
+       struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
        struct sockaddr_in6 *in6;
        u16 test_case, port, dst6[8];
        int addrlen, ret, inline_ret, ret_key = 0;
@@ -112,8 +114,8 @@ int trace_sys_connect(struct pt_regs *ctx)
        void *outer_map, *inner_map;
        bool inline_hash = false;
 
-       in6 = (struct sockaddr_in6 *)PT_REGS_PARM2(ctx);
-       addrlen = (int)PT_REGS_PARM3(ctx);
+       in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs);
+       addrlen = (int)PT_REGS_PARM3_CORE(real_regs);
 
        if (addrlen != sizeof(*in6))
                return 0;
index eb29bcb..98656de 100644 (file)
@@ -11,7 +11,9 @@
 #include <stdlib.h>
 #include <stdio.h>
 #include <bpf/bpf.h>
-#include "bpf_load.h"
+#include <bpf/libbpf.h>
+
+static int map_fd[7];
 
 #define PORT_A         (map_fd[0])
 #define PORT_H         (map_fd[1])
@@ -113,18 +115,59 @@ static void test_map_in_map(void)
 int main(int argc, char **argv)
 {
        struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+       struct bpf_link *link = NULL;
+       struct bpf_program *prog;
+       struct bpf_object *obj;
        char filename[256];
 
-       assert(!setrlimit(RLIMIT_MEMLOCK, &r));
+       if (setrlimit(RLIMIT_MEMLOCK, &r)) {
+               perror("setrlimit(RLIMIT_MEMLOCK)");
+               return 1;
+       }
 
        snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+       obj = bpf_object__open_file(filename, NULL);
+       if (libbpf_get_error(obj)) {
+               fprintf(stderr, "ERROR: opening BPF object file failed\n");
+               return 0;
+       }
 
-       if (load_bpf_file(filename)) {
-               printf("%s", bpf_log_buf);
-               return 1;
+       prog = bpf_object__find_program_by_name(obj, "trace_sys_connect");
+       if (!prog) {
+               printf("finding a prog in obj file failed\n");
+               goto cleanup;
+       }
+
+       /* load BPF program */
+       if (bpf_object__load(obj)) {
+               fprintf(stderr, "ERROR: loading BPF object file failed\n");
+               goto cleanup;
+       }
+
+       map_fd[0] = bpf_object__find_map_fd_by_name(obj, "port_a");
+       map_fd[1] = bpf_object__find_map_fd_by_name(obj, "port_h");
+       map_fd[2] = bpf_object__find_map_fd_by_name(obj, "reg_result_h");
+       map_fd[3] = bpf_object__find_map_fd_by_name(obj, "inline_result_h");
+       map_fd[4] = bpf_object__find_map_fd_by_name(obj, "a_of_port_a");
+       map_fd[5] = bpf_object__find_map_fd_by_name(obj, "h_of_port_a");
+       map_fd[6] = bpf_object__find_map_fd_by_name(obj, "h_of_port_h");
+       if (map_fd[0] < 0 || map_fd[1] < 0 || map_fd[2] < 0 ||
+           map_fd[3] < 0 || map_fd[4] < 0 || map_fd[5] < 0 || map_fd[6] < 0) {
+               fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+               goto cleanup;
+       }
+
+       link = bpf_program__attach(prog);
+       if (libbpf_get_error(link)) {
+               fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+               link = NULL;
+               goto cleanup;
        }
 
        test_map_in_map();
 
+cleanup:
+       bpf_link__destroy(link);
+       bpf_object__close(obj);
        return 0;
 }
index 8b811c2..f6d593e 100644 (file)
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
-#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;})
+#define _(P)                                                                   \
+       ({                                                                     \
+               typeof(P) val = 0;                                             \
+               bpf_probe_read_kernel(&val, sizeof(val), &(P));                \
+               val;                                                           \
+       })
 
 SEC("kprobe/__set_task_comm")
 int prog(struct pt_regs *ctx)
@@ -25,8 +30,9 @@ int prog(struct pt_regs *ctx)
        tsk = (void *)PT_REGS_PARM1(ctx);
 
        pid = _(tsk->pid);
-       bpf_probe_read(oldcomm, sizeof(oldcomm), &tsk->comm);
-       bpf_probe_read(newcomm, sizeof(newcomm), (void *)PT_REGS_PARM2(ctx));
+       bpf_probe_read_kernel(oldcomm, sizeof(oldcomm), &tsk->comm);
+       bpf_probe_read_kernel(newcomm, sizeof(newcomm),
+                             (void *)PT_REGS_PARM2(ctx));
        signal = _(tsk->signal);
        oom_score_adj = _(signal->oom_score_adj);
        return 0;
index f033f36..fd651a6 100644 (file)
@@ -10,6 +10,8 @@
 #include <linux/version.h>
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+#include "trace_common.h"
 
 struct bpf_map_def SEC("maps") dnat_map = {
        .type = BPF_MAP_TYPE_HASH,
@@ -26,13 +28,14 @@ struct bpf_map_def SEC("maps") dnat_map = {
  * This example sits on a syscall, and the syscall ABI is relatively stable
  * of course, across platforms, and over time, the ABI may change.
  */
-SEC("kprobe/sys_connect")
+SEC("kprobe/" SYSCALL(sys_connect))
 int bpf_prog1(struct pt_regs *ctx)
 {
+       struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
+       void *sockaddr_arg = (void *)PT_REGS_PARM2_CORE(real_regs);
+       int sockaddr_len = (int)PT_REGS_PARM3_CORE(real_regs);
        struct sockaddr_in new_addr, orig_addr = {};
        struct sockaddr_in *mapped_addr;
-       void *sockaddr_arg = (void *)PT_REGS_PARM2(ctx);
-       int sockaddr_len = (int)PT_REGS_PARM3(ctx);
 
        if (sockaddr_len > sizeof(orig_addr))
                return 0;
index 8e2610e..3f4599c 100644 (file)
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
-#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;})
+#define _(P)                                                                   \
+       ({                                                                     \
+               typeof(P) val = 0;                                             \
+               bpf_probe_read_kernel(&val, sizeof(val), &(P));                \
+               val;                                                           \
+       })
 
 /* kprobe is NOT a stable ABI
  * kernel functions can be removed, renamed or completely change semantics.
@@ -34,7 +39,7 @@ int bpf_prog1(struct pt_regs *ctx)
        dev = _(skb->dev);
        len = _(skb->len);
 
-       bpf_probe_read(devname, sizeof(devname), dev->name);
+       bpf_probe_read_kernel(devname, sizeof(devname), dev->name);
 
        if (devname[0] == 'l' && devname[1] == 'o') {
                char fmt[] = "skb %p len %d\n";
index 32b49e8..64a1f75 100644 (file)
@@ -47,7 +47,7 @@ PROG(SYS__NR_write)(struct pt_regs *ctx)
 {
        struct seccomp_data sd;
 
-       bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx));
+       bpf_probe_read_kernel(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx));
        if (sd.args[2] == 512) {
                char fmt[] = "write(fd=%d, buf=%p, size=%d)\n";
                bpf_trace_printk(fmt, sizeof(fmt),
@@ -60,7 +60,7 @@ PROG(SYS__NR_read)(struct pt_regs *ctx)
 {
        struct seccomp_data sd;
 
-       bpf_probe_read(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx));
+       bpf_probe_read_kernel(&sd, sizeof(sd), (void *)PT_REGS_PARM2(ctx));
        if (sd.args[2] > 128 && sd.args[2] <= 1024) {
                char fmt[] = "read(fd=%d, buf=%p, size=%d)\n";
                bpf_trace_printk(fmt, sizeof(fmt),
index dd558cb..ef53b93 100644 (file)
@@ -509,11 +509,8 @@ static void *alloc_rec_per_cpu(int record_size)
 {
        unsigned int nr_cpus = bpf_num_possible_cpus();
        void *array;
-       size_t size;
 
-       size = record_size * nr_cpus;
-       array = malloc(size);
-       memset(array, 0, size);
+       array = calloc(nr_cpus, record_size);
        if (!array) {
                fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
                exit(EXIT_FAIL_MEM);
@@ -528,8 +525,7 @@ static struct stats_record *alloc_stats_record(void)
        int i;
 
        /* Alloc main stats_record structure */
-       rec = malloc(sizeof(*rec));
-       memset(rec, 0, sizeof(*rec));
+       rec = calloc(1, sizeof(*rec));
        if (!rec) {
                fprintf(stderr, "Mem alloc error\n");
                exit(EXIT_FAIL_MEM);
index 2baf8db..8255025 100644 (file)
@@ -21,7 +21,7 @@
 struct {
        __uint(type, BPF_MAP_TYPE_CPUMAP);
        __uint(key_size, sizeof(u32));
-       __uint(value_size, sizeof(u32));
+       __uint(value_size, sizeof(struct bpf_cpumap_val));
        __uint(max_entries, MAX_CPUS);
 } cpu_map SEC(".maps");
 
@@ -30,6 +30,9 @@ struct datarec {
        __u64 processed;
        __u64 dropped;
        __u64 issue;
+       __u64 xdp_pass;
+       __u64 xdp_drop;
+       __u64 xdp_redirect;
 };
 
 /* Count RX packets, as XDP bpf_prog doesn't get direct TX-success
@@ -692,13 +695,16 @@ int trace_xdp_cpumap_enqueue(struct cpumap_enqueue_ctx *ctx)
  * Code in:         kernel/include/trace/events/xdp.h
  */
 struct cpumap_kthread_ctx {
-       u64 __pad;              // First 8 bytes are not accessible by bpf code
-       int map_id;             //      offset:8;  size:4; signed:1;
-       u32 act;                //      offset:12; size:4; signed:0;
-       int cpu;                //      offset:16; size:4; signed:1;
-       unsigned int drops;     //      offset:20; size:4; signed:0;
-       unsigned int processed; //      offset:24; size:4; signed:0;
-       int sched;              //      offset:28; size:4; signed:1;
+       u64 __pad;                      // First 8 bytes are not accessible
+       int map_id;                     //      offset:8;  size:4; signed:1;
+       u32 act;                        //      offset:12; size:4; signed:0;
+       int cpu;                        //      offset:16; size:4; signed:1;
+       unsigned int drops;             //      offset:20; size:4; signed:0;
+       unsigned int processed;         //      offset:24; size:4; signed:0;
+       int sched;                      //      offset:28; size:4; signed:1;
+       unsigned int xdp_pass;          //      offset:32; size:4; signed:0;
+       unsigned int xdp_drop;          //      offset:36; size:4; signed:0;
+       unsigned int xdp_redirect;      //      offset:40; size:4; signed:0;
 };
 
 SEC("tracepoint/xdp/xdp_cpumap_kthread")
@@ -712,6 +718,9 @@ int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx *ctx)
                return 0;
        rec->processed += ctx->processed;
        rec->dropped   += ctx->drops;
+       rec->xdp_pass  += ctx->xdp_pass;
+       rec->xdp_drop  += ctx->xdp_drop;
+       rec->xdp_redirect  += ctx->xdp_redirect;
 
        /* Count times kthread yielded CPU via schedule call */
        if (ctx->sched)
index f346816..004c062 100644 (file)
@@ -70,6 +70,11 @@ static const struct option long_options[] = {
        {"stress-mode", no_argument,            NULL, 'x' },
        {"no-separators", no_argument,          NULL, 'z' },
        {"force",       no_argument,            NULL, 'F' },
+       {"mprog-disable", no_argument,          NULL, 'n' },
+       {"mprog-name",  required_argument,      NULL, 'e' },
+       {"mprog-filename", required_argument,   NULL, 'f' },
+       {"redirect-device", required_argument,  NULL, 'r' },
+       {"redirect-map", required_argument,     NULL, 'm' },
        {0, 0, NULL,  0 }
 };
 
@@ -156,6 +161,9 @@ struct datarec {
        __u64 processed;
        __u64 dropped;
        __u64 issue;
+       __u64 xdp_pass;
+       __u64 xdp_drop;
+       __u64 xdp_redirect;
 };
 struct record {
        __u64 timestamp;
@@ -175,6 +183,9 @@ static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
        /* For percpu maps, userspace gets a value per possible CPU */
        unsigned int nr_cpus = bpf_num_possible_cpus();
        struct datarec values[nr_cpus];
+       __u64 sum_xdp_redirect = 0;
+       __u64 sum_xdp_pass = 0;
+       __u64 sum_xdp_drop = 0;
        __u64 sum_processed = 0;
        __u64 sum_dropped = 0;
        __u64 sum_issue = 0;
@@ -196,10 +207,19 @@ static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
                sum_dropped        += values[i].dropped;
                rec->cpu[i].issue = values[i].issue;
                sum_issue        += values[i].issue;
+               rec->cpu[i].xdp_pass = values[i].xdp_pass;
+               sum_xdp_pass += values[i].xdp_pass;
+               rec->cpu[i].xdp_drop = values[i].xdp_drop;
+               sum_xdp_drop += values[i].xdp_drop;
+               rec->cpu[i].xdp_redirect = values[i].xdp_redirect;
+               sum_xdp_redirect += values[i].xdp_redirect;
        }
        rec->total.processed = sum_processed;
        rec->total.dropped   = sum_dropped;
        rec->total.issue     = sum_issue;
+       rec->total.xdp_pass  = sum_xdp_pass;
+       rec->total.xdp_drop  = sum_xdp_drop;
+       rec->total.xdp_redirect = sum_xdp_redirect;
        return true;
 }
 
@@ -207,11 +227,8 @@ static struct datarec *alloc_record_per_cpu(void)
 {
        unsigned int nr_cpus = bpf_num_possible_cpus();
        struct datarec *array;
-       size_t size;
 
-       size = sizeof(struct datarec) * nr_cpus;
-       array = malloc(size);
-       memset(array, 0, size);
+       array = calloc(nr_cpus, sizeof(struct datarec));
        if (!array) {
                fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
                exit(EXIT_FAIL_MEM);
@@ -226,11 +243,11 @@ static struct stats_record *alloc_stats_record(void)
 
        size = sizeof(*rec) + n_cpus * sizeof(struct record);
        rec = malloc(size);
-       memset(rec, 0, size);
        if (!rec) {
                fprintf(stderr, "Mem alloc error\n");
                exit(EXIT_FAIL_MEM);
        }
+       memset(rec, 0, size);
        rec->rx_cnt.cpu    = alloc_record_per_cpu();
        rec->redir_err.cpu = alloc_record_per_cpu();
        rec->kthread.cpu   = alloc_record_per_cpu();
@@ -303,17 +320,33 @@ static __u64 calc_errs_pps(struct datarec *r,
        return pps;
 }
 
+static void calc_xdp_pps(struct datarec *r, struct datarec *p,
+                        double *xdp_pass, double *xdp_drop,
+                        double *xdp_redirect, double period_)
+{
+       *xdp_pass = 0, *xdp_drop = 0, *xdp_redirect = 0;
+       if (period_ > 0) {
+               *xdp_redirect = (r->xdp_redirect - p->xdp_redirect) / period_;
+               *xdp_pass = (r->xdp_pass - p->xdp_pass) / period_;
+               *xdp_drop = (r->xdp_drop - p->xdp_drop) / period_;
+       }
+}
+
 static void stats_print(struct stats_record *stats_rec,
                        struct stats_record *stats_prev,
-                       char *prog_name)
+                       char *prog_name, char *mprog_name, int mprog_fd)
 {
        unsigned int nr_cpus = bpf_num_possible_cpus();
        double pps = 0, drop = 0, err = 0;
+       bool mprog_enabled = false;
        struct record *rec, *prev;
        int to_cpu;
        double t;
        int i;
 
+       if (mprog_fd > 0)
+               mprog_enabled = true;
+
        /* Header */
        printf("Running XDP/eBPF prog_name:%s\n", prog_name);
        printf("%-15s %-7s %-14s %-11s %-9s\n",
@@ -458,6 +491,34 @@ static void stats_print(struct stats_record *stats_rec,
                printf(fm2_err, "xdp_exception", "total", pps, drop);
        }
 
+       /* CPUMAP attached XDP program that runs on remote/destination CPU */
+       if (mprog_enabled) {
+               char *fmt_k = "%-15s %-7d %'-14.0f %'-11.0f %'-10.0f\n";
+               char *fm2_k = "%-15s %-7s %'-14.0f %'-11.0f %'-10.0f\n";
+               double xdp_pass, xdp_drop, xdp_redirect;
+
+               printf("\n2nd remote XDP/eBPF prog_name: %s\n", mprog_name);
+               printf("%-15s %-7s %-14s %-11s %-9s\n",
+                      "XDP-cpumap", "CPU:to", "xdp-pass", "xdp-drop", "xdp-redir");
+
+               rec  = &stats_rec->kthread;
+               prev = &stats_prev->kthread;
+               t = calc_period(rec, prev);
+               for (i = 0; i < nr_cpus; i++) {
+                       struct datarec *r = &rec->cpu[i];
+                       struct datarec *p = &prev->cpu[i];
+
+                       calc_xdp_pps(r, p, &xdp_pass, &xdp_drop,
+                                    &xdp_redirect, t);
+                       if (xdp_pass > 0 || xdp_drop > 0 || xdp_redirect > 0)
+                               printf(fmt_k, "xdp-in-kthread", i, xdp_pass, xdp_drop,
+                                      xdp_redirect);
+               }
+               calc_xdp_pps(&rec->total, &prev->total, &xdp_pass, &xdp_drop,
+                            &xdp_redirect, t);
+               printf(fm2_k, "xdp-in-kthread", "total", xdp_pass, xdp_drop, xdp_redirect);
+       }
+
        printf("\n");
        fflush(stdout);
 }
@@ -494,7 +555,7 @@ static inline void swap(struct stats_record **a, struct stats_record **b)
        *b = tmp;
 }
 
-static int create_cpu_entry(__u32 cpu, __u32 queue_size,
+static int create_cpu_entry(__u32 cpu, struct bpf_cpumap_val *value,
                            __u32 avail_idx, bool new)
 {
        __u32 curr_cpus_count = 0;
@@ -504,7 +565,7 @@ static int create_cpu_entry(__u32 cpu, __u32 queue_size,
        /* Add a CPU entry to cpumap, as this allocate a cpu entry in
         * the kernel for the cpu.
         */
-       ret = bpf_map_update_elem(cpu_map_fd, &cpu, &queue_size, 0);
+       ret = bpf_map_update_elem(cpu_map_fd, &cpu, value, 0);
        if (ret) {
                fprintf(stderr, "Create CPU entry failed (err:%d)\n", ret);
                exit(EXIT_FAIL_BPF);
@@ -535,9 +596,9 @@ static int create_cpu_entry(__u32 cpu, __u32 queue_size,
                }
        }
        /* map_fd[7] = cpus_iterator */
-       printf("%s CPU:%u as idx:%u queue_size:%d (total cpus_count:%u)\n",
+       printf("%s CPU:%u as idx:%u qsize:%d prog_fd: %d (cpus_count:%u)\n",
               new ? "Add-new":"Replace", cpu, avail_idx,
-              queue_size, curr_cpus_count);
+              value->qsize, value->bpf_prog.fd, curr_cpus_count);
 
        return 0;
 }
@@ -561,21 +622,26 @@ static void mark_cpus_unavailable(void)
 }
 
 /* Stress cpumap management code by concurrently changing underlying cpumap */
-static void stress_cpumap(void)
+static void stress_cpumap(struct bpf_cpumap_val *value)
 {
        /* Changing qsize will cause kernel to free and alloc a new
         * bpf_cpu_map_entry, with an associated/complicated tear-down
         * procedure.
         */
-       create_cpu_entry(1,  1024, 0, false);
-       create_cpu_entry(1,     8, 0, false);
-       create_cpu_entry(1, 16000, 0, false);
+       value->qsize = 1024;
+       create_cpu_entry(1, value, 0, false);
+       value->qsize = 8;
+       create_cpu_entry(1, value, 0, false);
+       value->qsize = 16000;
+       create_cpu_entry(1, value, 0, false);
 }
 
 static void stats_poll(int interval, bool use_separators, char *prog_name,
+                      char *mprog_name, struct bpf_cpumap_val *value,
                       bool stress_mode)
 {
        struct stats_record *record, *prev;
+       int mprog_fd;
 
        record = alloc_stats_record();
        prev   = alloc_stats_record();
@@ -587,11 +653,12 @@ static void stats_poll(int interval, bool use_separators, char *prog_name,
 
        while (1) {
                swap(&prev, &record);
+               mprog_fd = value->bpf_prog.fd;
                stats_collect(record);
-               stats_print(record, prev, prog_name);
+               stats_print(record, prev, prog_name, mprog_name, mprog_fd);
                sleep(interval);
                if (stress_mode)
-                       stress_cpumap();
+                       stress_cpumap(value);
        }
 
        free_stats_record(record);
@@ -664,15 +731,66 @@ static int init_map_fds(struct bpf_object *obj)
        return 0;
 }
 
+static int load_cpumap_prog(char *file_name, char *prog_name,
+                           char *redir_interface, char *redir_map)
+{
+       struct bpf_prog_load_attr prog_load_attr = {
+               .prog_type              = BPF_PROG_TYPE_XDP,
+               .expected_attach_type   = BPF_XDP_CPUMAP,
+               .file = file_name,
+       };
+       struct bpf_program *prog;
+       struct bpf_object *obj;
+       int fd;
+
+       if (bpf_prog_load_xattr(&prog_load_attr, &obj, &fd))
+               return -1;
+
+       if (fd < 0) {
+               fprintf(stderr, "ERR: bpf_prog_load_xattr: %s\n",
+                       strerror(errno));
+               return fd;
+       }
+
+       if (redir_interface && redir_map) {
+               int err, map_fd, ifindex_out, key = 0;
+
+               map_fd = bpf_object__find_map_fd_by_name(obj, redir_map);
+               if (map_fd < 0)
+                       return map_fd;
+
+               ifindex_out = if_nametoindex(redir_interface);
+               if (!ifindex_out)
+                       return -1;
+
+               err = bpf_map_update_elem(map_fd, &key, &ifindex_out, 0);
+               if (err < 0)
+                       return err;
+       }
+
+       prog = bpf_object__find_program_by_title(obj, prog_name);
+       if (!prog) {
+               fprintf(stderr, "bpf_object__find_program_by_title failed\n");
+               return EXIT_FAIL;
+       }
+
+       return bpf_program__fd(prog);
+}
+
 int main(int argc, char **argv)
 {
        struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
        char *prog_name = "xdp_cpu_map5_lb_hash_ip_pairs";
+       char *mprog_filename = "xdp_redirect_kern.o";
+       char *redir_interface = NULL, *redir_map = NULL;
+       char *mprog_name = "xdp_redirect_dummy";
+       bool mprog_disable = false;
        struct bpf_prog_load_attr prog_load_attr = {
                .prog_type      = BPF_PROG_TYPE_UNSPEC,
        };
        struct bpf_prog_info info = {};
        __u32 info_len = sizeof(info);
+       struct bpf_cpumap_val value;
        bool use_separators = true;
        bool stress_mode = false;
        struct bpf_program *prog;
@@ -684,6 +802,7 @@ int main(int argc, char **argv)
        int add_cpu = -1;
        int opt, err;
        int prog_fd;
+       int *cpu, i;
        __u32 qsize;
 
        n_cpus = get_nprocs_conf();
@@ -719,8 +838,15 @@ int main(int argc, char **argv)
        }
        mark_cpus_unavailable();
 
+       cpu = malloc(n_cpus * sizeof(int));
+       if (!cpu) {
+               fprintf(stderr, "failed to allocate cpu array\n");
+               return EXIT_FAIL;
+       }
+       memset(cpu, 0, n_cpus * sizeof(int));
+
        /* Parse commands line args */
-       while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzF",
+       while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzFf:e:r:m:",
                                  long_options, &longindex)) != -1) {
                switch (opt) {
                case 'd':
@@ -754,6 +880,21 @@ int main(int argc, char **argv)
                        /* Selecting eBPF prog to load */
                        prog_name = optarg;
                        break;
+               case 'n':
+                       mprog_disable = true;
+                       break;
+               case 'f':
+                       mprog_filename = optarg;
+                       break;
+               case 'e':
+                       mprog_name = optarg;
+                       break;
+               case 'r':
+                       redir_interface = optarg;
+                       break;
+               case 'm':
+                       redir_map = optarg;
+                       break;
                case 'c':
                        /* Add multiple CPUs */
                        add_cpu = strtoul(optarg, NULL, 0);
@@ -763,8 +904,7 @@ int main(int argc, char **argv)
                                        errno, strerror(errno));
                                goto error;
                        }
-                       create_cpu_entry(add_cpu, qsize, added_cpus, true);
-                       added_cpus++;
+                       cpu[added_cpus++] = add_cpu;
                        break;
                case 'q':
                        qsize = atoi(optarg);
@@ -775,6 +915,7 @@ int main(int argc, char **argv)
                case 'h':
                error:
                default:
+                       free(cpu);
                        usage(argv, obj);
                        return EXIT_FAIL_OPTION;
                }
@@ -787,15 +928,30 @@ int main(int argc, char **argv)
        if (ifindex == -1) {
                fprintf(stderr, "ERR: required option --dev missing\n");
                usage(argv, obj);
-               return EXIT_FAIL_OPTION;
+               err = EXIT_FAIL_OPTION;
+               goto out;
        }
        /* Required option */
        if (add_cpu == -1) {
                fprintf(stderr, "ERR: required option --cpu missing\n");
                fprintf(stderr, " Specify multiple --cpu option to add more\n");
                usage(argv, obj);
-               return EXIT_FAIL_OPTION;
+               err = EXIT_FAIL_OPTION;
+               goto out;
+       }
+
+       value.bpf_prog.fd = 0;
+       if (!mprog_disable)
+               value.bpf_prog.fd = load_cpumap_prog(mprog_filename, mprog_name,
+                                                    redir_interface, redir_map);
+       if (value.bpf_prog.fd < 0) {
+               err = value.bpf_prog.fd;
+               goto out;
        }
+       value.qsize = qsize;
+
+       for (i = 0; i < added_cpus; i++)
+               create_cpu_entry(cpu[i], &value, i, true);
 
        /* Remove XDP program when program is interrupted or killed */
        signal(SIGINT, int_exit);
@@ -804,27 +960,33 @@ int main(int argc, char **argv)
        prog = bpf_object__find_program_by_title(obj, prog_name);
        if (!prog) {
                fprintf(stderr, "bpf_object__find_program_by_title failed\n");
-               return EXIT_FAIL;
+               err = EXIT_FAIL;
+               goto out;
        }
 
        prog_fd = bpf_program__fd(prog);
        if (prog_fd < 0) {
                fprintf(stderr, "bpf_program__fd failed\n");
-               return EXIT_FAIL;
+               err = EXIT_FAIL;
+               goto out;
        }
 
        if (bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags) < 0) {
                fprintf(stderr, "link set xdp fd failed\n");
-               return EXIT_FAIL_XDP;
+               err = EXIT_FAIL_XDP;
+               goto out;
        }
 
        err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
        if (err) {
                printf("can't get prog info - %s\n", strerror(errno));
-               return err;
+               goto out;
        }
        prog_id = info.id;
 
-       stats_poll(interval, use_separators, prog_name, stress_mode);
-       return EXIT_OK;
+       stats_poll(interval, use_separators, prog_name, mprog_name,
+                  &value, stress_mode);
+out:
+       free(cpu);
+       return err;
 }
index 4fe4750..caa4e7f 100644 (file)
@@ -198,11 +198,8 @@ static struct datarec *alloc_record_per_cpu(void)
 {
        unsigned int nr_cpus = bpf_num_possible_cpus();
        struct datarec *array;
-       size_t size;
 
-       size = sizeof(struct datarec) * nr_cpus;
-       array = malloc(size);
-       memset(array, 0, size);
+       array = calloc(nr_cpus, sizeof(struct datarec));
        if (!array) {
                fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
                exit(EXIT_FAIL_MEM);
@@ -214,11 +211,8 @@ static struct record *alloc_record_per_rxq(void)
 {
        unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
        struct record *array;
-       size_t size;
 
-       size = sizeof(struct record) * nr_rxqs;
-       array = malloc(size);
-       memset(array, 0, size);
+       array = calloc(nr_rxqs, sizeof(struct record));
        if (!array) {
                fprintf(stderr, "Mem alloc error (nr_rxqs:%u)\n", nr_rxqs);
                exit(EXIT_FAIL_MEM);
@@ -232,8 +226,7 @@ static struct stats_record *alloc_stats_record(void)
        struct stats_record *rec;
        int i;
 
-       rec = malloc(sizeof(*rec));
-       memset(rec, 0, sizeof(*rec));
+       rec = calloc(1, sizeof(struct stats_record));
        if (!rec) {
                fprintf(stderr, "Mem alloc error\n");
                exit(EXIT_FAIL_MEM);
index c91e913..19c6794 100644 (file)
@@ -77,6 +77,7 @@ static u32 opt_batch_size = 64;
 static int opt_pkt_count;
 static u16 opt_pkt_size = MIN_PKT_SIZE;
 static u32 opt_pkt_fill_pattern = 0x12345678;
+static bool opt_extra_stats;
 static int opt_poll;
 static int opt_interval = 1;
 static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP;
@@ -103,8 +104,20 @@ struct xsk_socket_info {
        struct xsk_socket *xsk;
        unsigned long rx_npkts;
        unsigned long tx_npkts;
+       unsigned long rx_dropped_npkts;
+       unsigned long rx_invalid_npkts;
+       unsigned long tx_invalid_npkts;
+       unsigned long rx_full_npkts;
+       unsigned long rx_fill_empty_npkts;
+       unsigned long tx_empty_npkts;
        unsigned long prev_rx_npkts;
        unsigned long prev_tx_npkts;
+       unsigned long prev_rx_dropped_npkts;
+       unsigned long prev_rx_invalid_npkts;
+       unsigned long prev_tx_invalid_npkts;
+       unsigned long prev_rx_full_npkts;
+       unsigned long prev_rx_fill_empty_npkts;
+       unsigned long prev_tx_empty_npkts;
        u32 outstanding_tx;
 };
 
@@ -147,6 +160,30 @@ static void print_benchmark(bool running)
        }
 }
 
+static int xsk_get_xdp_stats(int fd, struct xsk_socket_info *xsk)
+{
+       struct xdp_statistics stats;
+       socklen_t optlen;
+       int err;
+
+       optlen = sizeof(stats);
+       err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);
+       if (err)
+               return err;
+
+       if (optlen == sizeof(struct xdp_statistics)) {
+               xsk->rx_dropped_npkts = stats.rx_dropped;
+               xsk->rx_invalid_npkts = stats.rx_invalid_descs;
+               xsk->tx_invalid_npkts = stats.tx_invalid_descs;
+               xsk->rx_full_npkts = stats.rx_ring_full;
+               xsk->rx_fill_empty_npkts = stats.rx_fill_ring_empty_descs;
+               xsk->tx_empty_npkts = stats.tx_ring_empty_descs;
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
 static void dump_stats(void)
 {
        unsigned long now = get_nsecs();
@@ -157,7 +194,8 @@ static void dump_stats(void)
 
        for (i = 0; i < num_socks && xsks[i]; i++) {
                char *fmt = "%-15s %'-11.0f %'-11lu\n";
-               double rx_pps, tx_pps;
+               double rx_pps, tx_pps, dropped_pps, rx_invalid_pps, full_pps, fill_empty_pps,
+                       tx_invalid_pps, tx_empty_pps;
 
                rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) *
                         1000000000. / dt;
@@ -175,6 +213,46 @@ static void dump_stats(void)
 
                xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts;
                xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts;
+
+               if (opt_extra_stats) {
+                       if (!xsk_get_xdp_stats(xsk_socket__fd(xsks[i]->xsk), xsks[i])) {
+                               dropped_pps = (xsks[i]->rx_dropped_npkts -
+                                               xsks[i]->prev_rx_dropped_npkts) * 1000000000. / dt;
+                               rx_invalid_pps = (xsks[i]->rx_invalid_npkts -
+                                               xsks[i]->prev_rx_invalid_npkts) * 1000000000. / dt;
+                               tx_invalid_pps = (xsks[i]->tx_invalid_npkts -
+                                               xsks[i]->prev_tx_invalid_npkts) * 1000000000. / dt;
+                               full_pps = (xsks[i]->rx_full_npkts -
+                                               xsks[i]->prev_rx_full_npkts) * 1000000000. / dt;
+                               fill_empty_pps = (xsks[i]->rx_fill_empty_npkts -
+                                               xsks[i]->prev_rx_fill_empty_npkts)
+                                               * 1000000000. / dt;
+                               tx_empty_pps = (xsks[i]->tx_empty_npkts -
+                                               xsks[i]->prev_tx_empty_npkts) * 1000000000. / dt;
+
+                               printf(fmt, "rx dropped", dropped_pps,
+                                      xsks[i]->rx_dropped_npkts);
+                               printf(fmt, "rx invalid", rx_invalid_pps,
+                                      xsks[i]->rx_invalid_npkts);
+                               printf(fmt, "tx invalid", tx_invalid_pps,
+                                      xsks[i]->tx_invalid_npkts);
+                               printf(fmt, "rx queue full", full_pps,
+                                      xsks[i]->rx_full_npkts);
+                               printf(fmt, "fill ring empty", fill_empty_pps,
+                                      xsks[i]->rx_fill_empty_npkts);
+                               printf(fmt, "tx ring empty", tx_empty_pps,
+                                      xsks[i]->tx_empty_npkts);
+
+                               xsks[i]->prev_rx_dropped_npkts = xsks[i]->rx_dropped_npkts;
+                               xsks[i]->prev_rx_invalid_npkts = xsks[i]->rx_invalid_npkts;
+                               xsks[i]->prev_tx_invalid_npkts = xsks[i]->tx_invalid_npkts;
+                               xsks[i]->prev_rx_full_npkts = xsks[i]->rx_full_npkts;
+                               xsks[i]->prev_rx_fill_empty_npkts = xsks[i]->rx_fill_empty_npkts;
+                               xsks[i]->prev_tx_empty_npkts = xsks[i]->tx_empty_npkts;
+                       } else {
+                               printf("%-15s\n", "Error retrieving extra stats");
+                       }
+               }
        }
 }
 
@@ -630,6 +708,7 @@ static struct option long_options[] = {
        {"tx-pkt-count", required_argument, 0, 'C'},
        {"tx-pkt-size", required_argument, 0, 's'},
        {"tx-pkt-pattern", required_argument, 0, 'P'},
+       {"extra-stats", no_argument, 0, 'x'},
        {0, 0, 0, 0}
 };
 
@@ -664,6 +743,7 @@ static void usage(const char *prog)
                "                       (Default: %d bytes)\n"
                "                       Min size: %d, Max size %d.\n"
                "  -P, --tx-pkt-pattern=nPacket fill pattern. Default: 0x%x\n"
+               "  -x, --extra-stats    Display extra statistics.\n"
                "\n";
        fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE,
                opt_batch_size, MIN_PKT_SIZE, MIN_PKT_SIZE,
@@ -679,7 +759,7 @@ static void parse_command_line(int argc, char **argv)
        opterr = 0;
 
        for (;;) {
-               c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:",
+               c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:x",
                                long_options, &option_index);
                if (c == -1)
                        break;
@@ -760,6 +840,9 @@ static void parse_command_line(int argc, char **argv)
                case 'P':
                        opt_pkt_fill_pattern = strtol(optarg, NULL, 16);
                        break;
+               case 'x':
+                       opt_extra_stats = 1;
+                       break;
                default:
                        usage(basename(argv[0]));
                }
index d523450..6aba02a 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/timer.h>
 #include <linux/err.h>
 #include <linux/jiffies.h>
+#include <linux/workqueue.h>
 
 /*
  * Any file that uses trace points, must include the header.
@@ -20,6 +21,16 @@ struct trace_array *tr;
 static void mytimer_handler(struct timer_list *unused);
 static struct task_struct *simple_tsk;
 
+static void trace_work_fn(struct work_struct *work)
+{
+       /*
+        * Disable tracing for event "sample_event".
+        */
+       trace_array_set_clr_event(tr, "sample-subsystem", "sample_event",
+                       false);
+}
+static DECLARE_WORK(trace_work, trace_work_fn);
+
 /*
  * mytimer: Timer setup to disable tracing for event "sample_event". This
  * timer is only for the purposes of the sample module to demonstrate access of
@@ -29,11 +40,7 @@ static DEFINE_TIMER(mytimer, mytimer_handler);
 
 static void mytimer_handler(struct timer_list *unused)
 {
-       /*
-        * Disable tracing for event "sample_event".
-        */
-       trace_array_set_clr_event(tr, "sample-subsystem", "sample_event",
-                       false);
+       schedule_work(&trace_work);
 }
 
 static void simple_thread_func(int count)
@@ -76,6 +83,7 @@ static int simple_thread(void *arg)
                simple_thread_func(count++);
 
        del_timer(&mytimer);
+       cancel_work_sync(&trace_work);
 
        /*
         * trace_array_put() decrements the reference counter associated with
@@ -107,8 +115,12 @@ static int __init sample_trace_array_init(void)
        trace_printk_init_buffers();
 
        simple_tsk = kthread_run(simple_thread, NULL, "sample-instance");
-       if (IS_ERR(simple_tsk))
+       if (IS_ERR(simple_tsk)) {
+               trace_array_put(tr);
+               trace_array_destroy(tr);
                return -1;
+       }
+
        return 0;
 }
 
index 3223448..ad3e560 100644 (file)
@@ -267,7 +267,7 @@ struct amt_host_if_msg_header {
 struct amt_host_if_resp_header {
        struct amt_host_if_msg_header header;
        uint32_t status;
-       unsigned char data[0];
+       unsigned char data[];
 } __attribute__((packed));
 
 const uuid_le MEI_IAMTHIF = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d,  \
index 76c577e..49c7a46 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/fcntl.h>
 #define statx foo
 #define statx_timestamp foo_timestamp
+struct statx;
+struct statx_timestamp;
 #include <sys/stat.h>
 #undef statx
 #undef statx_timestamp
index 8511fb6..792b22f 100644 (file)
@@ -1,7 +1,5 @@
-# List of programs to build
-hostprogs := watch_test
+# SPDX-License-Identifier: GPL-2.0-only
+userprogs := watch_test
+always-y := $(userprogs)
 
-# Tell kbuild to always build the programs
-always-y := $(hostprogs)
-
-HOSTCFLAGS_watch_test.o += -I$(objtree)/usr/include
+userccflags += -I usr/include
index 0c3dc98..9a15fbf 100644 (file)
@@ -86,20 +86,21 @@ cc-cross-prefix = $(firstword $(foreach c, $(1), \
                        $(if $(shell command -v -- $(c)gcc 2>/dev/null), $(c))))
 
 # output directory for tests below
-TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/)
+TMPOUT = $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/).tmp_$$$$
 
 # try-run
 # Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise)
 # Exit code chooses option. "$$TMP" serves as a temporary file and is
 # automatically cleaned up.
 try-run = $(shell set -e;              \
-       TMP="$(TMPOUT).$$$$.tmp";       \
-       TMPO="$(TMPOUT).$$$$.o";        \
+       TMP=$(TMPOUT)/tmp;              \
+       TMPO=$(TMPOUT)/tmp.o;           \
+       mkdir -p $(TMPOUT);             \
+       trap "rm -rf $(TMPOUT)" EXIT;   \
        if ($(1)) >/dev/null 2>&1;      \
        then echo "$(2)";               \
        else echo "$(3)";               \
-       fi;                             \
-       rm -f "$$TMP" "$$TMPO")
+       fi)
 
 # as-option
 # Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
index c264da2..a5fe72c 100644 (file)
@@ -25,18 +25,12 @@ failure = $(if-success,$(1),n,y)
 
 # $(cc-option,<flag>)
 # Return y if the compiler supports <flag>, n otherwise
-cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -S -x c /dev/null -o /dev/null)
+cc-option = $(success,mkdir .tmp_$$$$; trap "rm -rf .tmp_$$$$" EXIT; $(CC) -Werror $(CLANG_FLAGS) $(1) -c -x c /dev/null -o .tmp_$$$$/tmp.o)
 
 # $(ld-option,<flag>)
 # Return y if the linker supports <flag>, n otherwise
 ld-option = $(success,$(LD) -v $(1))
 
-# $(as-option,<flag>)
-# /dev/zero is used as output instead of /dev/null as some assembler cribs when
-# both input and output are same. Also both of them have same write behaviour so
-# can be easily substituted.
-as-option = $(success, $(CC) $(CLANG_FLAGS) $(1) -c -x assembler /dev/null -o /dev/zero)
-
 # $(as-instr,<instr>)
 # Return y if the assembler supports <instr>, n otherwise
 as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) -c -x assembler -o /dev/null -)
index 4aea7cf..62c2756 100644 (file)
@@ -35,6 +35,7 @@ KBUILD_CFLAGS += $(call cc-option, -Wstringop-truncation)
 # The following turn off the warnings enabled by -Wextra
 KBUILD_CFLAGS += -Wno-missing-field-initializers
 KBUILD_CFLAGS += -Wno-sign-compare
+KBUILD_CFLAGS += -Wno-type-limits
 
 KBUILD_CPPFLAGS += -DKBUILD_EXTRA_WARN1
 
@@ -66,6 +67,7 @@ KBUILD_CFLAGS += -Wshadow
 KBUILD_CFLAGS += $(call cc-option, -Wlogical-op)
 KBUILD_CFLAGS += -Wmissing-field-initializers
 KBUILD_CFLAGS += -Wsign-compare
+KBUILD_CFLAGS += -Wtype-limits
 KBUILD_CFLAGS += $(call cc-option, -Wmaybe-uninitialized)
 KBUILD_CFLAGS += $(call cc-option, -Wunused-macros)
 
index 99ac59c..916b2f7 100644 (file)
@@ -212,6 +212,9 @@ $(foreach m, $(notdir $1), \
        $(addprefix $(obj)/, $(foreach s, $3, $($(m:%$(strip $2)=%$(s)))))))
 endef
 
+quiet_cmd_copy = COPY    $@
+      cmd_copy = cp $< $@
+
 # Shipped files
 # ===========================================================================
 
@@ -259,6 +262,7 @@ quiet_cmd_gzip = GZIP    $@
 # DTC
 # ---------------------------------------------------------------------------
 DTC ?= $(objtree)/scripts/dtc/dtc
+DTC_FLAGS += -Wno-interrupt_provider
 
 # Disable noisy checks by default
 ifeq ($(findstring 1,$(KBUILD_EXTRA_WARN)),)
@@ -274,7 +278,8 @@ endif
 
 ifneq ($(findstring 2,$(KBUILD_EXTRA_WARN)),)
 DTC_FLAGS += -Wnode_name_chars_strict \
-       -Wproperty_name_chars_strict
+       -Wproperty_name_chars_strict \
+       -Winterrupt_provider
 endif
 
 DTC_FLAGS += $(DTC_FLAGS_$(basetarget))
index 0fd1cf0..693dfa1 100755 (executable)
@@ -58,6 +58,21 @@ cat << EOF
 EOF
 }
 
+gen_proto_order_variant()
+{
+       local meta="$1"; shift
+       local pfx="$1"; shift
+       local name="$1"; shift
+       local sfx="$1"; shift
+       local order="$1"; shift
+       local arch="$1"
+       local atomic="$2"
+
+       local basename="${arch}${atomic}_${pfx}${name}${sfx}"
+
+       printf "#define arch_${basename}${order} ${basename}${order}\n"
+}
+
 #gen_proto_order_variants(meta, pfx, name, sfx, arch, atomic, int, args...)
 gen_proto_order_variants()
 {
@@ -72,6 +87,22 @@ gen_proto_order_variants()
 
        local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
 
+       if [ -z "$arch" ]; then
+               gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "" "$@"
+
+               if meta_has_acquire "${meta}"; then
+                       gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_acquire" "$@"
+               fi
+               if meta_has_release "${meta}"; then
+                       gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_release" "$@"
+               fi
+               if meta_has_relaxed "${meta}"; then
+                       gen_proto_order_variant "${meta}" "${pfx}" "${name}" "${sfx}" "_relaxed" "$@"
+               fi
+
+               echo ""
+       fi
+
        # If we don't have relaxed atomics, then we don't bother with ordering fallbacks
        # read_acquire and set_release need to be templated, though
        if ! meta_has_relaxed "${meta}"; then
index 91fa668..5bfa448 100755 (executable)
@@ -404,6 +404,7 @@ class PrinterHelpers(Printer):
 
     type_fwds = [
             'struct bpf_fib_lookup',
+            'struct bpf_sk_lookup',
             'struct bpf_perf_event_data',
             'struct bpf_perf_event_value',
             'struct bpf_pidns_info',
@@ -421,6 +422,12 @@ class PrinterHelpers(Printer):
             'struct sockaddr',
             'struct tcphdr',
             'struct seq_file',
+            'struct tcp6_sock',
+            'struct tcp_sock',
+            'struct tcp_timewait_sock',
+            'struct tcp_request_sock',
+            'struct udp6_sock',
+            'struct task_struct',
 
             'struct __sk_buff',
             'struct sk_msg_md',
@@ -444,6 +451,7 @@ class PrinterHelpers(Printer):
             'struct bpf_perf_event_data',
             'struct bpf_perf_event_value',
             'struct bpf_pidns_info',
+            'struct bpf_sk_lookup',
             'struct bpf_sock',
             'struct bpf_sock_addr',
             'struct bpf_sock_ops',
@@ -458,6 +466,12 @@ class PrinterHelpers(Printer):
             'struct sockaddr',
             'struct tcphdr',
             'struct seq_file',
+            'struct tcp6_sock',
+            'struct tcp_sock',
+            'struct tcp_timewait_sock',
+            'struct tcp_request_sock',
+            'struct udp6_sock',
+            'struct task_struct',
     }
     mapped_types = {
             'u8': '__u8',
@@ -475,6 +489,11 @@ class PrinterHelpers(Printer):
             'struct sk_msg_buff': 'struct sk_msg_md',
             'struct xdp_buff': 'struct xdp_md',
     }
+    # Helpers overloaded for different context types.
+    overloaded_helpers = [
+        'bpf_get_socket_cookie',
+        'bpf_sk_assign',
+    ]
 
     def print_header(self):
         header = '''\
@@ -531,7 +550,7 @@ class PrinterHelpers(Printer):
         for i, a in enumerate(proto['args']):
             t = a['type']
             n = a['name']
-            if proto['name'] == 'bpf_get_socket_cookie' and i == 0:
+            if proto['name'] in self.overloaded_helpers and i == 0:
                     t = 'void'
                     n = 'ctx'
             one_arg = '{}{}'.format(comma, self.map_type(t))
index 13e5fba..66a6d51 100755 (executable)
@@ -27,7 +27,10 @@ parse_symbol() {
        elif [[ "${modcache[$module]+isset}" == "isset" ]]; then
                local objfile=${modcache[$module]}
        else
-               [[ $modpath == "" ]] && return
+               if [[ $modpath == "" ]]; then
+                       echo "WARNING! Modules path isn't set, but is needed to parse this symbol" >&2
+                       return
+               fi
                local objfile=$(find "$modpath" -name "${module//_/[-_]}.ko*" -print -quit)
                [[ $objfile == "" ]] && return
                modcache[$module]=$objfile
index 4b3c486..b7955db 100644 (file)
@@ -1022,6 +1022,9 @@ static void check_i2c_bus_bridge(struct check *c, struct dt_info *dti, struct no
 }
 WARNING(i2c_bus_bridge, check_i2c_bus_bridge, NULL, &addr_size_cells);
 
+#define I2C_OWN_SLAVE_ADDRESS  (1U << 30)
+#define I2C_TEN_BIT_ADDRESS    (1U << 31)
+
 static void check_i2c_bus_reg(struct check *c, struct dt_info *dti, struct node *node)
 {
        struct property *prop;
@@ -1044,6 +1047,8 @@ static void check_i2c_bus_reg(struct check *c, struct dt_info *dti, struct node
        }
 
        reg = fdt32_to_cpu(*cells);
+       /* Ignore I2C_OWN_SLAVE_ADDRESS */
+       reg &= ~I2C_OWN_SLAVE_ADDRESS;
        snprintf(unit_addr, sizeof(unit_addr), "%x", reg);
        if (!streq(unitname, unit_addr))
                FAIL(c, dti, node, "I2C bus unit address format error, expected \"%s\"",
@@ -1051,10 +1056,15 @@ static void check_i2c_bus_reg(struct check *c, struct dt_info *dti, struct node
 
        for (len = prop->val.len; len > 0; len -= 4) {
                reg = fdt32_to_cpu(*(cells++));
-               if (reg > 0x3ff)
+               /* Ignore I2C_OWN_SLAVE_ADDRESS */
+               reg &= ~I2C_OWN_SLAVE_ADDRESS;
+
+               if ((reg & I2C_TEN_BIT_ADDRESS) && ((reg & ~I2C_TEN_BIT_ADDRESS) > 0x3ff))
                        FAIL_PROP(c, dti, node, prop, "I2C address must be less than 10-bits, got \"0x%x\"",
                                  reg);
-
+               else if (reg > 0x7f)
+                       FAIL_PROP(c, dti, node, prop, "I2C address must be less than 7-bits, got \"0x%x\". Set I2C_TEN_BIT_ADDRESS for 10 bit addresses or fix the property",
+                                 reg);
        }
 }
 WARNING(i2c_bus_reg, check_i2c_bus_reg, NULL, &reg_format, &i2c_bus_bridge);
@@ -1547,6 +1557,28 @@ static bool node_is_interrupt_provider(struct node *node)
 
        return false;
 }
+
+/*
+ * dtc check: an interrupt provider node should carry #interrupt-cells
+ * (and #address-cells) so that "interrupts"/"interrupt-map" references
+ * to it can be parsed.  Registered as the "interrupt_provider" warning
+ * and suppressible with -Wno-interrupt_provider.
+ */
+static void check_interrupt_provider(struct check *c,
+                                    struct dt_info *dti,
+                                    struct node *node)
+{
+       struct property *prop;
+
+       /* Only nodes identified as interrupt providers are checked. */
+       if (!node_is_interrupt_provider(node))
+               return;
+
+       prop = get_property(node, "#interrupt-cells");
+       if (!prop)
+               FAIL(c, dti, node,
+                    "Missing #interrupt-cells in interrupt provider");
+
+       /*
+        * NOTE(review): demanding #address-cells on every interrupt
+        * provider is strict — many existing device trees omit it.
+        * Confirm this matches the intended binding rules.
+        */
+       prop = get_property(node, "#address-cells");
+       if (!prop)
+               FAIL(c, dti, node,
+                    "Missing #address-cells in interrupt provider");
+}
+WARNING(interrupt_provider, check_interrupt_provider, NULL);
+
 static void check_interrupts_property(struct check *c,
                                      struct dt_info *dti,
                                      struct node *node)
@@ -1604,7 +1636,7 @@ static void check_interrupts_property(struct check *c,
 
        prop = get_property(irq_node, "#interrupt-cells");
        if (!prop) {
-               FAIL(c, dti, irq_node, "Missing #interrupt-cells in interrupt-parent");
+               /* We warn about that already in another test. */
                return;
        }
 
@@ -1828,6 +1860,7 @@ static struct check *check_table[] = {
        &deprecated_gpio_property,
        &gpios_property,
        &interrupts_property,
+       &interrupt_provider,
 
        &alias_paths,
 
index 6e74ece..a08f415 100644 (file)
@@ -51,6 +51,37 @@ extern int annotate;         /* annotate .dts with input source location */
 
 typedef uint32_t cell_t;
 
+/*
+ * Byte-wise big-endian loaders for integers stored in a flattened
+ * device tree blob (DTB values are always big-endian).  Assembling the
+ * value one byte at a time keeps the access safe for unaligned
+ * pointers and avoids casting 'p' to a wider integer type (no
+ * strict-aliasing or alignment hazards), unlike the fdtNN_to_cpu()
+ * pattern these replace at the call sites in this diff.
+ */
+static inline uint16_t dtb_ld16(const void *p)
+{
+       const uint8_t *bp = (const uint8_t *)p;
+
+       return ((uint16_t)bp[0] << 8)
+               | bp[1];
+}
+
+static inline uint32_t dtb_ld32(const void *p)
+{
+       const uint8_t *bp = (const uint8_t *)p;
+
+       return ((uint32_t)bp[0] << 24)
+               | ((uint32_t)bp[1] << 16)
+               | ((uint32_t)bp[2] << 8)
+               | bp[3];
+}
+
+static inline uint64_t dtb_ld64(const void *p)
+{
+       const uint8_t *bp = (const uint8_t *)p;
+
+       return ((uint64_t)bp[0] << 56)
+               | ((uint64_t)bp[1] << 48)
+               | ((uint64_t)bp[2] << 40)
+               | ((uint64_t)bp[3] << 32)
+               | ((uint64_t)bp[4] << 24)
+               | ((uint64_t)bp[5] << 16)
+               | ((uint64_t)bp[6] << 8)
+               | bp[7];
+}
 
 #define streq(a, b)    (strcmp((a), (b)) == 0)
 #define strstarts(s, prefix)   (strncmp((s), (prefix), strlen(prefix)) == 0)
index bd6977e..07f10d2 100644 (file)
@@ -156,7 +156,7 @@ static void asm_emit_data(void *e, struct data d)
                emit_offset_label(f, m->ref, m->offset);
 
        while ((d.len - off) >= sizeof(uint32_t)) {
-               asm_emit_cell(e, fdt32_to_cpu(*((fdt32_t *)(d.val+off))));
+               asm_emit_cell(e, dtb_ld32(d.val + off));
                off += sizeof(uint32_t);
        }
 
index 524b520..93e4a2b 100644 (file)
@@ -436,7 +436,7 @@ int fdt_open_into(const void *fdt, void *buf, int bufsize)
                        return struct_size;
        }
 
-       if (can_assume(LIBFDT_ORDER) |
+       if (can_assume(LIBFDT_ORDER) ||
            !fdt_blocks_misordered_(fdt, mem_rsv_size, struct_size)) {
                /* no further work necessary */
                err = fdt_move(fdt, buf, bufsize);
index 26759d5..94ce4bb 100644 (file)
@@ -32,7 +32,7 @@ static int fdt_sw_probe_(void *fdt)
 /* 'memrsv' state:     Initial state after fdt_create()
  *
  * Allowed functions:
- *     fdt_add_reservmap_entry()
+ *     fdt_add_reservemap_entry()
  *     fdt_finish_reservemap()         [moves to 'struct' state]
  */
 static int fdt_sw_probe_memrsv_(void *fdt)
index 36fadcd..fe49b5d 100644 (file)
@@ -9,6 +9,10 @@
 #include "libfdt_env.h"
 #include "fdt.h"
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 #define FDT_FIRST_SUPPORTED_VERSION    0x02
 #define FDT_LAST_SUPPORTED_VERSION     0x11
 
@@ -2069,4 +2073,8 @@ int fdt_overlay_apply(void *fdt, void *fdto);
 
 const char *fdt_strerror(int errval);
 
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* LIBFDT_H */
index c9d980c..061ba8c 100644 (file)
@@ -110,13 +110,13 @@ static void write_propval_int(FILE *f, const char *p, size_t len, size_t width)
                        fprintf(f, "%02"PRIx8, *(const uint8_t*)p);
                        break;
                case 2:
-                       fprintf(f, "0x%02"PRIx16, fdt16_to_cpu(*(const fdt16_t*)p));
+                       fprintf(f, "0x%02"PRIx16, dtb_ld16(p));
                        break;
                case 4:
-                       fprintf(f, "0x%02"PRIx32, fdt32_to_cpu(*(const fdt32_t*)p));
+                       fprintf(f, "0x%02"PRIx32, dtb_ld32(p));
                        break;
                case 8:
-                       fprintf(f, "0x%02"PRIx64, fdt64_to_cpu(*(const fdt64_t*)p));
+                       fprintf(f, "0x%02"PRIx64, dtb_ld64(p));
                        break;
                }
                if (p + width < end)
@@ -183,7 +183,7 @@ static enum markertype guess_value_type(struct property *prop)
                        nnotcelllbl++;
        }
 
-       if ((p[len-1] == '\0') && (nnotstring == 0) && (nnul < (len-nnul))
+       if ((p[len-1] == '\0') && (nnotstring == 0) && (nnul <= (len-nnul))
            && (nnotstringlbl == 0)) {
                return TYPE_STRING;
        } else if (((len % sizeof(cell_t)) == 0) && (nnotcelllbl == 0)) {
index 61dd711..0714799 100644 (file)
@@ -1 +1 @@
-#define DTC_VERSION "DTC 1.6.0-g87a656ae"
+#define DTC_VERSION "DTC 1.6.0-g9d7888cb"
index 5b6ea8e..4e93c12 100644 (file)
@@ -59,10 +59,10 @@ static void yaml_propval_int(yaml_emitter_t *emitter, struct marker *markers, ch
                        sprintf(buf, "0x%"PRIx8, *(uint8_t*)(data + off));
                        break;
                case 2:
-                       sprintf(buf, "0x%"PRIx16, fdt16_to_cpu(*(fdt16_t*)(data + off)));
+                       sprintf(buf, "0x%"PRIx16, dtb_ld16(data + off));
                        break;
                case 4:
-                       sprintf(buf, "0x%"PRIx32, fdt32_to_cpu(*(fdt32_t*)(data + off)));
+                       sprintf(buf, "0x%"PRIx32, dtb_ld32(data + off));
                        m = markers;
                        is_phandle = false;
                        for_each_marker_of_type(m, REF_PHANDLE) {
@@ -73,7 +73,7 @@ static void yaml_propval_int(yaml_emitter_t *emitter, struct marker *markers, ch
                        }
                        break;
                case 8:
-                       sprintf(buf, "0x%"PRIx64, fdt64_to_cpu(*(fdt64_t*)(data + off)));
+                       sprintf(buf, "0x%"PRIx64, dtb_ld64(data + off));
                        break;
                }
 
index ce0b99f..ae19fb0 100644 (file)
@@ -78,7 +78,7 @@ config GCC_PLUGIN_RANDSTRUCT
          source tree isn't cleaned after kernel installation).
 
          The seed used for compilation is located at
-         scripts/gcc-plgins/randomize_layout_seed.h.  It remains after
+         scripts/gcc-plugins/randomize_layout_seed.h.  It remains after
          a make clean to allow for external modules to be compiled with
          the existing seed and will be removed by a make mrproper or
          make distclean.
index 955cf3a..224f510 100755 (executable)
@@ -11,7 +11,7 @@ then
        echo "asm/inline/volatile keywords."
        echo
        echo "INFILE: header file to operate on"
-       echo "OUTFILE: output file which the processed header is writen to"
+       echo "OUTFILE: output file which the processed header is written to"
 
        exit 1
 fi
index c0ac8f7..4a61612 100644 (file)
@@ -4,27 +4,19 @@
  * Copyright (C) 2015 Boris Barbulovski <bbarbulovski@gmail.com>
  */
 
-#include <qglobal.h>
-
-#include <QMainWindow>
-#include <QList>
-#include <qtextbrowser.h>
 #include <QAction>
+#include <QApplication>
+#include <QCloseEvent>
+#include <QDebug>
+#include <QDesktopWidget>
 #include <QFileDialog>
+#include <QLabel>
+#include <QLayout>
+#include <QList>
 #include <QMenu>
-
-#include <qapplication.h>
-#include <qdesktopwidget.h>
-#include <qtoolbar.h>
-#include <qlayout.h>
-#include <qsplitter.h>
-#include <qlineedit.h>
-#include <qlabel.h>
-#include <qpushbutton.h>
-#include <qmenubar.h>
-#include <qmessagebox.h>
-#include <qregexp.h>
-#include <qevent.h>
+#include <QMenuBar>
+#include <QMessageBox>
+#include <QToolBar>
 
 #include <stdlib.h>
 
@@ -445,9 +437,10 @@ void ConfigList::updateList(ConfigItem* item)
        if (rootEntry != &rootmenu && (mode == singleMode ||
            (mode == symbolMode && rootEntry->parent != &rootmenu))) {
                item = (ConfigItem *)topLevelItem(0);
-               if (!item)
+               if (!item && mode != symbolMode) {
                        item = new ConfigItem(this, 0, true);
-               last = item;
+                       last = item;
+               }
        }
        if ((mode == singleMode || (mode == symbolMode && !(rootEntry->flags & MENU_ROOT))) &&
            rootEntry->sym && rootEntry->prompt) {
@@ -545,7 +538,7 @@ void ConfigList::setRootMenu(struct menu *menu)
        rootEntry = menu;
        updateListAll();
        if (currentItem()) {
-               currentItem()->setSelected(hasFocus());
+               setSelected(currentItem(), hasFocus());
                scrollToItem(currentItem());
        }
 }
@@ -873,7 +866,7 @@ void ConfigList::focusInEvent(QFocusEvent *e)
 
        ConfigItem* item = (ConfigItem *)currentItem();
        if (item) {
-               item->setSelected(true);
+               setSelected(item, true);
                menu = item->menu;
        }
        emit gotFocus(menu);
@@ -1021,7 +1014,7 @@ ConfigInfoView::ConfigInfoView(QWidget* parent, const char *name)
        : Parent(parent), sym(0), _menu(0)
 {
        setObjectName(name);
-
+       setOpenLinks(false);
 
        if (!objectName().isEmpty()) {
                configSettings->beginGroup(objectName());
@@ -1094,7 +1087,7 @@ void ConfigInfoView::menuInfo(void)
                        if (sym->name) {
                                head += " (";
                                if (showDebug())
-                                       head += QString().sprintf("<a href=\"s%p\">", sym);
+                                       head += QString().sprintf("<a href=\"s%s\">", sym->name);
                                head += print_filter(sym->name);
                                if (showDebug())
                                        head += "</a>";
@@ -1103,7 +1096,7 @@ void ConfigInfoView::menuInfo(void)
                } else if (sym->name) {
                        head += "<big><b>";
                        if (showDebug())
-                               head += QString().sprintf("<a href=\"s%p\">", sym);
+                               head += QString().sprintf("<a href=\"s%s\">", sym->name);
                        head += print_filter(sym->name);
                        if (showDebug())
                                head += "</a>";
@@ -1154,13 +1147,16 @@ QString ConfigInfoView::debug_info(struct symbol *sym)
                switch (prop->type) {
                case P_PROMPT:
                case P_MENU:
-                       debug += QString().sprintf("prompt: <a href=\"m%p\">", prop->menu);
+                       debug += QString().sprintf("prompt: <a href=\"m%s\">", sym->name);
                        debug += print_filter(prop->text);
                        debug += "</a><br>";
                        break;
                case P_DEFAULT:
                case P_SELECT:
                case P_RANGE:
+               case P_COMMENT:
+               case P_IMPLY:
+               case P_SYMBOL:
                        debug += prop_get_type_name(prop->type);
                        debug += ": ";
                        expr_print(prop->expr, expr_print_help, &debug, E_NONE);
@@ -1226,13 +1222,62 @@ void ConfigInfoView::expr_print_help(void *data, struct symbol *sym, const char
        QString str2 = print_filter(str);
 
        if (sym && sym->name && !(sym->flags & SYMBOL_CONST)) {
-               *text += QString().sprintf("<a href=\"s%p\">", sym);
+               *text += QString().sprintf("<a href=\"s%s\">", sym->name);
                *text += str2;
                *text += "</a>";
        } else
                *text += str2;
 }
 
+void ConfigInfoView::clicked(const QUrl &url)
+{
+       QByteArray str = url.toEncoded();
+       const std::size_t count = str.size();
+       char *data = new char[count + 1];
+       struct symbol **result;
+       struct menu *m = NULL;
+
+       if (count < 1) {
+               qInfo() << "Clicked link is empty";
+               delete data;
+               return;
+       }
+
+       memcpy(data, str.constData(), count);
+       data[count] = '\0';
+
+       /* Seek for exact match */
+       data[0] = '^';
+       strcat(data, "$");
+       result = sym_re_search(data);
+       if (!result) {
+               qInfo() << "Clicked symbol is invalid:" << data;
+               delete data;
+               return;
+       }
+
+       sym = *result;
+
+       /* Seek for the menu which holds the symbol */
+       for (struct property *prop = sym->prop; prop; prop = prop->next) {
+                   if (prop->type != P_PROMPT && prop->type != P_MENU)
+                           continue;
+                   m = prop->menu;
+                   break;
+       }
+
+       if (!m) {
+               /* Symbol is not visible as a menu */
+               symbolInfo();
+               emit showDebugChanged(true);
+       } else {
+               emit menuSelected(m);
+       }
+
+       free(result);
+       delete data;
+}
+
 QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos)
 {
        QMenu* popup = Parent::createStandardContextMenu(pos);
@@ -1402,18 +1447,22 @@ ConfigMainWindow::ConfigMainWindow(void)
        addToolBar(toolBar);
 
        backAction = new QAction(QPixmap(xpm_back), "Back", this);
-         connect(backAction, SIGNAL(triggered(bool)), SLOT(goBack()));
-         backAction->setEnabled(false);
+       connect(backAction, SIGNAL(triggered(bool)), SLOT(goBack()));
+
        QAction *quitAction = new QAction("&Quit", this);
        quitAction->setShortcut(Qt::CTRL + Qt::Key_Q);
-         connect(quitAction, SIGNAL(triggered(bool)), SLOT(close()));
+       connect(quitAction, SIGNAL(triggered(bool)), SLOT(close()));
+
        QAction *loadAction = new QAction(QPixmap(xpm_load), "&Load", this);
        loadAction->setShortcut(Qt::CTRL + Qt::Key_L);
-         connect(loadAction, SIGNAL(triggered(bool)), SLOT(loadConfig()));
+       connect(loadAction, SIGNAL(triggered(bool)), SLOT(loadConfig()));
+
        saveAction = new QAction(QPixmap(xpm_save), "&Save", this);
        saveAction->setShortcut(Qt::CTRL + Qt::Key_S);
-         connect(saveAction, SIGNAL(triggered(bool)), SLOT(saveConfig()));
+       connect(saveAction, SIGNAL(triggered(bool)), SLOT(saveConfig()));
+
        conf_set_changed_callback(conf_changed);
+
        // Set saveAction's initial state
        conf_changed();
        configname = xstrdup(conf_get_configname());
@@ -1506,6 +1555,9 @@ ConfigMainWindow::ConfigMainWindow(void)
        helpMenu->addAction(showIntroAction);
        helpMenu->addAction(showAboutAction);
 
+       connect (helpText, SIGNAL (anchorClicked (const QUrl &)),
+                helpText, SLOT (clicked (const QUrl &)) );
+
        connect(configList, SIGNAL(menuChanged(struct menu *)),
                helpText, SLOT(setInfo(struct menu *)));
        connect(configList, SIGNAL(menuSelected(struct menu *)),
@@ -1611,21 +1663,11 @@ void ConfigMainWindow::searchConfig(void)
 void ConfigMainWindow::changeItens(struct menu *menu)
 {
        configList->setRootMenu(menu);
-
-       if (configList->rootEntry->parent == &rootmenu)
-               backAction->setEnabled(false);
-       else
-               backAction->setEnabled(true);
 }
 
 void ConfigMainWindow::changeMenu(struct menu *menu)
 {
        menuList->setRootMenu(menu);
-
-       if (menuList->rootEntry->parent == &rootmenu)
-               backAction->setEnabled(false);
-       else
-               backAction->setEnabled(true);
 }
 
 void ConfigMainWindow::setMenuLink(struct menu *menu)
@@ -1645,22 +1687,26 @@ void ConfigMainWindow::setMenuLink(struct menu *menu)
                        return;
                list->setRootMenu(parent);
                break;
-       case symbolMode:
+       case menuMode:
                if (menu->flags & MENU_ROOT) {
-                       configList->setRootMenu(menu);
+                       menuList->setRootMenu(menu);
                        configList->clearSelection();
-                       list = menuList;
-               } else {
                        list = configList;
+               } else {
                        parent = menu_get_parent_menu(menu->parent);
                        if (!parent)
                                return;
-                       item = menuList->findConfigItem(parent);
+
+                       /* Select the config view */
+                       item = configList->findConfigItem(parent);
                        if (item) {
-                               item->setSelected(true);
-                               menuList->scrollToItem(item);
+                               configList->setSelected(item, true);
+                               configList->scrollToItem(item);
                        }
-                       list->setRootMenu(parent);
+
+                       menuList->setRootMenu(parent);
+                       menuList->clearSelection();
+                       list = menuList;
                }
                break;
        case fullMode:
@@ -1673,9 +1719,10 @@ void ConfigMainWindow::setMenuLink(struct menu *menu)
        if (list) {
                item = list->findConfigItem(menu);
                if (item) {
-                       item->setSelected(true);
+                       list->setSelected(item, true);
                        list->scrollToItem(item);
                        list->setFocus();
+                       helpText->setInfo(menu);
                }
        }
 }
@@ -1688,25 +1735,11 @@ void ConfigMainWindow::listFocusChanged(void)
 
 void ConfigMainWindow::goBack(void)
 {
-       ConfigItem* item, *oldSelection;
-
-       configList->setParentMenu();
+qInfo() << __FUNCTION__;
        if (configList->rootEntry == &rootmenu)
-               backAction->setEnabled(false);
-
-       if (menuList->selectedItems().count() == 0)
                return;
 
-       item = (ConfigItem*)menuList->selectedItems().first();
-       oldSelection = item;
-       while (item) {
-               if (item->menu == configList->rootEntry) {
-                       oldSelection->setSelected(false);
-                       item->setSelected(true);
-                       break;
-               }
-               item = (ConfigItem*)item->parent();
-       }
+       configList->setParentMenu();
 }
 
 void ConfigMainWindow::showSingleView(void)
@@ -1718,6 +1751,8 @@ void ConfigMainWindow::showSingleView(void)
        fullViewAction->setEnabled(true);
        fullViewAction->setChecked(false);
 
+       backAction->setEnabled(true);
+
        menuView->hide();
        menuList->setRootMenu(0);
        configList->mode = singleMode;
@@ -1737,6 +1772,8 @@ void ConfigMainWindow::showSplitView(void)
        fullViewAction->setEnabled(true);
        fullViewAction->setChecked(false);
 
+       backAction->setEnabled(false);
+
        configList->mode = menuMode;
        if (configList->rootEntry == &rootmenu)
                configList->updateListAll();
@@ -1760,6 +1797,8 @@ void ConfigMainWindow::showFullView(void)
        fullViewAction->setEnabled(false);
        fullViewAction->setChecked(true);
 
+       backAction->setEnabled(false);
+
        menuView->hide();
        menuList->setRootMenu(0);
        configList->mode = fullMode;
index c879d79..fb9e972 100644 (file)
@@ -3,17 +3,17 @@
  * Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
  */
 
-#include <QTextBrowser>
-#include <QTreeWidget>
-#include <QMainWindow>
+#include <QCheckBox>
+#include <QDialog>
 #include <QHeaderView>
-#include <qsettings.h>
+#include <QLineEdit>
+#include <QMainWindow>
 #include <QPushButton>
 #include <QSettings>
-#include <QLineEdit>
 #include <QSplitter>
-#include <QCheckBox>
-#include <QDialog>
+#include <QTextBrowser>
+#include <QTreeWidget>
+
 #include "expr.h"
 
 class ConfigView;
@@ -45,11 +45,17 @@ class ConfigList : public QTreeWidget {
 public:
        ConfigList(ConfigView* p, const char *name = 0);
        void reinit(void);
+       ConfigItem* findConfigItem(struct menu *);
        ConfigView* parent(void) const
        {
                return (ConfigView*)Parent::parent();
        }
-       ConfigItem* findConfigItem(struct menu *);
+	/*
+	 * Make 'item' the only selected item (or deselect everything when
+	 * enable is false).  Snapshot selectedItems() first: it returns a
+	 * fresh list on every call, so deselecting entries while indexing
+	 * into re-computed lists shrinks the list under the loop index and
+	 * leaves every other previously selected item still selected.
+	 */
+	void setSelected(QTreeWidgetItem *item, bool enable) {
+		const QList<QTreeWidgetItem *> sel = selectedItems();
+
+		for (int i = 0; i < sel.size(); i++)
+			sel.at(i)->setSelected(false);
+
+		item->setSelected(enable);
+	}
 
 protected:
        void keyPressEvent(QKeyEvent *e);
@@ -250,6 +256,7 @@ public slots:
        void setInfo(struct menu *menu);
        void saveSettings(void);
        void setShowDebug(bool);
+       void clicked (const QUrl &url);
 
 signals:
        void showDebugChanged(bool);
index 92dd745..e26f02d 100755 (executable)
@@ -336,6 +336,12 @@ fi
 
 vmlinux_link vmlinux "${kallsymso}" ${btf_vmlinux_bin_o}
 
+# fill in BTF IDs
+if [ -n "${CONFIG_DEBUG_INFO_BTF}" ]; then
+info BTFIDS vmlinux
+${RESOLVE_BTFIDS} vmlinux
+fi
+
 if [ -n "${CONFIG_BUILDTIME_TABLE_SORT}" ]; then
        info SORTTAB vmlinux
        if ! sorttable vmlinux; then
index 74eab03..f9b1952 100644 (file)
 #undef has_rel_mcount
 #undef tot_relsize
 #undef get_mcountsym
+#undef find_symtab
+#undef get_shnum
+#undef set_shnum
+#undef get_shstrndx
+#undef get_symindex
 #undef get_sym_str_and_relp
 #undef do_func
 #undef Elf_Addr
 # define __has_rel_mcount      __has64_rel_mcount
 # define has_rel_mcount                has64_rel_mcount
 # define tot_relsize           tot64_relsize
+# define find_symtab           find_symtab64
+# define get_shnum             get_shnum64
+# define set_shnum             set_shnum64
+# define get_shstrndx          get_shstrndx64
+# define get_symindex          get_symindex64
 # define get_sym_str_and_relp  get_sym_str_and_relp_64
 # define do_func               do64
 # define get_mcountsym         get_mcountsym_64
 # define __has_rel_mcount      __has32_rel_mcount
 # define has_rel_mcount                has32_rel_mcount
 # define tot_relsize           tot32_relsize
+# define find_symtab           find_symtab32
+# define get_shnum             get_shnum32
+# define set_shnum             set_shnum32
+# define get_shstrndx          get_shstrndx32
+# define get_symindex          get_symindex32
 # define get_sym_str_and_relp  get_sym_str_and_relp_32
 # define do_func               do32
 # define get_mcountsym         get_mcountsym_32
@@ -173,6 +188,67 @@ static int MIPS_is_fake_mcount(Elf_Rel const *rp)
        return is_fake;
 }
 
+/*
+ * Map a symbol to the index of the section it lives in.  Normal
+ * indices are held directly in st_shndx; the reserved value
+ * SHN_XINDEX signals that the real (extended) index is stored in the
+ * parallel SHT_SYMTAB_SHNDX array, keyed by the symbol's position
+ * within the symbol table.
+ */
+static unsigned int get_symindex(Elf_Sym const *sym, Elf32_Word const *symtab,
+                                Elf32_Word const *symtab_shndx)
+{
+       unsigned long offset;
+       int index;
+
+       if (sym->st_shndx != SHN_XINDEX)
+               return w2(sym->st_shndx);
+
+       /* Position of this symbol entry within the symbol table. */
+       offset = (unsigned long)sym - (unsigned long)symtab;
+       index = offset / sizeof(*sym);
+
+       return w(symtab_shndx[index]);
+}
+
+/*
+ * ELF extended section numbering: when the section count does not fit
+ * in the 16-bit e_shnum field, e_shnum is 0 and the real count is
+ * carried in section header 0's sh_size.  Return the effective count.
+ */
+static unsigned int get_shnum(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0)
+{
+       if (shdr0 && !ehdr->e_shnum)
+               return w(shdr0->sh_size);
+
+       return w2(ehdr->e_shnum);
+}
+
+/*
+ * Store a section count back, switching to the extended scheme when it
+ * reaches SHN_LORESERVE.
+ * NOTE(review): on ELF64 sh_size is a 64-bit field but is written here
+ * via the 32-bit w() swapper — confirm this stores the intended value
+ * for large counts on 64-bit objects.
+ */
+static void set_shnum(Elf_Ehdr *ehdr, Elf_Shdr *shdr0, unsigned int new_shnum)
+{
+       if (new_shnum >= SHN_LORESERVE) {
+               ehdr->e_shnum = 0;
+               shdr0->sh_size = w(new_shnum);
+       } else
+               ehdr->e_shnum = w2(new_shnum);
+}
+
+/*
+ * e_shstrndx analogue of get_shnum(): SHN_XINDEX redirects to section
+ * header 0's sh_link for the real section-name string table index.
+ */
+static int get_shstrndx(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0)
+{
+       if (ehdr->e_shstrndx != SHN_XINDEX)
+               return w2(ehdr->e_shstrndx);
+
+       return w(shdr0->sh_link);
+}
+
+/*
+ * Scan the nhdr section headers for the symbol table and its optional
+ * SHT_SYMTAB_SHNDX companion (extended section indices).  Either
+ * output pointer is left NULL when the corresponding section does not
+ * exist; the scan stops early once both are found.
+ * NOTE(review): sh_type and sh_offset are read unswapped here, unlike
+ * the w()/w2() accessors used elsewhere in this file — confirm
+ * cross-endian object files are handled as intended.
+ */
+static void find_symtab(Elf_Ehdr *const ehdr, Elf_Shdr const *shdr0,
+                       unsigned const nhdr, Elf32_Word **symtab,
+                       Elf32_Word **symtab_shndx)
+{
+       Elf_Shdr const *relhdr;
+       unsigned k;
+
+       *symtab = NULL;
+       *symtab_shndx = NULL;
+
+       for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) {
+               if (relhdr->sh_type == SHT_SYMTAB)
+                       *symtab = (void *)ehdr + relhdr->sh_offset;
+               else if (relhdr->sh_type == SHT_SYMTAB_SHNDX)
+                       *symtab_shndx = (void *)ehdr + relhdr->sh_offset;
+
+               if (*symtab && *symtab_shndx)
+                       break;
+       }
+}
+
 /* Append the new shstrtab, Elf_Shdr[], __mcount_loc and its relocations. */
 static int append_func(Elf_Ehdr *const ehdr,
                        Elf_Shdr *const shstr,
@@ -188,10 +264,12 @@ static int append_func(Elf_Ehdr *const ehdr,
        char const *mc_name = (sizeof(Elf_Rela) == rel_entsize)
                ? ".rela__mcount_loc"
                :  ".rel__mcount_loc";
-       unsigned const old_shnum = w2(ehdr->e_shnum);
        uint_t const old_shoff = _w(ehdr->e_shoff);
        uint_t const old_shstr_sh_size   = _w(shstr->sh_size);
        uint_t const old_shstr_sh_offset = _w(shstr->sh_offset);
+       Elf_Shdr *const shdr0 = (Elf_Shdr *)(old_shoff + (void *)ehdr);
+       unsigned int const old_shnum = get_shnum(ehdr, shdr0);
+       unsigned int const new_shnum = 2 + old_shnum; /* {.rel,}__mcount_loc */
        uint_t t = 1 + strlen(mc_name) + _w(shstr->sh_size);
        uint_t new_e_shoff;
 
@@ -201,6 +279,8 @@ static int append_func(Elf_Ehdr *const ehdr,
        t += (_align & -t);  /* word-byte align */
        new_e_shoff = t;
 
+       set_shnum(ehdr, shdr0, new_shnum);
+
        /* body for new shstrtab */
        if (ulseek(sb.st_size, SEEK_SET) < 0)
                return -1;
@@ -255,7 +335,6 @@ static int append_func(Elf_Ehdr *const ehdr,
                return -1;
 
        ehdr->e_shoff = _w(new_e_shoff);
-       ehdr->e_shnum = w2(2 + w2(ehdr->e_shnum));  /* {.rel,}__mcount_loc */
        if (ulseek(0, SEEK_SET) < 0)
                return -1;
        if (uwrite(ehdr, sizeof(*ehdr)) < 0)
@@ -434,6 +513,8 @@ static int find_secsym_ndx(unsigned const txtndx,
                                uint_t *const recvalp,
                                unsigned int *sym_index,
                                Elf_Shdr const *const symhdr,
+                               Elf32_Word const *symtab,
+                               Elf32_Word const *symtab_shndx,
                                Elf_Ehdr const *const ehdr)
 {
        Elf_Sym const *const sym0 = (Elf_Sym const *)(_w(symhdr->sh_offset)
@@ -445,7 +526,7 @@ static int find_secsym_ndx(unsigned const txtndx,
        for (symp = sym0, t = nsym; t; --t, ++symp) {
                unsigned int const st_bind = ELF_ST_BIND(symp->st_info);
 
-               if (txtndx == w2(symp->st_shndx)
+               if (txtndx == get_symindex(symp, symtab, symtab_shndx)
                        /* avoid STB_WEAK */
                    && (STB_LOCAL == st_bind || STB_GLOBAL == st_bind)) {
                        /* function symbols on ARM have quirks, avoid them */
@@ -516,21 +597,23 @@ static unsigned tot_relsize(Elf_Shdr const *const shdr0,
        return totrelsz;
 }
 
-
 /* Overall supervision for Elf32 ET_REL file. */
 static int do_func(Elf_Ehdr *const ehdr, char const *const fname,
                   unsigned const reltype)
 {
        Elf_Shdr *const shdr0 = (Elf_Shdr *)(_w(ehdr->e_shoff)
                + (void *)ehdr);
-       unsigned const nhdr = w2(ehdr->e_shnum);
-       Elf_Shdr *const shstr = &shdr0[w2(ehdr->e_shstrndx)];
+       unsigned const nhdr = get_shnum(ehdr, shdr0);
+       Elf_Shdr *const shstr = &shdr0[get_shstrndx(ehdr, shdr0)];
        char const *const shstrtab = (char const *)(_w(shstr->sh_offset)
                + (void *)ehdr);
 
        Elf_Shdr const *relhdr;
        unsigned k;
 
+       Elf32_Word *symtab;
+       Elf32_Word *symtab_shndx;
+
        /* Upper bound on space: assume all relevant relocs are for mcount. */
        unsigned       totrelsz;
 
@@ -561,6 +644,8 @@ static int do_func(Elf_Ehdr *const ehdr, char const *const fname,
                return -1;
        }
 
+       find_symtab(ehdr, shdr0, nhdr, &symtab, &symtab_shndx);
+
        for (relhdr = shdr0, k = nhdr; k; --k, ++relhdr) {
                char const *const txtname = has_rel_mcount(relhdr, shdr0,
                        shstrtab, fname);
@@ -577,6 +662,7 @@ static int do_func(Elf_Ehdr *const ehdr, char const *const fname,
                        result = find_secsym_ndx(w(relhdr->sh_info), txtname,
                                                &recval, &recsym,
                                                &shdr0[symsec_sh_link],
+                                               symtab, symtab_shndx,
                                                ehdr);
                        if (result)
                                goto out;
index e12c490..1d20003 100644 (file)
@@ -188,19 +188,7 @@ DEFINE_LSM(integrity) = {
 int integrity_kernel_read(struct file *file, loff_t offset,
                          void *addr, unsigned long count)
 {
-       mm_segment_t old_fs;
-       char __user *buf = (char __user *)addr;
-       ssize_t ret;
-
-       if (!(file->f_mode & FMODE_READ))
-               return -EBADF;
-
-       old_fs = get_fs();
-       set_fs(KERNEL_DS);
-       ret = __vfs_read(file, buf, count, &offset);
-       set_fs(old_fs);
-
-       return ret;
+       return __kernel_read(file, addr, count, &offset);
 }
 
 /*
index df93ac2..9d94080 100644 (file)
@@ -30,7 +30,7 @@
 
 enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
                     IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };
-enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
+enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8, TPM_PCR10 = 10 };
 
 /* digest size for IMA, fits SHA1 or MD5 */
 #define IMA_DIGEST_SIZE                SHA1_DIGEST_SIZE
index 220b149..011c3c7 100644 (file)
@@ -823,13 +823,26 @@ static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
        if (rc != 0)
                return rc;
 
-       /* cumulative sha1 over tpm registers 0-7 */
+       /* cumulative digest over TPM registers 0-7 */
        for (i = TPM_PCR0; i < TPM_PCR8; i++) {
                ima_pcrread(i, &d);
                /* now accumulate with current aggregate */
                rc = crypto_shash_update(shash, d.digest,
                                         crypto_shash_digestsize(tfm));
        }
+       /*
+        * Extend cumulative digest over TPM registers 8-9, which contain
+        * measurement for the kernel command line (reg. 8) and image (reg. 9)
+        * in a typical PCR allocation. Registers 8-9 are only included in
+        * non-SHA1 boot_aggregate digests to avoid ambiguity.
+        */
+       if (alg_id != TPM_ALG_SHA1) {
+               for (i = TPM_PCR8; i < TPM_PCR10; i++) {
+                       ima_pcrread(i, &d);
+                       rc = crypto_shash_update(shash, d.digest,
+                                               crypto_shash_digestsize(tfm));
+               }
+       }
        if (!rc)
                crypto_shash_final(shash, digest);
        return rc;
index 298b737..16c1894 100644 (file)
@@ -107,7 +107,7 @@ struct ima_digest_data {
                } ng;
                u8 data[2];
        } xattr;
-       u8 digest[0];
+       u8 digest[];
 } __packed;
 
 /*
@@ -119,7 +119,7 @@ struct signature_v2_hdr {
        uint8_t hash_algo;      /* Digest algorithm [enum hash_algo] */
        __be32 keyid;           /* IMA key identifier - not X509/PGP specific */
        __be16 sig_size;        /* signature size */
-       uint8_t sig[0];         /* signature payload */
+       uint8_t sig[];          /* signature payload */
 } __packed;
 
 /* integrity data associated with an inode */
index 0ce3e73..70a7ad3 100644 (file)
@@ -1414,7 +1414,22 @@ EXPORT_SYMBOL(security_inode_copy_up);
 
 int security_inode_copy_up_xattr(const char *name)
 {
-       return call_int_hook(inode_copy_up_xattr, -EOPNOTSUPP, name);
+       struct security_hook_list *hp;
+       int rc;
+
+       /*
+        * The implementation can return 0 (accept the xattr), 1 (discard the
+        * xattr), -EOPNOTSUPP if it does not know anything about the xattr or
+        * any other error code in case of an error.
+        */
+       hlist_for_each_entry(hp,
+               &security_hook_heads.inode_copy_up_xattr, list) {
+               rc = hp->hook.inode_copy_up_xattr(name);
+               if (rc != LSM_RET_DEFAULT(inode_copy_up_xattr))
+                       return rc;
+       }
+
+       return LSM_RET_DEFAULT(inode_copy_up_xattr);
 }
 EXPORT_SYMBOL(security_inode_copy_up_xattr);
 
index da94a1b..0cc7cdd 100644 (file)
@@ -27,6 +27,9 @@ static int cond_evaluate_expr(struct policydb *p, struct cond_expr *expr)
        int s[COND_EXPR_MAXDEPTH];
        int sp = -1;
 
+       if (expr->len == 0)
+               return -1;
+
        for (i = 0; i < expr->len; i++) {
                struct cond_expr_node *node = &expr->nodes[i];
 
@@ -392,27 +395,19 @@ static int cond_read_node(struct policydb *p, struct cond_node *node, void *fp)
 
                rc = next_entry(buf, fp, sizeof(u32) * 2);
                if (rc)
-                       goto err;
+                       return rc;
 
                expr->expr_type = le32_to_cpu(buf[0]);
                expr->bool = le32_to_cpu(buf[1]);
 
-               if (!expr_node_isvalid(p, expr)) {
-                       rc = -EINVAL;
-                       goto err;
-               }
+               if (!expr_node_isvalid(p, expr))
+                       return -EINVAL;
        }
 
        rc = cond_read_av_list(p, fp, &node->true_list, NULL);
        if (rc)
-               goto err;
-       rc = cond_read_av_list(p, fp, &node->false_list, &node->true_list);
-       if (rc)
-               goto err;
-       return 0;
-err:
-       cond_node_destroy(node);
-       return rc;
+               return rc;
+       return cond_read_av_list(p, fp, &node->false_list, &node->true_list);
 }
 
 int cond_read_list(struct policydb *p, void *fp)
index 313919b..ef0afd8 100644 (file)
@@ -2888,8 +2888,12 @@ err:
        if (*names) {
                for (i = 0; i < *len; i++)
                        kfree((*names)[i]);
+               kfree(*names);
        }
        kfree(*values);
+       *len = 0;
+       *names = NULL;
+       *values = NULL;
        goto out;
 }
 
index 509290f..0e53f6f 100644 (file)
@@ -764,6 +764,9 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
 
        retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
        if (!retval) {
+               /* clear flags and stop any drain wait */
+               stream->partial_drain = false;
+               stream->metadata_set = false;
                snd_compr_drain_notify(stream);
                stream->runtime->total_bytes_available = 0;
                stream->runtime->total_bytes_transferred = 0;
@@ -921,6 +924,7 @@ static int snd_compr_partial_drain(struct snd_compr_stream *stream)
        if (stream->next_track == false)
                return -EPERM;
 
+       stream->partial_drain = true;
        retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
        if (retval) {
                pr_debug("Partial drain returned failure\n");
index e69a4ef..08c10ac 100644 (file)
@@ -91,6 +91,8 @@ int snd_opl3_ioctl(struct snd_hwdep * hw, struct file *file,
                {
                        struct snd_dm_fm_info info;
 
+                       memset(&info, 0, sizeof(info));
+
                        info.fm_mode = opl3->fm_mode;
                        info.rhythm = opl3->rhythm;
                        if (copy_to_user(argp, &info, sizeof(struct snd_dm_fm_info)))
index 20b8f6c..99aec73 100644 (file)
@@ -208,8 +208,8 @@ static const struct config_entry config_table[] = {
        },
 #endif
 
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE)
 /* Cometlake-LP */
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP)
        {
                .flags = FLAG_SOF,
                .device = 0x02c8,
@@ -240,9 +240,7 @@ static const struct config_entry config_table[] = {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x02c8,
        },
-#endif
 /* Cometlake-H */
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H)
        {
                .flags = FLAG_SOF,
                .device = 0x06c8,
index 2c6d2be..824f4ac 100644 (file)
@@ -72,6 +72,12 @@ static int compare_input_type(const void *ap, const void *bp)
        if (a->type != b->type)
                return (int)(a->type - b->type);
 
+       /* If has both hs_mic and hp_mic, pick the hs_mic ahead of hp_mic. */
+       if (a->is_headset_mic && b->is_headphone_mic)
+               return -1; /* don't swap */
+       else if (a->is_headphone_mic && b->is_headset_mic)
+               return 1; /* swap */
+
        /* In case one has boost and the other one has not,
           pick the one with boost first. */
        return (int)(b->has_boost_on_pin - a->has_boost_on_pin);
index d20aedd..3565e2a 100644 (file)
@@ -2470,6 +2470,9 @@ static const struct pci_device_id azx_ids[] = {
        /* Icelake */
        { PCI_DEVICE(0x8086, 0x34c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Icelake-H */
+       { PCI_DEVICE(0x8086, 0x3dc8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Jasperlake */
        { PCI_DEVICE(0x8086, 0x38c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
@@ -2478,9 +2481,14 @@ static const struct pci_device_id azx_ids[] = {
        /* Tigerlake */
        { PCI_DEVICE(0x8086, 0xa0c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Tigerlake-H */
+       { PCI_DEVICE(0x8086, 0x43c8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Elkhart Lake */
        { PCI_DEVICE(0x8086, 0x4b55),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       { PCI_DEVICE(0x8086, 0x4b58),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Broxton-P(Apollolake) */
        { PCI_DEVICE(0x8086, 0x5a98),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON },
index fbd7cc6..41eaa89 100644 (file)
@@ -259,7 +259,7 @@ static int hinfo_to_pcm_index(struct hda_codec *codec,
                if (get_pcm_rec(spec, pcm_idx)->stream == hinfo)
                        return pcm_idx;
 
-       codec_warn(codec, "HDMI: hinfo %p not registered\n", hinfo);
+       codec_warn(codec, "HDMI: hinfo %p not tied to a PCM\n", hinfo);
        return -EINVAL;
 }
 
@@ -277,7 +277,8 @@ static int hinfo_to_pin_index(struct hda_codec *codec,
                        return pin_idx;
        }
 
-       codec_dbg(codec, "HDMI: hinfo %p not registered\n", hinfo);
+       codec_dbg(codec, "HDMI: hinfo %p (pcm %d) not registered\n", hinfo,
+                 hinfo_to_pcm_index(codec, hinfo));
        return -EINVAL;
 }
 
@@ -1804,33 +1805,43 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
 
 static int hdmi_parse_codec(struct hda_codec *codec)
 {
-       hda_nid_t nid;
+       hda_nid_t start_nid;
+       unsigned int caps;
        int i, nodes;
 
-       nodes = snd_hda_get_sub_nodes(codec, codec->core.afg, &nid);
-       if (!nid || nodes < 0) {
+       nodes = snd_hda_get_sub_nodes(codec, codec->core.afg, &start_nid);
+       if (!start_nid || nodes < 0) {
                codec_warn(codec, "HDMI: failed to get afg sub nodes\n");
                return -EINVAL;
        }
 
-       for (i = 0; i < nodes; i++, nid++) {
-               unsigned int caps;
-               unsigned int type;
+       /*
+        * hdmi_add_pin() assumes total amount of converters to
+        * be known, so first discover all converters
+        */
+       for (i = 0; i < nodes; i++) {
+               hda_nid_t nid = start_nid + i;
 
                caps = get_wcaps(codec, nid);
-               type = get_wcaps_type(caps);
 
                if (!(caps & AC_WCAP_DIGITAL))
                        continue;
 
-               switch (type) {
-               case AC_WID_AUD_OUT:
+               if (get_wcaps_type(caps) == AC_WID_AUD_OUT)
                        hdmi_add_cvt(codec, nid);
-                       break;
-               case AC_WID_PIN:
+       }
+
+       /* discover audio pins */
+       for (i = 0; i < nodes; i++) {
+               hda_nid_t nid = start_nid + i;
+
+               caps = get_wcaps(codec, nid);
+
+               if (!(caps & AC_WCAP_DIGITAL))
+                       continue;
+
+               if (get_wcaps_type(caps) == AC_WID_PIN)
                        hdmi_add_pin(codec, nid);
-                       break;
-               }
        }
 
        return 0;
@@ -4145,6 +4156,11 @@ HDA_CODEC_ENTRY(0x10de0095, "GPU 95 HDMI/DP",    patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009a, "GPU 9a HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI",      patch_nvhdmi_2ch),
 HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI",   patch_nvhdmi_2ch),
 HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP",   patch_via_hdmi),
index 6d73f8b..194ffa8 100644 (file)
@@ -2461,6 +2461,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
        SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
+       SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950),
        SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
@@ -6148,6 +6149,9 @@ enum {
        ALC236_FIXUP_HP_MUTE_LED,
        ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
        ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
+       ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS,
+       ALC269VC_FIXUP_ACER_HEADSET_MIC,
+       ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -7326,6 +7330,35 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MODE
        },
+       [ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x14, 0x90100120 }, /* use as internal speaker */
+                       { 0x18, 0x02a111f0 }, /* use as headset mic, without its own jack detect */
+                       { 0x1a, 0x01011020 }, /* use as line out */
+                       { },
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MIC
+       },
+       [ALC269VC_FIXUP_ACER_HEADSET_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x18, 0x02a11030 }, /* use as headset mic */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MIC
+       },
+       [ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x18, 0x01a11130 }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MIC
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7341,10 +7374,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
        SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
        SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
        SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
+       SND_PCI_QUIRK(0x1025, 0x1247, "Acer vCopperbox", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS),
+       SND_PCI_QUIRK(0x1025, 0x1248, "Acer Veriton N4660G", ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
@@ -7470,6 +7506,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+       SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+       SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
@@ -7568,8 +7606,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
-       SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
-       SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
+       SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
        SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
        SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
index d8f554f..e6386de 100644 (file)
@@ -342,11 +342,34 @@ static int acp3x_dma_close(struct snd_soc_component *component,
 {
        struct snd_soc_pcm_runtime *prtd;
        struct i2s_dev_data *adata;
+       struct i2s_stream_instance *ins;
 
        prtd = substream->private_data;
        component = snd_soc_rtdcom_lookup(prtd, DRV_NAME);
        adata = dev_get_drvdata(component->dev);
+       ins = substream->runtime->private_data;
+       if (!ins)
+               return -EINVAL;
 
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+               switch (ins->i2s_instance) {
+               case I2S_BT_INSTANCE:
+                       adata->play_stream = NULL;
+                       break;
+               case I2S_SP_INSTANCE:
+               default:
+                       adata->i2ssp_play_stream = NULL;
+               }
+       } else {
+               switch (ins->i2s_instance) {
+               case I2S_BT_INSTANCE:
+                       adata->capture_stream = NULL;
+                       break;
+               case I2S_SP_INSTANCE:
+               default:
+                       adata->i2ssp_capture_stream = NULL;
+               }
+       }
 
        /* Disable ACP irq, when the current stream is being closed and
         * another stream is also not active.
@@ -354,13 +377,6 @@ static int acp3x_dma_close(struct snd_soc_component *component,
        if (!adata->play_stream && !adata->capture_stream &&
                !adata->i2ssp_play_stream && !adata->i2ssp_capture_stream)
                rv_writel(0, adata->acp3x_base + mmACP_EXTERNAL_INTR_ENB);
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               adata->play_stream = NULL;
-               adata->i2ssp_play_stream = NULL;
-       } else {
-               adata->capture_stream = NULL;
-               adata->i2ssp_capture_stream = NULL;
-       }
        return 0;
 }
 
index e437193..4a82690 100644 (file)
@@ -2,6 +2,7 @@
 # Renoir platform Support
 snd-rn-pci-acp3x-objs  := rn-pci-acp3x.o
 snd-acp3x-pdm-dma-objs := acp3x-pdm-dma.o
-obj-$(CONFIG_SND_SOC_AMD_RENOIR)        += snd-rn-pci-acp3x.o
-obj-$(CONFIG_SND_SOC_AMD_RENOIR)        += snd-acp3x-pdm-dma.o
-obj-$(CONFIG_SND_SOC_AMD_RENOIR_MACH)  += acp3x-rn.o
+snd-acp3x-rn-objs      := acp3x-rn.o
+obj-$(CONFIG_SND_SOC_AMD_RENOIR)       += snd-rn-pci-acp3x.o
+obj-$(CONFIG_SND_SOC_AMD_RENOIR)       += snd-acp3x-pdm-dma.o
+obj-$(CONFIG_SND_SOC_AMD_RENOIR_MACH)  += snd-acp3x-rn.o
index de003ac..473efe9 100644 (file)
@@ -441,13 +441,13 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component)
        ret = snd_hda_codec_set_name(hcodec, hcodec->preset->name);
        if (ret < 0) {
                dev_err(&hdev->dev, "name failed %s\n", hcodec->preset->name);
-               goto error;
+               goto error_pm;
        }
 
        ret = snd_hdac_regmap_init(&hcodec->core);
        if (ret < 0) {
                dev_err(&hdev->dev, "regmap init failed\n");
-               goto error;
+               goto error_pm;
        }
 
        patch = (hda_codec_patch_t)hcodec->preset->driver_data;
@@ -455,7 +455,7 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component)
                ret = patch(hcodec);
                if (ret < 0) {
                        dev_err(&hdev->dev, "patch failed %d\n", ret);
-                       goto error;
+                       goto error_regmap;
                }
        } else {
                dev_dbg(&hdev->dev, "no patch file found\n");
@@ -467,7 +467,7 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component)
        ret = snd_hda_codec_parse_pcms(hcodec);
        if (ret < 0) {
                dev_err(&hdev->dev, "unable to map pcms to dai %d\n", ret);
-               goto error;
+               goto error_regmap;
        }
 
        /* HDMI controls need to be created in machine drivers */
@@ -476,7 +476,7 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component)
                if (ret < 0) {
                        dev_err(&hdev->dev, "unable to create controls %d\n",
                                ret);
-                       goto error;
+                       goto error_regmap;
                }
        }
 
@@ -496,7 +496,9 @@ static int hdac_hda_codec_probe(struct snd_soc_component *component)
 
        return 0;
 
-error:
+error_regmap:
+       snd_hdac_regmap_exit(hdev);
+error_pm:
        pm_runtime_put(&hdev->dev);
 error_no_pm:
        snd_hdac_ext_bus_link_put(hdev->bus, hlink);
@@ -518,6 +520,8 @@ static void hdac_hda_codec_remove(struct snd_soc_component *component)
 
        pm_runtime_disable(&hdev->dev);
        snd_hdac_ext_bus_link_put(hdev->bus, hlink);
+
+       snd_hdac_regmap_exit(hdev);
 }
 
 static const struct snd_soc_dapm_route hdac_hda_dapm_routes[] = {
index 0d63ebf..e6613b5 100644 (file)
@@ -700,8 +700,8 @@ static bool max98390_readable_register(struct device *dev, unsigned int reg)
        case MAX98390_IRQ_CTRL ... MAX98390_WDOG_CTRL:
        case MAX98390_MEAS_ADC_THERM_WARN_THRESH
                ... MAX98390_BROWNOUT_INFINITE_HOLD:
-       case MAX98390_BROWNOUT_LVL_HOLD ... THERMAL_COILTEMP_RD_BACK_BYTE0:
-       case DSMIG_DEBUZZER_THRESHOLD ... MAX98390_R24FF_REV_ID:
+       case MAX98390_BROWNOUT_LVL_HOLD ... DSMIG_DEBUZZER_THRESHOLD:
+       case DSM_VOL_ENA ... MAX98390_R24FF_REV_ID:
                return true;
        default:
                return false;
@@ -717,7 +717,7 @@ static bool max98390_volatile_reg(struct device *dev, unsigned int reg)
        case MAX98390_BROWNOUT_LOWEST_STATUS:
        case MAX98390_ENV_TRACK_BOOST_VOUT_READ:
        case DSM_STBASS_HPF_B0_BYTE0 ... DSM_DEBUZZER_ATTACK_TIME_BYTE2:
-       case THERMAL_RDC_RD_BACK_BYTE1 ... THERMAL_COILTEMP_RD_BACK_BYTE0:
+       case THERMAL_RDC_RD_BACK_BYTE1 ... DSMIG_DEBUZZER_THRESHOLD:
        case DSM_THERMAL_GAIN ... DSM_WBDRC_GAIN:
                return true;
        default:
index 67e2e94..2cccb31 100644 (file)
@@ -34,30 +34,32 @@ static const struct reg_default rt1015_reg[] = {
        { 0x0000, 0x0000 },
        { 0x0004, 0xa000 },
        { 0x0006, 0x0003 },
-       { 0x000a, 0x0802 },
-       { 0x000c, 0x0020 },
+       { 0x000a, 0x081e },
+       { 0x000c, 0x0006 },
        { 0x000e, 0x0000 },
        { 0x0010, 0x0000 },
        { 0x0012, 0x0000 },
+       { 0x0014, 0x0000 },
+       { 0x0016, 0x0000 },
+       { 0x0018, 0x0000 },
        { 0x0020, 0x8000 },
-       { 0x0022, 0x471b },
-       { 0x006a, 0x0000 },
-       { 0x006c, 0x4020 },
+       { 0x0022, 0x8043 },
        { 0x0076, 0x0000 },
        { 0x0078, 0x0000 },
-       { 0x007a, 0x0000 },
+       { 0x007a, 0x0002 },
        { 0x007c, 0x10ec },
        { 0x007d, 0x1015 },
        { 0x00f0, 0x5000 },
-       { 0x00f2, 0x0774 },
-       { 0x00f3, 0x8400 },
+       { 0x00f2, 0x004c },
+       { 0x00f3, 0xecfe },
        { 0x00f4, 0x0000 },
+       { 0x00f6, 0x0400 },
        { 0x0100, 0x0028 },
        { 0x0102, 0xff02 },
-       { 0x0104, 0x8232 },
+       { 0x0104, 0xa213 },
        { 0x0106, 0x200c },
-       { 0x010c, 0x002f },
-       { 0x010e, 0xc000 },
+       { 0x010c, 0x0000 },
+       { 0x010e, 0x0058 },
        { 0x0111, 0x0200 },
        { 0x0112, 0x0400 },
        { 0x0114, 0x0022 },
@@ -65,38 +67,46 @@ static const struct reg_default rt1015_reg[] = {
        { 0x0118, 0x0000 },
        { 0x011a, 0x0123 },
        { 0x011c, 0x4567 },
-       { 0x0300, 0xdddd },
-       { 0x0302, 0x0000 },
-       { 0x0311, 0x9330 },
-       { 0x0313, 0x0000 },
-       { 0x0314, 0x0000 },
+       { 0x0300, 0x203d },
+       { 0x0302, 0x001e },
+       { 0x0311, 0x0000 },
+       { 0x0313, 0x6014 },
+       { 0x0314, 0x00a2 },
        { 0x031a, 0x00a0 },
        { 0x031c, 0x001f },
        { 0x031d, 0xffff },
        { 0x031e, 0x0000 },
        { 0x031f, 0x0000 },
+       { 0x0320, 0x0000 },
        { 0x0321, 0x0000 },
-       { 0x0322, 0x0000 },
-       { 0x0328, 0x0000 },
-       { 0x0329, 0x0000 },
-       { 0x032a, 0x0000 },
-       { 0x032b, 0x0000 },
-       { 0x032c, 0x0000 },
-       { 0x032d, 0x0000 },
-       { 0x032e, 0x030e },
-       { 0x0330, 0x0080 },
+       { 0x0322, 0xd7df },
+       { 0x0328, 0x10b2 },
+       { 0x0329, 0x0175 },
+       { 0x032a, 0x36ad },
+       { 0x032b, 0x7e55 },
+       { 0x032c, 0x0520 },
+       { 0x032d, 0xaa00 },
+       { 0x032e, 0x570e },
+       { 0x0330, 0xe180 },
        { 0x0332, 0x0034 },
-       { 0x0334, 0x0000 },
-       { 0x0336, 0x0000 },
+       { 0x0334, 0x0001 },
+       { 0x0336, 0x0010 },
+       { 0x0338, 0x0000 },
+       { 0x04fa, 0x0030 },
+       { 0x04fc, 0x35c8 },
+       { 0x04fe, 0x0800 },
+       { 0x0500, 0x0400 },
+       { 0x0502, 0x1000 },
+       { 0x0504, 0x0000 },
        { 0x0506, 0x04ff },
-       { 0x0508, 0x0030 },
-       { 0x050a, 0x0018 },
-       { 0x0519, 0x307f },
-       { 0x051a, 0xffff },
-       { 0x051b, 0x4000 },
+       { 0x0508, 0x0010 },
+       { 0x050a, 0x001a },
+       { 0x0519, 0x1c68 },
+       { 0x051a, 0x0ccc },
+       { 0x051b, 0x0666 },
        { 0x051d, 0x0000 },
        { 0x051f, 0x0000 },
-       { 0x0536, 0x1000 },
+       { 0x0536, 0x061c },
        { 0x0538, 0x0000 },
        { 0x053a, 0x0000 },
        { 0x053c, 0x0000 },
@@ -110,19 +120,18 @@ static const struct reg_default rt1015_reg[] = {
        { 0x0544, 0x0000 },
        { 0x0568, 0x0000 },
        { 0x056a, 0x0000 },
-       { 0x1000, 0x0000 },
-       { 0x1002, 0x6505 },
+       { 0x1000, 0x0040 },
+       { 0x1002, 0x5405 },
        { 0x1006, 0x5515 },
-       { 0x1007, 0x003f },
-       { 0x1009, 0x770f },
-       { 0x100a, 0x01ff },
-       { 0x100c, 0x0000 },
+       { 0x1007, 0x05f7 },
+       { 0x1009, 0x0b0a },
+       { 0x100a, 0x00ef },
        { 0x100d, 0x0003 },
        { 0x1010, 0xa433 },
        { 0x1020, 0x0000 },
-       { 0x1200, 0x3d02 },
-       { 0x1202, 0x0813 },
-       { 0x1204, 0x0211 },
+       { 0x1200, 0x5a01 },
+       { 0x1202, 0x6524 },
+       { 0x1204, 0x1f00 },
        { 0x1206, 0x0000 },
        { 0x1208, 0x0000 },
        { 0x120a, 0x0000 },
@@ -130,16 +139,16 @@ static const struct reg_default rt1015_reg[] = {
        { 0x120e, 0x0000 },
        { 0x1210, 0x0000 },
        { 0x1212, 0x0000 },
-       { 0x1300, 0x0701 },
-       { 0x1302, 0x12f9 },
-       { 0x1304, 0x3405 },
+       { 0x1300, 0x10a1 },
+       { 0x1302, 0x12ff },
+       { 0x1304, 0x0400 },
        { 0x1305, 0x0844 },
-       { 0x1306, 0x1611 },
+       { 0x1306, 0x4611 },
        { 0x1308, 0x555e },
        { 0x130a, 0x0000 },
-       { 0x130c, 0x2400},
-       { 0x130e, 0x7700 },
-       { 0x130f, 0x0000 },
+       { 0x130c, 0x2000 },
+       { 0x130e, 0x0100 },
+       { 0x130f, 0x0001 },
        { 0x1310, 0x0000 },
        { 0x1312, 0x0000 },
        { 0x1314, 0x0000 },
@@ -209,6 +218,9 @@ static bool rt1015_volatile_register(struct device *dev, unsigned int reg)
        case RT1015_DC_CALIB_CLSD7:
        case RT1015_DC_CALIB_CLSD8:
        case RT1015_S_BST_TIMING_INTER1:
+       case RT1015_OSCK_STA:
+       case RT1015_MONO_DYNA_CTRL1:
+       case RT1015_MONO_DYNA_CTRL5:
                return true;
 
        default:
@@ -224,6 +236,12 @@ static bool rt1015_readable_register(struct device *dev, unsigned int reg)
        case RT1015_CLK3:
        case RT1015_PLL1:
        case RT1015_PLL2:
+       case RT1015_DUM_RW1:
+       case RT1015_DUM_RW2:
+       case RT1015_DUM_RW3:
+       case RT1015_DUM_RW4:
+       case RT1015_DUM_RW5:
+       case RT1015_DUM_RW6:
        case RT1015_CLK_DET:
        case RT1015_SIL_DET:
        case RT1015_CUSTOMER_ID:
@@ -235,6 +253,7 @@ static bool rt1015_readable_register(struct device *dev, unsigned int reg)
        case RT1015_PAD_DRV2:
        case RT1015_GAT_BOOST:
        case RT1015_PRO_ALT:
+       case RT1015_OSCK_STA:
        case RT1015_MAN_I2C:
        case RT1015_DAC1:
        case RT1015_DAC2:
@@ -272,6 +291,13 @@ static bool rt1015_readable_register(struct device *dev, unsigned int reg)
        case RT1015_SMART_BST_CTRL2:
        case RT1015_ANA_CTRL1:
        case RT1015_ANA_CTRL2:
+       case RT1015_PWR_STATE_CTRL:
+       case RT1015_MONO_DYNA_CTRL:
+       case RT1015_MONO_DYNA_CTRL1:
+       case RT1015_MONO_DYNA_CTRL2:
+       case RT1015_MONO_DYNA_CTRL3:
+       case RT1015_MONO_DYNA_CTRL4:
+       case RT1015_MONO_DYNA_CTRL5:
        case RT1015_SPK_VOL:
        case RT1015_SHORT_DETTOP1:
        case RT1015_SHORT_DETTOP2:
index 6fbe802..8169962 100644 (file)
 #define RT1015_CLK3                            0x0006
 #define RT1015_PLL1                            0x000a
 #define RT1015_PLL2                            0x000c
+#define RT1015_DUM_RW1                         0x000e
+#define RT1015_DUM_RW2                         0x0010
+#define RT1015_DUM_RW3                         0x0012
+#define RT1015_DUM_RW4                         0x0014
+#define RT1015_DUM_RW5                         0x0016
+#define RT1015_DUM_RW6                         0x0018
 #define RT1015_CLK_DET                         0x0020
 #define RT1015_SIL_DET                         0x0022
 #define RT1015_CUSTOMER_ID                     0x0076
@@ -32,6 +38,7 @@
 #define RT1015_PAD_DRV2                                0x00f2
 #define RT1015_GAT_BOOST                       0x00f3
 #define RT1015_PRO_ALT                         0x00f4
+#define RT1015_OSCK_STA                                0x00f6
 #define RT1015_MAN_I2C                         0x0100
 #define RT1015_DAC1                            0x0102
 #define RT1015_DAC2                            0x0104
 #define RT1015_ANA_CTRL1                       0x0334
 #define RT1015_ANA_CTRL2                       0x0336
 #define RT1015_PWR_STATE_CTRL                  0x0338
-#define RT1015_SPK_VOL                         0x0506
+#define RT1015_MONO_DYNA_CTRL                  0x04fa
+#define RT1015_MONO_DYNA_CTRL1                 0x04fc
+#define RT1015_MONO_DYNA_CTRL2                 0x04fe
+#define RT1015_MONO_DYNA_CTRL3                 0x0500
+#define RT1015_MONO_DYNA_CTRL4                 0x0502
+#define RT1015_MONO_DYNA_CTRL5                 0x0504
+#define RT1015_SPK_VOL                                 0x0506
 #define RT1015_SHORT_DETTOP1                   0x0508
 #define RT1015_SHORT_DETTOP2                   0x050a
 #define RT1015_SPK_DC_DETECT1                  0x0519
index d324512..7d6670a 100644 (file)
@@ -932,7 +932,9 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
                        RT5682_PWR_ANLG_1, RT5682_PWR_FV2, RT5682_PWR_FV2);
                snd_soc_component_update_bits(component, RT5682_PWR_ANLG_3,
                        RT5682_PWR_CBJ, RT5682_PWR_CBJ);
-
+               snd_soc_component_update_bits(component,
+                       RT5682_HP_CHARGE_PUMP_1,
+                       RT5682_OSW_L_MASK | RT5682_OSW_R_MASK, 0);
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
                        RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_HIGH);
 
@@ -956,6 +958,11 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
                        rt5682->jack_type = SND_JACK_HEADPHONE;
                        break;
                }
+
+               snd_soc_component_update_bits(component,
+                       RT5682_HP_CHARGE_PUMP_1,
+                       RT5682_OSW_L_MASK | RT5682_OSW_R_MASK,
+                       RT5682_OSW_L_EN | RT5682_OSW_R_EN);
        } else {
                rt5682_enable_push_button_irq(component, false);
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
@@ -2829,12 +2836,13 @@ static int rt5682_probe(struct snd_soc_component *component)
                                return ret;
                        }
                        rt5682->mclk = NULL;
-               } else {
-                       /* Register CCF DAI clock control */
-                       ret = rt5682_register_dai_clks(component);
-                       if (ret)
-                               return ret;
                }
+
+               /* Register CCF DAI clock control */
+               ret = rt5682_register_dai_clks(component);
+               if (ret)
+                       return ret;
+
                /* Initial setup for CCF */
                rt5682->lrck[RT5682_AIF1] = CLK_48;
 #endif
index 77665b1..7e1c13c 100644 (file)
@@ -32,6 +32,7 @@ enum asrc_pair_index {
  * @dma_chan: inputer and output DMA channels
  * @dma_data: private dma data
  * @pos: hardware pointer position
+ * @req_dma_chan: flag to release dev_to_dev chan
  * @private: pair private area
  */
 struct fsl_asrc_pair {
@@ -45,6 +46,7 @@ struct fsl_asrc_pair {
        struct dma_chan *dma_chan[2];
        struct imx_dma_data dma_data;
        unsigned int pos;
+       bool req_dma_chan;
 
        void *private;
 };
index d6a3fc5..5f01a58 100644 (file)
@@ -135,6 +135,8 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
        struct snd_dmaengine_dai_dma_data *dma_params_be = NULL;
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct fsl_asrc_pair *pair = runtime->private_data;
+       struct dma_chan *tmp_chan = NULL, *be_chan = NULL;
+       struct snd_soc_component *component_be = NULL;
        struct fsl_asrc *asrc = pair->asrc;
        struct dma_slave_config config_fe, config_be;
        enum asrc_pair_index index = pair->index;
@@ -142,7 +144,6 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
        int stream = substream->stream;
        struct imx_dma_data *tmp_data;
        struct snd_soc_dpcm *dpcm;
-       struct dma_chan *tmp_chan;
        struct device *dev_be;
        u8 dir = tx ? OUT : IN;
        dma_cap_mask_t mask;
@@ -198,17 +199,29 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
        dma_cap_set(DMA_CYCLIC, mask);
 
        /*
+        * The Back-End device might have already requested a DMA channel,
+        * so try to reuse it first, and then request a new one upon NULL.
+        */
+       component_be = snd_soc_lookup_component_nolocked(dev_be, SND_DMAENGINE_PCM_DRV_NAME);
+       if (component_be) {
+               be_chan = soc_component_to_pcm(component_be)->chan[substream->stream];
+               tmp_chan = be_chan;
+       }
+       if (!tmp_chan)
+               tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx");
+
+       /*
         * An EDMA DEV_TO_DEV channel is fixed and bound with DMA event of each
         * peripheral, unlike SDMA channel that is allocated dynamically. So no
-        * need to configure dma_request and dma_request2, but get dma_chan via
-        * dma_request_slave_channel directly with dma name of Front-End device
+        * need to configure dma_request and dma_request2, but get dma_chan of
+        * Back-End device directly via dma_request_slave_channel.
         */
        if (!asrc->use_edma) {
                /* Get DMA request of Back-End */
-               tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx");
                tmp_data = tmp_chan->private;
                pair->dma_data.dma_request = tmp_data->dma_request;
-               dma_release_channel(tmp_chan);
+               if (!be_chan)
+                       dma_release_channel(tmp_chan);
 
                /* Get DMA request of Front-End */
                tmp_chan = asrc->get_dma_channel(pair, dir);
@@ -220,9 +233,11 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
 
                pair->dma_chan[dir] =
                        dma_request_channel(mask, filter, &pair->dma_data);
+               pair->req_dma_chan = true;
        } else {
-               pair->dma_chan[dir] =
-                       asrc->get_dma_channel(pair, dir);
+               pair->dma_chan[dir] = tmp_chan;
+               /* Do not flag to release if we are reusing the Back-End one */
+               pair->req_dma_chan = !be_chan;
        }
 
        if (!pair->dma_chan[dir]) {
@@ -261,7 +276,8 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
        ret = dmaengine_slave_config(pair->dma_chan[dir], &config_be);
        if (ret) {
                dev_err(dev, "failed to config DMA channel for Back-End\n");
-               dma_release_channel(pair->dma_chan[dir]);
+               if (pair->req_dma_chan)
+                       dma_release_channel(pair->dma_chan[dir]);
                return ret;
        }
 
@@ -273,19 +289,22 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
 static int fsl_asrc_dma_hw_free(struct snd_soc_component *component,
                                struct snd_pcm_substream *substream)
 {
+       bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct fsl_asrc_pair *pair = runtime->private_data;
+       u8 dir = tx ? OUT : IN;
 
        snd_pcm_set_runtime_buffer(substream, NULL);
 
-       if (pair->dma_chan[IN])
-               dma_release_channel(pair->dma_chan[IN]);
+       if (pair->dma_chan[!dir])
+               dma_release_channel(pair->dma_chan[!dir]);
 
-       if (pair->dma_chan[OUT])
-               dma_release_channel(pair->dma_chan[OUT]);
+       /* release dev_to_dev chan if we aren't reusing the Back-End one */
+       if (pair->dma_chan[dir] && pair->req_dma_chan)
+               dma_release_channel(pair->dma_chan[dir]);
 
-       pair->dma_chan[IN] = NULL;
-       pair->dma_chan[OUT] = NULL;
+       pair->dma_chan[!dir] = NULL;
+       pair->dma_chan[dir] = NULL;
 
        return 0;
 }
index 0c813a4..69aeb0e 100644 (file)
@@ -265,12 +265,20 @@ static int fsl_mqs_remove(struct platform_device *pdev)
 static int fsl_mqs_runtime_resume(struct device *dev)
 {
        struct fsl_mqs *mqs_priv = dev_get_drvdata(dev);
+       int ret;
 
-       if (mqs_priv->ipg)
-               clk_prepare_enable(mqs_priv->ipg);
+       ret = clk_prepare_enable(mqs_priv->ipg);
+       if (ret) {
+               dev_err(dev, "failed to enable ipg clock\n");
+               return ret;
+       }
 
-       if (mqs_priv->mclk)
-               clk_prepare_enable(mqs_priv->mclk);
+       ret = clk_prepare_enable(mqs_priv->mclk);
+       if (ret) {
+               dev_err(dev, "failed to enable mclk clock\n");
+               clk_disable_unprepare(mqs_priv->ipg);
+               return ret;
+       }
 
        if (mqs_priv->use_gpr)
                regmap_write(mqs_priv->regmap, IOMUXC_GPR2,
@@ -292,11 +300,8 @@ static int fsl_mqs_runtime_suspend(struct device *dev)
                regmap_read(mqs_priv->regmap, REG_MQS_CTRL,
                            &mqs_priv->reg_mqs_ctrl);
 
-       if (mqs_priv->mclk)
-               clk_disable_unprepare(mqs_priv->mclk);
-
-       if (mqs_priv->ipg)
-               clk_disable_unprepare(mqs_priv->ipg);
+       clk_disable_unprepare(mqs_priv->mclk);
+       clk_disable_unprepare(mqs_priv->ipg);
 
        return 0;
 }
index bad89b0..1a2fa7f 100644 (file)
@@ -678,8 +678,9 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
        struct regmap *regs = ssi->regs;
        u32 pm = 999, div2, psr, stccr, mask, afreq, factor, i;
        unsigned long clkrate, baudrate, tmprate;
-       unsigned int slots = params_channels(hw_params);
-       unsigned int slot_width = 32;
+       unsigned int channels = params_channels(hw_params);
+       unsigned int slot_width = params_width(hw_params);
+       unsigned int slots = 2;
        u64 sub, savesub = 100000;
        unsigned int freq;
        bool baudclk_is_used;
@@ -688,10 +689,14 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
        /* Override slots and slot_width if being specifically set... */
        if (ssi->slots)
                slots = ssi->slots;
-       /* ...but keep 32 bits if slots is 2 -- I2S Master mode */
-       if (ssi->slot_width && slots != 2)
+       if (ssi->slot_width)
                slot_width = ssi->slot_width;
 
+       /* ...but force 32 bits for stereo audio using I2S Master Mode */
+       if (channels == 2 &&
+           (ssi->i2s_net & SSI_SCR_I2S_MODE_MASK) == SSI_SCR_I2S_MODE_MASTER)
+               slot_width = 32;
+
        /* Generate bit clock based on the slot number and slot width */
        freq = slots * slot_width * params_rate(hw_params);
 
index a2a5798..5dc489a 100644 (file)
@@ -492,7 +492,7 @@ config SND_SOC_INTEL_SOF_PCM512x_MACH
 
 endif ## SND_SOC_SOF_HDA_LINK || SND_SOC_SOF_BAYTRAIL
 
-if (SND_SOC_SOF_COMETLAKE_LP && SND_SOC_SOF_HDA_LINK)
+if (SND_SOC_SOF_COMETLAKE && SND_SOC_SOF_HDA_LINK)
 
 config SND_SOC_INTEL_CML_LP_DA7219_MAX98357A_MACH
        tristate "CML_LP with DA7219 and MAX98357A in I2S Mode"
@@ -520,7 +520,7 @@ config SND_SOC_INTEL_SOF_CML_RT1011_RT5682_MACH
          Say Y if you have such a device.
          If unsure select "N".
 
-endif ## SND_SOC_SOF_COMETLAKE_LP && SND_SOC_SOF_HDA_LINK
+endif ## SND_SOC_SOF_COMETLAKE && SND_SOC_SOF_HDA_LINK
 
 if SND_SOC_SOF_JASPERLAKE
 
index 6c20bdd..8ada4ec 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/module.h>
 #include "common.h"
+#include "qdsp6/q6afe.h"
 
 int qcom_snd_parse_of(struct snd_soc_card *card)
 {
@@ -101,6 +102,15 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
                        }
                        link->no_pcm = 1;
                        link->ignore_pmdown_time = 1;
+
+                       if (q6afe_is_rx_port(link->id)) {
+                               link->dpcm_playback = 1;
+                               link->dpcm_capture = 0;
+                       } else {
+                               link->dpcm_playback = 0;
+                               link->dpcm_capture = 1;
+                       }
+
                } else {
                        dlc = devm_kzalloc(dev, sizeof(*dlc), GFP_KERNEL);
                        if (!dlc)
@@ -113,12 +123,12 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
                        link->codecs->dai_name = "snd-soc-dummy-dai";
                        link->codecs->name = "snd-soc-dummy";
                        link->dynamic = 1;
+                       link->dpcm_playback = 1;
+                       link->dpcm_capture = 1;
                }
 
                link->ignore_suspend = 1;
                link->nonatomic = 1;
-               link->dpcm_playback = 1;
-               link->dpcm_capture = 1;
                link->stream_name = link->name;
                link++;
 
index e0945f7..0ce4eb6 100644 (file)
@@ -800,6 +800,14 @@ int q6afe_get_port_id(int index)
 }
 EXPORT_SYMBOL_GPL(q6afe_get_port_id);
 
+int q6afe_is_rx_port(int index)
+{
+       if (index < 0 || index >= AFE_PORT_MAX)
+               return -EINVAL;
+
+       return port_maps[index].is_rx;
+}
+EXPORT_SYMBOL_GPL(q6afe_is_rx_port);
 static int afe_apr_send_pkt(struct q6afe *afe, struct apr_pkt *pkt,
                            struct q6afe_port *port)
 {
index c7ed542..1a0f80a 100644 (file)
@@ -198,6 +198,7 @@ int q6afe_port_start(struct q6afe_port *port);
 int q6afe_port_stop(struct q6afe_port *port);
 void q6afe_port_put(struct q6afe_port *port);
 int q6afe_get_port_id(int index);
+int q6afe_is_rx_port(int index);
 void q6afe_hdmi_port_prepare(struct q6afe_port *port,
                            struct q6afe_hdmi_cfg *cfg);
 void q6afe_slim_port_prepare(struct q6afe_port *port,
index 0e0e8f7..ae4b2ca 100644 (file)
@@ -25,6 +25,7 @@
 #define ASM_STREAM_CMD_FLUSH                   0x00010BCE
 #define ASM_SESSION_CMD_PAUSE                  0x00010BD3
 #define ASM_DATA_CMD_EOS                       0x00010BDB
+#define ASM_DATA_EVENT_RENDERED_EOS            0x00010C1C
 #define ASM_NULL_POPP_TOPOLOGY                 0x00010C68
 #define ASM_STREAM_CMD_FLUSH_READBUFS          0x00010C09
 #define ASM_STREAM_CMD_SET_ENCDEC_PARAM                0x00010C10
@@ -622,9 +623,6 @@ static int32_t q6asm_stream_callback(struct apr_device *adev,
                case ASM_SESSION_CMD_SUSPEND:
                        client_event = ASM_CLIENT_EVENT_CMD_SUSPEND_DONE;
                        break;
-               case ASM_DATA_CMD_EOS:
-                       client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
-                       break;
                case ASM_STREAM_CMD_FLUSH:
                        client_event = ASM_CLIENT_EVENT_CMD_FLUSH_DONE;
                        break;
@@ -728,6 +726,9 @@ static int32_t q6asm_stream_callback(struct apr_device *adev,
                }
 
                break;
+       case ASM_DATA_EVENT_RENDERED_EOS:
+               client_event = ASM_CLIENT_EVENT_CMD_EOS_DONE;
+               break;
        }
 
        if (ac->cb)
index 7cd42fc..1707414 100644 (file)
@@ -590,8 +590,10 @@ static int rockchip_pdm_resume(struct device *dev)
        int ret;
 
        ret = pm_runtime_get_sync(dev);
-       if (ret < 0)
+       if (ret < 0) {
+               pm_runtime_put(dev);
                return ret;
+       }
 
        ret = regcache_sync(pdm->regmap);
 
index 7b38720..0f30f5a 100644 (file)
@@ -310,7 +310,7 @@ struct snd_soc_component *snd_soc_rtdcom_lookup(struct snd_soc_pcm_runtime *rtd,
 }
 EXPORT_SYMBOL_GPL(snd_soc_rtdcom_lookup);
 
-static struct snd_soc_component
+struct snd_soc_component
 *snd_soc_lookup_component_nolocked(struct device *dev, const char *driver_name)
 {
        struct snd_soc_component *component;
@@ -329,6 +329,7 @@ static struct snd_soc_component
 
        return found_component;
 }
+EXPORT_SYMBOL_GPL(snd_soc_lookup_component_nolocked);
 
 struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
                                                   const char *driver_name)
index a9ea172..11e5d79 100644 (file)
@@ -9,6 +9,43 @@
 #include <sound/soc.h>
 #include <sound/dmaengine_pcm.h>
 
+static void devm_dai_release(struct device *dev, void *res)
+{
+       snd_soc_unregister_dai(*(struct snd_soc_dai **)res);
+}
+
+/**
+ * devm_snd_soc_register_dai - resource-managed dai registration
+ * @dev: Device used to manage component
+ * @component: The component the DAIs are registered for
+ * @dai_drv: DAI driver to use for the DAI
+ * @legacy_dai_naming: if %true, use legacy single-name format;
+ *     if %false, use multiple-name format;
+ */
+struct snd_soc_dai *devm_snd_soc_register_dai(struct device *dev,
+                                             struct snd_soc_component *component,
+                                             struct snd_soc_dai_driver *dai_drv,
+                                             bool legacy_dai_naming)
+{
+       struct snd_soc_dai **ptr;
+       struct snd_soc_dai *dai;
+
+       ptr = devres_alloc(devm_dai_release, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return NULL;
+
+       dai = snd_soc_register_dai(component, dai_drv, legacy_dai_naming);
+       if (dai) {
+               *ptr = dai;
+               devres_add(dev, ptr);
+       } else {
+               devres_free(ptr);
+       }
+
+       return dai;
+}
+EXPORT_SYMBOL_GPL(devm_snd_soc_register_dai);
+
 static void devm_component_release(struct device *dev, void *res)
 {
        snd_soc_unregister_component(*(struct device **)res);
index f728309..80a4e71 100644 (file)
  */
 #define SND_DMAENGINE_PCM_FLAG_NO_RESIDUE BIT(31)
 
-struct dmaengine_pcm {
-       struct dma_chan *chan[SNDRV_PCM_STREAM_LAST + 1];
-       const struct snd_dmaengine_pcm_config *config;
-       struct snd_soc_component component;
-       unsigned int flags;
-};
-
-static struct dmaengine_pcm *soc_component_to_pcm(struct snd_soc_component *p)
-{
-       return container_of(p, struct dmaengine_pcm, component);
-}
-
 static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm,
        struct snd_pcm_substream *substream)
 {
index 2c114b4..c517064 100644 (file)
@@ -2630,15 +2630,15 @@ static int soc_dpcm_fe_runtime_update(struct snd_soc_pcm_runtime *fe, int new)
        int count, paths;
        int ret;
 
+       if (!fe->dai_link->dynamic)
+               return 0;
+
        if (fe->num_cpus > 1) {
                dev_err(fe->dev,
                        "%s doesn't support Multi CPU yet\n", __func__);
                return -EINVAL;
        }
 
-       if (!fe->dai_link->dynamic)
-               return 0;
-
        /* only check active links */
        if (!snd_soc_dai_active(asoc_rtd_to_cpu(fe, 0)))
                return 0;
index 9e89633..43e5745 100644 (file)
@@ -1851,7 +1851,7 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
        list_add(&dai_drv->dobj.list, &tplg->comp->dobj_list);
 
        /* register the DAI to the component */
-       dai = snd_soc_register_dai(tplg->comp, dai_drv, false);
+       dai = devm_snd_soc_register_dai(tplg->comp->dev, tplg->comp, dai_drv, false);
        if (!dai)
                return -ENOMEM;
 
@@ -1859,7 +1859,6 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
        ret = snd_soc_dapm_new_dai_widgets(dapm, dai);
        if (ret != 0) {
                dev_err(dai->dev, "Failed to create DAI widgets %d\n", ret);
-               snd_soc_unregister_dai(dai);
                return ret;
        }
 
index c9a2bee..3aaf25e 100644 (file)
@@ -25,8 +25,7 @@ config SND_SOC_SOF_INTEL_PCI
        select SND_SOC_SOF_CANNONLAKE  if SND_SOC_SOF_CANNONLAKE_SUPPORT
        select SND_SOC_SOF_COFFEELAKE  if SND_SOC_SOF_COFFEELAKE_SUPPORT
        select SND_SOC_SOF_ICELAKE     if SND_SOC_SOF_ICELAKE_SUPPORT
-       select SND_SOC_SOF_COMETLAKE_LP if SND_SOC_SOF_COMETLAKE_LP_SUPPORT
-       select SND_SOC_SOF_COMETLAKE_H if SND_SOC_SOF_COMETLAKE_H_SUPPORT
+       select SND_SOC_SOF_COMETLAKE   if SND_SOC_SOF_COMETLAKE_SUPPORT
        select SND_SOC_SOF_TIGERLAKE   if SND_SOC_SOF_TIGERLAKE_SUPPORT
        select SND_SOC_SOF_ELKHARTLAKE if SND_SOC_SOF_ELKHARTLAKE_SUPPORT
        select SND_SOC_SOF_JASPERLAKE  if SND_SOC_SOF_JASPERLAKE_SUPPORT
@@ -201,34 +200,22 @@ config SND_SOC_SOF_ICELAKE
          This option is not user-selectable but automagically handled by
          'select' statements at a higher level
 
-config SND_SOC_SOF_COMETLAKE_LP
+config SND_SOC_SOF_COMETLAKE
        tristate
        select SND_SOC_SOF_HDA_COMMON
        help
          This option is not user-selectable but automagically handled by
          'select' statements at a higher level
 
-config SND_SOC_SOF_COMETLAKE_LP_SUPPORT
-       bool "SOF support for CometLake-LP"
-       help
-         This adds support for Sound Open Firmware for Intel(R) platforms
-         using the Cometlake-LP processors.
-         Say Y if you have such a device.
-         If unsure select "N".
+config SND_SOC_SOF_COMETLAKE_SUPPORT
+       bool
 
-config SND_SOC_SOF_COMETLAKE_H
-       tristate
-       select SND_SOC_SOF_HDA_COMMON
-       help
-         This option is not user-selectable but automagically handled by
-         'select' statements at a higher level
-
-config SND_SOC_SOF_COMETLAKE_H_SUPPORT
-       bool "SOF support for CometLake-H"
+config SND_SOC_SOF_COMETLAKE_LP_SUPPORT
+       bool "SOF support for CometLake"
+       select SND_SOC_SOF_COMETLAKE_SUPPORT
        help
          This adds support for Sound Open Firmware for Intel(R) platforms
-         using the Cometlake-H processors.
-         Say Y if you have such a device.
+         using the Cometlake processors.
          If unsure select "N".
 
 config SND_SOC_SOF_TIGERLAKE_SUPPORT
index 7f65dcc..1bda14c 100644 (file)
@@ -653,11 +653,16 @@ irqreturn_t hda_dsp_stream_threaded_handler(int irq, void *context)
                if (status & AZX_INT_CTRL_EN) {
                        rirb_status = snd_hdac_chip_readb(bus, RIRBSTS);
                        if (rirb_status & RIRB_INT_MASK) {
+                               /*
+                                * Clearing the interrupt status here ensures
+                                * that no interrupt gets masked after the RIRB
+                                * wp is read in snd_hdac_bus_update_rirb.
+                                */
+                               snd_hdac_chip_writeb(bus, RIRBSTS,
+                                                    RIRB_INT_MASK);
                                active = true;
                                if (rirb_status & RIRB_INT_RESPONSE)
                                        snd_hdac_bus_update_rirb(bus);
-                               snd_hdac_chip_writeb(bus, RIRBSTS,
-                                                    RIRB_INT_MASK);
                        }
                }
 #endif
index b04b728..5e159ab 100644 (file)
@@ -36,7 +36,7 @@ struct sof_probe_point_desc {
 struct sof_ipc_probe_dma_add_params {
        struct sof_ipc_cmd_hdr hdr;
        unsigned int num_elems;
-       struct sof_probe_dma dma[0];
+       struct sof_probe_dma dma[];
 } __packed;
 
 struct sof_ipc_probe_info_params {
@@ -51,19 +51,19 @@ struct sof_ipc_probe_info_params {
 struct sof_ipc_probe_dma_remove_params {
        struct sof_ipc_cmd_hdr hdr;
        unsigned int num_elems;
-       unsigned int stream_tag[0];
+       unsigned int stream_tag[];
 } __packed;
 
 struct sof_ipc_probe_point_add_params {
        struct sof_ipc_cmd_hdr hdr;
        unsigned int num_elems;
-       struct sof_probe_point_desc desc[0];
+       struct sof_probe_point_desc desc[];
 } __packed;
 
 struct sof_ipc_probe_point_remove_params {
        struct sof_ipc_cmd_hdr hdr;
        unsigned int num_elems;
-       unsigned int buffer_id[0];
+       unsigned int buffer_id[];
 } __packed;
 
 int sof_ipc_probe_init(struct snd_sof_dev *sdev,
index b13697d..aa3532b 100644 (file)
@@ -151,9 +151,7 @@ static const struct sof_dev_desc cfl_desc = {
 };
 #endif
 
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP) || \
-       IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H)
-
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE)
 static const struct sof_dev_desc cml_desc = {
        .machines               = snd_soc_acpi_intel_cml_machines,
        .alt_machines           = snd_soc_acpi_intel_cml_sdw_machines,
@@ -411,8 +409,11 @@ static const struct pci_device_id sof_pci_ids[] = {
                .driver_data = (unsigned long)&cfl_desc},
 #endif
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_ICELAKE)
-       { PCI_DEVICE(0x8086, 0x34C8),
+       { PCI_DEVICE(0x8086, 0x34C8), /* ICL-LP */
+               .driver_data = (unsigned long)&icl_desc},
+       { PCI_DEVICE(0x8086, 0x3dc8), /* ICL-H */
                .driver_data = (unsigned long)&icl_desc},
+
 #endif
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE)
        { PCI_DEVICE(0x8086, 0x38c8),
@@ -420,17 +421,20 @@ static const struct pci_device_id sof_pci_ids[] = {
        { PCI_DEVICE(0x8086, 0x4dc8),
                .driver_data = (unsigned long)&jsl_desc},
 #endif
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_LP)
-       { PCI_DEVICE(0x8086, 0x02c8),
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE)
+       { PCI_DEVICE(0x8086, 0x02c8), /* CML-LP */
                .driver_data = (unsigned long)&cml_desc},
-#endif
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_COMETLAKE_H)
-       { PCI_DEVICE(0x8086, 0x06c8),
+       { PCI_DEVICE(0x8086, 0x06c8), /* CML-H */
+               .driver_data = (unsigned long)&cml_desc},
+       { PCI_DEVICE(0x8086, 0xa3f0), /* CML-S */
                .driver_data = (unsigned long)&cml_desc},
 #endif
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE)
-       { PCI_DEVICE(0x8086, 0xa0c8),
+       { PCI_DEVICE(0x8086, 0xa0c8), /* TGL-LP */
+               .driver_data = (unsigned long)&tgl_desc},
+       { PCI_DEVICE(0x8086, 0x43c8), /* TGL-H */
                .driver_data = (unsigned long)&tgl_desc},
+
 #endif
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_ELKHARTLAKE)
        { PCI_DEVICE(0x8086, 0x4b55),
index d6219fb..de43267 100644 (file)
@@ -84,10 +84,10 @@ struct snd_usb_endpoint {
        dma_addr_t sync_dma;            /* DMA address of syncbuf */
 
        unsigned int pipe;              /* the data i/o pipe */
-       unsigned int framesize[2];      /* small/large frame sizes in samples */
-       unsigned int sample_rem;        /* remainder from division fs/fps */
+       unsigned int packsize[2];       /* small/large packet sizes in samples */
+       unsigned int sample_rem;        /* remainder from division fs/pps */
        unsigned int sample_accum;      /* sample accumulator */
-       unsigned int fps;               /* frames per second */
+       unsigned int pps;               /* packets per second */
        unsigned int freqn;             /* nominal sampling rate in fs/fps in Q16.16 format */
        unsigned int freqm;             /* momentary sampling rate in fs/fps in Q16.16 format */
        int        freqshift;           /* how much to shift the feedback value to get Q16.16 */
index 9bea7d3..8876026 100644 (file)
@@ -159,11 +159,11 @@ int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep)
                return ep->maxframesize;
 
        ep->sample_accum += ep->sample_rem;
-       if (ep->sample_accum >= ep->fps) {
-               ep->sample_accum -= ep->fps;
-               ret = ep->framesize[1];
+       if (ep->sample_accum >= ep->pps) {
+               ep->sample_accum -= ep->pps;
+               ret = ep->packsize[1];
        } else {
-               ret = ep->framesize[0];
+               ret = ep->packsize[0];
        }
 
        return ret;
@@ -1088,15 +1088,15 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
 
        if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL) {
                ep->freqn = get_usb_full_speed_rate(rate);
-               ep->fps = 1000;
+               ep->pps = 1000 >> ep->datainterval;
        } else {
                ep->freqn = get_usb_high_speed_rate(rate);
-               ep->fps = 8000;
+               ep->pps = 8000 >> ep->datainterval;
        }
 
-       ep->sample_rem = rate % ep->fps;
-       ep->framesize[0] = rate / ep->fps;
-       ep->framesize[1] = (rate + (ep->fps - 1)) / ep->fps;
+       ep->sample_rem = rate % ep->pps;
+       ep->packsize[0] = rate / ep->pps;
+       ep->packsize[1] = (rate + (ep->pps - 1)) / ep->pps;
 
        /* calculate the frequency in 16.16 format */
        ep->freqm = ep->freqn;
index 5ffb457..1b28d01 100644 (file)
@@ -394,8 +394,9 @@ skip_rate:
        return nr_rates;
 }
 
-/* Line6 Helix series don't support the UAC2_CS_RANGE usb function
- * call. Return a static table of known clock rates.
+/* Line6 Helix series and the Rode Rodecaster Pro don't support the
+ * UAC2_CS_RANGE usb function call. Return a static table of known
+ * clock rates.
  */
 static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
                                                struct audioformat *fp)
@@ -408,6 +409,7 @@ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
        case USB_ID(0x0e41, 0x4248): /* Line6 Helix >= fw 2.82 */
        case USB_ID(0x0e41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */
        case USB_ID(0x0e41, 0x424a): /* Line6 Helix LT >= fw 2.82 */
+       case USB_ID(0x19f7, 0x0011): /* Rode Rodecaster Pro */
                return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000);
        }
 
index 15769f2..eab0fd4 100644 (file)
@@ -581,8 +581,9 @@ static int check_matrix_bitmap(unsigned char *bmap,
  * if failed, give up and free the control instance.
  */
 
-int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
-                             struct snd_kcontrol *kctl)
+int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list,
+                          struct snd_kcontrol *kctl,
+                          bool is_std_info)
 {
        struct usb_mixer_interface *mixer = list->mixer;
        int err;
@@ -596,6 +597,7 @@ int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
                return err;
        }
        list->kctl = kctl;
+       list->is_std_info = is_std_info;
        list->next_id_elem = mixer->id_elems[list->id];
        mixer->id_elems[list->id] = list;
        return 0;
@@ -3234,8 +3236,11 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid)
        unitid = delegate_notify(mixer, unitid, NULL, NULL);
 
        for_each_mixer_elem(list, mixer, unitid) {
-               struct usb_mixer_elem_info *info =
-                       mixer_elem_list_to_info(list);
+               struct usb_mixer_elem_info *info;
+
+               if (!list->is_std_info)
+                       continue;
+               info = mixer_elem_list_to_info(list);
                /* invalidate cache, so the value is read from the device */
                info->cached = 0;
                snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
@@ -3315,6 +3320,8 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer,
 
                if (!list->kctl)
                        continue;
+               if (!list->is_std_info)
+                       continue;
 
                info = mixer_elem_list_to_info(list);
                if (count > 1 && info->control != control)
index 41ec9dc..c29e27a 100644 (file)
@@ -66,6 +66,7 @@ struct usb_mixer_elem_list {
        struct usb_mixer_elem_list *next_id_elem; /* list of controls with same id */
        struct snd_kcontrol *kctl;
        unsigned int id;
+       bool is_std_info;
        usb_mixer_elem_dump_func_t dump;
        usb_mixer_elem_resume_func_t resume;
 };
@@ -103,8 +104,12 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid);
 int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
                                int request, int validx, int value_set);
 
-int snd_usb_mixer_add_control(struct usb_mixer_elem_list *list,
-                             struct snd_kcontrol *kctl);
+int snd_usb_mixer_add_list(struct usb_mixer_elem_list *list,
+                          struct snd_kcontrol *kctl,
+                          bool is_std_info);
+
+#define snd_usb_mixer_add_control(list, kctl) \
+       snd_usb_mixer_add_list(list, kctl, true)
 
 void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
                                 struct usb_mixer_interface *mixer,
index b6bcf2f..cec1cfd 100644 (file)
@@ -158,7 +158,8 @@ static int add_single_ctl_with_resume(struct usb_mixer_interface *mixer,
                return -ENOMEM;
        }
        kctl->private_free = snd_usb_mixer_elem_free;
-       return snd_usb_mixer_add_control(list, kctl);
+       /* don't use snd_usb_mixer_add_control() here, this is a special list element */
+       return snd_usb_mixer_add_list(list, kctl, false);
 }
 
 /*
index 8a05dcb..40b7cd1 100644 (file)
@@ -367,6 +367,8 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
                ifnum = 0;
                goto add_sync_ep_from_ifnum;
        case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
+       case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */
+       case USB_ID(0x0d9a, 0x00df): /* RTX6001 */
                ep = 0x81;
                ifnum = 2;
                goto add_sync_ep_from_ifnum;
@@ -1786,6 +1788,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream
                return 0;
        case SNDRV_PCM_TRIGGER_STOP:
                stop_endpoints(subs);
+               subs->data_endpoint->retire_data_urb = NULL;
                subs->running = 0;
                return 0;
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
index 4ec4910..9092cc0 100644 (file)
@@ -3633,4 +3633,56 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
        }
 },
 
+/*
+ * MacroSilicon MS2109 based HDMI capture cards
+ *
+ * These claim 96kHz 1ch in the descriptors, but are actually 48kHz 2ch.
+ * They also need QUIRK_AUDIO_ALIGN_TRANSFER, which makes one wonder if
+ * they pretend to be 96kHz mono as a workaround for stereo being broken
+ * by that...
+ *
+ * They also have swapped L-R channels, but that's for userspace to deal
+ * with.
+ */
+{
+       USB_DEVICE(0x534d, 0x2109),
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .vendor_name = "MacroSilicon",
+               .product_name = "MS2109",
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = &(const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+                       },
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_AUDIO_STANDARD_MIXER,
+                       },
+                       {
+                               .ifnum = 3,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S16_LE,
+                                       .channels = 2,
+                                       .iface = 3,
+                                       .altsetting = 1,
+                                       .altset_idx = 1,
+                                       .attributes = 0,
+                                       .endpoint = 0x82,
+                                       .ep_attr = USB_ENDPOINT_XFER_ISOC |
+                                               USB_ENDPOINT_SYNC_ASYNC,
+                                       .rates = SNDRV_PCM_RATE_CONTINUOUS,
+                                       .rate_min = 48000,
+                                       .rate_max = 48000,
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
+
 #undef USB_DEVICE_VENDOR_SPEC
index bca0179..fca7273 100644 (file)
@@ -1532,6 +1532,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
 static bool is_itf_usb_dsd_dac(unsigned int id)
 {
        switch (id) {
+       case USB_ID(0x154e, 0x1002): /* Denon DCD-1500RE */
        case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
        case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
        case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
@@ -1673,6 +1674,14 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
             chip->usb_id == USB_ID(0x0951, 0x16ad)) &&
            (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
                usleep_range(1000, 2000);
+
+       /*
+        * Samsung USBC Headset (AKG) need a tiny delay after each
+        * class compliant request. (Model number: AAM625R or AAM627R)
+        */
+       if (chip->usb_id == USB_ID(0x04e8, 0xa051) &&
+           (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+               usleep_range(5000, 6000);
 }
 
 /*
@@ -1856,6 +1865,7 @@ struct registration_quirk {
 static const struct registration_quirk registration_quirks[] = {
        REG_QUIRK_ENTRY(0x0951, 0x16d8, 2),     /* Kingston HyperX AMP */
        REG_QUIRK_ENTRY(0x0951, 0x16ed, 2),     /* Kingston HyperX Cloud Alpha S */
+       REG_QUIRK_ENTRY(0x0951, 0x16ea, 2),     /* Kingston HyperX Cloud Flight S */
        { 0 }                                   /* terminator */
 };
 
index bd77881..85af6eb 100644 (file)
@@ -67,6 +67,9 @@ cpupower: FORCE
 cgroup firewire hv guest bootconfig spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging: FORCE
        $(call descend,$@)
 
+bpf/%: FORCE
+       $(call descend,$@)
+
 liblockdep: FORCE
        $(call descend,lib/lockdep)
 
index db18994..02dabc9 100644 (file)
 #define X86_FEATURE_AVX512_4FMAPS      (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
 #define X86_FEATURE_FSRM               (18*32+ 4) /* Fast Short Rep Mov */
 #define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */
+#define X86_FEATURE_SRBDS_CTRL         (18*32+ 9) /* "" SRBDS mitigation MSR available */
 #define X86_FEATURE_MD_CLEAR           (18*32+10) /* VERW clears CPU buffers */
 #define X86_FEATURE_TSX_FORCE_ABORT    (18*32+13) /* "" TSX_FORCE_ABORT */
 #define X86_FEATURE_PCONFIG            (18*32+18) /* Intel PCONFIG */
 #define X86_BUG_SWAPGS                 X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
 #define X86_BUG_TAA                    X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
 #define X86_BUG_ITLB_MULTIHIT          X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
+#define X86_BUG_SRBDS                  X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
index ef452b8..e8370e6 100644 (file)
 #define TSX_CTRL_RTM_DISABLE           BIT(0)  /* Disable RTM feature */
 #define TSX_CTRL_CPUID_CLEAR           BIT(1)  /* Disable TSX enumeration */
 
+/* SRBDS support */
+#define MSR_IA32_MCU_OPT_CTRL          0x00000123
+#define RNGDS_MITG_DIS                 BIT(0)
+
 #define MSR_IA32_SYSENTER_CS           0x00000174
 #define MSR_IA32_SYSENTER_ESP          0x00000175
 #define MSR_IA32_SYSENTER_EIP          0x00000176
index 43e2490..17c5a03 100644 (file)
@@ -385,33 +385,48 @@ struct kvm_sync_regs {
 #define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
 
 #define KVM_STATE_NESTED_FORMAT_VMX    0
-#define KVM_STATE_NESTED_FORMAT_SVM    1       /* unused */
+#define KVM_STATE_NESTED_FORMAT_SVM    1
 
 #define KVM_STATE_NESTED_GUEST_MODE    0x00000001
 #define KVM_STATE_NESTED_RUN_PENDING   0x00000002
 #define KVM_STATE_NESTED_EVMCS         0x00000004
 #define KVM_STATE_NESTED_MTF_PENDING   0x00000008
+#define KVM_STATE_NESTED_GIF_SET       0x00000100
 
 #define KVM_STATE_NESTED_SMM_GUEST_MODE        0x00000001
 #define KVM_STATE_NESTED_SMM_VMXON     0x00000002
 
 #define KVM_STATE_NESTED_VMX_VMCS_SIZE 0x1000
 
+#define KVM_STATE_NESTED_SVM_VMCB_SIZE 0x1000
+
+#define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE        0x00000001
+
 struct kvm_vmx_nested_state_data {
        __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
        __u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
-       __u64 preemption_timer_deadline;
 };
 
 struct kvm_vmx_nested_state_hdr {
+       __u32 flags;
        __u64 vmxon_pa;
        __u64 vmcs12_pa;
+       __u64 preemption_timer_deadline;
 
        struct {
                __u16 flags;
        } smm;
 };
 
+struct kvm_svm_nested_state_data {
+       /* Save area only used if KVM_STATE_NESTED_RUN_PENDING.  */
+       __u8 vmcb12[KVM_STATE_NESTED_SVM_VMCB_SIZE];
+};
+
+struct kvm_svm_nested_state_hdr {
+       __u64 vmcb_pa;
+};
+
 /* for KVM_CAP_NESTED_STATE */
 struct kvm_nested_state {
        __u16 flags;
@@ -420,6 +435,7 @@ struct kvm_nested_state {
 
        union {
                struct kvm_vmx_nested_state_hdr vmx;
+               struct kvm_svm_nested_state_hdr svm;
 
                /* Pad the header to 128 bytes.  */
                __u8 pad[120];
@@ -432,6 +448,7 @@ struct kvm_nested_state {
         */
        union {
                struct kvm_vmx_nested_state_data vmx[0];
+               struct kvm_svm_nested_state_data svm[0];
        } data;
 };
 
index 30d7d04..be5e2e7 100644 (file)
@@ -2,7 +2,14 @@
 #ifndef _UAPI_ASM_X86_UNISTD_H
 #define _UAPI_ASM_X86_UNISTD_H
 
-/* x32 syscall flag bit */
+/*
+ * x32 syscall flag bit.  Some user programs expect syscall NR macros
+ * and __X32_SYSCALL_BIT to have type int, even though syscall numbers
+ * are, for practical purposes, unsigned long.
+ *
+ * Fortunately, expressions like (nr & ~__X32_SYSCALL_BIT) do the right
+ * thing regardless.
+ */
 #define __X32_SYSCALL_BIT      0x40000000
 
 #ifndef __KERNEL__
index e95b72e..b8ff9e8 100644 (file)
        { EXIT_REASON_UMWAIT,                "UMWAIT" }, \
        { EXIT_REASON_TPAUSE,                "TPAUSE" }
 
+#define VMX_EXIT_REASON_FLAGS \
+       { VMX_EXIT_REASONS_FAILED_VMENTRY,      "FAILED_VMENTRY" }
+
 #define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
 #define VMX_ABORT_LOAD_HOST_PDPTE_FAIL       2
 #define VMX_ABORT_LOAD_HOST_MSR_FAIL         4
index df767af..45f8e1b 100644 (file)
@@ -8,6 +8,8 @@
 #include <asm/alternative-asm.h>
 #include <asm/export.h>
 
+.pushsection .noinstr.text, "ax"
+
 /*
  * We build a jump to memcpy_orig by default which gets NOPped out on
  * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
@@ -184,6 +186,8 @@ SYM_FUNC_START(memcpy_orig)
        retq
 SYM_FUNC_END(memcpy_orig)
 
+.popsection
+
 #ifndef CONFIG_UML
 
 MCSAFE_TEST_CTL
index 0efaf45..e0878f5 100644 (file)
 #include <linux/kernel.h>
 #include <linux/bootconfig.h>
 
-static int xbc_show_array(struct xbc_node *node)
+static int xbc_show_value(struct xbc_node *node)
 {
        const char *val;
+       char q;
        int i = 0;
 
        xbc_array_for_each_value(node, val) {
-               printf("\"%s\"%s", val, node->next ? ", " : ";\n");
+               if (strchr(val, '"'))
+                       q = '\'';
+               else
+                       q = '"';
+               printf("%c%s%c%s", q, val, q, node->next ? ", " : ";\n");
                i++;
        }
        return i;
@@ -48,10 +53,7 @@ static void xbc_show_compact_tree(void)
                        continue;
                } else if (cnode && xbc_node_is_value(cnode)) {
                        printf("%s = ", xbc_node_get_data(node));
-                       if (cnode->next)
-                               xbc_show_array(cnode);
-                       else
-                               printf("\"%s\";\n", xbc_node_get_data(cnode));
+                       xbc_show_value(cnode);
                } else {
                        printf("%s;\n", xbc_node_get_data(node));
                }
@@ -205,11 +207,13 @@ int show_xbc(const char *path)
        }
 
        ret = load_xbc_from_initrd(fd, &buf);
-       if (ret < 0)
+       if (ret < 0) {
                pr_err("Failed to load a boot config from initrd: %d\n", ret);
-       else
-               xbc_show_compact_tree();
-
+               goto out;
+       }
+       xbc_show_compact_tree();
+       ret = 0;
+out:
        close(fd);
        free(buf);
 
index eff16b7..3c2ab9e 100755 (executable)
@@ -55,6 +55,9 @@ echo "Apply command test"
 xpass $BOOTCONF -a $TEMPCONF $INITRD
 new_size=$(stat -c %s $INITRD)
 
+echo "Show command test"
+xpass $BOOTCONF $INITRD
+
 echo "File size check"
 xpass test $new_size -eq $(expr $bconf_size + $initrd_size + 9 + 12)
 
@@ -114,6 +117,13 @@ xpass grep -q "bar" $OUTFILE
 xpass grep -q "baz" $OUTFILE
 xpass grep -q "qux" $OUTFILE
 
+echo "Double/single quotes test"
+echo "key = '\"string\"';" > $TEMPCONF
+$BOOTCONF -a $TEMPCONF $INITRD
+$BOOTCONF $INITRD > $TEMPCONF
+cat $TEMPCONF
+xpass grep \'\"string\"\' $TEMPCONF
+
 echo "=== expected failure cases ==="
 for i in samples/bad-* ; do
   xfail $BOOTCONF -a $i $INITRD
index 6df1850..74bc9a1 100644 (file)
@@ -123,5 +123,12 @@ runqslower_install:
 runqslower_clean:
        $(call descend,runqslower,clean)
 
+resolve_btfids:
+       $(call descend,resolve_btfids)
+
+resolve_btfids_clean:
+       $(call descend,resolve_btfids,clean)
+
 .PHONY: all install clean bpftool bpftool_install bpftool_clean \
-       runqslower runqslower_install runqslower_clean
+       runqslower runqslower_install runqslower_clean \
+       resolve_btfids resolve_btfids_clean
index 26cde83..3e601bc 100644 (file)
@@ -1,10 +1,11 @@
 # SPDX-License-Identifier: GPL-2.0-only
 *.d
-/_bpftool
+/bpftool-bootstrap
 /bpftool
 bpftool*.8
 bpf-helpers.*
 FEATURE-DUMP.bpftool
 feature
 libbpf
-profiler.skel.h
+/*.skel.h
+/vmlinux.h
index ce3a724..896f4c6 100644 (file)
@@ -36,6 +36,11 @@ DESCRIPTION
                  otherwise list all BTF objects currently loaded on the
                  system.
 
+                 Since Linux 5.8 bpftool is able to discover information about
+                 processes that hold open file descriptors (FDs) against BTF
+                 objects. On such kernels bpftool will automatically emit this
+                 information as well.
+
        **bpftool btf dump** *BTF_SRC*
                  Dump BTF entries from a given *BTF_SRC*.
 
index 0e43d7b..38b0949 100644 (file)
@@ -37,6 +37,11 @@ DESCRIPTION
                  zero or more named attributes, some of which depend on type
                  of link.
 
+                 Since Linux 5.8 bpftool is able to discover information about
+                 processes that hold open file descriptors (FDs) against BPF
+                 links. On such kernels bpftool will automatically emit this
+                 information as well.
+
        **bpftool link pin** *LINK* *FILE*
                  Pin link *LINK* as *FILE*.
 
@@ -82,6 +87,7 @@ EXAMPLES
 
     10: cgroup  prog 25
             cgroup_id 614  attach_type egress
+            pids test_progs(223)
 
 **# bpftool --json --pretty link show**
 
@@ -91,7 +97,12 @@ EXAMPLES
             "type": "cgroup",
             "prog_id": 25,
             "cgroup_id": 614,
-            "attach_type": "egress"
+            "attach_type": "egress",
+            "pids": [{
+                    "pid": 223,
+                    "comm": "test_progs"
+                }
+            ]
         }
     ]
 
index 3110164..41e2a74 100644 (file)
@@ -49,7 +49,7 @@ MAP COMMANDS
 |              | **lru_percpu_hash** | **lpm_trie** | **array_of_maps** | **hash_of_maps**
 |              | **devmap** | **devmap_hash** | **sockmap** | **cpumap** | **xskmap** | **sockhash**
 |              | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage**
-|              | **queue** | **stack** | **sk_storage** | **struct_ops** }
+|              | **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** }
 
 DESCRIPTION
 ===========
@@ -62,6 +62,11 @@ DESCRIPTION
                  Output will start with map ID followed by map type and
                  zero or more named attributes (depending on kernel version).
 
+                 Since Linux 5.8 bpftool is able to discover information about
+                 processes that hold open file descriptors (FDs) against BPF
+                 maps. On such kernels bpftool will automatically emit this
+                 information as well.
+
        **bpftool map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE*  **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**dev** *NAME*]
                  Create a new map with given parameters and pin it to *bpffs*
                  as *FILE*.
@@ -180,7 +185,8 @@ EXAMPLES
 ::
 
   10: hash  name some_map  flags 0x0
-       key 4B  value 8B  max_entries 2048  memlock 167936B
+        key 4B  value 8B  max_entries 2048  memlock 167936B
+        pids systemd(1)
 
 The following three commands are equivalent:
 
index 2b25495..82e356b 100644 (file)
@@ -45,7 +45,7 @@ PROG COMMANDS
 |               **cgroup/getsockname4** | **cgroup/getsockname6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** |
 |              **cgroup/recvmsg4** | **cgroup/recvmsg6** | **cgroup/sysctl** |
 |              **cgroup/getsockopt** | **cgroup/setsockopt** |
-|              **struct_ops** | **fentry** | **fexit** | **freplace**
+|              **struct_ops** | **fentry** | **fexit** | **freplace** | **sk_lookup**
 |      }
 |       *ATTACH_TYPE* := {
 |              **msg_verdict** | **stream_verdict** | **stream_parser** | **flow_dissector**
@@ -75,6 +75,11 @@ DESCRIPTION
                  program run. Activation or deactivation of the feature is
                  performed via the **kernel.bpf_stats_enabled** sysctl knob.
 
+                 Since Linux 5.8 bpftool is able to discover information about
+                 processes that hold open file descriptors (FDs) against BPF
+                 programs. On such kernels bpftool will automatically emit this
+                 information as well.
+
        **bpftool prog dump xlated** *PROG* [{ **file** *FILE* | **opcodes** | **visual** | **linum** }]
                  Dump eBPF instructions of the programs from the kernel. By
                  default, eBPF will be disassembled and printed to standard
@@ -243,6 +248,7 @@ EXAMPLES
     10: xdp  name some_prog  tag 005a3d2123620c8b  gpl run_time_ns 81632 run_cnt 10
             loaded_at 2017-09-29T20:11:00+0000  uid 0
             xlated 528B  jited 370B  memlock 4096B  map_ids 10
+            pids systemd(1)
 
 **# bpftool --json --pretty prog show**
 
@@ -262,6 +268,11 @@ EXAMPLES
             "bytes_jited": 370,
             "bytes_memlock": 4096,
             "map_ids": [10
+            ],
+            "pids": [{
+                    "pid": 1,
+                    "comm": "systemd"
+                }
             ]
         }
     ]
index 9e85f10..51bd520 100644 (file)
@@ -40,8 +40,9 @@ bash_compdir ?= /usr/share/bash-completion/completions
 
 CFLAGS += -O2
 CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wno-missing-field-initializers
-CFLAGS += $(filter-out -Wswitch-enum,$(EXTRA_WARNINGS))
+CFLAGS += $(filter-out -Wswitch-enum -Wnested-externs,$(EXTRA_WARNINGS))
 CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ \
+       -I$(if $(OUTPUT),$(OUTPUT),.) \
        -I$(srctree)/kernel/bpf/ \
        -I$(srctree)/tools/include \
        -I$(srctree)/tools/include/uapi \
@@ -61,9 +62,9 @@ CLANG ?= clang
 
 FEATURE_USER = .bpftool
 FEATURE_TESTS = libbfd disassembler-four-args reallocarray zlib libcap \
-       clang-bpf-global-var
+       clang-bpf-co-re
 FEATURE_DISPLAY = libbfd disassembler-four-args zlib libcap \
-       clang-bpf-global-var
+       clang-bpf-co-re
 
 check_feat := 1
 NON_CHECK_FEAT_TARGETS := clean uninstall doc doc-clean doc-install doc-uninstall
@@ -116,40 +117,60 @@ CFLAGS += -DHAVE_LIBBFD_SUPPORT
 SRCS += $(BFD_SRCS)
 endif
 
+BPFTOOL_BOOTSTRAP := $(if $(OUTPUT),$(OUTPUT)bpftool-bootstrap,./bpftool-bootstrap)
+
+BOOTSTRAP_OBJS = $(addprefix $(OUTPUT),main.o common.o json_writer.o gen.o btf.o)
 OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
-_OBJS = $(filter-out $(OUTPUT)prog.o,$(OBJS)) $(OUTPUT)_prog.o
 
-ifeq ($(feature-clang-bpf-global-var),1)
-       __OBJS = $(OBJS)
-else
-       __OBJS = $(_OBJS)
-endif
+VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux)                           \
+                    $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux)    \
+                    ../../../vmlinux                                   \
+                    /sys/kernel/btf/vmlinux                            \
+                    /boot/vmlinux-$(shell uname -r)
+VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
 
-$(OUTPUT)_prog.o: prog.c
-       $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -DBPFTOOL_WITHOUT_SKELETONS -o $@ $<
+ifneq ($(VMLINUX_BTF)$(VMLINUX_H),)
+ifeq ($(feature-clang-bpf-co-re),1)
 
-$(OUTPUT)_bpftool: $(_OBJS) $(LIBBPF)
-       $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(_OBJS) $(LIBS)
+BUILD_BPF_SKELS := 1
 
-skeleton/profiler.bpf.o: skeleton/profiler.bpf.c $(LIBBPF)
+$(OUTPUT)vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL_BOOTSTRAP)
+ifeq ($(VMLINUX_H),)
+       $(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) btf dump file $< format c > $@
+else
+       $(Q)cp "$(VMLINUX_H)" $@
+endif
+
+$(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF)
        $(QUIET_CLANG)$(CLANG) \
+               -I$(if $(OUTPUT),$(OUTPUT),.) \
                -I$(srctree)/tools/include/uapi/ \
-               -I$(LIBBPF_PATH) -I$(srctree)/tools/lib \
+               -I$(LIBBPF_PATH) \
+               -I$(srctree)/tools/lib \
                -g -O2 -target bpf -c $< -o $@
 
-profiler.skel.h: $(OUTPUT)_bpftool skeleton/profiler.bpf.o
-       $(QUIET_GEN)$(OUTPUT)./_bpftool gen skeleton skeleton/profiler.bpf.o > $@
+$(OUTPUT)%.skel.h: $(OUTPUT)%.bpf.o $(BPFTOOL_BOOTSTRAP)
+       $(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) gen skeleton $< > $@
 
-$(OUTPUT)prog.o: prog.c profiler.skel.h
-       $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $<
+$(OUTPUT)prog.o: $(OUTPUT)profiler.skel.h
+
+$(OUTPUT)pids.o: $(OUTPUT)pid_iter.skel.h
+
+endif
+endif
+
+CFLAGS += $(if $(BUILD_BPF_SKELS),,-DBPFTOOL_WITHOUT_SKELETONS)
 
 $(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
        $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $<
 
 $(OUTPUT)feature.o: | zdep
 
-$(OUTPUT)bpftool: $(__OBJS) $(LIBBPF)
-       $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(__OBJS) $(LIBS)
+$(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF)
+       $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(BOOTSTRAP_OBJS) $(LIBS)
+
+$(OUTPUT)bpftool: $(OBJS) $(LIBBPF)
+       $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJS) $(LIBS)
 
 $(OUTPUT)%.o: %.c
        $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $<
@@ -157,7 +178,7 @@ $(OUTPUT)%.o: %.c
 clean: $(LIBBPF)-clean
        $(call QUIET_CLEAN, bpftool)
        $(Q)$(RM) -- $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
-       $(Q)$(RM) -- $(OUTPUT)_bpftool profiler.skel.h skeleton/profiler.bpf.o
+       $(Q)$(RM) -- $(BPFTOOL_BOOTSTRAP) $(OUTPUT)*.skel.h $(OUTPUT)vmlinux.h
        $(Q)$(RM) -r -- $(OUTPUT)libbpf/
        $(call QUIET_CLEAN, core-gen)
        $(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpftool
@@ -192,6 +213,7 @@ FORCE:
 zdep:
        @if [ "$(feature-zlib)" != "1" ]; then echo "No zlib found"; exit 1 ; fi
 
+.SECONDARY:
 .PHONY: all FORCE clean install uninstall zdep
 .PHONY: doc doc-clean doc-install doc-uninstall
 .DEFAULT_GOAL := all
index 25b25ac..7b13726 100644 (file)
@@ -479,7 +479,7 @@ _bpftool()
                                 cgroup/post_bind4 cgroup/post_bind6 \
                                 cgroup/sysctl cgroup/getsockopt \
                                 cgroup/setsockopt struct_ops \
-                                fentry fexit freplace" -- \
+                                fentry fexit freplace sk_lookup" -- \
                                                    "$cur" ) )
                             return 0
                             ;;
index faac818..fc9bc7a 100644 (file)
@@ -809,6 +809,7 @@ show_btf_plain(struct bpf_btf_info *info, int fd,
                        printf("%s%u", n++ == 0 ? "  map_ids " : ",",
                               obj->obj_id);
        }
+       emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");
 
        printf("\n");
 }
@@ -841,6 +842,9 @@ show_btf_json(struct bpf_btf_info *info, int fd,
                        jsonw_uint(json_wtr, obj->obj_id);
        }
        jsonw_end_array(json_wtr);      /* map_ids */
+
+       emit_obj_refs_json(&refs_table, info->id, json_wtr); /* pids */
+
        jsonw_end_object(json_wtr);     /* btf object */
 }
 
@@ -893,6 +897,7 @@ static int do_show(int argc, char **argv)
                        close(fd);
                return err;
        }
+       build_obj_refs_table(&refs_table, BPF_OBJ_BTF);
 
        if (fd >= 0) {
                err = show_btf(fd, &btf_prog_table, &btf_map_table);
@@ -939,6 +944,7 @@ static int do_show(int argc, char **argv)
 exit_free:
        delete_btf_table(&btf_prog_table);
        delete_btf_table(&btf_map_table);
+       delete_obj_refs_table(&refs_table);
 
        return err;
 }
index c47bdc6..6530366 100644 (file)
@@ -1,10 +1,11 @@
 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
 
+#define _GNU_SOURCE
 #include <ctype.h>
 #include <errno.h>
 #include <fcntl.h>
-#include <fts.h>
+#include <ftw.h>
 #include <libgen.h>
 #include <mntent.h>
 #include <stdbool.h>
 #define BPF_FS_MAGIC           0xcafe4a11
 #endif
 
+const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = {
+       [BPF_CGROUP_INET_INGRESS]       = "ingress",
+       [BPF_CGROUP_INET_EGRESS]        = "egress",
+       [BPF_CGROUP_INET_SOCK_CREATE]   = "sock_create",
+       [BPF_CGROUP_INET_SOCK_RELEASE]  = "sock_release",
+       [BPF_CGROUP_SOCK_OPS]           = "sock_ops",
+       [BPF_CGROUP_DEVICE]             = "device",
+       [BPF_CGROUP_INET4_BIND]         = "bind4",
+       [BPF_CGROUP_INET6_BIND]         = "bind6",
+       [BPF_CGROUP_INET4_CONNECT]      = "connect4",
+       [BPF_CGROUP_INET6_CONNECT]      = "connect6",
+       [BPF_CGROUP_INET4_POST_BIND]    = "post_bind4",
+       [BPF_CGROUP_INET6_POST_BIND]    = "post_bind6",
+       [BPF_CGROUP_INET4_GETPEERNAME]  = "getpeername4",
+       [BPF_CGROUP_INET6_GETPEERNAME]  = "getpeername6",
+       [BPF_CGROUP_INET4_GETSOCKNAME]  = "getsockname4",
+       [BPF_CGROUP_INET6_GETSOCKNAME]  = "getsockname6",
+       [BPF_CGROUP_UDP4_SENDMSG]       = "sendmsg4",
+       [BPF_CGROUP_UDP6_SENDMSG]       = "sendmsg6",
+       [BPF_CGROUP_SYSCTL]             = "sysctl",
+       [BPF_CGROUP_UDP4_RECVMSG]       = "recvmsg4",
+       [BPF_CGROUP_UDP6_RECVMSG]       = "recvmsg6",
+       [BPF_CGROUP_GETSOCKOPT]         = "getsockopt",
+       [BPF_CGROUP_SETSOCKOPT]         = "setsockopt",
+
+       [BPF_SK_SKB_STREAM_PARSER]      = "sk_skb_stream_parser",
+       [BPF_SK_SKB_STREAM_VERDICT]     = "sk_skb_stream_verdict",
+       [BPF_SK_MSG_VERDICT]            = "sk_msg_verdict",
+       [BPF_LIRC_MODE2]                = "lirc_mode2",
+       [BPF_FLOW_DISSECTOR]            = "flow_dissector",
+       [BPF_TRACE_RAW_TP]              = "raw_tp",
+       [BPF_TRACE_FENTRY]              = "fentry",
+       [BPF_TRACE_FEXIT]               = "fexit",
+       [BPF_MODIFY_RETURN]             = "mod_ret",
+       [BPF_LSM_MAC]                   = "lsm_mac",
+       [BPF_SK_LOOKUP]                 = "sk_lookup",
+};
+
 void p_err(const char *fmt, ...)
 {
        va_list ap;
@@ -123,24 +162,35 @@ int mount_tracefs(const char *target)
        return err;
 }
 
-int open_obj_pinned(char *path, bool quiet)
+int open_obj_pinned(const char *path, bool quiet)
 {
-       int fd;
+       char *pname;
+       int fd = -1;
+
+       pname = strdup(path);
+       if (!pname) {
+               if (!quiet)
+                       p_err("mem alloc failed");
+               goto out_ret;
+       }
 
-       fd = bpf_obj_get(path);
+       fd = bpf_obj_get(pname);
        if (fd < 0) {
                if (!quiet)
-                       p_err("bpf obj get (%s): %s", path,
-                             errno == EACCES && !is_bpffs(dirname(path)) ?
+                       p_err("bpf obj get (%s): %s", pname,
+                             errno == EACCES && !is_bpffs(dirname(pname)) ?
                            "directory not in bpf file system (bpffs)" :
                            strerror(errno));
-               return -1;
+               goto out_free;
        }
 
+out_free:
+       free(pname);
+out_ret:
        return fd;
 }
 
-int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type)
+int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type)
 {
        enum bpf_obj_type type;
        int fd;
@@ -330,71 +380,82 @@ void print_hex_data_json(uint8_t *data, size_t len)
        jsonw_end_array(json_wtr);
 }
 
+/* extra params for nftw cb */
+static struct pinned_obj_table *build_fn_table;
+static enum bpf_obj_type build_fn_type;
+
+static int do_build_table_cb(const char *fpath, const struct stat *sb,
+                            int typeflag, struct FTW *ftwbuf)
+{
+       struct bpf_prog_info pinned_info;
+       __u32 len = sizeof(pinned_info);
+       struct pinned_obj *obj_node;
+       enum bpf_obj_type objtype;
+       int fd, err = 0;
+
+       if (typeflag != FTW_F)
+               goto out_ret;
+
+       fd = open_obj_pinned(fpath, true);
+       if (fd < 0)
+               goto out_ret;
+
+       objtype = get_fd_type(fd);
+       if (objtype != build_fn_type)
+               goto out_close;
+
+       memset(&pinned_info, 0, sizeof(pinned_info));
+       if (bpf_obj_get_info_by_fd(fd, &pinned_info, &len))
+               goto out_close;
+
+       obj_node = calloc(1, sizeof(*obj_node));
+       if (!obj_node) {
+               err = -1;
+               goto out_close;
+       }
+
+       obj_node->id = pinned_info.id;
+       obj_node->path = strdup(fpath);
+       if (!obj_node->path) {
+               err = -1;
+               free(obj_node);
+               goto out_close;
+       }
+
+       hash_add(build_fn_table->table, &obj_node->hash, obj_node->id);
+out_close:
+       close(fd);
+out_ret:
+       return err;
+}
+
 int build_pinned_obj_table(struct pinned_obj_table *tab,
                           enum bpf_obj_type type)
 {
-       struct bpf_prog_info pinned_info = {};
-       struct pinned_obj *obj_node = NULL;
-       __u32 len = sizeof(pinned_info);
        struct mntent *mntent = NULL;
-       enum bpf_obj_type objtype;
        FILE *mntfile = NULL;
-       FTSENT *ftse = NULL;
-       FTS *fts = NULL;
-       int fd, err;
+       int flags = FTW_PHYS;
+       int nopenfd = 16;
+       int err = 0;
 
        mntfile = setmntent("/proc/mounts", "r");
        if (!mntfile)
                return -1;
 
+       build_fn_table = tab;
+       build_fn_type = type;
+
        while ((mntent = getmntent(mntfile))) {
-               char *path[] = { mntent->mnt_dir, NULL };
+               char *path = mntent->mnt_dir;
 
                if (strncmp(mntent->mnt_type, "bpf", 3) != 0)
                        continue;
-
-               fts = fts_open(path, 0, NULL);
-               if (!fts)
-                       continue;
-
-               while ((ftse = fts_read(fts))) {
-                       if (!(ftse->fts_info & FTS_F))
-                               continue;
-                       fd = open_obj_pinned(ftse->fts_path, true);
-                       if (fd < 0)
-                               continue;
-
-                       objtype = get_fd_type(fd);
-                       if (objtype != type) {
-                               close(fd);
-                               continue;
-                       }
-                       memset(&pinned_info, 0, sizeof(pinned_info));
-                       err = bpf_obj_get_info_by_fd(fd, &pinned_info, &len);
-                       if (err) {
-                               close(fd);
-                               continue;
-                       }
-
-                       obj_node = malloc(sizeof(*obj_node));
-                       if (!obj_node) {
-                               close(fd);
-                               fts_close(fts);
-                               fclose(mntfile);
-                               return -1;
-                       }
-
-                       memset(obj_node, 0, sizeof(*obj_node));
-                       obj_node->id = pinned_info.id;
-                       obj_node->path = strdup(ftse->fts_path);
-                       hash_add(tab->table, &obj_node->hash, obj_node->id);
-
-                       close(fd);
-               }
-               fts_close(fts);
+               err = nftw(path, do_build_table_cb, nopenfd, flags);
+               if (err)
+                       break;
        }
        fclose(mntfile);
-       return 0;
+       return err;
 }
 
 void delete_pinned_obj_table(struct pinned_obj_table *tab)
@@ -581,3 +642,311 @@ print_all_levels(__maybe_unused enum libbpf_print_level level,
 {
        return vfprintf(stderr, format, args);
 }
+
+static int prog_fd_by_nametag(void *nametag, int **fds, bool tag)
+{
+       unsigned int id = 0;
+       int fd, nb_fds = 0;
+       void *tmp;
+       int err;
+
+       while (true) {
+               struct bpf_prog_info info = {};
+               __u32 len = sizeof(info);
+
+               err = bpf_prog_get_next_id(id, &id);
+               if (err) {
+                       if (errno != ENOENT) {
+                               p_err("%s", strerror(errno));
+                               goto err_close_fds;
+                       }
+                       return nb_fds;
+               }
+
+               fd = bpf_prog_get_fd_by_id(id);
+               if (fd < 0) {
+                       p_err("can't get prog by id (%u): %s",
+                             id, strerror(errno));
+                       goto err_close_fds;
+               }
+
+               err = bpf_obj_get_info_by_fd(fd, &info, &len);
+               if (err) {
+                       p_err("can't get prog info (%u): %s",
+                             id, strerror(errno));
+                       goto err_close_fd;
+               }
+
+               if ((tag && memcmp(nametag, info.tag, BPF_TAG_SIZE)) ||
+                   (!tag && strncmp(nametag, info.name, BPF_OBJ_NAME_LEN))) {
+                       close(fd);
+                       continue;
+               }
+
+               if (nb_fds > 0) {
+                       tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
+                       if (!tmp) {
+                               p_err("failed to realloc");
+                               goto err_close_fd;
+                       }
+                       *fds = tmp;
+               }
+               (*fds)[nb_fds++] = fd;
+       }
+
+err_close_fd:
+       close(fd);
+err_close_fds:
+       while (--nb_fds >= 0)
+               close((*fds)[nb_fds]);
+       return -1;
+}
+
+int prog_parse_fds(int *argc, char ***argv, int **fds)
+{
+       if (is_prefix(**argv, "id")) {
+               unsigned int id;
+               char *endptr;
+
+               NEXT_ARGP();
+
+               id = strtoul(**argv, &endptr, 0);
+               if (*endptr) {
+                       p_err("can't parse %s as ID", **argv);
+                       return -1;
+               }
+               NEXT_ARGP();
+
+               (*fds)[0] = bpf_prog_get_fd_by_id(id);
+               if ((*fds)[0] < 0) {
+                       p_err("get by id (%u): %s", id, strerror(errno));
+                       return -1;
+               }
+               return 1;
+       } else if (is_prefix(**argv, "tag")) {
+               unsigned char tag[BPF_TAG_SIZE];
+
+               NEXT_ARGP();
+
+               if (sscanf(**argv, BPF_TAG_FMT, tag, tag + 1, tag + 2,
+                          tag + 3, tag + 4, tag + 5, tag + 6, tag + 7)
+                   != BPF_TAG_SIZE) {
+                       p_err("can't parse tag");
+                       return -1;
+               }
+               NEXT_ARGP();
+
+               return prog_fd_by_nametag(tag, fds, true);
+       } else if (is_prefix(**argv, "name")) {
+               char *name;
+
+               NEXT_ARGP();
+
+               name = **argv;
+               if (strlen(name) > BPF_OBJ_NAME_LEN - 1) {
+                       p_err("can't parse name");
+                       return -1;
+               }
+               NEXT_ARGP();
+
+               return prog_fd_by_nametag(name, fds, false);
+       } else if (is_prefix(**argv, "pinned")) {
+               char *path;
+
+               NEXT_ARGP();
+
+               path = **argv;
+               NEXT_ARGP();
+
+               (*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_PROG);
+               if ((*fds)[0] < 0)
+                       return -1;
+               return 1;
+       }
+
+       p_err("expected 'id', 'tag', 'name' or 'pinned', got: '%s'?", **argv);
+       return -1;
+}
+
+int prog_parse_fd(int *argc, char ***argv)
+{
+       int *fds = NULL;
+       int nb_fds, fd;
+
+       fds = malloc(sizeof(int));
+       if (!fds) {
+               p_err("mem alloc failed");
+               return -1;
+       }
+       nb_fds = prog_parse_fds(argc, argv, &fds);
+       if (nb_fds != 1) {
+               if (nb_fds > 1) {
+                       p_err("several programs match this handle");
+                       while (nb_fds--)
+                               close(fds[nb_fds]);
+               }
+               fd = -1;
+               goto exit_free;
+       }
+
+       fd = fds[0];
+exit_free:
+       free(fds);
+       return fd;
+}
+
+static int map_fd_by_name(char *name, int **fds)
+{
+       unsigned int id = 0;
+       int fd, nb_fds = 0;
+       void *tmp;
+       int err;
+
+       while (true) {
+               struct bpf_map_info info = {};
+               __u32 len = sizeof(info);
+
+               err = bpf_map_get_next_id(id, &id);
+               if (err) {
+                       if (errno != ENOENT) {
+                               p_err("%s", strerror(errno));
+                               goto err_close_fds;
+                       }
+                       return nb_fds;
+               }
+
+               fd = bpf_map_get_fd_by_id(id);
+               if (fd < 0) {
+                       p_err("can't get map by id (%u): %s",
+                             id, strerror(errno));
+                       goto err_close_fds;
+               }
+
+               err = bpf_obj_get_info_by_fd(fd, &info, &len);
+               if (err) {
+                       p_err("can't get map info (%u): %s",
+                             id, strerror(errno));
+                       goto err_close_fd;
+               }
+
+               if (strncmp(name, info.name, BPF_OBJ_NAME_LEN)) {
+                       close(fd);
+                       continue;
+               }
+
+               if (nb_fds > 0) {
+                       tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
+                       if (!tmp) {
+                               p_err("failed to realloc");
+                               goto err_close_fd;
+                       }
+                       *fds = tmp;
+               }
+               (*fds)[nb_fds++] = fd;
+       }
+
+err_close_fd:
+       close(fd);
+err_close_fds:
+       while (--nb_fds >= 0)
+               close((*fds)[nb_fds]);
+       return -1;
+}
+
+int map_parse_fds(int *argc, char ***argv, int **fds)
+{
+       if (is_prefix(**argv, "id")) {
+               unsigned int id;
+               char *endptr;
+
+               NEXT_ARGP();
+
+               id = strtoul(**argv, &endptr, 0);
+               if (*endptr) {
+                       p_err("can't parse %s as ID", **argv);
+                       return -1;
+               }
+               NEXT_ARGP();
+
+               (*fds)[0] = bpf_map_get_fd_by_id(id);
+               if ((*fds)[0] < 0) {
+                       p_err("get map by id (%u): %s", id, strerror(errno));
+                       return -1;
+               }
+               return 1;
+       } else if (is_prefix(**argv, "name")) {
+               char *name;
+
+               NEXT_ARGP();
+
+               name = **argv;
+               if (strlen(name) > BPF_OBJ_NAME_LEN - 1) {
+                       p_err("can't parse name");
+                       return -1;
+               }
+               NEXT_ARGP();
+
+               return map_fd_by_name(name, fds);
+       } else if (is_prefix(**argv, "pinned")) {
+               char *path;
+
+               NEXT_ARGP();
+
+               path = **argv;
+               NEXT_ARGP();
+
+               (*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_MAP);
+               if ((*fds)[0] < 0)
+                       return -1;
+               return 1;
+       }
+
+       p_err("expected 'id', 'name' or 'pinned', got: '%s'?", **argv);
+       return -1;
+}
+
+int map_parse_fd(int *argc, char ***argv)
+{
+       int *fds = NULL;
+       int nb_fds, fd;
+
+       fds = malloc(sizeof(int));
+       if (!fds) {
+               p_err("mem alloc failed");
+               return -1;
+       }
+       nb_fds = map_parse_fds(argc, argv, &fds);
+       if (nb_fds != 1) {
+               if (nb_fds > 1) {
+                       p_err("several maps match this handle");
+                       while (nb_fds--)
+                               close(fds[nb_fds]);
+               }
+               fd = -1;
+               goto exit_free;
+       }
+
+       fd = fds[0];
+exit_free:
+       free(fds);
+       return fd;
+}
+
+int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len)
+{
+       int err;
+       int fd;
+
+       fd = map_parse_fd(argc, argv);
+       if (fd < 0)
+               return -1;
+
+       err = bpf_obj_get_info_by_fd(fd, info, info_len);
+       if (err) {
+               p_err("can't get map info: %s", strerror(errno));
+               close(fd);
+               return err;
+       }
+
+       return fd;
+}
index 768bf77..1cd7580 100644 (file)
@@ -695,7 +695,7 @@ section_program_types(bool *supported_types, const char *define_prefix,
                            "/*** eBPF program types ***/",
                            define_prefix);
 
-       for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
+       for (i = BPF_PROG_TYPE_UNSPEC + 1; i < prog_type_name_size; i++)
                probe_prog_type(i, supported_types, define_prefix, ifindex);
 
        print_end_section();
@@ -741,7 +741,7 @@ section_helpers(bool *supported_types, const char *define_prefix, __u32 ifindex)
                       "        %sBPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper\n",
                       define_prefix, define_prefix, define_prefix,
                       define_prefix);
-       for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
+       for (i = BPF_PROG_TYPE_UNSPEC + 1; i < prog_type_name_size; i++)
                probe_helpers_for_progtype(i, supported_types[i], define_prefix,
                                           ifindex);
 
index 10de76b..8a4c2b3 100644 (file)
@@ -88,7 +88,7 @@ static const char *get_map_ident(const struct bpf_map *map)
                return NULL;
 }
 
-static void codegen_btf_dump_printf(void *ct, const char *fmt, va_list args)
+static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
 {
        vprintf(fmt, args);
 }
@@ -104,17 +104,20 @@ static int codegen_datasec_def(struct bpf_object *obj,
        int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
        const char *sec_ident;
        char var_ident[256];
+       bool strip_mods = false;
 
-       if (strcmp(sec_name, ".data") == 0)
+       if (strcmp(sec_name, ".data") == 0) {
                sec_ident = "data";
-       else if (strcmp(sec_name, ".bss") == 0)
+       } else if (strcmp(sec_name, ".bss") == 0) {
                sec_ident = "bss";
-       else if (strcmp(sec_name, ".rodata") == 0)
+       } else if (strcmp(sec_name, ".rodata") == 0) {
                sec_ident = "rodata";
-       else if (strcmp(sec_name, ".kconfig") == 0)
+               strip_mods = true;
+       } else if (strcmp(sec_name, ".kconfig") == 0) {
                sec_ident = "kconfig";
-       else
+       } else {
                return 0;
+       }
 
        printf("        struct %s__%s {\n", obj_name, sec_ident);
        for (i = 0; i < vlen; i++, sec_var++) {
@@ -123,16 +126,10 @@ static int codegen_datasec_def(struct bpf_object *obj,
                DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
                        .field_name = var_ident,
                        .indent_level = 2,
+                       .strip_mods = strip_mods,
                );
                int need_off = sec_var->offset, align_off, align;
                __u32 var_type_id = var->type;
-               const struct btf_type *t;
-
-               t = btf__type_by_id(btf, var_type_id);
-               while (btf_is_mod(t)) {
-                       var_type_id = t->type;
-                       t = btf__type_by_id(btf, var_type_id);
-               }
 
                if (off > need_off) {
                        p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
@@ -305,8 +302,11 @@ static int do_skeleton(int argc, char **argv)
        opts.object_name = obj_name;
        obj = bpf_object__open_mem(obj_data, file_sz, &opts);
        if (IS_ERR(obj)) {
+               char err_buf[256];
+
+               libbpf_strerror(PTR_ERR(obj), err_buf, sizeof(err_buf));
+               p_err("failed to open BPF object file: %s", err_buf);
                obj = NULL;
-               p_err("failed to open BPF object file: %ld", PTR_ERR(obj));
                goto out;
        }
 
index fca57ee..326b8fd 100644 (file)
@@ -108,7 +108,7 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
                if (err)
                        return err;
 
-               if (prog_info.type < ARRAY_SIZE(prog_type_name))
+               if (prog_info.type < prog_type_name_size)
                        jsonw_string_field(json_wtr, "prog_type",
                                           prog_type_name[prog_info.type]);
                else
@@ -143,6 +143,9 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
                }
                jsonw_end_array(json_wtr);
        }
+
+       emit_obj_refs_json(&refs_table, info->id, json_wtr);
+
        jsonw_end_object(json_wtr);
 
        return 0;
@@ -184,7 +187,7 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
                if (err)
                        return err;
 
-               if (prog_info.type < ARRAY_SIZE(prog_type_name))
+               if (prog_info.type < prog_type_name_size)
                        printf("\n\tprog_type %s  ",
                               prog_type_name[prog_info.type]);
                else
@@ -212,6 +215,7 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
                                printf("\n\tpinned %s", obj->path);
                }
        }
+       emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");
 
        printf("\n");
 
@@ -257,6 +261,7 @@ static int do_show(int argc, char **argv)
 
        if (show_pinned)
                build_pinned_obj_table(&link_table, BPF_OBJ_LINK);
+       build_obj_refs_table(&refs_table, BPF_OBJ_LINK);
 
        if (argc == 2) {
                fd = link_parse_fd(&argc, &argv);
@@ -296,6 +301,8 @@ static int do_show(int argc, char **argv)
        if (json_output)
                jsonw_end_array(json_wtr);
 
+       delete_obj_refs_table(&refs_table);
+
        return errno == ENOENT ? 0 : -1;
 }
 
index 46bd716..4a191fc 100644 (file)
@@ -31,6 +31,7 @@ bool relaxed_maps;
 struct pinned_obj_table prog_table;
 struct pinned_obj_table map_table;
 struct pinned_obj_table link_table;
+struct obj_refs_table refs_table;
 
 static void __noreturn clean_and_exit(int i)
 {
@@ -92,9 +93,16 @@ int cmd_select(const struct cmd *cmds, int argc, char **argv,
        if (argc < 1 && cmds[0].func)
                return cmds[0].func(argc, argv);
 
-       for (i = 0; cmds[i].func; i++)
-               if (is_prefix(*argv, cmds[i].cmd))
+       for (i = 0; cmds[i].cmd; i++) {
+               if (is_prefix(*argv, cmds[i].cmd)) {
+                       if (!cmds[i].func) {
+                               p_err("command '%s' is not supported in bootstrap mode",
+                                     cmds[i].cmd);
+                               return -1;
+                       }
                        return cmds[i].func(argc - 1, argv + 1);
+               }
+       }
 
        help(argc - 1, argv + 1);
 
index 5cdf0bc..e3a79b5 100644 (file)
 #define HELP_SPEC_LINK                                                 \
        "LINK := { id LINK_ID | pinned FILE }"
 
-static const char * const prog_type_name[] = {
-       [BPF_PROG_TYPE_UNSPEC]                  = "unspec",
-       [BPF_PROG_TYPE_SOCKET_FILTER]           = "socket_filter",
-       [BPF_PROG_TYPE_KPROBE]                  = "kprobe",
-       [BPF_PROG_TYPE_SCHED_CLS]               = "sched_cls",
-       [BPF_PROG_TYPE_SCHED_ACT]               = "sched_act",
-       [BPF_PROG_TYPE_TRACEPOINT]              = "tracepoint",
-       [BPF_PROG_TYPE_XDP]                     = "xdp",
-       [BPF_PROG_TYPE_PERF_EVENT]              = "perf_event",
-       [BPF_PROG_TYPE_CGROUP_SKB]              = "cgroup_skb",
-       [BPF_PROG_TYPE_CGROUP_SOCK]             = "cgroup_sock",
-       [BPF_PROG_TYPE_LWT_IN]                  = "lwt_in",
-       [BPF_PROG_TYPE_LWT_OUT]                 = "lwt_out",
-       [BPF_PROG_TYPE_LWT_XMIT]                = "lwt_xmit",
-       [BPF_PROG_TYPE_SOCK_OPS]                = "sock_ops",
-       [BPF_PROG_TYPE_SK_SKB]                  = "sk_skb",
-       [BPF_PROG_TYPE_CGROUP_DEVICE]           = "cgroup_device",
-       [BPF_PROG_TYPE_SK_MSG]                  = "sk_msg",
-       [BPF_PROG_TYPE_RAW_TRACEPOINT]          = "raw_tracepoint",
-       [BPF_PROG_TYPE_CGROUP_SOCK_ADDR]        = "cgroup_sock_addr",
-       [BPF_PROG_TYPE_LWT_SEG6LOCAL]           = "lwt_seg6local",
-       [BPF_PROG_TYPE_LIRC_MODE2]              = "lirc_mode2",
-       [BPF_PROG_TYPE_SK_REUSEPORT]            = "sk_reuseport",
-       [BPF_PROG_TYPE_FLOW_DISSECTOR]          = "flow_dissector",
-       [BPF_PROG_TYPE_CGROUP_SYSCTL]           = "cgroup_sysctl",
-       [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
-       [BPF_PROG_TYPE_CGROUP_SOCKOPT]          = "cgroup_sockopt",
-       [BPF_PROG_TYPE_TRACING]                 = "tracing",
-       [BPF_PROG_TYPE_STRUCT_OPS]              = "struct_ops",
-       [BPF_PROG_TYPE_EXT]                     = "ext",
-};
+extern const char * const prog_type_name[];
+extern const size_t prog_type_name_size;
 
-static const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = {
-       [BPF_CGROUP_INET_INGRESS] = "ingress",
-       [BPF_CGROUP_INET_EGRESS] = "egress",
-       [BPF_CGROUP_INET_SOCK_CREATE] = "sock_create",
-       [BPF_CGROUP_SOCK_OPS] = "sock_ops",
-       [BPF_CGROUP_DEVICE] = "device",
-       [BPF_CGROUP_INET4_BIND] = "bind4",
-       [BPF_CGROUP_INET6_BIND] = "bind6",
-       [BPF_CGROUP_INET4_CONNECT] = "connect4",
-       [BPF_CGROUP_INET6_CONNECT] = "connect6",
-       [BPF_CGROUP_INET4_POST_BIND] = "post_bind4",
-       [BPF_CGROUP_INET6_POST_BIND] = "post_bind6",
-       [BPF_CGROUP_INET4_GETPEERNAME] = "getpeername4",
-       [BPF_CGROUP_INET6_GETPEERNAME] = "getpeername6",
-       [BPF_CGROUP_INET4_GETSOCKNAME] = "getsockname4",
-       [BPF_CGROUP_INET6_GETSOCKNAME] = "getsockname6",
-       [BPF_CGROUP_UDP4_SENDMSG] = "sendmsg4",
-       [BPF_CGROUP_UDP6_SENDMSG] = "sendmsg6",
-       [BPF_CGROUP_SYSCTL] = "sysctl",
-       [BPF_CGROUP_UDP4_RECVMSG] = "recvmsg4",
-       [BPF_CGROUP_UDP6_RECVMSG] = "recvmsg6",
-       [BPF_CGROUP_GETSOCKOPT] = "getsockopt",
-       [BPF_CGROUP_SETSOCKOPT] = "setsockopt",
-
-       [BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser",
-       [BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict",
-       [BPF_SK_MSG_VERDICT] = "sk_msg_verdict",
-       [BPF_LIRC_MODE2] = "lirc_mode2",
-       [BPF_FLOW_DISSECTOR] = "flow_dissector",
-       [BPF_TRACE_RAW_TP] = "raw_tp",
-       [BPF_TRACE_FENTRY] = "fentry",
-       [BPF_TRACE_FEXIT] = "fexit",
-       [BPF_MODIFY_RETURN] = "mod_ret",
-       [BPF_LSM_MAC] = "lsm_mac",
-};
+extern const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE];
 
 extern const char * const map_type_name[];
 extern const size_t map_type_name_size;
 
+/* keep in sync with the definition in skeleton/pid_iter.bpf.c */
 enum bpf_obj_type {
        BPF_OBJ_UNKNOWN,
        BPF_OBJ_PROG,
        BPF_OBJ_MAP,
        BPF_OBJ_LINK,
+       BPF_OBJ_BTF,
 };
 
 extern const char *bin_name;
@@ -139,12 +78,14 @@ extern const char *bin_name;
 extern json_writer_t *json_wtr;
 extern bool json_output;
 extern bool show_pinned;
+extern bool show_pids;
 extern bool block_mount;
 extern bool verifier_logs;
 extern bool relaxed_maps;
 extern struct pinned_obj_table prog_table;
 extern struct pinned_obj_table map_table;
 extern struct pinned_obj_table link_table;
+extern struct obj_refs_table refs_table;
 
 void __printf(1, 2) p_err(const char *fmt, ...);
 void __printf(1, 2) p_info(const char *fmt, ...);
@@ -168,12 +109,35 @@ struct pinned_obj {
        struct hlist_node hash;
 };
 
+struct obj_refs_table {
+       DECLARE_HASHTABLE(table, 16);
+};
+
+struct obj_ref {
+       int pid;
+       char comm[16];
+};
+
+struct obj_refs {
+       struct hlist_node node;
+       __u32 id;
+       int ref_cnt;
+       struct obj_ref *refs;
+};
+
 struct btf;
 struct bpf_line_info;
 
 int build_pinned_obj_table(struct pinned_obj_table *table,
                           enum bpf_obj_type type);
 void delete_pinned_obj_table(struct pinned_obj_table *tab);
+__weak int build_obj_refs_table(struct obj_refs_table *table,
+                               enum bpf_obj_type type);
+__weak void delete_obj_refs_table(struct obj_refs_table *table);
+__weak void emit_obj_refs_json(struct obj_refs_table *table, __u32 id,
+                              json_writer_t *json_wtr);
+__weak void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id,
+                               const char *prefix);
 void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
 void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
 
@@ -188,29 +152,34 @@ int cmd_select(const struct cmd *cmds, int argc, char **argv,
 int get_fd_type(int fd);
 const char *get_fd_type_name(enum bpf_obj_type type);
 char *get_fdinfo(int fd, const char *key);
-int open_obj_pinned(char *path, bool quiet);
-int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type);
+int open_obj_pinned(const char *path, bool quiet);
+int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type);
 int mount_bpffs_for_pin(const char *name);
 int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(int *, char ***));
 int do_pin_fd(int fd, const char *name);
 
-int do_prog(int argc, char **arg);
-int do_map(int argc, char **arg);
-int do_link(int argc, char **arg);
-int do_event_pipe(int argc, char **argv);
-int do_cgroup(int argc, char **arg);
-int do_perf(int argc, char **arg);
-int do_net(int argc, char **arg);
-int do_tracelog(int argc, char **arg);
-int do_feature(int argc, char **argv);
-int do_btf(int argc, char **argv);
+/* commands available in bootstrap mode */
 int do_gen(int argc, char **argv);
-int do_struct_ops(int argc, char **argv);
-int do_iter(int argc, char **argv);
+int do_btf(int argc, char **argv);
+
+/* non-bootstrap only commands */
+int do_prog(int argc, char **arg) __weak;
+int do_map(int argc, char **arg) __weak;
+int do_link(int argc, char **arg) __weak;
+int do_event_pipe(int argc, char **argv) __weak;
+int do_cgroup(int argc, char **arg) __weak;
+int do_perf(int argc, char **arg) __weak;
+int do_net(int argc, char **arg) __weak;
+int do_tracelog(int argc, char **arg) __weak;
+int do_feature(int argc, char **argv) __weak;
+int do_struct_ops(int argc, char **argv) __weak;
+int do_iter(int argc, char **argv) __weak;
 
 int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what);
 int prog_parse_fd(int *argc, char ***argv);
+int prog_parse_fds(int *argc, char ***argv, int **fds);
 int map_parse_fd(int *argc, char ***argv);
+int map_parse_fds(int *argc, char ***argv, int **fds);
 int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len);
 
 struct bpf_prog_linfo;
index c5fac80..3a27d31 100644 (file)
@@ -49,6 +49,7 @@ const char * const map_type_name[] = {
        [BPF_MAP_TYPE_STACK]                    = "stack",
        [BPF_MAP_TYPE_SK_STORAGE]               = "sk_storage",
        [BPF_MAP_TYPE_STRUCT_OPS]               = "struct_ops",
+       [BPF_MAP_TYPE_RINGBUF]                  = "ringbuf",
 };
 
 const size_t map_type_name_size = ARRAY_SIZE(map_type_name);
@@ -92,162 +93,6 @@ static void *alloc_value(struct bpf_map_info *info)
                return malloc(info->value_size);
 }
 
-static int map_fd_by_name(char *name, int **fds)
-{
-       unsigned int id = 0;
-       int fd, nb_fds = 0;
-       void *tmp;
-       int err;
-
-       while (true) {
-               struct bpf_map_info info = {};
-               __u32 len = sizeof(info);
-
-               err = bpf_map_get_next_id(id, &id);
-               if (err) {
-                       if (errno != ENOENT) {
-                               p_err("%s", strerror(errno));
-                               goto err_close_fds;
-                       }
-                       return nb_fds;
-               }
-
-               fd = bpf_map_get_fd_by_id(id);
-               if (fd < 0) {
-                       p_err("can't get map by id (%u): %s",
-                             id, strerror(errno));
-                       goto err_close_fds;
-               }
-
-               err = bpf_obj_get_info_by_fd(fd, &info, &len);
-               if (err) {
-                       p_err("can't get map info (%u): %s",
-                             id, strerror(errno));
-                       goto err_close_fd;
-               }
-
-               if (strncmp(name, info.name, BPF_OBJ_NAME_LEN)) {
-                       close(fd);
-                       continue;
-               }
-
-               if (nb_fds > 0) {
-                       tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
-                       if (!tmp) {
-                               p_err("failed to realloc");
-                               goto err_close_fd;
-                       }
-                       *fds = tmp;
-               }
-               (*fds)[nb_fds++] = fd;
-       }
-
-err_close_fd:
-       close(fd);
-err_close_fds:
-       while (--nb_fds >= 0)
-               close((*fds)[nb_fds]);
-       return -1;
-}
-
-static int map_parse_fds(int *argc, char ***argv, int **fds)
-{
-       if (is_prefix(**argv, "id")) {
-               unsigned int id;
-               char *endptr;
-
-               NEXT_ARGP();
-
-               id = strtoul(**argv, &endptr, 0);
-               if (*endptr) {
-                       p_err("can't parse %s as ID", **argv);
-                       return -1;
-               }
-               NEXT_ARGP();
-
-               (*fds)[0] = bpf_map_get_fd_by_id(id);
-               if ((*fds)[0] < 0) {
-                       p_err("get map by id (%u): %s", id, strerror(errno));
-                       return -1;
-               }
-               return 1;
-       } else if (is_prefix(**argv, "name")) {
-               char *name;
-
-               NEXT_ARGP();
-
-               name = **argv;
-               if (strlen(name) > BPF_OBJ_NAME_LEN - 1) {
-                       p_err("can't parse name");
-                       return -1;
-               }
-               NEXT_ARGP();
-
-               return map_fd_by_name(name, fds);
-       } else if (is_prefix(**argv, "pinned")) {
-               char *path;
-
-               NEXT_ARGP();
-
-               path = **argv;
-               NEXT_ARGP();
-
-               (*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_MAP);
-               if ((*fds)[0] < 0)
-                       return -1;
-               return 1;
-       }
-
-       p_err("expected 'id', 'name' or 'pinned', got: '%s'?", **argv);
-       return -1;
-}
-
-int map_parse_fd(int *argc, char ***argv)
-{
-       int *fds = NULL;
-       int nb_fds, fd;
-
-       fds = malloc(sizeof(int));
-       if (!fds) {
-               p_err("mem alloc failed");
-               return -1;
-       }
-       nb_fds = map_parse_fds(argc, argv, &fds);
-       if (nb_fds != 1) {
-               if (nb_fds > 1) {
-                       p_err("several maps match this handle");
-                       while (nb_fds--)
-                               close(fds[nb_fds]);
-               }
-               fd = -1;
-               goto exit_free;
-       }
-
-       fd = fds[0];
-exit_free:
-       free(fds);
-       return fd;
-}
-
-int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len)
-{
-       int err;
-       int fd;
-
-       fd = map_parse_fd(argc, argv);
-       if (fd < 0)
-               return -1;
-
-       err = bpf_obj_get_info_by_fd(fd, info, info_len);
-       if (err) {
-               p_err("can't get map info: %s", strerror(errno));
-               close(fd);
-               return err;
-       }
-
-       return fd;
-}
-
 static int do_dump_btf(const struct btf_dumper *d,
                       struct bpf_map_info *map_info, void *key,
                       void *value)
@@ -628,7 +473,7 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
                if (owner_prog_type) {
                        unsigned int prog_type = atoi(owner_prog_type);
 
-                       if (prog_type < ARRAY_SIZE(prog_type_name))
+                       if (prog_type < prog_type_name_size)
                                jsonw_string_field(json_wtr, "owner_prog_type",
                                                   prog_type_name[prog_type]);
                        else
@@ -665,6 +510,8 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
                jsonw_end_array(json_wtr);
        }
 
+       emit_obj_refs_json(&refs_table, info->id, json_wtr);
+
        jsonw_end_object(json_wtr);
 
        return 0;
@@ -711,7 +558,7 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
                if (owner_prog_type) {
                        unsigned int prog_type = atoi(owner_prog_type);
 
-                       if (prog_type < ARRAY_SIZE(prog_type_name))
+                       if (prog_type < prog_type_name_size)
                                printf("owner_prog_type %s  ",
                                       prog_type_name[prog_type]);
                        else
@@ -752,6 +599,8 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
        if (frozen)
                printf("%sfrozen", info->btf_id ? "  " : "");
 
+       emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");
+
        printf("\n");
        return 0;
 }
@@ -810,6 +659,7 @@ static int do_show(int argc, char **argv)
 
        if (show_pinned)
                build_pinned_obj_table(&map_table, BPF_OBJ_MAP);
+       build_obj_refs_table(&refs_table, BPF_OBJ_MAP);
 
        if (argc == 2)
                return do_show_subset(argc, argv);
@@ -853,6 +703,8 @@ static int do_show(int argc, char **argv)
        if (json_output)
                jsonw_end_array(json_wtr);
 
+       delete_obj_refs_table(&refs_table);
+
        return errno == ENOENT ? 0 : -1;
 }
 
@@ -1590,7 +1442,7 @@ static int do_help(int argc, char **argv)
                "                 lru_percpu_hash | lpm_trie | array_of_maps | hash_of_maps |\n"
                "                 devmap | devmap_hash | sockmap | cpumap | xskmap | sockhash |\n"
                "                 cgroup_storage | reuseport_sockarray | percpu_cgroup_storage |\n"
-               "                 queue | stack | sk_storage | struct_ops }\n"
+               "                 queue | stack | sk_storage | struct_ops | ringbuf }\n"
                "       " HELP_SPEC_OPTIONS "\n"
                "",
                bin_name, argv[-2]);
diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c
new file mode 100644 (file)
index 0000000..e3b1163
--- /dev/null
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2020 Facebook */
+#include <errno.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <bpf/bpf.h>
+
+#include "main.h"
+#include "skeleton/pid_iter.h"
+
+#ifdef BPFTOOL_WITHOUT_SKELETONS
+
+/* BPF skeleton support compiled out: the object-to-PID mapping cannot be
+ * collected, so table building reports ENOTSUP and the remaining helpers
+ * are no-ops.
+ */
+int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type)
+{
+       return -ENOTSUP;
+}
+void delete_obj_refs_table(struct obj_refs_table *table) {}
+void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, const char *prefix) {}
+void emit_obj_refs_json(struct obj_refs_table *table, __u32 id, json_writer_t *json_writer) {}
+
+#else /* BPFTOOL_WITHOUT_SKELETONS */
+
+#include "pid_iter.skel.h"
+
+/* Record that the process described by 'e' holds a reference to BPF
+ * object ID e->id.  Duplicate PIDs for the same ID are ignored; on
+ * allocation failure the reference is dropped after printing an error.
+ */
+static void add_ref(struct obj_refs_table *table, struct pid_iter_entry *e)
+{
+       struct obj_refs *refs;
+       struct obj_ref *ref;
+       void *tmp;
+       int i;
+
+       /* look for an existing entry for this object ID */
+       hash_for_each_possible(table->table, refs, node, e->id) {
+               if (refs->id != e->id)
+                       continue;
+
+               /* skip if this PID was already recorded for the ID */
+               for (i = 0; i < refs->ref_cnt; i++) {
+                       if (refs->refs[i].pid == e->pid)
+                               return;
+               }
+
+               /* grow the refs array by one and append (pid, comm) */
+               tmp = realloc(refs->refs, (refs->ref_cnt + 1) * sizeof(*ref));
+               if (!tmp) {
+                       p_err("failed to re-alloc memory for ID %u, PID %d, COMM %s...",
+                             e->id, e->pid, e->comm);
+                       return;
+               }
+               refs->refs = tmp;
+               ref = &refs->refs[refs->ref_cnt];
+               ref->pid = e->pid;
+               memcpy(ref->comm, e->comm, sizeof(ref->comm));
+               refs->ref_cnt++;
+
+               return;
+       }
+
+       /* new ref: first time we see this object ID */
+       refs = calloc(1, sizeof(*refs));
+       if (!refs) {
+               p_err("failed to alloc memory for ID %u, PID %d, COMM %s...",
+                     e->id, e->pid, e->comm);
+               return;
+       }
+
+       refs->id = e->id;
+       refs->refs = malloc(sizeof(*refs->refs));
+       if (!refs->refs) {
+               free(refs);
+               p_err("failed to alloc memory for ID %u, PID %d, COMM %s...",
+                     e->id, e->pid, e->comm);
+               return;
+       }
+       ref = &refs->refs[0];
+       ref->pid = e->pid;
+       memcpy(ref->comm, e->comm, sizeof(ref->comm));
+       refs->ref_cnt = 1;
+       hash_add(table->table, &refs->node, e->id);
+}
+
+/* libbpf print callback that swallows all output; installed temporarily
+ * while probing whether the kernel supports BPF iterators, so probe
+ * failures do not pollute bpftool's output.
+ */
+static int __printf(2, 0)
+libbpf_print_none(__maybe_unused enum libbpf_print_level level,
+                 __maybe_unused const char *format,
+                 __maybe_unused va_list args)
+{
+       return 0;
+}
+
+/* Walk all tasks' open FDs via a BPF task_file iterator and collect,
+ * per BPF object ID of the given 'type', the set of PIDs holding a
+ * reference.  Returns 0 on success — including on kernels without BPF
+ * iterator support, where the table is simply left empty — or a
+ * negative error code.
+ */
+int build_obj_refs_table(struct obj_refs_table *table, enum bpf_obj_type type)
+{
+       char buf[4096];
+       struct pid_iter_bpf *skel;
+       struct pid_iter_entry *e;
+       int err, ret, fd = -1, i;
+       libbpf_print_fn_t default_print;
+
+       hash_init(table->table);
+       set_max_rlimit();
+
+       skel = pid_iter_bpf__open();
+       if (!skel) {
+               p_err("failed to open PID iterator skeleton");
+               return -1;
+       }
+
+       /* filter in the BPF program: only report FDs of this object type */
+       skel->rodata->obj_type = type;
+
+       /* we don't want output polluted with libbpf errors if bpf_iter is not
+        * supported
+        */
+       default_print = libbpf_set_print(libbpf_print_none);
+       err = pid_iter_bpf__load(skel);
+       libbpf_set_print(default_print);
+       if (err) {
+               /* too bad, kernel doesn't support BPF iterators yet */
+               err = 0;
+               goto out;
+       }
+       err = pid_iter_bpf__attach(skel);
+       if (err) {
+               /* if we loaded above successfully, attach has to succeed */
+               p_err("failed to attach PID iterator: %d", err);
+               goto out;
+       }
+
+       fd = bpf_iter_create(bpf_link__fd(skel->links.iter));
+       if (fd < 0) {
+               err = -errno;
+               p_err("failed to create PID iterator session: %d", err);
+               goto out;
+       }
+
+       /* drain fixed-size pid_iter_entry records from the iterator */
+       while (true) {
+               ret = read(fd, buf, sizeof(buf));
+               if (ret < 0) {
+                       err = -errno;
+                       p_err("failed to read PID iterator output: %d", err);
+                       goto out;
+               }
+               if (ret == 0)
+                       break;
+               /* output must be a whole number of entries */
+               if (ret % sizeof(*e)) {
+                       err = -EINVAL;
+                       p_err("invalid PID iterator output format");
+                       goto out;
+               }
+               ret /= sizeof(*e);
+
+               e = (void *)buf;
+               for (i = 0; i < ret; i++, e++) {
+                       add_ref(table, e);
+               }
+       }
+       err = 0;
+out:
+       if (fd >= 0)
+               close(fd);
+       pid_iter_bpf__destroy(skel);
+       return err;
+}
+
+/* Free every entry accumulated by build_obj_refs_table(). */
+void delete_obj_refs_table(struct obj_refs_table *table)
+{
+       struct obj_refs *refs;
+       struct hlist_node *tmp;
+       unsigned int bkt;
+
+       /* _safe variant: entries are removed while iterating */
+       hash_for_each_safe(table->table, bkt, tmp, refs, node) {
+               hash_del(&refs->node);
+               free(refs->refs);
+               free(refs);
+       }
+}
+
+/* Emit a "pids" JSON array of {pid, comm} objects describing which
+ * processes hold a reference to BPF object 'id'; emits nothing when the
+ * table is empty or contains no entry for this ID.
+ */
+void emit_obj_refs_json(struct obj_refs_table *table, __u32 id,
+                       json_writer_t *json_writer)
+{
+       struct obj_refs *refs;
+       struct obj_ref *ref;
+       int i;
+
+       if (hash_empty(table->table))
+               return;
+
+       hash_for_each_possible(table->table, refs, node, id) {
+               /* the bucket may contain other IDs that hash alike */
+               if (refs->id != id)
+                       continue;
+               if (refs->ref_cnt == 0)
+                       break;
+
+               jsonw_name(json_writer, "pids");
+               jsonw_start_array(json_writer);
+               for (i = 0; i < refs->ref_cnt; i++) {
+                       ref = &refs->refs[i];
+                       jsonw_start_object(json_writer);
+                       jsonw_int_field(json_writer, "pid", ref->pid);
+                       jsonw_string_field(json_writer, "comm", ref->comm);
+                       jsonw_end_object(json_writer);
+               }
+               jsonw_end_array(json_writer);
+               break;
+       }
+}
+
+/* Print "<prefix>comm(pid), comm(pid), ..." for BPF object 'id';
+ * prints nothing when no recorded process references it.
+ */
+void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id, const char *prefix)
+{
+       struct obj_refs *refs;
+       struct obj_ref *ref;
+       int i;
+
+       if (hash_empty(table->table))
+               return;
+
+       hash_for_each_possible(table->table, refs, node, id) {
+               /* the bucket may contain other IDs that hash alike */
+               if (refs->id != id)
+                       continue;
+               if (refs->ref_cnt == 0)
+                       break;
+
+               printf("%s", prefix);
+               for (i = 0; i < refs->ref_cnt; i++) {
+                       ref = &refs->refs[i];
+                       printf("%s%s(%d)", i == 0 ? "" : ", ", ref->comm, ref->pid);
+               }
+               break;
+       }
+}
+
+
+#endif
index a5eff83..3e6ecc6 100644 (file)
 #include "main.h"
 #include "xlated_dumper.h"
 
+/* Human-readable names for BPF program types, indexed by
+ * enum bpf_prog_type.  Exported (together with prog_type_name_size for
+ * bounds checking) so other bpftool files, e.g. the map listing code,
+ * can print owner program types.
+ */
+const char * const prog_type_name[] = {
+       [BPF_PROG_TYPE_UNSPEC]                  = "unspec",
+       [BPF_PROG_TYPE_SOCKET_FILTER]           = "socket_filter",
+       [BPF_PROG_TYPE_KPROBE]                  = "kprobe",
+       [BPF_PROG_TYPE_SCHED_CLS]               = "sched_cls",
+       [BPF_PROG_TYPE_SCHED_ACT]               = "sched_act",
+       [BPF_PROG_TYPE_TRACEPOINT]              = "tracepoint",
+       [BPF_PROG_TYPE_XDP]                     = "xdp",
+       [BPF_PROG_TYPE_PERF_EVENT]              = "perf_event",
+       [BPF_PROG_TYPE_CGROUP_SKB]              = "cgroup_skb",
+       [BPF_PROG_TYPE_CGROUP_SOCK]             = "cgroup_sock",
+       [BPF_PROG_TYPE_LWT_IN]                  = "lwt_in",
+       [BPF_PROG_TYPE_LWT_OUT]                 = "lwt_out",
+       [BPF_PROG_TYPE_LWT_XMIT]                = "lwt_xmit",
+       [BPF_PROG_TYPE_SOCK_OPS]                = "sock_ops",
+       [BPF_PROG_TYPE_SK_SKB]                  = "sk_skb",
+       [BPF_PROG_TYPE_CGROUP_DEVICE]           = "cgroup_device",
+       [BPF_PROG_TYPE_SK_MSG]                  = "sk_msg",
+       [BPF_PROG_TYPE_RAW_TRACEPOINT]          = "raw_tracepoint",
+       [BPF_PROG_TYPE_CGROUP_SOCK_ADDR]        = "cgroup_sock_addr",
+       [BPF_PROG_TYPE_LWT_SEG6LOCAL]           = "lwt_seg6local",
+       [BPF_PROG_TYPE_LIRC_MODE2]              = "lirc_mode2",
+       [BPF_PROG_TYPE_SK_REUSEPORT]            = "sk_reuseport",
+       [BPF_PROG_TYPE_FLOW_DISSECTOR]          = "flow_dissector",
+       [BPF_PROG_TYPE_CGROUP_SYSCTL]           = "cgroup_sysctl",
+       [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
+       [BPF_PROG_TYPE_CGROUP_SOCKOPT]          = "cgroup_sockopt",
+       [BPF_PROG_TYPE_TRACING]                 = "tracing",
+       [BPF_PROG_TYPE_STRUCT_OPS]              = "struct_ops",
+       [BPF_PROG_TYPE_EXT]                     = "ext",
+       [BPF_PROG_TYPE_SK_LOOKUP]               = "sk_lookup",
+};
+
+/* number of entries in prog_type_name, for range checks before indexing */
+const size_t prog_type_name_size = ARRAY_SIZE(prog_type_name);
+
 enum dump_mode {
        DUMP_JITED,
        DUMP_XLATED,
@@ -86,158 +121,6 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
                strftime(buf, size, "%FT%T%z", &load_tm);
 }
 
-static int prog_fd_by_nametag(void *nametag, int **fds, bool tag)
-{
-       unsigned int id = 0;
-       int fd, nb_fds = 0;
-       void *tmp;
-       int err;
-
-       while (true) {
-               struct bpf_prog_info info = {};
-               __u32 len = sizeof(info);
-
-               err = bpf_prog_get_next_id(id, &id);
-               if (err) {
-                       if (errno != ENOENT) {
-                               p_err("%s", strerror(errno));
-                               goto err_close_fds;
-                       }
-                       return nb_fds;
-               }
-
-               fd = bpf_prog_get_fd_by_id(id);
-               if (fd < 0) {
-                       p_err("can't get prog by id (%u): %s",
-                             id, strerror(errno));
-                       goto err_close_fds;
-               }
-
-               err = bpf_obj_get_info_by_fd(fd, &info, &len);
-               if (err) {
-                       p_err("can't get prog info (%u): %s",
-                             id, strerror(errno));
-                       goto err_close_fd;
-               }
-
-               if ((tag && memcmp(nametag, info.tag, BPF_TAG_SIZE)) ||
-                   (!tag && strncmp(nametag, info.name, BPF_OBJ_NAME_LEN))) {
-                       close(fd);
-                       continue;
-               }
-
-               if (nb_fds > 0) {
-                       tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
-                       if (!tmp) {
-                               p_err("failed to realloc");
-                               goto err_close_fd;
-                       }
-                       *fds = tmp;
-               }
-               (*fds)[nb_fds++] = fd;
-       }
-
-err_close_fd:
-       close(fd);
-err_close_fds:
-       while (--nb_fds >= 0)
-               close((*fds)[nb_fds]);
-       return -1;
-}
-
-static int prog_parse_fds(int *argc, char ***argv, int **fds)
-{
-       if (is_prefix(**argv, "id")) {
-               unsigned int id;
-               char *endptr;
-
-               NEXT_ARGP();
-
-               id = strtoul(**argv, &endptr, 0);
-               if (*endptr) {
-                       p_err("can't parse %s as ID", **argv);
-                       return -1;
-               }
-               NEXT_ARGP();
-
-               (*fds)[0] = bpf_prog_get_fd_by_id(id);
-               if ((*fds)[0] < 0) {
-                       p_err("get by id (%u): %s", id, strerror(errno));
-                       return -1;
-               }
-               return 1;
-       } else if (is_prefix(**argv, "tag")) {
-               unsigned char tag[BPF_TAG_SIZE];
-
-               NEXT_ARGP();
-
-               if (sscanf(**argv, BPF_TAG_FMT, tag, tag + 1, tag + 2,
-                          tag + 3, tag + 4, tag + 5, tag + 6, tag + 7)
-                   != BPF_TAG_SIZE) {
-                       p_err("can't parse tag");
-                       return -1;
-               }
-               NEXT_ARGP();
-
-               return prog_fd_by_nametag(tag, fds, true);
-       } else if (is_prefix(**argv, "name")) {
-               char *name;
-
-               NEXT_ARGP();
-
-               name = **argv;
-               if (strlen(name) > BPF_OBJ_NAME_LEN - 1) {
-                       p_err("can't parse name");
-                       return -1;
-               }
-               NEXT_ARGP();
-
-               return prog_fd_by_nametag(name, fds, false);
-       } else if (is_prefix(**argv, "pinned")) {
-               char *path;
-
-               NEXT_ARGP();
-
-               path = **argv;
-               NEXT_ARGP();
-
-               (*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_PROG);
-               if ((*fds)[0] < 0)
-                       return -1;
-               return 1;
-       }
-
-       p_err("expected 'id', 'tag', 'name' or 'pinned', got: '%s'?", **argv);
-       return -1;
-}
-
-int prog_parse_fd(int *argc, char ***argv)
-{
-       int *fds = NULL;
-       int nb_fds, fd;
-
-       fds = malloc(sizeof(int));
-       if (!fds) {
-               p_err("mem alloc failed");
-               return -1;
-       }
-       nb_fds = prog_parse_fds(argc, argv, &fds);
-       if (nb_fds != 1) {
-               if (nb_fds > 1) {
-                       p_err("several programs match this handle");
-                       while (nb_fds--)
-                               close(fds[nb_fds]);
-               }
-               fd = -1;
-               goto exit_free;
-       }
-
-       fd = fds[0];
-exit_free:
-       free(fds);
-       return fd;
-}
-
 static void show_prog_maps(int fd, __u32 num_maps)
 {
        struct bpf_prog_info info = {};
@@ -342,6 +225,8 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
                jsonw_end_array(json_wtr);
        }
 
+       emit_obj_refs_json(&refs_table, info->id, json_wtr);
+
        jsonw_end_object(json_wtr);
 }
 
@@ -408,6 +293,8 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
        if (info->btf_id)
                printf("\n\tbtf_id %d", info->btf_id);
 
+       emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");
+
        printf("\n");
 }
 
@@ -473,6 +360,7 @@ static int do_show(int argc, char **argv)
 
        if (show_pinned)
                build_pinned_obj_table(&prog_table, BPF_OBJ_PROG);
+       build_obj_refs_table(&refs_table, BPF_OBJ_PROG);
 
        if (argc == 2)
                return do_show_subset(argc, argv);
@@ -514,6 +402,8 @@ static int do_show(int argc, char **argv)
        if (json_output)
                jsonw_end_array(json_wtr);
 
+       delete_obj_refs_table(&refs_table);
+
        return err;
 }
 
@@ -2016,7 +1906,7 @@ static int do_help(int argc, char **argv)
                "                 cgroup/getsockname4 | cgroup/getsockname6 | cgroup/sendmsg4 |\n"
                "                 cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n"
                "                 cgroup/getsockopt | cgroup/setsockopt |\n"
-               "                 struct_ops | fentry | fexit | freplace }\n"
+               "                 struct_ops | fentry | fexit | freplace | sk_lookup }\n"
                "       ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n"
                "                        flow_dissector }\n"
                "       METRIC := { cycles | instructions | l1d_loads | llc_misses }\n"
diff --git a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
new file mode 100644 (file)
index 0000000..d9b4209
--- /dev/null
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (c) 2020 Facebook */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_tracing.h>
+#include "pid_iter.h"
+
+/* keep in sync with the definition in main.h */
+enum bpf_obj_type {
+       BPF_OBJ_UNKNOWN,
+       BPF_OBJ_PROG,
+       BPF_OBJ_MAP,
+       BPF_OBJ_LINK,
+       BPF_OBJ_BTF,
+};
+
+extern const void bpf_link_fops __ksym;
+extern const void bpf_map_fops __ksym;
+extern const void bpf_prog_fops __ksym;
+extern const void btf_fops __ksym;
+
+const volatile enum bpf_obj_type obj_type = BPF_OBJ_UNKNOWN;
+
+/* Read the kernel-assigned object ID out of a BPF object of the given
+ * type; BPF_CORE_READ keeps the field offsets CO-RE-relocatable across
+ * kernel versions.  Returns 0 for unknown types.
+ */
+static __always_inline __u32 get_obj_id(void *ent, enum bpf_obj_type type)
+{
+       switch (type) {
+       case BPF_OBJ_PROG:
+               return BPF_CORE_READ((struct bpf_prog *)ent, aux, id);
+       case BPF_OBJ_MAP:
+               return BPF_CORE_READ((struct bpf_map *)ent, id);
+       case BPF_OBJ_BTF:
+               return BPF_CORE_READ((struct btf *)ent, id);
+       case BPF_OBJ_LINK:
+               return BPF_CORE_READ((struct bpf_link *)ent, id);
+       default:
+               return 0;
+       }
+}
+
+/* task_file iterator: for every open file of every task, emit one
+ * pid_iter_entry when the file is a BPF object of the configured
+ * obj_type (matched via its file_operations pointer).
+ */
+SEC("iter/task_file")
+int iter(struct bpf_iter__task_file *ctx)
+{
+       struct file *file = ctx->file;
+       struct task_struct *task = ctx->task;
+       struct pid_iter_entry e;
+       const void *fops;
+
+       if (!file || !task)
+               return 0;
+
+       /* select the file_operations table identifying obj_type */
+       switch (obj_type) {
+       case BPF_OBJ_PROG:
+               fops = &bpf_prog_fops;
+               break;
+       case BPF_OBJ_MAP:
+               fops = &bpf_map_fops;
+               break;
+       case BPF_OBJ_BTF:
+               fops = &btf_fops;
+               break;
+       case BPF_OBJ_LINK:
+               fops = &bpf_link_fops;
+               break;
+       default:
+               return 0;
+       }
+
+       if (file->f_op != fops)
+               return 0;
+
+       /* report the process (tgid) rather than individual threads */
+       e.pid = task->tgid;
+       e.id = get_obj_id(file->private_data, obj_type);
+       bpf_probe_read_kernel(&e.comm, sizeof(e.comm),
+                             task->group_leader->comm);
+       bpf_seq_write(ctx->meta->seq, &e, sizeof(e));
+
+       return 0;
+}
+
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
diff --git a/tools/bpf/bpftool/skeleton/pid_iter.h b/tools/bpf/bpftool/skeleton/pid_iter.h
new file mode 100644 (file)
index 0000000..5692cf2
--- /dev/null
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2020 Facebook */
+#ifndef __PID_ITER_H
+#define __PID_ITER_H
+
+/* One record per (process, BPF object) pair, written by the iterator
+ * BPF program and read back by bpftool; layout must match on both sides.
+ */
+struct pid_iter_entry {
+       __u32 id;       /* kernel-assigned BPF object ID */
+       int pid;        /* tgid of the process holding the FD */
+       char comm[16];  /* comm of the thread-group leader */
+};
+
+#endif
index 20034c1..4e3512f 100644 (file)
@@ -1,7 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
 // Copyright (c) 2020 Facebook
-#include "profiler.h"
-#include <linux/bpf.h>
+#include <vmlinux.h>
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
@@ -116,4 +115,4 @@ int BPF_PROG(fexit_XXX)
        return 0;
 }
 
-char LICENSE[] SEC("license") = "GPL";
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
diff --git a/tools/bpf/bpftool/skeleton/profiler.h b/tools/bpf/bpftool/skeleton/profiler.h
deleted file mode 100644 (file)
index 1f767e9..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
-#ifndef __PROFILER_H
-#define __PROFILER_H
-
-/* useful typedefs from vmlinux.h */
-
-typedef signed char __s8;
-typedef unsigned char __u8;
-typedef short int __s16;
-typedef short unsigned int __u16;
-typedef int __s32;
-typedef unsigned int __u32;
-typedef long long int __s64;
-typedef long long unsigned int __u64;
-
-typedef __s8 s8;
-typedef __u8 u8;
-typedef __s16 s16;
-typedef __u16 u16;
-typedef __s32 s32;
-typedef __u32 u32;
-typedef __s64 s64;
-typedef __u64 u64;
-
-enum {
-       false = 0,
-       true = 1,
-};
-
-#ifdef __CHECKER__
-#define __bitwise__ __attribute__((bitwise))
-#else
-#define __bitwise__
-#endif
-
-typedef __u16 __bitwise__ __le16;
-typedef __u16 __bitwise__ __be16;
-typedef __u32 __bitwise__ __le32;
-typedef __u32 __bitwise__ __be32;
-typedef __u64 __bitwise__ __le64;
-typedef __u64 __bitwise__ __be64;
-
-typedef __u16 __bitwise__ __sum16;
-typedef __u32 __bitwise__ __wsum;
-
-#endif /* __PROFILER_H */
diff --git a/tools/bpf/resolve_btfids/Build b/tools/bpf/resolve_btfids/Build
new file mode 100644 (file)
index 0000000..ae82da0
--- /dev/null
@@ -0,0 +1,10 @@
+resolve_btfids-y += main.o
+resolve_btfids-y += rbtree.o
+resolve_btfids-y += zalloc.o
+resolve_btfids-y += string.o
+resolve_btfids-y += ctype.o
+resolve_btfids-y += str_error_r.o
+
+$(OUTPUT)%.o: ../../lib/%.c FORCE
+       $(call rule_mkdir)
+       $(call if_changed_dep,cc_o_c)
diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
new file mode 100644 (file)
index 0000000..a88cd44
--- /dev/null
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: GPL-2.0-only
+include ../../scripts/Makefile.include
+
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
+
+ifeq ($(V),1)
+  Q =
+  msg =
+else
+  Q = @
+  msg = @printf '  %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))";
+  MAKEFLAGS=--no-print-directory
+endif
+
+# always use the host compiler
+ifneq ($(LLVM),)
+HOSTAR  ?= llvm-ar
+HOSTCC  ?= clang
+HOSTLD  ?= ld.lld
+else
+HOSTAR  ?= ar
+HOSTCC  ?= gcc
+HOSTLD  ?= ld
+endif
+AR       = $(HOSTAR)
+CC       = $(HOSTCC)
+LD       = $(HOSTLD)
+
+OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/
+
+LIBBPF_SRC := $(srctree)/tools/lib/bpf/
+SUBCMD_SRC := $(srctree)/tools/lib/subcmd/
+
+BPFOBJ     := $(OUTPUT)/libbpf.a
+SUBCMDOBJ  := $(OUTPUT)/libsubcmd.a
+
+BINARY     := $(OUTPUT)/resolve_btfids
+BINARY_IN  := $(BINARY)-in.o
+
+all: $(BINARY)
+
+$(OUTPUT):
+       $(call msg,MKDIR,,$@)
+       $(Q)mkdir -p $(OUTPUT)
+
+$(SUBCMDOBJ): fixdep FORCE
+       $(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(OUTPUT)
+
+$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)
+       $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC)  OUTPUT=$(abspath $(dir $@))/ $(abspath $@)
+
+CFLAGS := -g \
+          -I$(srctree)/tools/include \
+          -I$(srctree)/tools/include/uapi \
+          -I$(LIBBPF_SRC) \
+          -I$(SUBCMD_SRC)
+
+LIBS = -lelf -lz
+
+export srctree OUTPUT CFLAGS Q
+include $(srctree)/tools/build/Makefile.include
+
+$(BINARY_IN): fixdep FORCE
+       $(Q)$(MAKE) $(build)=resolve_btfids
+
+$(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN)
+       $(call msg,LINK,$@)
+       $(Q)$(CC) $(BINARY_IN) $(LDFLAGS) -o $@ $(BPFOBJ) $(SUBCMDOBJ) $(LIBS)
+
+libsubcmd-clean:
+       $(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(OUTPUT) clean
+
+libbpf-clean:
+       $(Q)$(MAKE) -C $(LIBBPF_SRC) OUTPUT=$(OUTPUT) clean
+
+clean: libsubcmd-clean libbpf-clean fixdep-clean
+       $(call msg,CLEAN,$(BINARY))
+       $(Q)$(RM) -f $(BINARY); \
+       find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
+
+tags:
+       $(call msg,GEN,,tags)
+       $(Q)ctags -R . $(LIBBPF_SRC) $(SUBCMD_SRC)
+
+FORCE:
+
+.PHONY: all FORCE clean tags
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
new file mode 100644 (file)
index 0000000..6956b63
--- /dev/null
@@ -0,0 +1,721 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+/*
+ * resolve_btfids scans Elf object for .BTF_ids section and resolves
+ * its symbols with BTF ID values.
+ *
+ * Each symbol points to 4 bytes data and is expected to have
+ * following name syntax:
+ *
+ * __BTF_ID__<type>__<symbol>[__<id>]
+ *
+ * type is:
+ *
+ *   func    - lookup BTF_KIND_FUNC symbol with <symbol> name
+ *             and store its ID into the data:
+ *
+ *             __BTF_ID__func__vfs_close__1:
+ *             .zero 4
+ *
+ *   struct  - lookup BTF_KIND_STRUCT symbol with <symbol> name
+ *             and store its ID into the data:
+ *
+ *             __BTF_ID__struct__sk_buff__1:
+ *             .zero 4
+ *
+ *   union   - lookup BTF_KIND_UNION symbol with <symbol> name
+ *             and store its ID into the data:
+ *
+ *             __BTF_ID__union__thread_union__1:
+ *             .zero 4
+ *
+ *   typedef - lookup BTF_KIND_TYPEDEF symbol with <symbol> name
+ *             and store its ID into the data:
+ *
+ *             __BTF_ID__typedef__pid_t__1:
+ *             .zero 4
+ *
+ *   set     - store symbol size into first 4 bytes and sort following
+ *             ID list
+ *
+ *             __BTF_ID__set__list:
+ *             .zero 4
+ *             list:
+ *             __BTF_ID__func__vfs_getattr__3:
+ *             .zero 4
+ *             __BTF_ID__func__vfs_fallocate__4:
+ *             .zero 4
+ */
+
+#define  _GNU_SOURCE
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <libelf.h>
+#include <gelf.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <linux/rbtree.h>
+#include <linux/zalloc.h>
+#include <linux/err.h>
+#include <btf.h>
+#include <libbpf.h>
+#include <parse-options.h>
+
+#define BTF_IDS_SECTION        ".BTF_ids"
+#define BTF_ID         "__BTF_ID__"
+
+#define BTF_STRUCT     "struct"
+#define BTF_UNION      "union"
+#define BTF_TYPEDEF    "typedef"
+#define BTF_FUNC       "func"
+#define BTF_SET                "set"
+
+#define ADDR_CNT       100
+
+struct btf_id {
+       struct rb_node   rb_node;
+       char            *name;
+       union {
+               int      id;
+               int      cnt;
+       };
+       int              addr_cnt;
+       Elf64_Addr       addr[ADDR_CNT];
+};
+
+struct object {
+       const char *path;
+       const char *btf;
+
+       struct {
+               int              fd;
+               Elf             *elf;
+               Elf_Data        *symbols;
+               Elf_Data        *idlist;
+               int              symbols_shndx;
+               int              idlist_shndx;
+               size_t           strtabidx;
+               unsigned long    idlist_addr;
+       } efile;
+
+       struct rb_root  sets;
+       struct rb_root  structs;
+       struct rb_root  unions;
+       struct rb_root  typedefs;
+       struct rb_root  funcs;
+
+       int nr_funcs;
+       int nr_structs;
+       int nr_unions;
+       int nr_typedefs;
+};
+
+static int verbose;
+
+/* Print a message to stderr when the current verbosity 'var' is at
+ * least 'level'.  Returns the number of characters printed, or 0 when
+ * the message was suppressed.
+ *
+ * Fix: 'ret' was previously left uninitialized on the suppressed path,
+ * so the function returned an indeterminate value (undefined behavior).
+ */
+int eprintf(int level, int var, const char *fmt, ...)
+{
+       va_list args;
+       int ret = 0;
+
+       if (var >= level) {
+               va_start(args, fmt);
+               ret = vfprintf(stderr, fmt, args);
+               va_end(args);
+       }
+       return ret;
+}
+
+#ifndef pr_fmt
+#define pr_fmt(fmt) fmt
+#endif
+
+#define pr_debug(fmt, ...) \
+       eprintf(1, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debugN(n, fmt, ...) \
+       eprintf(n, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err(fmt, ...) \
+       eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+
+/* return true if 'name' is non-NULL and starts with "__BTF_ID__" */
+static bool is_btf_id(const char *name)
+{
+       return name && !strncmp(name, BTF_ID, sizeof(BTF_ID) - 1);
+}
+
+/* Look up 'name' in the rb-tree of btf_id entries rooted at 'root';
+ * returns the matching entry or NULL when not found.
+ */
+static struct btf_id *btf_id__find(struct rb_root *root, const char *name)
+{
+       struct rb_node *p = root->rb_node;
+       struct btf_id *id;
+       int cmp;
+
+       while (p) {
+               id = rb_entry(p, struct btf_id, rb_node);
+               cmp = strcmp(id->name, name);
+               if (cmp < 0)
+                       p = p->rb_left;
+               else if (cmp > 0)
+                       p = p->rb_right;
+               else
+                       return id;
+       }
+       return NULL;
+}
+
+/* Insert a btf_id named 'name' into the rb-tree at 'root'.  When an
+ * entry with the same name already exists, return it if 'unique' is
+ * false, or NULL if 'unique' is true.  Also returns NULL on allocation
+ * failure.  On successful insert the tree takes ownership of 'name'.
+ */
+static struct btf_id*
+btf_id__add(struct rb_root *root, char *name, bool unique)
+{
+       struct rb_node **p = &root->rb_node;
+       struct rb_node *parent = NULL;
+       struct btf_id *id;
+       int cmp;
+
+       while (*p != NULL) {
+               parent = *p;
+               id = rb_entry(parent, struct btf_id, rb_node);
+               cmp = strcmp(id->name, name);
+               if (cmp < 0)
+                       p = &(*p)->rb_left;
+               else if (cmp > 0)
+                       p = &(*p)->rb_right;
+               else
+                       return unique ? NULL : id;
+       }
+
+       /* zalloc leaves id/cnt, addr_cnt and addr[] zeroed */
+       id = zalloc(sizeof(*id));
+       if (id) {
+               pr_debug("adding symbol %s\n", name);
+               id->name = name;
+               rb_link_node(&id->rb_node, parent, p);
+               rb_insert_color(&id->rb_node, root);
+       }
+       return id;
+}
+
+/* Given a pointer just past the "__BTF_ID__" prefix, duplicate the
+ * "<type>__<symbol>__<id>" tail and strip the trailing "__<id>" part,
+ * returning "<type>__<symbol>" in newly allocated memory (caller frees).
+ * Returns NULL on allocation failure or if the name is malformed.
+ *
+ * Fix: the strrchr() result was decremented and dereferenced without a
+ * NULL check, so a symbol without any '_' after the prefix (or one
+ * starting with '_') crashed or read before the buffer.
+ */
+static char *get_id(const char *prefix_end)
+{
+       /*
+        * __BTF_ID__func__vfs_truncate__0
+        * prefix_end =  ^
+        */
+       char *p, *id = strdup(prefix_end + sizeof("__") - 1);
+
+       if (id) {
+               /*
+                * __BTF_ID__func__vfs_truncate__0
+                * id =            ^
+                *
+                * cut the unique id part
+                */
+               p = strrchr(id, '_');
+               /* require a "__<id>" suffix: a '_' that is neither
+                * missing nor at the start, preceded by another '_'
+                */
+               if (!p || p == id || *(--p) != '_') {
+                       free(id);
+                       return NULL;
+               }
+               *p = '\0';
+       }
+       return id;
+}
+
+static struct btf_id *add_symbol(struct rb_root *root, char *name, size_t size)
+{
+       char *id;
+
+       id = get_id(name + size);
+       if (!id) {
+               pr_err("FAILED to parse symbol name: %s\n", name);
+               return NULL;
+       }
+
+       return btf_id__add(root, id, false);
+}
+
+static int elf_collect(struct object *obj)
+{
+       Elf_Scn *scn = NULL;
+       size_t shdrstrndx;
+       int idx = 0;
+       Elf *elf;
+       int fd;
+
+       fd = open(obj->path, O_RDWR, 0666);
+       if (fd == -1) {
+               pr_err("FAILED cannot open %s: %s\n",
+                       obj->path, strerror(errno));
+               return -1;
+       }
+
+       elf_version(EV_CURRENT);
+
+       elf = elf_begin(fd, ELF_C_RDWR_MMAP, NULL);
+       if (!elf) {
+               pr_err("FAILED cannot create ELF descriptor: %s\n",
+                       elf_errmsg(-1));
+               return -1;
+       }
+
+       obj->efile.fd  = fd;
+       obj->efile.elf = elf;
+
+       elf_flagelf(elf, ELF_C_SET, ELF_F_LAYOUT);
+
+       if (elf_getshdrstrndx(elf, &shdrstrndx) != 0) {
+               pr_err("FAILED cannot get shdr str ndx\n");
+               return -1;
+       }
+
+       /*
+        * Scan all the elf sections and look for save data
+        * from .BTF_ids section and symbols.
+        */
+       while ((scn = elf_nextscn(elf, scn)) != NULL) {
+               Elf_Data *data;
+               GElf_Shdr sh;
+               char *name;
+
+               idx++;
+               if (gelf_getshdr(scn, &sh) != &sh) {
+                       pr_err("FAILED get section(%d) header\n", idx);
+                       return -1;
+               }
+
+               name = elf_strptr(elf, shdrstrndx, sh.sh_name);
+               if (!name) {
+                       pr_err("FAILED get section(%d) name\n", idx);
+                       return -1;
+               }
+
+               data = elf_getdata(scn, 0);
+               if (!data) {
+                       pr_err("FAILED to get section(%d) data from %s\n",
+                               idx, name);
+                       return -1;
+               }
+
+               pr_debug2("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
+                         idx, name, (unsigned long) data->d_size,
+                         (int) sh.sh_link, (unsigned long) sh.sh_flags,
+                         (int) sh.sh_type);
+
+               if (sh.sh_type == SHT_SYMTAB) {
+                       obj->efile.symbols       = data;
+                       obj->efile.symbols_shndx = idx;
+                       obj->efile.strtabidx     = sh.sh_link;
+               } else if (!strcmp(name, BTF_IDS_SECTION)) {
+                       obj->efile.idlist       = data;
+                       obj->efile.idlist_shndx = idx;
+                       obj->efile.idlist_addr  = sh.sh_addr;
+               }
+       }
+
+       return 0;
+}
+
+static int symbols_collect(struct object *obj)
+{
+       Elf_Scn *scn = NULL;
+       int n, i, err = 0;
+       GElf_Shdr sh;
+       char *name;
+
+       scn = elf_getscn(obj->efile.elf, obj->efile.symbols_shndx);
+       if (!scn)
+               return -1;
+
+       if (gelf_getshdr(scn, &sh) != &sh)
+               return -1;
+
+       n = sh.sh_size / sh.sh_entsize;
+
+       /*
+        * Scan symbols and look for the ones starting with
+        * __BTF_ID__* over .BTF_ids section.
+        */
+       for (i = 0; !err && i < n; i++) {
+               char *tmp, *prefix;
+               struct btf_id *id;
+               GElf_Sym sym;
+               int err = -1;
+
+               if (!gelf_getsym(obj->efile.symbols, i, &sym))
+                       return -1;
+
+               if (sym.st_shndx != obj->efile.idlist_shndx)
+                       continue;
+
+               name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
+                                 sym.st_name);
+
+               if (!is_btf_id(name))
+                       continue;
+
+               /*
+                * __BTF_ID__TYPE__vfs_truncate__0
+                * prefix =  ^
+                */
+               prefix = name + sizeof(BTF_ID) - 1;
+
+               /* struct */
+               if (!strncmp(prefix, BTF_STRUCT, sizeof(BTF_STRUCT) - 1)) {
+                       obj->nr_structs++;
+                       id = add_symbol(&obj->structs, prefix, sizeof(BTF_STRUCT) - 1);
+               /* union  */
+               } else if (!strncmp(prefix, BTF_UNION, sizeof(BTF_UNION) - 1)) {
+                       obj->nr_unions++;
+                       id = add_symbol(&obj->unions, prefix, sizeof(BTF_UNION) - 1);
+               /* typedef */
+               } else if (!strncmp(prefix, BTF_TYPEDEF, sizeof(BTF_TYPEDEF) - 1)) {
+                       obj->nr_typedefs++;
+                       id = add_symbol(&obj->typedefs, prefix, sizeof(BTF_TYPEDEF) - 1);
+               /* func */
+               } else if (!strncmp(prefix, BTF_FUNC, sizeof(BTF_FUNC) - 1)) {
+                       obj->nr_funcs++;
+                       id = add_symbol(&obj->funcs, prefix, sizeof(BTF_FUNC) - 1);
+               /* set */
+               } else if (!strncmp(prefix, BTF_SET, sizeof(BTF_SET) - 1)) {
+                       id = add_symbol(&obj->sets, prefix, sizeof(BTF_SET) - 1);
+                       /*
+                        * SET objects store list's count, which is encoded
+                        * in symbol's size, together with 'cnt' field hence
+                        * that - 1.
+                        */
+                       if (id)
+                               id->cnt = sym.st_size / sizeof(int) - 1;
+               } else {
+                       pr_err("FAILED unsupported prefix %s\n", prefix);
+                       return -1;
+               }
+
+               if (!id)
+                       return -ENOMEM;
+
+               if (id->addr_cnt >= ADDR_CNT) {
+                       pr_err("FAILED symbol %s crossed the number of allowed lists",
+                               id->name);
+                       return -1;
+               }
+               id->addr[id->addr_cnt++] = sym.st_value;
+       }
+
+       return 0;
+}
+
+static struct btf *btf__parse_raw(const char *file)
+{
+       struct btf *btf;
+       struct stat st;
+       __u8 *buf;
+       FILE *f;
+
+       if (stat(file, &st))
+               return NULL;
+
+       f = fopen(file, "rb");
+       if (!f)
+               return NULL;
+
+       buf = malloc(st.st_size);
+       if (!buf) {
+               btf = ERR_PTR(-ENOMEM);
+               goto exit_close;
+       }
+
+       if ((size_t) st.st_size != fread(buf, 1, st.st_size, f)) {
+               btf = ERR_PTR(-EINVAL);
+               goto exit_free;
+       }
+
+       btf = btf__new(buf, st.st_size);
+
+exit_free:
+       free(buf);
+exit_close:
+       fclose(f);
+       return btf;
+}
+
+static bool is_btf_raw(const char *file)
+{
+       __u16 magic = 0;
+       int fd, nb_read;
+
+       fd = open(file, O_RDONLY);
+       if (fd < 0)
+               return false;
+
+       nb_read = read(fd, &magic, sizeof(magic));
+       close(fd);
+       return nb_read == sizeof(magic) && magic == BTF_MAGIC;
+}
+
+static struct btf *btf_open(const char *path)
+{
+       if (is_btf_raw(path))
+               return btf__parse_raw(path);
+       else
+               return btf__parse_elf(path, NULL);
+}
+
+static int symbols_resolve(struct object *obj)
+{
+       int nr_typedefs = obj->nr_typedefs;
+       int nr_structs  = obj->nr_structs;
+       int nr_unions   = obj->nr_unions;
+       int nr_funcs    = obj->nr_funcs;
+       int err, type_id;
+       struct btf *btf;
+       __u32 nr;
+
+       btf = btf_open(obj->btf ?: obj->path);
+       err = libbpf_get_error(btf);
+       if (err) {
+               pr_err("FAILED: load BTF from %s: %s",
+                       obj->path, strerror(err));
+               return -1;
+       }
+
+       err = -1;
+       nr  = btf__get_nr_types(btf);
+
+       /*
+        * Iterate all the BTF types and search for collected symbol IDs.
+        */
+       for (type_id = 1; type_id <= nr; type_id++) {
+               const struct btf_type *type;
+               struct rb_root *root;
+               struct btf_id *id;
+               const char *str;
+               int *nr;
+
+               type = btf__type_by_id(btf, type_id);
+               if (!type) {
+                       pr_err("FAILED: malformed BTF, can't resolve type for ID %d\n",
+                               type_id);
+                       goto out;
+               }
+
+               if (btf_is_func(type) && nr_funcs) {
+                       nr   = &nr_funcs;
+                       root = &obj->funcs;
+               } else if (btf_is_struct(type) && nr_structs) {
+                       nr   = &nr_structs;
+                       root = &obj->structs;
+               } else if (btf_is_union(type) && nr_unions) {
+                       nr   = &nr_unions;
+                       root = &obj->unions;
+               } else if (btf_is_typedef(type) && nr_typedefs) {
+                       nr   = &nr_typedefs;
+                       root = &obj->typedefs;
+               } else
+                       continue;
+
+               str = btf__name_by_offset(btf, type->name_off);
+               if (!str) {
+                       pr_err("FAILED: malformed BTF, can't resolve name for ID %d\n",
+                               type_id);
+                       goto out;
+               }
+
+               id = btf_id__find(root, str);
+               if (id) {
+                       id->id = type_id;
+                       (*nr)--;
+               }
+       }
+
+       err = 0;
+out:
+       btf__free(btf);
+       return err;
+}
+
+static int id_patch(struct object *obj, struct btf_id *id)
+{
+       Elf_Data *data = obj->efile.idlist;
+       int *ptr = data->d_buf;
+       int i;
+
+       if (!id->id) {
+               pr_err("FAILED unresolved symbol %s\n", id->name);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < id->addr_cnt; i++) {
+               unsigned long addr = id->addr[i];
+               unsigned long idx = addr - obj->efile.idlist_addr;
+
+               pr_debug("patching addr %5lu: ID %7d [%s]\n",
+                        idx, id->id, id->name);
+
+               if (idx >= data->d_size) {
+                       pr_err("FAILED patching index %lu out of bounds %lu\n",
+                               idx, data->d_size);
+                       return -1;
+               }
+
+               idx = idx / sizeof(int);
+               ptr[idx] = id->id;
+       }
+
+       return 0;
+}
+
+static int __symbols_patch(struct object *obj, struct rb_root *root)
+{
+       struct rb_node *next;
+       struct btf_id *id;
+
+       next = rb_first(root);
+       while (next) {
+               id = rb_entry(next, struct btf_id, rb_node);
+
+               if (id_patch(obj, id))
+                       return -1;
+
+               next = rb_next(next);
+       }
+       return 0;
+}
+
/*
 * qsort() comparator for 32-bit BTF IDs.
 *
 * Comparing via subtraction (*a - *b) can overflow signed int for
 * extreme operands, which is undefined behavior; use explicit
 * relational comparisons instead.
 */
static int cmp_id(const void *pa, const void *pb)
{
	const int *a = pa, *b = pb;

	return (*a > *b) - (*a < *b);
}
+
+static int sets_patch(struct object *obj)
+{
+       Elf_Data *data = obj->efile.idlist;
+       int *ptr = data->d_buf;
+       struct rb_node *next;
+
+       next = rb_first(&obj->sets);
+       while (next) {
+               unsigned long addr, idx;
+               struct btf_id *id;
+               int *base;
+               int cnt;
+
+               id   = rb_entry(next, struct btf_id, rb_node);
+               addr = id->addr[0];
+               idx  = addr - obj->efile.idlist_addr;
+
+               /* sets are unique */
+               if (id->addr_cnt != 1) {
+                       pr_err("FAILED malformed data for set '%s'\n",
+                               id->name);
+                       return -1;
+               }
+
+               idx = idx / sizeof(int);
+               base = &ptr[idx] + 1;
+               cnt = ptr[idx];
+
+               pr_debug("sorting  addr %5lu: cnt %6d [%s]\n",
+                        (idx + 1) * sizeof(int), cnt, id->name);
+
+               qsort(base, cnt, sizeof(int), cmp_id);
+
+               next = rb_next(next);
+       }
+}
+
+static int symbols_patch(struct object *obj)
+{
+       int err;
+
+       if (__symbols_patch(obj, &obj->structs)  ||
+           __symbols_patch(obj, &obj->unions)   ||
+           __symbols_patch(obj, &obj->typedefs) ||
+           __symbols_patch(obj, &obj->funcs)    ||
+           __symbols_patch(obj, &obj->sets))
+               return -1;
+
+       if (sets_patch(obj))
+               return -1;
+
+       elf_flagdata(obj->efile.idlist, ELF_C_SET, ELF_F_DIRTY);
+
+       err = elf_update(obj->efile.elf, ELF_C_WRITE);
+       if (err < 0) {
+               pr_err("FAILED elf_update(WRITE): %s\n",
+                       elf_errmsg(-1));
+       }
+
+       pr_debug("update %s for %s\n",
+                err >= 0 ? "ok" : "failed", obj->path);
+       return err < 0 ? -1 : 0;
+}
+
+static const char * const resolve_btfids_usage[] = {
+       "resolve_btfids [<options>] <ELF object>",
+       NULL
+};
+
+int main(int argc, const char **argv)
+{
+       bool no_fail = false;
+       struct object obj = {
+               .efile = {
+                       .idlist_shndx  = -1,
+                       .symbols_shndx = -1,
+               },
+               .structs  = RB_ROOT,
+               .unions   = RB_ROOT,
+               .typedefs = RB_ROOT,
+               .funcs    = RB_ROOT,
+               .sets     = RB_ROOT,
+       };
+       struct option btfid_options[] = {
+               OPT_INCR('v', "verbose", &verbose,
+                        "be more verbose (show errors, etc)"),
+               OPT_STRING(0, "btf", &obj.btf, "BTF data",
+                          "BTF data"),
+               OPT_BOOLEAN(0, "no-fail", &no_fail,
+                          "do not fail if " BTF_IDS_SECTION " section is not found"),
+               OPT_END()
+       };
+       int err = -1;
+
+       argc = parse_options(argc, argv, btfid_options, resolve_btfids_usage,
+                            PARSE_OPT_STOP_AT_NON_OPTION);
+       if (argc != 1)
+               usage_with_options(resolve_btfids_usage, btfid_options);
+
+       obj.path = argv[0];
+
+       if (elf_collect(&obj))
+               goto out;
+
+       /*
+        * We did not find .BTF_ids section or symbols section,
+        * nothing to do..
+        */
+       if (obj.efile.idlist_shndx == -1 ||
+           obj.efile.symbols_shndx == -1) {
+               if (no_fail)
+                       return 0;
+               pr_err("FAILED to find needed sections\n");
+               return -1;
+       }
+
+       if (symbols_collect(&obj))
+               goto out;
+
+       if (symbols_resolve(&obj))
+               goto out;
+
+       if (symbols_patch(&obj))
+               goto out;
+
+       err = 0;
+out:
+       if (obj.efile.elf)
+               elf_end(obj.efile.elf);
+       close(obj.efile.fd);
+       return err;
+}
index b1f0321..88371f7 100644 (file)
@@ -68,7 +68,7 @@ FILES=                                          \
          test-llvm-version.bin                 \
          test-libaio.bin                       \
          test-libzstd.bin                      \
-         test-clang-bpf-global-var.bin         \
+         test-clang-bpf-co-re.bin              \
          test-file-handle.bin                  \
          test-libpfm4.bin
 
@@ -325,7 +325,7 @@ $(OUTPUT)test-libaio.bin:
 $(OUTPUT)test-libzstd.bin:
        $(BUILD) -lzstd
 
-$(OUTPUT)test-clang-bpf-global-var.bin:
+$(OUTPUT)test-clang-bpf-co-re.bin:
        $(CLANG) -S -g -target bpf -o - $(patsubst %.bin,%.c,$(@F)) |   \
                grep BTF_KIND_VAR
 
diff --git a/tools/build/feature/test-clang-bpf-co-re.c b/tools/build/feature/test-clang-bpf-co-re.c
new file mode 100644 (file)
index 0000000..cb5265b
--- /dev/null
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+struct test {
+       int a;
+       int b;
+} __attribute__((preserve_access_index));
+
+volatile struct test global_value_for_test = {};
diff --git a/tools/build/feature/test-clang-bpf-global-var.c b/tools/build/feature/test-clang-bpf-global-var.c
deleted file mode 100644 (file)
index 221f148..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2020 Facebook
-
-volatile int global_value_for_test = 1;
index 4671fbf..7f475d5 100644 (file)
@@ -18,8 +18,7 @@
  * position @h. For example
  * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
  */
-#if !defined(__ASSEMBLY__) && \
-       (!defined(CONFIG_CC_IS_GCC) || CONFIG_GCC_VERSION >= 49000)
+#if !defined(__ASSEMBLY__)
 #include <linux/build_bug.h>
 #define GENMASK_INPUT_CHECK(h, l) \
        (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h
new file mode 100644 (file)
index 0000000..4867d54
--- /dev/null
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_BTF_IDS_H
+#define _LINUX_BTF_IDS_H
+
+#ifdef CONFIG_DEBUG_INFO_BTF
+
+#include <linux/compiler.h> /* for __PASTE */
+
+/*
+ * Following macros help to define lists of BTF IDs placed
+ * in .BTF_ids section. They are initially filled with zeros
+ * (during compilation) and resolved later during the
+ * linking phase by resolve_btfids tool.
+ *
+ * Any change in list layout must be reflected in resolve_btfids
+ * tool logic.
+ */
+
+#define BTF_IDS_SECTION ".BTF_ids"
+
+#define ____BTF_ID(symbol)                             \
+asm(                                                   \
+".pushsection " BTF_IDS_SECTION ",\"a\";       \n"     \
+".local " #symbol " ;                          \n"     \
+".type  " #symbol ", STT_OBJECT;               \n"     \
+".size  " #symbol ", 4;                        \n"     \
+#symbol ":                                     \n"     \
+".zero 4                                       \n"     \
+".popsection;                                  \n");
+
+#define __BTF_ID(symbol) \
+       ____BTF_ID(symbol)
+
+#define __ID(prefix) \
+       __PASTE(prefix, __COUNTER__)
+
+/*
+ * The BTF_ID defines unique symbol for each ID pointing
+ * to 4 zero bytes.
+ */
+#define BTF_ID(prefix, name) \
+       __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__))
+
+/*
+ * The BTF_ID_LIST macro defines pure (unsorted) list
+ * of BTF IDs, with following layout:
+ *
+ * BTF_ID_LIST(list1)
+ * BTF_ID(type1, name1)
+ * BTF_ID(type2, name2)
+ *
+ * list1:
+ * __BTF_ID__type1__name1__1:
+ * .zero 4
+ * __BTF_ID__type2__name2__2:
+ * .zero 4
+ *
+ */
+#define __BTF_ID_LIST(name, scope)                     \
+asm(                                                   \
+".pushsection " BTF_IDS_SECTION ",\"a\";       \n"     \
+"." #scope " " #name ";                        \n"     \
+#name ":;                                      \n"     \
+".popsection;                                  \n");   \
+
+#define BTF_ID_LIST(name)                              \
+__BTF_ID_LIST(name, local)                             \
+extern u32 name[];
+
+#define BTF_ID_LIST_GLOBAL(name)                       \
+__BTF_ID_LIST(name, globl)
+
+/*
+ * The BTF_ID_UNUSED macro defines 4 zero bytes.
+ * It's used when we want to define 'unused' entry
+ * in BTF_ID_LIST, like:
+ *
+ *   BTF_ID_LIST(bpf_skb_output_btf_ids)
+ *   BTF_ID(struct, sk_buff)
+ *   BTF_ID_UNUSED
+ *   BTF_ID(struct, task_struct)
+ */
+
+#define BTF_ID_UNUSED                                  \
+asm(                                                   \
+".pushsection " BTF_IDS_SECTION ",\"a\";       \n"     \
+".zero 4                                       \n"     \
+".popsection;                                  \n");
+
+#else
+
+#define BTF_ID_LIST(name) static u32 name[5];
+#define BTF_ID(prefix, name)
+#define BTF_ID_UNUSED
+#define BTF_ID_LIST_GLOBAL(name) u32 name[1];
+
+#endif /* CONFIG_DEBUG_INFO_BTF */
+
+#ifdef CONFIG_NET
+/* Define a list of socket types which can be the argument for
+ * skc_to_*_sock() helpers. All these sockets should have
+ * sock_common as the first argument in its memory layout.
+ */
+#define BTF_SOCK_TYPE_xxx \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, inet_sock)                    \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, inet_connection_sock)    \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, inet_request_sock)        \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, inet_timewait_sock)        \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, request_sock)                  \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, sock)                         \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, sock_common)           \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, tcp_sock)                      \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, tcp_request_sock)          \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock)          \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock)                    \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock)                      \
+       BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock)
+
+enum {
+#define BTF_SOCK_TYPE(name, str) name,
+BTF_SOCK_TYPE_xxx
+#undef BTF_SOCK_TYPE
+MAX_BTF_SOCK_TYPE,
+};
+
+extern u32 btf_sock_ids[];
+#endif
+
+#endif
index 9f90027..6eac24d 100644 (file)
@@ -201,4 +201,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 # define __fallthrough
 #endif
 
+/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
+#define ___PASTE(a, b) a##b
+#define __PASTE(a, b) ___PASTE(a, b)
+
 #endif /* _TOOLS_LINUX_COMPILER_H */
index 3a3201e..c8c189a 100644 (file)
@@ -606,9 +606,9 @@ __SYSCALL(__NR_sendto, sys_sendto)
 #define __NR_recvfrom 207
 __SC_COMP(__NR_recvfrom, sys_recvfrom, compat_sys_recvfrom)
 #define __NR_setsockopt 208
-__SC_COMP(__NR_setsockopt, sys_setsockopt, compat_sys_setsockopt)
+__SC_COMP(__NR_setsockopt, sys_setsockopt, sys_setsockopt)
 #define __NR_getsockopt 209
-__SC_COMP(__NR_getsockopt, sys_getsockopt, compat_sys_getsockopt)
+__SC_COMP(__NR_getsockopt, sys_getsockopt, sys_getsockopt)
 #define __NR_shutdown 210
 __SYSCALL(__NR_shutdown, sys_shutdown)
 #define __NR_sendmsg 211
@@ -855,9 +855,11 @@ __SYSCALL(__NR_clone3, sys_clone3)
 __SYSCALL(__NR_openat2, sys_openat2)
 #define __NR_pidfd_getfd 438
 __SYSCALL(__NR_pidfd_getfd, sys_pidfd_getfd)
+#define __NR_faccessat2 439
+__SYSCALL(__NR_faccessat2, sys_faccessat2)
 
 #undef __NR_syscalls
-#define __NR_syscalls 439
+#define __NR_syscalls 440
 
 /*
  * 32 bit systems traditionally used different
index 2813e57..14b67cd 100644 (file)
@@ -1969,6 +1969,30 @@ enum drm_i915_perf_property_id {
         */
        DRM_I915_PERF_PROP_HOLD_PREEMPTION,
 
+       /**
+        * Specifying this pins all contexts to the specified SSEU power
+        * configuration for the duration of the recording.
+        *
+        * This parameter's value is a pointer to a struct
+        * drm_i915_gem_context_param_sseu.
+        *
+        * This property is available in perf revision 4.
+        */
+       DRM_I915_PERF_PROP_GLOBAL_SSEU,
+
+       /**
+        * This optional parameter specifies the timer interval in nanoseconds
+        * at which the i915 driver will check the OA buffer for available data.
+        * Minimum allowed value is 100 microseconds. A default value is used by
+        * the driver if this parameter is not specified. Note that larger timer
+        * values will reduce cpu consumption during OA perf captures. However,
+        * excessively large values would potentially result in OA buffer
+        * overwrites as captures reach end of the OA buffer.
+        *
+        * This property is available in perf revision 5.
+        */
+       DRM_I915_PERF_PROP_POLL_OA_PERIOD,
+
        DRM_I915_PERF_PROP_MAX /* non-ABI */
 };
 
index 1968481..54d0c88 100644 (file)
@@ -189,6 +189,7 @@ enum bpf_prog_type {
        BPF_PROG_TYPE_STRUCT_OPS,
        BPF_PROG_TYPE_EXT,
        BPF_PROG_TYPE_LSM,
+       BPF_PROG_TYPE_SK_LOOKUP,
 };
 
 enum bpf_attach_type {
@@ -226,6 +227,9 @@ enum bpf_attach_type {
        BPF_CGROUP_INET4_GETSOCKNAME,
        BPF_CGROUP_INET6_GETSOCKNAME,
        BPF_XDP_DEVMAP,
+       BPF_CGROUP_INET_SOCK_RELEASE,
+       BPF_XDP_CPUMAP,
+       BPF_SK_LOOKUP,
        __MAX_BPF_ATTACH_TYPE
 };
 
@@ -653,7 +657,7 @@ union bpf_attr {
  *             Map value associated to *key*, or **NULL** if no entry was
  *             found.
  *
- * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
+ * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
  *     Description
  *             Add or update the value of the entry associated to *key* in
  *             *map* with *value*. *flags* is one of:
@@ -671,13 +675,13 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_map_delete_elem(struct bpf_map *map, const void *key)
+ * long bpf_map_delete_elem(struct bpf_map *map, const void *key)
  *     Description
  *             Delete entry with *key* from *map*.
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
  *     Description
  *             For tracing programs, safely attempt to read *size* bytes from
  *             kernel space address *unsafe_ptr* and store the data in *dst*.
@@ -695,7 +699,7 @@ union bpf_attr {
  *     Return
  *             Current *ktime*.
  *
- * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
+ * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
  *     Description
  *             This helper is a "printk()-like" facility for debugging. It
  *             prints a message defined by format *fmt* (of size *fmt_size*)
@@ -775,7 +779,7 @@ union bpf_attr {
  *     Return
  *             The SMP id of the processor running the program.
  *
- * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
+ * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
  *     Description
  *             Store *len* bytes from address *from* into the packet
  *             associated to *skb*, at *offset*. *flags* are a combination of
@@ -792,7 +796,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
+ * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
  *     Description
  *             Recompute the layer 3 (e.g. IP) checksum for the packet
  *             associated to *skb*. Computation is incremental, so the helper
@@ -817,7 +821,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
+ * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
  *     Description
  *             Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
  *             packet associated to *skb*. Computation is incremental, so the
@@ -849,7 +853,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
+ * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
  *     Description
  *             This special helper is used to trigger a "tail call", or in
  *             other words, to jump into another eBPF program. The same stack
@@ -880,7 +884,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
+ * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
  *     Description
  *             Clone and redirect the packet associated to *skb* to another
  *             net device of index *ifindex*. Both ingress and egress
@@ -916,7 +920,7 @@ union bpf_attr {
  *             A 64-bit integer containing the current GID and UID, and
  *             created as such: *current_gid* **<< 32 \|** *current_uid*.
  *
- * int bpf_get_current_comm(void *buf, u32 size_of_buf)
+ * long bpf_get_current_comm(void *buf, u32 size_of_buf)
  *     Description
  *             Copy the **comm** attribute of the current task into *buf* of
  *             *size_of_buf*. The **comm** attribute contains the name of
@@ -953,7 +957,7 @@ union bpf_attr {
  *     Return
  *             The classid, or 0 for the default unconfigured classid.
  *
- * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
+ * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
  *     Description
  *             Push a *vlan_tci* (VLAN tag control information) of protocol
  *             *vlan_proto* to the packet associated to *skb*, then update
@@ -969,7 +973,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_skb_vlan_pop(struct sk_buff *skb)
+ * long bpf_skb_vlan_pop(struct sk_buff *skb)
  *     Description
  *             Pop a VLAN header from the packet associated to *skb*.
  *
@@ -981,7 +985,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
+ * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
  *     Description
  *             Get tunnel metadata. This helper takes a pointer *key* to an
  *             empty **struct bpf_tunnel_key** of **size**, that will be
@@ -1032,7 +1036,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
+ * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
  *     Description
  *             Populate tunnel metadata for packet associated to *skb.* The
  *             tunnel metadata is set to the contents of *key*, of *size*. The
@@ -1098,7 +1102,7 @@ union bpf_attr {
  *             The value of the perf event counter read from the map, or a
  *             negative error code in case of failure.
  *
- * int bpf_redirect(u32 ifindex, u64 flags)
+ * long bpf_redirect(u32 ifindex, u64 flags)
  *     Description
  *             Redirect the packet to another net device of index *ifindex*.
  *             This helper is somewhat similar to **bpf_clone_redirect**\
@@ -1145,7 +1149,7 @@ union bpf_attr {
  *             The realm of the route for the packet associated to *skb*, or 0
  *             if none was found.
  *
- * int bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
  *     Description
  *             Write raw *data* blob into a special BPF perf event held by
  *             *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
@@ -1190,7 +1194,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
+ * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
  *     Description
  *             This helper was provided as an easy way to load data from a
  *             packet. It can be used to load *len* bytes from *offset* from
@@ -1207,7 +1211,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
+ * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
  *     Description
  *             Walk a user or a kernel stack and return its id. To achieve
  *             this, the helper needs *ctx*, which is a pointer to the context
@@ -1276,7 +1280,7 @@ union bpf_attr {
  *             The checksum result, or a negative error code in case of
  *             failure.
  *
- * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
+ * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
  *     Description
  *             Retrieve tunnel options metadata for the packet associated to
  *             *skb*, and store the raw tunnel option data to the buffer *opt*
@@ -1294,7 +1298,7 @@ union bpf_attr {
  *     Return
  *             The size of the option data retrieved.
  *
- * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
+ * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
  *     Description
  *             Set tunnel options metadata for the packet associated to *skb*
  *             to the option data contained in the raw buffer *opt* of *size*.
@@ -1304,7 +1308,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
+ * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
  *     Description
  *             Change the protocol of the *skb* to *proto*. Currently
  *             supported are transitions from IPv4 to IPv6, and from IPv6 to
@@ -1331,7 +1335,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_skb_change_type(struct sk_buff *skb, u32 type)
+ * long bpf_skb_change_type(struct sk_buff *skb, u32 type)
  *     Description
  *             Change the packet type for the packet associated to *skb*. This
  *             comes down to setting *skb*\ **->pkt_type** to *type*, except
@@ -1358,7 +1362,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
+ * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
  *     Description
  *             Check whether *skb* is a descendant of the cgroup2 held by
  *             *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
@@ -1389,7 +1393,7 @@ union bpf_attr {
  *     Return
  *             A pointer to the current task struct.
  *
- * int bpf_probe_write_user(void *dst, const void *src, u32 len)
+ * long bpf_probe_write_user(void *dst, const void *src, u32 len)
  *     Description
  *             Attempt in a safe way to write *len* bytes from the buffer
  *             *src* to *dst* in memory. It only works for threads that are in
@@ -1408,7 +1412,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
+ * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
  *     Description
  *             Check whether the probe is being run in the context of a given
  *             subset of the cgroup2 hierarchy. The cgroup2 to test is held by
@@ -1420,7 +1424,7 @@ union bpf_attr {
  *             * 1, if current task does not belong to the cgroup2.
  *             * A negative error code, if an error occurred.
  *
- * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
+ * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
  *     Description
  *             Resize (trim or grow) the packet associated to *skb* to the
  *             new *len*. The *flags* are reserved for future usage, and must
@@ -1444,7 +1448,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_skb_pull_data(struct sk_buff *skb, u32 len)
+ * long bpf_skb_pull_data(struct sk_buff *skb, u32 len)
  *     Description
  *             Pull in non-linear data in case the *skb* is non-linear and not
  *             all of *len* are part of the linear section. Make *len* bytes
@@ -1500,7 +1504,7 @@ union bpf_attr {
  *             recalculation the next time the kernel tries to access this
  *             hash or when the **bpf_get_hash_recalc**\ () helper is called.
  *
- * int bpf_get_numa_node_id(void)
+ * long bpf_get_numa_node_id(void)
  *     Description
  *             Return the id of the current NUMA node. The primary use case
  *             for this helper is the selection of sockets for the local NUMA
@@ -1511,7 +1515,7 @@ union bpf_attr {
  *     Return
  *             The id of current NUMA node.
  *
- * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
+ * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
  *     Description
  *             Grows headroom of packet associated to *skb* and adjusts the
  *             offset of the MAC header accordingly, adding *len* bytes of
@@ -1532,7 +1536,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
+ * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
  *     Description
  *             Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
  *             it is possible to use a negative value for *delta*. This helper
@@ -1547,7 +1551,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
  *     Description
  *             Copy a NUL terminated string from an unsafe kernel address
  *             *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for
@@ -1595,14 +1599,14 @@ union bpf_attr {
  *             is returned (note that **overflowuid** might also be the actual
  *             UID value for the socket).
  *
- * u32 bpf_set_hash(struct sk_buff *skb, u32 hash)
+ * long bpf_set_hash(struct sk_buff *skb, u32 hash)
  *     Description
  *             Set the full hash for *skb* (set the field *skb*\ **->hash**)
  *             to value *hash*.
  *     Return
  *             0
  *
- * int bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
+ * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
  *     Description
  *             Emulate a call to **setsockopt()** on the socket associated to
  *             *bpf_socket*, which must be a full socket. The *level* at
@@ -1621,16 +1625,19 @@ union bpf_attr {
  *
  *             * **SOL_SOCKET**, which supports the following *optname*\ s:
  *               **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
- *               **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**.
+ *               **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**,
+ *               **SO_BINDTODEVICE**, **SO_KEEPALIVE**.
  *             * **IPPROTO_TCP**, which supports the following *optname*\ s:
  *               **TCP_CONGESTION**, **TCP_BPF_IW**,
- *               **TCP_BPF_SNDCWND_CLAMP**.
+ *               **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**,
+ *               **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**,
+ *               **TCP_SYNCNT**, **TCP_USER_TIMEOUT**.
  *             * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
  *             * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
+ * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
  *     Description
  *             Grow or shrink the room for data in the packet associated to
  *             *skb* by *len_diff*, and according to the selected *mode*.
@@ -1676,7 +1683,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
+ * long bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
  *     Description
  *             Redirect the packet to the endpoint referenced by *map* at
  *             index *key*. Depending on its type, this *map* can contain
@@ -1697,7 +1704,7 @@ union bpf_attr {
  *             **XDP_REDIRECT** on success, or the value of the two lower bits
  *             of the *flags* argument on error.
  *
- * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
+ * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
  *     Description
  *             Redirect the packet to the socket referenced by *map* (of type
  *             **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
@@ -1708,7 +1715,7 @@ union bpf_attr {
  *     Return
  *             **SK_PASS** on success, or **SK_DROP** on error.
  *
- * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
  *     Description
  *             Add an entry to, or update a *map* referencing sockets. The
  *             *skops* is used as a new value for the entry associated to
@@ -1727,7 +1734,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
+ * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
  *     Description
  *             Adjust the address pointed by *xdp_md*\ **->data_meta** by
  *             *delta* (which can be positive or negative). Note that this
@@ -1756,7 +1763,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
+ * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
  *     Description
  *             Read the value of a perf event counter, and store it into *buf*
  *             of size *buf_size*. This helper relies on a *map* of type
@@ -1806,7 +1813,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
+ * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
  *     Description
  *             For an eBPF program attached to a perf event, retrieve the
  *             value of the event counter associated to *ctx* and store it in
@@ -1817,7 +1824,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
+ * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
  *     Description
  *             Emulate a call to **getsockopt()** on the socket associated to
  *             *bpf_socket*, which must be a full socket. The *level* at
@@ -1842,7 +1849,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_override_return(struct pt_regs *regs, u64 rc)
+ * long bpf_override_return(struct pt_regs *regs, u64 rc)
  *     Description
  *             Used for error injection, this helper uses kprobes to override
  *             the return value of the probed function, and to set it to *rc*.
@@ -1867,7 +1874,7 @@ union bpf_attr {
  *     Return
  *             0
  *
- * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
+ * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
  *     Description
  *             Attempt to set the value of the **bpf_sock_ops_cb_flags** field
  *             for the full TCP socket associated to *bpf_sock_ops* to
@@ -1911,7 +1918,7 @@ union bpf_attr {
  *             be set is returned (which comes down to 0 if all bits were set
  *             as required).
  *
- * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
+ * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
  *     Description
  *             This helper is used in programs implementing policies at the
  *             socket level. If the message *msg* is allowed to pass (i.e. if
@@ -1925,7 +1932,7 @@ union bpf_attr {
  *     Return
  *             **SK_PASS** on success, or **SK_DROP** on error.
  *
- * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
+ * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
  *     Description
  *             For socket policies, apply the verdict of the eBPF program to
  *             the next *bytes* (number of bytes) of message *msg*.
@@ -1959,7 +1966,7 @@ union bpf_attr {
  *     Return
  *             0
  *
- * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
+ * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
  *     Description
  *             For socket policies, prevent the execution of the verdict eBPF
  *             program for message *msg* until *bytes* (byte number) have been
@@ -1977,7 +1984,7 @@ union bpf_attr {
  *     Return
  *             0
  *
- * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
+ * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
  *     Description
  *             For socket policies, pull in non-linear data from user space
  *             for *msg* and set pointers *msg*\ **->data** and *msg*\
@@ -2008,7 +2015,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
+ * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
  *     Description
  *             Bind the socket associated to *ctx* to the address pointed by
  *             *addr*, of length *addr_len*. This allows for making outgoing
@@ -2026,7 +2033,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
+ * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
  *     Description
  *             Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
  *             possible to both shrink and grow the packet tail.
@@ -2040,7 +2047,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
+ * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
  *     Description
  *             Retrieve the XFRM state (IP transform framework, see also
  *             **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
@@ -2056,7 +2063,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
+ * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
  *     Description
  *             Return a user or a kernel stack in bpf program provided buffer.
  *             To achieve this, the helper needs *ctx*, which is a pointer
@@ -2089,7 +2096,7 @@ union bpf_attr {
  *             A non-negative value equal to or less than *size* on success,
  *             or a negative error in case of failure.
  *
- * int bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
+ * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
  *     Description
  *             This helper is similar to **bpf_skb_load_bytes**\ () in that
  *             it provides an easy way to load *len* bytes from *offset*
@@ -2111,7 +2118,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
+ * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
  *     Description
  *             Do FIB lookup in kernel tables using parameters in *params*.
  *             If lookup is successful and result shows packet is to be
@@ -2142,7 +2149,7 @@ union bpf_attr {
  *             * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
  *               packet is not forwarded or needs assist from full stack
  *
- * int bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
  *     Description
  *             Add an entry to, or update a sockhash *map* referencing sockets.
  *             The *skops* is used as a new value for the entry associated to
@@ -2161,7 +2168,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
  *     Description
  *             This helper is used in programs implementing policies at the
  *             socket level. If the message *msg* is allowed to pass (i.e. if
@@ -2175,7 +2182,7 @@ union bpf_attr {
  *     Return
  *             **SK_PASS** on success, or **SK_DROP** on error.
  *
- * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
  *     Description
  *             This helper is used in programs implementing policies at the
  *             skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
@@ -2189,7 +2196,7 @@ union bpf_attr {
  *     Return
  *             **SK_PASS** on success, or **SK_DROP** on error.
  *
- * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
+ * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
  *     Description
  *             Encapsulate the packet associated to *skb* within a Layer 3
  *             protocol header. This header is provided in the buffer at
@@ -2226,7 +2233,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
+ * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
  *     Description
  *             Store *len* bytes from address *from* into the packet
  *             associated to *skb*, at *offset*. Only the flags, tag and TLVs
@@ -2241,7 +2248,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
+ * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
  *     Description
  *             Adjust the size allocated to TLVs in the outermost IPv6
  *             Segment Routing Header contained in the packet associated to
@@ -2257,7 +2264,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
+ * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
  *     Description
  *             Apply an IPv6 Segment Routing action of type *action* to the
  *             packet associated to *skb*. Each action takes a parameter
@@ -2286,7 +2293,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_rc_repeat(void *ctx)
+ * long bpf_rc_repeat(void *ctx)
  *     Description
  *             This helper is used in programs implementing IR decoding, to
  *             report a successfully decoded repeat key message. This delays
@@ -2305,7 +2312,7 @@ union bpf_attr {
  *     Return
  *             0
  *
- * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
+ * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
  *     Description
  *             This helper is used in programs implementing IR decoding, to
  *             report a successfully decoded key press with *scancode*,
@@ -2370,7 +2377,7 @@ union bpf_attr {
  *     Return
  *             A pointer to the local storage area.
  *
- * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
  *     Description
  *             Select a **SO_REUSEPORT** socket from a
  *             **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
@@ -2415,7 +2422,7 @@ union bpf_attr {
  *                     Look for an IPv6 socket.
  *
  *             If the *netns* is a negative signed 32-bit integer, then the
- *             socket lookup table in the netns associated with the *ctx* will
+ *             socket lookup table in the netns associated with the *ctx*
  *             will be used. For the TC hooks, this is the netns of the device
  *             in the skb. For socket hooks, this is the netns of the socket.
  *             If *netns* is any other signed 32-bit value greater than or
@@ -2452,7 +2459,7 @@ union bpf_attr {
  *                     Look for an IPv6 socket.
  *
  *             If the *netns* is a negative signed 32-bit integer, then the
- *             socket lookup table in the netns associated with the *ctx* will
+ *             socket lookup table in the netns associated with the *ctx*
  *             will be used. For the TC hooks, this is the netns of the device
  *             in the skb. For socket hooks, this is the netns of the socket.
  *             If *netns* is any other signed 32-bit value greater than or
@@ -2471,7 +2478,7 @@ union bpf_attr {
  *             result is from *reuse*\ **->socks**\ [] using the hash of the
  *             tuple.
  *
- * int bpf_sk_release(struct bpf_sock *sock)
+ * long bpf_sk_release(struct bpf_sock *sock)
  *     Description
  *             Release the reference held by *sock*. *sock* must be a
  *             non-**NULL** pointer that was returned from
@@ -2479,7 +2486,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
+ * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
  *     Description
  *             Push an element *value* in *map*. *flags* is one of:
  *
@@ -2489,19 +2496,19 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_map_pop_elem(struct bpf_map *map, void *value)
+ * long bpf_map_pop_elem(struct bpf_map *map, void *value)
  *     Description
  *             Pop an element from *map*.
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_map_peek_elem(struct bpf_map *map, void *value)
+ * long bpf_map_peek_elem(struct bpf_map *map, void *value)
  *     Description
  *             Get an element from *map* without removing it.
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
+ * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
  *     Description
  *             For socket policies, insert *len* bytes into *msg* at offset
  *             *start*.
@@ -2517,7 +2524,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
+ * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
  *     Description
  *             Will remove *len* bytes from a *msg* starting at byte *start*.
  *             This may result in **ENOMEM** errors under certain situations if
@@ -2529,7 +2536,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
+ * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
  *     Description
  *             This helper is used in programs implementing IR decoding, to
  *             report a successfully decoded pointer movement.
@@ -2543,7 +2550,7 @@ union bpf_attr {
  *     Return
  *             0
  *
- * int bpf_spin_lock(struct bpf_spin_lock *lock)
+ * long bpf_spin_lock(struct bpf_spin_lock *lock)
  *     Description
  *             Acquire a spinlock represented by the pointer *lock*, which is
  *             stored as part of a value of a map. Taking the lock allows to
@@ -2591,7 +2598,7 @@ union bpf_attr {
  *     Return
  *             0
  *
- * int bpf_spin_unlock(struct bpf_spin_lock *lock)
+ * long bpf_spin_unlock(struct bpf_spin_lock *lock)
  *     Description
  *             Release the *lock* previously locked by a call to
  *             **bpf_spin_lock**\ (\ *lock*\ ).
@@ -2614,7 +2621,7 @@ union bpf_attr {
  *             A **struct bpf_tcp_sock** pointer on success, or **NULL** in
  *             case of failure.
  *
- * int bpf_skb_ecn_set_ce(struct sk_buff *skb)
+ * long bpf_skb_ecn_set_ce(struct sk_buff *skb)
  *     Description
  *             Set ECN (Explicit Congestion Notification) field of IP header
  *             to **CE** (Congestion Encountered) if current value is **ECT**
@@ -2651,7 +2658,7 @@ union bpf_attr {
  *             result is from *reuse*\ **->socks**\ [] using the hash of the
  *             tuple.
  *
- * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
+ * long bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
  *     Description
  *             Check whether *iph* and *th* contain a valid SYN cookie ACK for
  *             the listening socket in *sk*.
@@ -2666,7 +2673,7 @@ union bpf_attr {
  *             0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
  *             error otherwise.
  *
- * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
+ * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
  *     Description
  *             Get name of sysctl in /proc/sys/ and copy it into provided by
  *             program buffer *buf* of size *buf_len*.
@@ -2682,7 +2689,7 @@ union bpf_attr {
  *             **-E2BIG** if the buffer wasn't big enough (*buf* will contain
  *             truncated name in this case).
  *
- * int bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
+ * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
  *     Description
  *             Get current value of sysctl as it is presented in /proc/sys
  *             (incl. newline, etc), and copy it as a string into provided
@@ -2701,7 +2708,7 @@ union bpf_attr {
  *             **-EINVAL** if current value was unavailable, e.g. because
  *             sysctl is uninitialized and read returns -EIO for it.
  *
- * int bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
+ * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
  *     Description
  *             Get new value being written by user space to sysctl (before
  *             the actual write happens) and copy it as a string into
@@ -2718,7 +2725,7 @@ union bpf_attr {
  *
  *             **-EINVAL** if sysctl is being read.
  *
- * int bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
+ * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
  *     Description
  *             Override new value being written by user space to sysctl with
  *             value provided by program in buffer *buf* of size *buf_len*.
@@ -2735,7 +2742,7 @@ union bpf_attr {
  *
  *             **-EINVAL** if sysctl is being read.
  *
- * int bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
+ * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
  *     Description
  *             Convert the initial part of the string from buffer *buf* of
  *             size *buf_len* to a long integer according to the given base
@@ -2759,7 +2766,7 @@ union bpf_attr {
  *
  *             **-ERANGE** if resulting value was out of range.
  *
- * int bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
+ * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
  *     Description
  *             Convert the initial part of the string from buffer *buf* of
  *             size *buf_len* to an unsigned long integer according to the
@@ -2810,7 +2817,7 @@ union bpf_attr {
  *             **NULL** if not found or there was an error in adding
  *             a new bpf-local-storage.
  *
- * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
+ * long bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
  *     Description
  *             Delete a bpf-local-storage from a *sk*.
  *     Return
@@ -2818,7 +2825,7 @@ union bpf_attr {
  *
  *             **-ENOENT** if the bpf-local-storage cannot be found.
  *
- * int bpf_send_signal(u32 sig)
+ * long bpf_send_signal(u32 sig)
  *     Description
  *             Send signal *sig* to the process of the current task.
  *             The signal may be delivered to any of this process's threads.
@@ -2859,7 +2866,7 @@ union bpf_attr {
  *
  *             **-EPROTONOSUPPORT** IP packet version is not 4 or 6
  *
- * int bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
  *     Description
  *             Write raw *data* blob into a special BPF perf event held by
  *             *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
@@ -2883,21 +2890,21 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
  *     Description
  *             Safely attempt to read *size* bytes from user space address
  *             *unsafe_ptr* and store the data in *dst*.
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
  *     Description
  *             Safely attempt to read *size* bytes from kernel space address
  *             *unsafe_ptr* and store the data in *dst*.
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
  *     Description
  *             Copy a NUL terminated string from an unsafe user address
  *             *unsafe_ptr* to *dst*. The *size* should include the
@@ -2941,7 +2948,7 @@ union bpf_attr {
  *             including the trailing NUL character. On error, a negative
  *             value.
  *
- * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
  *     Description
  *             Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
  *             to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply.
@@ -2949,14 +2956,14 @@ union bpf_attr {
  *             On success, the strictly positive length of the string, including
  *             the trailing NUL character. On error, a negative value.
  *
- * int bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
+ * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
  *     Description
  *             Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**.
  *             *rcv_nxt* is the ack_seq to be sent out.
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_send_signal_thread(u32 sig)
+ * long bpf_send_signal_thread(u32 sig)
  *     Description
  *             Send signal *sig* to the thread corresponding to the current task.
  *     Return
@@ -2976,7 +2983,7 @@ union bpf_attr {
  *     Return
  *             The 64 bit jiffies
  *
- * int bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
+ * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
  *     Description
  *             For an eBPF program attached to a perf event, retrieve the
  *             branch records (**struct perf_branch_entry**) associated to *ctx*
@@ -2995,7 +3002,7 @@ union bpf_attr {
  *
  *             **-ENOENT** if architecture does not support branch records.
  *
- * int bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
+ * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
  *     Description
  *             Returns 0 on success, values for *pid* and *tgid* as seen from the current
  *             *namespace* will be returned in *nsdata*.
@@ -3007,7 +3014,7 @@ union bpf_attr {
  *
  *             **-ENOENT** if pidns does not exists for the current task.
  *
- * int bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
  *     Description
  *             Write raw *data* blob into a special BPF perf event held by
  *             *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
@@ -3062,8 +3069,12 @@ union bpf_attr {
  *     Return
  *             The id is returned or 0 in case the id could not be retrieved.
  *
- * int bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags)
+ * long bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags)
  *     Description
+ *             Helper is overloaded depending on BPF program type. This
+ *             description applies to **BPF_PROG_TYPE_SCHED_CLS** and
+ *             **BPF_PROG_TYPE_SCHED_ACT** programs.
+ *
  *             Assign the *sk* to the *skb*. When combined with appropriate
  *             routing configuration to receive the packet towards the socket,
  *             will cause *skb* to be delivered to the specified socket.
@@ -3089,6 +3100,56 @@ union bpf_attr {
  *             **-ESOCKTNOSUPPORT** if the socket type is not supported
  *             (reuseport).
  *
+ * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags)
+ *     Description
+ *             Helper is overloaded depending on BPF program type. This
+ *             description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs.
+ *
+ *             Select the *sk* as a result of a socket lookup.
+ *
+ *             For the operation to succeed passed socket must be compatible
+ *             with the packet description provided by the *ctx* object.
+ *
+ *             L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must
+ *             be an exact match. While IP family (**AF_INET** or
+ *             **AF_INET6**) must be compatible, that is IPv6 sockets
+ *             that are not v6-only can be selected for IPv4 packets.
+ *
+ *             Only TCP listeners and UDP unconnected sockets can be
+ *             selected. *sk* can also be NULL to reset any previous
+ *             selection.
+ *
+ *             *flags* argument can be a combination of the following values:
+ *
+ *             * **BPF_SK_LOOKUP_F_REPLACE** to override the previous
+ *               socket selection, potentially done by a BPF program
+ *               that ran before us.
+ *
+ *             * **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip
+ *               load-balancing within reuseport group for the socket
+ *               being selected.
+ *
+ *             On success *ctx->sk* will point to the selected socket.
+ *
+ *     Return
+ *             0 on success, or a negative errno in case of failure.
+ *
+ *             * **-EAFNOSUPPORT** if socket family (*sk->family*) is
+ *               not compatible with packet family (*ctx->family*).
+ *
+ *             * **-EEXIST** if socket has been already selected,
+ *               potentially by another program, and
+ *               **BPF_SK_LOOKUP_F_REPLACE** flag was not specified.
+ *
+ *             * **-EINVAL** if unsupported flags were specified.
+ *
+ *             * **-EPROTOTYPE** if socket L4 protocol
+ *               (*sk->protocol*) doesn't match packet protocol
+ *               (*ctx->protocol*).
+ *
+ *             * **-ESOCKTNOSUPPORT** if socket is not in allowed
+ *               state (TCP listening or UDP unconnected).
+ *
  * u64 bpf_ktime_get_boot_ns(void)
  *     Description
  *             Return the time elapsed since system boot, in nanoseconds.
@@ -3097,7 +3158,7 @@ union bpf_attr {
  *     Return
  *             Current *ktime*.
  *
- * int bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len)
+ * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len)
  *     Description
  *             **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print
  *             out the format string.
@@ -3126,7 +3187,7 @@ union bpf_attr {
  *
  *             **-EOVERFLOW** if an overflow happened: The same object will be tried again.
  *
- * int bpf_seq_write(struct seq_file *m, const void *data, u32 len)
+ * long bpf_seq_write(struct seq_file *m, const void *data, u32 len)
  *     Description
  *             **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data.
  *             The *m* represents the seq_file. The *data* and *len* represent the
@@ -3168,16 +3229,15 @@ union bpf_attr {
  *     Return
  *             The id is returned or 0 in case the id could not be retrieved.
  *
- * void *bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
+ * int bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
  *     Description
  *             Copy *size* bytes from *data* into a ring buffer *ringbuf*.
- *             If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- *             new data availability is sent.
- *             IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- *             new data availability is sent unconditionally.
+ *             If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ *             of new data availability is sent.
+ *             If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ *             of new data availability is sent unconditionally.
  *     Return
- *             0, on success;
- *             < 0, on error.
+ *             0 on success, or a negative error in case of failure.
  *
  * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
  *     Description
@@ -3189,20 +3249,20 @@ union bpf_attr {
  * void bpf_ringbuf_submit(void *data, u64 flags)
  *     Description
  *             Submit reserved ring buffer sample, pointed to by *data*.
- *             If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- *             new data availability is sent.
- *             IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- *             new data availability is sent unconditionally.
+ *             If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ *             of new data availability is sent.
+ *             If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ *             of new data availability is sent unconditionally.
  *     Return
  *             Nothing. Always succeeds.
  *
  * void bpf_ringbuf_discard(void *data, u64 flags)
  *     Description
  *             Discard reserved ring buffer sample, pointed to by *data*.
- *             If BPF_RB_NO_WAKEUP is specified in *flags*, no notification of
- *             new data availability is sent.
- *             IF BPF_RB_FORCE_WAKEUP is specified in *flags*, notification of
- *             new data availability is sent unconditionally.
+ *             If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ *             of new data availability is sent.
+ *             If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ *             of new data availability is sent unconditionally.
  *     Return
  *             Nothing. Always succeeds.
  *
@@ -3210,18 +3270,20 @@ union bpf_attr {
  *     Description
  *             Query various characteristics of provided ring buffer. What
 *             exactly is queried is determined by *flags*:
- *               - BPF_RB_AVAIL_DATA - amount of data not yet consumed;
- *               - BPF_RB_RING_SIZE - the size of ring buffer;
- *               - BPF_RB_CONS_POS - consumer position (can wrap around);
- *               - BPF_RB_PROD_POS - producer(s) position (can wrap around);
- *             Data returned is just a momentary snapshots of actual values
+ *
+ *             * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
+ *             * **BPF_RB_RING_SIZE**: The size of ring buffer.
+ *             * **BPF_RB_CONS_POS**: Consumer position (can wrap around).
+ *             * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
+ *
+ *             Data returned is just a momentary snapshot of actual values
  *             and could be inaccurate, so this facility should be used to
  *             power heuristics and for reporting, not to make 100% correct
  *             calculation.
  *     Return
- *             Requested value, or 0, if flags are not recognized.
+ *             Requested value, or 0, if *flags* are not recognized.
  *
- * int bpf_csum_level(struct sk_buff *skb, u64 level)
+ * long bpf_csum_level(struct sk_buff *skb, u64 level)
  *     Description
  *             Change the skbs checksum level by one layer up or down, or
  *             reset it entirely to none in order to have the stack perform
@@ -3252,6 +3314,69 @@ union bpf_attr {
  *             case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level
  *             is returned or the error code -EACCES in case the skb is not
  *             subject to CHECKSUM_UNNECESSARY.
+ *
+ * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk)
+ *     Description
+ *             Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
+ *     Return
+ *             *sk* if casting is valid, or NULL otherwise.
+ *
+ * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk)
+ *     Description
+ *             Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
+ *     Return
+ *             *sk* if casting is valid, or NULL otherwise.
+ *
+ * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk)
+ *     Description
+ *             Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer.
+ *     Return
+ *             *sk* if casting is valid, or NULL otherwise.
+ *
+ * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk)
+ *     Description
+ *             Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer.
+ *     Return
+ *             *sk* if casting is valid, or NULL otherwise.
+ *
+ * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk)
+ *     Description
+ *             Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
+ *     Return
+ *             *sk* if casting is valid, or NULL otherwise.
+ *
+ * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
+ *     Description
+ *             Return a user or a kernel stack in bpf program provided buffer.
+ *             To achieve this, the helper needs *task*, which is a valid
+ *             pointer to struct task_struct. To store the stacktrace, the
+ *             bpf program provides *buf* with a nonnegative *size*.
+ *
+ *             The last argument, *flags*, holds the number of stack frames to
+ *             skip (from 0 to 255), masked with
+ *             **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
+ *             the following flags:
+ *
+ *             **BPF_F_USER_STACK**
+ *                     Collect a user space stack instead of a kernel stack.
+ *             **BPF_F_USER_BUILD_ID**
+ *                     Collect buildid+offset instead of ips for user stack,
+ *                     only valid if **BPF_F_USER_STACK** is also specified.
+ *
+ *             **bpf_get_task_stack**\ () can collect up to
+ *             **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
+ *             to sufficiently large buffer size. Note that
+ *             this limit can be controlled with the **sysctl** program, and
+ *             that it should be manually increased in order to profile long
+ *             user stacks (such as stacks for Java programs). To do so, use:
+ *
+ *             ::
+ *
+ *                     # sysctl kernel.perf_event_max_stack=<new value>
+ *     Return
+ *             A non-negative value equal to or less than *size* on success,
+ *             or a negative error in case of failure.
+ *
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
@@ -3389,7 +3514,14 @@ union bpf_attr {
        FN(ringbuf_submit),             \
        FN(ringbuf_discard),            \
        FN(ringbuf_query),              \
-       FN(csum_level),
+       FN(csum_level),                 \
+       FN(skc_to_tcp6_sock),           \
+       FN(skc_to_tcp_sock),            \
+       FN(skc_to_tcp_timewait_sock),   \
+       FN(skc_to_tcp_request_sock),    \
+       FN(skc_to_udp6_sock),           \
+       FN(get_task_stack),             \
+       /* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
@@ -3531,6 +3663,12 @@ enum {
        BPF_RINGBUF_HDR_SZ              = 8,
 };
 
+/* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */
+enum {
+       BPF_SK_LOOKUP_F_REPLACE         = (1ULL << 0),
+       BPF_SK_LOOKUP_F_NO_REUSEPORT    = (1ULL << 1),
+};
+
 /* Mode for BPF_FUNC_skb_adjust_room helper. */
 enum bpf_adj_room_mode {
        BPF_ADJ_ROOM_NET,
@@ -3774,6 +3912,19 @@ struct bpf_devmap_val {
        } bpf_prog;
 };
 
+/* CPUMAP map-value layout
+ *
+ * The struct data-layout of map-value is a configuration interface.
+ * New members can only be added to the end of this structure.
+ */
+struct bpf_cpumap_val {
+       __u32 qsize;    /* queue size to remote target CPU */
+       union {
+               int   fd;       /* prog fd on map write */
+               __u32 id;       /* prog id on map read */
+       } bpf_prog;
+};
+
 enum sk_action {
        SK_DROP = 0,
        SK_PASS,
@@ -3911,7 +4062,7 @@ struct bpf_link_info {
 
 /* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
  * by user and intended to be used by socket (e.g. to bind to, depends on
- * attach attach type).
+ * attach type).
  */
 struct bpf_sock_addr {
        __u32 user_family;      /* Allows 4-byte read, but no write. */
@@ -4260,4 +4411,19 @@ struct bpf_pidns_info {
        __u32 pid;
        __u32 tgid;
 };
+
+/* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
+struct bpf_sk_lookup {
+       __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
+
+       __u32 family;           /* Protocol family (AF_INET, AF_INET6) */
+       __u32 protocol;         /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
+       __u32 remote_ip4;       /* Network byte order */
+       __u32 remote_ip6[4];    /* Network byte order */
+       __u32 remote_port;      /* Network byte order */
+       __u32 local_ip4;        /* Network byte order */
+       __u32 local_ip6[4];     /* Network byte order */
+       __u32 local_port;       /* Host byte order */
+};
+
 #endif /* _UAPI__LINUX_BPF_H__ */
index ca88b7b..2f86b2a 100644 (file)
 #define DN_ATTRIB      0x00000020      /* File changed attributes */
 #define DN_MULTISHOT   0x80000000      /* Don't remove notifier */
 
+/*
+ * The constants AT_REMOVEDIR and AT_EACCESS have the same value.  AT_EACCESS is
+ * meaningful only to faccessat, while AT_REMOVEDIR is meaningful only to
+ * unlinkat.  The two functions do completely different things and therefore,
+ * the flags can be allowed to overlap.  For example, passing AT_REMOVEDIR to
+ * faccessat would be undefined behavior and thus treating it equivalent to
+ * AT_EACCESS is valid undefined behavior.
+ */
 #define AT_FDCWD               -100    /* Special value used to indicate
                                            openat should use the current
                                            working directory. */
 #define AT_SYMLINK_NOFOLLOW    0x100   /* Do not follow symbolic links.  */
+#define AT_EACCESS             0x200   /* Test access permitted for
+                                           effective IDs, not real IDs.  */
 #define AT_REMOVEDIR           0x200   /* Remove directory instead of
                                            unlinking file.  */
 #define AT_SYMLINK_FOLLOW      0x400   /* Follow symbolic links.  */
index 379a612..f44eb0a 100644 (file)
@@ -262,6 +262,7 @@ struct fsxattr {
 #define FS_EA_INODE_FL                 0x00200000 /* Inode used for large EA */
 #define FS_EOFBLOCKS_FL                        0x00400000 /* Reserved for ext4 */
 #define FS_NOCOW_FL                    0x00800000 /* Do not cow file */
+#define FS_DAX_FL                      0x02000000 /* Inode is DAX */
 #define FS_INLINE_DATA_FL              0x10000000 /* Reserved for ext4 */
 #define FS_PROJINHERIT_FL              0x20000000 /* Create with parents projid */
 #define FS_CASEFOLD_FL                 0x40000000 /* Folder is case insensitive */
index a10e3cd..7875709 100644 (file)
@@ -19,7 +19,8 @@
 #define FSCRYPT_POLICY_FLAGS_PAD_MASK          0x03
 #define FSCRYPT_POLICY_FLAG_DIRECT_KEY         0x04
 #define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64     0x08
-#define FSCRYPT_POLICY_FLAGS_VALID             0x0F
+#define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32     0x10
+#define FSCRYPT_POLICY_FLAGS_VALID             0x1F
 
 /* Encryption algorithms */
 #define FSCRYPT_MODE_AES_256_XTS               1
index cafedbb..781e482 100644 (file)
@@ -344,6 +344,7 @@ enum {
        IFLA_BRPORT_ISOLATED,
        IFLA_BRPORT_BACKUP_PORT,
        IFLA_BRPORT_MRP_RING_OPEN,
+       IFLA_BRPORT_MRP_IN_OPEN,
        __IFLA_BRPORT_MAX
 };
 #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
index be328c5..a78a809 100644 (file)
@@ -73,9 +73,12 @@ struct xdp_umem_reg {
 };
 
 struct xdp_statistics {
-       __u64 rx_dropped; /* Dropped for reasons other than invalid desc */
+       __u64 rx_dropped; /* Dropped for other reasons */
        __u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
        __u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
+       __u64 rx_ring_full; /* Dropped due to rx ring being full */
+       __u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */
+       __u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */
 };
 
 struct xdp_options {
index fdd632c..4fdf303 100644 (file)
@@ -188,10 +188,13 @@ struct kvm_s390_cmma_log {
 struct kvm_hyperv_exit {
 #define KVM_EXIT_HYPERV_SYNIC          1
 #define KVM_EXIT_HYPERV_HCALL          2
+#define KVM_EXIT_HYPERV_SYNDBG         3
        __u32 type;
+       __u32 pad1;
        union {
                struct {
                        __u32 msr;
+                       __u32 pad2;
                        __u64 control;
                        __u64 evt_page;
                        __u64 msg_page;
@@ -201,6 +204,15 @@ struct kvm_hyperv_exit {
                        __u64 result;
                        __u64 params[2];
                } hcall;
+               struct {
+                       __u32 msr;
+                       __u32 pad2;
+                       __u64 control;
+                       __u64 status;
+                       __u64 send_page;
+                       __u64 recv_page;
+                       __u64 pending_page;
+               } syndbg;
        } u;
 };
 
@@ -1017,6 +1029,8 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_S390_VCPU_RESETS 179
 #define KVM_CAP_S390_PROTECTED 180
 #define KVM_CAP_PPC_SECURE_GUEST 181
+#define KVM_CAP_HALT_POLL 182
+#define KVM_CAP_ASYNC_PF_INT 183
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index d119278..82cc58f 100644 (file)
@@ -123,7 +123,10 @@ struct statx {
        __u32   stx_dev_major;  /* ID of device containing file [uncond] */
        __u32   stx_dev_minor;
        /* 0x90 */
-       __u64   __spare2[14];   /* Spare space for future expansion */
+       __u64   stx_mnt_id;
+       __u64   __spare2;
+       /* 0xa0 */
+       __u64   __spare3[12];   /* Spare space for future expansion */
        /* 0x100 */
 };
 
@@ -148,6 +151,7 @@ struct statx {
 #define STATX_BLOCKS           0x00000400U     /* Want/got stx_blocks */
 #define STATX_BASIC_STATS      0x000007ffU     /* The stuff in the normal stat struct */
 #define STATX_BTIME            0x00000800U     /* Want/got stx_btime */
+#define STATX_MNT_ID           0x00001000U     /* Got stx_mnt_id */
 
 #define STATX__RESERVED                0x80000000U     /* Reserved for future struct statx expansion */
 
@@ -177,7 +181,9 @@ struct statx {
 #define STATX_ATTR_NODUMP              0x00000040 /* [I] File is not to be dumped */
 #define STATX_ATTR_ENCRYPTED           0x00000800 /* [I] File requires key to decrypt in fs */
 #define STATX_ATTR_AUTOMOUNT           0x00001000 /* Dir: Automount trigger */
+#define STATX_ATTR_MOUNT_ROOT          0x00002000 /* Root of a mount */
 #define STATX_ATTR_VERITY              0x00100000 /* [I] Verity protected file */
+#define STATX_ATTR_DAX                 0x00002000 /* [I] File is DAX — NOTE(review): value collides with STATX_ATTR_MOUNT_ROOT (0x00002000) above; confirm intended */
 
 
 #endif /* _UAPI_LINUX_STAT_H */
index 9fe72e4..0c23496 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/types.h>
 #include <linux/ioctl.h>
 
+#define VHOST_FILE_UNBIND -1
+
 /* ioctls */
 
 #define VHOST_VIRTIO 0xAF
 /* Get the max ring size. */
 #define VHOST_VDPA_GET_VRING_NUM       _IOR(VHOST_VIRTIO, 0x76, __u16)
 
+/* Set event fd for config interrupt */
+#define VHOST_VDPA_SET_CONFIG_CALL     _IOW(VHOST_VIRTIO, 0x77, int)
 #endif
index 1b6015b..dbef24e 100644 (file)
@@ -233,6 +233,8 @@ LIBBPF_API int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf,
 LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
                                 __u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
                                 __u64 *probe_offset, __u64 *probe_addr);
+
+enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */
 LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type);
 
 #ifdef __cplusplus
index 7009dc9..eae5ccc 100644 (file)
@@ -217,7 +217,7 @@ enum bpf_field_info_kind {
  */
 #define BPF_CORE_READ_INTO(dst, src, a, ...)                               \
        ({                                                                  \
-               ___core_read(bpf_core_read, dst, src, a, ##__VA_ARGS__)     \
+               ___core_read(bpf_core_read, dst, (src), a, ##__VA_ARGS__)   \
        })
 
 /*
@@ -227,7 +227,7 @@ enum bpf_field_info_kind {
  */
 #define BPF_CORE_READ_STR_INTO(dst, src, a, ...)                           \
        ({                                                                  \
-               ___core_read(bpf_core_read_str, dst, src, a, ##__VA_ARGS__) \
+               ___core_read(bpf_core_read_str, dst, (src), a, ##__VA_ARGS__)\
        })
 
 /*
@@ -254,8 +254,8 @@ enum bpf_field_info_kind {
  */
 #define BPF_CORE_READ(src, a, ...)                                         \
        ({                                                                  \
-               ___type(src, a, ##__VA_ARGS__) __r;                         \
-               BPF_CORE_READ_INTO(&__r, src, a, ##__VA_ARGS__);            \
+               ___type((src), a, ##__VA_ARGS__) __r;                       \
+               BPF_CORE_READ_INTO(&__r, (src), a, ##__VA_ARGS__);          \
                __r;                                                        \
        })
 
index fbe2800..ec9db4f 100644 (file)
@@ -2,8 +2,35 @@
 #ifndef __BPF_ENDIAN__
 #define __BPF_ENDIAN__
 
-#include <linux/stddef.h>
-#include <linux/swab.h>
+/*
+ * Isolate byte #n and put it into byte #m, for __u##b type.
+ * E.g., moving byte #6 (nnnnnnnn) into byte #1 (mmmmmmmm) for __u64:
+ * 1) xxxxxxxx nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx
+ * 2) nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx 00000000
+ * 3) 00000000 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn
+ * 4) 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn 00000000
+ */
+#define ___bpf_mvb(x, b, n, m) ((__u##b)(x) << (b-(n+1)*8) >> (b-8) << (m*8))
+
+#define ___bpf_swab16(x) ((__u16)(                     \
+                         ___bpf_mvb(x, 16, 0, 1) |     \
+                         ___bpf_mvb(x, 16, 1, 0)))
+
+#define ___bpf_swab32(x) ((__u32)(                     \
+                         ___bpf_mvb(x, 32, 0, 3) |     \
+                         ___bpf_mvb(x, 32, 1, 2) |     \
+                         ___bpf_mvb(x, 32, 2, 1) |     \
+                         ___bpf_mvb(x, 32, 3, 0)))
+
+#define ___bpf_swab64(x) ((__u64)(                     \
+                         ___bpf_mvb(x, 64, 0, 7) |     \
+                         ___bpf_mvb(x, 64, 1, 6) |     \
+                         ___bpf_mvb(x, 64, 2, 5) |     \
+                         ___bpf_mvb(x, 64, 3, 4) |     \
+                         ___bpf_mvb(x, 64, 4, 3) |     \
+                         ___bpf_mvb(x, 64, 5, 2) |     \
+                         ___bpf_mvb(x, 64, 6, 1) |     \
+                         ___bpf_mvb(x, 64, 7, 0)))
 
 /* LLVM's BPF target selects the endianness of the CPU
  * it compiles on, or the user specifies (bpfel/bpfeb),
 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 # define __bpf_ntohs(x)                        __builtin_bswap16(x)
 # define __bpf_htons(x)                        __builtin_bswap16(x)
-# define __bpf_constant_ntohs(x)       ___constant_swab16(x)
-# define __bpf_constant_htons(x)       ___constant_swab16(x)
+# define __bpf_constant_ntohs(x)       ___bpf_swab16(x)
+# define __bpf_constant_htons(x)       ___bpf_swab16(x)
 # define __bpf_ntohl(x)                        __builtin_bswap32(x)
 # define __bpf_htonl(x)                        __builtin_bswap32(x)
-# define __bpf_constant_ntohl(x)       ___constant_swab32(x)
-# define __bpf_constant_htonl(x)       ___constant_swab32(x)
+# define __bpf_constant_ntohl(x)       ___bpf_swab32(x)
+# define __bpf_constant_htonl(x)       ___bpf_swab32(x)
 # define __bpf_be64_to_cpu(x)          __builtin_bswap64(x)
 # define __bpf_cpu_to_be64(x)          __builtin_bswap64(x)
-# define __bpf_constant_be64_to_cpu(x) ___constant_swab64(x)
-# define __bpf_constant_cpu_to_be64(x) ___constant_swab64(x)
+# define __bpf_constant_be64_to_cpu(x) ___bpf_swab64(x)
+# define __bpf_constant_cpu_to_be64(x) ___bpf_swab64(x)
 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 # define __bpf_ntohs(x)                        (x)
 # define __bpf_htons(x)                        (x)
index f67dce2..bc14db7 100644 (file)
@@ -40,7 +40,7 @@
  * Helper macro to manipulate data structures
  */
 #ifndef offsetof
-#define offsetof(TYPE, MEMBER)  ((size_t)&((TYPE *)0)->MEMBER)
+#define offsetof(TYPE, MEMBER)  __builtin_offsetof(TYPE, MEMBER)
 #endif
 #ifndef container_of
 #define container_of(ptr, type, member)                                \
@@ -75,5 +75,6 @@ enum libbpf_tristate {
 };
 
 #define __kconfig __attribute__((section(".kconfig")))
+#define __ksym __attribute__((section(".ksyms")))
 
 #endif
index bfef3d6..c9e760e 100644 (file)
@@ -389,7 +389,7 @@ void btf__free(struct btf *btf)
        if (!btf)
                return;
 
-       if (btf->fd != -1)
+       if (btf->fd >= 0)
                close(btf->fd);
 
        free(btf->data);
@@ -397,7 +397,7 @@ void btf__free(struct btf *btf)
        free(btf);
 }
 
-struct btf *btf__new(__u8 *data, __u32 size)
+struct btf *btf__new(const void *data, __u32 size)
 {
        struct btf *btf;
        int err;
@@ -700,6 +700,11 @@ int btf__fd(const struct btf *btf)
        return btf->fd;
 }
 
+void btf__set_fd(struct btf *btf, int fd)
+{
+       btf->fd = fd;
+}
+
 const void *btf__get_raw_data(const struct btf *btf, __u32 *size)
 {
        *size = btf->data_size;
index 70c1b7e..491c7b4 100644 (file)
@@ -63,7 +63,7 @@ struct btf_ext_header {
 };
 
 LIBBPF_API void btf__free(struct btf *btf);
-LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size);
+LIBBPF_API struct btf *btf__new(const void *data, __u32 size);
 LIBBPF_API struct btf *btf__parse_elf(const char *path,
                                      struct btf_ext **btf_ext);
 LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
@@ -79,6 +79,7 @@ LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
 LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
 LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id);
 LIBBPF_API int btf__fd(const struct btf *btf);
+LIBBPF_API void btf__set_fd(struct btf *btf, int fd);
 LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
 LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
 LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf);
@@ -143,8 +144,10 @@ struct btf_dump_emit_type_decl_opts {
         * necessary indentation already
         */
        int indent_level;
+       /* strip all the const/volatile/restrict mods */
+       bool strip_mods;
 };
-#define btf_dump_emit_type_decl_opts__last_field indent_level
+#define btf_dump_emit_type_decl_opts__last_field strip_mods
 
 LIBBPF_API int
 btf_dump__emit_type_decl(struct btf_dump *d, __u32 id,
@@ -168,6 +171,11 @@ static inline bool btf_kflag(const struct btf_type *t)
        return BTF_INFO_KFLAG(t->info);
 }
 
+static inline bool btf_is_void(const struct btf_type *t)
+{
+       return btf_kind(t) == BTF_KIND_UNKN;
+}
+
 static inline bool btf_is_int(const struct btf_type *t)
 {
        return btf_kind(t) == BTF_KIND_INT;
index bbb4303..e1c3445 100644 (file)
@@ -60,6 +60,7 @@ struct btf_dump {
        const struct btf_ext *btf_ext;
        btf_dump_printf_fn_t printf_fn;
        struct btf_dump_opts opts;
+       bool strip_mods;
 
        /* per-type auxiliary state */
        struct btf_dump_type_aux_state *type_states;
@@ -1032,7 +1033,9 @@ int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id,
 
        fname = OPTS_GET(opts, field_name, "");
        lvl = OPTS_GET(opts, indent_level, 0);
+       d->strip_mods = OPTS_GET(opts, strip_mods, false);
        btf_dump_emit_type_decl(d, id, fname, lvl);
+       d->strip_mods = false;
        return 0;
 }
 
@@ -1045,6 +1048,10 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
 
        stack_start = d->decl_stack_cnt;
        for (;;) {
+               t = btf__type_by_id(d->btf, id);
+               if (d->strip_mods && btf_is_mod(t))
+                       goto skip_mod;
+
                err = btf_dump_push_decl_stack_id(d, id);
                if (err < 0) {
                        /*
@@ -1056,12 +1063,11 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
                        d->decl_stack_cnt = stack_start;
                        return;
                }
-
+skip_mod:
                /* VOID */
                if (id == 0)
                        break;
 
-               t = btf__type_by_id(d->btf, id);
                switch (btf_kind(t)) {
                case BTF_KIND_PTR:
                case BTF_KIND_VOLATILE:
index df59fd4..e0af36b 100644 (file)
 #include <stdbool.h>
 #include <stddef.h>
 #include <limits.h>
-#ifndef __WORDSIZE
-#define __WORDSIZE (__SIZEOF_LONG__ * 8)
-#endif
 
 static inline size_t hash_bits(size_t h, int bits)
 {
        /* shuffle bits and return requested number of upper bits */
-       return (h * 11400714819323198485llu) >> (__WORDSIZE - bits);
+#if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__)
+       /* LP64 case */
+       return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits);
+#elif (__SIZEOF_SIZE_T__ <= __SIZEOF_LONG__)
+       return (h * 2654435769lu) >> (__SIZEOF_LONG__ * 8 - bits);
+#else
+#      error "Unsupported size_t size"
+#endif
 }
 
 typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx);
index 477c679..846164c 100644 (file)
@@ -230,6 +230,7 @@ struct bpf_program {
        struct bpf_insn *insns;
        size_t insns_cnt, main_prog_cnt;
        enum bpf_prog_type type;
+       bool load;
 
        struct reloc_desc *reloc_desc;
        int nr_reloc;
@@ -285,6 +286,7 @@ struct bpf_struct_ops {
 #define BSS_SEC ".bss"
 #define RODATA_SEC ".rodata"
 #define KCONFIG_SEC ".kconfig"
+#define KSYMS_SEC ".ksyms"
 #define STRUCT_OPS_SEC ".struct_ops"
 
 enum libbpf_map_type {
@@ -310,6 +312,7 @@ struct bpf_map {
        int map_ifindex;
        int inner_map_fd;
        struct bpf_map_def def;
+       __u32 numa_node;
        __u32 btf_var_idx;
        __u32 btf_key_type_id;
        __u32 btf_value_type_id;
@@ -329,24 +332,39 @@ struct bpf_map {
 
 enum extern_type {
        EXT_UNKNOWN,
-       EXT_CHAR,
-       EXT_BOOL,
-       EXT_INT,
-       EXT_TRISTATE,
-       EXT_CHAR_ARR,
+       EXT_KCFG,
+       EXT_KSYM,
+};
+
+enum kcfg_type {
+       KCFG_UNKNOWN,
+       KCFG_CHAR,
+       KCFG_BOOL,
+       KCFG_INT,
+       KCFG_TRISTATE,
+       KCFG_CHAR_ARR,
 };
 
 struct extern_desc {
-       const char *name;
+       enum extern_type type;
        int sym_idx;
        int btf_id;
-       enum extern_type type;
-       int sz;
-       int align;
-       int data_off;
-       bool is_signed;
-       bool is_weak;
+       int sec_btf_id;
+       const char *name;
        bool is_set;
+       bool is_weak;
+       union {
+               struct {
+                       enum kcfg_type type;
+                       int sz;
+                       int align;
+                       int data_off;
+                       bool is_signed;
+               } kcfg;
+               struct {
+                       unsigned long long addr;
+               } ksym;
+       };
 };
 
 static LIST_HEAD(bpf_objects_list);
@@ -524,6 +542,7 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
        prog->instances.fds = NULL;
        prog->instances.nr = -1;
        prog->type = BPF_PROG_TYPE_UNSPEC;
+       prog->load = true;
 
        return 0;
 errout:
@@ -1423,19 +1442,19 @@ static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
        return NULL;
 }
 
-static int set_ext_value_tri(struct extern_desc *ext, void *ext_val,
-                            char value)
+static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
+                             char value)
 {
-       switch (ext->type) {
-       case EXT_BOOL:
+       switch (ext->kcfg.type) {
+       case KCFG_BOOL:
                if (value == 'm') {
-                       pr_warn("extern %s=%c should be tristate or char\n",
+                       pr_warn("extern (kcfg) %s=%c should be tristate or char\n",
                                ext->name, value);
                        return -EINVAL;
                }
                *(bool *)ext_val = value == 'y' ? true : false;
                break;
-       case EXT_TRISTATE:
+       case KCFG_TRISTATE:
                if (value == 'y')
                        *(enum libbpf_tristate *)ext_val = TRI_YES;
                else if (value == 'm')
@@ -1443,14 +1462,14 @@ static int set_ext_value_tri(struct extern_desc *ext, void *ext_val,
                else /* value == 'n' */
                        *(enum libbpf_tristate *)ext_val = TRI_NO;
                break;
-       case EXT_CHAR:
+       case KCFG_CHAR:
                *(char *)ext_val = value;
                break;
-       case EXT_UNKNOWN:
-       case EXT_INT:
-       case EXT_CHAR_ARR:
+       case KCFG_UNKNOWN:
+       case KCFG_INT:
+       case KCFG_CHAR_ARR:
        default:
-               pr_warn("extern %s=%c should be bool, tristate, or char\n",
+               pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n",
                        ext->name, value);
                return -EINVAL;
        }
@@ -1458,29 +1477,29 @@ static int set_ext_value_tri(struct extern_desc *ext, void *ext_val,
        return 0;
 }
 
-static int set_ext_value_str(struct extern_desc *ext, char *ext_val,
-                            const char *value)
+static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
+                             const char *value)
 {
        size_t len;
 
-       if (ext->type != EXT_CHAR_ARR) {
-               pr_warn("extern %s=%s should char array\n", ext->name, value);
+       if (ext->kcfg.type != KCFG_CHAR_ARR) {
+               pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value);
                return -EINVAL;
        }
 
        len = strlen(value);
        if (value[len - 1] != '"') {
-               pr_warn("extern '%s': invalid string config '%s'\n",
+               pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
                        ext->name, value);
                return -EINVAL;
        }
 
        /* strip quotes */
        len -= 2;
-       if (len >= ext->sz) {
-               pr_warn("extern '%s': long string config %s of (%zu bytes) truncated to %d bytes\n",
-                       ext->name, value, len, ext->sz - 1);
-               len = ext->sz - 1;
+       if (len >= ext->kcfg.sz) {
+               pr_warn("extern (kcfg) '%s': long string config %s of (%zu bytes) truncated to %d bytes\n",
+                       ext->name, value, len, ext->kcfg.sz - 1);
+               len = ext->kcfg.sz - 1;
        }
        memcpy(ext_val, value + 1, len);
        ext_val[len] = '\0';
@@ -1507,11 +1526,11 @@ static int parse_u64(const char *value, __u64 *res)
        return 0;
 }
 
-static bool is_ext_value_in_range(const struct extern_desc *ext, __u64 v)
+static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
 {
-       int bit_sz = ext->sz * 8;
+       int bit_sz = ext->kcfg.sz * 8;
 
-       if (ext->sz == 8)
+       if (ext->kcfg.sz == 8)
                return true;
 
        /* Validate that value stored in u64 fits in integer of `ext->sz`
@@ -1526,26 +1545,26 @@ static bool is_ext_value_in_range(const struct extern_desc *ext, __u64 v)
         *  For unsigned target integer, check that all the (64 - Y) bits are
         *  zero.
         */
-       if (ext->is_signed)
+       if (ext->kcfg.is_signed)
                return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
        else
                return (v >> bit_sz) == 0;
 }
 
-static int set_ext_value_num(struct extern_desc *ext, void *ext_val,
-                            __u64 value)
+static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
+                             __u64 value)
 {
-       if (ext->type != EXT_INT && ext->type != EXT_CHAR) {
-               pr_warn("extern %s=%llu should be integer\n",
+       if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
+               pr_warn("extern (kcfg) %s=%llu should be integer\n",
                        ext->name, (unsigned long long)value);
                return -EINVAL;
        }
-       if (!is_ext_value_in_range(ext, value)) {
-               pr_warn("extern %s=%llu value doesn't fit in %d bytes\n",
-                       ext->name, (unsigned long long)value, ext->sz);
+       if (!is_kcfg_value_in_range(ext, value)) {
+               pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n",
+                       ext->name, (unsigned long long)value, ext->kcfg.sz);
                return -ERANGE;
        }
-       switch (ext->sz) {
+       switch (ext->kcfg.sz) {
                case 1: *(__u8 *)ext_val = value; break;
                case 2: *(__u16 *)ext_val = value; break;
                case 4: *(__u32 *)ext_val = value; break;
@@ -1591,30 +1610,30 @@ static int bpf_object__process_kconfig_line(struct bpf_object *obj,
        if (!ext || ext->is_set)
                return 0;
 
-       ext_val = data + ext->data_off;
+       ext_val = data + ext->kcfg.data_off;
        value = sep + 1;
 
        switch (*value) {
        case 'y': case 'n': case 'm':
-               err = set_ext_value_tri(ext, ext_val, *value);
+               err = set_kcfg_value_tri(ext, ext_val, *value);
                break;
        case '"':
-               err = set_ext_value_str(ext, ext_val, value);
+               err = set_kcfg_value_str(ext, ext_val, value);
                break;
        default:
                /* assume integer */
                err = parse_u64(value, &num);
                if (err) {
-                       pr_warn("extern %s=%s should be integer\n",
+                       pr_warn("extern (kcfg) %s=%s should be integer\n",
                                ext->name, value);
                        return err;
                }
-               err = set_ext_value_num(ext, ext_val, num);
+               err = set_kcfg_value_num(ext, ext_val, num);
                break;
        }
        if (err)
                return err;
-       pr_debug("extern %s=%s\n", ext->name, value);
+       pr_debug("extern (kcfg) %s=%s\n", ext->name, value);
        return 0;
 }
 
@@ -1685,16 +1704,20 @@ static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
 
 static int bpf_object__init_kconfig_map(struct bpf_object *obj)
 {
-       struct extern_desc *last_ext;
+       struct extern_desc *last_ext = NULL, *ext;
        size_t map_sz;
-       int err;
+       int i, err;
 
-       if (obj->nr_extern == 0)
-               return 0;
+       for (i = 0; i < obj->nr_extern; i++) {
+               ext = &obj->externs[i];
+               if (ext->type == EXT_KCFG)
+                       last_ext = ext;
+       }
 
-       last_ext = &obj->externs[obj->nr_extern - 1];
-       map_sz = last_ext->data_off + last_ext->sz;
+       if (!last_ext)
+               return 0;
 
+       map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
        err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
                                            obj->efile.symbols_shndx,
                                            NULL, map_sz);
@@ -1957,6 +1980,10 @@ static int parse_btf_map_def(struct bpf_object *obj,
                                return -EINVAL;
                        pr_debug("map '%s': found map_flags = %u.\n",
                                 map->name, map->def.map_flags);
+               } else if (strcmp(name, "numa_node") == 0) {
+                       if (!get_map_field_int(map->name, obj->btf, m, &map->numa_node))
+                               return -EINVAL;
+                       pr_debug("map '%s': found numa_node = %u.\n", map->name, map->numa_node);
                } else if (strcmp(name, "key_size") == 0) {
                        __u32 sz;
 
@@ -2311,18 +2338,23 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx)
        return false;
 }
 
-static void bpf_object__sanitize_btf(struct bpf_object *obj)
+static bool btf_needs_sanitization(struct bpf_object *obj)
+{
+       bool has_func_global = obj->caps.btf_func_global;
+       bool has_datasec = obj->caps.btf_datasec;
+       bool has_func = obj->caps.btf_func;
+
+       return !has_func || !has_datasec || !has_func_global;
+}
+
+static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
 {
        bool has_func_global = obj->caps.btf_func_global;
        bool has_datasec = obj->caps.btf_datasec;
        bool has_func = obj->caps.btf_func;
-       struct btf *btf = obj->btf;
        struct btf_type *t;
        int i, j, vlen;
 
-       if (!obj->btf || (has_func && has_datasec && has_func_global))
-               return;
-
        for (i = 1; i <= btf__get_nr_types(btf); i++) {
                t = (struct btf_type *)btf__type_by_id(btf, i);
 
@@ -2375,17 +2407,6 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj)
        }
 }
 
-static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
-{
-       if (!obj->btf_ext)
-               return;
-
-       if (!obj->caps.btf_func) {
-               btf_ext__free(obj->btf_ext);
-               obj->btf_ext = NULL;
-       }
-}
-
 static bool libbpf_needs_btf(const struct bpf_object *obj)
 {
        return obj->efile.btf_maps_shndx >= 0 ||
@@ -2446,19 +2467,11 @@ static int bpf_object__finalize_btf(struct bpf_object *obj)
                return 0;
 
        err = btf__finalize_data(obj, obj->btf);
-       if (!err)
-               return 0;
-
-       pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
-       btf__free(obj->btf);
-       obj->btf = NULL;
-       btf_ext__free(obj->btf_ext);
-       obj->btf_ext = NULL;
-
-       if (libbpf_needs_btf(obj)) {
-               pr_warn("BTF is required, but is missing or corrupted.\n");
-               return -ENOENT;
+       if (err) {
+               pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
+               return err;
        }
+
        return 0;
 }
 
@@ -2479,51 +2492,77 @@ static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog)
 
 static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)
 {
+       bool need_vmlinux_btf = false;
        struct bpf_program *prog;
        int err;
 
+       /* CO-RE relocations need kernel BTF */
+       if (obj->btf_ext && obj->btf_ext->field_reloc_info.len)
+               need_vmlinux_btf = true;
+
        bpf_object__for_each_program(prog, obj) {
+               if (!prog->load)
+                       continue;
                if (libbpf_prog_needs_vmlinux_btf(prog)) {
-                       obj->btf_vmlinux = libbpf_find_kernel_btf();
-                       if (IS_ERR(obj->btf_vmlinux)) {
-                               err = PTR_ERR(obj->btf_vmlinux);
-                               pr_warn("Error loading vmlinux BTF: %d\n", err);
-                               obj->btf_vmlinux = NULL;
-                               return err;
-                       }
-                       return 0;
+                       need_vmlinux_btf = true;
+                       break;
                }
        }
 
+       if (!need_vmlinux_btf)
+               return 0;
+
+       obj->btf_vmlinux = libbpf_find_kernel_btf();
+       if (IS_ERR(obj->btf_vmlinux)) {
+               err = PTR_ERR(obj->btf_vmlinux);
+               pr_warn("Error loading vmlinux BTF: %d\n", err);
+               obj->btf_vmlinux = NULL;
+               return err;
+       }
        return 0;
 }
 
 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
 {
+       struct btf *kern_btf = obj->btf;
+       bool btf_mandatory, sanitize;
        int err = 0;
 
        if (!obj->btf)
                return 0;
 
-       bpf_object__sanitize_btf(obj);
-       bpf_object__sanitize_btf_ext(obj);
+       sanitize = btf_needs_sanitization(obj);
+       if (sanitize) {
+               const void *raw_data;
+               __u32 sz;
 
-       err = btf__load(obj->btf);
-       if (err) {
-               pr_warn("Error loading %s into kernel: %d.\n",
-                       BTF_ELF_SEC, err);
-               btf__free(obj->btf);
-               obj->btf = NULL;
-               /* btf_ext can't exist without btf, so free it as well */
-               if (obj->btf_ext) {
-                       btf_ext__free(obj->btf_ext);
-                       obj->btf_ext = NULL;
-               }
+               /* clone BTF to sanitize a copy and leave the original intact */
+               raw_data = btf__get_raw_data(obj->btf, &sz);
+               kern_btf = btf__new(raw_data, sz);
+               if (IS_ERR(kern_btf))
+                       return PTR_ERR(kern_btf);
 
-               if (kernel_needs_btf(obj))
-                       return err;
+               bpf_object__sanitize_btf(obj, kern_btf);
        }
-       return 0;
+
+       err = btf__load(kern_btf);
+       if (sanitize) {
+               if (!err) {
+                       /* move fd to libbpf's BTF */
+                       btf__set_fd(obj->btf, btf__fd(kern_btf));
+                       btf__set_fd(kern_btf, -1);
+               }
+               btf__free(kern_btf);
+       }
+       if (err) {
+               btf_mandatory = kernel_needs_btf(obj);
+               pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
+                       btf_mandatory ? "BTF is mandatory, can't proceed."
+                                     : "BTF is optional, ignoring.");
+               if (!btf_mandatory)
+                       err = 0;
+       }
+       return err;
 }
 
 static int bpf_object__elf_collect(struct bpf_object *obj)
@@ -2709,8 +2748,33 @@ static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
        return -ENOENT;
 }
 
-static enum extern_type find_extern_type(const struct btf *btf, int id,
-                                        bool *is_signed)
+static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) {
+       const struct btf_var_secinfo *vs;
+       const struct btf_type *t;
+       int i, j, n;
+
+       if (!btf)
+               return -ESRCH;
+
+       n = btf__get_nr_types(btf);
+       for (i = 1; i <= n; i++) {
+               t = btf__type_by_id(btf, i);
+
+               if (!btf_is_datasec(t))
+                       continue;
+
+               vs = btf_var_secinfos(t);
+               for (j = 0; j < btf_vlen(t); j++, vs++) {
+                       if (vs->type == ext_btf_id)
+                               return i;
+               }
+       }
+
+       return -ENOENT;
+}
+
+static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
+                                    bool *is_signed)
 {
        const struct btf_type *t;
        const char *name;
@@ -2725,29 +2789,29 @@ static enum extern_type find_extern_type(const struct btf *btf, int id,
                int enc = btf_int_encoding(t);
 
                if (enc & BTF_INT_BOOL)
-                       return t->size == 1 ? EXT_BOOL : EXT_UNKNOWN;
+                       return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
                if (is_signed)
                        *is_signed = enc & BTF_INT_SIGNED;
                if (t->size == 1)
-                       return EXT_CHAR;
+                       return KCFG_CHAR;
                if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
-                       return EXT_UNKNOWN;
-               return EXT_INT;
+                       return KCFG_UNKNOWN;
+               return KCFG_INT;
        }
        case BTF_KIND_ENUM:
                if (t->size != 4)
-                       return EXT_UNKNOWN;
+                       return KCFG_UNKNOWN;
                if (strcmp(name, "libbpf_tristate"))
-                       return EXT_UNKNOWN;
-               return EXT_TRISTATE;
+                       return KCFG_UNKNOWN;
+               return KCFG_TRISTATE;
        case BTF_KIND_ARRAY:
                if (btf_array(t)->nelems == 0)
-                       return EXT_UNKNOWN;
-               if (find_extern_type(btf, btf_array(t)->type, NULL) != EXT_CHAR)
-                       return EXT_UNKNOWN;
-               return EXT_CHAR_ARR;
+                       return KCFG_UNKNOWN;
+               if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
+                       return KCFG_UNKNOWN;
+               return KCFG_CHAR_ARR;
        default:
-               return EXT_UNKNOWN;
+               return KCFG_UNKNOWN;
        }
 }
 
@@ -2756,23 +2820,45 @@ static int cmp_externs(const void *_a, const void *_b)
        const struct extern_desc *a = _a;
        const struct extern_desc *b = _b;
 
-       /* descending order by alignment requirements */
-       if (a->align != b->align)
-               return a->align > b->align ? -1 : 1;
-       /* ascending order by size, within same alignment class */
-       if (a->sz != b->sz)
-               return a->sz < b->sz ? -1 : 1;
+       if (a->type != b->type)
+               return a->type < b->type ? -1 : 1;
+
+       if (a->type == EXT_KCFG) {
+               /* descending order by alignment requirements */
+               if (a->kcfg.align != b->kcfg.align)
+                       return a->kcfg.align > b->kcfg.align ? -1 : 1;
+               /* ascending order by size, within same alignment class */
+               if (a->kcfg.sz != b->kcfg.sz)
+                       return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
+       }
+
        /* resolve ties by name */
        return strcmp(a->name, b->name);
 }
 
+static int find_int_btf_id(const struct btf *btf)
+{
+       const struct btf_type *t;
+       int i, n;
+
+       n = btf__get_nr_types(btf);
+       for (i = 1; i <= n; i++) {
+               t = btf__type_by_id(btf, i);
+
+               if (btf_is_int(t) && btf_int_bits(t) == 32)
+                       return i;
+       }
+
+       return 0;
+}
+
 static int bpf_object__collect_externs(struct bpf_object *obj)
 {
+       struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
        const struct btf_type *t;
        struct extern_desc *ext;
-       int i, n, off, btf_id;
-       struct btf_type *sec;
-       const char *ext_name;
+       int i, n, off;
+       const char *ext_name, *sec_name;
        Elf_Scn *scn;
        GElf_Shdr sh;
 
@@ -2818,22 +2904,50 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
                ext->name = btf__name_by_offset(obj->btf, t->name_off);
                ext->sym_idx = i;
                ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK;
-               ext->sz = btf__resolve_size(obj->btf, t->type);
-               if (ext->sz <= 0) {
-                       pr_warn("failed to resolve size of extern '%s': %d\n",
-                               ext_name, ext->sz);
-                       return ext->sz;
-               }
-               ext->align = btf__align_of(obj->btf, t->type);
-               if (ext->align <= 0) {
-                       pr_warn("failed to determine alignment of extern '%s': %d\n",
-                               ext_name, ext->align);
-                       return -EINVAL;
-               }
-               ext->type = find_extern_type(obj->btf, t->type,
-                                            &ext->is_signed);
-               if (ext->type == EXT_UNKNOWN) {
-                       pr_warn("extern '%s' type is unsupported\n", ext_name);
+
+               ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
+               if (ext->sec_btf_id <= 0) {
+                       pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
+                               ext_name, ext->btf_id, ext->sec_btf_id);
+                       return ext->sec_btf_id;
+               }
+               sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
+               sec_name = btf__name_by_offset(obj->btf, sec->name_off);
+
+               if (strcmp(sec_name, KCONFIG_SEC) == 0) {
+                       kcfg_sec = sec;
+                       ext->type = EXT_KCFG;
+                       ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
+                       if (ext->kcfg.sz <= 0) {
+                               pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
+                                       ext_name, ext->kcfg.sz);
+                               return ext->kcfg.sz;
+                       }
+                       ext->kcfg.align = btf__align_of(obj->btf, t->type);
+                       if (ext->kcfg.align <= 0) {
+                               pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
+                                       ext_name, ext->kcfg.align);
+                               return -EINVAL;
+                       }
+                       ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
+                                                       &ext->kcfg.is_signed);
+                       if (ext->kcfg.type == KCFG_UNKNOWN) {
+                               pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name);
+                               return -ENOTSUP;
+                       }
+               } else if (strcmp(sec_name, KSYMS_SEC) == 0) {
+                       const struct btf_type *vt;
+
+                       ksym_sec = sec;
+                       ext->type = EXT_KSYM;
+
+                       vt = skip_mods_and_typedefs(obj->btf, t->type, NULL);
+                       if (!btf_is_void(vt)) {
+                               pr_warn("extern (ksym) '%s' is not typeless (void)\n", ext_name);
+                               return -ENOTSUP;
+                       }
+               } else {
+                       pr_warn("unrecognized extern section '%s'\n", sec_name);
                        return -ENOTSUP;
                }
        }
@@ -2842,42 +2956,80 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
        if (!obj->nr_extern)
                return 0;
 
-       /* sort externs by (alignment, size, name) and calculate their offsets
-        * within a map */
+       /* sort externs by type, for kcfg ones also by (align, size, name) */
        qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
-       off = 0;
-       for (i = 0; i < obj->nr_extern; i++) {
-               ext = &obj->externs[i];
-               ext->data_off = roundup(off, ext->align);
-               off = ext->data_off + ext->sz;
-               pr_debug("extern #%d: symbol %d, off %u, name %s\n",
-                        i, ext->sym_idx, ext->data_off, ext->name);
-       }
 
-       btf_id = btf__find_by_name(obj->btf, KCONFIG_SEC);
-       if (btf_id <= 0) {
-               pr_warn("no BTF info found for '%s' datasec\n", KCONFIG_SEC);
-               return -ESRCH;
-       }
+       /* for .ksyms section, we need to turn all externs into allocated
+        * variables in BTF to pass kernel verification; we do this by
+        * pretending that each extern is a 8-byte variable
+        */
+       if (ksym_sec) {
+               /* find existing 4-byte integer type in BTF to use for fake
+                * extern variables in DATASEC
+                */
+               int int_btf_id = find_int_btf_id(obj->btf);
 
-       sec = (struct btf_type *)btf__type_by_id(obj->btf, btf_id);
-       sec->size = off;
-       n = btf_vlen(sec);
-       for (i = 0; i < n; i++) {
-               struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
-
-               t = btf__type_by_id(obj->btf, vs->type);
-               ext_name = btf__name_by_offset(obj->btf, t->name_off);
-               ext = find_extern_by_name(obj, ext_name);
-               if (!ext) {
-                       pr_warn("failed to find extern definition for BTF var '%s'\n",
-                               ext_name);
-                       return -ESRCH;
+               for (i = 0; i < obj->nr_extern; i++) {
+                       ext = &obj->externs[i];
+                       if (ext->type != EXT_KSYM)
+                               continue;
+                       pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
+                                i, ext->sym_idx, ext->name);
                }
-               vs->offset = ext->data_off;
-               btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
+
+               sec = ksym_sec;
+               n = btf_vlen(sec);
+               for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
+                       struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
+                       struct btf_type *vt;
+
+                       vt = (void *)btf__type_by_id(obj->btf, vs->type);
+                       ext_name = btf__name_by_offset(obj->btf, vt->name_off);
+                       ext = find_extern_by_name(obj, ext_name);
+                       if (!ext) {
+                               pr_warn("failed to find extern definition for BTF var '%s'\n",
+                                       ext_name);
+                               return -ESRCH;
+                       }
+                       btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
+                       vt->type = int_btf_id;
+                       vs->offset = off;
+                       vs->size = sizeof(int);
+               }
+               sec->size = off;
        }
 
+       if (kcfg_sec) {
+               sec = kcfg_sec;
+               /* for kcfg externs calculate their offsets within a .kconfig map */
+               off = 0;
+               for (i = 0; i < obj->nr_extern; i++) {
+                       ext = &obj->externs[i];
+                       if (ext->type != EXT_KCFG)
+                               continue;
+
+                       ext->kcfg.data_off = roundup(off, ext->kcfg.align);
+                       off = ext->kcfg.data_off + ext->kcfg.sz;
+                       pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
+                                i, ext->sym_idx, ext->kcfg.data_off, ext->name);
+               }
+               sec->size = off;
+               n = btf_vlen(sec);
+               for (i = 0; i < n; i++) {
+                       struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
+
+                       t = btf__type_by_id(obj->btf, vs->type);
+                       ext_name = btf__name_by_offset(obj->btf, t->name_off);
+                       ext = find_extern_by_name(obj, ext_name);
+                       if (!ext) {
+                               pr_warn("failed to find extern definition for BTF var '%s'\n",
+                                       ext_name);
+                               return -ESRCH;
+                       }
+                       btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
+                       vs->offset = ext->kcfg.data_off;
+               }
+       }
        return 0;
 }
 
@@ -3007,11 +3159,11 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
                                sym_idx);
                        return -LIBBPF_ERRNO__RELOC;
                }
-               pr_debug("found extern #%d '%s' (sym %d, off %u) for insn %u\n",
-                        i, ext->name, ext->sym_idx, ext->data_off, insn_idx);
+               pr_debug("found extern #%d '%s' (sym %d) for insn %u\n",
+                        i, ext->name, ext->sym_idx, insn_idx);
                reloc_desc->type = RELO_EXTERN;
                reloc_desc->insn_idx = insn_idx;
-               reloc_desc->sym_off = ext->data_off;
+               reloc_desc->sym_off = i; /* sym_off stores extern index */
                return 0;
        }
 
@@ -3222,20 +3374,27 @@ err_free_new_name:
        return err;
 }
 
-int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
+__u32 bpf_map__max_entries(const struct bpf_map *map)
 {
-       if (!map || !max_entries)
-               return -EINVAL;
+       return map->def.max_entries;
+}
 
-       /* If map already created, its attributes can't be changed. */
+int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
+{
        if (map->fd >= 0)
                return -EBUSY;
-
        map->def.max_entries = max_entries;
-
        return 0;
 }
 
+int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
+{
+       if (!map || !max_entries)
+               return -EINVAL;
+
+       return bpf_map__set_max_entries(map, max_entries);
+}
+
 static int
 bpf_object__probe_loading(struct bpf_object *obj)
 {
@@ -3603,6 +3762,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
        create_attr.map_flags = def->map_flags;
        create_attr.key_size = def->key_size;
        create_attr.value_size = def->value_size;
+       create_attr.numa_node = map->numa_node;
 
        if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
                int nr_cpus;
@@ -3626,7 +3786,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
        create_attr.btf_fd = 0;
        create_attr.btf_key_type_id = 0;
        create_attr.btf_value_type_id = 0;
-       if (obj->btf && !bpf_map_find_btf_info(obj, map)) {
+       if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
                create_attr.btf_fd = btf__fd(obj->btf);
                create_attr.btf_key_type_id = map->btf_key_type_id;
                create_attr.btf_value_type_id = map->btf_value_type_id;
@@ -4799,8 +4959,8 @@ bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
        if (targ_btf_path)
                targ_btf = btf__parse_elf(targ_btf_path, NULL);
        else
-               targ_btf = libbpf_find_kernel_btf();
-       if (IS_ERR(targ_btf)) {
+               targ_btf = obj->btf_vmlinux;
+       if (IS_ERR_OR_NULL(targ_btf)) {
                pr_warn("failed to get target BTF: %ld\n", PTR_ERR(targ_btf));
                return PTR_ERR(targ_btf);
        }
@@ -4818,7 +4978,13 @@ bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
                        err = -EINVAL;
                        goto out;
                }
-               prog = bpf_object__find_program_by_title(obj, sec_name);
+               prog = NULL;
+               for (i = 0; i < obj->nr_programs; i++) {
+                       if (!strcmp(obj->programs[i].section_name, sec_name)) {
+                               prog = &obj->programs[i];
+                               break;
+                       }
+               }
                if (!prog) {
                        pr_warn("failed to find program '%s' for CO-RE offset relocation\n",
                                sec_name);
@@ -4841,7 +5007,9 @@ bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
        }
 
 out:
-       btf__free(targ_btf);
+       /* obj->btf_vmlinux is freed at the end of object load phase */
+       if (targ_btf != obj->btf_vmlinux)
+               btf__free(targ_btf);
        if (!IS_ERR_OR_NULL(cand_cache)) {
                hashmap__for_each_entry(cand_cache, entry, i) {
                        bpf_core_free_cands(entry->value);
@@ -4928,6 +5096,7 @@ bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
        for (i = 0; i < prog->nr_reloc; i++) {
                struct reloc_desc *relo = &prog->reloc_desc[i];
                struct bpf_insn *insn = &prog->insns[relo->insn_idx];
+               struct extern_desc *ext;
 
                if (relo->insn_idx + 1 >= (int)prog->insns_cnt) {
                        pr_warn("relocation out of range: '%s'\n",
@@ -4946,9 +5115,15 @@ bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
                        insn[0].imm = obj->maps[relo->map_idx].fd;
                        break;
                case RELO_EXTERN:
-                       insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
-                       insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
-                       insn[1].imm = relo->sym_off;
+                       ext = &obj->externs[relo->sym_off];
+                       if (ext->type == EXT_KCFG) {
+                               insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
+                               insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
+                               insn[1].imm = ext->kcfg.data_off;
+                       } else /* EXT_KSYM */ {
+                               insn[0].imm = (__u32)ext->ksym.addr;
+                               insn[1].imm = ext->ksym.addr >> 32;
+                       }
                        break;
                case RELO_CALL:
                        err = bpf_program__reloc_text(prog, obj, relo);
@@ -5201,18 +5376,17 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
                load_attr.kern_version = kern_version;
                load_attr.prog_ifindex = prog->prog_ifindex;
        }
-       /* if .BTF.ext was loaded, kernel supports associated BTF for prog */
-       if (prog->obj->btf_ext)
-               btf_fd = bpf_object__btf_fd(prog->obj);
-       else
-               btf_fd = -1;
-       load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
-       load_attr.func_info = prog->func_info;
-       load_attr.func_info_rec_size = prog->func_info_rec_size;
-       load_attr.func_info_cnt = prog->func_info_cnt;
-       load_attr.line_info = prog->line_info;
-       load_attr.line_info_rec_size = prog->line_info_rec_size;
-       load_attr.line_info_cnt = prog->line_info_cnt;
+       /* specify func_info/line_info only if kernel supports them */
+       btf_fd = bpf_object__btf_fd(prog->obj);
+       if (btf_fd >= 0 && prog->obj->caps.btf_func) {
+               load_attr.prog_btf_fd = btf_fd;
+               load_attr.func_info = prog->func_info;
+               load_attr.func_info_rec_size = prog->func_info_rec_size;
+               load_attr.func_info_cnt = prog->func_info_cnt;
+               load_attr.line_info = prog->line_info;
+               load_attr.line_info_rec_size = prog->line_info_rec_size;
+               load_attr.line_info_cnt = prog->line_info_cnt;
+       }
        load_attr.log_level = prog->log_level;
        load_attr.prog_flags = prog->prog_flags;
 
@@ -5281,6 +5455,12 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
 {
        int err = 0, fd, i, btf_id;
 
+       if (prog->obj->loaded) {
+               pr_warn("prog '%s'('%s'): can't load after object was loaded\n",
+                       prog->name, prog->section_name);
+               return -EINVAL;
+       }
+
        if ((prog->type == BPF_PROG_TYPE_TRACING ||
             prog->type == BPF_PROG_TYPE_LSM ||
             prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
@@ -5369,16 +5549,21 @@ static bool bpf_program__is_function_storage(const struct bpf_program *prog,
 static int
 bpf_object__load_progs(struct bpf_object *obj, int log_level)
 {
+       struct bpf_program *prog;
        size_t i;
        int err;
 
        for (i = 0; i < obj->nr_programs; i++) {
-               if (bpf_program__is_function_storage(&obj->programs[i], obj))
+               prog = &obj->programs[i];
+               if (bpf_program__is_function_storage(prog, obj))
+                       continue;
+               if (!prog->load) {
+                       pr_debug("prog '%s'('%s'): skipped loading\n",
+                                prog->name, prog->section_name);
                        continue;
-               obj->programs[i].log_level |= log_level;
-               err = bpf_program__load(&obj->programs[i],
-                                       obj->license,
-                                       obj->kern_version);
+               }
+               prog->log_level |= log_level;
+               err = bpf_program__load(prog, obj->license, obj->kern_version);
                if (err)
                        return err;
        }
@@ -5567,56 +5752,114 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj)
        return 0;
 }
 
+static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
+{
+       char sym_type, sym_name[500];
+       unsigned long long sym_addr;
+       struct extern_desc *ext;
+       int ret, err = 0;
+       FILE *f;
+
+       f = fopen("/proc/kallsyms", "r");
+       if (!f) {
+               err = -errno;
+               pr_warn("failed to open /proc/kallsyms: %d\n", err);
+               return err;
+       }
+
+       while (true) {
+               ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
+                            &sym_addr, &sym_type, sym_name);
+               if (ret == EOF && feof(f))
+                       break;
+               if (ret != 3) {
+                       pr_warn("failed to read kallsyms entry: %d\n", ret);
+                       err = -EINVAL;
+                       goto out;
+               }
+
+               ext = find_extern_by_name(obj, sym_name);
+               if (!ext || ext->type != EXT_KSYM)
+                       continue;
+
+               if (ext->is_set && ext->ksym.addr != sym_addr) {
+                       pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n",
+                               sym_name, ext->ksym.addr, sym_addr);
+                       err = -EINVAL;
+                       goto out;
+               }
+               if (!ext->is_set) {
+                       ext->is_set = true;
+                       ext->ksym.addr = sym_addr;
+                       pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr);
+               }
+       }
+
+out:
+       fclose(f);
+       return err;
+}
+
 static int bpf_object__resolve_externs(struct bpf_object *obj,
                                       const char *extra_kconfig)
 {
-       bool need_config = false;
+       bool need_config = false, need_kallsyms = false;
        struct extern_desc *ext;
+       void *kcfg_data = NULL;
        int err, i;
-       void *data;
 
        if (obj->nr_extern == 0)
                return 0;
 
-       data = obj->maps[obj->kconfig_map_idx].mmaped;
+       if (obj->kconfig_map_idx >= 0)
+               kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
 
        for (i = 0; i < obj->nr_extern; i++) {
                ext = &obj->externs[i];
 
-               if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
-                       void *ext_val = data + ext->data_off;
+               if (ext->type == EXT_KCFG &&
+                   strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
+                       void *ext_val = kcfg_data + ext->kcfg.data_off;
                        __u32 kver = get_kernel_version();
 
                        if (!kver) {
                                pr_warn("failed to get kernel version\n");
                                return -EINVAL;
                        }
-                       err = set_ext_value_num(ext, ext_val, kver);
+                       err = set_kcfg_value_num(ext, ext_val, kver);
                        if (err)
                                return err;
-                       pr_debug("extern %s=0x%x\n", ext->name, kver);
-               } else if (strncmp(ext->name, "CONFIG_", 7) == 0) {
+                       pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver);
+               } else if (ext->type == EXT_KCFG &&
+                          strncmp(ext->name, "CONFIG_", 7) == 0) {
                        need_config = true;
+               } else if (ext->type == EXT_KSYM) {
+                       need_kallsyms = true;
                } else {
                        pr_warn("unrecognized extern '%s'\n", ext->name);
                        return -EINVAL;
                }
        }
        if (need_config && extra_kconfig) {
-               err = bpf_object__read_kconfig_mem(obj, extra_kconfig, data);
+               err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
                if (err)
                        return -EINVAL;
                need_config = false;
                for (i = 0; i < obj->nr_extern; i++) {
                        ext = &obj->externs[i];
-                       if (!ext->is_set) {
+                       if (ext->type == EXT_KCFG && !ext->is_set) {
                                need_config = true;
                                break;
                        }
                }
        }
        if (need_config) {
-               err = bpf_object__read_kconfig_file(obj, data);
+               err = bpf_object__read_kconfig_file(obj, kcfg_data);
+               if (err)
+                       return -EINVAL;
+       }
+       if (need_kallsyms) {
+               err = bpf_object__read_kallsyms_file(obj);
                if (err)
                        return -EINVAL;
        }
@@ -5647,12 +5890,10 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
                return -EINVAL;
 
        if (obj->loaded) {
-               pr_warn("object should not be loaded twice\n");
+               pr_warn("object '%s': load can't be attempted twice\n", obj->name);
                return -EINVAL;
        }
 
-       obj->loaded = true;
-
        err = bpf_object__probe_loading(obj);
        err = err ? : bpf_object__probe_caps(obj);
        err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
@@ -5667,6 +5908,8 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
        btf__free(obj->btf_vmlinux);
        obj->btf_vmlinux = NULL;
 
+       obj->loaded = true; /* doesn't matter if successfully or not */
+
        if (err)
                goto out;
 
@@ -6439,6 +6682,20 @@ const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
        return title;
 }
 
+bool bpf_program__autoload(const struct bpf_program *prog)
+{
+       return prog->load;
+}
+
+int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
+{
+       if (prog->obj->loaded)
+               return -EINVAL;
+
+       prog->load = autoload;
+       return 0;
+}
+
 int bpf_program__fd(const struct bpf_program *prog)
 {
        return bpf_program__nth_fd(prog, 0);
@@ -6542,6 +6799,7 @@ BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
 BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
 BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
 BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
+BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP);
 
 enum bpf_attach_type
 bpf_program__get_expected_attach_type(struct bpf_program *prog)
@@ -6653,8 +6911,10 @@ static const struct bpf_sec_def section_defs[] = {
                .expected_attach_type = BPF_TRACE_ITER,
                .is_attach_btf = true,
                .attach_fn = attach_iter),
-       BPF_EAPROG_SEC("xdp_devmap",            BPF_PROG_TYPE_XDP,
+       BPF_EAPROG_SEC("xdp_devmap/",           BPF_PROG_TYPE_XDP,
                                                BPF_XDP_DEVMAP),
+       BPF_EAPROG_SEC("xdp_cpumap/",           BPF_PROG_TYPE_XDP,
+                                               BPF_XDP_CPUMAP),
        BPF_PROG_SEC("xdp",                     BPF_PROG_TYPE_XDP),
        BPF_PROG_SEC("perf_event",              BPF_PROG_TYPE_PERF_EVENT),
        BPF_PROG_SEC("lwt_in",                  BPF_PROG_TYPE_LWT_IN),
@@ -6666,6 +6926,10 @@ static const struct bpf_sec_def section_defs[] = {
        BPF_APROG_SEC("cgroup_skb/egress",      BPF_PROG_TYPE_CGROUP_SKB,
                                                BPF_CGROUP_INET_EGRESS),
        BPF_APROG_COMPAT("cgroup/skb",          BPF_PROG_TYPE_CGROUP_SKB),
+       BPF_EAPROG_SEC("cgroup/sock_create",    BPF_PROG_TYPE_CGROUP_SOCK,
+                                               BPF_CGROUP_INET_SOCK_CREATE),
+       BPF_EAPROG_SEC("cgroup/sock_release",   BPF_PROG_TYPE_CGROUP_SOCK,
+                                               BPF_CGROUP_INET_SOCK_RELEASE),
        BPF_APROG_SEC("cgroup/sock",            BPF_PROG_TYPE_CGROUP_SOCK,
                                                BPF_CGROUP_INET_SOCK_CREATE),
        BPF_EAPROG_SEC("cgroup/post_bind4",     BPF_PROG_TYPE_CGROUP_SOCK,
@@ -6718,6 +6982,8 @@ static const struct bpf_sec_def section_defs[] = {
        BPF_EAPROG_SEC("cgroup/setsockopt",     BPF_PROG_TYPE_CGROUP_SOCKOPT,
                                                BPF_CGROUP_SETSOCKOPT),
        BPF_PROG_SEC("struct_ops",              BPF_PROG_TYPE_STRUCT_OPS),
+       BPF_EAPROG_SEC("sk_lookup/",            BPF_PROG_TYPE_SK_LOOKUP,
+                                               BPF_SK_LOOKUP),
 };
 
 #undef BPF_PROG_SEC_IMPL
@@ -7088,6 +7354,71 @@ const char *bpf_map__name(const struct bpf_map *map)
        return map ? map->name : NULL;
 }
 
+enum bpf_map_type bpf_map__type(const struct bpf_map *map)
+{
+       return map->def.type;
+}
+
+int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
+{
+       if (map->fd >= 0)
+               return -EBUSY;
+       map->def.type = type;
+       return 0;
+}
+
+__u32 bpf_map__map_flags(const struct bpf_map *map)
+{
+       return map->def.map_flags;
+}
+
+int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
+{
+       if (map->fd >= 0)
+               return -EBUSY;
+       map->def.map_flags = flags;
+       return 0;
+}
+
+__u32 bpf_map__numa_node(const struct bpf_map *map)
+{
+       return map->numa_node;
+}
+
+int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
+{
+       if (map->fd >= 0)
+               return -EBUSY;
+       map->numa_node = numa_node;
+       return 0;
+}
+
+__u32 bpf_map__key_size(const struct bpf_map *map)
+{
+       return map->def.key_size;
+}
+
+int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
+{
+       if (map->fd >= 0)
+               return -EBUSY;
+       map->def.key_size = size;
+       return 0;
+}
+
+__u32 bpf_map__value_size(const struct bpf_map *map)
+{
+       return map->def.value_size;
+}
+
+int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
+{
+       if (map->fd >= 0)
+               return -EBUSY;
+       map->def.value_size = size;
+       return 0;
+}
+
 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
 {
        return map ? map->btf_key_type_id : 0;
@@ -7140,9 +7471,17 @@ bool bpf_map__is_internal(const struct bpf_map *map)
        return map->libbpf_type != LIBBPF_MAP_UNSPEC;
 }
 
-void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
+__u32 bpf_map__ifindex(const struct bpf_map *map)
 {
+       return map->map_ifindex;
+}
+
+int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
+{
+       if (map->fd >= 0)
+               return -EBUSY;
        map->map_ifindex = ifindex;
+       return 0;
 }
 
 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
@@ -8258,7 +8597,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
                                              struct perf_buffer_params *p)
 {
        const char *online_cpus_file = "/sys/devices/system/cpu/online";
-       struct bpf_map_info map = {};
+       struct bpf_map_info map;
        char msg[STRERR_BUFSIZE];
        struct perf_buffer *pb;
        bool *online = NULL;
@@ -8271,19 +8610,28 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
                return ERR_PTR(-EINVAL);
        }
 
+       /* best-effort sanity checks */
+       memset(&map, 0, sizeof(map));
        map_info_len = sizeof(map);
        err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
        if (err) {
                err = -errno;
-               pr_warn("failed to get map info for map FD %d: %s\n",
-                       map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
-               return ERR_PTR(err);
-       }
-
-       if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
-               pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
-                       map.name);
-               return ERR_PTR(-EINVAL);
+               /* if BPF_OBJ_GET_INFO_BY_FD is supported, will return
+                * -EBADFD, -EFAULT, or -E2BIG on real error
+                */
+               if (err != -EINVAL) {
+                       pr_warn("failed to get map info for map FD %d: %s\n",
+                               map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
+                       return ERR_PTR(err);
+               }
+               pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
+                        map_fd);
+       } else {
+               if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
+                       pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
+                               map.name);
+                       return ERR_PTR(-EINVAL);
+               }
        }
 
        pb = calloc(1, sizeof(*pb));
@@ -8315,7 +8663,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
                        err = pb->cpu_cnt;
                        goto error;
                }
-               if (map.max_entries < pb->cpu_cnt)
+               if (map.max_entries && map.max_entries < pb->cpu_cnt)
                        pb->cpu_cnt = map.max_entries;
        }
 
@@ -8988,6 +9336,9 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
                const struct bpf_sec_def *sec_def;
                const char *sec_name = bpf_program__title(prog, false);
 
+               if (!prog->load)
+                       continue;
+
                sec_def = find_sec_def(sec_name);
                if (!sec_def || !sec_def->attach_fn)
                        continue;
index 334437a..c227213 100644 (file)
@@ -200,6 +200,8 @@ LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
 LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog);
 LIBBPF_API const char *bpf_program__title(const struct bpf_program *prog,
                                          bool needs_copy);
+LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload);
 
 /* returns program size in bytes */
 LIBBPF_API size_t bpf_program__size(const struct bpf_program *prog);
@@ -348,6 +350,7 @@ LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog);
 LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog);
 LIBBPF_API int bpf_program__set_struct_ops(struct bpf_program *prog);
 LIBBPF_API int bpf_program__set_extension(struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_sk_lookup(struct bpf_program *prog);
 
 LIBBPF_API enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog);
 LIBBPF_API void bpf_program__set_type(struct bpf_program *prog,
@@ -375,6 +378,7 @@ LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog);
 LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog);
 LIBBPF_API bool bpf_program__is_struct_ops(const struct bpf_program *prog);
 LIBBPF_API bool bpf_program__is_extension(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__is_sk_lookup(const struct bpf_program *prog);
 
 /*
  * No need for __attribute__((packed)), all members of 'bpf_map_def'
@@ -418,11 +422,38 @@ bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj);
 LIBBPF_API struct bpf_map *
 bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj);
 
+/* get/set map FD */
 LIBBPF_API int bpf_map__fd(const struct bpf_map *map);
+LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
+/* get map definition */
 LIBBPF_API const struct bpf_map_def *bpf_map__def(const struct bpf_map *map);
+/* get map name */
 LIBBPF_API const char *bpf_map__name(const struct bpf_map *map);
+/* get/set map type */
+LIBBPF_API enum bpf_map_type bpf_map__type(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type);
+/* get/set map size (max_entries) */
+LIBBPF_API __u32 bpf_map__max_entries(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries);
+LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries);
+/* get/set map flags */
+LIBBPF_API __u32 bpf_map__map_flags(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags);
+/* get/set map NUMA node */
+LIBBPF_API __u32 bpf_map__numa_node(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node);
+/* get/set map key size */
+LIBBPF_API __u32 bpf_map__key_size(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_key_size(struct bpf_map *map, __u32 size);
+/* get/set map value size */
+LIBBPF_API __u32 bpf_map__value_size(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_value_size(struct bpf_map *map, __u32 size);
+/* get map key/value BTF type IDs */
 LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map);
 LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map);
+/* get/set map if_index */
+LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
 
 typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
 LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
@@ -430,11 +461,8 @@ LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
 LIBBPF_API void *bpf_map__priv(const struct bpf_map *map);
 LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
                                          const void *data, size_t size);
-LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
-LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries);
 LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map);
 LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map);
-LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
 LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path);
 LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map);
 LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map);
index f732c77..6f0856a 100644 (file)
@@ -270,3 +270,25 @@ LIBBPF_0.0.9 {
                ring_buffer__new;
                ring_buffer__poll;
 } LIBBPF_0.0.8;
+
+LIBBPF_0.1.0 {
+       global:
+               bpf_map__ifindex;
+               bpf_map__key_size;
+               bpf_map__map_flags;
+               bpf_map__max_entries;
+               bpf_map__numa_node;
+               bpf_map__set_key_size;
+               bpf_map__set_map_flags;
+               bpf_map__set_max_entries;
+               bpf_map__set_numa_node;
+               bpf_map__set_type;
+               bpf_map__set_value_size;
+               bpf_map__type;
+               bpf_map__value_size;
+               bpf_program__autoload;
+               bpf_program__is_sk_lookup;
+               bpf_program__set_autoload;
+               bpf_program__set_sk_lookup;
+               btf__set_fd;
+} LIBBPF_0.0.9;
index 10cd8d1..5a3d3f0 100644 (file)
@@ -78,6 +78,9 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
        case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
                xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
                break;
+       case BPF_PROG_TYPE_SK_LOOKUP:
+               xattr.expected_attach_type = BPF_SK_LOOKUP;
+               break;
        case BPF_PROG_TYPE_KPROBE:
                xattr.kern_version = get_kernel_version();
                break;
index e1bd2a9..5b36c58 100644 (file)
@@ -1425,13 +1425,28 @@ static unsigned int type_size(const char *name)
        return 0;
 }
 
+static int append(char **buf, const char *delim, const char *str)
+{
+       char *new_buf;
+
+       new_buf = realloc(*buf, strlen(*buf) + strlen(delim) + strlen(str) + 1);
+       if (!new_buf)
+               return -1;
+       strcat(new_buf, delim);
+       strcat(new_buf, str);
+       *buf = new_buf;
+       return 0;
+}
+
 static int event_read_fields(struct tep_event *event, struct tep_format_field **fields)
 {
        struct tep_format_field *field = NULL;
        enum tep_event_type type;
        char *token;
        char *last_token;
+       char *delim = " ";
        int count = 0;
+       int ret;
 
        do {
                unsigned int size_dynamic = 0;
@@ -1490,24 +1505,51 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field **
                                        field->flags |= TEP_FIELD_IS_POINTER;
 
                                if (field->type) {
-                                       char *new_type;
-                                       new_type = realloc(field->type,
-                                                          strlen(field->type) +
-                                                          strlen(last_token) + 2);
-                                       if (!new_type) {
-                                               free(last_token);
-                                               goto fail;
-                                       }
-                                       field->type = new_type;
-                                       strcat(field->type, " ");
-                                       strcat(field->type, last_token);
+                                       ret = append(&field->type, delim, last_token);
                                        free(last_token);
+                                       if (ret < 0)
+                                               goto fail;
                                } else
                                        field->type = last_token;
                                last_token = token;
+                               delim = " ";
                                continue;
                        }
 
+                       /* Handle __attribute__((user)) */
+                       if ((type == TEP_EVENT_DELIM) &&
+                           strcmp("__attribute__", last_token) == 0 &&
+                           token[0] == '(') {
+                               int depth = 1;
+                               int ret;
+
+                               ret = append(&field->type, " ", last_token);
+                               ret |= append(&field->type, "", "(");
+                               if (ret < 0)
+                                       goto fail;
+
+                               delim = " ";
+                               while ((type = read_token(&token)) != TEP_EVENT_NONE) {
+                                       if (type == TEP_EVENT_DELIM) {
+                                               if (token[0] == '(')
+                                                       depth++;
+                                               else if (token[0] == ')')
+                                                       depth--;
+                                               if (!depth)
+                                                       break;
+                                               ret = append(&field->type, "", token);
+                                               delim = "";
+                                       } else {
+                                               ret = append(&field->type, delim, token);
+                                               delim = " ";
+                                       }
+                                       if (ret < 0)
+                                               goto fail;
+                                       free(last_token);
+                                       last_token = token;
+                               }
+                               continue;
+                       }
                        break;
                }
 
@@ -1523,8 +1565,6 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field **
                if (strcmp(token, "[") == 0) {
                        enum tep_event_type last_type = type;
                        char *brackets = token;
-                       char *new_brackets;
-                       int len;
 
                        field->flags |= TEP_FIELD_IS_ARRAY;
 
@@ -1536,29 +1576,27 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field **
                                field->arraylen = 0;
 
                        while (strcmp(token, "]") != 0) {
+                               const char *delim;
+
                                if (last_type == TEP_EVENT_ITEM &&
                                    type == TEP_EVENT_ITEM)
-                                       len = 2;
+                                       delim = " ";
                                else
-                                       len = 1;
+                                       delim = "";
+
                                last_type = type;
 
-                               new_brackets = realloc(brackets,
-                                                      strlen(brackets) +
-                                                      strlen(token) + len);
-                               if (!new_brackets) {
+                               ret = append(&brackets, delim, token);
+                               if (ret < 0) {
                                        free(brackets);
                                        goto fail;
                                }
-                               brackets = new_brackets;
-                               if (len == 2)
-                                       strcat(brackets, " ");
-                               strcat(brackets, token);
                                /* We only care about the last token */
                                field->arraylen = strtoul(token, NULL, 0);
                                free_token(token);
                                type = read_token(&token);
                                if (type == TEP_EVENT_NONE) {
+                                       free(brackets);
                                        do_warning_event(event, "failed to find token");
                                        goto fail;
                                }
@@ -1566,13 +1604,11 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field **
 
                        free_token(token);
 
-                       new_brackets = realloc(brackets, strlen(brackets) + 2);
-                       if (!new_brackets) {
+                       ret = append(&brackets, "", "]");
+                       if (ret < 0) {
                                free(brackets);
                                goto fail;
                        }
-                       brackets = new_brackets;
-                       strcat(brackets, "]");
 
                        /* add brackets to type */
 
@@ -1582,34 +1618,23 @@ static int event_read_fields(struct tep_event *event, struct tep_format_field **
                         * the format: type [] item;
                         */
                        if (type == TEP_EVENT_ITEM) {
-                               char *new_type;
-                               new_type = realloc(field->type,
-                                                  strlen(field->type) +
-                                                  strlen(field->name) +
-                                                  strlen(brackets) + 2);
-                               if (!new_type) {
+                               ret = append(&field->type, " ", field->name);
+                               if (ret < 0) {
                                        free(brackets);
                                        goto fail;
                                }
-                               field->type = new_type;
-                               strcat(field->type, " ");
-                               strcat(field->type, field->name);
+                               ret = append(&field->type, "", brackets);
+
                                size_dynamic = type_size(field->name);
                                free_token(field->name);
-                               strcat(field->type, brackets);
                                field->name = field->alias = token;
                                type = read_token(&token);
                        } else {
-                               char *new_type;
-                               new_type = realloc(field->type,
-                                                  strlen(field->type) +
-                                                  strlen(brackets) + 1);
-                               if (!new_type) {
+                               ret = append(&field->type, "", brackets);
+                               if (ret < 0) {
                                        free(brackets);
                                        goto fail;
                                }
-                               field->type = new_type;
-                               strcat(field->type, brackets);
                        }
                        free(brackets);
                }
@@ -2046,19 +2071,16 @@ process_op(struct tep_event *event, struct tep_print_arg *arg, char **tok)
                /* could just be a type pointer */
                if ((strcmp(arg->op.op, "*") == 0) &&
                    type == TEP_EVENT_DELIM && (strcmp(token, ")") == 0)) {
-                       char *new_atom;
+                       int ret;
 
                        if (left->type != TEP_PRINT_ATOM) {
                                do_warning_event(event, "bad pointer type");
                                goto out_free;
                        }
-                       new_atom = realloc(left->atom.atom,
-                                           strlen(left->atom.atom) + 3);
-                       if (!new_atom)
+                       ret = append(&left->atom.atom, " ", "*");
+                       if (ret < 0)
                                goto out_warn_free;
 
-                       left->atom.atom = new_atom;
-                       strcat(left->atom.atom, " *");
                        free(arg->op.op);
                        *arg = *left;
                        free(left);
@@ -3063,6 +3085,37 @@ err:
 }
 
 static enum tep_event_type
+process_builtin_expect(struct tep_event *event, struct tep_print_arg *arg, char **tok)
+{
+       enum tep_event_type type;
+       char *token = NULL;
+
+       /* Handle __builtin_expect( cond, #) */
+       type = process_arg(event, arg, &token);
+
+       if (type != TEP_EVENT_DELIM || token[0] != ',')
+               goto out_free;
+
+       free_token(token);
+
+       /* We don't care about the second parameter of __builtin_expect() */
+       if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
+               goto out_free;
+
+       if (read_expected(TEP_EVENT_DELIM, ")") < 0)
+               goto out_free;
+
+       free_token(token);
+       type = read_token_item(tok);
+       return type;
+
+out_free:
+       free_token(token);
+       *tok = NULL;
+       return TEP_EVENT_ERROR;
+}
+
+static enum tep_event_type
 process_function(struct tep_event *event, struct tep_print_arg *arg,
                 char *token, char **tok)
 {
@@ -3106,6 +3159,10 @@ process_function(struct tep_event *event, struct tep_print_arg *arg,
                free_token(token);
                return process_dynamic_array_len(event, arg, tok);
        }
+       if (strcmp(token, "__builtin_expect") == 0) {
+               free_token(token);
+               return process_builtin_expect(event, arg, tok);
+       }
 
        func = find_func_handler(event->tep, token);
        if (func) {
@@ -3151,18 +3208,15 @@ process_arg_token(struct tep_event *event, struct tep_print_arg *arg,
                }
                /* atoms can be more than one token long */
                while (type == TEP_EVENT_ITEM) {
-                       char *new_atom;
-                       new_atom = realloc(atom,
-                                          strlen(atom) + strlen(token) + 2);
-                       if (!new_atom) {
+                       int ret;
+
+                       ret = append(&atom, " ", token);
+                       if (ret < 0) {
                                free(atom);
                                *tok = NULL;
                                free_token(token);
                                return TEP_EVENT_ERROR;
                        }
-                       atom = new_atom;
-                       strcat(atom, " ");
-                       strcat(atom, token);
                        free_token(token);
                        type = read_token_item(&token);
                }
index 27f3b07..f1640d6 100644 (file)
@@ -361,6 +361,7 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
                break;
 
        case KBUFFER_TYPE_TIME_EXTEND:
+       case KBUFFER_TYPE_TIME_STAMP:
                extend = read_4(kbuf, data);
                data += 4;
                extend <<= TS_SHIFT;
@@ -369,10 +370,6 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
                *length = 0;
                break;
 
-       case KBUFFER_TYPE_TIME_STAMP:
-               data += 12;
-               *length = 0;
-               break;
        case 0:
                *length = read_4(kbuf, data) - 4;
                *length = (*length + 3) & ~3;
@@ -397,7 +394,11 @@ static unsigned int update_pointers(struct kbuffer *kbuf)
 
        type_len = translate_data(kbuf, ptr, &ptr, &delta, &length);
 
-       kbuf->timestamp += delta;
+       if (type_len == KBUFFER_TYPE_TIME_STAMP)
+               kbuf->timestamp = delta;
+       else
+               kbuf->timestamp += delta;
+
        kbuf->index = calc_index(kbuf, ptr);
        kbuf->next = kbuf->index + length;
 
@@ -454,7 +455,9 @@ static int __next_event(struct kbuffer *kbuf)
                if (kbuf->next >= kbuf->size)
                        return -1;
                type = update_pointers(kbuf);
-       } while (type == KBUFFER_TYPE_TIME_EXTEND || type == KBUFFER_TYPE_PADDING);
+       } while (type == KBUFFER_TYPE_TIME_EXTEND ||
+                type == KBUFFER_TYPE_TIME_STAMP ||
+                type == KBUFFER_TYPE_PADDING);
 
        return 0;
 }
@@ -547,6 +550,34 @@ int kbuffer_load_subbuffer(struct kbuffer *kbuf, void *subbuffer)
 }
 
 /**
+ * kbuffer_subbuf_timestamp - read the timestamp from a sub buffer
+ * @kbuf:      The kbuffer to read from
+ * @subbuf:    The subbuffer to read from.
+ *
+ * Return the timestamp from a subbuffer.
+ */
+unsigned long long kbuffer_subbuf_timestamp(struct kbuffer *kbuf, void *subbuf)
+{
+       return kbuf->read_8(subbuf);
+}
+
+/**
+ * kbuffer_ptr_delta - read the delta field from a record
+ * @kbuf:      The kbuffer to read from
+ * @ptr:       The record in the buffer.
+ *
+ * Return the timestamp delta from a record
+ */
+unsigned int kbuffer_ptr_delta(struct kbuffer *kbuf, void *ptr)
+{
+       unsigned int type_len_ts;
+
+       type_len_ts = read_4(kbuf, ptr);
+       return ts4host(kbuf, type_len_ts);
+}
+
+
+/**
  * kbuffer_read_event - read the next event in the kbuffer subbuffer
  * @kbuf:      The kbuffer to read from
  * @ts:                The address to store the timestamp of the event (may be NULL to ignore)
index ed4d697..5fa8292 100644 (file)
@@ -49,6 +49,8 @@ int kbuffer_load_subbuffer(struct kbuffer *kbuf, void *subbuffer);
 void *kbuffer_read_event(struct kbuffer *kbuf, unsigned long long *ts);
 void *kbuffer_next_event(struct kbuffer *kbuf, unsigned long long *ts);
 unsigned long long kbuffer_timestamp(struct kbuffer *kbuf);
+unsigned long long kbuffer_subbuf_timestamp(struct kbuffer *kbuf, void *subbuf);
+unsigned int kbuffer_ptr_delta(struct kbuffer *kbuf, void *ptr);
 
 void *kbuffer_translate_data(int swap, void *data, unsigned int *size);
 
index eda15a5..3c59677 100644 (file)
@@ -84,4 +84,6 @@ unsigned long arch_jump_destination(struct instruction *insn);
 
 unsigned long arch_dest_rela_offset(int addend);
 
+const char *arch_nop_insn(int len);
+
 #endif /* _ARCH_H */
index 4b504fc..9872195 100644 (file)
@@ -565,3 +565,21 @@ void arch_initial_func_cfi_state(struct cfi_init_state *state)
        state->regs[16].base = CFI_CFA;
        state->regs[16].offset = -8;
 }
+
+const char *arch_nop_insn(int len)
+{
+       static const char nops[5][5] = {
+               /* 1 */ { 0x90 },
+               /* 2 */ { 0x66, 0x90 },
+               /* 3 */ { 0x0f, 0x1f, 0x00 },
+               /* 4 */ { 0x0f, 0x1f, 0x40, 0x00 },
+               /* 5 */ { 0x0f, 0x1f, 0x44, 0x00, 0x00 },
+       };
+
+       if (len < 1 || len > 5) {
+               WARN("invalid NOP size: %d\n", len);
+               return NULL;
+       }
+
+       return nops[len-1];
+}
diff --git a/tools/objtool/arch/x86/include/arch_elf.h b/tools/objtool/arch/x86/include/arch_elf.h
new file mode 100644 (file)
index 0000000..69cc426
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef _OBJTOOL_ARCH_ELF
+#define _OBJTOOL_ARCH_ELF
+
+#define R_NONE R_X86_64_NONE
+
+#endif /* _OBJTOOL_ARCH_ELF */
index 5fbb90a..5e0d70a 100644 (file)
@@ -12,6 +12,7 @@
 #include "check.h"
 #include "special.h"
 #include "warn.h"
+#include "arch_elf.h"
 
 #include <linux/hashtable.h>
 #include <linux/kernel.h>
@@ -766,6 +767,24 @@ static int add_call_destinations(struct objtool_file *file)
                        insn->call_dest = rela->sym;
 
                /*
+                * Many compilers cannot disable KCOV with a function attribute
+                * so they need a little help, NOP out any KCOV calls from noinstr
+                * text.
+                */
+               if (insn->sec->noinstr &&
+                   !strncmp(insn->call_dest->name, "__sanitizer_cov_", 16)) {
+                       if (rela) {
+                               rela->type = R_NONE;
+                               elf_write_rela(file->elf, rela);
+                       }
+
+                       elf_write_insn(file->elf, insn->sec,
+                                      insn->offset, insn->len,
+                                      arch_nop_insn(insn->len));
+                       insn->type = INSN_NOP;
+               }
+
+               /*
                 * Whatever stack impact regular CALLs have, should be undone
                 * by the RETURN of the called function.
                 *
@@ -2190,10 +2209,36 @@ static inline const char *call_dest_name(struct instruction *insn)
        return "{dynamic}";
 }
 
+static inline bool noinstr_call_dest(struct symbol *func)
+{
+       /*
+        * We can't deal with indirect function calls at present;
+        * assume they're instrumented.
+        */
+       if (!func)
+               return false;
+
+       /*
+        * If the symbol is from a noinstr section; we good.
+        */
+       if (func->sec->noinstr)
+               return true;
+
+       /*
+        * The __ubsan_handle_*() calls are like WARN(), they only happen when
+        * something 'BAD' happened. At the risk of taking the machine down,
+        * let them proceed to get the message out.
+        */
+       if (!strncmp(func->name, "__ubsan_handle_", 15))
+               return true;
+
+       return false;
+}
+
 static int validate_call(struct instruction *insn, struct insn_state *state)
 {
        if (state->noinstr && state->instr <= 0 &&
-           (!insn->call_dest || !insn->call_dest->sec->noinstr)) {
+           !noinstr_call_dest(insn->call_dest)) {
                WARN_FUNC("call to %s() leaves .noinstr.text section",
                                insn->sec, insn->offset, call_dest_name(insn));
                return 1;
@@ -2740,13 +2785,13 @@ int check(const char *_objname, bool orc)
 
        objname = _objname;
 
-       file.elf = elf_open_read(objname, orc ? O_RDWR : O_RDONLY);
+       file.elf = elf_open_read(objname, O_RDWR);
        if (!file.elf)
                return 1;
 
        INIT_LIST_HEAD(&file.insn_list);
        hash_init(file.insn_hash);
-       file.c_file = find_section_by_name(file.elf, ".comment");
+       file.c_file = !vmlinux && find_section_by_name(file.elf, ".comment");
        file.ignore_unreachables = no_unreachable;
        file.hints = false;
 
@@ -2801,7 +2846,9 @@ int check(const char *_objname, bool orc)
                ret = create_orc_sections(&file);
                if (ret < 0)
                        goto out;
+       }
 
+       if (file.elf->changed) {
                ret = elf_write(file.elf);
                if (ret < 0)
                        goto out;
index 8422567..26d11d8 100644 (file)
@@ -529,8 +529,9 @@ static int read_relas(struct elf *elf)
                        rela->addend = rela->rela.r_addend;
                        rela->offset = rela->rela.r_offset;
                        symndx = GELF_R_SYM(rela->rela.r_info);
-                       rela->sym = find_symbol_by_index(elf, symndx);
                        rela->sec = sec;
+                       rela->idx = i;
+                       rela->sym = find_symbol_by_index(elf, symndx);
                        if (!rela->sym) {
                                WARN("can't find rela entry symbol %d for %s",
                                     symndx, sec->name);
@@ -713,6 +714,8 @@ struct section *elf_create_section(struct elf *elf, const char *name,
        elf_hash_add(elf->section_hash, &sec->hash, sec->idx);
        elf_hash_add(elf->section_name_hash, &sec->name_hash, str_hash(sec->name));
 
+       elf->changed = true;
+
        return sec;
 }
 
@@ -746,7 +749,7 @@ struct section *elf_create_rela_section(struct elf *elf, struct section *base)
        return sec;
 }
 
-int elf_rebuild_rela_section(struct section *sec)
+int elf_rebuild_rela_section(struct elf *elf, struct section *sec)
 {
        struct rela *rela;
        int nr, idx = 0, size;
@@ -763,6 +766,9 @@ int elf_rebuild_rela_section(struct section *sec)
                return -1;
        }
 
+       sec->changed = true;
+       elf->changed = true;
+
        sec->data->d_buf = relas;
        sec->data->d_size = size;
 
@@ -779,7 +785,44 @@ int elf_rebuild_rela_section(struct section *sec)
        return 0;
 }
 
-int elf_write(const struct elf *elf)
+int elf_write_insn(struct elf *elf, struct section *sec,
+                  unsigned long offset, unsigned int len,
+                  const char *insn)
+{
+       Elf_Data *data = sec->data;
+
+       if (data->d_type != ELF_T_BYTE || data->d_off) {
+               WARN("write to unexpected data for section: %s", sec->name);
+               return -1;
+       }
+
+       memcpy(data->d_buf + offset, insn, len);
+       elf_flagdata(data, ELF_C_SET, ELF_F_DIRTY);
+
+       elf->changed = true;
+
+       return 0;
+}
+
+int elf_write_rela(struct elf *elf, struct rela *rela)
+{
+       struct section *sec = rela->sec;
+
+       rela->rela.r_info = GELF_R_INFO(rela->sym->idx, rela->type);
+       rela->rela.r_addend = rela->addend;
+       rela->rela.r_offset = rela->offset;
+
+       if (!gelf_update_rela(sec->data, rela->idx, &rela->rela)) {
+               WARN_ELF("gelf_update_rela");
+               return -1;
+       }
+
+       elf->changed = true;
+
+       return 0;
+}
+
+int elf_write(struct elf *elf)
 {
        struct section *sec;
        Elf_Scn *s;
@@ -796,6 +839,8 @@ int elf_write(const struct elf *elf)
                                WARN_ELF("gelf_update_shdr");
                                return -1;
                        }
+
+                       sec->changed = false;
                }
        }
 
@@ -808,6 +853,8 @@ int elf_write(const struct elf *elf)
                return -1;
        }
 
+       elf->changed = false;
+
        return 0;
 }
 
index f4fe1d6..7324e77 100644 (file)
@@ -64,9 +64,10 @@ struct rela {
        GElf_Rela rela;
        struct section *sec;
        struct symbol *sym;
-       unsigned int type;
        unsigned long offset;
+       unsigned int type;
        int addend;
+       int idx;
        bool jump_table_start;
 };
 
@@ -76,6 +77,7 @@ struct elf {
        Elf *elf;
        GElf_Ehdr ehdr;
        int fd;
+       bool changed;
        char *name;
        struct list_head sections;
        DECLARE_HASHTABLE(symbol_hash, ELF_HASH_BITS);
@@ -118,7 +120,11 @@ struct elf *elf_open_read(const char *name, int flags);
 struct section *elf_create_section(struct elf *elf, const char *name, size_t entsize, int nr);
 struct section *elf_create_rela_section(struct elf *elf, struct section *base);
 void elf_add_rela(struct elf *elf, struct rela *rela);
-int elf_write(const struct elf *elf);
+int elf_write_insn(struct elf *elf, struct section *sec,
+                  unsigned long offset, unsigned int len,
+                  const char *insn);
+int elf_write_rela(struct elf *elf, struct rela *rela);
+int elf_write(struct elf *elf);
 void elf_close(struct elf *elf);
 
 struct section *find_section_by_name(const struct elf *elf, const char *name);
@@ -130,7 +136,7 @@ struct rela *find_rela_by_dest(const struct elf *elf, struct section *sec, unsig
 struct rela *find_rela_by_dest_range(const struct elf *elf, struct section *sec,
                                     unsigned long offset, unsigned int len);
 struct symbol *find_func_containing(struct section *sec, unsigned long offset);
-int elf_rebuild_rela_section(struct section *sec);
+int elf_rebuild_rela_section(struct elf *elf, struct section *sec);
 
 #define for_each_sec(file, sec)                                                \
        list_for_each_entry(sec, &file->elf->sections, list)
index c954998..4c37f80 100644 (file)
@@ -222,7 +222,7 @@ int create_orc_sections(struct objtool_file *file)
                }
        }
 
-       if (elf_rebuild_rela_section(ip_relasec))
+       if (elf_rebuild_rela_section(file->elf, ip_relasec))
                return -1;
 
        return 0;
index 877ca6b..5136338 100644 (file)
@@ -396,6 +396,18 @@ else
       NO_LIBBPF := 1
       NO_JVMTI := 1
     else
+      ifneq ($(filter s% -fsanitize=address%,$(EXTRA_CFLAGS),),)
+        ifneq ($(shell ldconfig -p | grep libasan >/dev/null 2>&1; echo $$?), 0)
+          msg := $(error No libasan found, please install libasan);
+        endif
+      endif
+
+      ifneq ($(filter s% -fsanitize=undefined%,$(EXTRA_CFLAGS),),)
+        ifneq ($(shell ldconfig -p | grep libubsan >/dev/null 2>&1; echo $$?), 0)
+          msg := $(error No libubsan found, please install libubsan);
+        endif
+      endif
+
       ifneq ($(filter s% -static%,$(LDFLAGS),),)
         msg := $(error No static glibc found, please install glibc-static);
       else
index 35b61bf..b190f2e 100644 (file)
 336    common  recv                            sys_recv                        compat_sys_recv
 337    common  recvfrom                        sys_recvfrom                    compat_sys_recvfrom
 338    common  shutdown                        sys_shutdown
-339    common  setsockopt                      sys_setsockopt                  compat_sys_setsockopt
-340    common  getsockopt                      sys_getsockopt                  compat_sys_getsockopt
+339    common  setsockopt                      sys_setsockopt                  sys_setsockopt
+340    common  getsockopt                      sys_getsockopt                  sys_getsockopt
 341    common  sendmsg                         sys_sendmsg                     compat_sys_sendmsg
 342    common  recvmsg                         sys_recvmsg                     compat_sys_recvmsg
 343    32      recvmmsg                        sys_recvmmsg_time32             compat_sys_recvmmsg_time32
index b38d484..56ae24b 100644 (file)
 362  common    connect                 sys_connect                     compat_sys_connect
 363  common    listen                  sys_listen                      sys_listen
 364  common    accept4                 sys_accept4                     compat_sys_accept4
-365  common    getsockopt              sys_getsockopt                  compat_sys_getsockopt
-366  common    setsockopt              sys_setsockopt                  compat_sys_setsockopt
+365  common    getsockopt              sys_getsockopt                  sys_getsockopt
+366  common    setsockopt              sys_setsockopt                  sys_setsockopt
 367  common    getsockname             sys_getsockname                 compat_sys_getsockname
 368  common    getpeername             sys_getpeername                 compat_sys_getpeername
 369  common    sendto                  sys_sendto                      compat_sys_sendto
index 37b844f..e008d63 100644 (file)
 435    common  clone3                  sys_clone3
 437    common  openat2                 sys_openat2
 438    common  pidfd_getfd             sys_pidfd_getfd
+439    common  faccessat2              sys_faccessat2
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
 538    x32     sendmmsg                compat_sys_sendmmsg
 539    x32     process_vm_readv        compat_sys_process_vm_readv
 540    x32     process_vm_writev       compat_sys_process_vm_writev
-541    x32     setsockopt              compat_sys_setsockopt
-542    x32     getsockopt              compat_sys_getsockopt
+541    x32     setsockopt              sys_setsockopt
+542    x32     getsockopt              sys_getsockopt
 543    x32     io_setup                compat_sys_io_setup
 544    x32     io_submit               compat_sys_io_submit
 545    x32     execveat                compat_sys_execveat
index 839ef52..6ce4512 100644 (file)
@@ -641,6 +641,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
                        }
                        evsel->core.attr.freq = 0;
                        evsel->core.attr.sample_period = 1;
+                       evsel->no_aux_samples = true;
                        intel_pt_evsel = evsel;
                        opts->full_auxtrace = true;
                }
index e108d90..a37e791 100644 (file)
@@ -852,20 +852,20 @@ static int record__open(struct record *rec)
         * event synthesis.
         */
        if (opts->initial_delay || target__has_cpu(&opts->target)) {
-               if (perf_evlist__add_dummy(evlist))
-                       return -ENOMEM;
+               pos = perf_evlist__get_tracking_event(evlist);
+               if (!evsel__is_dummy_event(pos)) {
+                       /* Set up dummy event. */
+                       if (perf_evlist__add_dummy(evlist))
+                               return -ENOMEM;
+                       pos = evlist__last(evlist);
+                       perf_evlist__set_tracking_event(evlist, pos);
+               }
 
-               /* Disable tracking of mmaps on lead event. */
-               pos = evlist__first(evlist);
-               pos->tracking = 0;
-               /* Set up dummy event. */
-               pos = evlist__last(evlist);
-               pos->tracking = 1;
                /*
                 * Enable the dummy event when the process is forked for
                 * initial_delay, immediately for system wide.
                 */
-               if (opts->initial_delay)
+               if (opts->initial_delay && !pos->immediate)
                        pos->core.attr.enable_on_exec = 1;
                else
                        pos->immediate = 1;
index b63b3fb..5f1d2a8 100644 (file)
@@ -478,8 +478,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
        if (rep->time_str)
                ret += fprintf(fp, " (time slices: %s)", rep->time_str);
 
-       if (symbol_conf.show_ref_callgraph &&
-           strstr(evname, "call-graph=no")) {
+       if (symbol_conf.show_ref_callgraph && evname && strstr(evname, "call-graph=no")) {
                ret += fprintf(fp, ", show reference callgraph");
        }
 
index 5da2436..4474577 100644 (file)
@@ -462,7 +462,7 @@ static int perf_evsel__check_attr(struct evsel *evsel, struct perf_session *sess
                return -EINVAL;
 
        if (PRINT_FIELD(IREGS) &&
-           evsel__check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS", PERF_OUTPUT_IREGS))
+           evsel__do_check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS", PERF_OUTPUT_IREGS, allow_user_set))
                return -EINVAL;
 
        if (PRINT_FIELD(UREGS) &&
@@ -3837,6 +3837,9 @@ int cmd_script(int argc, const char **argv)
        if (err)
                goto out_delete;
 
+       if (zstd_init(&(session->zstd_data), 0) < 0)
+               pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");
+
        err = __cmd_script(&script);
 
        flush_scripting();
index 7bd73a9..d187e46 100644 (file)
@@ -1055,7 +1055,7 @@ def cbr(id, raw_buf):
        cbr = data[0]
        MHz = (data[4] + 500) / 1000
        percent = ((cbr * 1000 / data[2]) + 5) / 10
-       value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, MHz, 4, percent)
+       value = struct.pack("!hiqiiiiii", 4, 8, id, 4, cbr, 4, int(MHz), 4, int(percent))
        cbr_file.write(value)
 
 def mwait(id, raw_buf):
index 26d7be7..7daa8bb 100755 (executable)
@@ -768,7 +768,8 @@ class CallGraphModel(CallGraphModelBase):
                                                " FROM calls"
                                                " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
                                                " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
-                                               " WHERE symbols.name" + match +
+                                               " WHERE calls.id <> 0"
+                                               " AND symbols.name" + match +
                                                " GROUP BY comm_id, thread_id, call_path_id"
                                                " ORDER BY comm_id, thread_id, call_path_id")
 
@@ -963,7 +964,8 @@ class CallTreeModel(CallGraphModelBase):
                                                " FROM calls"
                                                " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
                                                " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
-                                               " WHERE symbols.name" + match +
+                                               " WHERE calls.id <> 0"
+                                               " AND symbols.name" + match +
                                                " ORDER BY comm_id, thread_id, call_time, calls.id")
 
        def FindPath(self, query):
@@ -1050,6 +1052,7 @@ class TreeWindowBase(QMdiSubWindow):
                                child = self.model.index(row, 0, parent)
                                if child.internalPointer().dbid == dbid:
                                        found = True
+                                       self.view.setExpanded(parent, True)
                                        self.view.setCurrentIndex(child)
                                        parent = child
                                        break
@@ -1127,6 +1130,7 @@ class CallTreeWindow(TreeWindowBase):
                                child = self.model.index(row, 0, parent)
                                if child.internalPointer().dbid == dbid:
                                        found = True
+                                       self.view.setExpanded(parent, True)
                                        self.view.setCurrentIndex(child)
                                        parent = child
                                        break
@@ -1139,6 +1143,7 @@ class CallTreeWindow(TreeWindowBase):
                                return
                        last_child = None
                        for row in xrange(n):
+                               self.view.setExpanded(parent, True)
                                child = self.model.index(row, 0, parent)
                                child_call_time = child.internalPointer().call_time
                                if child_call_time < time:
@@ -1151,9 +1156,11 @@ class CallTreeWindow(TreeWindowBase):
                        if not last_child:
                                if not found:
                                        child = self.model.index(0, 0, parent)
+                                       self.view.setExpanded(parent, True)
                                        self.view.setCurrentIndex(child)
                                return
                        found = True
+                       self.view.setExpanded(parent, True)
                        self.view.setCurrentIndex(last_child)
                        parent = last_child
 
index 61f3be9..6578001 100755 (executable)
@@ -17,6 +17,7 @@
 from __future__ import print_function
 import sys
 import os
+import io
 import argparse
 import json
 
@@ -81,7 +82,7 @@ class FlameGraphCLI:
 
         if self.args.format == "html":
             try:
-                with open(self.args.template) as f:
+                with io.open(self.args.template, encoding="utf-8") as f:
                     output_str = f.read().replace("/** @flamegraph_json **/",
                                                   json_str)
             except IOError as e:
@@ -93,11 +94,12 @@ class FlameGraphCLI:
             output_fn = self.args.output or "stacks.json"
 
         if output_fn == "-":
-            sys.stdout.write(output_str)
+            with io.open(sys.stdout.fileno(), "w", encoding="utf-8", closefd=False) as out:
+                out.write(output_str)
         else:
             print("dumping data to {}".format(output_fn))
             try:
-                with open(output_fn, "w") as out:
+                with io.open(output_fn, "w", encoding="utf-8") as out:
                     out.write(output_str)
             except IOError as e:
                 print("Error writing output file: {}".format(e), file=sys.stderr)
index 811cc0e..110f0c6 100644 (file)
@@ -65,6 +65,7 @@ size_t syscall_arg__scnprintf_statx_mask(char *bf, size_t size, struct syscall_a
        P_FLAG(SIZE);
        P_FLAG(BLOCKS);
        P_FLAG(BTIME);
+       P_FLAG(MNT_ID);
 
 #undef P_FLAG
 
index f98a118..be9c4c0 100644 (file)
@@ -2288,6 +2288,11 @@ static struct thread *hist_browser__selected_thread(struct hist_browser *browser
        return browser->he_selection->thread;
 }
 
+static struct res_sample *hist_browser__selected_res_sample(struct hist_browser *browser)
+{
+       return browser->he_selection ? browser->he_selection->res_samples : NULL;
+}
+
 /* Check whether the browser is for 'top' or 'report' */
 static inline bool is_report_browser(void *timer)
 {
@@ -3357,16 +3362,16 @@ skip_annotation:
                                             &options[nr_options], NULL, NULL, evsel);
                nr_options += add_res_sample_opt(browser, &actions[nr_options],
                                                 &options[nr_options],
-                                hist_browser__selected_entry(browser)->res_samples,
-                                evsel, A_NORMAL);
+                                                hist_browser__selected_res_sample(browser),
+                                                evsel, A_NORMAL);
                nr_options += add_res_sample_opt(browser, &actions[nr_options],
                                                 &options[nr_options],
-                                hist_browser__selected_entry(browser)->res_samples,
-                                evsel, A_ASM);
+                                                hist_browser__selected_res_sample(browser),
+                                                evsel, A_ASM);
                nr_options += add_res_sample_opt(browser, &actions[nr_options],
                                                 &options[nr_options],
-                                hist_browser__selected_entry(browser)->res_samples,
-                                evsel, A_SOURCE);
+                                                hist_browser__selected_res_sample(browser),
+                                                evsel, A_SOURCE);
                nr_options += add_switch_opt(browser, &actions[nr_options],
                                             &options[nr_options]);
 skip_scripting:
@@ -3598,6 +3603,23 @@ static int __perf_evlist__tui_browse_hists(struct evlist *evlist,
                                    hbt, warn_lost_event);
 }
 
+static bool perf_evlist__single_entry(struct evlist *evlist)
+{
+       int nr_entries = evlist->core.nr_entries;
+
+       if (nr_entries == 1)
+              return true;
+
+       if (nr_entries == 2) {
+               struct evsel *last = evlist__last(evlist);
+
+               if (evsel__is_dummy_event(last))
+                       return true;
+       }
+
+       return false;
+}
+
 int perf_evlist__tui_browse_hists(struct evlist *evlist, const char *help,
                                  struct hist_browser_timer *hbt,
                                  float min_pcnt,
@@ -3608,7 +3630,7 @@ int perf_evlist__tui_browse_hists(struct evlist *evlist, const char *help,
        int nr_entries = evlist->core.nr_entries;
 
 single_entry:
-       if (nr_entries == 1) {
+       if (perf_evlist__single_entry(evlist)) {
                struct evsel *first = evlist__first(evlist);
 
                return perf_evsel__hists_browse(first, nr_entries, help,
index b020a86..9887ae0 100644 (file)
@@ -142,7 +142,8 @@ static int
 gen_read_mem(struct bpf_insn_pos *pos,
             int src_base_addr_reg,
             int dst_addr_reg,
-            long offset)
+            long offset,
+            int probeid)
 {
        /* mov arg3, src_base_addr_reg */
        if (src_base_addr_reg != BPF_REG_ARG3)
@@ -159,7 +160,7 @@ gen_read_mem(struct bpf_insn_pos *pos,
                ins(BPF_MOV64_REG(BPF_REG_ARG1, dst_addr_reg), pos);
 
        /* Call probe_read  */
-       ins(BPF_EMIT_CALL(BPF_FUNC_probe_read), pos);
+       ins(BPF_EMIT_CALL(probeid), pos);
        /*
         * Error processing: if read fail, goto error code,
         * will be relocated. Target should be the start of
@@ -241,7 +242,7 @@ static int
 gen_prologue_slowpath(struct bpf_insn_pos *pos,
                      struct probe_trace_arg *args, int nargs)
 {
-       int err, i;
+       int err, i, probeid;
 
        for (i = 0; i < nargs; i++) {
                struct probe_trace_arg *arg = &args[i];
@@ -276,11 +277,16 @@ gen_prologue_slowpath(struct bpf_insn_pos *pos,
                                stack_offset), pos);
 
                ref = arg->ref;
+               probeid = BPF_FUNC_probe_read_kernel;
                while (ref) {
                        pr_debug("prologue: arg %d: offset %ld\n",
                                 i, ref->offset);
+
+                       if (ref->user_access)
+                               probeid = BPF_FUNC_probe_read_user;
+
                        err = gen_read_mem(pos, BPF_REG_3, BPF_REG_7,
-                                          ref->offset);
+                                          ref->offset, probeid);
                        if (err) {
                                pr_err("prologue: failed to generate probe_read function call\n");
                                goto errout;
index 173b4f0..ab48be4 100644 (file)
@@ -1566,6 +1566,18 @@ void perf_evlist__to_front(struct evlist *evlist,
        list_splice(&move, &evlist->core.entries);
 }
 
+struct evsel *perf_evlist__get_tracking_event(struct evlist *evlist)
+{
+       struct evsel *evsel;
+
+       evlist__for_each_entry(evlist, evsel) {
+               if (evsel->tracking)
+                       return evsel;
+       }
+
+       return evlist__first(evlist);
+}
+
 void perf_evlist__set_tracking_event(struct evlist *evlist,
                                     struct evsel *tracking_evsel)
 {
index b6f325d..a8081df 100644 (file)
@@ -335,6 +335,7 @@ void perf_evlist__to_front(struct evlist *evlist,
        evlist__cpu_iter_start(evlist);                 \
        perf_cpu_map__for_each_cpu (cpu, index, (evlist)->core.all_cpus)
 
+struct evsel *perf_evlist__get_tracking_event(struct evlist *evlist);
 void perf_evlist__set_tracking_event(struct evlist *evlist,
                                     struct evsel *tracking_evsel);
 
index 96e5171..ef802f6 100644 (file)
@@ -898,12 +898,6 @@ static void evsel__apply_config_terms(struct evsel *evsel,
        }
 }
 
-static bool is_dummy_event(struct evsel *evsel)
-{
-       return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
-              (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
-}
-
 struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type)
 {
        struct evsel_config_term *term, *found_term = NULL;
@@ -1020,12 +1014,12 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
        if (callchain && callchain->enabled && !evsel->no_aux_samples)
                evsel__config_callchain(evsel, opts, callchain);
 
-       if (opts->sample_intr_regs) {
+       if (opts->sample_intr_regs && !evsel->no_aux_samples) {
                attr->sample_regs_intr = opts->sample_intr_regs;
                evsel__set_sample_bit(evsel, REGS_INTR);
        }
 
-       if (opts->sample_user_regs) {
+       if (opts->sample_user_regs && !evsel->no_aux_samples) {
                attr->sample_regs_user |= opts->sample_user_regs;
                evsel__set_sample_bit(evsel, REGS_USER);
        }
@@ -1161,7 +1155,7 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
         * The software event will trigger -EOPNOTSUPP error out,
         * if BRANCH_STACK bit is set.
         */
-       if (is_dummy_event(evsel))
+       if (evsel__is_dummy_event(evsel))
                evsel__reset_sample_bit(evsel, BRANCH_STACK);
 }
 
index 0f963c2..35e3f6d 100644 (file)
@@ -399,6 +399,12 @@ static inline bool evsel__has_br_stack(const struct evsel *evsel)
               evsel->synth_sample_type & PERF_SAMPLE_BRANCH_STACK;
 }
 
+static inline bool evsel__is_dummy_event(struct evsel *evsel)
+{
+       return (evsel->core.attr.type == PERF_TYPE_SOFTWARE) &&
+              (evsel->core.attr.config == PERF_COUNT_SW_DUMMY);
+}
+
 struct perf_env *evsel__env(struct evsel *evsel);
 
 int evsel__store_ids(struct evsel *evsel, struct evlist *evlist);
index e4dd8bf..cb3c1e5 100644 (file)
@@ -1735,6 +1735,7 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
        u64 sample_type = evsel->core.attr.sample_type;
        u64 id = evsel->core.id[0];
        u8 cpumode;
+       u64 regs[8 * sizeof(sample.intr_regs.mask)];
 
        if (intel_pt_skip_event(pt))
                return 0;
@@ -1784,8 +1785,8 @@ static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
        }
 
        if (sample_type & PERF_SAMPLE_REGS_INTR &&
-           items->mask[INTEL_PT_GP_REGS_POS]) {
-               u64 regs[sizeof(sample.intr_regs.mask)];
+           (items->mask[INTEL_PT_GP_REGS_POS] ||
+            items->mask[INTEL_PT_XMM_POS])) {
                u64 regs_mask = evsel->core.attr.sample_regs_intr;
                u64 *pos;
 
index c4ca932..acef87d 100644 (file)
@@ -26,7 +26,7 @@ do { \
                YYABORT; \
 } while (0)
 
-static struct list_head* alloc_list()
+static struct list_head* alloc_list(void)
 {
        struct list_head *list;
 
@@ -349,7 +349,7 @@ PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF sep_dc
        struct list_head *list;
        char pmu_name[128];
 
-       snprintf(&pmu_name, 128, "%s-%s", $1, $3);
+       snprintf(pmu_name, sizeof(pmu_name), "%s-%s", $1, $3);
        free($1);
        free($3);
        if (parse_events_multi_pmu_add(_parse_state, pmu_name, &list) < 0)
index 85e0c7f..f971d9a 100644 (file)
@@ -86,7 +86,6 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
                          struct perf_pmu_info *info);
 struct list_head *perf_pmu__alias(struct perf_pmu *pmu,
                                  struct list_head *head_terms);
-int perf_pmu_wrap(void);
 void perf_pmu_error(struct list_head *list, char *name, char const *msg);
 
 int perf_pmu__new_format(struct list_head *list, char *name,
index a08f373..df713a5 100644 (file)
@@ -1575,7 +1575,7 @@ static int parse_perf_probe_arg(char *str, struct perf_probe_arg *arg)
        }
 
        tmp = strchr(str, '@');
-       if (tmp && tmp != str && strcmp(tmp + 1, "user")) { /* user attr */
+       if (tmp && tmp != str && !strcmp(tmp + 1, "user")) { /* user attr */
                if (!user_access_is_supported()) {
                        semantic_error("ftrace does not support user access\n");
                        return -EINVAL;
@@ -1995,7 +1995,10 @@ static int __synthesize_probe_trace_arg_ref(struct probe_trace_arg_ref *ref,
                if (depth < 0)
                        return depth;
        }
-       err = strbuf_addf(buf, "%+ld(", ref->offset);
+       if (ref->user_access)
+               err = strbuf_addf(buf, "%s%ld(", "+u", ref->offset);
+       else
+               err = strbuf_addf(buf, "%+ld(", ref->offset);
        return (err < 0) ? err : depth;
 }
 
index 8c85294..064b63a 100644 (file)
@@ -1044,7 +1044,7 @@ static struct {
        DEFINE_TYPE(FTRACE_README_PROBE_TYPE_X, "*type: * x8/16/32/64,*"),
        DEFINE_TYPE(FTRACE_README_KRETPROBE_OFFSET, "*place (kretprobe): *"),
        DEFINE_TYPE(FTRACE_README_UPROBE_REF_CTR, "*ref_ctr_offset*"),
-       DEFINE_TYPE(FTRACE_README_USER_ACCESS, "*[u]<offset>*"),
+       DEFINE_TYPE(FTRACE_README_USER_ACCESS, "*u]<offset>*"),
        DEFINE_TYPE(FTRACE_README_MULTIPROBE_EVENT, "*Create/append/*"),
        DEFINE_TYPE(FTRACE_README_IMMEDIATE_VALUE, "*\\imm-value,*"),
 };
index 3c6976f..57d0706 100644 (file)
@@ -668,7 +668,7 @@ static void print_aggr(struct perf_stat_config *config,
        int s;
        bool first;
 
-       if (!(config->aggr_map || config->aggr_get_id))
+       if (!config->aggr_map || !config->aggr_get_id)
                return;
 
        aggr_update_shadow(config, evlist);
@@ -1169,7 +1169,7 @@ static void print_percore(struct perf_stat_config *config,
        int s;
        bool first = true;
 
-       if (!(config->aggr_map || config->aggr_get_id))
+       if (!config->aggr_map || !config->aggr_get_id)
                return;
 
        if (config->percore_show_thread)
index eec23fa..83844f8 100644 (file)
@@ -47,7 +47,7 @@ static int transfer_size;
 static int iterations;
 static int interval = 5; /* interval in seconds for showing transfer rate */
 
-uint8_t default_tx[] = {
+static uint8_t default_tx[] = {
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
        0x40, 0x00, 0x00, 0x00, 0x00, 0x95,
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
@@ -56,8 +56,8 @@ uint8_t default_tx[] = {
        0xF0, 0x0D,
 };
 
-uint8_t default_rx[ARRAY_SIZE(default_tx)] = {0, };
-char *input_tx;
+static uint8_t default_rx[ARRAY_SIZE(default_tx)] = {0, };
+static char *input_tx;
 
 static void hex_dump(const void *src, size_t length, size_t line_size,
                     char *prefix)
@@ -461,8 +461,8 @@ int main(int argc, char *argv[])
                pabort("can't get max speed hz");
 
        printf("spi mode: 0x%x\n", mode);
-       printf("bits per word: %d\n", bits);
-       printf("max speed: %d Hz (%d KHz)\n", speed, speed/1000);
+       printf("bits per word: %u\n", bits);
+       printf("max speed: %u Hz (%u kHz)\n", speed, speed/1000);
 
        if (input_tx)
                transfer_escaped_string(fd, input_tx);
index 787b6d4..f9b769f 100755 (executable)
@@ -82,7 +82,9 @@ def build_tests(linux: kunit_kernel.LinuxSourceTree,
                                        request.make_options)
        build_end = time.time()
        if not success:
-               return KunitResult(KunitStatus.BUILD_FAILURE, 'could not build kernel')
+               return KunitResult(KunitStatus.BUILD_FAILURE,
+                                  'could not build kernel',
+                                  build_end - build_start)
        if not success:
                return KunitResult(KunitStatus.BUILD_FAILURE,
                                   'could not build kernel',
index e75063d..02ffc3a 100644 (file)
@@ -10,7 +10,7 @@ import collections
 import re
 
 CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$'
-CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+)$'
+CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$'
 
 KconfigEntryBase = collections.namedtuple('KconfigEntry', ['name', 'value'])
 
index 64aac9d..f13e0c0 100644 (file)
@@ -265,11 +265,9 @@ def bubble_up_suite_errors(test_suite_list: List[TestSuite]) -> TestStatus:
        return bubble_up_errors(lambda x: x.status, test_suite_list)
 
 def parse_test_result(lines: List[str]) -> TestResult:
-       if not lines:
-               return TestResult(TestStatus.NO_TESTS, [], lines)
        consume_non_diagnositic(lines)
-       if not parse_tap_header(lines):
-               return None
+       if not lines or not parse_tap_header(lines):
+               return TestResult(TestStatus.NO_TESTS, [], lines)
        test_suites = []
        test_suite = parse_test_suite(lines)
        while test_suite:
@@ -282,6 +280,8 @@ def parse_run_tests(kernel_output) -> TestResult:
        failed_tests = 0
        crashed_tests = 0
        test_result = parse_test_result(list(isolate_kunit_output(kernel_output)))
+       if test_result.status == TestStatus.NO_TESTS:
+               print_with_timestamp(red('[ERROR] ') + 'no kunit output detected')
        for test_suite in test_result.suites:
                if test_suite.status == TestStatus.SUCCESS:
                        print_suite_divider(green('[PASSED] ') + test_suite.name)
index 5bb7b11..f9eeaea 100755 (executable)
@@ -170,6 +170,17 @@ class KUnitParserTest(unittest.TestCase):
                        result.status)
                file.close()
 
+       def test_no_kunit_output(self):
+               crash_log = get_absolute_path(
+                       'test_data/test_insufficient_memory.log')
+               file = open(crash_log)
+               print_mock = mock.patch('builtins.print').start()
+               result = kunit_parser.parse_run_tests(
+                       kunit_parser.isolate_kunit_output(file.readlines()))
+               print_mock.assert_any_call(StrContains("no kunit output detected"))
+               print_mock.stop()
+               file.close()
+
        def test_crashed_test(self):
                crashed_log = get_absolute_path(
                        'test_data/test_is_test_passed-crash.log')
diff --git a/tools/testing/kunit/test_data/test_insufficient_memory.log b/tools/testing/kunit/test_data/test_insufficient_memory.log
new file mode 100644 (file)
index 0000000..e69de29
index db3c07b..b5f7a99 100644 (file)
@@ -51,7 +51,7 @@ struct nd_cmd_translate_spa {
                __u32 nfit_device_handle;
                __u32 _reserved;
                __u64 dpa;
-       } __packed devices[0];
+       } __packed devices[];
 
 } __packed;
 
@@ -74,7 +74,7 @@ struct nd_cmd_ars_err_inj_stat {
        struct nd_error_stat_query_record {
                __u64 err_inj_stat_spa_range_base;
                __u64 err_inj_stat_spa_range_length;
-       } __packed record[0];
+       } __packed record[];
 } __packed;
 
 #define ND_INTEL_SMART                  1
@@ -180,7 +180,7 @@ struct nd_intel_fw_send_data {
        __u32 context;
        __u32 offset;
        __u32 length;
-       __u8 data[0];
+       __u8 data[];
 /* this field is not declared due ot variable data from input */
 /*     __u32 status; */
 } __packed;
index 1195bd8..f4522e0 100644 (file)
@@ -54,6 +54,7 @@ TARGETS += splice
 TARGETS += static_keys
 TARGETS += sync
 TARGETS += sysctl
+TARGETS += tc-testing
 TARGETS += timens
 ifneq (1, $(quicktest))
 TARGETS += timers
index b497cfe..ac4ad00 100644 (file)
@@ -21,10 +21,6 @@ include ../../lib.mk
 $(TEST_GEN_PROGS): $(PROGS)
        cp $(PROGS) $(OUTPUT)/
 
-clean:
-       $(CLEAN)
-       rm -f $(PROGS)
-
 # Common test-unit targets to build common-layout test-cases executables
 # Needs secondary expansion to properly include the testcase c-file in pre-reqs
 .SECONDEXPANSION:
index 22aaec7..e7a8cf8 100644 (file)
@@ -111,6 +111,7 @@ SCRATCH_DIR := $(OUTPUT)/tools
 BUILD_DIR := $(SCRATCH_DIR)/build
 INCLUDE_DIR := $(SCRATCH_DIR)/include
 BPFOBJ := $(BUILD_DIR)/libbpf/libbpf.a
+RESOLVE_BTFIDS := $(BUILD_DIR)/resolve_btfids/resolve_btfids
 
 # Define simple and short `make test_progs`, `make test_sysctl`, etc targets
 # to build individual tests.
@@ -134,12 +135,12 @@ $(OUTPUT)/test_stub.o: test_stub.c $(BPFOBJ)
        $(call msg,CC,,$@)
        $(CC) -c $(CFLAGS) -o $@ $<
 
-VMLINUX_BTF_PATHS := $(if $(O),$(O)/vmlinux)                           \
+VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux)                           \
                     $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux)    \
                     ../../../../vmlinux                                \
                     /sys/kernel/btf/vmlinux                            \
                     /boot/vmlinux-$(shell uname -r)
-VMLINUX_BTF := $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
+VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
 
 $(OUTPUT)/runqslower: $(BPFOBJ)
        $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower     \
@@ -177,13 +178,28 @@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile)                       \
        $(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \
                    DESTDIR=$(SCRATCH_DIR) prefix= all install_headers
 
-$(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(INCLUDE_DIR):
+$(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(BUILD_DIR)/resolve_btfids $(INCLUDE_DIR):
        $(call msg,MKDIR,,$@)
        mkdir -p $@
 
 $(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) | $(BPFTOOL) $(INCLUDE_DIR)
+ifeq ($(VMLINUX_H),)
        $(call msg,GEN,,$@)
        $(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
+else
+       $(call msg,CP,,$@)
+       cp "$(VMLINUX_H)" $@
+endif
+
+$(RESOLVE_BTFIDS): $(BPFOBJ) | $(BUILD_DIR)/resolve_btfids     \
+                      $(TOOLSDIR)/bpf/resolve_btfids/main.c    \
+                      $(TOOLSDIR)/lib/rbtree.c                 \
+                      $(TOOLSDIR)/lib/zalloc.c                 \
+                      $(TOOLSDIR)/lib/string.c                 \
+                      $(TOOLSDIR)/lib/ctype.c                  \
+                      $(TOOLSDIR)/lib/str_error_r.c
+       $(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/resolve_btfids \
+               OUTPUT=$(BUILD_DIR)/resolve_btfids/ BPFOBJ=$(BPFOBJ)
 
 # Get Clang's default includes on this system, as opposed to those seen by
 # '-target bpf'. This fixes "missing" files on some architectures/distros,
@@ -347,9 +363,11 @@ endif
 
 $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS)                      \
                             $(TRUNNER_EXTRA_OBJS) $$(BPFOBJ)           \
+                            $(RESOLVE_BTFIDS)                          \
                             | $(TRUNNER_BINARY)-extras
        $$(call msg,BINARY,,$$@)
        $$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@
+       $(RESOLVE_BTFIDS) --no-fail --btf btf_data.o $$@
 
 endef
 
index 6f89887..719ab56 100644 (file)
@@ -2,20 +2,6 @@
 #ifndef __BPF_LEGACY__
 #define __BPF_LEGACY__
 
-/*
- * legacy bpf_map_def with extra fields supported only by bpf_load(), do not
- * use outside of samples/bpf
- */
-struct bpf_map_def_legacy {
-       unsigned int type;
-       unsigned int key_size;
-       unsigned int value_size;
-       unsigned int max_entries;
-       unsigned int map_flags;
-       unsigned int inner_map_idx;
-       unsigned int numa_node;
-};
-
 #define BPF_ANNOTATE_KV_PAIR(name, type_key, type_val)         \
        struct ____btf_map_##name {                             \
                type_key key;                                   \
index e36dd1a..f566556 100644 (file)
@@ -7,8 +7,6 @@
 
 #include <arpa/inet.h>
 
-#include <sys/epoll.h>
-
 #include <linux/err.h>
 #include <linux/in.h>
 #include <linux/in6.h>
 #include "network_helpers.h"
 
 #define clean_errno() (errno == 0 ? "None" : strerror(errno))
-#define log_err(MSG, ...) fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
-       __FILE__, __LINE__, clean_errno(), ##__VA_ARGS__)
+#define log_err(MSG, ...) ({                                           \
+                       int __save = errno;                             \
+                       fprintf(stderr, "(%s:%d: errno: %s) " MSG "\n", \
+                               __FILE__, __LINE__, clean_errno(),      \
+                               ##__VA_ARGS__);                         \
+                       errno = __save;                                 \
+})
 
 struct ipv4_packet pkt_v4 = {
        .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
@@ -37,131 +40,169 @@ struct ipv6_packet pkt_v6 = {
        .tcp.doff = 5,
 };
 
-int start_server_with_port(int family, int type, __u16 port)
+static int settimeo(int fd, int timeout_ms)
 {
-       struct sockaddr_storage addr = {};
-       socklen_t len;
-       int fd;
+       struct timeval timeout = { .tv_sec = 3 };
 
-       if (family == AF_INET) {
-               struct sockaddr_in *sin = (void *)&addr;
+       if (timeout_ms > 0) {
+               timeout.tv_sec = timeout_ms / 1000;
+               timeout.tv_usec = (timeout_ms % 1000) * 1000;
+       }
 
-               sin->sin_family = AF_INET;
-               sin->sin_port = htons(port);
-               len = sizeof(*sin);
-       } else {
-               struct sockaddr_in6 *sin6 = (void *)&addr;
+       if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeout,
+                      sizeof(timeout))) {
+               log_err("Failed to set SO_RCVTIMEO");
+               return -1;
+       }
 
-               sin6->sin6_family = AF_INET6;
-               sin6->sin6_port = htons(port);
-               len = sizeof(*sin6);
+       if (setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeout,
+                      sizeof(timeout))) {
+               log_err("Failed to set SO_SNDTIMEO");
+               return -1;
        }
 
-       fd = socket(family, type | SOCK_NONBLOCK, 0);
+       return 0;
+}
+
+#define save_errno_close(fd) ({ int __save = errno; close(fd); errno = __save; })
+
+int start_server(int family, int type, const char *addr_str, __u16 port,
+                int timeout_ms)
+{
+       struct sockaddr_storage addr = {};
+       socklen_t len;
+       int fd;
+
+       if (make_sockaddr(family, addr_str, port, &addr, &len))
+               return -1;
+
+       fd = socket(family, type, 0);
        if (fd < 0) {
                log_err("Failed to create server socket");
                return -1;
        }
 
+       if (settimeo(fd, timeout_ms))
+               goto error_close;
+
        if (bind(fd, (const struct sockaddr *)&addr, len) < 0) {
                log_err("Failed to bind socket");
-               close(fd);
-               return -1;
+               goto error_close;
        }
 
        if (type == SOCK_STREAM) {
                if (listen(fd, 1) < 0) {
                        log_err("Failed to listed on socket");
-                       close(fd);
-                       return -1;
+                       goto error_close;
                }
        }
 
        return fd;
+
+error_close:
+       save_errno_close(fd);
+       return -1;
 }
 
-int start_server(int family, int type)
+static int connect_fd_to_addr(int fd,
+                             const struct sockaddr_storage *addr,
+                             socklen_t addrlen)
 {
-       return start_server_with_port(family, type, 0);
-}
+       if (connect(fd, (const struct sockaddr *)addr, addrlen)) {
+               log_err("Failed to connect to server");
+               return -1;
+       }
 
-static const struct timeval timeo_sec = { .tv_sec = 3 };
-static const size_t timeo_optlen = sizeof(timeo_sec);
+       return 0;
+}
 
-int connect_to_fd(int family, int type, int server_fd)
+int connect_to_fd(int server_fd, int timeout_ms)
 {
-       int fd, save_errno;
+       struct sockaddr_storage addr;
+       struct sockaddr_in *addr_in;
+       socklen_t addrlen, optlen;
+       int fd, type;
 
-       fd = socket(family, type, 0);
-       if (fd < 0) {
-               log_err("Failed to create client socket");
+       optlen = sizeof(type);
+       if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) {
+               log_err("getsockopt(SOL_TYPE)");
                return -1;
        }
 
-       if (connect_fd_to_fd(fd, server_fd) < 0 && errno != EINPROGRESS) {
-               save_errno = errno;
-               close(fd);
-               errno = save_errno;
+       addrlen = sizeof(addr);
+       if (getsockname(server_fd, (struct sockaddr *)&addr, &addrlen)) {
+               log_err("Failed to get server addr");
                return -1;
        }
 
+       addr_in = (struct sockaddr_in *)&addr;
+       fd = socket(addr_in->sin_family, type, 0);
+       if (fd < 0) {
+               log_err("Failed to create client socket");
+               return -1;
+       }
+
+       if (settimeo(fd, timeout_ms))
+               goto error_close;
+
+       if (connect_fd_to_addr(fd, &addr, addrlen))
+               goto error_close;
+
        return fd;
+
+error_close:
+       save_errno_close(fd);
+       return -1;
 }
 
-int connect_fd_to_fd(int client_fd, int server_fd)
+int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms)
 {
        struct sockaddr_storage addr;
        socklen_t len = sizeof(addr);
-       int save_errno;
 
-       if (setsockopt(client_fd, SOL_SOCKET, SO_RCVTIMEO, &timeo_sec,
-                      timeo_optlen)) {
-               log_err("Failed to set SO_RCVTIMEO");
+       if (settimeo(client_fd, timeout_ms))
                return -1;
-       }
 
        if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) {
                log_err("Failed to get server addr");
                return -1;
        }
 
-       if (connect(client_fd, (const struct sockaddr *)&addr, len) < 0) {
-               if (errno != EINPROGRESS) {
-                       save_errno = errno;
-                       log_err("Failed to connect to server");
-                       errno = save_errno;
-               }
+       if (connect_fd_to_addr(client_fd, &addr, len))
                return -1;
-       }
 
        return 0;
 }
 
-int connect_wait(int fd)
+int make_sockaddr(int family, const char *addr_str, __u16 port,
+                 struct sockaddr_storage *addr, socklen_t *len)
 {
-       struct epoll_event ev = {}, events[2];
-       int timeout_ms = 1000;
-       int efd, nfd;
-
-       efd = epoll_create1(EPOLL_CLOEXEC);
-       if (efd < 0) {
-               log_err("Failed to open epoll fd");
-               return -1;
-       }
+       if (family == AF_INET) {
+               struct sockaddr_in *sin = (void *)addr;
 
-       ev.events = EPOLLRDHUP | EPOLLOUT;
-       ev.data.fd = fd;
+               sin->sin_family = AF_INET;
+               sin->sin_port = htons(port);
+               if (addr_str &&
+                   inet_pton(AF_INET, addr_str, &sin->sin_addr) != 1) {
+                       log_err("inet_pton(AF_INET, %s)", addr_str);
+                       return -1;
+               }
+               if (len)
+                       *len = sizeof(*sin);
+               return 0;
+       } else if (family == AF_INET6) {
+               struct sockaddr_in6 *sin6 = (void *)addr;
 
-       if (epoll_ctl(efd, EPOLL_CTL_ADD, fd, &ev) < 0) {
-               log_err("Failed to register fd=%d on epoll fd=%d", fd, efd);
-               close(efd);
-               return -1;
+               sin6->sin6_family = AF_INET6;
+               sin6->sin6_port = htons(port);
+               if (addr_str &&
+                   inet_pton(AF_INET6, addr_str, &sin6->sin6_addr) != 1) {
+                       log_err("inet_pton(AF_INET6, %s)", addr_str);
+                       return -1;
+               }
+               if (len)
+                       *len = sizeof(*sin6);
+               return 0;
        }
-
-       nfd = epoll_wait(efd, events, ARRAY_SIZE(events), timeout_ms);
-       if (nfd < 0)
-               log_err("Failed to wait for I/O event on epoll fd=%d", efd);
-
-       close(efd);
-       return nfd;
+       return -1;
 }
index 6a80096..c3728f6 100644 (file)
@@ -33,10 +33,11 @@ struct ipv6_packet {
 } __packed;
 extern struct ipv6_packet pkt_v6;
 
-int start_server(int family, int type);
-int start_server_with_port(int family, int type, __u16 port);
-int connect_to_fd(int family, int type, int server_fd);
-int connect_fd_to_fd(int client_fd, int server_fd);
-int connect_wait(int client_fd);
+int start_server(int family, int type, const char *addr, __u16 port,
+                int timeout_ms);
+int connect_to_fd(int server_fd, int timeout_ms);
+int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms);
+int make_sockaddr(int family, const char *addr_str, __u16 port,
+                 struct sockaddr_storage *addr, socklen_t *len);
 
 #endif
diff --git a/tools/testing/selftests/bpf/prog_tests/autoload.c b/tools/testing/selftests/bpf/prog_tests/autoload.c
new file mode 100644 (file)
index 0000000..3693f7d
--- /dev/null
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <test_progs.h>
+#include <time.h>
+#include "test_autoload.skel.h"
+
+void test_autoload(void)
+{
+       int duration = 0, err;
+       struct test_autoload* skel;
+
+       skel = test_autoload__open_and_load();
+       /* prog3 should be broken */
+       if (CHECK(skel, "skel_open_and_load", "unexpected success\n"))
+               goto cleanup;
+
+       skel = test_autoload__open();
+       if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+               goto cleanup;
+
+       /* don't load prog3 */
+       bpf_program__set_autoload(skel->progs.prog3, false);
+
+       err = test_autoload__load(skel);
+       if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
+               goto cleanup;
+
+       err = test_autoload__attach(skel);
+       if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+               goto cleanup;
+
+       usleep(1);
+
+       CHECK(!skel->bss->prog1_called, "prog1", "not called\n");
+       CHECK(!skel->bss->prog2_called, "prog2", "not called\n");
+       CHECK(skel->bss->prog3_called, "prog3", "called?!\n");
+
+cleanup:
+       test_autoload__destroy(skel);
+}
index 87c29dd..fed4275 100644 (file)
@@ -5,7 +5,12 @@
 #include "bpf_iter_netlink.skel.h"
 #include "bpf_iter_bpf_map.skel.h"
 #include "bpf_iter_task.skel.h"
+#include "bpf_iter_task_stack.skel.h"
 #include "bpf_iter_task_file.skel.h"
+#include "bpf_iter_tcp4.skel.h"
+#include "bpf_iter_tcp6.skel.h"
+#include "bpf_iter_udp4.skel.h"
+#include "bpf_iter_udp6.skel.h"
 #include "bpf_iter_test_kern1.skel.h"
 #include "bpf_iter_test_kern2.skel.h"
 #include "bpf_iter_test_kern3.skel.h"
@@ -106,6 +111,20 @@ static void test_task(void)
        bpf_iter_task__destroy(skel);
 }
 
+static void test_task_stack(void)
+{
+       struct bpf_iter_task_stack *skel;
+
+       skel = bpf_iter_task_stack__open_and_load();
+       if (CHECK(!skel, "bpf_iter_task_stack__open_and_load",
+                 "skeleton open_and_load failed\n"))
+               return;
+
+       do_dummy_read(skel->progs.dump_task_stack);
+
+       bpf_iter_task_stack__destroy(skel);
+}
+
 static void test_task_file(void)
 {
        struct bpf_iter_task_file *skel;
@@ -120,6 +139,62 @@ static void test_task_file(void)
        bpf_iter_task_file__destroy(skel);
 }
 
+static void test_tcp4(void)
+{
+       struct bpf_iter_tcp4 *skel;
+
+       skel = bpf_iter_tcp4__open_and_load();
+       if (CHECK(!skel, "bpf_iter_tcp4__open_and_load",
+                 "skeleton open_and_load failed\n"))
+               return;
+
+       do_dummy_read(skel->progs.dump_tcp4);
+
+       bpf_iter_tcp4__destroy(skel);
+}
+
+static void test_tcp6(void)
+{
+       struct bpf_iter_tcp6 *skel;
+
+       skel = bpf_iter_tcp6__open_and_load();
+       if (CHECK(!skel, "bpf_iter_tcp6__open_and_load",
+                 "skeleton open_and_load failed\n"))
+               return;
+
+       do_dummy_read(skel->progs.dump_tcp6);
+
+       bpf_iter_tcp6__destroy(skel);
+}
+
+static void test_udp4(void)
+{
+       struct bpf_iter_udp4 *skel;
+
+       skel = bpf_iter_udp4__open_and_load();
+       if (CHECK(!skel, "bpf_iter_udp4__open_and_load",
+                 "skeleton open_and_load failed\n"))
+               return;
+
+       do_dummy_read(skel->progs.dump_udp4);
+
+       bpf_iter_udp4__destroy(skel);
+}
+
+static void test_udp6(void)
+{
+       struct bpf_iter_udp6 *skel;
+
+       skel = bpf_iter_udp6__open_and_load();
+       if (CHECK(!skel, "bpf_iter_udp6__open_and_load",
+                 "skeleton open_and_load failed\n"))
+               return;
+
+       do_dummy_read(skel->progs.dump_udp6);
+
+       bpf_iter_udp6__destroy(skel);
+}
+
 /* The expected string is less than 16 bytes */
 static int do_read_with_fd(int iter_fd, const char *expected,
                           bool read_one_char)
@@ -392,8 +467,18 @@ void test_bpf_iter(void)
                test_bpf_map();
        if (test__start_subtest("task"))
                test_task();
+       if (test__start_subtest("task_stack"))
+               test_task_stack();
        if (test__start_subtest("task_file"))
                test_task_file();
+       if (test__start_subtest("tcp4"))
+               test_tcp4();
+       if (test__start_subtest("tcp6"))
+               test_tcp6();
+       if (test__start_subtest("udp4"))
+               test_udp4();
+       if (test__start_subtest("udp6"))
+               test_udp6();
        if (test__start_subtest("anon"))
                test_anon_iter(false);
        if (test__start_subtest("anon-read-one-char"))
index 059047a..464edc1 100644 (file)
@@ -13,7 +13,7 @@ static void run_lookup_test(__u16 *g_serv_port, int out_sk)
        socklen_t addr_len = sizeof(addr);
        __u32 duration = 0;
 
-       serv_sk = start_server(AF_INET6, SOCK_STREAM);
+       serv_sk = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
        if (CHECK(serv_sk < 0, "start_server", "failed to start server\n"))
                return;
 
@@ -24,17 +24,13 @@ static void run_lookup_test(__u16 *g_serv_port, int out_sk)
        *g_serv_port = addr.sin6_port;
 
        /* Client outside of test cgroup should fail to connect by timeout. */
-       err = connect_fd_to_fd(out_sk, serv_sk);
+       err = connect_fd_to_fd(out_sk, serv_sk, 1000);
        if (CHECK(!err || errno != EINPROGRESS, "connect_fd_to_fd",
                  "unexpected result err %d errno %d\n", err, errno))
                goto cleanup;
 
-       err = connect_wait(out_sk);
-       if (CHECK(err, "connect_wait", "unexpected result %d\n", err))
-               goto cleanup;
-
        /* Client inside test cgroup should connect just fine. */
-       in_sk = connect_to_fd(AF_INET6, SOCK_STREAM, serv_sk);
+       in_sk = connect_to_fd(serv_sk, 0);
        if (CHECK(in_sk < 0, "connect_to_fd", "errno %d\n", errno))
                goto cleanup;
 
@@ -85,7 +81,7 @@ void test_cgroup_skb_sk_lookup(void)
         * differs from that of testing cgroup. Moving selftests process to
         * testing cgroup won't change cgroup id of an already created socket.
         */
-       out_sk = socket(AF_INET6, SOCK_STREAM | SOCK_NONBLOCK, 0);
+       out_sk = socket(AF_INET6, SOCK_STREAM, 0);
        if (CHECK_FAIL(out_sk < 0))
                return;
 
index 17bbf76..9229db2 100644 (file)
@@ -114,7 +114,7 @@ static int run_test(int cgroup_fd, int server_fd, int family, int type)
                goto close_bpf_object;
        }
 
-       fd = connect_to_fd(family, type, server_fd);
+       fd = connect_to_fd(server_fd, 0);
        if (fd < 0) {
                err = -1;
                goto close_bpf_object;
@@ -137,25 +137,25 @@ void test_connect_force_port(void)
        if (CHECK_FAIL(cgroup_fd < 0))
                return;
 
-       server_fd = start_server_with_port(AF_INET, SOCK_STREAM, 60123);
+       server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 60123, 0);
        if (CHECK_FAIL(server_fd < 0))
                goto close_cgroup_fd;
        CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET, SOCK_STREAM));
        close(server_fd);
 
-       server_fd = start_server_with_port(AF_INET6, SOCK_STREAM, 60124);
+       server_fd = start_server(AF_INET6, SOCK_STREAM, NULL, 60124, 0);
        if (CHECK_FAIL(server_fd < 0))
                goto close_cgroup_fd;
        CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET6, SOCK_STREAM));
        close(server_fd);
 
-       server_fd = start_server_with_port(AF_INET, SOCK_DGRAM, 60123);
+       server_fd = start_server(AF_INET, SOCK_DGRAM, NULL, 60123, 0);
        if (CHECK_FAIL(server_fd < 0))
                goto close_cgroup_fd;
        CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET, SOCK_DGRAM));
        close(server_fd);
 
-       server_fd = start_server_with_port(AF_INET6, SOCK_DGRAM, 60124);
+       server_fd = start_server(AF_INET6, SOCK_DGRAM, NULL, 60124, 0);
        if (CHECK_FAIL(server_fd < 0))
                goto close_cgroup_fd;
        CHECK_FAIL(run_test(cgroup_fd, server_fd, AF_INET6, SOCK_DGRAM));
diff --git a/tools/testing/selftests/bpf/prog_tests/core_retro.c b/tools/testing/selftests/bpf/prog_tests/core_retro.c
new file mode 100644 (file)
index 0000000..78e30d3
--- /dev/null
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+#define _GNU_SOURCE
+#include <test_progs.h>
+#include "test_core_retro.skel.h"
+
+void test_core_retro(void)
+{
+       int err, zero = 0, res, duration = 0;
+       struct test_core_retro *skel;
+
+       /* load program */
+       skel = test_core_retro__open_and_load();
+       if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
+               goto out_close;
+
+       /* attach probe */
+       err = test_core_retro__attach(skel);
+       if (CHECK(err, "attach_kprobe", "err %d\n", err))
+               goto out_close;
+
+       /* trigger */
+       usleep(1);
+
+       err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.results), &zero, &res);
+       if (CHECK(err, "map_lookup", "failed to lookup result: %d\n", errno))
+               goto out_close;
+
+       CHECK(res != getpid(), "pid_check", "got %d != exp %d\n", res, getpid());
+
+out_close:
+       test_core_retro__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/endian.c b/tools/testing/selftests/bpf/prog_tests/endian.c
new file mode 100644 (file)
index 0000000..1a11612
--- /dev/null
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <test_progs.h>
+#include "test_endian.skel.h"
+
+static int duration;
+
+#define IN16 0x1234
+#define IN32 0x12345678U
+#define IN64 0x123456789abcdef0ULL
+
+#define OUT16 0x3412
+#define OUT32 0x78563412U
+#define OUT64 0xf0debc9a78563412ULL
+
+void test_endian(void)
+{
+       struct test_endian* skel;
+       struct test_endian__bss *bss;
+       int err;
+
+       skel = test_endian__open_and_load();
+       if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+               return;
+       bss = skel->bss;
+
+       bss->in16 = IN16;
+       bss->in32 = IN32;
+       bss->in64 = IN64;
+
+       err = test_endian__attach(skel);
+       if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+               goto cleanup;
+
+       usleep(1);
+
+       CHECK(bss->out16 != OUT16, "out16", "got 0x%llx != exp 0x%llx\n",
+             (__u64)bss->out16, (__u64)OUT16);
+       CHECK(bss->out32 != OUT32, "out32", "got 0x%llx != exp 0x%llx\n",
+             (__u64)bss->out32, (__u64)OUT32);
+       CHECK(bss->out64 != OUT64, "out16", "got 0x%llx != exp 0x%llx\n",
+             (__u64)bss->out64, (__u64)OUT64);
+
+       CHECK(bss->const16 != OUT16, "const16", "got 0x%llx != exp 0x%llx\n",
+             (__u64)bss->const16, (__u64)OUT16);
+       CHECK(bss->const32 != OUT32, "const32", "got 0x%llx != exp 0x%llx\n",
+             (__u64)bss->const32, (__u64)OUT32);
+       CHECK(bss->const64 != OUT64, "const64", "got 0x%llx != exp 0x%llx\n",
+             (__u64)bss->const64, (__u64)OUT64);
+cleanup:
+       test_endian__destroy(skel);
+}
index 83493bd..109d034 100644 (file)
@@ -36,7 +36,7 @@ void test_fentry_fexit(void)
        fentry_res = (__u64 *)fentry_skel->bss;
        fexit_res = (__u64 *)fexit_skel->bss;
        printf("%lld\n", fentry_skel->bss->test1_result);
-       for (i = 0; i < 6; i++) {
+       for (i = 0; i < 8; i++) {
                CHECK(fentry_res[i] != 1, "result",
                      "fentry_test%d failed err %lld\n", i + 1, fentry_res[i]);
                CHECK(fexit_res[i] != 1, "result",
index ea14e3e..f11f187 100644 (file)
@@ -527,8 +527,8 @@ static void test_skb_less_prog_attach(struct bpf_flow *skel, int tap_fd)
 
        run_tests_skb_less(tap_fd, skel->maps.last_dissection);
 
-       err = bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR);
-       CHECK(err, "bpf_prog_detach", "err %d errno %d\n", err, errno);
+       err = bpf_prog_detach2(prog_fd, 0, BPF_FLOW_DISSECTOR);
+       CHECK(err, "bpf_prog_detach2", "err %d errno %d\n", err, errno);
 }
 
 static void test_skb_less_link_create(struct bpf_flow *skel, int tap_fd)
index 15cb554..172c586 100644 (file)
@@ -1,9 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Test that the flow_dissector program can be updated with a single
- * syscall by attaching a new program that replaces the existing one.
- *
- * Corner case - the same program cannot be attached twice.
+ * Tests for attaching, detaching, and replacing flow_dissector BPF program.
  */
 
 #define _GNU_SOURCE
@@ -116,7 +113,7 @@ static void test_prog_attach_prog_attach(int netns, int prog1, int prog2)
        CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog2));
 
 out_detach:
-       err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR);
+       err = bpf_prog_detach2(prog2, 0, BPF_FLOW_DISSECTOR);
        if (CHECK_FAIL(err))
                perror("bpf_prog_detach");
        CHECK_FAIL(prog_is_attached(netns));
@@ -152,7 +149,7 @@ static void test_prog_attach_link_create(int netns, int prog1, int prog2)
        DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
        int err, link;
 
-       err = bpf_prog_attach(prog1, -1, BPF_FLOW_DISSECTOR, 0);
+       err = bpf_prog_attach(prog1, 0, BPF_FLOW_DISSECTOR, 0);
        if (CHECK_FAIL(err)) {
                perror("bpf_prog_attach(prog1)");
                return;
@@ -168,7 +165,7 @@ static void test_prog_attach_link_create(int netns, int prog1, int prog2)
                close(link);
        CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
 
-       err = bpf_prog_detach(-1, BPF_FLOW_DISSECTOR);
+       err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
        if (CHECK_FAIL(err))
                perror("bpf_prog_detach");
        CHECK_FAIL(prog_is_attached(netns));
@@ -188,7 +185,7 @@ static void test_link_create_prog_attach(int netns, int prog1, int prog2)
 
        /* Expect failure attaching prog when link exists */
        errno = 0;
-       err = bpf_prog_attach(prog2, -1, BPF_FLOW_DISSECTOR, 0);
+       err = bpf_prog_attach(prog2, 0, BPF_FLOW_DISSECTOR, 0);
        if (CHECK_FAIL(!err || errno != EEXIST))
                perror("bpf_prog_attach(prog2) expected EEXIST");
        CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
@@ -211,7 +208,7 @@ static void test_link_create_prog_detach(int netns, int prog1, int prog2)
 
        /* Expect failure detaching prog when link exists */
        errno = 0;
-       err = bpf_prog_detach(-1, BPF_FLOW_DISSECTOR);
+       err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
        if (CHECK_FAIL(!err || errno != EINVAL))
                perror("bpf_prog_detach expected EINVAL");
        CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
@@ -231,7 +228,7 @@ static void test_prog_attach_detach_query(int netns, int prog1, int prog2)
        }
        CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
 
-       err = bpf_prog_detach(0, BPF_FLOW_DISSECTOR);
+       err = bpf_prog_detach2(prog1, 0, BPF_FLOW_DISSECTOR);
        if (CHECK_FAIL(err)) {
                perror("bpf_prog_detach");
                return;
@@ -308,6 +305,31 @@ static void test_link_update_replace_old_prog(int netns, int prog1, int prog2)
        CHECK_FAIL(prog_is_attached(netns));
 }
 
+static void test_link_update_same_prog(int netns, int prog1, int prog2)
+{
+       DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
+       DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
+       int err, link;
+
+       link = bpf_link_create(prog1, netns, BPF_FLOW_DISSECTOR, &create_opts);
+       if (CHECK_FAIL(link < 0)) {
+               perror("bpf_link_create(prog1)");
+               return;
+       }
+       CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
+       /* Expect success updating the prog with the same one */
+       update_opts.flags = 0;
+       update_opts.old_prog_fd = 0;
+       err = bpf_link_update(link, prog1, &update_opts);
+       if (CHECK_FAIL(err))
+               perror("bpf_link_update");
+       CHECK_FAIL(query_attached_prog_id(netns) != query_prog_id(prog1));
+
+       close(link);
+       CHECK_FAIL(prog_is_attached(netns));
+}
+
 static void test_link_update_invalid_opts(int netns, int prog1, int prog2)
 {
        DECLARE_LIBBPF_OPTS(bpf_link_create_opts, create_opts);
@@ -571,6 +593,8 @@ static void run_tests(int netns)
                  test_link_update_no_old_prog },
                { "link update with replace old prog",
                  test_link_update_replace_old_prog },
+               { "link update with same prog",
+                 test_link_update_same_prog },
                { "link update invalid opts",
                  test_link_update_invalid_opts },
                { "link update invalid prog",
diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms.c b/tools/testing/selftests/bpf/prog_tests/ksyms.c
new file mode 100644 (file)
index 0000000..e3d6777
--- /dev/null
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+
+#include <test_progs.h>
+#include "test_ksyms.skel.h"
+#include <sys/stat.h>
+
+static int duration;
+
+static __u64 kallsyms_find(const char *sym)
+{
+       char type, name[500];
+       __u64 addr, res = 0;
+       FILE *f;
+
+       f = fopen("/proc/kallsyms", "r");
+       if (CHECK(!f, "kallsyms_fopen", "failed to open: %d\n", errno))
+               return 0;
+
+       while (fscanf(f, "%llx %c %499s%*[^\n]\n", &addr, &type, name) > 0) {
+               if (strcmp(name, sym) == 0) {
+                       res = addr;
+                       goto out;
+               }
+       }
+
+       CHECK(false, "not_found", "symbol %s not found\n", sym);
+out:
+       fclose(f);
+       return res;
+}
+
+void test_ksyms(void)
+{
+       __u64 link_fops_addr = kallsyms_find("bpf_link_fops");
+       const char *btf_path = "/sys/kernel/btf/vmlinux";
+       struct test_ksyms *skel;
+       struct test_ksyms__data *data;
+       struct stat st;
+       __u64 btf_size;
+       int err;
+
+       if (CHECK(stat(btf_path, &st), "stat_btf", "err %d\n", errno))
+               return;
+       btf_size = st.st_size;
+
+       skel = test_ksyms__open_and_load();
+       if (CHECK(!skel, "skel_open", "failed to open and load skeleton\n"))
+               return;
+
+       err = test_ksyms__attach(skel);
+       if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+               goto cleanup;
+
+       /* trigger tracepoint */
+       usleep(1);
+
+       data = skel->data;
+       CHECK(data->out__bpf_link_fops != link_fops_addr, "bpf_link_fops",
+             "got 0x%llx, exp 0x%llx\n",
+             data->out__bpf_link_fops, link_fops_addr);
+       CHECK(data->out__bpf_link_fops1 != 0, "bpf_link_fops1",
+             "got %llu, exp %llu\n", data->out__bpf_link_fops1, (__u64)0);
+       CHECK(data->out__btf_size != btf_size, "btf_size",
+             "got %llu, exp %llu\n", data->out__btf_size, btf_size);
+       CHECK(data->out__per_cpu_start != 0, "__per_cpu_start",
+             "got %llu, exp %llu\n", data->out__per_cpu_start, (__u64)0);
+
+cleanup:
+       test_ksyms__destroy(skel);
+}
index c1168e4..5a2a689 100644 (file)
@@ -23,7 +23,7 @@ void test_load_bytes_relative(void)
        if (CHECK_FAIL(cgroup_fd < 0))
                return;
 
-       server_fd = start_server(AF_INET, SOCK_STREAM);
+       server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
        if (CHECK_FAIL(server_fd < 0))
                goto close_cgroup_fd;
 
@@ -49,7 +49,7 @@ void test_load_bytes_relative(void)
        if (CHECK_FAIL(err))
                goto close_bpf_object;
 
-       client_fd = connect_to_fd(AF_INET, SOCK_STREAM, server_fd);
+       client_fd = connect_to_fd(server_fd, 0);
        if (CHECK_FAIL(client_fd < 0))
                goto close_bpf_object;
        close(client_fd);
diff --git a/tools/testing/selftests/bpf/prog_tests/map_ptr.c b/tools/testing/selftests/bpf/prog_tests/map_ptr.c
new file mode 100644 (file)
index 0000000..c230a57
--- /dev/null
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+#include <test_progs.h>
+#include <network_helpers.h>
+
+#include "map_ptr_kern.skel.h"
+
+void test_map_ptr(void)
+{
+       struct map_ptr_kern *skel;
+       __u32 duration = 0, retval;
+       char buf[128];
+       int err;
+
+       skel = map_ptr_kern__open_and_load();
+       if (CHECK(!skel, "skel_open_load", "open_load failed\n"))
+               return;
+
+       err = bpf_prog_test_run(bpf_program__fd(skel->progs.cg_skb), 1, &pkt_v4,
+                               sizeof(pkt_v4), buf, NULL, &retval, NULL);
+
+       if (CHECK(err, "test_run", "err=%d errno=%d\n", err, errno))
+               goto cleanup;
+
+       if (CHECK(!retval, "retval", "retval=%d map_type=%u line=%u\n", retval,
+                 skel->bss->g_map_type, skel->bss->g_line))
+               goto cleanup;
+
+cleanup:
+       map_ptr_kern__destroy(skel);
+}
index a122ce3..c33ec18 100644 (file)
@@ -4,6 +4,7 @@
 #include <sched.h>
 #include <sys/socket.h>
 #include <test_progs.h>
+#include "test_perf_buffer.skel.h"
 #include "bpf/libbpf_internal.h"
 
 /* AddressSanitizer sometimes crashes due to data dereference below, due to
@@ -25,16 +26,11 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size)
 
 void test_perf_buffer(void)
 {
-       int err, prog_fd, on_len, nr_on_cpus = 0,  nr_cpus, i, duration = 0;
-       const char *prog_name = "kprobe/sys_nanosleep";
-       const char *file = "./test_perf_buffer.o";
+       int err, on_len, nr_on_cpus = 0,  nr_cpus, i, duration = 0;
        struct perf_buffer_opts pb_opts = {};
-       struct bpf_map *perf_buf_map;
+       struct test_perf_buffer *skel;
        cpu_set_t cpu_set, cpu_seen;
-       struct bpf_program *prog;
-       struct bpf_object *obj;
        struct perf_buffer *pb;
-       struct bpf_link *link;
        bool *online;
 
        nr_cpus = libbpf_num_possible_cpus();
@@ -51,33 +47,21 @@ void test_perf_buffer(void)
                        nr_on_cpus++;
 
        /* load program */
-       err = bpf_prog_load(file, BPF_PROG_TYPE_KPROBE, &obj, &prog_fd);
-       if (CHECK(err, "obj_load", "err %d errno %d\n", err, errno)) {
-               obj = NULL;
-               goto out_close;
-       }
-
-       prog = bpf_object__find_program_by_title(obj, prog_name);
-       if (CHECK(!prog, "find_probe", "prog '%s' not found\n", prog_name))
+       skel = test_perf_buffer__open_and_load();
+       if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
                goto out_close;
 
-       /* load map */
-       perf_buf_map = bpf_object__find_map_by_name(obj, "perf_buf_map");
-       if (CHECK(!perf_buf_map, "find_perf_buf_map", "not found\n"))
-               goto out_close;
-
-       /* attach kprobe */
-       link = bpf_program__attach_kprobe(prog, false /* retprobe */,
-                                         SYS_NANOSLEEP_KPROBE_NAME);
-       if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link)))
+       /* attach probe */
+       err = test_perf_buffer__attach(skel);
+       if (CHECK(err, "attach_kprobe", "err %d\n", err))
                goto out_close;
 
        /* set up perf buffer */
        pb_opts.sample_cb = on_sample;
        pb_opts.ctx = &cpu_seen;
-       pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts);
+       pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1, &pb_opts);
        if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
-               goto out_detach;
+               goto out_close;
 
        /* trigger kprobe on every CPU */
        CPU_ZERO(&cpu_seen);
@@ -94,7 +78,7 @@ void test_perf_buffer(void)
                                             &cpu_set);
                if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n",
                                 i, err))
-                       goto out_detach;
+                       goto out_close;
 
                usleep(1);
        }
@@ -110,9 +94,7 @@ void test_perf_buffer(void)
 
 out_free_pb:
        perf_buffer__free(pb);
-out_detach:
-       bpf_link__destroy(link);
 out_close:
-       bpf_object__close(obj);
+       test_perf_buffer__destroy(skel);
        free(online);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
new file mode 100644 (file)
index 0000000..3b127ca
--- /dev/null
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/err.h>
+#include <string.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
+#include <linux/btf.h>
+#include <linux/kernel.h>
+#define CONFIG_DEBUG_INFO_BTF
+#include <linux/btf_ids.h>
+#include "test_progs.h"
+
+static int duration;
+
+struct symbol {
+       const char      *name;
+       int              type;
+       int              id;
+};
+
+struct symbol test_symbols[] = {
+       { "unused",  BTF_KIND_UNKN,     0 },
+       { "S",       BTF_KIND_TYPEDEF, -1 },
+       { "T",       BTF_KIND_TYPEDEF, -1 },
+       { "U",       BTF_KIND_TYPEDEF, -1 },
+       { "S",       BTF_KIND_STRUCT,  -1 },
+       { "U",       BTF_KIND_UNION,   -1 },
+       { "func",    BTF_KIND_FUNC,    -1 },
+};
+
+BTF_ID_LIST(test_list_local)
+BTF_ID_UNUSED
+BTF_ID(typedef, S)
+BTF_ID(typedef, T)
+BTF_ID(typedef, U)
+BTF_ID(struct,  S)
+BTF_ID(union,   U)
+BTF_ID(func,    func)
+
+extern __u32 test_list_global[];
+BTF_ID_LIST_GLOBAL(test_list_global)
+BTF_ID_UNUSED
+BTF_ID(typedef, S)
+BTF_ID(typedef, T)
+BTF_ID(typedef, U)
+BTF_ID(struct,  S)
+BTF_ID(union,   U)
+BTF_ID(func,    func)
+
+static int
+__resolve_symbol(struct btf *btf, int type_id)
+{
+       const struct btf_type *type;
+       const char *str;
+       unsigned int i;
+
+       type = btf__type_by_id(btf, type_id);
+       if (!type) {
+               PRINT_FAIL("Failed to get type for ID %d\n", type_id);
+               return -1;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(test_symbols); i++) {
+               if (test_symbols[i].id != -1)
+                       continue;
+
+               if (BTF_INFO_KIND(type->info) != test_symbols[i].type)
+                       continue;
+
+               str = btf__name_by_offset(btf, type->name_off);
+               if (!str) {
+                       PRINT_FAIL("Failed to get name for BTF ID %d\n", type_id);
+                       return -1;
+               }
+
+               if (!strcmp(str, test_symbols[i].name))
+                       test_symbols[i].id = type_id;
+       }
+
+       return 0;
+}
+
+static int resolve_symbols(void)
+{
+       struct btf *btf;
+       int type_id;
+       __u32 nr;
+
+       btf = btf__parse_elf("btf_data.o", NULL);
+       if (CHECK(libbpf_get_error(btf), "resolve",
+                 "Failed to load BTF from btf_data.o\n"))
+               return -1;
+
+       nr = btf__get_nr_types(btf);
+
+       for (type_id = 1; type_id <= nr; type_id++) {
+               if (__resolve_symbol(btf, type_id))
+                       break;
+       }
+
+       btf__free(btf);
+       return 0;
+}
+
+int test_resolve_btfids(void)
+{
+       __u32 *test_list, *test_lists[] = { test_list_local, test_list_global };
+       unsigned int i, j;
+       int ret = 0;
+
+       if (resolve_symbols())
+               return -1;
+
+       /* Check BTF_ID_LIST(test_list_local) and
+        * BTF_ID_LIST_GLOBAL(test_list_global) IDs
+        */
+       for (j = 0; j < ARRAY_SIZE(test_lists); j++) {
+               test_list = test_lists[j];
+               for (i = 0; i < ARRAY_SIZE(test_symbols) && !ret; i++) {
+                       ret = CHECK(test_list[i] != test_symbols[i].id,
+                                   "id_check",
+                                   "wrong ID for %s (%d != %d)\n",
+                                   test_symbols[i].name,
+                                   test_list[i], test_symbols[i].id);
+               }
+       }
+
+       return ret;
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c
new file mode 100644 (file)
index 0000000..f1784ae
--- /dev/null
@@ -0,0 +1,1282 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+// Copyright (c) 2020 Cloudflare
+/*
+ * Test BPF attach point for INET socket lookup (BPF_SK_LOOKUP).
+ *
+ * Tests exercise:
+ *  - attaching/detaching/querying programs to BPF_SK_LOOKUP hook,
+ *  - redirecting socket lookup to a socket selected by BPF program,
+ *  - failing a socket lookup on BPF program's request,
+ *  - error scenarios for selecting a socket from BPF program,
+ *  - accessing BPF program context,
+ *  - attaching and running multiple BPF programs.
+ *
+ * Tests run in a dedicated network namespace.
+ */
+
+#define _GNU_SOURCE
+#include <arpa/inet.h>
+#include <assert.h>
+#include <errno.h>
+#include <error.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <bpf/libbpf.h>
+#include <bpf/bpf.h>
+
+#include "test_progs.h"
+#include "bpf_rlimit.h"
+#include "bpf_util.h"
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+#include "test_sk_lookup.skel.h"
+
+/* External (address, port) pairs the client sends packets to. */
+#define EXT_IP4                "127.0.0.1"
+#define EXT_IP6                "fd00::1"
+#define EXT_PORT       7007
+
+/* Internal (address, port) pairs the server listens/receives at. */
+#define INT_IP4                "127.0.0.2"
+#define INT_IP4_V6     "::ffff:127.0.0.2"
+#define INT_IP6                "fd00::2"
+#define INT_PORT       8008
+
+#define IO_TIMEOUT_SEC 3
+
+/* Which of the two redirect-target servers a test case expects the
+ * connection/datagram to land on.
+ */
+enum server {
+       SERVER_A = 0,
+       SERVER_B = 1,
+       MAX_SERVERS,
+};
+
+/* Slot indices for multi-program attach tests. */
+enum {
+       PROG1 = 0,
+       PROG2,
+};
+
+/* An (IP address, port) endpoint; IP is in textual form. */
+struct inet_addr {
+       const char *ip;
+       unsigned short port;
+};
+
+/* One sk_lookup test case. */
+struct test {
+       const char *desc;               /* subtest name */
+       struct bpf_program *lookup_prog;        /* prog attached to the netns */
+       struct bpf_program *reuseport_prog;     /* optional SO_ATTACH_REUSEPORT_EBPF prog */
+       struct bpf_map *sock_map;       /* map the lookup prog picks sockets from */
+       int sotype;                     /* SOCK_STREAM or SOCK_DGRAM */
+       struct inet_addr connect_to;    /* where the client sends */
+       struct inet_addr listen_at;     /* where the server binds */
+       enum server accept_on;          /* server expected to receive */
+};
+
+static __u32 duration;         /* for CHECK macro */
+
+/* Return true if @ip looks like an IPv6 address (contains ':'). */
+static bool is_ipv6(const char *ip)
+{
+       return !!strchr(ip, ':');
+}
+
+/* Attach @reuseport_prog to @sock_fd via SO_ATTACH_REUSEPORT_EBPF so it
+ * selects the target socket within the reuseport group.
+ * Returns 0 on success, -1 with errno set on failure.
+ */
+static int attach_reuseport(int sock_fd, struct bpf_program *reuseport_prog)
+{
+       int err, prog_fd;
+
+       prog_fd = bpf_program__fd(reuseport_prog);
+       if (prog_fd < 0) {
+               errno = -prog_fd;
+               return -1;
+       }
+
+       err = setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
+                        &prog_fd, sizeof(prog_fd));
+       if (err)
+               return -1;
+
+       return 0;
+}
+
+/* Size of the sockaddr actually stored in @addr; 0 for an unknown family. */
+static socklen_t inetaddr_len(const struct sockaddr_storage *addr)
+{
+       return (addr->ss_family == AF_INET ? sizeof(struct sockaddr_in) :
+               addr->ss_family == AF_INET6 ? sizeof(struct sockaddr_in6) : 0);
+}
+
+/* Create an unconnected socket of @sotype for @ip:@port and fill @addr
+ * with the resolved address (family inferred from the IP string).
+ * Send/receive timeouts are set so a broken test fails instead of
+ * hanging.  Returns the fd, or -1 on error.
+ */
+static int make_socket(int sotype, const char *ip, int port,
+                      struct sockaddr_storage *addr)
+{
+       struct timeval timeo = { .tv_sec = IO_TIMEOUT_SEC };
+       int err, family, fd;
+
+       family = is_ipv6(ip) ? AF_INET6 : AF_INET;
+       err = make_sockaddr(family, ip, port, addr, NULL);
+       if (CHECK(err, "make_address", "failed\n"))
+               return -1;
+
+       fd = socket(addr->ss_family, sotype, 0);
+       if (CHECK(fd < 0, "socket", "failed\n")) {
+               log_err("failed to make socket");
+               return -1;
+       }
+
+       err = setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, &timeo, sizeof(timeo));
+       if (CHECK(err, "setsockopt(SO_SNDTIMEO)", "failed\n")) {
+               log_err("failed to set SNDTIMEO");
+               close(fd);
+               return -1;
+       }
+
+       err = setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &timeo, sizeof(timeo));
+       if (CHECK(err, "setsockopt(SO_RCVTIMEO)", "failed\n")) {
+               log_err("failed to set RCVTIMEO");
+               close(fd);
+               return -1;
+       }
+
+       return fd;
+}
+
+/* Create, configure and bind a server socket at @ip:@port (listen() for
+ * TCP).  UDP sockets get ORIGDSTADDR reporting enabled so the echo
+ * helper can reply from the original destination address.  When
+ * @reuseport_prog is non-NULL, SO_REUSEPORT is enabled and the prog is
+ * attached after setup.  Returns the fd, or -1 on error.
+ */
+static int make_server(int sotype, const char *ip, int port,
+                      struct bpf_program *reuseport_prog)
+{
+       struct sockaddr_storage addr = {0};
+       const int one = 1;
+       int err, fd = -1;
+
+       fd = make_socket(sotype, ip, port, &addr);
+       if (fd < 0)
+               return -1;
+
+       /* Enabled for UDPv6 sockets for IPv4-mapped IPv6 to work. */
+       if (sotype == SOCK_DGRAM) {
+               err = setsockopt(fd, SOL_IP, IP_RECVORIGDSTADDR, &one,
+                                sizeof(one));
+               if (CHECK(err, "setsockopt(IP_RECVORIGDSTADDR)", "failed\n")) {
+                       log_err("failed to enable IP_RECVORIGDSTADDR");
+                       goto fail;
+               }
+       }
+
+       if (sotype == SOCK_DGRAM && addr.ss_family == AF_INET6) {
+               err = setsockopt(fd, SOL_IPV6, IPV6_RECVORIGDSTADDR, &one,
+                                sizeof(one));
+               if (CHECK(err, "setsockopt(IPV6_RECVORIGDSTADDR)", "failed\n")) {
+                       log_err("failed to enable IPV6_RECVORIGDSTADDR");
+                       goto fail;
+               }
+       }
+
+       if (sotype == SOCK_STREAM) {
+               err = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one,
+                                sizeof(one));
+               if (CHECK(err, "setsockopt(SO_REUSEADDR)", "failed\n")) {
+                       log_err("failed to enable SO_REUSEADDR");
+                       goto fail;
+               }
+       }
+
+       if (reuseport_prog) {
+               err = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one,
+                                sizeof(one));
+               if (CHECK(err, "setsockopt(SO_REUSEPORT)", "failed\n")) {
+                       log_err("failed to enable SO_REUSEPORT");
+                       goto fail;
+               }
+       }
+
+       err = bind(fd, (void *)&addr, inetaddr_len(&addr));
+       if (CHECK(err, "bind", "failed\n")) {
+               log_err("failed to bind listen socket");
+               goto fail;
+       }
+
+       if (sotype == SOCK_STREAM) {
+               err = listen(fd, SOMAXCONN);
+               if (CHECK(err, "make_server", "listen")) {
+                       log_err("failed to listen on port %d", port);
+                       goto fail;
+               }
+       }
+
+       /* Late attach reuseport prog so we can have one init path */
+       if (reuseport_prog) {
+               err = attach_reuseport(fd, reuseport_prog);
+               if (CHECK(err, "attach_reuseport", "failed\n")) {
+                       log_err("failed to attach reuseport prog");
+                       goto fail;
+               }
+       }
+
+       return fd;
+fail:
+       close(fd);
+       return -1;
+}
+
+/* Create a socket of @sotype and connect it to @ip:@port.
+ * Returns the connected fd, or -1 on error.
+ */
+static int make_client(int sotype, const char *ip, int port)
+{
+       struct sockaddr_storage addr = {0};
+       int err, fd;
+
+       fd = make_socket(sotype, ip, port, &addr);
+       if (fd < 0)
+               return -1;
+
+       err = connect(fd, (void *)&addr, inetaddr_len(&addr));
+       if (CHECK(err, "make_client", "connect")) {
+               log_err("failed to connect client socket");
+               goto fail;
+       }
+
+       return fd;
+fail:
+       close(fd);
+       return -1;
+}
+
+/* Send a single byte on @fd; returns 0 on success, -1 otherwise. */
+static int send_byte(int fd)
+{
+       ssize_t n;
+
+       errno = 0;
+       n = send(fd, "a", 1, 0);
+       if (CHECK(n <= 0, "send_byte", "send")) {
+               log_err("failed/partial send");
+               return -1;
+       }
+       return 0;
+}
+
+/* Receive a single byte on @fd; returns 0 on success, -1 otherwise. */
+static int recv_byte(int fd)
+{
+       char buf[1];
+       ssize_t n;
+
+       n = recv(fd, buf, sizeof(buf), 0);
+       if (CHECK(n <= 0, "recv_byte", "recv")) {
+               log_err("failed/partial recv");
+               return -1;
+       }
+       return 0;
+}
+
+/* Accept one connection on listening @server_fd and echo a single byte
+ * back to the peer.  Returns 0 on success, -1 on any failure.
+ */
+static int tcp_recv_send(int server_fd)
+{
+       char buf[1];
+       int ret, fd;
+       ssize_t n;
+
+       fd = accept(server_fd, NULL, NULL);
+       if (CHECK(fd < 0, "accept", "failed\n")) {
+               log_err("failed to accept");
+               return -1;
+       }
+
+       n = recv(fd, buf, sizeof(buf), 0);
+       if (CHECK(n <= 0, "recv", "failed\n")) {
+               log_err("failed/partial recv");
+               ret = -1;
+               goto close;
+       }
+
+       n = send(fd, buf, n, 0);
+       if (CHECK(n <= 0, "send", "failed\n")) {
+               log_err("failed/partial send");
+               ret = -1;
+               goto close;
+       }
+
+       ret = 0;
+close:
+       close(fd);
+       return ret;
+}
+
+/* Rewrite the AF_INET sockaddr in @ss as the equivalent IPv4-mapped
+ * IPv6 address (::ffff:a.b.c.d) in place.
+ * NOTE(review): only bytes 10..15 of sin6_addr are written; bytes 0..9
+ * are assumed to already be zero in the caller's buffer -- verify at
+ * the call site.
+ */
+static void v4_to_v6(struct sockaddr_storage *ss)
+{
+       struct sockaddr_in6 *v6 = (struct sockaddr_in6 *)ss;
+       struct sockaddr_in v4 = *(struct sockaddr_in *)ss;
+
+       v6->sin6_family = AF_INET6;
+       v6->sin6_port = v4.sin_port;
+       v6->sin6_addr.s6_addr[10] = 0xff;
+       v6->sin6_addr.s6_addr[11] = 0xff;
+       memcpy(&v6->sin6_addr.s6_addr[12], &v4.sin_addr.s_addr, 4);
+}
+
+/* Receive one datagram on @server_fd and echo it back to the sender,
+ * replying *from* the original destination address recovered via the
+ * IP_ORIGDSTADDR/IPV6_ORIGDSTADDR control message.  This makes the
+ * reply source match what the client sent to, even after a BPF
+ * redirect.  Returns 0 on success, -1 on failure.
+ */
+static int udp_recv_send(int server_fd)
+{
+       char cmsg_buf[CMSG_SPACE(sizeof(struct sockaddr_storage))];
+       struct sockaddr_storage _src_addr = { 0 };
+       struct sockaddr_storage *src_addr = &_src_addr;
+       struct sockaddr_storage *dst_addr = NULL;
+       struct msghdr msg = { 0 };
+       struct iovec iov = { 0 };
+       struct cmsghdr *cm;
+       char buf[1];
+       int ret, fd;
+       ssize_t n;
+
+       iov.iov_base = buf;
+       iov.iov_len = sizeof(buf);
+
+       msg.msg_name = src_addr;
+       msg.msg_namelen = sizeof(*src_addr);
+       msg.msg_iov = &iov;
+       msg.msg_iovlen = 1;
+       msg.msg_control = cmsg_buf;
+       msg.msg_controllen = sizeof(cmsg_buf);
+
+       errno = 0;
+       n = recvmsg(server_fd, &msg, 0);
+       if (CHECK(n <= 0, "recvmsg", "failed\n")) {
+               log_err("failed to receive");
+               return -1;
+       }
+       if (CHECK(msg.msg_flags & MSG_CTRUNC, "recvmsg", "truncated cmsg\n"))
+               return -1;
+
+       /* Find the original destination address among the cmsgs. */
+       for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
+               if ((cm->cmsg_level == SOL_IP &&
+                    cm->cmsg_type == IP_ORIGDSTADDR) ||
+                   (cm->cmsg_level == SOL_IPV6 &&
+                    cm->cmsg_type == IPV6_ORIGDSTADDR)) {
+                       dst_addr = (struct sockaddr_storage *)CMSG_DATA(cm);
+                       break;
+               }
+               log_err("warning: ignored cmsg at level %d type %d",
+                       cm->cmsg_level, cm->cmsg_type);
+       }
+       if (CHECK(!dst_addr, "recvmsg", "missing ORIGDSTADDR\n"))
+               return -1;
+
+       /* Server socket bound to IPv4-mapped IPv6 address */
+       if (src_addr->ss_family == AF_INET6 &&
+           dst_addr->ss_family == AF_INET) {
+               v4_to_v6(dst_addr);
+       }
+
+       /* Reply from original destination address. */
+       fd = socket(dst_addr->ss_family, SOCK_DGRAM, 0);
+       if (CHECK(fd < 0, "socket", "failed\n")) {
+               log_err("failed to create tx socket");
+               return -1;
+       }
+
+       ret = bind(fd, (struct sockaddr *)dst_addr, sizeof(*dst_addr));
+       if (CHECK(ret, "bind", "failed\n")) {
+               log_err("failed to bind tx socket");
+               goto out;
+       }
+
+       /* Reuse msg: msg_name still holds the client's source address. */
+       msg.msg_control = NULL;
+       msg.msg_controllen = 0;
+       n = sendmsg(fd, &msg, 0);
+       if (CHECK(n <= 0, "sendmsg", "failed\n")) {
+               log_err("failed to send echo reply");
+               ret = -1;
+               goto out;
+       }
+
+       ret = 0;
+out:
+       close(fd);
+       return ret;
+}
+
+/* One-byte echo round-trip over an established TCP connection:
+ * client sends, server accepts and echoes, client reads the echo.
+ * Returns 0 on success, -1 on the first failing step.
+ */
+static int tcp_echo_test(int client_fd, int server_fd)
+{
+       int err;
+
+       err = send_byte(client_fd);
+       if (err)
+               return -1;
+       err = tcp_recv_send(server_fd);
+       if (err)
+               return -1;
+       err = recv_byte(client_fd);
+       if (err)
+               return -1;
+
+       return 0;
+}
+
+/* One-byte echo round-trip over UDP, with the server replying from the
+ * original destination address.  Returns 0 on success, -1 on failure.
+ */
+static int udp_echo_test(int client_fd, int server_fd)
+{
+       int err;
+
+       err = send_byte(client_fd);
+       if (err)
+               return -1;
+       err = udp_recv_send(server_fd);
+       if (err)
+               return -1;
+       err = recv_byte(client_fd);
+       if (err)
+               return -1;
+
+       return 0;
+}
+
+/* Attach @prog to the current network namespace (BPF_SK_LOOKUP hook).
+ * Returns the link, or NULL on failure (errno set on attach error).
+ */
+static struct bpf_link *attach_lookup_prog(struct bpf_program *prog)
+{
+       struct bpf_link *link;
+       int net_fd;
+
+       net_fd = open("/proc/self/ns/net", O_RDONLY);
+       if (CHECK(net_fd < 0, "open", "failed\n")) {
+               log_err("failed to open /proc/self/ns/net");
+               return NULL;
+       }
+
+       link = bpf_program__attach_netns(prog, net_fd);
+       if (CHECK(IS_ERR(link), "bpf_program__attach_netns", "failed\n")) {
+               errno = -PTR_ERR(link);
+               log_err("failed to attach program '%s' to netns",
+                       bpf_program__name(prog));
+               link = NULL;
+       }
+
+       /* Link keeps its own netns reference; fd no longer needed. */
+       close(net_fd);
+       return link;
+}
+
+/* Store @sock_fd at @index in @map (the sockmap the lookup programs
+ * select their target sockets from).  Returns 0 on success, -1 on
+ * failure.
+ */
+static int update_lookup_map(struct bpf_map *map, int index, int sock_fd)
+{
+       int err, map_fd;
+       uint64_t value;
+
+       map_fd = bpf_map__fd(map);
+       if (CHECK(map_fd < 0, "bpf_map__fd", "failed\n")) {
+               errno = -map_fd;
+               log_err("failed to get map FD");
+               return -1;
+       }
+
+       value = (uint64_t)sock_fd;
+       err = bpf_map_update_elem(map_fd, &index, &value, BPF_NOEXIST);
+       if (CHECK(err, "bpf_map_update_elem", "failed\n")) {
+               log_err("failed to update redir_map @ %d", index);
+               return -1;
+       }
+
+       return 0;
+}
+
+/* Return the ID of the program behind @link (via bpf_obj_get_info_by_fd
+ * on the link fd), or 0 on any failure.
+ */
+static __u32 link_info_prog_id(struct bpf_link *link)
+{
+       struct bpf_link_info info = {};
+       __u32 info_len = sizeof(info);
+       int link_fd, err;
+
+       link_fd = bpf_link__fd(link);
+       if (CHECK(link_fd < 0, "bpf_link__fd", "failed\n")) {
+               errno = -link_fd;
+               log_err("bpf_link__fd failed");
+               return 0;
+       }
+
+       err = bpf_obj_get_info_by_fd(link_fd, &info, &info_len);
+       if (CHECK(err, "bpf_obj_get_info_by_fd", "failed\n")) {
+               log_err("bpf_obj_get_info_by_fd");
+               return 0;
+       }
+       if (CHECK(info_len != sizeof(info), "bpf_obj_get_info_by_fd",
+                 "unexpected info len %u\n", info_len))
+               return 0;
+
+       return info.prog_id;
+}
+
+/* Attach three lookup programs to the netns and verify that
+ * bpf_prog_query(BPF_SK_LOOKUP) reports them in attach order, with
+ * zero attach_flags and the expected program IDs.
+ */
+static void query_lookup_prog(struct test_sk_lookup *skel)
+{
+       struct bpf_link *link[3] = {};
+       __u32 attach_flags = 0;
+       __u32 prog_ids[3] = {};
+       __u32 prog_cnt = 3;
+       __u32 prog_id;
+       int net_fd;
+       int err;
+
+       net_fd = open("/proc/self/ns/net", O_RDONLY);
+       if (CHECK(net_fd < 0, "open", "failed\n")) {
+               log_err("failed to open /proc/self/ns/net");
+               return;
+       }
+
+       link[0] = attach_lookup_prog(skel->progs.lookup_pass);
+       if (!link[0])
+               goto close;
+       link[1] = attach_lookup_prog(skel->progs.lookup_pass);
+       if (!link[1])
+               goto detach;
+       link[2] = attach_lookup_prog(skel->progs.lookup_drop);
+       if (!link[2])
+               goto detach;
+
+       err = bpf_prog_query(net_fd, BPF_SK_LOOKUP, 0 /* query flags */,
+                            &attach_flags, prog_ids, &prog_cnt);
+       if (CHECK(err, "bpf_prog_query", "failed\n")) {
+               log_err("failed to query lookup prog");
+               goto detach;
+       }
+
+       errno = 0;
+       if (CHECK(attach_flags != 0, "bpf_prog_query",
+                 "wrong attach_flags on query: %u", attach_flags))
+               goto detach;
+       if (CHECK(prog_cnt != 3, "bpf_prog_query",
+                 "wrong program count on query: %u", prog_cnt))
+               goto detach;
+       prog_id = link_info_prog_id(link[0]);
+       CHECK(prog_ids[0] != prog_id, "bpf_prog_query",
+             "invalid program #0 id on query: %u != %u\n",
+             prog_ids[0], prog_id);
+       prog_id = link_info_prog_id(link[1]);
+       CHECK(prog_ids[1] != prog_id, "bpf_prog_query",
+             "invalid program #1 id on query: %u != %u\n",
+             prog_ids[1], prog_id);
+       prog_id = link_info_prog_id(link[2]);
+       CHECK(prog_ids[2] != prog_id, "bpf_prog_query",
+             "invalid program #2 id on query: %u != %u\n",
+             prog_ids[2], prog_id);
+
+detach:
+       if (link[2])
+               bpf_link__destroy(link[2]);
+       if (link[1])
+               bpf_link__destroy(link[1]);
+       if (link[0])
+               bpf_link__destroy(link[0]);
+close:
+       close(net_fd);
+}
+
+/* Run one redirect test case: attach the lookup prog, start the
+ * server(s), populate the redirect sockmap, then perform an echo
+ * round-trip and check it is serviced by the server in t->accept_on.
+ */
+static void run_lookup_prog(const struct test *t)
+{
+       /* Initialize every slot to -1 so the cleanup loop below only
+        * closes fds we actually opened.  With "{ -1 }" only element 0
+        * was -1; the rest were zero-initialized, so an early exit
+        * would close(0), i.e. stdin.
+        */
+       int client_fd, server_fds[] = { [0 ... MAX_SERVERS - 1] = -1 };
+       struct bpf_link *lookup_link;
+       int i, err;
+
+       lookup_link = attach_lookup_prog(t->lookup_prog);
+       if (!lookup_link)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(server_fds); i++) {
+               server_fds[i] = make_server(t->sotype, t->listen_at.ip,
+                                           t->listen_at.port,
+                                           t->reuseport_prog);
+               if (server_fds[i] < 0)
+                       goto close;
+
+               err = update_lookup_map(t->sock_map, i, server_fds[i]);
+               if (err)
+                       goto close;
+
+               /* want just one server for non-reuseport test */
+               if (!t->reuseport_prog)
+                       break;
+       }
+
+       client_fd = make_client(t->sotype, t->connect_to.ip, t->connect_to.port);
+       if (client_fd < 0)
+               goto close;
+
+       if (t->sotype == SOCK_STREAM)
+               tcp_echo_test(client_fd, server_fds[t->accept_on]);
+       else
+               udp_echo_test(client_fd, server_fds[t->accept_on]);
+
+       close(client_fd);
+close:
+       for (i = 0; i < ARRAY_SIZE(server_fds); i++) {
+               if (server_fds[i] != -1)
+                       close(server_fds[i]);
+       }
+       bpf_link__destroy(lookup_link);
+}
+
+/* Table-driven redirect tests: each case redirects a TCP connection or
+ * UDP datagram from the external (EXT_*) address the client targets to
+ * an internal (INT_*) listener, selected by port, by address, or
+ * through a reuseport group.
+ */
+static void test_redirect_lookup(struct test_sk_lookup *skel)
+{
+       const struct test tests[] = {
+               {
+                       .desc           = "TCP IPv4 redir port",
+                       .lookup_prog    = skel->progs.redir_port,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_STREAM,
+                       .connect_to     = { EXT_IP4, EXT_PORT },
+                       .listen_at      = { EXT_IP4, INT_PORT },
+               },
+               {
+                       .desc           = "TCP IPv4 redir addr",
+                       .lookup_prog    = skel->progs.redir_ip4,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_STREAM,
+                       .connect_to     = { EXT_IP4, EXT_PORT },
+                       .listen_at      = { INT_IP4, EXT_PORT },
+               },
+               {
+                       .desc           = "TCP IPv4 redir with reuseport",
+                       .lookup_prog    = skel->progs.select_sock_a,
+                       .reuseport_prog = skel->progs.select_sock_b,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_STREAM,
+                       .connect_to     = { EXT_IP4, EXT_PORT },
+                       .listen_at      = { INT_IP4, INT_PORT },
+                       .accept_on      = SERVER_B,
+               },
+               {
+                       .desc           = "TCP IPv4 redir skip reuseport",
+                       .lookup_prog    = skel->progs.select_sock_a_no_reuseport,
+                       .reuseport_prog = skel->progs.select_sock_b,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_STREAM,
+                       .connect_to     = { EXT_IP4, EXT_PORT },
+                       .listen_at      = { INT_IP4, INT_PORT },
+                       .accept_on      = SERVER_A,
+               },
+               {
+                       .desc           = "TCP IPv6 redir port",
+                       .lookup_prog    = skel->progs.redir_port,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_STREAM,
+                       .connect_to     = { EXT_IP6, EXT_PORT },
+                       .listen_at      = { EXT_IP6, INT_PORT },
+               },
+               {
+                       .desc           = "TCP IPv6 redir addr",
+                       .lookup_prog    = skel->progs.redir_ip6,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_STREAM,
+                       .connect_to     = { EXT_IP6, EXT_PORT },
+                       .listen_at      = { INT_IP6, EXT_PORT },
+               },
+               {
+                       .desc           = "TCP IPv4->IPv6 redir port",
+                       .lookup_prog    = skel->progs.redir_port,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_STREAM,
+                       .connect_to     = { EXT_IP4, EXT_PORT },
+                       .listen_at      = { INT_IP4_V6, INT_PORT },
+               },
+               {
+                       .desc           = "TCP IPv6 redir with reuseport",
+                       .lookup_prog    = skel->progs.select_sock_a,
+                       .reuseport_prog = skel->progs.select_sock_b,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_STREAM,
+                       .connect_to     = { EXT_IP6, EXT_PORT },
+                       .listen_at      = { INT_IP6, INT_PORT },
+                       .accept_on      = SERVER_B,
+               },
+               {
+                       .desc           = "TCP IPv6 redir skip reuseport",
+                       .lookup_prog    = skel->progs.select_sock_a_no_reuseport,
+                       .reuseport_prog = skel->progs.select_sock_b,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_STREAM,
+                       .connect_to     = { EXT_IP6, EXT_PORT },
+                       .listen_at      = { INT_IP6, INT_PORT },
+                       .accept_on      = SERVER_A,
+               },
+               {
+                       .desc           = "UDP IPv4 redir port",
+                       .lookup_prog    = skel->progs.redir_port,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_DGRAM,
+                       .connect_to     = { EXT_IP4, EXT_PORT },
+                       .listen_at      = { EXT_IP4, INT_PORT },
+               },
+               {
+                       .desc           = "UDP IPv4 redir addr",
+                       .lookup_prog    = skel->progs.redir_ip4,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_DGRAM,
+                       .connect_to     = { EXT_IP4, EXT_PORT },
+                       .listen_at      = { INT_IP4, EXT_PORT },
+               },
+               {
+                       .desc           = "UDP IPv4 redir with reuseport",
+                       .lookup_prog    = skel->progs.select_sock_a,
+                       .reuseport_prog = skel->progs.select_sock_b,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_DGRAM,
+                       .connect_to     = { EXT_IP4, EXT_PORT },
+                       .listen_at      = { INT_IP4, INT_PORT },
+                       .accept_on      = SERVER_B,
+               },
+               {
+                       .desc           = "UDP IPv4 redir skip reuseport",
+                       .lookup_prog    = skel->progs.select_sock_a_no_reuseport,
+                       .reuseport_prog = skel->progs.select_sock_b,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_DGRAM,
+                       .connect_to     = { EXT_IP4, EXT_PORT },
+                       .listen_at      = { INT_IP4, INT_PORT },
+                       .accept_on      = SERVER_A,
+               },
+               {
+                       .desc           = "UDP IPv6 redir port",
+                       .lookup_prog    = skel->progs.redir_port,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_DGRAM,
+                       .connect_to     = { EXT_IP6, EXT_PORT },
+                       .listen_at      = { EXT_IP6, INT_PORT },
+               },
+               {
+                       .desc           = "UDP IPv6 redir addr",
+                       .lookup_prog    = skel->progs.redir_ip6,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_DGRAM,
+                       .connect_to     = { EXT_IP6, EXT_PORT },
+                       .listen_at      = { INT_IP6, EXT_PORT },
+               },
+               {
+                       .desc           = "UDP IPv4->IPv6 redir port",
+                       .lookup_prog    = skel->progs.redir_port,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_DGRAM,
+                       .listen_at      = { INT_IP4_V6, INT_PORT },
+                       .connect_to     = { EXT_IP4, EXT_PORT },
+               },
+               {
+                       .desc           = "UDP IPv6 redir and reuseport",
+                       .lookup_prog    = skel->progs.select_sock_a,
+                       .reuseport_prog = skel->progs.select_sock_b,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_DGRAM,
+                       .connect_to     = { EXT_IP6, EXT_PORT },
+                       .listen_at      = { INT_IP6, INT_PORT },
+                       .accept_on      = SERVER_B,
+               },
+               {
+                       .desc           = "UDP IPv6 redir skip reuseport",
+                       .lookup_prog    = skel->progs.select_sock_a_no_reuseport,
+                       .reuseport_prog = skel->progs.select_sock_b,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_DGRAM,
+                       .connect_to     = { EXT_IP6, EXT_PORT },
+                       .listen_at      = { INT_IP6, INT_PORT },
+                       .accept_on      = SERVER_A,
+               },
+       };
+       const struct test *t;
+
+       for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
+               if (test__start_subtest(t->desc))
+                       run_lookup_prog(t);
+       }
+}
+
+/* With a lookup prog that returns SK_DROP attached, the client must
+ * observe ECONNREFUSED: synchronously from connect() for TCP, or as an
+ * asynchronous error read back after a send for connected UDP.
+ */
+static void drop_on_lookup(const struct test *t)
+{
+       struct sockaddr_storage dst = {};
+       int client_fd, server_fd, err;
+       struct bpf_link *lookup_link;
+       ssize_t n;
+
+       lookup_link = attach_lookup_prog(t->lookup_prog);
+       if (!lookup_link)
+               return;
+
+       server_fd = make_server(t->sotype, t->listen_at.ip, t->listen_at.port,
+                               t->reuseport_prog);
+       if (server_fd < 0)
+               goto detach;
+
+       client_fd = make_socket(t->sotype, t->connect_to.ip,
+                               t->connect_to.port, &dst);
+       if (client_fd < 0)
+               goto close_srv;
+
+       /* TCP: connect() itself must fail with ECONNREFUSED. */
+       err = connect(client_fd, (void *)&dst, inetaddr_len(&dst));
+       if (t->sotype == SOCK_DGRAM) {
+               err = send_byte(client_fd);
+               if (err)
+                       goto close_all;
+
+               /* Read out asynchronous error */
+               n = recv(client_fd, NULL, 0, 0);
+               err = n == -1;
+       }
+       if (CHECK(!err || errno != ECONNREFUSED, "connect",
+                 "unexpected success or error\n"))
+               log_err("expected ECONNREFUSED on connect");
+
+close_all:
+       close(client_fd);
+close_srv:
+       close(server_fd);
+detach:
+       bpf_link__destroy(lookup_link);
+}
+
+/* Drop-on-lookup cases: lookup_drop refuses every lookup, so each
+ * client must get ECONNREFUSED regardless of protocol or family.
+ */
+static void test_drop_on_lookup(struct test_sk_lookup *skel)
+{
+       const struct test tests[] = {
+               {
+                       .desc           = "TCP IPv4 drop on lookup",
+                       .lookup_prog    = skel->progs.lookup_drop,
+                       .sotype         = SOCK_STREAM,
+                       .connect_to     = { EXT_IP4, EXT_PORT },
+                       .listen_at      = { EXT_IP4, EXT_PORT },
+               },
+               {
+                       .desc           = "TCP IPv6 drop on lookup",
+                       .lookup_prog    = skel->progs.lookup_drop,
+                       .sotype         = SOCK_STREAM,
+                       .connect_to     = { EXT_IP6, EXT_PORT },
+                       .listen_at      = { EXT_IP6, EXT_PORT },
+               },
+               {
+                       .desc           = "UDP IPv4 drop on lookup",
+                       .lookup_prog    = skel->progs.lookup_drop,
+                       .sotype         = SOCK_DGRAM,
+                       .connect_to     = { EXT_IP4, EXT_PORT },
+                       .listen_at      = { EXT_IP4, EXT_PORT },
+               },
+               {
+                       .desc           = "UDP IPv6 drop on lookup",
+                       .lookup_prog    = skel->progs.lookup_drop,
+                       .sotype         = SOCK_DGRAM,
+                       .connect_to     = { EXT_IP6, EXT_PORT },
+                       /* NOTE(review): siblings listen at EXT_PORT; the
+                        * port should be irrelevant here since lookup_drop
+                        * drops all lookups -- confirm the asymmetry is
+                        * intended.
+                        */
+                       .listen_at      = { EXT_IP6, INT_PORT },
+               },
+       };
+       const struct test *t;
+
+       for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
+               if (test__start_subtest(t->desc))
+                       drop_on_lookup(t);
+       }
+}
+
+/* The lookup prog redirects to SERVER_A, but SERVER_A's reuseport prog
+ * drops.  The client must see ECONNREFUSED even though a second server
+ * listens directly at the destination address.
+ */
+static void drop_on_reuseport(const struct test *t)
+{
+       struct sockaddr_storage dst = { 0 };
+       int client, server1, server2, err;
+       struct bpf_link *lookup_link;
+       ssize_t n;
+
+       lookup_link = attach_lookup_prog(t->lookup_prog);
+       if (!lookup_link)
+               return;
+
+       server1 = make_server(t->sotype, t->listen_at.ip, t->listen_at.port,
+                             t->reuseport_prog);
+       if (server1 < 0)
+               goto detach;
+
+       err = update_lookup_map(t->sock_map, SERVER_A, server1);
+       if (err)
+               goto detach;
+
+       /* second server on destination address we should never reach */
+       server2 = make_server(t->sotype, t->connect_to.ip, t->connect_to.port,
+                             NULL /* reuseport prog */);
+       if (server2 < 0)
+               goto close_srv1;
+
+       client = make_socket(t->sotype, t->connect_to.ip,
+                            t->connect_to.port, &dst);
+       if (client < 0)
+               goto close_srv2;
+
+       /* TCP: connect() itself must fail with ECONNREFUSED. */
+       err = connect(client, (void *)&dst, inetaddr_len(&dst));
+       if (t->sotype == SOCK_DGRAM) {
+               err = send_byte(client);
+               if (err)
+                       goto close_all;
+
+               /* Read out asynchronous error */
+               n = recv(client, NULL, 0, 0);
+               err = n == -1;
+       }
+       if (CHECK(!err || errno != ECONNREFUSED, "connect",
+                 "unexpected success or error\n"))
+               log_err("expected ECONNREFUSED on connect");
+
+close_all:
+       close(client);
+close_srv2:
+       close(server2);
+close_srv1:
+       close(server1);
+detach:
+       bpf_link__destroy(lookup_link);
+}
+
+static void test_drop_on_reuseport(struct test_sk_lookup *skel)
+{
+       const struct test tests[] = {
+               {
+                       .desc           = "TCP IPv4 drop on reuseport",
+                       .lookup_prog    = skel->progs.select_sock_a,
+                       .reuseport_prog = skel->progs.reuseport_drop,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_STREAM,
+                       .connect_to     = { EXT_IP4, EXT_PORT },
+                       .listen_at      = { INT_IP4, INT_PORT },
+               },
+               {
+                       .desc           = "TCP IPv6 drop on reuseport",
+                       .lookup_prog    = skel->progs.select_sock_a,
+                       .reuseport_prog = skel->progs.reuseport_drop,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_STREAM,
+                       .connect_to     = { EXT_IP6, EXT_PORT },
+                       .listen_at      = { INT_IP6, INT_PORT },
+               },
+               {
+                       .desc           = "UDP IPv4 drop on reuseport",
+                       .lookup_prog    = skel->progs.select_sock_a,
+                       .reuseport_prog = skel->progs.reuseport_drop,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_DGRAM,
+                       .connect_to     = { EXT_IP4, EXT_PORT },
+                       .listen_at      = { INT_IP4, INT_PORT },
+               },
+               {
+                       .desc           = "UDP IPv6 drop on reuseport",
+                       .lookup_prog    = skel->progs.select_sock_a,
+                       .reuseport_prog = skel->progs.reuseport_drop,
+                       .sock_map       = skel->maps.redir_map,
+                       .sotype         = SOCK_DGRAM,
+                       .connect_to     = { EXT_IP6, EXT_PORT },
+                       .listen_at      = { INT_IP6, INT_PORT },
+               },
+       };
+       const struct test *t;
+
+       for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
+               if (test__start_subtest(t->desc))
+                       drop_on_reuseport(t);
+       }
+}
+
+static void run_sk_assign(struct test_sk_lookup *skel,
+                         struct bpf_program *lookup_prog,
+                         const char *listen_ip, const char *connect_ip)
+{
+       int client_fd, peer_fd, server_fds[] = { [0 ... MAX_SERVERS - 1] = -1 };
+       struct bpf_link *lookup_link;
+       int i, err;
+
+       lookup_link = attach_lookup_prog(lookup_prog);
+       if (!lookup_link)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(server_fds); i++) {
+               server_fds[i] = make_server(SOCK_STREAM, listen_ip, 0, NULL);
+               if (server_fds[i] < 0)
+                       goto close_servers;
+
+               err = update_lookup_map(skel->maps.redir_map, i,
+                                       server_fds[i]);
+               if (err)
+                       goto close_servers;
+       }
+
+       client_fd = make_client(SOCK_STREAM, connect_ip, EXT_PORT);
+       if (client_fd < 0)
+               goto close_servers;
+
+       peer_fd = accept(server_fds[SERVER_B], NULL, NULL);
+       if (CHECK(peer_fd < 0, "accept", "failed\n"))
+               goto close_client;
+
+       close(peer_fd);
+close_client:
+       close(client_fd);
+close_servers:
+       for (i = 0; i < ARRAY_SIZE(server_fds); i++) {
+               if (server_fds[i] != -1)
+                       close(server_fds[i]);
+       }
+       bpf_link__destroy(lookup_link);
+}
+
+static void run_sk_assign_v4(struct test_sk_lookup *skel,
+                            struct bpf_program *lookup_prog)
+{
+       run_sk_assign(skel, lookup_prog, INT_IP4, EXT_IP4);
+}
+
+static void run_sk_assign_v6(struct test_sk_lookup *skel,
+                            struct bpf_program *lookup_prog)
+{
+       run_sk_assign(skel, lookup_prog, INT_IP6, EXT_IP6);
+}
+
+static void run_sk_assign_connected(struct test_sk_lookup *skel,
+                                   int sotype)
+{
+       int err, client_fd, connected_fd, server_fd;
+       struct bpf_link *lookup_link;
+
+       server_fd = make_server(sotype, EXT_IP4, EXT_PORT, NULL);
+       if (server_fd < 0)
+               return;
+
+       connected_fd = make_client(sotype, EXT_IP4, EXT_PORT);
+       if (connected_fd < 0)
+               goto out_close_server;
+
+       /* Put a connected socket in redirect map */
+       err = update_lookup_map(skel->maps.redir_map, SERVER_A, connected_fd);
+       if (err)
+               goto out_close_connected;
+
+       lookup_link = attach_lookup_prog(skel->progs.sk_assign_esocknosupport);
+       if (!lookup_link)
+               goto out_close_connected;
+
+       /* Try to redirect TCP SYN / UDP packet to a connected socket */
+       client_fd = make_client(sotype, EXT_IP4, EXT_PORT);
+       if (client_fd < 0)
+               goto out_unlink_prog;
+       if (sotype == SOCK_DGRAM) {
+               send_byte(client_fd);
+               recv_byte(server_fd);
+       }
+
+       close(client_fd);
+out_unlink_prog:
+       bpf_link__destroy(lookup_link);
+out_close_connected:
+       close(connected_fd);
+out_close_server:
+       close(server_fd);
+}
+
+static void test_sk_assign_helper(struct test_sk_lookup *skel)
+{
+       if (test__start_subtest("sk_assign returns EEXIST"))
+               run_sk_assign_v4(skel, skel->progs.sk_assign_eexist);
+       if (test__start_subtest("sk_assign honors F_REPLACE"))
+               run_sk_assign_v4(skel, skel->progs.sk_assign_replace_flag);
+       if (test__start_subtest("sk_assign accepts NULL socket"))
+               run_sk_assign_v4(skel, skel->progs.sk_assign_null);
+       if (test__start_subtest("access ctx->sk"))
+               run_sk_assign_v4(skel, skel->progs.access_ctx_sk);
+       if (test__start_subtest("narrow access to ctx v4"))
+               run_sk_assign_v4(skel, skel->progs.ctx_narrow_access);
+       if (test__start_subtest("narrow access to ctx v6"))
+               run_sk_assign_v6(skel, skel->progs.ctx_narrow_access);
+       if (test__start_subtest("sk_assign rejects TCP established"))
+               run_sk_assign_connected(skel, SOCK_STREAM);
+       if (test__start_subtest("sk_assign rejects UDP connected"))
+               run_sk_assign_connected(skel, SOCK_DGRAM);
+}
+
+struct test_multi_prog {
+       const char *desc;
+       struct bpf_program *prog1;
+       struct bpf_program *prog2;
+       struct bpf_map *redir_map;
+       struct bpf_map *run_map;
+       int expect_errno;
+       struct inet_addr listen_at;
+};
+
+static void run_multi_prog_lookup(const struct test_multi_prog *t)
+{
+       struct sockaddr_storage dst = {};
+       int map_fd, server_fd, client_fd;
+       struct bpf_link *link1, *link2;
+       int prog_idx, done, err;
+
+       map_fd = bpf_map__fd(t->run_map);
+
+       done = 0;
+       prog_idx = PROG1;
+       err = bpf_map_update_elem(map_fd, &prog_idx, &done, BPF_ANY);
+       if (CHECK(err, "bpf_map_update_elem", "failed\n"))
+               return;
+       prog_idx = PROG2;
+       err = bpf_map_update_elem(map_fd, &prog_idx, &done, BPF_ANY);
+       if (CHECK(err, "bpf_map_update_elem", "failed\n"))
+               return;
+
+       link1 = attach_lookup_prog(t->prog1);
+       if (!link1)
+               return;
+       link2 = attach_lookup_prog(t->prog2);
+       if (!link2)
+               goto out_unlink1;
+
+       server_fd = make_server(SOCK_STREAM, t->listen_at.ip,
+                               t->listen_at.port, NULL);
+       if (server_fd < 0)
+               goto out_unlink2;
+
+       err = update_lookup_map(t->redir_map, SERVER_A, server_fd);
+       if (err)
+               goto out_close_server;
+
+       client_fd = make_socket(SOCK_STREAM, EXT_IP4, EXT_PORT, &dst);
+       if (client_fd < 0)
+               goto out_close_server;
+
+       err = connect(client_fd, (void *)&dst, inetaddr_len(&dst));
+       if (CHECK(err && !t->expect_errno, "connect",
+                 "unexpected error %d\n", errno))
+               goto out_close_client;
+       if (CHECK(err && t->expect_errno && errno != t->expect_errno,
+                 "connect", "unexpected error %d\n", errno))
+               goto out_close_client;
+
+       done = 0;
+       prog_idx = PROG1;
+       err = bpf_map_lookup_elem(map_fd, &prog_idx, &done);
+       CHECK(err, "bpf_map_lookup_elem", "failed\n");
+       CHECK(!done, "bpf_map_lookup_elem", "PROG1 !done\n");
+
+       done = 0;
+       prog_idx = PROG2;
+       err = bpf_map_lookup_elem(map_fd, &prog_idx, &done);
+       CHECK(err, "bpf_map_lookup_elem", "failed\n");
+       CHECK(!done, "bpf_map_lookup_elem", "PROG2 !done\n");
+
+out_close_client:
+       close(client_fd);
+out_close_server:
+       close(server_fd);
+out_unlink2:
+       bpf_link__destroy(link2);
+out_unlink1:
+       bpf_link__destroy(link1);
+}
+
+static void test_multi_prog_lookup(struct test_sk_lookup *skel)
+{
+       struct test_multi_prog tests[] = {
+               {
+                       .desc           = "multi prog - pass, pass",
+                       .prog1          = skel->progs.multi_prog_pass1,
+                       .prog2          = skel->progs.multi_prog_pass2,
+                       .listen_at      = { EXT_IP4, EXT_PORT },
+               },
+               {
+                       .desc           = "multi prog - drop, drop",
+                       .prog1          = skel->progs.multi_prog_drop1,
+                       .prog2          = skel->progs.multi_prog_drop2,
+                       .listen_at      = { EXT_IP4, EXT_PORT },
+                       .expect_errno   = ECONNREFUSED,
+               },
+               {
+                       .desc           = "multi prog - pass, drop",
+                       .prog1          = skel->progs.multi_prog_pass1,
+                       .prog2          = skel->progs.multi_prog_drop2,
+                       .listen_at      = { EXT_IP4, EXT_PORT },
+                       .expect_errno   = ECONNREFUSED,
+               },
+               {
+                       .desc           = "multi prog - drop, pass",
+                       .prog1          = skel->progs.multi_prog_drop1,
+                       .prog2          = skel->progs.multi_prog_pass2,
+                       .listen_at      = { EXT_IP4, EXT_PORT },
+                       .expect_errno   = ECONNREFUSED,
+               },
+               {
+                       .desc           = "multi prog - pass, redir",
+                       .prog1          = skel->progs.multi_prog_pass1,
+                       .prog2          = skel->progs.multi_prog_redir2,
+                       .listen_at      = { INT_IP4, INT_PORT },
+               },
+               {
+                       .desc           = "multi prog - redir, pass",
+                       .prog1          = skel->progs.multi_prog_redir1,
+                       .prog2          = skel->progs.multi_prog_pass2,
+                       .listen_at      = { INT_IP4, INT_PORT },
+               },
+               {
+                       .desc           = "multi prog - drop, redir",
+                       .prog1          = skel->progs.multi_prog_drop1,
+                       .prog2          = skel->progs.multi_prog_redir2,
+                       .listen_at      = { INT_IP4, INT_PORT },
+               },
+               {
+                       .desc           = "multi prog - redir, drop",
+                       .prog1          = skel->progs.multi_prog_redir1,
+                       .prog2          = skel->progs.multi_prog_drop2,
+                       .listen_at      = { INT_IP4, INT_PORT },
+               },
+               {
+                       .desc           = "multi prog - redir, redir",
+                       .prog1          = skel->progs.multi_prog_redir1,
+                       .prog2          = skel->progs.multi_prog_redir2,
+                       .listen_at      = { INT_IP4, INT_PORT },
+               },
+       };
+       struct test_multi_prog *t;
+
+       for (t = tests; t < tests + ARRAY_SIZE(tests); t++) {
+               t->redir_map = skel->maps.redir_map;
+               t->run_map = skel->maps.run_map;
+               if (test__start_subtest(t->desc))
+                       run_multi_prog_lookup(t);
+       }
+}
+
+static void run_tests(struct test_sk_lookup *skel)
+{
+       if (test__start_subtest("query lookup prog"))
+               query_lookup_prog(skel);
+       test_redirect_lookup(skel);
+       test_drop_on_lookup(skel);
+       test_drop_on_reuseport(skel);
+       test_sk_assign_helper(skel);
+       test_multi_prog_lookup(skel);
+}
+
+static int switch_netns(void)
+{
+       static const char * const setup_script[] = {
+               "ip -6 addr add dev lo " EXT_IP6 "/128 nodad",
+               "ip -6 addr add dev lo " INT_IP6 "/128 nodad",
+               "ip link set dev lo up",
+               NULL,
+       };
+       const char * const *cmd;
+       int err;
+
+       err = unshare(CLONE_NEWNET);
+       if (CHECK(err, "unshare", "failed\n")) {
+               log_err("unshare(CLONE_NEWNET)");
+               return -1;
+       }
+
+       for (cmd = setup_script; *cmd; cmd++) {
+               err = system(*cmd);
+               if (CHECK(err, "system", "failed\n")) {
+                       log_err("system(%s)", *cmd);
+                       return -1;
+               }
+       }
+
+       return 0;
+}
+
+void test_sk_lookup(void)
+{
+       struct test_sk_lookup *skel;
+       int err;
+
+       err = switch_netns();
+       if (err)
+               return;
+
+       skel = test_sk_lookup__open_and_load();
+       if (CHECK(!skel, "skel open_and_load", "failed\n"))
+               return;
+
+       run_tests(skel);
+
+       test_sk_lookup__destroy(skel);
+}
index fa153cf..fe87b77 100644 (file)
@@ -41,7 +41,7 @@ void test_skeleton(void)
        CHECK(bss->in4 != 0, "in4", "got %lld != exp %lld\n", bss->in4, 0LL);
        CHECK(bss->out4 != 0, "out4", "got %lld != exp %lld\n", bss->out4, 0LL);
 
-       CHECK(rodata->in6 != 0, "in6", "got %d != exp %d\n", rodata->in6, 0);
+       CHECK(rodata->in.in6 != 0, "in6", "got %d != exp %d\n", rodata->in.in6, 0);
        CHECK(bss->out6 != 0, "out6", "got %d != exp %d\n", bss->out6, 0);
 
        /* validate we can pre-setup global variables, even in .bss */
@@ -49,7 +49,7 @@ void test_skeleton(void)
        data->in2 = 11;
        bss->in3 = 12;
        bss->in4 = 13;
-       rodata->in6 = 14;
+       rodata->in.in6 = 14;
 
        err = test_skeleton__load(skel);
        if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
@@ -60,7 +60,7 @@ void test_skeleton(void)
        CHECK(data->in2 != 11, "in2", "got %lld != exp %lld\n", data->in2, 11LL);
        CHECK(bss->in3 != 12, "in3", "got %d != exp %d\n", bss->in3, 12);
        CHECK(bss->in4 != 13, "in4", "got %lld != exp %lld\n", bss->in4, 13LL);
-       CHECK(rodata->in6 != 14, "in6", "got %d != exp %d\n", rodata->in6, 14);
+       CHECK(rodata->in.in6 != 14, "in6", "got %d != exp %d\n", rodata->in.in6, 14);
 
        /* now set new values and attach to get them into outX variables */
        data->in1 = 1;
index 8547ecb..ec281b0 100644 (file)
@@ -193,11 +193,10 @@ static void run_test(int cgroup_fd)
        if (CHECK_FAIL(server_fd < 0))
                goto close_bpf_object;
 
+       pthread_mutex_lock(&server_started_mtx);
        if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread,
                                      (void *)&server_fd)))
                goto close_server_fd;
-
-       pthread_mutex_lock(&server_started_mtx);
        pthread_cond_wait(&server_started, &server_started_mtx);
        pthread_mutex_unlock(&server_started_mtx);
 
index 2061a6b..5f54c6a 100644 (file)
@@ -13,6 +13,7 @@ static int getsetsockopt(void)
                char cc[16]; /* TCP_CA_NAME_MAX */
        } buf = {};
        socklen_t optlen;
+       char *big_buf = NULL;
 
        fd = socket(AF_INET, SOCK_STREAM, 0);
        if (fd < 0) {
@@ -22,24 +23,31 @@ static int getsetsockopt(void)
 
        /* IP_TOS - BPF bypass */
 
-       buf.u8[0] = 0x08;
-       err = setsockopt(fd, SOL_IP, IP_TOS, &buf, 1);
+       optlen = getpagesize() * 2;
+       big_buf = calloc(1, optlen);
+       if (!big_buf) {
+               log_err("Couldn't allocate two pages");
+               goto err;
+       }
+
+       *(int *)big_buf = 0x08;
+       err = setsockopt(fd, SOL_IP, IP_TOS, big_buf, optlen);
        if (err) {
                log_err("Failed to call setsockopt(IP_TOS)");
                goto err;
        }
 
-       buf.u8[0] = 0x00;
+       memset(big_buf, 0, optlen);
        optlen = 1;
-       err = getsockopt(fd, SOL_IP, IP_TOS, &buf, &optlen);
+       err = getsockopt(fd, SOL_IP, IP_TOS, big_buf, &optlen);
        if (err) {
                log_err("Failed to call getsockopt(IP_TOS)");
                goto err;
        }
 
-       if (buf.u8[0] != 0x08) {
-               log_err("Unexpected getsockopt(IP_TOS) buf[0] 0x%02x != 0x08",
-                       buf.u8[0]);
+       if (*(int *)big_buf != 0x08) {
+               log_err("Unexpected getsockopt(IP_TOS) optval 0x%x != 0x08",
+                       *(int *)big_buf);
                goto err;
        }
 
@@ -78,6 +86,28 @@ static int getsetsockopt(void)
                goto err;
        }
 
+       /* IP_FREEBIND - BPF can't access optval past PAGE_SIZE */
+
+       optlen = getpagesize() * 2;
+       memset(big_buf, 0, optlen);
+
+       err = setsockopt(fd, SOL_IP, IP_FREEBIND, big_buf, optlen);
+       if (err != 0) {
+               log_err("Failed to call setsockopt, ret=%d", err);
+               goto err;
+       }
+
+       err = getsockopt(fd, SOL_IP, IP_FREEBIND, big_buf, &optlen);
+       if (err != 0) {
+               log_err("Failed to call getsockopt, ret=%d", err);
+               goto err;
+       }
+
+       if (optlen != 1 || *(__u8 *)big_buf != 0x55) {
+               log_err("Unexpected IP_FREEBIND getsockopt, optlen=%d, optval=0x%x",
+                       optlen, *(__u8 *)big_buf);
+       }
+
        /* SO_SNDBUF is overwritten */
 
        buf.u32 = 0x01010101;
@@ -124,9 +154,11 @@ static int getsetsockopt(void)
                goto err;
        }
 
+       free(big_buf);
        close(fd);
        return 0;
 err:
+       free(big_buf);
        close(fd);
        return -1;
 }
index 9013a0c..d207e96 100644 (file)
@@ -118,7 +118,7 @@ static int run_test(int cgroup_fd, int server_fd)
                goto close_bpf_object;
        }
 
-       client_fd = connect_to_fd(AF_INET, SOCK_STREAM, server_fd);
+       client_fd = connect_to_fd(server_fd, 0);
        if (client_fd < 0) {
                err = -1;
                goto close_bpf_object;
@@ -161,7 +161,7 @@ void test_tcp_rtt(void)
        if (CHECK_FAIL(cgroup_fd < 0))
                return;
 
-       server_fd = start_server(AF_INET, SOCK_STREAM);
+       server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
        if (CHECK_FAIL(server_fd < 0))
                goto close_cgroup_fd;
 
diff --git a/tools/testing/selftests/bpf/prog_tests/trace_printk.c b/tools/testing/selftests/bpf/prog_tests/trace_printk.c
new file mode 100644 (file)
index 0000000..39b0dec
--- /dev/null
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Oracle and/or its affiliates. */
+
+#include <test_progs.h>
+
+#include "trace_printk.skel.h"
+
+#define TRACEBUF       "/sys/kernel/debug/tracing/trace_pipe"
+#define SEARCHMSG      "testing,testing"
+
+void test_trace_printk(void)
+{
+       int err, iter = 0, duration = 0, found = 0;
+       struct trace_printk__bss *bss;
+       struct trace_printk *skel;
+       char *buf = NULL;
+       FILE *fp = NULL;
+       size_t buflen = 0;
+
+       skel = trace_printk__open();
+       if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+               return;
+
+       err = trace_printk__load(skel);
+       if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
+               goto cleanup;
+
+       bss = skel->bss;
+
+       err = trace_printk__attach(skel);
+       if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+               goto cleanup;
+
+       fp = fopen(TRACEBUF, "r");
+       if (CHECK(fp == NULL, "could not open trace buffer",
+                 "error %d opening %s", errno, TRACEBUF))
+               goto cleanup;
+
+       /* We do not want to wait forever if this test fails... */
+       fcntl(fileno(fp), F_SETFL, O_NONBLOCK);
+
+       /* wait for tracepoint to trigger */
+       usleep(1);
+       trace_printk__detach(skel);
+
+       if (CHECK(bss->trace_printk_ran == 0,
+                 "bpf_trace_printk never ran",
+                 "ran == %d", bss->trace_printk_ran))
+               goto cleanup;
+
+       if (CHECK(bss->trace_printk_ret <= 0,
+                 "bpf_trace_printk returned <= 0 value",
+                 "got %d", bss->trace_printk_ret))
+               goto cleanup;
+
+       /* verify our search string is in the trace buffer */
+       while (getline(&buf, &buflen, fp) >= 0 || errno == EAGAIN) {
+               if (strstr(buf, SEARCHMSG) != NULL)
+                       found++;
+               if (found == bss->trace_printk_ran)
+                       break;
+               if (++iter > 1000)
+                       break;
+       }
+
+       if (CHECK(!found, "message from bpf_trace_printk not found",
+                 "no instance of %s in %s", SEARCHMSG, TRACEBUF))
+               goto cleanup;
+
+cleanup:
+       trace_printk__destroy(skel);
+       free(buf);
+       if (fp)
+               fclose(fp);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/udp_limit.c b/tools/testing/selftests/bpf/prog_tests/udp_limit.c
new file mode 100644 (file)
index 0000000..2aba09d
--- /dev/null
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "udp_limit.skel.h"
+
+#include <sys/types.h>
+#include <sys/socket.h>
+
+static int duration;
+
+void test_udp_limit(void)
+{
+       struct udp_limit *skel;
+       int fd1 = -1, fd2 = -1;
+       int cgroup_fd;
+
+       cgroup_fd = test__join_cgroup("/udp_limit");
+       if (CHECK(cgroup_fd < 0, "cg-join", "errno %d", errno))
+               return;
+
+       skel = udp_limit__open_and_load();
+       if (CHECK(!skel, "skel-load", "errno %d", errno))
+               goto close_cgroup_fd;
+
+       skel->links.sock = bpf_program__attach_cgroup(skel->progs.sock, cgroup_fd);
+       skel->links.sock_release = bpf_program__attach_cgroup(skel->progs.sock_release, cgroup_fd);
+       if (CHECK(IS_ERR(skel->links.sock) || IS_ERR(skel->links.sock_release),
+                 "cg-attach", "sock %ld sock_release %ld",
+                 PTR_ERR(skel->links.sock),
+                 PTR_ERR(skel->links.sock_release)))
+               goto close_skeleton;
+
+       /* BPF program enforces a single UDP socket per cgroup,
+        * verify that.
+        */
+       fd1 = socket(AF_INET, SOCK_DGRAM, 0);
+       if (CHECK(fd1 < 0, "fd1", "errno %d", errno))
+               goto close_skeleton;
+
+       fd2 = socket(AF_INET, SOCK_DGRAM, 0);
+       if (CHECK(fd2 >= 0, "fd2", "errno %d", errno))
+               goto close_skeleton;
+
+       /* We can reopen again after close. */
+       close(fd1);
+       fd1 = -1;
+
+       fd1 = socket(AF_INET, SOCK_DGRAM, 0);
+       if (CHECK(fd1 < 0, "fd1-again", "errno %d", errno))
+               goto close_skeleton;
+
+       /* Make sure the program was invoked the expected
+        * number of times:
+        * - open fd1           - BPF_CGROUP_INET_SOCK_CREATE
+        * - attempt to openfd2 - BPF_CGROUP_INET_SOCK_CREATE
+        * - close fd1          - BPF_CGROUP_INET_SOCK_RELEASE
+        * - open fd1 again     - BPF_CGROUP_INET_SOCK_CREATE
+        */
+       if (CHECK(skel->bss->invocations != 4, "bss-invocations",
+                 "invocations=%d", skel->bss->invocations))
+               goto close_skeleton;
+
+       /* We should still have a single socket in use */
+       if (CHECK(skel->bss->in_use != 1, "bss-in_use",
+                 "in_use=%d", skel->bss->in_use))
+               goto close_skeleton;
+
+close_skeleton:
+       if (fd1 >= 0)
+               close(fd1);
+       if (fd2 >= 0)
+               close(fd2);
+       udp_limit__destroy(skel);
+close_cgroup_fd:
+       close(cgroup_fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/varlen.c b/tools/testing/selftests/bpf/prog_tests/varlen.c
new file mode 100644 (file)
index 0000000..c75525e
--- /dev/null
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <test_progs.h>
+#include <time.h>
+#include "test_varlen.skel.h"
+
+#define CHECK_VAL(got, exp) \
+       CHECK((got) != (exp), "check", "got %ld != exp %ld\n", \
+             (long)(got), (long)(exp))
+
+void test_varlen(void)
+{
+       int duration = 0, err;
+       struct test_varlen* skel;
+       struct test_varlen__bss *bss;
+       struct test_varlen__data *data;
+       const char str1[] = "Hello, ";
+       const char str2[] = "World!";
+       const char exp_str[] = "Hello, \0World!\0";
+       const int size1 = sizeof(str1);
+       const int size2 = sizeof(str2);
+
+       skel = test_varlen__open_and_load();
+       if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+               return;
+       bss = skel->bss;
+       data = skel->data;
+
+       err = test_varlen__attach(skel);
+       if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+               goto cleanup;
+
+       bss->test_pid = getpid();
+
+       /* trigger everything */
+       memcpy(bss->buf_in1, str1, size1);
+       memcpy(bss->buf_in2, str2, size2);
+       bss->capture = true;
+       usleep(1);
+       bss->capture = false;
+
+       CHECK_VAL(bss->payload1_len1, size1);
+       CHECK_VAL(bss->payload1_len2, size2);
+       CHECK_VAL(bss->total1, size1 + size2);
+       CHECK(memcmp(bss->payload1, exp_str, size1 + size2), "content_check",
+             "doesn't match!");
+
+       CHECK_VAL(data->payload2_len1, size1);
+       CHECK_VAL(data->payload2_len2, size2);
+       CHECK_VAL(data->total2, size1 + size2);
+       CHECK(memcmp(data->payload2, exp_str, size1 + size2), "content_check",
+             "doesn't match!");
+
+       CHECK_VAL(data->payload3_len1, size1);
+       CHECK_VAL(data->payload3_len2, size2);
+       CHECK_VAL(data->total3, size1 + size2);
+       CHECK(memcmp(data->payload3, exp_str, size1 + size2), "content_check",
+             "doesn't match!");
+
+       CHECK_VAL(data->payload4_len1, size1);
+       CHECK_VAL(data->payload4_len2, size2);
+       CHECK_VAL(data->total4, size1 + size2);
+       CHECK(memcmp(data->payload4, exp_str, size1 + size2), "content_check",
+             "doesn't match!");
+cleanup:
+       test_varlen__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c b/tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c
new file mode 100644 (file)
index 0000000..0176573
--- /dev/null
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <uapi/linux/bpf.h>
+#include <linux/if_link.h>
+#include <test_progs.h>
+
+#include "test_xdp_with_cpumap_helpers.skel.h"
+
+#define IFINDEX_LO     1
+
+void test_xdp_with_cpumap_helpers(void)
+{
+       struct test_xdp_with_cpumap_helpers *skel;
+       struct bpf_prog_info info = {};
+       struct bpf_cpumap_val val = {
+               .qsize = 192,
+       };
+       __u32 duration = 0, idx = 0;
+       __u32 len = sizeof(info);
+       int err, prog_fd, map_fd;
+
+       skel = test_xdp_with_cpumap_helpers__open_and_load();
+       if (CHECK_FAIL(!skel)) {
+               perror("test_xdp_with_cpumap_helpers__open_and_load");
+               return;
+       }
+
+       /* can not attach program with cpumaps that allow programs
+        * as xdp generic
+        */
+       prog_fd = bpf_program__fd(skel->progs.xdp_redir_prog);
+       err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE);
+       CHECK(err == 0, "Generic attach of program with 8-byte CPUMAP",
+             "should have failed\n");
+
+       prog_fd = bpf_program__fd(skel->progs.xdp_dummy_cm);
+       map_fd = bpf_map__fd(skel->maps.cpu_map);
+       err = bpf_obj_get_info_by_fd(prog_fd, &info, &len);
+       if (CHECK_FAIL(err))
+               goto out_close;
+
+       val.bpf_prog.fd = prog_fd;
+       err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+       CHECK(err, "Add program to cpumap entry", "err %d errno %d\n",
+             err, errno);
+
+       err = bpf_map_lookup_elem(map_fd, &idx, &val);
+       CHECK(err, "Read cpumap entry", "err %d errno %d\n", err, errno);
+       CHECK(info.id != val.bpf_prog.id, "Expected program id in cpumap entry",
+             "expected %u read %u\n", info.id, val.bpf_prog.id);
+
+       /* can not attach BPF_XDP_CPUMAP program to a device */
+       err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd, XDP_FLAGS_SKB_MODE);
+       CHECK(err == 0, "Attach of BPF_XDP_CPUMAP program",
+             "should have failed\n");
+
+       val.qsize = 192;
+       val.bpf_prog.fd = bpf_program__fd(skel->progs.xdp_dummy_prog);
+       err = bpf_map_update_elem(map_fd, &idx, &val, 0);
+       CHECK(err == 0, "Add non-BPF_XDP_CPUMAP program to cpumap entry",
+             "should have failed\n");
+
+out_close:
+       test_xdp_with_cpumap_helpers__destroy(skel);
+}
+
+void test_xdp_cpumap_attach(void)
+{
+       if (test__start_subtest("cpumap_with_progs"))
+               test_xdp_with_cpumap_helpers();
+}
index 7897c8f..ef57408 100644 (file)
@@ -480,10 +480,9 @@ static __always_inline void hystart_update(struct sock *sk, __u32 delay)
 
        if (hystart_detect & HYSTART_DELAY) {
                /* obtain the minimum delay of more than sampling packets */
+               if (ca->curr_rtt > delay)
+                       ca->curr_rtt = delay;
                if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
-                       if (ca->curr_rtt > delay)
-                               ca->curr_rtt = delay;
-
                        ca->sample_cnt++;
                } else {
                        if (ca->curr_rtt > ca->delay_min +
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter.h b/tools/testing/selftests/bpf/progs/bpf_iter.h
new file mode 100644 (file)
index 0000000..17db3ba
--- /dev/null
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2020 Facebook */
+/* "undefine" structs in vmlinux.h, because we "override" them below */
+#define bpf_iter_meta bpf_iter_meta___not_used
+#define bpf_iter__bpf_map bpf_iter__bpf_map___not_used
+#define bpf_iter__ipv6_route bpf_iter__ipv6_route___not_used
+#define bpf_iter__netlink bpf_iter__netlink___not_used
+#define bpf_iter__task bpf_iter__task___not_used
+#define bpf_iter__task_file bpf_iter__task_file___not_used
+#define bpf_iter__tcp bpf_iter__tcp___not_used
+#define tcp6_sock tcp6_sock___not_used
+#define bpf_iter__udp bpf_iter__udp___not_used
+#define udp6_sock udp6_sock___not_used
+#include "vmlinux.h"
+#undef bpf_iter_meta
+#undef bpf_iter__bpf_map
+#undef bpf_iter__ipv6_route
+#undef bpf_iter__netlink
+#undef bpf_iter__task
+#undef bpf_iter__task_file
+#undef bpf_iter__tcp
+#undef tcp6_sock
+#undef bpf_iter__udp
+#undef udp6_sock
+
+struct bpf_iter_meta {
+       struct seq_file *seq;
+       __u64 session_id;
+       __u64 seq_num;
+} __attribute__((preserve_access_index));
+
+struct bpf_iter__ipv6_route {
+       struct bpf_iter_meta *meta;
+       struct fib6_info *rt;
+} __attribute__((preserve_access_index));
+
+struct bpf_iter__netlink {
+       struct bpf_iter_meta *meta;
+       struct netlink_sock *sk;
+} __attribute__((preserve_access_index));
+
+struct bpf_iter__task {
+       struct bpf_iter_meta *meta;
+       struct task_struct *task;
+} __attribute__((preserve_access_index));
+
+struct bpf_iter__task_file {
+       struct bpf_iter_meta *meta;
+       struct task_struct *task;
+       __u32 fd;
+       struct file *file;
+} __attribute__((preserve_access_index));
+
+struct bpf_iter__bpf_map {
+       struct bpf_iter_meta *meta;
+       struct bpf_map *map;
+} __attribute__((preserve_access_index));
+
+struct bpf_iter__tcp {
+       struct bpf_iter_meta *meta;
+       struct sock_common *sk_common;
+       uid_t uid;
+} __attribute__((preserve_access_index));
+
+struct tcp6_sock {
+       struct tcp_sock tcp;
+       struct ipv6_pinfo inet6;
+} __attribute__((preserve_access_index));
+
+struct bpf_iter__udp {
+       struct bpf_iter_meta *meta;
+       struct udp_sock *udp_sk;
+       uid_t uid __attribute__((aligned(8)));
+       int bucket __attribute__((aligned(8)));
+} __attribute__((preserve_access_index));
+
+struct udp6_sock {
+       struct udp_sock udp;
+       struct ipv6_pinfo inet6;
+} __attribute__((preserve_access_index));
index b57bd6f..08651b2 100644 (file)
@@ -1,27 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2020 Facebook */
-/* "undefine" structs in vmlinux.h, because we "override" them below */
-#define bpf_iter_meta bpf_iter_meta___not_used
-#define bpf_iter__bpf_map bpf_iter__bpf_map___not_used
-#include "vmlinux.h"
-#undef bpf_iter_meta
-#undef bpf_iter__bpf_map
+#include "bpf_iter.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
 char _license[] SEC("license") = "GPL";
 
-struct bpf_iter_meta {
-       struct seq_file *seq;
-       __u64 session_id;
-       __u64 seq_num;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__bpf_map {
-       struct bpf_iter_meta *meta;
-       struct bpf_map *map;
-} __attribute__((preserve_access_index));
-
 SEC("iter/bpf_map")
 int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
 {
index c8e9ca7..d58d9f1 100644 (file)
@@ -1,35 +1,14 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2020 Facebook */
-/* "undefine" structs in vmlinux.h, because we "override" them below */
-#define bpf_iter_meta bpf_iter_meta___not_used
-#define bpf_iter__ipv6_route bpf_iter__ipv6_route___not_used
-#include "vmlinux.h"
-#undef bpf_iter_meta
-#undef bpf_iter__ipv6_route
+#include "bpf_iter.h"
+#include "bpf_tracing_net.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
-struct bpf_iter_meta {
-       struct seq_file *seq;
-       __u64 session_id;
-       __u64 seq_num;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__ipv6_route {
-       struct bpf_iter_meta *meta;
-       struct fib6_info *rt;
-} __attribute__((preserve_access_index));
-
 char _license[] SEC("license") = "GPL";
 
 extern bool CONFIG_IPV6_SUBTREES __kconfig __weak;
 
-#define RTF_GATEWAY            0x0002
-#define IFNAMSIZ               16
-#define fib_nh_gw_family       nh_common.nhc_gw_family
-#define fib_nh_gw6             nh_common.nhc_gw.ipv6
-#define fib_nh_dev             nh_common.nhc_dev
-
 SEC("iter/ipv6_route")
 int dump_ipv6_route(struct bpf_iter__ipv6_route *ctx)
 {
index e7b8753..95989f4 100644 (file)
@@ -1,31 +1,13 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2020 Facebook */
-/* "undefine" structs in vmlinux.h, because we "override" them below */
-#define bpf_iter_meta bpf_iter_meta___not_used
-#define bpf_iter__netlink bpf_iter__netlink___not_used
-#include "vmlinux.h"
-#undef bpf_iter_meta
-#undef bpf_iter__netlink
+#include "bpf_iter.h"
+#include "bpf_tracing_net.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
 char _license[] SEC("license") = "GPL";
 
-#define sk_rmem_alloc  sk_backlog.rmem_alloc
-#define sk_refcnt      __sk_common.skc_refcnt
-
-struct bpf_iter_meta {
-       struct seq_file *seq;
-       __u64 session_id;
-       __u64 seq_num;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__netlink {
-       struct bpf_iter_meta *meta;
-       struct netlink_sock *sk;
-} __attribute__((preserve_access_index));
-
-static inline struct inode *SOCK_INODE(struct socket *socket)
+static __attribute__((noinline)) struct inode *SOCK_INODE(struct socket *socket)
 {
        return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
 }
@@ -54,10 +36,10 @@ int dump_netlink(struct bpf_iter__netlink *ctx)
        if (!nlk->groups)  {
                group = 0;
        } else {
-               /* FIXME: temporary use bpf_probe_read here, needs
+               /* FIXME: temporary use bpf_probe_read_kernel here, needs
                 * verifier support to do direct access.
                 */
-               bpf_probe_read(&group, sizeof(group), &nlk->groups[0]);
+               bpf_probe_read_kernel(&group, sizeof(group), &nlk->groups[0]);
        }
        BPF_SEQ_PRINTF(seq, "%-10u %08x %-8d %-8d %-5d %-8d ",
                       nlk->portid, (u32)group,
@@ -74,7 +56,7 @@ int dump_netlink(struct bpf_iter__netlink *ctx)
                 * with current verifier.
                 */
                inode = SOCK_INODE(sk);
-               bpf_probe_read(&ino, sizeof(ino), &inode->i_ino);
+               bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino);
        }
        BPF_SEQ_PRINTF(seq, "%-8u %-8lu\n", s->sk_drops.counter, ino);
 
index ee75402..4983087 100644 (file)
@@ -1,27 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2020 Facebook */
-/* "undefine" structs in vmlinux.h, because we "override" them below */
-#define bpf_iter_meta bpf_iter_meta___not_used
-#define bpf_iter__task bpf_iter__task___not_used
-#include "vmlinux.h"
-#undef bpf_iter_meta
-#undef bpf_iter__task
+#include "bpf_iter.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
 char _license[] SEC("license") = "GPL";
 
-struct bpf_iter_meta {
-       struct seq_file *seq;
-       __u64 session_id;
-       __u64 seq_num;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__task {
-       struct bpf_iter_meta *meta;
-       struct task_struct *task;
-} __attribute__((preserve_access_index));
-
 SEC("iter/task")
 int dump_task(struct bpf_iter__task *ctx)
 {
index 0f0ec3d..8b787ba 100644 (file)
@@ -1,29 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2020 Facebook */
-/* "undefine" structs in vmlinux.h, because we "override" them below */
-#define bpf_iter_meta bpf_iter_meta___not_used
-#define bpf_iter__task_file bpf_iter__task_file___not_used
-#include "vmlinux.h"
-#undef bpf_iter_meta
-#undef bpf_iter__task_file
+#include "bpf_iter.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
 char _license[] SEC("license") = "GPL";
 
-struct bpf_iter_meta {
-       struct seq_file *seq;
-       __u64 session_id;
-       __u64 seq_num;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__task_file {
-       struct bpf_iter_meta *meta;
-       struct task_struct *task;
-       __u32 fd;
-       struct file *file;
-} __attribute__((preserve_access_index));
-
 SEC("iter/task_file")
 int dump_task_file(struct bpf_iter__task_file *ctx)
 {
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c
new file mode 100644 (file)
index 0000000..50e59a2
--- /dev/null
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#include "bpf_iter.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+#define MAX_STACK_TRACE_DEPTH   64
+unsigned long entries[MAX_STACK_TRACE_DEPTH] = {};
+#define SIZE_OF_ULONG (sizeof(unsigned long))
+
+SEC("iter/task")
+int dump_task_stack(struct bpf_iter__task *ctx)
+{
+       struct seq_file *seq = ctx->meta->seq;
+       struct task_struct *task = ctx->task;
+       long i, retlen;
+
+       if (task == (void *)0)
+               return 0;
+
+       retlen = bpf_get_task_stack(task, entries,
+                                   MAX_STACK_TRACE_DEPTH * SIZE_OF_ULONG, 0);
+       if (retlen < 0)
+               return 0;
+
+       BPF_SEQ_PRINTF(seq, "pid: %8u num_entries: %8u\n", task->pid,
+                      retlen / SIZE_OF_ULONG);
+       for (i = 0; i < MAX_STACK_TRACE_DEPTH; i++) {
+               if (retlen > i * SIZE_OF_ULONG)
+                       BPF_SEQ_PRINTF(seq, "[<0>] %pB\n", (void *)entries[i]);
+       }
+       BPF_SEQ_PRINTF(seq, "\n");
+
+       return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
new file mode 100644 (file)
index 0000000..54380c5
--- /dev/null
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#include "bpf_iter.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_endian.h>
+
+char _license[] SEC("license") = "GPL";
+
+static int hlist_unhashed_lockless(const struct hlist_node *h)
+{
+        return !(h->pprev);
+}
+
+static int timer_pending(const struct timer_list * timer)
+{
+       return !hlist_unhashed_lockless(&timer->entry);
+}
+
+extern unsigned CONFIG_HZ __kconfig;
+
+#define USER_HZ                100
+#define NSEC_PER_SEC   1000000000ULL
+static clock_t jiffies_to_clock_t(unsigned long x)
+{
+       /* The implementation here is tailored to a particular
+        * setting of USER_HZ.
+        */
+       u64 tick_nsec = (NSEC_PER_SEC + CONFIG_HZ/2) / CONFIG_HZ;
+       u64 user_hz_nsec = NSEC_PER_SEC / USER_HZ;
+
+       if ((tick_nsec % user_hz_nsec) == 0) {
+               if (CONFIG_HZ < USER_HZ)
+                       return x * (USER_HZ / CONFIG_HZ);
+               else
+                       return x / (CONFIG_HZ / USER_HZ);
+       }
+       return x * tick_nsec/user_hz_nsec;
+}
+
+static clock_t jiffies_delta_to_clock_t(long delta)
+{
+       if (delta <= 0)
+               return 0;
+
+       return jiffies_to_clock_t(delta);
+}
+
+static long sock_i_ino(const struct sock *sk)
+{
+       const struct socket *sk_socket = sk->sk_socket;
+       const struct inode *inode;
+       unsigned long ino;
+
+       if (!sk_socket)
+               return 0;
+
+       inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode;
+       bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino);
+       return ino;
+}
+
+static bool
+inet_csk_in_pingpong_mode(const struct inet_connection_sock *icsk)
+{
+       return icsk->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
+}
+
+static bool tcp_in_initial_slowstart(const struct tcp_sock *tcp)
+{
+       return tcp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
+}
+
+static int dump_tcp_sock(struct seq_file *seq, struct tcp_sock *tp,
+                        uid_t uid, __u32 seq_num)
+{
+       const struct inet_connection_sock *icsk;
+       const struct fastopen_queue *fastopenq;
+       const struct inet_sock *inet;
+       unsigned long timer_expires;
+       const struct sock *sp;
+       __u16 destp, srcp;
+       __be32 dest, src;
+       int timer_active;
+       int rx_queue;
+       int state;
+
+       icsk = &tp->inet_conn;
+       inet = &icsk->icsk_inet;
+       sp = &inet->sk;
+       fastopenq = &icsk->icsk_accept_queue.fastopenq;
+
+       dest = inet->inet_daddr;
+       src = inet->inet_rcv_saddr;
+       destp = bpf_ntohs(inet->inet_dport);
+       srcp = bpf_ntohs(inet->inet_sport);
+
+       if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
+           icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
+           icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
+               timer_active = 1;
+               timer_expires = icsk->icsk_timeout;
+       } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
+               timer_active = 4;
+               timer_expires = icsk->icsk_timeout;
+       } else if (timer_pending(&sp->sk_timer)) {
+               timer_active = 2;
+               timer_expires = sp->sk_timer.expires;
+       } else {
+               timer_active = 0;
+               timer_expires = bpf_jiffies64();
+       }
+
+       state = sp->sk_state;
+       if (state == TCP_LISTEN) {
+               rx_queue = sp->sk_ack_backlog;
+       } else {
+               rx_queue = tp->rcv_nxt - tp->copied_seq;
+               if (rx_queue < 0)
+                       rx_queue = 0;
+       }
+
+       BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ",
+                      seq_num, src, srcp, dest, destp);
+       BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d ",
+                      state,
+                      tp->write_seq - tp->snd_una, rx_queue,
+                      timer_active,
+                      jiffies_delta_to_clock_t(timer_expires - bpf_jiffies64()),
+                      icsk->icsk_retransmits, uid,
+                      icsk->icsk_probes_out,
+                      sock_i_ino(sp),
+                      sp->sk_refcnt.refs.counter);
+       BPF_SEQ_PRINTF(seq, "%pK %lu %lu %u %u %d\n",
+                      tp,
+                      jiffies_to_clock_t(icsk->icsk_rto),
+                      jiffies_to_clock_t(icsk->icsk_ack.ato),
+                      (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(icsk),
+                      tp->snd_cwnd,
+                      state == TCP_LISTEN ? fastopenq->max_qlen
+                               : (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
+                     );
+
+       return 0;
+}
+
+static int dump_tw_sock(struct seq_file *seq, struct tcp_timewait_sock *ttw,
+                       uid_t uid, __u32 seq_num)
+{
+       struct inet_timewait_sock *tw = &ttw->tw_sk;
+       __u16 destp, srcp;
+       __be32 dest, src;
+       long delta;
+
+       delta = tw->tw_timer.expires - bpf_jiffies64();
+       dest = tw->tw_daddr;
+       src  = tw->tw_rcv_saddr;
+       destp = bpf_ntohs(tw->tw_dport);
+       srcp  = bpf_ntohs(tw->tw_sport);
+
+       BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ",
+                      seq_num, src, srcp, dest, destp);
+
+       BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
+                      tw->tw_substate, 0, 0,
+                      3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
+                      tw->tw_refcnt.refs.counter, tw);
+
+       return 0;
+}
+
+static int dump_req_sock(struct seq_file *seq, struct tcp_request_sock *treq,
+                        uid_t uid, __u32 seq_num)
+{
+       struct inet_request_sock *irsk = &treq->req;
+       struct request_sock *req = &irsk->req;
+       long ttd;
+
+       ttd = req->rsk_timer.expires - bpf_jiffies64();
+
+       if (ttd < 0)
+               ttd = 0;
+
+       BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ",
+                      seq_num, irsk->ir_loc_addr,
+                      irsk->ir_num, irsk->ir_rmt_addr,
+                      bpf_ntohs(irsk->ir_rmt_port));
+       BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
+                      TCP_SYN_RECV, 0, 0, 1, jiffies_to_clock_t(ttd),
+                      req->num_timeout, uid, 0, 0, 0, req);
+
+       return 0;
+}
+
+SEC("iter/tcp")
+int dump_tcp4(struct bpf_iter__tcp *ctx)
+{
+       struct sock_common *sk_common = ctx->sk_common;
+       struct seq_file *seq = ctx->meta->seq;
+       struct tcp_timewait_sock *tw;
+       struct tcp_request_sock *req;
+       struct tcp_sock *tp;
+       uid_t uid = ctx->uid;
+       __u32 seq_num;
+
+       if (sk_common == (void *)0)
+               return 0;
+
+       seq_num = ctx->meta->seq_num;
+       if (seq_num == 0)
+               BPF_SEQ_PRINTF(seq, "  sl  "
+                                   "local_address "
+                                   "rem_address   "
+                                   "st tx_queue rx_queue tr tm->when retrnsmt"
+                                   "   uid  timeout inode\n");
+
+       if (sk_common->skc_family != AF_INET)
+               return 0;
+
+       tp = bpf_skc_to_tcp_sock(sk_common);
+       if (tp)
+               return dump_tcp_sock(seq, tp, uid, seq_num);
+
+       tw = bpf_skc_to_tcp_timewait_sock(sk_common);
+       if (tw)
+               return dump_tw_sock(seq, tw, uid, seq_num);
+
+       req = bpf_skc_to_tcp_request_sock(sk_common);
+       if (req)
+               return dump_req_sock(seq, req, uid, seq_num);
+
+       return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c
new file mode 100644 (file)
index 0000000..b4fbddf
--- /dev/null
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#include "bpf_iter.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_endian.h>
+
+char _license[] SEC("license") = "GPL";
+
+static int hlist_unhashed_lockless(const struct hlist_node *h)
+{
+        return !(h->pprev);
+}
+
+static int timer_pending(const struct timer_list * timer)
+{
+       return !hlist_unhashed_lockless(&timer->entry);
+}
+
+extern unsigned CONFIG_HZ __kconfig;
+
+#define USER_HZ                100
+#define NSEC_PER_SEC   1000000000ULL
+static clock_t jiffies_to_clock_t(unsigned long x)
+{
+       /* The implementation here is tailored to a particular
+        * setting of USER_HZ.
+        */
+       u64 tick_nsec = (NSEC_PER_SEC + CONFIG_HZ/2) / CONFIG_HZ;
+       u64 user_hz_nsec = NSEC_PER_SEC / USER_HZ;
+
+       if ((tick_nsec % user_hz_nsec) == 0) {
+               if (CONFIG_HZ < USER_HZ)
+                       return x * (USER_HZ / CONFIG_HZ);
+               else
+                       return x / (CONFIG_HZ / USER_HZ);
+       }
+       return x * tick_nsec/user_hz_nsec;
+}
+
+static clock_t jiffies_delta_to_clock_t(long delta)
+{
+       if (delta <= 0)
+               return 0;
+
+       return jiffies_to_clock_t(delta);
+}
+
+static long sock_i_ino(const struct sock *sk)
+{
+       const struct socket *sk_socket = sk->sk_socket;
+       const struct inode *inode;
+       unsigned long ino;
+
+       if (!sk_socket)
+               return 0;
+
+       inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode;
+       bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino);
+       return ino;
+}
+
+static bool
+inet_csk_in_pingpong_mode(const struct inet_connection_sock *icsk)
+{
+       return icsk->icsk_ack.pingpong >= TCP_PINGPONG_THRESH;
+}
+
+static bool tcp_in_initial_slowstart(const struct tcp_sock *tcp)
+{
+       return tcp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
+}
+
+static int dump_tcp6_sock(struct seq_file *seq, struct tcp6_sock *tp,
+                        uid_t uid, __u32 seq_num)
+{
+       const struct inet_connection_sock *icsk;
+       const struct fastopen_queue *fastopenq;
+       const struct in6_addr *dest, *src;
+       const struct inet_sock *inet;
+       unsigned long timer_expires;
+       const struct sock *sp;
+       __u16 destp, srcp;
+       int timer_active;
+       int rx_queue;
+       int state;
+
+       icsk = &tp->tcp.inet_conn;
+       inet = &icsk->icsk_inet;
+       sp = &inet->sk;
+       fastopenq = &icsk->icsk_accept_queue.fastopenq;
+
+       dest = &sp->sk_v6_daddr;
+       src = &sp->sk_v6_rcv_saddr;
+       destp = bpf_ntohs(inet->inet_dport);
+       srcp = bpf_ntohs(inet->inet_sport);
+
+       if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
+           icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
+           icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
+               timer_active = 1;
+               timer_expires = icsk->icsk_timeout;
+       } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
+               timer_active = 4;
+               timer_expires = icsk->icsk_timeout;
+       } else if (timer_pending(&sp->sk_timer)) {
+               timer_active = 2;
+               timer_expires = sp->sk_timer.expires;
+       } else {
+               timer_active = 0;
+               timer_expires = bpf_jiffies64();
+       }
+
+       state = sp->sk_state;
+       if (state == TCP_LISTEN) {
+               rx_queue = sp->sk_ack_backlog;
+       } else {
+               rx_queue = tp->tcp.rcv_nxt - tp->tcp.copied_seq;
+               if (rx_queue < 0)
+                       rx_queue = 0;
+       }
+
+       BPF_SEQ_PRINTF(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ",
+                      seq_num,
+                      src->s6_addr32[0], src->s6_addr32[1],
+                      src->s6_addr32[2], src->s6_addr32[3], srcp,
+                      dest->s6_addr32[0], dest->s6_addr32[1],
+                      dest->s6_addr32[2], dest->s6_addr32[3], destp);
+       BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d ",
+                      state,
+                      tp->tcp.write_seq - tp->tcp.snd_una, rx_queue,
+                      timer_active,
+                      jiffies_delta_to_clock_t(timer_expires - bpf_jiffies64()),
+                      icsk->icsk_retransmits, uid,
+                      icsk->icsk_probes_out,
+                      sock_i_ino(sp),
+                      sp->sk_refcnt.refs.counter);
+       BPF_SEQ_PRINTF(seq, "%pK %lu %lu %u %u %d\n",
+                      tp,
+                      jiffies_to_clock_t(icsk->icsk_rto),
+                      jiffies_to_clock_t(icsk->icsk_ack.ato),
+                      (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(icsk),
+                      tp->tcp.snd_cwnd,
+                      state == TCP_LISTEN ? fastopenq->max_qlen
+                               : (tcp_in_initial_slowstart(&tp->tcp) ? -1
+                                                                     : tp->tcp.snd_ssthresh)
+                     );
+
+       return 0;
+}
+
+static int dump_tw_sock(struct seq_file *seq, struct tcp_timewait_sock *ttw,
+                       uid_t uid, __u32 seq_num)
+{
+       struct inet_timewait_sock *tw = &ttw->tw_sk;
+       const struct in6_addr *dest, *src;
+       __u16 destp, srcp;
+       long delta;
+
+       delta = tw->tw_timer.expires - bpf_jiffies64();
+       dest = &tw->tw_v6_daddr;
+       src  = &tw->tw_v6_rcv_saddr;
+       destp = bpf_ntohs(tw->tw_dport);
+       srcp  = bpf_ntohs(tw->tw_sport);
+
+       BPF_SEQ_PRINTF(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ",
+                      seq_num,
+                      src->s6_addr32[0], src->s6_addr32[1],
+                      src->s6_addr32[2], src->s6_addr32[3], srcp,
+                      dest->s6_addr32[0], dest->s6_addr32[1],
+                      dest->s6_addr32[2], dest->s6_addr32[3], destp);
+
+       BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
+                      tw->tw_substate, 0, 0,
+                      3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
+                      tw->tw_refcnt.refs.counter, tw);
+
+       return 0;
+}
+
+static int dump_req_sock(struct seq_file *seq, struct tcp_request_sock *treq,
+                        uid_t uid, __u32 seq_num)
+{
+       struct inet_request_sock *irsk = &treq->req;
+       struct request_sock *req = &irsk->req;
+       struct in6_addr *src, *dest;
+       long ttd;
+
+       ttd = req->rsk_timer.expires - bpf_jiffies64();
+       src = &irsk->ir_v6_loc_addr;
+       dest = &irsk->ir_v6_rmt_addr;
+
+       if (ttd < 0)
+               ttd = 0;
+
+       BPF_SEQ_PRINTF(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ",
+                      seq_num,
+                      src->s6_addr32[0], src->s6_addr32[1],
+                      src->s6_addr32[2], src->s6_addr32[3],
+                      irsk->ir_num,
+                      dest->s6_addr32[0], dest->s6_addr32[1],
+                      dest->s6_addr32[2], dest->s6_addr32[3],
+                      bpf_ntohs(irsk->ir_rmt_port));
+       BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
+                      TCP_SYN_RECV, 0, 0, 1, jiffies_to_clock_t(ttd),
+                      req->num_timeout, uid, 0, 0, 0, req);
+
+       return 0;
+}
+
+SEC("iter/tcp")
+int dump_tcp6(struct bpf_iter__tcp *ctx)
+{
+       struct sock_common *sk_common = ctx->sk_common;
+       struct seq_file *seq = ctx->meta->seq;
+       struct tcp_timewait_sock *tw;
+       struct tcp_request_sock *req;
+       struct tcp6_sock *tp;
+       uid_t uid = ctx->uid;
+       __u32 seq_num;
+
+       if (sk_common == (void *)0)
+               return 0;
+
+       seq_num = ctx->meta->seq_num;
+       if (seq_num == 0)
+               BPF_SEQ_PRINTF(seq, "  sl  "
+                                   "local_address                         "
+                                   "remote_address                        "
+                                   "st tx_queue rx_queue tr tm->when retrnsmt"
+                                   "   uid  timeout inode\n");
+
+       if (sk_common->skc_family != AF_INET6)
+               return 0;
+
+       tp = bpf_skc_to_tcp6_sock(sk_common);
+       if (tp)
+               return dump_tcp6_sock(seq, tp, uid, seq_num);
+
+       tw = bpf_skc_to_tcp_timewait_sock(sk_common);
+       if (tw)
+               return dump_tw_sock(seq, tw, uid, seq_num);
+
+       req = bpf_skc_to_tcp_request_sock(sk_common);
+       if (req)
+               return dump_req_sock(seq, req, uid, seq_num);
+
+       return 0;
+}
index 13c2c90..2a4647f 100644 (file)
@@ -1,25 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2020 Facebook */
-#define bpf_iter_meta bpf_iter_meta___not_used
-#define bpf_iter__task bpf_iter__task___not_used
-#include "vmlinux.h"
-#undef bpf_iter_meta
-#undef bpf_iter__task
+#include "bpf_iter.h"
 #include <bpf/bpf_helpers.h>
 
 char _license[] SEC("license") = "GPL";
 
-struct bpf_iter_meta {
-       struct seq_file *seq;
-       __u64 session_id;
-       __u64 seq_num;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__task {
-       struct bpf_iter_meta *meta;
-       struct task_struct *task;
-} __attribute__((preserve_access_index));
-
 SEC("iter/task")
 int dump_task(struct bpf_iter__task *ctx)
 {
index 0aa71b3..ee49493 100644 (file)
@@ -1,25 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2020 Facebook */
-#define bpf_iter_meta bpf_iter_meta___not_used
-#define bpf_iter__bpf_map bpf_iter__bpf_map___not_used
-#include "vmlinux.h"
-#undef bpf_iter_meta
-#undef bpf_iter__bpf_map
+#include "bpf_iter.h"
 #include <bpf/bpf_helpers.h>
 
 char _license[] SEC("license") = "GPL";
 
-struct bpf_iter_meta {
-       struct seq_file *seq;
-       __u64 session_id;
-       __u64 seq_num;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__bpf_map {
-       struct bpf_iter_meta *meta;
-       struct bpf_map *map;
-} __attribute__((preserve_access_index));
-
 __u32 map1_id = 0, map2_id = 0;
 __u32 map1_accessed = 0, map2_accessed = 0;
 __u64 map1_seqnum = 0, map2_seqnum1 = 0, map2_seqnum2 = 0;
index dee1339..d5e3df6 100644 (file)
@@ -1,27 +1,11 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (c) 2020 Facebook */
-/* "undefine" structs in vmlinux.h, because we "override" them below */
-#define bpf_iter_meta bpf_iter_meta___not_used
-#define bpf_iter__task bpf_iter__task___not_used
-#include "vmlinux.h"
-#undef bpf_iter_meta
-#undef bpf_iter__task
+#include "bpf_iter.h"
 #include <bpf/bpf_helpers.h>
 
 char _license[] SEC("license") = "GPL";
 int count = 0;
 
-struct bpf_iter_meta {
-       struct seq_file *seq;
-       __u64 session_id;
-       __u64 seq_num;
-} __attribute__((preserve_access_index));
-
-struct bpf_iter__task {
-       struct bpf_iter_meta *meta;
-       struct task_struct *task;
-} __attribute__((preserve_access_index));
-
 SEC("iter/task")
 int dump_task(struct bpf_iter__task *ctx)
 {
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp4.c
new file mode 100644 (file)
index 0000000..f258583
--- /dev/null
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#include "bpf_iter.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_endian.h>
+
+char _license[] SEC("license") = "GPL";
+
+static long sock_i_ino(const struct sock *sk)
+{
+       const struct socket *sk_socket = sk->sk_socket;
+       const struct inode *inode;
+       unsigned long ino;
+
+       if (!sk_socket)
+               return 0;
+
+       inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode;
+       bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino);
+       return ino;
+}
+
+SEC("iter/udp")
+int dump_udp4(struct bpf_iter__udp *ctx)
+{
+       struct seq_file *seq = ctx->meta->seq;
+       struct udp_sock *udp_sk = ctx->udp_sk;
+       struct inet_sock *inet;
+       __u16 srcp, destp;
+       __be32 dest, src;
+       __u32 seq_num;
+       int rqueue;
+
+       if (udp_sk == (void *)0)
+               return 0;
+
+       seq_num = ctx->meta->seq_num;
+       if (seq_num == 0)
+               BPF_SEQ_PRINTF(seq,
+                              "  sl  local_address rem_address   st tx_queue "
+                              "rx_queue tr tm->when retrnsmt   uid  timeout "
+                              "inode ref pointer drops\n");
+
+       /* filter out udp6 sockets */
+       inet = &udp_sk->inet;
+       if (inet->sk.sk_family == AF_INET6)
+               return 0;
+
+       inet = &udp_sk->inet;
+       dest = inet->inet_daddr;
+       src = inet->inet_rcv_saddr;
+       srcp = bpf_ntohs(inet->inet_sport);
+       destp = bpf_ntohs(inet->inet_dport);
+       rqueue = inet->sk.sk_rmem_alloc.counter - udp_sk->forward_deficit;
+
+       BPF_SEQ_PRINTF(seq, "%5d: %08X:%04X %08X:%04X ",
+                      ctx->bucket, src, srcp, dest, destp);
+
+       BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u\n",
+                      inet->sk.sk_state,
+                      inet->sk.sk_wmem_alloc.refs.counter - 1,
+                      rqueue,
+                      0, 0L, 0, ctx->uid, 0,
+                      sock_i_ino(&inet->sk),
+                      inet->sk.sk_refcnt.refs.counter, udp_sk,
+                      inet->sk.sk_drops.counter);
+
+       return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c b/tools/testing/selftests/bpf/progs/bpf_iter_udp6.c
new file mode 100644 (file)
index 0000000..65f93bb
--- /dev/null
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#include "bpf_iter.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_endian.h>
+
+char _license[] SEC("license") = "GPL";
+
+#define IPV6_SEQ_DGRAM_HEADER                          \
+       "  sl  "                                        \
+       "local_address                         "        \
+       "remote_address                        "        \
+       "st tx_queue rx_queue tr tm->when retrnsmt"     \
+       "   uid  timeout inode ref pointer drops\n"
+
+static long sock_i_ino(const struct sock *sk)
+{
+       const struct socket *sk_socket = sk->sk_socket;
+       const struct inode *inode;
+       unsigned long ino;
+
+       if (!sk_socket)
+               return 0;
+
+       inode = &container_of(sk_socket, struct socket_alloc, socket)->vfs_inode;
+       bpf_probe_read_kernel(&ino, sizeof(ino), &inode->i_ino);
+       return ino;
+}
+
+SEC("iter/udp")
+int dump_udp6(struct bpf_iter__udp *ctx)
+{
+       struct seq_file *seq = ctx->meta->seq;
+       struct udp_sock *udp_sk = ctx->udp_sk;
+       const struct in6_addr *dest, *src;
+       struct udp6_sock *udp6_sk;
+       struct inet_sock *inet;
+       __u16 srcp, destp;
+       __u32 seq_num;
+       int rqueue;
+
+       if (udp_sk == (void *)0)
+               return 0;
+
+       seq_num = ctx->meta->seq_num;
+       if (seq_num == 0)
+               BPF_SEQ_PRINTF(seq, IPV6_SEQ_DGRAM_HEADER);
+
+       udp6_sk = bpf_skc_to_udp6_sock(udp_sk);
+       if (udp6_sk == (void *)0)
+               return 0;
+
+       inet = &udp_sk->inet;
+       srcp = bpf_ntohs(inet->inet_sport);
+       destp = bpf_ntohs(inet->inet_dport);
+       rqueue = inet->sk.sk_rmem_alloc.counter - udp_sk->forward_deficit;
+       dest  = &inet->sk.sk_v6_daddr;
+       src   = &inet->sk.sk_v6_rcv_saddr;
+
+       BPF_SEQ_PRINTF(seq, "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X ",
+                      ctx->bucket,
+                      src->s6_addr32[0], src->s6_addr32[1],
+                      src->s6_addr32[2], src->s6_addr32[3], srcp,
+                      dest->s6_addr32[0], dest->s6_addr32[1],
+                      dest->s6_addr32[2], dest->s6_addr32[3], destp);
+
+       BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u\n",
+                      inet->sk.sk_state,
+                      inet->sk.sk_wmem_alloc.refs.counter - 1,
+                      rqueue,
+                      0, 0L, 0, ctx->uid, 0,
+                      sock_i_ino(&inet->sk),
+                      inet->sk.sk_refcnt.refs.counter, udp_sk,
+                      inet->sk.sk_drops.counter);
+
+       return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_tracing_net.h b/tools/testing/selftests/bpf/progs/bpf_tracing_net.h
new file mode 100644 (file)
index 0000000..0137891
--- /dev/null
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __BPF_TRACING_NET_H__
+#define __BPF_TRACING_NET_H__
+
+#define AF_INET                        2
+#define AF_INET6               10
+
+#define ICSK_TIME_RETRANS      1
+#define ICSK_TIME_PROBE0       3
+#define ICSK_TIME_LOSS_PROBE   5
+#define ICSK_TIME_REO_TIMEOUT  6
+
+#define IFNAMSIZ               16
+
+#define RTF_GATEWAY            0x0002
+
+#define TCP_INFINITE_SSTHRESH  0x7fffffff
+#define TCP_PINGPONG_THRESH    3
+
+#define fib_nh_dev             nh_common.nhc_dev
+#define fib_nh_gw_family       nh_common.nhc_gw_family
+#define fib_nh_gw6             nh_common.nhc_gw.ipv6
+
+#define inet_daddr             sk.__sk_common.skc_daddr
+#define inet_rcv_saddr         sk.__sk_common.skc_rcv_saddr
+#define inet_dport             sk.__sk_common.skc_dport
+
+#define ir_loc_addr            req.__req_common.skc_rcv_saddr
+#define ir_num                 req.__req_common.skc_num
+#define ir_rmt_addr            req.__req_common.skc_daddr
+#define ir_rmt_port            req.__req_common.skc_dport
+#define ir_v6_rmt_addr         req.__req_common.skc_v6_daddr
+#define ir_v6_loc_addr         req.__req_common.skc_v6_rcv_saddr
+
+#define sk_family              __sk_common.skc_family
+#define sk_rmem_alloc          sk_backlog.rmem_alloc
+#define sk_refcnt              __sk_common.skc_refcnt
+#define sk_state               __sk_common.skc_state
+#define sk_v6_daddr            __sk_common.skc_v6_daddr
+#define sk_v6_rcv_saddr                __sk_common.skc_v6_rcv_saddr
+
+#define s6_addr32              in6_u.u6_addr32
+
+#define tw_daddr               __tw_common.skc_daddr
+#define tw_rcv_saddr           __tw_common.skc_rcv_saddr
+#define tw_dport               __tw_common.skc_dport
+#define tw_refcnt              __tw_common.skc_refcnt
+#define tw_v6_daddr            __tw_common.skc_v6_daddr
+#define tw_v6_rcv_saddr                __tw_common.skc_v6_rcv_saddr
+
+#endif
diff --git a/tools/testing/selftests/bpf/progs/btf_data.c b/tools/testing/selftests/bpf/progs/btf_data.c
new file mode 100644 (file)
index 0000000..baa5252
--- /dev/null
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+struct S {
+       int     a;
+       int     b;
+       int     c;
+};
+
+union U {
+       int     a;
+       int     b;
+       int     c;
+};
+
+struct S1 {
+       int     a;
+       int     b;
+       int     c;
+};
+
+union U1 {
+       int     a;
+       int     b;
+       int     c;
+};
+
+typedef int T;
+typedef int S;
+typedef int U;
+typedef int T1;
+typedef int S1;
+typedef int U1;
+
+struct root_struct {
+       S               m_1;
+       T               m_2;
+       U               m_3;
+       S1              m_4;
+       T1              m_5;
+       U1              m_6;
+       struct S        m_7;
+       struct S1       m_8;
+       union  U        m_9;
+       union  U1       m_10;
+};
+
+int func(struct root_struct *root)
+{
+       return 0;
+}
index 1ab2c5e..b1b2773 100644 (file)
@@ -104,6 +104,30 @@ static __inline int bind_to_device(struct bpf_sock_addr *ctx)
        return 0;
 }
 
+static __inline int set_keepalive(struct bpf_sock_addr *ctx)
+{
+       int zero = 0, one = 1;
+
+       if (bpf_setsockopt(ctx, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one)))
+               return 1;
+       if (ctx->type == SOCK_STREAM) {
+               if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPIDLE, &one, sizeof(one)))
+                       return 1;
+               if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPINTVL, &one, sizeof(one)))
+                       return 1;
+               if (bpf_setsockopt(ctx, SOL_TCP, TCP_KEEPCNT, &one, sizeof(one)))
+                       return 1;
+               if (bpf_setsockopt(ctx, SOL_TCP, TCP_SYNCNT, &one, sizeof(one)))
+                       return 1;
+               if (bpf_setsockopt(ctx, SOL_TCP, TCP_USER_TIMEOUT, &one, sizeof(one)))
+                       return 1;
+       }
+       if (bpf_setsockopt(ctx, SOL_SOCKET, SO_KEEPALIVE, &zero, sizeof(zero)))
+               return 1;
+
+       return 0;
+}
+
 SEC("cgroup/connect4")
 int connect_v4_prog(struct bpf_sock_addr *ctx)
 {
@@ -121,6 +145,9 @@ int connect_v4_prog(struct bpf_sock_addr *ctx)
        if (bind_to_device(ctx))
                return 0;
 
+       if (set_keepalive(ctx))
+               return 0;
+
        if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
                return 0;
        else if (ctx->type == SOCK_STREAM)
index 9365b68..5f645fd 100644 (file)
@@ -55,3 +55,25 @@ int BPF_PROG(test6, __u64 a, void *b, short c, int d, void * e, __u64 f)
                e == (void *)20 && f == 21;
        return 0;
 }
+
+struct bpf_fentry_test_t {
+       struct bpf_fentry_test_t *a;
+};
+
+__u64 test7_result = 0;
+SEC("fentry/bpf_fentry_test7")
+int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
+{
+       if (arg == 0)
+               test7_result = 1;
+       return 0;
+}
+
+__u64 test8_result = 0;
+SEC("fentry/bpf_fentry_test8")
+int BPF_PROG(test8, struct bpf_fentry_test_t *arg)
+{
+       if (arg->a == 0)
+               test8_result = 1;
+       return 0;
+}
index bd1e17d..0952aff 100644 (file)
@@ -56,3 +56,25 @@ int BPF_PROG(test6, __u64 a, void *b, short c, int d, void *e, __u64 f, int ret)
                e == (void *)20 && f == 21 && ret == 111;
        return 0;
 }
+
+struct bpf_fentry_test_t {
+	/* Self-referential pointer; must name this struct (bpf_fentry_test_t),
+	 * matching the identical fixture in fentry_test.c and the kernel's
+	 * bpf_fentry_test7/test8 argument type. The original "struct
+	 * bpf_fentry_test *a;" compiled only as an incomplete-type pointer.
+	 */
+	struct bpf_fentry_test_t *a;
+};
+
+__u64 test7_result = 0;
+SEC("fexit/bpf_fentry_test7")
+int BPF_PROG(test7, struct bpf_fentry_test_t *arg)
+{
+       if (arg == 0)
+               test7_result = 1;
+       return 0;
+}
+
+__u64 test8_result = 0;
+SEC("fexit/bpf_fentry_test8")
+int BPF_PROG(test8, struct bpf_fentry_test_t *arg)
+{
+       if (arg->a == 0)
+               test8_result = 1;
+       return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
new file mode 100644 (file)
index 0000000..473665c
--- /dev/null
@@ -0,0 +1,686 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+#define LOOP_BOUND 0xf
+#define MAX_ENTRIES 8
+#define HALF_ENTRIES (MAX_ENTRIES >> 1)
+
+_Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");
+
+enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
+__u32 g_line = 0;
+
+#define VERIFY_TYPE(type, func) ({     \
+       g_map_type = type;              \
+       if (!func())                    \
+               return 0;               \
+})
+
+
+#define VERIFY(expr) ({                \
+       g_line = __LINE__;      \
+       if (!(expr))            \
+               return 0;       \
+})
+
+struct bpf_map_memory {
+       __u32 pages;
+} __attribute__((preserve_access_index));
+
+struct bpf_map {
+       enum bpf_map_type map_type;
+       __u32 key_size;
+       __u32 value_size;
+       __u32 max_entries;
+       __u32 id;
+       struct bpf_map_memory memory;
+} __attribute__((preserve_access_index));
+
+static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size,
+                                      __u32 value_size, __u32 max_entries)
+{
+       VERIFY(map->map_type == g_map_type);
+       VERIFY(map->key_size == key_size);
+       VERIFY(map->value_size == value_size);
+       VERIFY(map->max_entries == max_entries);
+       VERIFY(map->id > 0);
+       VERIFY(map->memory.pages > 0);
+
+       return 1;
+}
+
+static inline int check_bpf_map_ptr(struct bpf_map *indirect,
+                                   struct bpf_map *direct)
+{
+       VERIFY(indirect->map_type == direct->map_type);
+       VERIFY(indirect->key_size == direct->key_size);
+       VERIFY(indirect->value_size == direct->value_size);
+       VERIFY(indirect->max_entries == direct->max_entries);
+       VERIFY(indirect->id == direct->id);
+       VERIFY(indirect->memory.pages == direct->memory.pages);
+
+       return 1;
+}
+
+static inline int check(struct bpf_map *indirect, struct bpf_map *direct,
+                       __u32 key_size, __u32 value_size, __u32 max_entries)
+{
+       VERIFY(check_bpf_map_ptr(indirect, direct));
+       VERIFY(check_bpf_map_fields(indirect, key_size, value_size,
+                                   max_entries));
+       return 1;
+}
+
+static inline int check_default(struct bpf_map *indirect,
+                               struct bpf_map *direct)
+{
+       VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
+                    MAX_ENTRIES));
+       return 1;
+}
+
+typedef struct {
+       int counter;
+} atomic_t;
+
+struct bpf_htab {
+       struct bpf_map map;
+       atomic_t count;
+       __u32 n_buckets;
+       __u32 elem_size;
+} __attribute__((preserve_access_index));
+
+struct {
+       __uint(type, BPF_MAP_TYPE_HASH);
+       __uint(map_flags, BPF_F_NO_PREALLOC); /* to test bpf_htab.count */
+       __uint(max_entries, MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, __u32);
+} m_hash SEC(".maps");
+
+static inline int check_hash(void)
+{
+       struct bpf_htab *hash = (struct bpf_htab *)&m_hash;
+       struct bpf_map *map = (struct bpf_map *)&m_hash;
+       int i;
+
+       VERIFY(check_default(&hash->map, map));
+
+       VERIFY(hash->n_buckets == MAX_ENTRIES);
+       VERIFY(hash->elem_size == 64);
+
+       VERIFY(hash->count.counter == 0);
+       for (i = 0; i < HALF_ENTRIES; ++i) {
+               const __u32 key = i;
+               const __u32 val = 1;
+
+               if (bpf_map_update_elem(hash, &key, &val, 0))
+                       return 0;
+       }
+       VERIFY(hash->count.counter == HALF_ENTRIES);
+
+       return 1;
+}
+
+struct bpf_array {
+       struct bpf_map map;
+       __u32 elem_size;
+} __attribute__((preserve_access_index));
+
+struct {
+       __uint(type, BPF_MAP_TYPE_ARRAY);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, __u32);
+} m_array SEC(".maps");
+
+static inline int check_array(void)
+{
+       struct bpf_array *array = (struct bpf_array *)&m_array;
+       struct bpf_map *map = (struct bpf_map *)&m_array;
+       int i, n_lookups = 0, n_keys = 0;
+
+       VERIFY(check_default(&array->map, map));
+
+       VERIFY(array->elem_size == 8);
+
+       for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) {
+               const __u32 key = i;
+               __u32 *val = bpf_map_lookup_elem(array, &key);
+
+               ++n_lookups;
+               if (val)
+                       ++n_keys;
+       }
+
+       VERIFY(n_lookups == MAX_ENTRIES);
+       VERIFY(n_keys == MAX_ENTRIES);
+
+       return 1;
+}
+
+struct {
+       __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, __u32);
+} m_prog_array SEC(".maps");
+
+static inline int check_prog_array(void)
+{
+       struct bpf_array *prog_array = (struct bpf_array *)&m_prog_array;
+       struct bpf_map *map = (struct bpf_map *)&m_prog_array;
+
+       VERIFY(check_default(&prog_array->map, map));
+
+       return 1;
+}
+
+struct {
+       __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, __u32);
+} m_perf_event_array SEC(".maps");
+
+static inline int check_perf_event_array(void)
+{
+       struct bpf_array *perf_event_array = (struct bpf_array *)&m_perf_event_array;
+       struct bpf_map *map = (struct bpf_map *)&m_perf_event_array;
+
+       VERIFY(check_default(&perf_event_array->map, map));
+
+       return 1;
+}
+
+struct {
+       __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, __u32);
+} m_percpu_hash SEC(".maps");
+
+static inline int check_percpu_hash(void)
+{
+       struct bpf_htab *percpu_hash = (struct bpf_htab *)&m_percpu_hash;
+       struct bpf_map *map = (struct bpf_map *)&m_percpu_hash;
+
+       VERIFY(check_default(&percpu_hash->map, map));
+
+       return 1;
+}
+
+struct {
+       __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, __u32);
+} m_percpu_array SEC(".maps");
+
+static inline int check_percpu_array(void)
+{
+       struct bpf_array *percpu_array = (struct bpf_array *)&m_percpu_array;
+       struct bpf_map *map = (struct bpf_map *)&m_percpu_array;
+
+       VERIFY(check_default(&percpu_array->map, map));
+
+       return 1;
+}
+
+struct bpf_stack_map {
+       struct bpf_map map;
+} __attribute__((preserve_access_index));
+
+struct {
+       __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, __u64);
+} m_stack_trace SEC(".maps");
+
+static inline int check_stack_trace(void)
+{
+       struct bpf_stack_map *stack_trace =
+               (struct bpf_stack_map *)&m_stack_trace;
+       struct bpf_map *map = (struct bpf_map *)&m_stack_trace;
+
+       VERIFY(check(&stack_trace->map, map, sizeof(__u32), sizeof(__u64),
+                    MAX_ENTRIES));
+
+       return 1;
+}
+
+struct {
+       __uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, __u32);
+} m_cgroup_array SEC(".maps");
+
+static inline int check_cgroup_array(void)
+{
+       struct bpf_array *cgroup_array = (struct bpf_array *)&m_cgroup_array;
+       struct bpf_map *map = (struct bpf_map *)&m_cgroup_array;
+
+       VERIFY(check_default(&cgroup_array->map, map));
+
+       return 1;
+}
+
+struct {
+       __uint(type, BPF_MAP_TYPE_LRU_HASH);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, __u32);
+} m_lru_hash SEC(".maps");
+
+static inline int check_lru_hash(void)
+{
+       struct bpf_htab *lru_hash = (struct bpf_htab *)&m_lru_hash;
+       struct bpf_map *map = (struct bpf_map *)&m_lru_hash;
+
+       VERIFY(check_default(&lru_hash->map, map));
+
+       return 1;
+}
+
+struct {
+       __uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, __u32);
+} m_lru_percpu_hash SEC(".maps");
+
+static inline int check_lru_percpu_hash(void)
+{
+       struct bpf_htab *lru_percpu_hash = (struct bpf_htab *)&m_lru_percpu_hash;
+       struct bpf_map *map = (struct bpf_map *)&m_lru_percpu_hash;
+
+       VERIFY(check_default(&lru_percpu_hash->map, map));
+
+       return 1;
+}
+
+struct lpm_trie {
+       struct bpf_map map;
+} __attribute__((preserve_access_index));
+
+struct lpm_key {
+       struct bpf_lpm_trie_key trie_key;
+       __u32 data;
+};
+
+struct {
+       __uint(type, BPF_MAP_TYPE_LPM_TRIE);
+       __uint(map_flags, BPF_F_NO_PREALLOC);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(key, struct lpm_key);
+       __type(value, __u32);
+} m_lpm_trie SEC(".maps");
+
+static inline int check_lpm_trie(void)
+{
+       struct lpm_trie *lpm_trie = (struct lpm_trie *)&m_lpm_trie;
+       struct bpf_map *map = (struct bpf_map *)&m_lpm_trie;
+
+       VERIFY(check(&lpm_trie->map, map, sizeof(struct lpm_key), sizeof(__u32),
+                    MAX_ENTRIES));
+
+       return 1;
+}
+
+struct inner_map {
+       __uint(type, BPF_MAP_TYPE_ARRAY);
+       __uint(max_entries, 1);
+       __type(key, __u32);
+       __type(value, __u32);
+} inner_map SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, __u32);
+       __array(values, struct {
+               __uint(type, BPF_MAP_TYPE_ARRAY);
+               __uint(max_entries, 1);
+               __type(key, __u32);
+               __type(value, __u32);
+       });
+} m_array_of_maps SEC(".maps") = {
+       .values = { (void *)&inner_map, 0, 0, 0, 0, 0, 0, 0, 0 },
+};
+
+static inline int check_array_of_maps(void)
+{
+       struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps;
+       struct bpf_map *map = (struct bpf_map *)&m_array_of_maps;
+
+       VERIFY(check_default(&array_of_maps->map, map));
+
+       return 1;
+}
+
+struct {
+       __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, __u32);
+       __array(values, struct inner_map);
+} m_hash_of_maps SEC(".maps") = {
+       .values = {
+               [2] = &inner_map,
+       },
+};
+
+static inline int check_hash_of_maps(void)
+{
+       struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps;
+       struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps;
+
+       VERIFY(check_default(&hash_of_maps->map, map));
+
+       return 1;
+}
+
+struct bpf_dtab {
+       struct bpf_map map;
+} __attribute__((preserve_access_index));
+
+struct {
+       __uint(type, BPF_MAP_TYPE_DEVMAP);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, __u32);
+} m_devmap SEC(".maps");
+
+static inline int check_devmap(void)
+{
+       struct bpf_dtab *devmap = (struct bpf_dtab *)&m_devmap;
+       struct bpf_map *map = (struct bpf_map *)&m_devmap;
+
+       VERIFY(check_default(&devmap->map, map));
+
+       return 1;
+}
+
+struct bpf_stab {
+       struct bpf_map map;
+} __attribute__((preserve_access_index));
+
+struct {
+       __uint(type, BPF_MAP_TYPE_SOCKMAP);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, __u32);
+} m_sockmap SEC(".maps");
+
+static inline int check_sockmap(void)
+{
+       struct bpf_stab *sockmap = (struct bpf_stab *)&m_sockmap;
+       struct bpf_map *map = (struct bpf_map *)&m_sockmap;
+
+       VERIFY(check_default(&sockmap->map, map));
+
+       return 1;
+}
+
+struct bpf_cpu_map {
+       struct bpf_map map;
+} __attribute__((preserve_access_index));
+
+struct {
+       __uint(type, BPF_MAP_TYPE_CPUMAP);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, __u32);
+} m_cpumap SEC(".maps");
+
+static inline int check_cpumap(void)
+{
+       struct bpf_cpu_map *cpumap = (struct bpf_cpu_map *)&m_cpumap;
+       struct bpf_map *map = (struct bpf_map *)&m_cpumap;
+
+       VERIFY(check_default(&cpumap->map, map));
+
+       return 1;
+}
+
+struct xsk_map {
+       struct bpf_map map;
+} __attribute__((preserve_access_index));
+
+struct {
+       __uint(type, BPF_MAP_TYPE_XSKMAP);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, __u32);
+} m_xskmap SEC(".maps");
+
+static inline int check_xskmap(void)
+{
+       struct xsk_map *xskmap = (struct xsk_map *)&m_xskmap;
+       struct bpf_map *map = (struct bpf_map *)&m_xskmap;
+
+       VERIFY(check_default(&xskmap->map, map));
+
+       return 1;
+}
+
+struct bpf_shtab {
+       struct bpf_map map;
+} __attribute__((preserve_access_index));
+
+struct {
+       __uint(type, BPF_MAP_TYPE_SOCKHASH);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, __u32);
+} m_sockhash SEC(".maps");
+
+static inline int check_sockhash(void)
+{
+       struct bpf_shtab *sockhash = (struct bpf_shtab *)&m_sockhash;
+       struct bpf_map *map = (struct bpf_map *)&m_sockhash;
+
+       VERIFY(check_default(&sockhash->map, map));
+
+       return 1;
+}
+
+struct bpf_cgroup_storage_map {
+       struct bpf_map map;
+} __attribute__((preserve_access_index));
+
+struct {
+       __uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
+       __type(key, struct bpf_cgroup_storage_key);
+       __type(value, __u32);
+} m_cgroup_storage SEC(".maps");
+
+static inline int check_cgroup_storage(void)
+{
+       struct bpf_cgroup_storage_map *cgroup_storage =
+               (struct bpf_cgroup_storage_map *)&m_cgroup_storage;
+       struct bpf_map *map = (struct bpf_map *)&m_cgroup_storage;
+
+       VERIFY(check(&cgroup_storage->map, map,
+                    sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));
+
+       return 1;
+}
+
+struct reuseport_array {
+       struct bpf_map map;
+} __attribute__((preserve_access_index));
+
+struct {
+       __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, __u32);
+} m_reuseport_sockarray SEC(".maps");
+
+static inline int check_reuseport_sockarray(void)
+{
+       struct reuseport_array *reuseport_sockarray =
+               (struct reuseport_array *)&m_reuseport_sockarray;
+       struct bpf_map *map = (struct bpf_map *)&m_reuseport_sockarray;
+
+       VERIFY(check_default(&reuseport_sockarray->map, map));
+
+       return 1;
+}
+
+struct {
+       __uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
+       __type(key, struct bpf_cgroup_storage_key);
+       __type(value, __u32);
+} m_percpu_cgroup_storage SEC(".maps");
+
+static inline int check_percpu_cgroup_storage(void)
+{
+       struct bpf_cgroup_storage_map *percpu_cgroup_storage =
+               (struct bpf_cgroup_storage_map *)&m_percpu_cgroup_storage;
+       struct bpf_map *map = (struct bpf_map *)&m_percpu_cgroup_storage;
+
+       VERIFY(check(&percpu_cgroup_storage->map, map,
+                    sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));
+
+       return 1;
+}
+
+struct bpf_queue_stack {
+       struct bpf_map map;
+} __attribute__((preserve_access_index));
+
+struct {
+       __uint(type, BPF_MAP_TYPE_QUEUE);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(value, __u32);
+} m_queue SEC(".maps");
+
+static inline int check_queue(void)
+{
+       struct bpf_queue_stack *queue = (struct bpf_queue_stack *)&m_queue;
+       struct bpf_map *map = (struct bpf_map *)&m_queue;
+
+       VERIFY(check(&queue->map, map, 0, sizeof(__u32), MAX_ENTRIES));
+
+       return 1;
+}
+
+struct {
+       __uint(type, BPF_MAP_TYPE_STACK);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(value, __u32);
+} m_stack SEC(".maps");
+
+static inline int check_stack(void)
+{
+       struct bpf_queue_stack *stack = (struct bpf_queue_stack *)&m_stack;
+       struct bpf_map *map = (struct bpf_map *)&m_stack;
+
+       VERIFY(check(&stack->map, map, 0, sizeof(__u32), MAX_ENTRIES));
+
+       return 1;
+}
+
+struct bpf_sk_storage_map {
+       struct bpf_map map;
+} __attribute__((preserve_access_index));
+
+struct {
+       __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+       __uint(map_flags, BPF_F_NO_PREALLOC);
+       __type(key, __u32);
+       __type(value, __u32);
+} m_sk_storage SEC(".maps");
+
+static inline int check_sk_storage(void)
+{
+       struct bpf_sk_storage_map *sk_storage =
+               (struct bpf_sk_storage_map *)&m_sk_storage;
+       struct bpf_map *map = (struct bpf_map *)&m_sk_storage;
+
+       VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0));
+
+       return 1;
+}
+
+struct {
+       __uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
+       __uint(max_entries, MAX_ENTRIES);
+       __type(key, __u32);
+       __type(value, __u32);
+} m_devmap_hash SEC(".maps");
+
+static inline int check_devmap_hash(void)
+{
+       struct bpf_dtab *devmap_hash = (struct bpf_dtab *)&m_devmap_hash;
+       struct bpf_map *map = (struct bpf_map *)&m_devmap_hash;
+
+       VERIFY(check_default(&devmap_hash->map, map));
+
+       return 1;
+}
+
+struct bpf_ringbuf_map {
+       struct bpf_map map;
+} __attribute__((preserve_access_index));
+
+struct {
+       __uint(type, BPF_MAP_TYPE_RINGBUF);
+       __uint(max_entries, 1 << 12);
+} m_ringbuf SEC(".maps");
+
+static inline int check_ringbuf(void)
+{
+       struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
+       struct bpf_map *map = (struct bpf_map *)&m_ringbuf;
+
+       VERIFY(check(&ringbuf->map, map, 0, 0, 1 << 12));
+
+       return 1;
+}
+
+SEC("cgroup_skb/egress")
+int cg_skb(void *ctx)
+{
+       VERIFY_TYPE(BPF_MAP_TYPE_HASH, check_hash);
+       VERIFY_TYPE(BPF_MAP_TYPE_ARRAY, check_array);
+       VERIFY_TYPE(BPF_MAP_TYPE_PROG_ARRAY, check_prog_array);
+       VERIFY_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, check_perf_event_array);
+       VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_HASH, check_percpu_hash);
+       VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, check_percpu_array);
+       VERIFY_TYPE(BPF_MAP_TYPE_STACK_TRACE, check_stack_trace);
+       VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, check_cgroup_array);
+       VERIFY_TYPE(BPF_MAP_TYPE_LRU_HASH, check_lru_hash);
+       VERIFY_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, check_lru_percpu_hash);
+       VERIFY_TYPE(BPF_MAP_TYPE_LPM_TRIE, check_lpm_trie);
+       VERIFY_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, check_array_of_maps);
+       VERIFY_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, check_hash_of_maps);
+       VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP, check_devmap);
+       VERIFY_TYPE(BPF_MAP_TYPE_SOCKMAP, check_sockmap);
+       VERIFY_TYPE(BPF_MAP_TYPE_CPUMAP, check_cpumap);
+       VERIFY_TYPE(BPF_MAP_TYPE_XSKMAP, check_xskmap);
+       VERIFY_TYPE(BPF_MAP_TYPE_SOCKHASH, check_sockhash);
+       VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, check_cgroup_storage);
+       VERIFY_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
+                   check_reuseport_sockarray);
+       VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
+                   check_percpu_cgroup_storage);
+       VERIFY_TYPE(BPF_MAP_TYPE_QUEUE, check_queue);
+       VERIFY_TYPE(BPF_MAP_TYPE_STACK, check_stack);
+       VERIFY_TYPE(BPF_MAP_TYPE_SK_STORAGE, check_sk_storage);
+       VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, check_devmap_hash);
+       VERIFY_TYPE(BPF_MAP_TYPE_RINGBUF, check_ringbuf);
+
+       return 1;
+}
+
+__u32 _version SEC("version") = 1;
+char _license[] SEC("license") = "GPL";
index d5a5eeb..712df7b 100644 (file)
@@ -8,6 +8,10 @@
 char _license[] SEC("license") = "GPL";
 __u32 _version SEC("version") = 1;
 
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
 #define SOL_CUSTOM                     0xdeadbeef
 
 struct sockopt_sk {
@@ -28,12 +32,14 @@ int _getsockopt(struct bpf_sockopt *ctx)
        __u8 *optval = ctx->optval;
        struct sockopt_sk *storage;
 
-       if (ctx->level == SOL_IP && ctx->optname == IP_TOS)
+       if (ctx->level == SOL_IP && ctx->optname == IP_TOS) {
                /* Not interested in SOL_IP:IP_TOS;
                 * let next BPF program in the cgroup chain or kernel
                 * handle it.
                 */
+               ctx->optlen = 0; /* bypass optval>PAGE_SIZE */
                return 1;
+       }
 
        if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF) {
                /* Not interested in SOL_SOCKET:SO_SNDBUF;
@@ -51,6 +57,26 @@ int _getsockopt(struct bpf_sockopt *ctx)
                return 1;
        }
 
+       if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
+               if (optval + 1 > optval_end)
+                       return 0; /* EPERM, bounds check */
+
+               ctx->retval = 0; /* Reset system call return value to zero */
+
+               /* Always export 0x55 */
+               optval[0] = 0x55;
+               ctx->optlen = 1;
+
+               /* Userspace buffer is PAGE_SIZE * 2, but BPF
+                * program can only see the first PAGE_SIZE
+                * bytes of data.
+                */
+               if (optval_end - optval != PAGE_SIZE)
+                       return 0; /* EPERM, unexpected data size */
+
+               return 1;
+       }
+
        if (ctx->level != SOL_CUSTOM)
                return 0; /* EPERM, deny everything except custom level */
 
@@ -81,12 +107,14 @@ int _setsockopt(struct bpf_sockopt *ctx)
        __u8 *optval = ctx->optval;
        struct sockopt_sk *storage;
 
-       if (ctx->level == SOL_IP && ctx->optname == IP_TOS)
+       if (ctx->level == SOL_IP && ctx->optname == IP_TOS) {
                /* Not interested in SOL_IP:IP_TOS;
                 * let next BPF program in the cgroup chain or kernel
                 * handle it.
                 */
+               ctx->optlen = 0; /* bypass optval>PAGE_SIZE */
                return 1;
+       }
 
        if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF) {
                /* Overwrite SO_SNDBUF value */
@@ -112,6 +140,28 @@ int _setsockopt(struct bpf_sockopt *ctx)
                return 1;
        }
 
+       if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) {
+               /* Original optlen is larger than PAGE_SIZE. */
+               if (ctx->optlen != PAGE_SIZE * 2)
+                       return 0; /* EPERM, unexpected data size */
+
+               if (optval + 1 > optval_end)
+                       return 0; /* EPERM, bounds check */
+
+               /* Make sure we can trim the buffer. */
+               optval[0] = 0;
+               ctx->optlen = 1;
+
+               /* Userspace buffer is PAGE_SIZE * 2, but BPF
+                * program can only see the first PAGE_SIZE
+                * bytes of data.
+                */
+               if (optval_end - optval != PAGE_SIZE)
+                       return 0; /* EPERM, unexpected data size */
+
+               return 1;
+       }
+
        if (ctx->level != SOL_CUSTOM)
                return 0; /* EPERM, deny everything except custom level */
 
diff --git a/tools/testing/selftests/bpf/progs/test_autoload.c b/tools/testing/selftests/bpf/progs/test_autoload.c
new file mode 100644 (file)
index 0000000..62c8cde
--- /dev/null
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+bool prog1_called = false;
+bool prog2_called = false;
+bool prog3_called = false;
+
+SEC("raw_tp/sys_enter")
+int prog1(const void *ctx)
+{
+       prog1_called = true;
+       return 0;
+}
+
+SEC("raw_tp/sys_exit")
+int prog2(const void *ctx)
+{
+       prog2_called = true;
+       return 0;
+}
+
+struct fake_kernel_struct {
+       int whatever;
+} __attribute__((preserve_access_index));
+
+SEC("fentry/unexisting-kprobe-will-fail-if-loaded")
+int prog3(const void *ctx)
+{
+       struct fake_kernel_struct *fake = (void *)ctx;
+       fake->whatever = 123;
+       prog3_called = true;
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_core_retro.c b/tools/testing/selftests/bpf/progs/test_core_retro.c
new file mode 100644 (file)
index 0000000..75c60c3
--- /dev/null
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+struct task_struct {
+       int tgid;
+} __attribute__((preserve_access_index));
+
+struct {
+       __uint(type, BPF_MAP_TYPE_ARRAY);
+       __uint(max_entries, 1);
+       __type(key, int);
+       __type(value, int);
+} results SEC(".maps");
+
+SEC("tp/raw_syscalls/sys_enter")
+int handle_sys_enter(void *ctx)
+{
+       struct task_struct *task = (void *)bpf_get_current_task();
+       int tgid = BPF_CORE_READ(task, tgid);
+       int zero = 0;
+
+       bpf_map_update_elem(&results, &zero, &tgid, 0);
+
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_endian.c b/tools/testing/selftests/bpf/progs/test_endian.c
new file mode 100644 (file)
index 0000000..ddb687c
--- /dev/null
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define IN16 0x1234
+#define IN32 0x12345678U
+#define IN64 0x123456789abcdef0ULL
+
+__u16 in16 = 0;
+__u32 in32 = 0;
+__u64 in64 = 0;
+
+__u16 out16 = 0;
+__u32 out32 = 0;
+__u64 out64 = 0;
+
+__u16 const16 = 0;
+__u32 const32 = 0;
+__u64 const64 = 0;
+
+SEC("raw_tp/sys_enter")
+int sys_enter(const void *ctx)
+{
+       out16 = __builtin_bswap16(in16);
+       out32 = __builtin_bswap32(in32);
+       out64 = __builtin_bswap64(in64);
+       const16 = ___bpf_swab16(IN16);
+       const32 = ___bpf_swab32(IN32);
+       const64 = ___bpf_swab64(IN64);
+
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";
index 29817a7..b6a6eb2 100644 (file)
@@ -57,8 +57,9 @@ struct {
 SEC("raw_tracepoint/sys_enter")
 int bpf_prog1(void *ctx)
 {
-       int max_len, max_buildid_len, usize, ksize, total_size;
+       int max_len, max_buildid_len, total_size;
        struct stack_trace_t *data;
+       long usize, ksize;
        void *raw_data;
        __u32 key = 0;
 
diff --git a/tools/testing/selftests/bpf/progs/test_ksyms.c b/tools/testing/selftests/bpf/progs/test_ksyms.c
new file mode 100644 (file)
index 0000000..6c9cbb5
--- /dev/null
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+
+#include <stdbool.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+__u64 out__bpf_link_fops = -1;
+__u64 out__bpf_link_fops1 = -1;
+__u64 out__btf_size = -1;
+__u64 out__per_cpu_start = -1;
+
+extern const void bpf_link_fops __ksym;
+extern const void __start_BTF __ksym;
+extern const void __stop_BTF __ksym;
+extern const void __per_cpu_start __ksym;
+/* non-existing symbol, weak, default to zero */
+extern const void bpf_link_fops1 __ksym __weak;
+
+SEC("raw_tp/sys_enter")
+int handler(const void *ctx)
+{
+       out__bpf_link_fops = (__u64)&bpf_link_fops;
+       out__btf_size = (__u64)(&__stop_BTF - &__start_BTF);
+       out__per_cpu_start = (__u64)&__per_cpu_start;
+
+       out__bpf_link_fops1 = (__u64)&bpf_link_fops1;
+
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";
index ad59c4c..8207a2d 100644 (file)
@@ -12,8 +12,8 @@ struct {
        __uint(value_size, sizeof(int));
 } perf_buf_map SEC(".maps");
 
-SEC("kprobe/sys_nanosleep")
-int BPF_KPROBE(handle_sys_nanosleep_entry)
+SEC("tp/raw_syscalls/sys_enter")
+int handle_sys_enter(void *ctx)
 {
        int cpu = bpf_get_smp_processor_id();
 
diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
new file mode 100644 (file)
index 0000000..bbf8296
--- /dev/null
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+// Copyright (c) 2020 Cloudflare
+
+#include <errno.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <sys/socket.h>
+
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+
+#define IP4(a, b, c, d)                                        \
+       bpf_htonl((((__u32)(a) & 0xffU) << 24) |        \
+                 (((__u32)(b) & 0xffU) << 16) |        \
+                 (((__u32)(c) & 0xffU) <<  8) |        \
+                 (((__u32)(d) & 0xffU) <<  0))
+#define IP6(aaaa, bbbb, cccc, dddd)                    \
+       { bpf_htonl(aaaa), bpf_htonl(bbbb), bpf_htonl(cccc), bpf_htonl(dddd) }
+
+#define MAX_SOCKS 32
+
+struct {
+       __uint(type, BPF_MAP_TYPE_SOCKMAP);
+       __uint(max_entries, MAX_SOCKS);
+       __type(key, __u32);
+       __type(value, __u64);
+} redir_map SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_ARRAY);
+       __uint(max_entries, 2);
+       __type(key, int);
+       __type(value, int);
+} run_map SEC(".maps");
+
+enum {
+       PROG1 = 0,
+       PROG2,
+};
+
+enum {
+       SERVER_A = 0,
+       SERVER_B,
+};
+
+/* Addressable key/value constants for convenience */
+static const int KEY_PROG1 = PROG1;
+static const int KEY_PROG2 = PROG2;
+static const int PROG_DONE = 1;
+
+static const __u32 KEY_SERVER_A = SERVER_A;
+static const __u32 KEY_SERVER_B = SERVER_B;
+
+static const __u16 DST_PORT = 7007; /* Host byte order */
+static const __u32 DST_IP4 = IP4(127, 0, 0, 1);
+static const __u32 DST_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000001);
+
+SEC("sk_lookup/lookup_pass")
+int lookup_pass(struct bpf_sk_lookup *ctx)
+{
+       return SK_PASS;
+}
+
+SEC("sk_lookup/lookup_drop")
+int lookup_drop(struct bpf_sk_lookup *ctx)
+{
+       return SK_DROP;
+}
+
+SEC("sk_reuseport/reuse_pass")
+int reuseport_pass(struct sk_reuseport_md *ctx)
+{
+       return SK_PASS;
+}
+
+SEC("sk_reuseport/reuse_drop")
+int reuseport_drop(struct sk_reuseport_md *ctx)
+{
+       return SK_DROP;
+}
+
+/* Redirect packets destined for port DST_PORT to socket at redir_map[0]. */
+SEC("sk_lookup/redir_port")
+int redir_port(struct bpf_sk_lookup *ctx)
+{
+       struct bpf_sock *sk;
+       int err;
+
+       if (ctx->local_port != DST_PORT)
+               return SK_PASS;
+
+       sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
+       if (!sk)
+               return SK_PASS;
+
+       err = bpf_sk_assign(ctx, sk, 0);
+       bpf_sk_release(sk);
+       return err ? SK_DROP : SK_PASS;
+}
+
+/* Redirect packets destined for DST_IP4 address to socket at redir_map[0]. */
+SEC("sk_lookup/redir_ip4")
+int redir_ip4(struct bpf_sk_lookup *ctx)
+{
+       struct bpf_sock *sk;
+       int err;
+
+       if (ctx->family != AF_INET)
+               return SK_PASS;
+       if (ctx->local_port != DST_PORT)
+               return SK_PASS;
+       if (ctx->local_ip4 != DST_IP4)
+               return SK_PASS;
+
+       sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
+       if (!sk)
+               return SK_PASS;
+
+       err = bpf_sk_assign(ctx, sk, 0);
+       bpf_sk_release(sk);
+       return err ? SK_DROP : SK_PASS;
+}
+
+/* Redirect packets destined for DST_IP6 address to socket at redir_map[0]. */
+SEC("sk_lookup/redir_ip6")
+int redir_ip6(struct bpf_sk_lookup *ctx)
+{
+       struct bpf_sock *sk;
+       int err;
+
+       if (ctx->family != AF_INET6)
+               return SK_PASS;
+       if (ctx->local_port != DST_PORT)
+               return SK_PASS;
+       if (ctx->local_ip6[0] != DST_IP6[0] ||
+           ctx->local_ip6[1] != DST_IP6[1] ||
+           ctx->local_ip6[2] != DST_IP6[2] ||
+           ctx->local_ip6[3] != DST_IP6[3])
+               return SK_PASS;
+
+       sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
+       if (!sk)
+               return SK_PASS;
+
+       err = bpf_sk_assign(ctx, sk, 0);
+       bpf_sk_release(sk);
+       return err ? SK_DROP : SK_PASS;
+}
+
+SEC("sk_lookup/select_sock_a")
+int select_sock_a(struct bpf_sk_lookup *ctx)
+{
+       struct bpf_sock *sk;
+       int err;
+
+       sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
+       if (!sk)
+               return SK_PASS;
+
+       err = bpf_sk_assign(ctx, sk, 0);
+       bpf_sk_release(sk);
+       return err ? SK_DROP : SK_PASS;
+}
+
+SEC("sk_lookup/select_sock_a_no_reuseport")
+int select_sock_a_no_reuseport(struct bpf_sk_lookup *ctx)
+{
+       struct bpf_sock *sk;
+       int err;
+
+       sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
+       if (!sk)
+               return SK_DROP;
+
+       err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_NO_REUSEPORT);
+       bpf_sk_release(sk);
+       return err ? SK_DROP : SK_PASS;
+}
+
+SEC("sk_reuseport/select_sock_b")
+int select_sock_b(struct sk_reuseport_md *ctx)
+{
+       __u32 key = KEY_SERVER_B;
+       int err;
+
+       err = bpf_sk_select_reuseport(ctx, &redir_map, &key, 0);
+       return err ? SK_DROP : SK_PASS;
+}
+
+/* Check that bpf_sk_assign() returns -EEXIST if socket already selected. */
+SEC("sk_lookup/sk_assign_eexist")
+int sk_assign_eexist(struct bpf_sk_lookup *ctx)
+{
+       struct bpf_sock *sk;
+       int err, ret;
+
+       ret = SK_DROP;
+       sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
+       if (!sk)
+               goto out;
+       err = bpf_sk_assign(ctx, sk, 0);
+       if (err)
+               goto out;
+       bpf_sk_release(sk);
+
+       sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
+       if (!sk)
+               goto out;
+       err = bpf_sk_assign(ctx, sk, 0);
+       if (err != -EEXIST) {
+               bpf_printk("sk_assign returned %d, expected %d\n",
+                          err, -EEXIST);
+               goto out;
+       }
+
+       ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */
+out:
+       if (sk)
+               bpf_sk_release(sk);
+       return ret;
+}
+
+/* Check that bpf_sk_assign(BPF_SK_LOOKUP_F_REPLACE) can override selection. */
+SEC("sk_lookup/sk_assign_replace_flag")
+int sk_assign_replace_flag(struct bpf_sk_lookup *ctx)
+{
+       struct bpf_sock *sk;
+       int err, ret;
+
+       ret = SK_DROP;
+       sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
+       if (!sk)
+               goto out;
+       err = bpf_sk_assign(ctx, sk, 0);
+       if (err)
+               goto out;
+       bpf_sk_release(sk);
+
+       sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
+       if (!sk)
+               goto out;
+       err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE);
+       if (err) {
+               bpf_printk("sk_assign returned %d, expected 0\n", err);
+               goto out;
+       }
+
+       ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */
+out:
+       if (sk)
+               bpf_sk_release(sk);
+       return ret;
+}
+
+/* Check that bpf_sk_assign(sk=NULL) is accepted. */
+SEC("sk_lookup/sk_assign_null")
+int sk_assign_null(struct bpf_sk_lookup *ctx)
+{
+       struct bpf_sock *sk = NULL;
+       int err, ret;
+
+       ret = SK_DROP;
+
+       err = bpf_sk_assign(ctx, NULL, 0);
+       if (err) {
+               bpf_printk("sk_assign returned %d, expected 0\n", err);
+               goto out;
+       }
+
+       sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
+       if (!sk)
+               goto out;
+       err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE);
+       if (err) {
+               bpf_printk("sk_assign returned %d, expected 0\n", err);
+               goto out;
+       }
+
+       if (ctx->sk != sk)
+               goto out;
+       err = bpf_sk_assign(ctx, NULL, 0);
+       if (err != -EEXIST)
+               goto out;
+       err = bpf_sk_assign(ctx, NULL, BPF_SK_LOOKUP_F_REPLACE);
+       if (err)
+               goto out;
+       err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE);
+       if (err)
+               goto out;
+
+       ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */
+out:
+       if (sk)
+               bpf_sk_release(sk);
+       return ret;
+}
+
+/* Check that selected sk is accessible through context. */
+SEC("sk_lookup/access_ctx_sk")
+int access_ctx_sk(struct bpf_sk_lookup *ctx)
+{
+       struct bpf_sock *sk1 = NULL, *sk2 = NULL;
+       int err, ret;
+
+       ret = SK_DROP;
+
+       /* Try accessing unassigned (NULL) ctx->sk field */
+       if (ctx->sk && ctx->sk->family != AF_INET)
+               goto out;
+
+       /* Assign a value to ctx->sk */
+       sk1 = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
+       if (!sk1)
+               goto out;
+       err = bpf_sk_assign(ctx, sk1, 0);
+       if (err)
+               goto out;
+       if (ctx->sk != sk1)
+               goto out;
+
+       /* Access ctx->sk fields */
+       if (ctx->sk->family != AF_INET ||
+           ctx->sk->type != SOCK_STREAM ||
+           ctx->sk->state != BPF_TCP_LISTEN)
+               goto out;
+
+       /* Reset selection */
+       err = bpf_sk_assign(ctx, NULL, BPF_SK_LOOKUP_F_REPLACE);
+       if (err)
+               goto out;
+       if (ctx->sk)
+               goto out;
+
+       /* Assign another socket */
+       sk2 = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
+       if (!sk2)
+               goto out;
+       err = bpf_sk_assign(ctx, sk2, BPF_SK_LOOKUP_F_REPLACE);
+       if (err)
+               goto out;
+       if (ctx->sk != sk2)
+               goto out;
+
+       /* Access reassigned ctx->sk fields */
+       if (ctx->sk->family != AF_INET ||
+           ctx->sk->type != SOCK_STREAM ||
+           ctx->sk->state != BPF_TCP_LISTEN)
+               goto out;
+
+       ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */
+out:
+       if (sk1)
+               bpf_sk_release(sk1);
+       if (sk2)
+               bpf_sk_release(sk2);
+       return ret;
+}
+
+/* Check narrow loads from ctx fields that support them.
+ *
+ * Narrow loads of size >= target field size from a non-zero offset
+ * are not covered because they give bogus results, that is the
+ * verifier ignores the offset.
+ */
+SEC("sk_lookup/ctx_narrow_access")
+int ctx_narrow_access(struct bpf_sk_lookup *ctx)
+{
+       struct bpf_sock *sk;
+       int err, family;
+       __u16 *half;
+       __u8 *byte;
+       bool v4;
+
+       v4 = (ctx->family == AF_INET);
+
+       /* Narrow loads from family field */
+       byte = (__u8 *)&ctx->family;
+       half = (__u16 *)&ctx->family;
+       if (byte[0] != (v4 ? AF_INET : AF_INET6) ||
+           byte[1] != 0 || byte[2] != 0 || byte[3] != 0)
+               return SK_DROP;
+       if (half[0] != (v4 ? AF_INET : AF_INET6))
+               return SK_DROP;
+
+       byte = (__u8 *)&ctx->protocol;
+       if (byte[0] != IPPROTO_TCP ||
+           byte[1] != 0 || byte[2] != 0 || byte[3] != 0)
+               return SK_DROP;
+       half = (__u16 *)&ctx->protocol;
+       if (half[0] != IPPROTO_TCP)
+               return SK_DROP;
+
+       /* Narrow loads from remote_port field. Expect non-0 value. */
+       byte = (__u8 *)&ctx->remote_port;
+       if (byte[0] == 0 && byte[1] == 0 && byte[2] == 0 && byte[3] == 0)
+               return SK_DROP;
+       half = (__u16 *)&ctx->remote_port;
+       if (half[0] == 0)
+               return SK_DROP;
+
+       /* Narrow loads from local_port field. Expect DST_PORT. */
+       byte = (__u8 *)&ctx->local_port;
+       if (byte[0] != ((DST_PORT >> 0) & 0xff) ||
+           byte[1] != ((DST_PORT >> 8) & 0xff) ||
+           byte[2] != 0 || byte[3] != 0)
+               return SK_DROP;
+       half = (__u16 *)&ctx->local_port;
+       if (half[0] != DST_PORT)
+               return SK_DROP;
+
+       /* Narrow loads from IPv4 fields */
+       if (v4) {
+               /* Expect non-0.0.0.0 in remote_ip4 */
+               byte = (__u8 *)&ctx->remote_ip4;
+               if (byte[0] == 0 && byte[1] == 0 &&
+                   byte[2] == 0 && byte[3] == 0)
+                       return SK_DROP;
+               half = (__u16 *)&ctx->remote_ip4;
+               if (half[0] == 0 && half[1] == 0)
+                       return SK_DROP;
+
+               /* Expect DST_IP4 in local_ip4 */
+               byte = (__u8 *)&ctx->local_ip4;
+               if (byte[0] != ((DST_IP4 >>  0) & 0xff) ||
+                   byte[1] != ((DST_IP4 >>  8) & 0xff) ||
+                   byte[2] != ((DST_IP4 >> 16) & 0xff) ||
+                   byte[3] != ((DST_IP4 >> 24) & 0xff))
+                       return SK_DROP;
+               half = (__u16 *)&ctx->local_ip4;
+               if (half[0] != ((DST_IP4 >>  0) & 0xffff) ||
+                   half[1] != ((DST_IP4 >> 16) & 0xffff))
+                       return SK_DROP;
+       } else {
+               /* Expect 0.0.0.0 IPs when family != AF_INET */
+               byte = (__u8 *)&ctx->remote_ip4;
+               if (byte[0] != 0 || byte[1] != 0 ||
+                   byte[2] != 0 || byte[3] != 0)
+                       return SK_DROP;
+               half = (__u16 *)&ctx->remote_ip4;
+               if (half[0] != 0 || half[1] != 0)
+                       return SK_DROP;
+
+               byte = (__u8 *)&ctx->local_ip4;
+               if (byte[0] != 0 || byte[1] != 0 ||
+                   byte[2] != 0 || byte[3] != 0)
+                       return SK_DROP;
+               half = (__u16 *)&ctx->local_ip4;
+               if (half[0] != 0 || half[1] != 0)
+                       return SK_DROP;
+       }
+
+       /* Narrow loads from IPv6 fields */
+       if (!v4) {
+               /* Expect non-:: IP in remote_ip6 */
+               byte = (__u8 *)&ctx->remote_ip6;
+               if (byte[0] == 0 && byte[1] == 0 &&
+                   byte[2] == 0 && byte[3] == 0 &&
+                   byte[4] == 0 && byte[5] == 0 &&
+                   byte[6] == 0 && byte[7] == 0 &&
+                   byte[8] == 0 && byte[9] == 0 &&
+                   byte[10] == 0 && byte[11] == 0 &&
+                   byte[12] == 0 && byte[13] == 0 &&
+                   byte[14] == 0 && byte[15] == 0)
+                       return SK_DROP;
+               half = (__u16 *)&ctx->remote_ip6;
+               if (half[0] == 0 && half[1] == 0 &&
+                   half[2] == 0 && half[3] == 0 &&
+                   half[4] == 0 && half[5] == 0 &&
+                   half[6] == 0 && half[7] == 0)
+                       return SK_DROP;
+
+               /* Expect DST_IP6 in local_ip6 */
+               byte = (__u8 *)&ctx->local_ip6;
+               if (byte[0] != ((DST_IP6[0] >>  0) & 0xff) ||
+                   byte[1] != ((DST_IP6[0] >>  8) & 0xff) ||
+                   byte[2] != ((DST_IP6[0] >> 16) & 0xff) ||
+                   byte[3] != ((DST_IP6[0] >> 24) & 0xff) ||
+                   byte[4] != ((DST_IP6[1] >>  0) & 0xff) ||
+                   byte[5] != ((DST_IP6[1] >>  8) & 0xff) ||
+                   byte[6] != ((DST_IP6[1] >> 16) & 0xff) ||
+                   byte[7] != ((DST_IP6[1] >> 24) & 0xff) ||
+                   byte[8] != ((DST_IP6[2] >>  0) & 0xff) ||
+                   byte[9] != ((DST_IP6[2] >>  8) & 0xff) ||
+                   byte[10] != ((DST_IP6[2] >> 16) & 0xff) ||
+                   byte[11] != ((DST_IP6[2] >> 24) & 0xff) ||
+                   byte[12] != ((DST_IP6[3] >>  0) & 0xff) ||
+                   byte[13] != ((DST_IP6[3] >>  8) & 0xff) ||
+                   byte[14] != ((DST_IP6[3] >> 16) & 0xff) ||
+                   byte[15] != ((DST_IP6[3] >> 24) & 0xff))
+                       return SK_DROP;
+               half = (__u16 *)&ctx->local_ip6;
+               if (half[0] != ((DST_IP6[0] >>  0) & 0xffff) ||
+                   half[1] != ((DST_IP6[0] >> 16) & 0xffff) ||
+                   half[2] != ((DST_IP6[1] >>  0) & 0xffff) ||
+                   half[3] != ((DST_IP6[1] >> 16) & 0xffff) ||
+                   half[4] != ((DST_IP6[2] >>  0) & 0xffff) ||
+                   half[5] != ((DST_IP6[2] >> 16) & 0xffff) ||
+                   half[6] != ((DST_IP6[3] >>  0) & 0xffff) ||
+                   half[7] != ((DST_IP6[3] >> 16) & 0xffff))
+                       return SK_DROP;
+       } else {
+               /* Expect :: IPs when family != AF_INET6 */
+               byte = (__u8 *)&ctx->remote_ip6;
+               if (byte[0] != 0 || byte[1] != 0 ||
+                   byte[2] != 0 || byte[3] != 0 ||
+                   byte[4] != 0 || byte[5] != 0 ||
+                   byte[6] != 0 || byte[7] != 0 ||
+                   byte[8] != 0 || byte[9] != 0 ||
+                   byte[10] != 0 || byte[11] != 0 ||
+                   byte[12] != 0 || byte[13] != 0 ||
+                   byte[14] != 0 || byte[15] != 0)
+                       return SK_DROP;
+               half = (__u16 *)&ctx->remote_ip6;
+               if (half[0] != 0 || half[1] != 0 ||
+                   half[2] != 0 || half[3] != 0 ||
+                   half[4] != 0 || half[5] != 0 ||
+                   half[6] != 0 || half[7] != 0)
+                       return SK_DROP;
+
+               byte = (__u8 *)&ctx->local_ip6;
+               if (byte[0] != 0 || byte[1] != 0 ||
+                   byte[2] != 0 || byte[3] != 0 ||
+                   byte[4] != 0 || byte[5] != 0 ||
+                   byte[6] != 0 || byte[7] != 0 ||
+                   byte[8] != 0 || byte[9] != 0 ||
+                   byte[10] != 0 || byte[11] != 0 ||
+                   byte[12] != 0 || byte[13] != 0 ||
+                   byte[14] != 0 || byte[15] != 0)
+                       return SK_DROP;
+               half = (__u16 *)&ctx->local_ip6;
+               if (half[0] != 0 || half[1] != 0 ||
+                   half[2] != 0 || half[3] != 0 ||
+                   half[4] != 0 || half[5] != 0 ||
+                   half[6] != 0 || half[7] != 0)
+                       return SK_DROP;
+       }
+
+       /* Success, redirect to KEY_SERVER_B */
+       sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
+       if (sk) {
+               bpf_sk_assign(ctx, sk, 0);
+               bpf_sk_release(sk);
+       }
+       return SK_PASS;
+}
+
+/* Check that sk_assign rejects SERVER_A socket with -ESOCKTNOSUPPORT */
+SEC("sk_lookup/sk_assign_esocknosupport")
+int sk_assign_esocknosupport(struct bpf_sk_lookup *ctx)
+{
+       struct bpf_sock *sk;
+       int err, ret;
+
+       ret = SK_DROP;
+       sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
+       if (!sk)
+               goto out;
+
+       err = bpf_sk_assign(ctx, sk, 0);
+       if (err != -ESOCKTNOSUPPORT) {
+               bpf_printk("sk_assign returned %d, expected %d\n",
+                          err, -ESOCKTNOSUPPORT);
+               goto out;
+       }
+
+       ret = SK_PASS; /* Success, pass to regular lookup */
+out:
+       if (sk)
+               bpf_sk_release(sk);
+       return ret;
+}
+
+SEC("sk_lookup/multi_prog_pass1")
+int multi_prog_pass1(struct bpf_sk_lookup *ctx)
+{
+       bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
+       return SK_PASS;
+}
+
+SEC("sk_lookup/multi_prog_pass2")
+int multi_prog_pass2(struct bpf_sk_lookup *ctx)
+{
+       bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
+       return SK_PASS;
+}
+
+SEC("sk_lookup/multi_prog_drop1")
+int multi_prog_drop1(struct bpf_sk_lookup *ctx)
+{
+       bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
+       return SK_DROP;
+}
+
+SEC("sk_lookup/multi_prog_drop2")
+int multi_prog_drop2(struct bpf_sk_lookup *ctx)
+{
+       bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
+       return SK_DROP;
+}
+
+static __always_inline int select_server_a(struct bpf_sk_lookup *ctx)
+{
+       struct bpf_sock *sk;
+       int err;
+
+       sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
+       if (!sk)
+               return SK_DROP;
+
+       err = bpf_sk_assign(ctx, sk, 0);
+       bpf_sk_release(sk);
+       if (err)
+               return SK_DROP;
+
+       return SK_PASS;
+}
+
+SEC("sk_lookup/multi_prog_redir1")
+int multi_prog_redir1(struct bpf_sk_lookup *ctx)
+{
+       int ret;
+
+       ret = select_server_a(ctx);
+       bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
+       return SK_PASS;
+}
+
+SEC("sk_lookup/multi_prog_redir2")
+int multi_prog_redir2(struct bpf_sk_lookup *ctx)
+{
+       int ret;
+
+       ret = select_server_a(ctx);
+       bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
+       return SK_PASS;
+}
+
+char _license[] SEC("license") = "Dual BSD/GPL";
+__u32 _version SEC("version") = 1;
index 77ae86f..374ccef 100644 (file)
@@ -20,7 +20,9 @@ long long in4 __attribute__((aligned(64))) = 0;
 struct s in5 = {};
 
 /* .rodata section */
-const volatile int in6 = 0;
+const volatile struct {
+       const int in6;
+} in = {};
 
 /* .data section */
 int out1 = -1;
@@ -46,7 +48,7 @@ int handler(const void *ctx)
        out3 = in3;
        out4 = in4;
        out5 = in5;
-       out6 = in6;
+       out6 = in.in6;
 
        bpf_syscall = CONFIG_BPF_SYSCALL;
        kern_ver = LINUX_KERNEL_VERSION;
index 057036c..3dca4c2 100644 (file)
@@ -79,7 +79,7 @@ struct {
 
 struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
-       __uint(max_entries, 2);
+       __uint(max_entries, 3);
        __type(key, int);
        __type(value, int);
 } sock_skb_opts SEC(".maps");
@@ -94,6 +94,12 @@ struct {
 SEC("sk_skb1")
 int bpf_prog1(struct __sk_buff *skb)
 {
+       int *f, two = 2;
+
+       f = bpf_map_lookup_elem(&sock_skb_opts, &two);
+       if (f && *f) {
+               return *f;
+       }
        return skb->len;
 }
 
diff --git a/tools/testing/selftests/bpf/progs/test_varlen.c b/tools/testing/selftests/bpf/progs/test_varlen.c
new file mode 100644 (file)
index 0000000..cd4b72c
--- /dev/null
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+#define MAX_LEN 256
+
+char buf_in1[MAX_LEN] = {};
+char buf_in2[MAX_LEN] = {};
+
+int test_pid = 0;
+bool capture = false;
+
+/* .bss */
+long payload1_len1 = 0;
+long payload1_len2 = 0;
+long total1 = 0;
+char payload1[MAX_LEN + MAX_LEN] = {};
+
+/* .data */
+int payload2_len1 = -1;
+int payload2_len2 = -1;
+int total2 = -1;
+char payload2[MAX_LEN + MAX_LEN] = { 1 };
+
+int payload3_len1 = -1;
+int payload3_len2 = -1;
+int total3= -1;
+char payload3[MAX_LEN + MAX_LEN] = { 1 };
+
+int payload4_len1 = -1;
+int payload4_len2 = -1;
+int total4= -1;
+char payload4[MAX_LEN + MAX_LEN] = { 1 };
+
+SEC("raw_tp/sys_enter")
+int handler64_unsigned(void *regs)
+{
+       int pid = bpf_get_current_pid_tgid() >> 32;
+       void *payload = payload1;
+       u64 len;
+
+       /* ignore irrelevant invocations */
+       if (test_pid != pid || !capture)
+               return 0;
+
+       len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]);
+       if (len <= MAX_LEN) {
+               payload += len;
+               payload1_len1 = len;
+       }
+
+       len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]);
+       if (len <= MAX_LEN) {
+               payload += len;
+               payload1_len2 = len;
+       }
+
+       total1 = payload - (void *)payload1;
+
+       return 0;
+}
+
+SEC("raw_tp/sys_exit")
+int handler64_signed(void *regs)
+{
+       int pid = bpf_get_current_pid_tgid() >> 32;
+       void *payload = payload3;
+       long len;
+
+       /* ignore irrelevant invocations */
+       if (test_pid != pid || !capture)
+               return 0;
+
+       len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]);
+       if (len >= 0) {
+               payload += len;
+               payload3_len1 = len;
+       }
+       len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]);
+       if (len >= 0) {
+               payload += len;
+               payload3_len2 = len;
+       }
+       total3 = payload - (void *)payload3;
+
+       return 0;
+}
+
+SEC("tp/raw_syscalls/sys_enter")
+int handler32_unsigned(void *regs)
+{
+       int pid = bpf_get_current_pid_tgid() >> 32;
+       void *payload = payload2;
+       u32 len;
+
+       /* ignore irrelevant invocations */
+       if (test_pid != pid || !capture)
+               return 0;
+
+       len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]);
+       if (len <= MAX_LEN) {
+               payload += len;
+               payload2_len1 = len;
+       }
+
+       len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]);
+       if (len <= MAX_LEN) {
+               payload += len;
+               payload2_len2 = len;
+       }
+
+       total2 = payload - (void *)payload2;
+
+       return 0;
+}
+
+SEC("tp/raw_syscalls/sys_exit")
+int handler32_signed(void *regs)
+{
+       int pid = bpf_get_current_pid_tgid() >> 32;
+       void *payload = payload4;
+       int len;
+
+       /* ignore irrelevant invocations */
+       if (test_pid != pid || !capture)
+               return 0;
+
+       len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in1[0]);
+       if (len >= 0) {
+               payload += len;
+               payload4_len1 = len;
+       }
+       len = bpf_probe_read_kernel_str(payload, MAX_LEN, &buf_in2[0]);
+       if (len >= 0) {
+               payload += len;
+               payload4_len2 = len;
+       }
+       total4 = payload - (void *)payload4;
+
+       return 0;
+}
+
+SEC("tp/syscalls/sys_exit_getpid")
+int handler_exit(void *regs)
+{
+       long bla;
+
+       if (bpf_probe_read_kernel(&bla, sizeof(bla), 0))
+               return 1;
+       else
+               return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
index 5611b56..29fa09d 100644 (file)
@@ -63,20 +63,20 @@ int BPF_PROG(handle__tp_btf, struct pt_regs *regs, long id)
        return 0;
 }
 
-SEC("kprobe/hrtimer_nanosleep")
-int BPF_KPROBE(handle__kprobe,
-              ktime_t rqtp, enum hrtimer_mode mode, clockid_t clockid)
+SEC("kprobe/hrtimer_start_range_ns")
+int BPF_KPROBE(handle__kprobe, struct hrtimer *timer, ktime_t tim, u64 delta_ns,
+              const enum hrtimer_mode mode)
 {
-       if (rqtp == MY_TV_NSEC)
+       if (tim == MY_TV_NSEC)
                kprobe_called = true;
        return 0;
 }
 
-SEC("fentry/hrtimer_nanosleep")
-int BPF_PROG(handle__fentry,
-            ktime_t rqtp, enum hrtimer_mode mode, clockid_t clockid)
+SEC("fentry/hrtimer_start_range_ns")
+int BPF_PROG(handle__fentry, struct hrtimer *timer, ktime_t tim, u64 delta_ns,
+            const enum hrtimer_mode mode)
 {
-       if (rqtp == MY_TV_NSEC)
+       if (tim == MY_TV_NSEC)
                fentry_called = true;
        return 0;
 }
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c b/tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c
new file mode 100644 (file)
index 0000000..59ee4f1
--- /dev/null
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+#define IFINDEX_LO     1
+
+struct {
+       __uint(type, BPF_MAP_TYPE_CPUMAP);
+       __uint(key_size, sizeof(__u32));
+       __uint(value_size, sizeof(struct bpf_cpumap_val));
+       __uint(max_entries, 4);
+} cpu_map SEC(".maps");
+
+SEC("xdp_redir")
+int xdp_redir_prog(struct xdp_md *ctx)
+{
+       return bpf_redirect_map(&cpu_map, 1, 0);
+}
+
+SEC("xdp_dummy")
+int xdp_dummy_prog(struct xdp_md *ctx)
+{
+       return XDP_PASS;
+}
+
+SEC("xdp_cpumap/dummy_cm")
+int xdp_dummy_cm(struct xdp_md *ctx)
+{
+       if (ctx->ingress_ifindex == IFINDEX_LO)
+               return XDP_DROP;
+
+       return XDP_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
index 3308112..0ac0864 100644 (file)
@@ -27,7 +27,7 @@ int xdp_dummy_prog(struct xdp_md *ctx)
 /* valid program on DEVMAP entry via SEC name;
  * has access to egress and ingress ifindex
  */
-SEC("xdp_devmap")
+SEC("xdp_devmap/map_prog")
 int xdp_dummy_dm(struct xdp_md *ctx)
 {
        char fmt[] = "devmap redirect: dev %u -> dev %u len %u\n";
diff --git a/tools/testing/selftests/bpf/progs/trace_printk.c b/tools/testing/selftests/bpf/progs/trace_printk.c
new file mode 100644 (file)
index 0000000..8ca7f39
--- /dev/null
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020, Oracle and/or its affiliates.
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+int trace_printk_ret = 0;
+int trace_printk_ran = 0;
+
+SEC("tp/raw_syscalls/sys_enter")
+int sys_enter(void *ctx)
+{
+       static const char fmt[] = "testing,testing %d\n";
+
+       trace_printk_ret = bpf_trace_printk(fmt, sizeof(fmt),
+                                           ++trace_printk_ran);
+       return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/udp_limit.c b/tools/testing/selftests/bpf/progs/udp_limit.c
new file mode 100644 (file)
index 0000000..8429b22
--- /dev/null
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <sys/socket.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+int invocations = 0, in_use = 0;
+
+SEC("cgroup/sock_create")
+int sock(struct bpf_sock *ctx)
+{
+       __u32 key;
+
+       if (ctx->type != SOCK_DGRAM)
+               return 1;
+
+       __sync_fetch_and_add(&invocations, 1);
+
+       if (in_use > 0) {
+               /* BPF_CGROUP_INET_SOCK_RELEASE is _not_ called
+                * when we return an error from the BPF
+                * program!
+                */
+               return 0;
+       }
+
+       __sync_fetch_and_add(&in_use, 1);
+       return 1;
+}
+
+SEC("cgroup/sock_release")
+int sock_release(struct bpf_sock *ctx)
+{
+       __u32 key;
+
+       if (ctx->type != SOCK_DGRAM)
+               return 1;
+
+       __sync_fetch_and_add(&invocations, 1);
+       __sync_fetch_and_add(&in_use, -1);
+       return 1;
+}
index 9df0d2a..4f6444b 100755 (executable)
@@ -10,7 +10,13 @@ if [ "$(id -u)" != "0" ]; then
        exit $ksft_skip
 fi
 
-SRC_TREE=../../../../
+if [ "$building_out_of_srctree" ]; then
+       # We are in linux-build/kselftest/bpf
+       OUTPUT=../../
+else
+       # We are in linux/tools/testing/selftests/bpf
+       OUTPUT=../../../../
+fi
 
 test_run()
 {
@@ -19,8 +25,8 @@ test_run()
 
        echo "[ JIT enabled:$1 hardened:$2 ]"
        dmesg -C
-       if [ -f ${SRC_TREE}/lib/test_bpf.ko ]; then
-               insmod ${SRC_TREE}/lib/test_bpf.ko 2> /dev/null
+       if [ -f ${OUTPUT}/lib/test_bpf.ko ]; then
+               insmod ${OUTPUT}/lib/test_bpf.ko 2> /dev/null
                if [ $? -ne 0 ]; then
                        rc=1
                fi
index 785eabf..5620919 100755 (executable)
@@ -140,7 +140,7 @@ ip netns exec ns6 sysctl net.ipv6.conf.veth10.seg6_enabled=1 > /dev/null
 ip netns exec ns6 nc -l -6 -u -d 7330 > $TMP_FILE &
 ip netns exec ns1 bash -c "echo 'foobar' | nc -w0 -6 -u -p 2121 -s fb00::1 fb00::6 7330"
 sleep 5 # wait enough time to ensure the UDP datagram arrived to the last segment
-kill -INT $!
+kill -TERM $!
 
 if [[ $(< $TMP_FILE) != "foobar" ]]; then
        exit 1
index 6a12a0e..754cf61 100644 (file)
@@ -789,19 +789,19 @@ static void test_sockmap(unsigned int tasks, void *data)
        }
 
        err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_PARSER);
-       if (err) {
+       if (!err) {
                printf("Failed empty parser prog detach\n");
                goto out_sockmap;
        }
 
        err = bpf_prog_detach(fd, BPF_SK_SKB_STREAM_VERDICT);
-       if (err) {
+       if (!err) {
                printf("Failed empty verdict prog detach\n");
                goto out_sockmap;
        }
 
        err = bpf_prog_detach(fd, BPF_SK_MSG_VERDICT);
-       if (err) {
+       if (!err) {
                printf("Failed empty msg verdict prog detach\n");
                goto out_sockmap;
        }
@@ -1090,19 +1090,19 @@ static void test_sockmap(unsigned int tasks, void *data)
                assert(status == 0);
        }
 
-       err = bpf_prog_detach(map_fd_rx, __MAX_BPF_ATTACH_TYPE);
+       err = bpf_prog_detach2(parse_prog, map_fd_rx, __MAX_BPF_ATTACH_TYPE);
        if (!err) {
                printf("Detached an invalid prog type.\n");
                goto out_sockmap;
        }
 
-       err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_PARSER);
+       err = bpf_prog_detach2(parse_prog, map_fd_rx, BPF_SK_SKB_STREAM_PARSER);
        if (err) {
                printf("Failed parser prog detach\n");
                goto out_sockmap;
        }
 
-       err = bpf_prog_detach(map_fd_rx, BPF_SK_SKB_STREAM_VERDICT);
+       err = bpf_prog_detach2(verdict_prog, map_fd_rx, BPF_SK_SKB_STREAM_VERDICT);
        if (err) {
                printf("Failed parser prog detach\n");
                goto out_sockmap;
index 54fa5fa..b1e4dad 100644 (file)
@@ -12,6 +12,9 @@
 #include <string.h>
 #include <execinfo.h> /* backtrace */
 
+#define EXIT_NO_TEST           2
+#define EXIT_ERR_SETUP_INFRA   3
+
 /* defined in test_progs.h */
 struct test_env env = {};
 
@@ -111,13 +114,31 @@ static void reset_affinity() {
        if (err < 0) {
                stdio_restore();
                fprintf(stderr, "Failed to reset process affinity: %d!\n", err);
-               exit(-1);
+               exit(EXIT_ERR_SETUP_INFRA);
        }
        err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
        if (err < 0) {
                stdio_restore();
                fprintf(stderr, "Failed to reset thread affinity: %d!\n", err);
-               exit(-1);
+               exit(EXIT_ERR_SETUP_INFRA);
+       }
+}
+
+static void save_netns(void)
+{
+       env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY);
+       if (env.saved_netns_fd == -1) {
+               perror("open(/proc/self/ns/net)");
+               exit(EXIT_ERR_SETUP_INFRA);
+       }
+}
+
+static void restore_netns(void)
+{
+       if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) {
+               stdio_restore();
+               perror("setns(CLONE_NEWNS)");
+               exit(EXIT_ERR_SETUP_INFRA);
        }
 }
 
@@ -138,8 +159,6 @@ void test__end_subtest()
               test->test_num, test->subtest_num,
               test->subtest_name, sub_error_cnt ? "FAIL" : "OK");
 
-       reset_affinity();
-
        free(test->subtest_name);
        test->subtest_name = NULL;
 }
@@ -366,6 +385,8 @@ enum ARG_KEYS {
        ARG_TEST_NAME_BLACKLIST = 'b',
        ARG_VERIFIER_STATS = 's',
        ARG_VERBOSE = 'v',
+       ARG_GET_TEST_CNT = 'c',
+       ARG_LIST_TEST_NAMES = 'l',
 };
 
 static const struct argp_option opts[] = {
@@ -379,6 +400,10 @@ static const struct argp_option opts[] = {
          "Output verifier statistics", },
        { "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL,
          "Verbose output (use -vv or -vvv for progressively verbose output)" },
+       { "count", ARG_GET_TEST_CNT, NULL, 0,
+         "Get number of selected top-level tests " },
+       { "list", ARG_LIST_TEST_NAMES, NULL, 0,
+         "List test names that would run (without running them) " },
        {},
 };
 
@@ -511,6 +536,12 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
                        }
                }
                break;
+       case ARG_GET_TEST_CNT:
+               env->get_test_cnt = true;
+               break;
+       case ARG_LIST_TEST_NAMES:
+               env->list_test_names = true;
+               break;
        case ARGP_KEY_ARG:
                argp_usage(state);
                break;
@@ -643,6 +674,7 @@ int main(int argc, char **argv)
                return -1;
        }
 
+       save_netns();
        stdio_hijack();
        for (i = 0; i < prog_test_cnt; i++) {
                struct prog_test_def *test = &prog_test_defs[i];
@@ -654,6 +686,17 @@ int main(int argc, char **argv)
                                test->test_num, test->test_name))
                        continue;
 
+               if (env.get_test_cnt) {
+                       env.succ_cnt++;
+                       continue;
+               }
+
+               if (env.list_test_names) {
+                       fprintf(env.stdout, "%s\n", test->test_name);
+                       env.succ_cnt++;
+                       continue;
+               }
+
                test->run_test();
                /* ensure last sub-test is finalized properly */
                if (test->subtest_name)
@@ -673,19 +716,34 @@ int main(int argc, char **argv)
                        test->error_cnt ? "FAIL" : "OK");
 
                reset_affinity();
+               restore_netns();
                if (test->need_cgroup_cleanup)
                        cleanup_cgroup_environment();
        }
        stdio_restore();
+
+       if (env.get_test_cnt) {
+               printf("%d\n", env.succ_cnt);
+               goto out;
+       }
+
+       if (env.list_test_names)
+               goto out;
+
        fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
                env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt);
 
+out:
        free_str_set(&env.test_selector.blacklist);
        free_str_set(&env.test_selector.whitelist);
        free(env.test_selector.num_set);
        free_str_set(&env.subtest_selector.blacklist);
        free_str_set(&env.subtest_selector.whitelist);
        free(env.subtest_selector.num_set);
+       close(env.saved_netns_fd);
+
+       if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
+               return EXIT_NO_TEST;
 
        return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
 }
index f4503c9..6e09bf7 100644 (file)
@@ -66,6 +66,8 @@ struct test_env {
        enum verbosity verbosity;
 
        bool jit_enabled;
+       bool get_test_cnt;
+       bool list_test_names;
 
        struct prog_test_def *test;
        FILE *stdout;
@@ -78,6 +80,8 @@ struct test_env {
        int sub_succ_cnt; /* successful sub-tests */
        int fail_cnt; /* total failed tests + sub-tests */
        int skip_cnt; /* skipped tests */
+
+       int saved_netns_fd;
 };
 
 extern struct test_env env;
index 37695fc..78789b2 100644 (file)
@@ -85,6 +85,7 @@ int txmsg_ktls_skb_drop;
 int txmsg_ktls_skb_redir;
 int ktls;
 int peek_flag;
+int skb_use_parser;
 
 static const struct option long_options[] = {
        {"help",        no_argument,            NULL, 'h' },
@@ -174,6 +175,7 @@ static void test_reset(void)
        txmsg_apply = txmsg_cork = 0;
        txmsg_ingress = txmsg_redir_skb = 0;
        txmsg_ktls_skb = txmsg_ktls_skb_drop = txmsg_ktls_skb_redir = 0;
+       skb_use_parser = 0;
 }
 
 static int test_start_subtest(const struct _test *t, struct sockmap_options *o)
@@ -1211,6 +1213,11 @@ run:
                }
        }
 
+       if (skb_use_parser) {
+               i = 2;
+               err = bpf_map_update_elem(map_fd[7], &i, &skb_use_parser, BPF_ANY);
+       }
+
        if (txmsg_drop)
                options->drop_expected = true;
 
@@ -1650,6 +1657,16 @@ static void test_txmsg_cork(int cgrp, struct sockmap_options *opt)
        test_send(opt, cgrp);
 }
 
+static void test_txmsg_ingress_parser(int cgrp, struct sockmap_options *opt)
+{
+       txmsg_pass = 1;
+       skb_use_parser = 512;
+       opt->iov_length = 256;
+       opt->iov_count = 1;
+       opt->rate = 2;
+       test_exec(cgrp, opt);
+}
+
 char *map_names[] = {
        "sock_map",
        "sock_map_txmsg",
@@ -1748,6 +1765,7 @@ struct _test test[] = {
        {"txmsg test pull-data", test_txmsg_pull},
        {"txmsg test pop-data", test_txmsg_pop},
        {"txmsg test push/pop data", test_txmsg_push_pop},
+       {"txmsg text ingress parser", test_txmsg_ingress_parser},
 };
 
 static int check_whitelist(struct _test *t, struct sockmap_options *opt)
diff --git a/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c b/tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c
new file mode 100644 (file)
index 0000000..2ad5f97
--- /dev/null
@@ -0,0 +1,492 @@
+{
+       "valid 1,2,4,8-byte reads from bpf_sk_lookup",
+       .insns = {
+               /* 1-byte read from family field */
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, family)),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, family) + 1),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, family) + 2),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, family) + 3),
+               /* 2-byte read from family field */
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, family)),
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, family) + 2),
+               /* 4-byte read from family field */
+               BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, family)),
+
+               /* 1-byte read from protocol field */
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, protocol)),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, protocol) + 1),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, protocol) + 2),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, protocol) + 3),
+               /* 2-byte read from protocol field */
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, protocol)),
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, protocol) + 2),
+               /* 4-byte read from protocol field */
+               BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, protocol)),
+
+               /* 1-byte read from remote_ip4 field */
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip4)),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip4) + 1),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip4) + 2),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip4) + 3),
+               /* 2-byte read from remote_ip4 field */
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip4)),
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip4) + 2),
+               /* 4-byte read from remote_ip4 field */
+               BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip4)),
+
+               /* 1-byte read from remote_ip6 field */
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6)),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 1),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 2),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 3),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 4),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 5),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 6),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 7),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 8),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 9),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 10),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 11),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 12),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 13),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 14),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 15),
+               /* 2-byte read from remote_ip6 field */
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6)),
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 2),
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 4),
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 6),
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 8),
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 10),
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 12),
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 14),
+               /* 4-byte read from remote_ip6 field */
+               BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6)),
+               BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 4),
+               BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 8),
+               BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6) + 12),
+
+               /* 1-byte read from remote_port field */
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_port)),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_port) + 1),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_port) + 2),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_port) + 3),
+               /* 2-byte read from remote_port field */
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_port)),
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_port) + 2),
+               /* 4-byte read from remote_port field */
+               BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_port)),
+
+               /* 1-byte read from local_ip4 field */
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip4)),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip4) + 1),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip4) + 2),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip4) + 3),
+               /* 2-byte read from local_ip4 field */
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip4)),
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip4) + 2),
+               /* 4-byte read from local_ip4 field */
+               BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip4)),
+
+               /* 1-byte read from local_ip6 field */
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6)),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 1),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 2),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 3),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 4),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 5),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 6),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 7),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 8),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 9),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 10),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 11),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 12),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 13),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 14),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 15),
+               /* 2-byte read from local_ip6 field */
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6)),
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 2),
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 4),
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 6),
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 8),
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 10),
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 12),
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 14),
+               /* 4-byte read from local_ip6 field */
+               BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6)),
+               BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 4),
+               BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 8),
+               BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6) + 12),
+
+               /* 1-byte read from local_port field */
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_port)),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_port) + 1),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_port) + 2),
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_port) + 3),
+               /* 2-byte read from local_port field */
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_port)),
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_port) + 2),
+               /* 4-byte read from local_port field */
+               BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_port)),
+
+               /* 8-byte read from sk field */
+               BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, sk)),
+
+               BPF_MOV32_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+},
+/* invalid 8-byte reads from a 4-byte fields in bpf_sk_lookup */
+{
+       "invalid 8-byte read from bpf_sk_lookup family field",
+       .insns = {
+               BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, family)),
+               BPF_MOV32_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "invalid bpf_context access",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+},
+{
+       "invalid 8-byte read from bpf_sk_lookup protocol field",
+       .insns = {
+               BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, protocol)),
+               BPF_MOV32_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "invalid bpf_context access",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+},
+{
+       "invalid 8-byte read from bpf_sk_lookup remote_ip4 field",
+       .insns = {
+               BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip4)),
+               BPF_MOV32_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "invalid bpf_context access",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+},
+{
+       "invalid 8-byte read from bpf_sk_lookup remote_ip6 field",
+       .insns = {
+               BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_ip6)),
+               BPF_MOV32_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "invalid bpf_context access",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+},
+{
+       "invalid 8-byte read from bpf_sk_lookup remote_port field",
+       .insns = {
+               BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, remote_port)),
+               BPF_MOV32_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "invalid bpf_context access",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+},
+{
+       "invalid 8-byte read from bpf_sk_lookup local_ip4 field",
+       .insns = {
+               BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip4)),
+               BPF_MOV32_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "invalid bpf_context access",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+},
+{
+       "invalid 8-byte read from bpf_sk_lookup local_ip6 field",
+       .insns = {
+               BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_ip6)),
+               BPF_MOV32_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "invalid bpf_context access",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+},
+{
+       "invalid 8-byte read from bpf_sk_lookup local_port field",
+       .insns = {
+               BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, local_port)),
+               BPF_MOV32_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "invalid bpf_context access",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+},
+/* invalid 1,2,4-byte reads from 8-byte fields in bpf_sk_lookup */
+{
+       "invalid 4-byte read from bpf_sk_lookup sk field",
+       .insns = {
+               BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, sk)),
+               BPF_MOV32_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "invalid bpf_context access",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+},
+{
+       "invalid 2-byte read from bpf_sk_lookup sk field",
+       .insns = {
+               BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, sk)),
+               BPF_MOV32_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "invalid bpf_context access",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+},
+{
+       "invalid 1-byte read from bpf_sk_lookup sk field",
+       .insns = {
+               BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+                           offsetof(struct bpf_sk_lookup, sk)),
+               BPF_MOV32_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "invalid bpf_context access",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+},
+/* out of bounds and unaligned reads from bpf_sk_lookup */
+{
+       "invalid 4-byte read past end of bpf_sk_lookup",
+       .insns = {
+               BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                           sizeof(struct bpf_sk_lookup)),
+               BPF_MOV32_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "invalid bpf_context access",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+},
+{
+       "invalid 4-byte unaligned read from bpf_sk_lookup at odd offset",
+       .insns = {
+               BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 1),
+               BPF_MOV32_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "invalid bpf_context access",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+},
+{
+       "invalid 4-byte unaligned read from bpf_sk_lookup at even offset",
+       .insns = {
+               BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 2),
+               BPF_MOV32_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "invalid bpf_context access",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+},
+/* in-bound and out-of-bound writes to bpf_sk_lookup */
+{
+       "invalid 8-byte write to bpf_sk_lookup",
+       .insns = {
+               BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U),
+               BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+               BPF_MOV32_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "invalid bpf_context access",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+},
+{
+       "invalid 4-byte write to bpf_sk_lookup",
+       .insns = {
+               BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U),
+               BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+               BPF_MOV32_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "invalid bpf_context access",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+},
+{
+       "invalid 2-byte write to bpf_sk_lookup",
+       .insns = {
+               BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U),
+               BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0, 0),
+               BPF_MOV32_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "invalid bpf_context access",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+},
+{
+       "invalid 1-byte write to bpf_sk_lookup",
+       .insns = {
+               BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U),
+               BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+               BPF_MOV32_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "invalid bpf_context access",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+},
+{
+       "invalid 4-byte write past end of bpf_sk_lookup",
+       .insns = {
+               BPF_MOV64_IMM(BPF_REG_0, 0xcafe4a11U),
+               BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+                           sizeof(struct bpf_sk_lookup)),
+               BPF_MOV32_IMM(BPF_REG_0, 0),
+               BPF_EXIT_INSN(),
+       },
+       .errstr = "invalid bpf_context access",
+       .result = REJECT,
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+},
diff --git a/tools/testing/selftests/bpf/verifier/map_ptr.c b/tools/testing/selftests/bpf/verifier/map_ptr.c
new file mode 100644 (file)
index 0000000..b52209d
--- /dev/null
@@ -0,0 +1,62 @@
+{
+       "bpf_map_ptr: read with negative offset rejected",
+       .insns = {
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, -8),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "bpf_array access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN",
+       .result = REJECT,
+       .errstr = "R1 is bpf_array invalid negative access: off=-8",
+},
+{
+       "bpf_map_ptr: write rejected",
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 3 },
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "bpf_array access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN",
+       .result = REJECT,
+       .errstr = "only read from bpf_array is supported",
+},
+{
+       "bpf_map_ptr: read non-existent field rejected",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_6, 0),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "bpf_array access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN",
+       .result = REJECT,
+       .errstr = "cannot access ptr member ops with moff 0 in struct bpf_map with off 1 size 4",
+},
+{
+       "bpf_map_ptr: read ops field accepted",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_6, 0),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 1),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 1 },
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "bpf_array access is allowed only to CAP_PERFMON and CAP_SYS_ADMIN",
+       .result = ACCEPT,
+       .retval = 1,
+},
index cd26ee6..1f2b8c4 100644 (file)
@@ -56,7 +56,7 @@
        .fixup_map_in_map = { 16 },
        .fixup_map_array_48b = { 13 },
        .result = REJECT,
-       .errstr = "R0 invalid mem access 'map_ptr'",
+       .errstr = "only read from bpf_array is supported",
 },
 {
        "cond: two branches returning different map pointers for lookup (tail, tail)",
index 97ee658..ed4e76b 100644 (file)
        .errstr = "R0 invalid mem access 'inv'",
        .errstr_unpriv = "R0 pointer -= pointer prohibited",
 },
+{
+       "32bit pkt_ptr -= scalar",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+                   offsetof(struct __sk_buff, data_end)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+                   offsetof(struct __sk_buff, data)),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 40),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_8, 2),
+       BPF_ALU32_REG(BPF_MOV, BPF_REG_4, BPF_REG_7),
+       BPF_ALU32_REG(BPF_SUB, BPF_REG_6, BPF_REG_4),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = ACCEPT,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "32bit scalar -= pkt_ptr",
+       .insns = {
+       BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
+                   offsetof(struct __sk_buff, data_end)),
+       BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
+                   offsetof(struct __sk_buff, data)),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 40),
+       BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_8, 2),
+       BPF_ALU32_REG(BPF_MOV, BPF_REG_4, BPF_REG_6),
+       BPF_ALU32_REG(BPF_SUB, BPF_REG_4, BPF_REG_7),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = ACCEPT,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
index 0d347d4..4504210 100644 (file)
@@ -121,6 +121,7 @@ h1_destroy()
 h2_create()
 {
        host_create $h2 2
+       tc qdisc add dev $h2 clsact
 
        # Some of the tests in this suite use multicast traffic. As this traffic
        # enters BR2_10 resp. BR2_11, it is flooded to all other ports. Thus
@@ -141,6 +142,7 @@ h2_create()
 h2_destroy()
 {
        ethtool -s $h2 autoneg on
+       tc qdisc del dev $h2 clsact
        host_destroy $h2
 }
 
@@ -336,6 +338,17 @@ get_qdisc_npackets()
                qdisc_stats_get $swp3 $(get_qdisc_handle $vlan) .packets
 }
 
+send_packets()
+{
+       local vlan=$1; shift
+       local proto=$1; shift
+       local pkts=$1; shift
+
+       $MZ $h2.$vlan -p 8000 -a own -b $h3_mac \
+           -A $(ipaddr 2 $vlan) -B $(ipaddr 3 $vlan) \
+           -t $proto -q -c $pkts "$@"
+}
+
 # This sends traffic in an attempt to build a backlog of $size. Returns 0 on
 # success. After 10 failed attempts it bails out and returns 1. It dumps the
 # backlog size to stdout.
@@ -364,9 +377,7 @@ build_backlog()
                        return 1
                fi
 
-               $MZ $h2.$vlan -p 8000 -a own -b $h3_mac \
-                   -A $(ipaddr 2 $vlan) -B $(ipaddr 3 $vlan) \
-                   -t $proto -q -c $pkts "$@"
+               send_packets $vlan $proto $pkts "$@"
        done
 }
 
@@ -531,3 +542,92 @@ do_mc_backlog_test()
 
        log_test "TC $((vlan - 10)): Qdisc reports MC backlog"
 }
+
+do_drop_test()
+{
+       local vlan=$1; shift
+       local limit=$1; shift
+       local trigger=$1; shift
+       local subtest=$1; shift
+       local fetch_counter=$1; shift
+       local backlog
+       local base
+       local now
+       local pct
+
+       RET=0
+
+       start_traffic $h1.$vlan $(ipaddr 1 $vlan) $(ipaddr 3 $vlan) $h3_mac
+
+       # Create a bit of a backlog and observe no mirroring due to drops.
+       qevent_rule_install_$subtest
+       base=$($fetch_counter)
+
+       build_backlog $vlan $((2 * limit / 3)) udp >/dev/null
+
+       busywait 1100 until_counter_is ">= $((base + 1))" $fetch_counter >/dev/null
+       check_fail $? "Spurious packets observed without buffer pressure"
+
+       qevent_rule_uninstall_$subtest
+
+       # Push to the queue until it's at the limit. The configured limit is
+       # rounded by the qdisc and then by the driver, so this is the best we
+       # can do to get to the real limit of the system. Do this with the rules
+       # uninstalled so that the inevitable drops don't get counted.
+       build_backlog $vlan $((3 * limit / 2)) udp >/dev/null
+
+       qevent_rule_install_$subtest
+       base=$($fetch_counter)
+
+       send_packets $vlan udp 11
+
+       now=$(busywait 1100 until_counter_is ">= $((base + 10))" $fetch_counter)
+       check_err $? "Dropped packets not observed: 11 expected, $((now - base)) seen"
+
+       # When no extra traffic is injected, there should be no mirroring.
+       busywait 1100 until_counter_is ">= $((base + 20))" $fetch_counter >/dev/null
+       check_fail $? "Spurious packets observed"
+
+       # When the rule is uninstalled, there should be no mirroring.
+       qevent_rule_uninstall_$subtest
+       send_packets $vlan udp 11
+       busywait 1100 until_counter_is ">= $((base + 20))" $fetch_counter >/dev/null
+       check_fail $? "Spurious packets observed after uninstall"
+
+       log_test "TC $((vlan - 10)): ${trigger}ped packets $subtest'd"
+
+       stop_traffic
+       sleep 1
+}
+
+qevent_rule_install_mirror()
+{
+       tc filter add block 10 pref 1234 handle 102 matchall skip_sw \
+          action mirred egress mirror dev $swp2 hw_stats disabled
+}
+
+qevent_rule_uninstall_mirror()
+{
+       tc filter del block 10 pref 1234 handle 102 matchall
+}
+
+qevent_counter_fetch_mirror()
+{
+       tc_rule_handle_stats_get "dev $h2 ingress" 101
+}
+
+do_drop_mirror_test()
+{
+       local vlan=$1; shift
+       local limit=$1; shift
+       local qevent_name=$1; shift
+
+       tc filter add dev $h2 ingress pref 1 handle 101 prot ip \
+          flower skip_sw ip_proto udp \
+          action drop
+
+       do_drop_test "$vlan" "$limit" "$qevent_name" mirror \
+                    qevent_counter_fetch_mirror
+
+       tc filter del dev $h2 ingress pref 1 handle 101 flower
+}
index 1c36c57..c8968b0 100755 (executable)
@@ -7,6 +7,7 @@ ALL_TESTS="
        ecn_nodrop_test
        red_test
        mc_backlog_test
+       red_mirror_test
 "
 : ${QDISC:=ets}
 source sch_red_core.sh
@@ -83,6 +84,16 @@ mc_backlog_test()
        uninstall_qdisc
 }
 
+red_mirror_test()
+{
+       install_qdisc qevent early_drop block 10
+
+       do_drop_mirror_test 10 $BACKLOG1 early_drop
+       do_drop_mirror_test 11 $BACKLOG2 early_drop
+
+       uninstall_qdisc
+}
+
 trap cleanup EXIT
 
 setup_prepare
index 558667e..ede9c38 100755 (executable)
@@ -7,6 +7,7 @@ ALL_TESTS="
        ecn_nodrop_test
        red_test
        mc_backlog_test
+       red_mirror_test
 "
 source sch_red_core.sh
 
@@ -57,6 +58,13 @@ mc_backlog_test()
        uninstall_qdisc
 }
 
+red_mirror_test()
+{
+       install_qdisc qevent early_drop block 10
+       do_drop_mirror_test 10 $BACKLOG
+       uninstall_qdisc
+}
+
 trap cleanup EXIT
 
 setup_prepare
index fd583a1..d7cf33a 100755 (executable)
@@ -28,7 +28,7 @@ cleanup()
 
 trap cleanup EXIT
 
-ALL_TESTS="router tc_flower mirror_gre"
+ALL_TESTS="router tc_flower mirror_gre tc_police"
 for current_test in ${TESTS:-$ALL_TESTS}; do
        source ${current_test}_scale.sh
 
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_police_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_police_scale.sh
new file mode 100644 (file)
index 0000000..e79ac0d
--- /dev/null
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+source ../tc_police_scale.sh
+
+tc_police_get_target()
+{
+       local should_fail=$1; shift
+       local target
+
+       target=$(devlink_resource_size_get global_policers single_rate_policers)
+
+       if ((! should_fail)); then
+               echo $target
+       else
+               echo $((target + 1))
+       fi
+}
index 43ba1b4..43f6624 100755 (executable)
@@ -22,7 +22,7 @@ cleanup()
 devlink_sp_read_kvd_defaults
 trap cleanup EXIT
 
-ALL_TESTS="router tc_flower mirror_gre"
+ALL_TESTS="router tc_flower mirror_gre tc_police"
 for current_test in ${TESTS:-$ALL_TESTS}; do
        source ${current_test}_scale.sh
 
diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_police_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_police_scale.sh
new file mode 100644 (file)
index 0000000..e79ac0d
--- /dev/null
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+source ../tc_police_scale.sh
+
+tc_police_get_target()
+{
+       local should_fail=$1; shift
+       local target
+
+       target=$(devlink_resource_size_get global_policers single_rate_policers)
+
+       if ((! should_fail)); then
+               echo $target
+       else
+               echo $((target + 1))
+       fi
+}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_police_occ.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_police_occ.sh
new file mode 100755 (executable)
index 0000000..448b75c
--- /dev/null
@@ -0,0 +1,108 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test that policers shared by different tc filters are correctly reference
+# counted by observing policers' occupancy via devlink-resource.
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+ALL_TESTS="
+       tc_police_occ_test
+"
+NUM_NETIFS=2
+source $lib_dir/lib.sh
+source $lib_dir/devlink_lib.sh
+
+h1_create()
+{
+       simple_if_init $h1
+}
+
+h1_destroy()
+{
+       simple_if_fini $h1
+}
+
+switch_create()
+{
+       simple_if_init $swp1
+       tc qdisc add dev $swp1 clsact
+}
+
+switch_destroy()
+{
+       tc qdisc del dev $swp1 clsact
+       simple_if_fini $swp1
+}
+
+setup_prepare()
+{
+       h1=${NETIFS[p1]}
+       swp1=${NETIFS[p2]}
+
+       vrf_prepare
+
+       h1_create
+       switch_create
+}
+
+cleanup()
+{
+       pre_cleanup
+
+       switch_destroy
+       h1_destroy
+
+       vrf_cleanup
+}
+
+tc_police_occ_get()
+{
+       devlink_resource_occ_get global_policers single_rate_policers
+}
+
+tc_police_occ_test()
+{
+       RET=0
+
+       local occ=$(tc_police_occ_get)
+
+       tc filter add dev $swp1 ingress pref 1 handle 101 proto ip \
+               flower skip_sw \
+               action police rate 100mbit burst 100k conform-exceed drop/ok
+       (( occ + 1 == $(tc_police_occ_get) ))
+       check_err $? "Got occupancy $(tc_police_occ_get), expected $((occ + 1))"
+
+       tc filter del dev $swp1 ingress pref 1 handle 101 flower
+       (( occ == $(tc_police_occ_get) ))
+       check_err $? "Got occupancy $(tc_police_occ_get), expected $occ"
+
+       tc filter add dev $swp1 ingress pref 1 handle 101 proto ip \
+               flower skip_sw \
+               action police rate 100mbit burst 100k conform-exceed drop/ok \
+               index 10
+       tc filter add dev $swp1 ingress pref 2 handle 102 proto ip \
+               flower skip_sw action police index 10
+
+       (( occ + 1 == $(tc_police_occ_get) ))
+       check_err $? "Got occupancy $(tc_police_occ_get), expected $((occ + 1))"
+
+       tc filter del dev $swp1 ingress pref 2 handle 102 flower
+       (( occ + 1 == $(tc_police_occ_get) ))
+       check_err $? "Got occupancy $(tc_police_occ_get), expected $((occ + 1))"
+
+       tc filter del dev $swp1 ingress pref 1 handle 101 flower
+       (( occ == $(tc_police_occ_get) ))
+       check_err $? "Got occupancy $(tc_police_occ_get), expected $occ"
+
+       log_test "tc police occupancy"
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh
new file mode 100644 (file)
index 0000000..4b96561
--- /dev/null
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: GPL-2.0
+
+TC_POLICE_NUM_NETIFS=2
+
+tc_police_h1_create()
+{
+       simple_if_init $h1
+}
+
+tc_police_h1_destroy()
+{
+       simple_if_fini $h1
+}
+
+tc_police_switch_create()
+{
+       simple_if_init $swp1
+       tc qdisc add dev $swp1 clsact
+}
+
+tc_police_switch_destroy()
+{
+       tc qdisc del dev $swp1 clsact
+       simple_if_fini $swp1
+}
+
+tc_police_rules_create()
+{
+       local count=$1; shift
+       local should_fail=$1; shift
+
+       TC_POLICE_BATCH_FILE="$(mktemp)"
+
+       for ((i = 0; i < count; ++i)); do
+               cat >> $TC_POLICE_BATCH_FILE <<-EOF
+                       filter add dev $swp1 ingress \
+                               prot ip \
+                               flower skip_sw \
+                               action police rate 10mbit burst 100k \
+                               conform-exceed drop/ok
+               EOF
+       done
+
+       tc -b $TC_POLICE_BATCH_FILE
+       check_err_fail $should_fail $? "Rule insertion"
+}
+
+__tc_police_test()
+{
+       local count=$1; shift
+       local should_fail=$1; shift
+
+       tc_police_rules_create $count $should_fail
+
+       offload_count=$(tc filter show dev $swp1 ingress | grep in_hw | wc -l)
+       ((offload_count == count))
+       check_err_fail $should_fail $? "tc police offload count"
+}
+
+tc_police_test()
+{
+       local count=$1; shift
+       local should_fail=$1; shift
+
+       if ! tc_offload_check $TC_POLICE_NUM_NETIFS; then
+               check_err 1 "Could not test offloaded functionality"
+               return
+       fi
+
+       __tc_police_test $count $should_fail
+}
+
+tc_police_setup_prepare()
+{
+       h1=${NETIFS[p1]}
+       swp1=${NETIFS[p2]}
+
+       vrf_prepare
+
+       tc_police_h1_create
+       tc_police_switch_create
+}
+
+tc_police_cleanup()
+{
+       pre_cleanup
+
+       tc_police_switch_destroy
+       tc_police_h1_destroy
+
+       vrf_cleanup
+}
index 9241250..553cb9f 100755 (executable)
@@ -11,6 +11,8 @@ ALL_TESTS="
        matchall_mirror_behind_flower_ingress_test
        matchall_sample_behind_flower_ingress_test
        matchall_mirror_behind_flower_egress_test
+       police_limits_test
+       multi_police_test
 "
 NUM_NETIFS=2
 
@@ -287,6 +289,80 @@ matchall_mirror_behind_flower_egress_test()
        matchall_behind_flower_egress_test "mirror" "mirred egress mirror dev $swp2"
 }
 
+police_limits_test()
+{
+       RET=0
+
+       tc qdisc add dev $swp1 clsact
+
+       tc filter add dev $swp1 ingress pref 1 proto ip handle 101 \
+               flower skip_sw \
+               action police rate 0.5kbit burst 1m conform-exceed drop/ok
+       check_fail $? "Incorrect success to add police action with too low rate"
+
+       tc filter add dev $swp1 ingress pref 1 proto ip handle 101 \
+               flower skip_sw \
+               action police rate 2.5tbit burst 1g conform-exceed drop/ok
+       check_fail $? "Incorrect success to add police action with too high rate"
+
+       tc filter add dev $swp1 ingress pref 1 proto ip handle 101 \
+               flower skip_sw \
+               action police rate 1.5kbit burst 1m conform-exceed drop/ok
+       check_err $? "Failed to add police action with low rate"
+
+       tc filter del dev $swp1 ingress protocol ip pref 1 handle 101 flower
+
+       tc filter add dev $swp1 ingress pref 1 proto ip handle 101 \
+               flower skip_sw \
+               action police rate 1.9tbit burst 1g conform-exceed drop/ok
+       check_err $? "Failed to add police action with high rate"
+
+       tc filter del dev $swp1 ingress protocol ip pref 1 handle 101 flower
+
+       tc filter add dev $swp1 ingress pref 1 proto ip handle 101 \
+               flower skip_sw \
+               action police rate 1.5kbit burst 512b conform-exceed drop/ok
+       check_fail $? "Incorrect success to add police action with too low burst size"
+
+       tc filter add dev $swp1 ingress pref 1 proto ip handle 101 \
+               flower skip_sw \
+               action police rate 1.5kbit burst 2k conform-exceed drop/ok
+       check_err $? "Failed to add police action with low burst size"
+
+       tc filter del dev $swp1 ingress protocol ip pref 1 handle 101 flower
+
+       tc qdisc del dev $swp1 clsact
+
+       log_test "police rate and burst limits"
+}
+
+multi_police_test()
+{
+       RET=0
+
+       # It is forbidden in mlxsw driver to have multiple police
+       # actions in a single rule.
+
+       tc qdisc add dev $swp1 clsact
+
+       tc filter add dev $swp1 ingress protocol ip pref 1 handle 101 \
+               flower skip_sw \
+               action police rate 100mbit burst 100k conform-exceed drop/ok
+       check_err $? "Failed to add rule with single police action"
+
+       tc filter del dev $swp1 ingress protocol ip pref 1 handle 101 flower
+
+       tc filter add dev $swp1 ingress protocol ip pref 1 handle 101 \
+               flower skip_sw \
+               action police rate 100mbit burst 100k conform-exceed drop/pipe \
+               action police rate 200mbit burst 200k conform-exceed drop/ok
+       check_fail $? "Incorrect success to add rule with two police actions"
+
+       tc qdisc del dev $swp1 clsact
+
+       log_test "multi police"
+}
+
 setup_prepare()
 {
        swp1=${NETIFS[p1]}
diff --git a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
new file mode 100644 (file)
index 0000000..ba1d53b
--- /dev/null
@@ -0,0 +1,786 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
+
+VNI_GEN=$RANDOM
+NSIM_ID=$((RANDOM % 1024))
+NSIM_DEV_SYS=/sys/bus/netdevsim/devices/netdevsim$NSIM_ID
+NSIM_DEV_DFS=/sys/kernel/debug/netdevsim/netdevsim$NSIM_ID
+NSIM_NETDEV=
+HAS_ETHTOOL=
+EXIT_STATUS=0
+num_cases=0
+num_errors=0
+
+clean_up_devs=( )
+
+function err_cnt {
+    echo "ERROR:" $@
+    EXIT_STATUS=1
+    ((num_errors++))
+    ((num_cases++))
+}
+
+function pass_cnt {
+    ((num_cases++))
+}
+
+function cleanup_tuns {
+    for dev in "${clean_up_devs[@]}"; do
+       [ -e /sys/class/net/$dev ] && ip link del dev $dev
+    done
+    clean_up_devs=( )
+}
+
+function cleanup_nsim {
+    if [ -e $NSIM_DEV_SYS ]; then
+       echo $NSIM_ID > /sys/bus/netdevsim/del_device
+    fi
+}
+
+function cleanup {
+    cleanup_tuns
+    cleanup_nsim
+}
+
+trap cleanup EXIT
+
+function new_vxlan {
+    local dev=$1
+    local dstport=$2
+    local lower=$3
+    local ipver=$4
+    local flags=$5
+
+    local group ipfl
+
+    [ "$ipver" != '6' ] && group=239.1.1.1 || group=fff1::1
+    [ "$ipver" != '6' ] || ipfl="-6"
+
+    [[ ! "$flags" =~ "external" ]] && flags="$flags id $((VNI_GEN++))"
+
+    ip $ipfl link add $dev type vxlan \
+       group $group \
+       dev $lower \
+       dstport $dstport \
+       $flags
+
+    ip link set dev $dev up
+
+    clean_up_devs=("${clean_up_devs[@]}" $dev)
+
+    check_tables
+}
+
+function new_geneve {
+    local dev=$1
+    local dstport=$2
+    local ipver=$3
+    local flags=$4
+
+    local group ipfl
+
+    [ "$ipver" != '6' ] && remote=1.1.1.2 || group=::2
+    [ "$ipver" != '6' ] || ipfl="-6"
+
+    [[ ! "$flags" =~ "external" ]] && flags="$flags vni $((VNI_GEN++))"
+
+    ip $ipfl link add $dev type geneve \
+       remote $remote  \
+       dstport $dstport \
+       $flags
+
+    ip link set dev $dev up
+
+    clean_up_devs=("${clean_up_devs[@]}" $dev)
+
+    check_tables
+}
+
+function del_dev {
+    local dev=$1
+
+    ip link del dev $dev
+    check_tables
+}
+
+# Helpers for netdevsim port/type encoding
+function mke {
+    local port=$1
+    local type=$2
+
+    echo $((port << 16 | type))
+}
+
+function pre {
+    local val=$1
+
+    echo -e "port: $((val >> 16))\ttype: $((val & 0xffff))"
+}
+
+function pre_ethtool {
+    local val=$1
+    local port=$((val >> 16))
+    local type=$((val & 0xffff))
+
+    case $type in
+       1)
+           type_name="vxlan"
+           ;;
+       2)
+           type_name="geneve"
+           ;;
+       4)
+           type_name="vxlan-gpe"
+           ;;
+       *)
+           type_name="bit X"
+           ;;
+    esac
+
+    echo "port $port, $type_name"
+}
+
+function check_table {
+    local path=$NSIM_DEV_DFS/ports/$port/udp_ports_table$1
+    local -n expected=$2
+    local last=$3
+
+    read -a have < $path
+
+    if [ ${#expected[@]} -ne ${#have[@]} ]; then
+       echo "check_table: BAD NUMBER OF ITEMS"
+       return 0
+    fi
+
+    for i in "${!expected[@]}"; do
+       if [ -n "$HAS_ETHTOOL" -a ${expected[i]} -ne 0 ]; then
+           pp_expected=`pre_ethtool ${expected[i]}`
+           ethtool --show-tunnels $NSIM_NETDEV | grep "$pp_expected" >/dev/null
+           if [ $? -ne 0 -a $last -ne 0 ]; then
+               err_cnt "ethtool table $1 on port $port: $pfx - $msg"
+               echo "       check_table: ethtool does not contain '$pp_expected'"
+               ethtool --show-tunnels $NSIM_NETDEV
+               return 0
+
+           fi
+       fi
+
+       if [ ${expected[i]} != ${have[i]} ]; then
+           if [ $last -ne 0 ]; then
+               err_cnt "table $1 on port $port: $pfx - $msg"
+               echo "       check_table: wrong entry $i"
+               echo "       expected: `pre ${expected[i]}`"
+               echo "       have:     `pre ${have[i]}`"
+               return 0
+           fi
+           return 1
+       fi
+    done
+
+    pass_cnt
+    return 0
+}
+
+function check_tables {
+    # Need retries in case we have workqueue making the changes
+    local retries=10
+
+    while ! check_table 0 exp0 $((retries == 0)); do
+       sleep 0.02
+       ((retries--))
+    done
+    while ! check_table 1 exp1 $((retries == 0)); do
+       sleep 0.02
+       ((retries--))
+    done
+}
+
+function print_table {
+    local path=$NSIM_DEV_DFS/ports/$port/udp_ports_table$1
+    read -a have < $path
+
+    tree $NSIM_DEV_DFS/
+
+    echo "Port $port table $1:"
+
+    for i in "${!have[@]}"; do
+       echo "    `pre ${have[i]}`"
+    done
+
+}
+
+function print_tables {
+    print_table 0
+    print_table 1
+}
+
+function get_netdev_name {
+    local -n old=$1
+
+    new=$(ls /sys/class/net)
+
+    for netdev in $new; do
+       for check in $old; do
+            [ $netdev == $check ] && break
+       done
+
+       if [ $netdev != $check ]; then
+           echo $netdev
+           break
+       fi
+    done
+}
+
+###
+### Code start
+###
+
+# Probe ethtool support
+ethtool -h | grep show-tunnels 2>&1 >/dev/null && HAS_ETHTOOL=y
+
+modprobe netdevsim
+
+# Basic test
+pfx="basic"
+
+for port in 0 1; do
+    old_netdevs=$(ls /sys/class/net)
+    if [ $port -eq 0 ]; then
+       echo $NSIM_ID > /sys/bus/netdevsim/new_device
+    else
+       echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
+       echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
+       echo 1 > $NSIM_DEV_SYS/new_port
+    fi
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
+
+    msg="new NIC device created"
+    exp0=( 0 0 0 0 )
+    exp1=( 0 0 0 0 )
+    check_tables
+
+    msg="VxLAN v4 devices"
+    exp0=( `mke 4789 1` 0 0 0 )
+    new_vxlan vxlan0 4789 $NSIM_NETDEV
+    new_vxlan vxlan1 4789 $NSIM_NETDEV
+
+    msg="VxLAN v4 devices go down"
+    exp0=( 0 0 0 0 )
+    ifconfig vxlan1 down
+    ifconfig vxlan0 down
+    check_tables
+
+    msg="VxLAN v6 devices"
+    exp0=( `mke 4789 1` 0 0 0 )
+    new_vxlan vxlanA 4789 $NSIM_NETDEV 6
+
+    for ifc in vxlan0 vxlan1; do
+       ifconfig $ifc up
+    done
+
+    new_vxlan vxlanB 4789 $NSIM_NETDEV 6
+
+    msg="another VxLAN v6 devices"
+    exp0=( `mke 4789 1` `mke 4790 1` 0 0 )
+    new_vxlan vxlanC 4790 $NSIM_NETDEV 6
+
+    msg="Geneve device"
+    exp1=( `mke 6081 2` 0 0 0 )
+    new_geneve gnv0 6081
+
+    msg="NIC device goes down"
+    ifconfig $NSIM_NETDEV down
+    if [ $port -eq 1 ]; then
+       exp0=( 0 0 0 0 )
+       exp1=( 0 0 0 0 )
+    fi
+    check_tables
+    msg="NIC device goes up again"
+    ifconfig $NSIM_NETDEV up
+    exp0=( `mke 4789 1` `mke 4790 1` 0 0 )
+    exp1=( `mke 6081 2` 0 0 0 )
+    check_tables
+
+    cleanup_tuns
+
+    msg="tunnels destroyed"
+    exp0=( 0 0 0 0 )
+    exp1=( 0 0 0 0 )
+    check_tables
+
+    modprobe -r geneve
+    modprobe -r vxlan
+    modprobe -r udp_tunnel
+
+    check_tables
+done
+
+modprobe -r netdevsim
+
+# Module tests
+pfx="module tests"
+
+if modinfo netdevsim | grep udp_tunnel >/dev/null; then
+    err_cnt "netdevsim depends on udp_tunnel"
+else
+    pass_cnt
+fi
+
+modprobe netdevsim
+
+old_netdevs=$(ls /sys/class/net)
+port=0
+echo $NSIM_ID > /sys/bus/netdevsim/new_device
+echo 0 > $NSIM_DEV_SYS/del_port
+echo 1000 > $NSIM_DEV_DFS/udp_ports_sleep
+echo 0 > $NSIM_DEV_SYS/new_port
+NSIM_NETDEV=`get_netdev_name old_netdevs`
+
+msg="create VxLANs"
+exp0=( 0 0 0 0 ) # sleep is longer than out wait
+new_vxlan vxlan0 10000 $NSIM_NETDEV
+
+modprobe -r vxlan
+modprobe -r udp_tunnel
+
+msg="remove tunnels"
+exp0=( 0 0 0 0 )
+check_tables
+
+msg="create VxLANs"
+exp0=( 0 0 0 0 ) # sleep is longer than out wait
+new_vxlan vxlan0 10000 $NSIM_NETDEV
+
+exp0=( 0 0 0 0 )
+
+modprobe -r netdevsim
+modprobe netdevsim
+
+# Overflow the table
+
+function overflow_table0 {
+    local pfx=$1
+
+    msg="create VxLANs 1/5"
+    exp0=( `mke 10000 1` 0 0 0 )
+    new_vxlan vxlan0 10000 $NSIM_NETDEV
+
+    msg="create VxLANs 2/5"
+    exp0=( `mke 10000 1` `mke 10001 1` 0 0 )
+    new_vxlan vxlan1 10001 $NSIM_NETDEV
+
+    msg="create VxLANs 3/5"
+    exp0=( `mke 10000 1` `mke 10001 1` `mke 10002 1` 0 )
+    new_vxlan vxlan2 10002 $NSIM_NETDEV
+
+    msg="create VxLANs 4/5"
+    exp0=( `mke 10000 1` `mke 10001 1` `mke 10002 1` `mke 10003 1` )
+    new_vxlan vxlan3 10003 $NSIM_NETDEV
+
+    msg="create VxLANs 5/5"
+    new_vxlan vxlan4 10004 $NSIM_NETDEV
+}
+
+function overflow_table1 {
+    local pfx=$1
+
+    msg="create GENEVE 1/5"
+    exp1=( `mke 20000 2` 0 0 0 )
+    new_geneve gnv0 20000
+
+    msg="create GENEVE 2/5"
+    exp1=( `mke 20000 2` `mke 20001 2` 0 0 )
+    new_geneve gnv1 20001
+
+    msg="create GENEVE 3/5"
+    exp1=( `mke 20000 2` `mke 20001 2` `mke 20002 2` 0 )
+    new_geneve gnv2 20002
+
+    msg="create GENEVE 4/5"
+    exp1=( `mke 20000 2` `mke 20001 2` `mke 20002 2` `mke 20003 2` )
+    new_geneve gnv3 20003
+
+    msg="create GENEVE 5/5"
+    new_geneve gnv4 20004
+}
+
+echo $NSIM_ID > /sys/bus/netdevsim/new_device
+echo 0 > $NSIM_DEV_SYS/del_port
+
+for port in 0 1; do
+    if [ $port -ne 0 ]; then
+       echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
+       echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
+    fi
+
+    echo $port > $NSIM_DEV_SYS/new_port
+    ifconfig $NSIM_NETDEV up
+
+    overflow_table0 "overflow NIC table"
+    overflow_table1 "overflow NIC table"
+
+    msg="replace VxLAN in overflow table"
+    exp0=( `mke 10000 1` `mke 10004 1` `mke 10002 1` `mke 10003 1` )
+    del_dev vxlan1
+
+    msg="vacate VxLAN in overflow table"
+    exp0=( `mke 10000 1` `mke 10004 1` 0 `mke 10003 1` )
+    del_dev vxlan2
+
+    msg="replace GENEVE in overflow table"
+    exp1=( `mke 20000 2` `mke 20004 2` `mke 20002 2` `mke 20003 2` )
+    del_dev gnv1
+
+    msg="vacate GENEVE in overflow table"
+    exp1=( `mke 20000 2` `mke 20004 2` 0 `mke 20003 2` )
+    del_dev gnv2
+
+    msg="table sharing - share"
+    exp1=( `mke 20000 2` `mke 20004 2` `mke 30001 4` `mke 20003 2` )
+    new_vxlan vxlanG0 30001 $NSIM_NETDEV 4 "gpe external"
+
+    msg="table sharing - overflow"
+    new_vxlan vxlanG1 30002 $NSIM_NETDEV 4 "gpe external"
+    msg="table sharing - overflow v6"
+    new_vxlan vxlanG2 30002 $NSIM_NETDEV 6 "gpe external"
+
+    exp1=( `mke 20000 2` `mke 30002 4` `mke 30001 4` `mke 20003 2` )
+    del_dev gnv4
+
+    msg="destroy NIC"
+    echo $port > $NSIM_DEV_SYS/del_port
+
+    cleanup_tuns
+    exp0=( 0 0 0 0 )
+    exp1=( 0 0 0 0 )
+done
+
+cleanup_nsim
+
+# Sync all
+pfx="sync all"
+
+echo $NSIM_ID > /sys/bus/netdevsim/new_device
+echo 0 > $NSIM_DEV_SYS/del_port
+echo 1 > $NSIM_DEV_DFS/udp_ports_sync_all
+
+for port in 0 1; do
+    if [ $port -ne 0 ]; then
+       echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
+       echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
+    fi
+
+    echo $port > $NSIM_DEV_SYS/new_port
+    ifconfig $NSIM_NETDEV up
+
+    overflow_table0 "overflow NIC table"
+    overflow_table1 "overflow NIC table"
+
+    msg="replace VxLAN in overflow table"
+    exp0=( `mke 10000 1` `mke 10004 1` `mke 10002 1` `mke 10003 1` )
+    del_dev vxlan1
+
+    msg="vacate VxLAN in overflow table"
+    exp0=( `mke 10000 1` `mke 10004 1` 0 `mke 10003 1` )
+    del_dev vxlan2
+
+    msg="replace GENEVE in overflow table"
+    exp1=( `mke 20000 2` `mke 20004 2` `mke 20002 2` `mke 20003 2` )
+    del_dev gnv1
+
+    msg="vacate GENEVE in overflow table"
+    exp1=( `mke 20000 2` `mke 20004 2` 0 `mke 20003 2` )
+    del_dev gnv2
+
+    msg="table sharing - share"
+    exp1=( `mke 20000 2` `mke 20004 2` `mke 30001 4` `mke 20003 2` )
+    new_vxlan vxlanG0 30001 $NSIM_NETDEV 4 "gpe external"
+
+    msg="table sharing - overflow"
+    new_vxlan vxlanG1 30002 $NSIM_NETDEV 4 "gpe external"
+    msg="table sharing - overflow v6"
+    new_vxlan vxlanG2 30002 $NSIM_NETDEV 6 "gpe external"
+
+    exp1=( `mke 20000 2` `mke 30002 4` `mke 30001 4` `mke 20003 2` )
+    del_dev gnv4
+
+    msg="destroy NIC"
+    echo $port > $NSIM_DEV_SYS/del_port
+
+    cleanup_tuns
+    exp0=( 0 0 0 0 )
+    exp1=( 0 0 0 0 )
+done
+
+cleanup_nsim
+
+# Destroy full NIC
+pfx="destroy full"
+
+echo $NSIM_ID > /sys/bus/netdevsim/new_device
+echo 0 > $NSIM_DEV_SYS/del_port
+
+for port in 0 1; do
+    if [ $port -ne 0 ]; then
+       echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
+       echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
+    fi
+
+    echo $port > $NSIM_DEV_SYS/new_port
+    ifconfig $NSIM_NETDEV up
+
+    overflow_table0 "destroy NIC"
+    overflow_table1 "destroy NIC"
+
+    msg="destroy NIC"
+    echo $port > $NSIM_DEV_SYS/del_port
+
+    cleanup_tuns
+    exp0=( 0 0 0 0 )
+    exp1=( 0 0 0 0 )
+done
+
+cleanup_nsim
+
+# IPv4 only
+pfx="IPv4 only"
+
+echo $NSIM_ID > /sys/bus/netdevsim/new_device
+echo 0 > $NSIM_DEV_SYS/del_port
+echo 1 > $NSIM_DEV_DFS/udp_ports_ipv4_only
+
+for port in 0 1; do
+    if [ $port -ne 0 ]; then
+       echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
+       echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
+    fi
+
+    echo $port > $NSIM_DEV_SYS/new_port
+    ifconfig $NSIM_NETDEV up
+
+    msg="create VxLANs v6"
+    new_vxlan vxlanA0 10000 $NSIM_NETDEV 6
+
+    msg="create VxLANs v6"
+    new_vxlan vxlanA1 10000 $NSIM_NETDEV 6
+
+    ip link set dev vxlanA0 down
+    ip link set dev vxlanA0 up
+    check_tables
+
+    msg="create VxLANs v4"
+    exp0=( `mke 10000 1` 0 0 0 )
+    new_vxlan vxlan0 10000 $NSIM_NETDEV
+
+    msg="down VxLANs v4"
+    exp0=( 0 0 0 0 )
+    ip link set dev vxlan0 down
+    check_tables
+
+    msg="up VxLANs v4"
+    exp0=( `mke 10000 1` 0 0 0 )
+    ip link set dev vxlan0 up
+    check_tables
+
+    msg="destroy VxLANs v4"
+    exp0=( 0 0 0 0 )
+    del_dev vxlan0
+
+    msg="recreate VxLANs v4"
+    exp0=( `mke 10000 1` 0 0 0 )
+    new_vxlan vxlan0 10000 $NSIM_NETDEV
+
+    del_dev vxlanA0
+    del_dev vxlanA1
+
+    msg="destroy NIC"
+    echo $port > $NSIM_DEV_SYS/del_port
+
+    cleanup_tuns
+    exp0=( 0 0 0 0 )
+    exp1=( 0 0 0 0 )
+done
+
+cleanup_nsim
+
+# Failures
+pfx="error injection"
+
+echo $NSIM_ID > /sys/bus/netdevsim/new_device
+echo 0 > $NSIM_DEV_SYS/del_port
+
+for port in 0 1; do
+    if [ $port -ne 0 ]; then
+       echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
+       echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
+    fi
+
+    echo $port > $NSIM_DEV_SYS/new_port
+    ifconfig $NSIM_NETDEV up
+
+    echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error
+
+    msg="1 - create VxLANs v6"
+    exp0=( 0 0 0 0 )
+    new_vxlan vxlanA0 10000 $NSIM_NETDEV 6
+
+    msg="1 - create VxLANs v4"
+    exp0=( `mke 10000 1` 0 0 0 )
+    new_vxlan vxlan0 10000 $NSIM_NETDEV
+
+    msg="1 - remove VxLANs v4"
+    del_dev vxlan0
+
+    msg="1 - remove VxLANs v6"
+    exp0=( 0 0 0 0 )
+    del_dev vxlanA0
+
+    msg="2 - create GENEVE"
+    exp1=( `mke 20000 2` 0 0 0 )
+    new_geneve gnv0 20000
+
+    msg="2 - destroy GENEVE"
+    echo 2 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error
+    exp1=( `mke 20000 2` 0 0 0 )
+    del_dev gnv0
+
+    msg="2 - create second GENEVE"
+    exp1=( 0 `mke 20001 2` 0 0 )
+    new_geneve gnv0 20001
+
+    msg="destroy NIC"
+    echo $port > $NSIM_DEV_SYS/del_port
+
+    cleanup_tuns
+    exp0=( 0 0 0 0 )
+    exp1=( 0 0 0 0 )
+done
+
+cleanup_nsim
+
+# netdev flags
+pfx="netdev flags"
+
+echo $NSIM_ID > /sys/bus/netdevsim/new_device
+echo 0 > $NSIM_DEV_SYS/del_port
+
+for port in 0 1; do
+    if [ $port -ne 0 ]; then
+       echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
+       echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
+    fi
+
+    echo $port > $NSIM_DEV_SYS/new_port
+    ifconfig $NSIM_NETDEV up
+
+    msg="create VxLANs v6"
+    exp0=( `mke 10000 1` 0 0 0 )
+    new_vxlan vxlanA0 10000 $NSIM_NETDEV 6
+
+    msg="create VxLANs v4"
+    new_vxlan vxlan0 10000 $NSIM_NETDEV
+
+    msg="turn off"
+    exp0=( 0 0 0 0 )
+    ethtool -K $NSIM_NETDEV rx-udp_tunnel-port-offload off
+    check_tables
+
+    msg="turn on"
+    exp0=( `mke 10000 1` 0 0 0 )
+    ethtool -K $NSIM_NETDEV rx-udp_tunnel-port-offload on
+    check_tables
+
+    msg="remove both"
+    del_dev vxlanA0
+    exp0=( 0 0 0 0 )
+    del_dev vxlan0
+    check_tables
+
+    ethtool -K $NSIM_NETDEV rx-udp_tunnel-port-offload off
+
+    msg="create VxLANs v4 - off"
+    exp0=( 0 0 0 0 )
+    new_vxlan vxlan0 10000 $NSIM_NETDEV
+
+    msg="created off - turn on"
+    exp0=( `mke 10000 1` 0 0 0 )
+    ethtool -K $NSIM_NETDEV rx-udp_tunnel-port-offload on
+    check_tables
+
+    msg="destroy NIC"
+    echo $port > $NSIM_DEV_SYS/del_port
+
+    cleanup_tuns
+    exp0=( 0 0 0 0 )
+    exp1=( 0 0 0 0 )
+done
+
+cleanup_nsim
+
+# device initiated reset
+pfx="reset notification"
+
+echo $NSIM_ID > /sys/bus/netdevsim/new_device
+echo 0 > $NSIM_DEV_SYS/del_port
+
+for port in 0 1; do
+    if [ $port -ne 0 ]; then
+       echo 1 > $NSIM_DEV_DFS/udp_ports_open_only
+       echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
+    fi
+
+    echo $port > $NSIM_DEV_SYS/new_port
+    ifconfig $NSIM_NETDEV up
+
+    msg="create VxLANs v6"
+    exp0=( `mke 10000 1` 0 0 0 )
+    new_vxlan vxlanA0 10000 $NSIM_NETDEV 6
+
+    msg="create VxLANs v4"
+    new_vxlan vxlan0 10000 $NSIM_NETDEV
+
+    echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset
+    check_tables
+
+    msg="NIC device goes down"
+    ifconfig $NSIM_NETDEV down
+    if [ $port -eq 1 ]; then
+       exp0=( 0 0 0 0 )
+       exp1=( 0 0 0 0 )
+    fi
+    check_tables
+
+    echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset
+    check_tables
+
+    msg="NIC device goes up again"
+    ifconfig $NSIM_NETDEV up
+    exp0=( `mke 10000 1` 0 0 0 )
+    check_tables
+
+    msg="remove both"
+    del_dev vxlanA0
+    exp0=( 0 0 0 0 )
+    del_dev vxlan0
+    check_tables
+
+    echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset
+    check_tables
+
+    msg="destroy NIC"
+    echo $port > $NSIM_DEV_SYS/del_port
+
+    cleanup_tuns
+    exp0=( 0 0 0 0 )
+    exp1=( 0 0 0 0 )
+done
+
+modprobe -r netdevsim
+
+if [ $num_errors -eq 0 ]; then
+    echo "PASSED all $num_cases checks"
+else
+    echo "FAILED $num_errors/$num_cases checks"
+fi
+
+exit $EXIT_STATUS
index a4605b5..8ec1922 100755 (executable)
@@ -263,10 +263,16 @@ CASENO=0
 
 testcase() { # testfile
   CASENO=$((CASENO+1))
-  desc=`grep "^#[ \t]*description:" $1 | cut -f2 -d:`
+  desc=`grep "^#[ \t]*description:" $1 | cut -f2- -d:`
   prlog -n "[$CASENO]$INSTANCE$desc"
 }
 
+checkreq() { # testfile
+  requires=`grep "^#[ \t]*requires:" $1 | cut -f2- -d:`
+  # Use eval to pass quoted-patterns correctly.
+  eval check_requires "$requires"
+}
+
 test_on_instance() { # testfile
   grep -q "^#[ \t]*flags:.*instance" $1
 }
@@ -356,7 +362,8 @@ trap 'SIG_RESULT=$XFAIL' $SIG_XFAIL
 
 __run_test() { # testfile
   # setup PID and PPID, $$ is not updated.
-  (cd $TRACING_DIR; read PID _ < /proc/self/stat; set -e; set -x; initialize_ftrace; . $1)
+  (cd $TRACING_DIR; read PID _ < /proc/self/stat; set -e; set -x;
+   checkreq $1; initialize_ftrace; . $1)
   [ $? -ne 0 ] && kill -s $SIG_FAIL $SIG_PID
 }
 
index 3b1f45e..13b4dab 100644 (file)
@@ -1,9 +1,8 @@
 #!/bin/sh
 # description: Snapshot and tracing setting
+# requires: snapshot
 # flags: instance
 
-[ ! -f snapshot ] && exit_unsupported
-
 echo "Set tracing off"
 echo 0 > tracing_on
 
index 5058fbc..435d07b 100644 (file)
@@ -1,10 +1,9 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: trace_pipe and trace_marker
+# requires: trace_marker
 # flags: instance
 
-[ ! -f trace_marker ] && exit_unsupported
-
 echo "test input 1" > trace_marker
 
 : "trace interface never consume the ring buffer"
index 801ecb6..e52e470 100644 (file)
@@ -1,6 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Test ftrace direct functions against kprobes
+# requires: kprobe_events
 
 rmmod ftrace-direct ||:
 if ! modprobe ftrace-direct ; then
@@ -8,11 +9,6 @@ if ! modprobe ftrace-direct ; then
   exit_unresolved;
 fi
 
-if [ ! -f kprobe_events ]; then
-       echo "No kprobe_events file -please build CONFIG_KPROBE_EVENTS"
-       exit_unresolved;
-fi
-
 echo "Let the module run a little"
 sleep 1
 
index c6d8387..68550f9 100644 (file)
@@ -1,11 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Generic dynamic event - add/remove kprobe events
-
-[ -f dynamic_events ] || exit_unsupported
-
-grep -q "place: \[<module>:\]<symbol>" README || exit_unsupported
-grep -q "place (kretprobe): \[<module>:\]<symbol>" README || exit_unsupported
+# requires: dynamic_events "place: [<module>:]<symbol>":README "place (kretprobe): [<module>:]<symbol>":README
 
 echo 0 > events/enable
 echo > dynamic_events
index 62b77b5..2b94611 100644 (file)
@@ -1,10 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Generic dynamic event - add/remove synthetic events
-
-[ -f dynamic_events ] || exit_unsupported
-
-grep -q "s:\[synthetic/\]" README || exit_unsupported
+# requires: dynamic_events "s:[synthetic/]":README
 
 echo 0 > events/enable
 echo > dynamic_events
index e084210..c969be9 100644 (file)
@@ -1,16 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Generic dynamic event - selective clear (compatibility)
-
-[ -f dynamic_events ] || exit_unsupported
-
-grep -q "place: \[<module>:\]<symbol>" README || exit_unsupported
-grep -q "place (kretprobe): \[<module>:\]<symbol>" README || exit_unsupported
-
-grep -q "s:\[synthetic/\]" README || exit_unsupported
-
-[ -f synthetic_events ] || exit_unsupported
-[ -f kprobe_events ] || exit_unsupported
+# requires: dynamic_events kprobe_events synthetic_events "place: [<module>:]<symbol>":README "place (kretprobe): [<module>:]<symbol>":README "s:[synthetic/]":README
 
 echo 0 > events/enable
 echo > dynamic_events
index 901922e..16d543e 100644 (file)
@@ -1,13 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Generic dynamic event - generic clear event
-
-[ -f dynamic_events ] || exit_unsupported
-
-grep -q "place: \[<module>:\]<symbol>" README || exit_unsupported
-grep -q "place (kretprobe): \[<module>:\]<symbol>" README || exit_unsupported
-
-grep -q "s:\[synthetic/\]" README || exit_unsupported
+# requires: dynamic_events "place: [<module>:]<symbol>":README "place (kretprobe): [<module>:]<symbol>":README "s:[synthetic/]":README
 
 echo 0 > events/enable
 echo > dynamic_events
index dfb0d51..cfe5bd2 100644 (file)
@@ -1,6 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event tracing - enable/disable with event level files
+# requires: set_event events/sched
 # flags: instance
 
 do_reset() {
@@ -13,11 +14,6 @@ fail() { #msg
     exit_fail
 }
 
-if [ ! -f set_event -o ! -d events/sched ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
 echo 'sched:sched_switch' > set_event
 
 yield
index f0f366f..e6eb78f 100644 (file)
@@ -1,6 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event tracing - restricts events based on pid notrace filtering
+# requires: set_event events/sched set_event_pid set_event_notrace_pid
 # flags: instance
 
 do_reset() {
@@ -56,16 +57,6 @@ enable_events() {
     echo 1 > tracing_on
 }
 
-if [ ! -f set_event -o ! -d events/sched ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f set_event_pid -o ! -f set_event_notrace_pid ]; then
-    echo "event pid notrace filtering is not supported"
-    exit_unsupported
-fi
-
 echo 0 > options/event-fork
 
 do_reset
index f9cb214..7f5f97d 100644 (file)
@@ -1,6 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event tracing - restricts events based on pid
+# requires: set_event set_event_pid events/sched
 # flags: instance
 
 do_reset() {
@@ -16,16 +17,6 @@ fail() { #msg
     exit_fail
 }
 
-if [ ! -f set_event -o ! -d events/sched ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f set_event_pid ]; then
-    echo "event pid filtering is not supported"
-    exit_unsupported
-fi
-
 echo 0 > options/event-fork
 
 echo 1 > events/sched/sched_switch/enable
index 83a8c57..b1ede62 100644 (file)
@@ -1,6 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event tracing - enable/disable with subsystem level files
+# requires: set_event events/sched/enable
 # flags: instance
 
 do_reset() {
@@ -13,11 +14,6 @@ fail() { #msg
     exit_fail
 }
 
-if [ ! -f set_event -o ! -d events/sched ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
 echo 'sched:*' > set_event
 
 yield
index 84d7bda..93c10ea 100644 (file)
@@ -1,6 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event tracing - enable/disable with top level files
+# requires: available_events set_event events/enable
 
 do_reset() {
     echo > set_event
@@ -12,11 +13,6 @@ fail() { #msg
     exit_fail
 }
 
-if [ ! -f available_events -o ! -f set_event -o ! -d events ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
 echo '*:*' > set_event
 
 yield
index f598538..cf3ea42 100644 (file)
@@ -1,17 +1,11 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: ftrace - function graph filters with stack tracer
+# requires: stack_trace set_ftrace_filter function_graph:tracer
 
 # Make sure that function graph filtering works, and is not
 # affected by other tracers enabled (like stack tracer)
 
-if ! grep -q function_graph available_tracers; then
-    echo "no function graph tracer configured"
-    exit_unsupported
-fi
-
-check_filter_file set_ftrace_filter
-
 do_reset() {
     if [ -e /proc/sys/kernel/stack_tracer_enabled ]; then
            echo 0 > /proc/sys/kernel/stack_tracer_enabled
@@ -37,12 +31,6 @@ fi
 
 echo function_graph > current_tracer
 
-if [ ! -f stack_trace ]; then
-    echo "Stack tracer not configured"
-    do_reset
-    exit_unsupported;
-fi
-
 echo "Now testing with stack tracer"
 
 echo 1 > /proc/sys/kernel/stack_tracer_enabled
index d610f47..b3ccdae 100644 (file)
@@ -1,16 +1,10 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: ftrace - function graph filters
+# requires: set_ftrace_filter function_graph:tracer
 
 # Make sure that function graph filtering works
 
-if ! grep -q function_graph available_tracers; then
-    echo "no function graph tracer configured"
-    exit_unsupported
-fi
-
-check_filter_file set_ftrace_filter
-
 fail() { # msg
     echo $1
     exit_fail
index 28936f4..4b994b6 100644 (file)
@@ -1,16 +1,10 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: ftrace - function glob filters
+# requires: set_ftrace_filter function:tracer
 
 # Make sure that function glob matching filter works.
 
-if ! grep -q function available_tracers; then
-    echo "no function tracer configured"
-    exit_unsupported
-fi
-
-check_filter_file set_ftrace_filter
-
 disable_tracing
 clear_trace
 
index 71db68a..acb17ce 100644 (file)
@@ -1,22 +1,11 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: ftrace - function pid notrace filters
+# requires: set_ftrace_notrace_pid set_ftrace_filter function:tracer
 # flags: instance
 
 # Make sure that function pid matching filter with notrace works.
 
-if ! grep -q function available_tracers; then
-    echo "no function tracer configured"
-    exit_unsupported
-fi
-
-if [ ! -f set_ftrace_notrace_pid ]; then
-    echo "set_ftrace_notrace_pid not found? Is function tracer not set?"
-    exit_unsupported
-fi
-
-check_filter_file set_ftrace_filter
-
 do_function_fork=1
 
 if [ ! -f options/function-fork ]; then
index d58403c..9f0a968 100644 (file)
@@ -1,23 +1,12 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: ftrace - function pid filters
+# requires: set_ftrace_pid set_ftrace_filter function:tracer
 # flags: instance
 
 # Make sure that function pid matching filter works.
 # Also test it on an instance directory
 
-if ! grep -q function available_tracers; then
-    echo "no function tracer configured"
-    exit_unsupported
-fi
-
-if [ ! -f set_ftrace_pid ]; then
-    echo "set_ftrace_pid not found? Is function tracer not set?"
-    exit_unsupported
-fi
-
-check_filter_file set_ftrace_filter
-
 do_function_fork=1
 
 if [ ! -f options/function-fork ]; then
index b2aff78..0f41e44 100644 (file)
@@ -1,10 +1,9 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: ftrace - stacktrace filter command
+# requires: set_ftrace_filter
 # flags: instance
 
-check_filter_file set_ftrace_filter
-
 echo _do_fork:stacktrace >> set_ftrace_filter
 
 grep -q "_do_fork:stacktrace:unlimited" set_ftrace_filter
index 71fa3f4..0c6cf77 100644 (file)
@@ -1,6 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: ftrace - function trace with cpumask
+# requires: function:tracer
 
 if ! which nproc ; then
   nproc() {
@@ -15,11 +16,6 @@ if [ $NP -eq 1 ] ;then
   exit_unresolved
 fi
 
-if ! grep -q "function" available_tracers ; then
-  echo "Function trace is not enabled"
-  exit_unsupported
-fi
-
 ORIG_CPUMASK=`cat tracing_cpumask`
 
 do_reset() {
index e9b1fd5..3145b0f 100644 (file)
@@ -3,15 +3,14 @@
 # description: ftrace - test for function event triggers
 # flags: instance
 #
+# The triggers are set within the set_ftrace_filter file
+# requires: set_ftrace_filter
+#
 # Ftrace allows to add triggers to functions, such as enabling or disabling
 # tracing, enabling or disabling trace events, or recording a stack trace
 # within the ring buffer.
 #
 # This test is designed to test event triggers
-#
-
-# The triggers are set within the set_ftrace_filter file
-check_filter_file set_ftrace_filter
 
 do_reset() {
     reset_ftrace_filter
index 1a4b4a4..37c8feb 100644 (file)
@@ -1,8 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: ftrace - function trace on module
-
-check_filter_file set_ftrace_filter
+# requires: set_ftrace_filter
 
 : "mod: allows to filter a non exist function"
 echo 'non_exist_func:mod:non_exist_module' > set_ftrace_filter
index 0d50105..4daeffb 100644 (file)
@@ -1,8 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: ftrace - function profiling
-
-[ ! -f function_profile_enabled ] && exit_unsupported
+# requires: function_profile_enabled
 
 : "Enable function profile"
 echo 1 > function_profile_enabled
index a3dadb6..1dbd766 100644 (file)
@@ -1,6 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: ftrace - function profiler with function tracing
+# requires: function_profile_enabled set_ftrace_filter function_graph:tracer
 
 # There was a bug after a rewrite of the ftrace infrastructure that
 # caused the function_profiler not to be able to run with the function
 # This test triggers those bugs on those kernels.
 #
 # We need function_graph and profiling to to run this test
-if ! grep -q function_graph available_tracers; then
-    echo "no function graph tracer configured"
-    exit_unsupported;
-fi
-
-check_filter_file set_ftrace_filter
-
-if [ ! -f function_profile_enabled ]; then
-    echo "function_profile_enabled not found, function profiling enabled?"
-    exit_unsupported
-fi
 
 fail() { # mesg
     echo $1
index 70bad44..e96e279 100644 (file)
@@ -2,6 +2,9 @@
 # SPDX-License-Identifier: GPL-2.0
 # description: ftrace - test reading of set_ftrace_filter
 #
+# The triggers are set within the set_ftrace_filter file
+# requires: set_ftrace_filter
+#
 # The set_ftrace_filter file of ftrace is used to list functions as well as
 # triggers (probes) attached to functions. The code to read this file is not
 # straight forward and has had various bugs in the past. This test is designed
@@ -9,9 +12,6 @@
 # file in various ways (cat vs dd).
 #
 
-# The triggers are set within the set_ftrace_filter file
-check_filter_file set_ftrace_filter
-
 fail() { # mesg
     echo $1
     exit_fail
index 51e9e80..61264e4 100644 (file)
@@ -1,15 +1,9 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: ftrace - Max stack tracer
+# requires: stack_trace stack_trace_filter
 # Test the basic function of max-stack usage tracing
 
-if [ ! -f stack_trace ]; then
-  echo "Max stack tracer is not supported - please make CONFIG_STACK_TRACER=y"
-  exit_unsupported
-fi
-
-check_filter_file stack_trace_filter
-
 echo > stack_trace_filter
 echo 0 > stack_max_size
 echo 1 > /proc/sys/kernel/stack_tracer_enabled
index 3ed173f..aee2228 100644 (file)
@@ -3,6 +3,9 @@
 # description: ftrace - test for function traceon/off triggers
 # flags: instance
 #
+# The triggers are set within the set_ftrace_filter file
+# requires: set_ftrace_filter
+#
 # Ftrace allows to add triggers to functions, such as enabling or disabling
 # tracing, enabling or disabling trace events, or recording a stack trace
 # within the ring buffer.
@@ -10,9 +13,6 @@
 # This test is designed to test enabling and disabling tracing triggers
 #
 
-# The triggers are set within the set_ftrace_filter file
-check_filter_file set_ftrace_filter
-
 fail() { # mesg
     echo $1
     exit_fail
index 2346582..6c19062 100644 (file)
@@ -1,21 +1,15 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: ftrace - test tracing error log support
+# event tracing is currently the only ftrace tracer that uses the
+# tracing error_log, hence this check
+# requires: set_event error_log
 
 fail() { #msg
     echo $1
     exit_fail
 }
 
-# event tracing is currently the only ftrace tracer that uses the
-# tracing error_log, hence this check
-if [ ! -f set_event ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-[ -f error_log ] || exit_unsupported
-
 ftrace_errlog_check 'event filter parse error' '((sig >= 10 && sig < 15) || dsig ^== 17) && comm != bash' 'events/signal/signal_generate/filter'
 
 exit 0
index 697c77e..c5dec55 100644 (file)
@@ -1,10 +1,3 @@
-check_filter_file() { # check filter file introduced by dynamic ftrace
-    if [ ! -f "$1" ]; then
-        echo "$1 not found? Is dynamic ftrace not set?"
-        exit_unsupported
-    fi
-}
-
 clear_trace() { # reset trace output
     echo > trace
 }
@@ -113,6 +106,27 @@ initialize_ftrace() { # Reset ftrace to initial-state
     enable_tracing
 }
 
+check_requires() { # Check required files and tracers
+    for i in "$@" ; do
+        r=${i%:README}
+        t=${i%:tracer}
+        if [ $t != $i ]; then
+            if ! grep -wq $t available_tracers ; then
+                echo "Required tracer $t is not configured."
+                exit_unsupported
+            fi
+        elif [ $r != $i ]; then
+            if ! grep -Fq "$r" README ; then
+                echo "Required feature pattern \"$r\" is not in README."
+                exit_unsupported
+            fi
+        elif [ ! -e $i ]; then
+            echo "Required feature interface $i doesn't exist."
+            exit_unsupported
+        fi
+    done
+}
+
 LOCALHOST=127.0.0.1
 
 yield() {
index 4fa0f79..0eb47fb 100644 (file)
@@ -1,11 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Test creation and deletion of trace instances while setting an event
-
-if [ ! -d instances ] ; then
-    echo "no instance directory with this kernel"
-    exit_unsupported;
-fi
+# requires: instances
 
 fail() { # mesg
     rmdir foo 2>/dev/null
index b846512..607521d 100644 (file)
@@ -1,11 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Test creation and deletion of trace instances
-
-if [ ! -d instances ] ; then
-    echo "no instance directory with this kernel"
-    exit_unsupported;
-fi
+# requires: instances
 
 fail() { # mesg
     rmdir x y z 2>/dev/null
index bb1eb5a..eba858c 100644 (file)
@@ -1,8 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Kprobe dynamic event - adding and removing
-
-[ -f kprobe_events ] || exit_unsupported # this is configurable
+# requires: kprobe_events
 
 echo p:myevent _do_fork > kprobe_events
 grep myevent kprobe_events
index 442c1a8..d10bf4f 100644 (file)
@@ -1,8 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Kprobe dynamic event - busy event check
-
-[ -f kprobe_events ] || exit_unsupported
+# requires: kprobe_events
 
 echo p:myevent _do_fork > kprobe_events
 test -d events/kprobes/myevent
index bcdecf8..61f2ac4 100644 (file)
@@ -1,8 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Kprobe dynamic event with arguments
-
-[ -f kprobe_events ] || exit_unsupported # this is configurable
+# requires: kprobe_events
 
 echo 'p:testprobe _do_fork $stack $stack0 +0($stack)' > kprobe_events
 grep testprobe kprobe_events | grep -q 'arg1=\$stack arg2=\$stack0 arg3=+0(\$stack)'
index 15c1f70..05aaeed 100644 (file)
@@ -1,8 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Kprobe event with comm arguments
-
-[ -f kprobe_events ] || exit_unsupported # this is configurable
+# requires: kprobe_events
 
 grep -A1 "fetcharg:" README | grep -q "\$comm" || exit_unsupported # this is too old
 
index 46e7744..b5fa054 100644 (file)
@@ -1,8 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Kprobe event string type argument
-
-[ -f kprobe_events ] || exit_unsupported # this is configurable
+# requires: kprobe_events
 
 case `uname -m` in
 x86_64)
index 2b6dd33..b8c75a3 100644 (file)
@@ -1,8 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Kprobe event symbol argument
-
-[ -f kprobe_events ] || exit_unsupported # this is configurable
+# requires: kprobe_events
 
 SYMBOL="linux_proc_banner"
 
index 6f0f199..474ca1a 100644 (file)
@@ -1,10 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Kprobe event argument syntax
-
-[ -f kprobe_events ] || exit_unsupported # this is configurable
-
-grep "x8/16/32/64" README > /dev/null || exit_unsupported # version issue
+# requires: kprobe_events "x8/16/32/64":README
 
 PROBEFUNC="vfs_read"
 GOODREG=
index 81490ec..0610e0b 100644 (file)
@@ -1,10 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Kprobes event arguments with types
-
-[ -f kprobe_events ] || exit_unsupported # this is configurable
-
-grep "x8/16/32/64" README > /dev/null || exit_unsupported # version issue
+# requires: kprobe_events "x8/16/32/64":README
 
 gen_event() { # Bitsize
   echo "p:testprobe _do_fork \$stack0:s$1 \$stack0:u$1 \$stack0:x$1 \$stack0:b4@4/$1"
index 0f60087..a30a9c0 100644 (file)
@@ -1,10 +1,8 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Kprobe event user-memory access
+# requires: kprobe_events '$arg<N>':README
 
-[ -f kprobe_events ] || exit_unsupported # this is configurable
-
-grep -q '\$arg<N>' README || exit_unresolved # depends on arch
 grep -A10 "fetcharg:" README | grep -q 'ustring' || exit_unsupported
 grep -A10 "fetcharg:" README | grep -q '\[u\]<offset>' || exit_unsupported
 
index 3ff2367..1f6981e 100644 (file)
@@ -1,8 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Kprobe event auto/manual naming
-
-[ -f kprobe_events ] || exit_unsupported # this is configurable
+# requires: kprobe_events
 
 :;: "Add an event on function without name" ;:
 
index df50728..81d8b58 100644 (file)
@@ -1,11 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Kprobe dynamic event with function tracer
-
-[ -f kprobe_events ] || exit_unsupported # this is configurable
-grep "function" available_tracers || exit_unsupported # this is configurable
-
-check_filter_file set_ftrace_filter
+# requires: kprobe_events stack_trace_filter function:tracer
 
 # prepare
 echo nop > current_tracer
index d861bd7..7e74ee1 100644 (file)
@@ -1,8 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Kprobe dynamic event - probing module
-
-[ -f kprobe_events ] || exit_unsupported # this is configurable
+# requires: kprobe_events
 
 rmmod trace-printk ||:
 if ! modprobe trace-printk ; then
index 44494ba..366b7e1 100644 (file)
@@ -1,10 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Create/delete multiprobe on kprobe event
-
-[ -f kprobe_events ] || exit_unsupported
-
-grep -q "Create/append/" README || exit_unsupported
+# requires: kprobe_events "Create/append/":README
 
 # Choose 2 symbols for target
 SYM1=_do_fork
index eb0f4ab..b4d8346 100644 (file)
@@ -1,10 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Kprobe event parser error log check
-
-[ -f kprobe_events ] || exit_unsupported # this is configurable
-
-[ -f error_log ] || exit_unsupported
+# requires: kprobe_events error_log
 
 check_error() { # command-with-error-pos-by-^
     ftrace_errlog_check 'trace_kprobe' "$1" 'kprobe_events'
index ac9ab4a..523fde6 100644 (file)
@@ -1,8 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Kretprobe dynamic event with arguments
-
-[ -f kprobe_events ] || exit_unsupported # this is configurable
+# requires: kprobe_events
 
 # Add new kretprobe event
 echo 'r:testprobe2 _do_fork $retval' > kprobe_events
index 8e05b17..4f0b268 100644 (file)
@@ -1,9 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Kretprobe dynamic event with maxactive
-
-[ -f kprobe_events ] || exit_unsupported # this is configurable
-grep -q 'r\[maxactive\]' README || exit_unsupported # this is older version
+# requires: kprobe_events 'r[maxactive]':README
 
 # Test if we successfully reject unknown messages
 if echo 'a:myprobeaccept inet_csk_accept' > kprobe_events; then false; else true; fi
index 6e3dbe5..312d237 100644 (file)
@@ -1,8 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Register/unregister many kprobe events
-
-[ -f kprobe_events ] || exit_unsupported # this is configurable
+# requires: kprobe_events
 
 # ftrace fentry skip size depends on the machine architecture.
 # Currently HAVE_KPROBES_ON_FTRACE defined on x86 and powerpc64le
index a902aa0..624269c 100644 (file)
@@ -1,8 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Kprobe events - probe points
-
-[ -f kprobe_events ] || exit_unsupported # this is configurable
+# requires: kprobe_events
 
 TARGET_FUNC=tracefs_create_dir
 
index 0384b52..ff6c44a 100644 (file)
@@ -1,8 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Kprobe dynamic event - adding and removing
-
-[ -f kprobe_events ] || exit_unsupported # this is configurable
+# requires: kprobe_events
 
 ! grep -q 'myevent' kprobe_profile
 echo p:myevent _do_fork > kprobe_events
index 14229d5..7b5b60c 100644 (file)
@@ -1,10 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Uprobe event parser error log check
-
-[ -f uprobe_events ] || exit_unsupported # this is configurable
-
-[ -f error_log ] || exit_unsupported
+# requires: uprobe_events error_log
 
 check_error() { # command-with-error-pos-by-^
     ftrace_errlog_check 'trace_uprobe' "$1" 'uprobe_events'
index 2b82c80..22bff12 100644 (file)
@@ -1,6 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: test for the preemptirqsoff tracer
+# requires: preemptoff:tracer irqsoff:tracer
 
 MOD=preemptirq_delay_test
 
@@ -27,9 +28,6 @@ unres() { #msg
 modprobe $MOD || unres "$MOD module not available"
 rmmod $MOD
 
-grep -q "preemptoff" available_tracers || unsup "preemptoff tracer not enabled"
-grep -q "irqsoff" available_tracers || unsup "irqsoff tracer not enabled"
-
 reset_tracer
 
 # Simulate preemptoff section for half a second couple of times
index e1a5d14..2cd8947 100644 (file)
@@ -1,6 +1,10 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: %HERE DESCRIBE WHAT THIS DOES%
+# requires: %HERE LIST THE REQUIRED FILES, TRACERS OR README-STRINGS%
+# The required tracer needs :tracer suffix, e.g. function:tracer
+# The required README string needs :README suffix, e.g. "x8/16/32/64":README
+# and the README string is treated as a fixed-string instead of regexp pattern.
 # you have to add ".tc" extention for your testcase file
 # Note that all tests are run with "errexit" option.
 
index b0893d7..11be10e 100644 (file)
@@ -1,17 +1,13 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Test wakeup tracer
+# requires: wakeup:tracer
 
 if ! which chrt ; then
   echo "chrt is not found. This test requires nice command."
   exit_unresolved
 fi
 
-if ! grep -wq "wakeup" available_tracers ; then
-  echo "wakeup tracer is not supported"
-  exit_unsupported
-fi
-
 echo wakeup > current_tracer
 echo 1 > tracing_on
 echo 0 > tracing_max_latency
index b9b6669..3a77198 100644 (file)
@@ -1,17 +1,13 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: Test wakeup RT tracer
+# requires: wakeup_rt:tracer
 
 if ! which chrt ; then
   echo "chrt is not found. This test requires chrt command."
   exit_unresolved
 fi
 
-if ! grep -wq "wakeup_rt" available_tracers ; then
-  echo "wakeup_rt tracer is not supported"
-  exit_unsupported
-fi
-
 echo wakeup_rt > current_tracer
 echo 1 > tracing_on
 echo 0 > tracing_max_latency
index 3f2aee1..1590d6b 100644 (file)
@@ -1,24 +1,13 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test inter-event histogram trigger expected fail actions
+# requires: set_event snapshot "snapshot()":README
 
 fail() { #msg
     echo $1
     exit_fail
 }
 
-if [ ! -f set_event ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f snapshot ]; then
-    echo "snapshot is not supported"
-    exit_unsupported
-fi
-
-grep -q "snapshot()" README || exit_unsupported # version issue
-
 echo "Test expected snapshot action failure"
 
 echo 'hist:keys=comm:onmatch(sched.sched_wakeup).snapshot()' >> events/sched/sched_waking/trigger && exit_fail
index e232059..41119e0 100644 (file)
@@ -1,27 +1,13 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test field variable support
+# requires: set_event synthetic_events events/sched/sched_process_fork/hist
 
 fail() { #msg
     echo $1
     exit_fail
 }
 
-if [ ! -f set_event ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f synthetic_events ]; then
-    echo "synthetic event is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/sched/sched_process_fork/hist ]; then
-    echo "hist trigger is not supported"
-    exit_unsupported
-fi
-
 echo "Test field variable support"
 
 echo 'wakeup_latency u64 lat; pid_t pid; int prio; char comm[16]' > synthetic_events
index 07cfcb8..7449a4b 100644 (file)
@@ -1,27 +1,13 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test inter-event combined histogram trigger
+# requires: set_event synthetic_events events/sched/sched_process_fork/hist
 
 fail() { #msg
     echo $1
     exit_fail
 }
 
-if [ ! -f set_event ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f synthetic_events ]; then
-    echo "synthetic event is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/sched/sched_process_fork/hist ]; then
-    echo "hist trigger is not supported"
-    exit_unsupported
-fi
-
 echo "Test create synthetic event"
 
 echo 'waking_latency  u64 lat pid_t pid' > synthetic_events
index 73e413c..3ad6e3f 100644 (file)
@@ -1,27 +1,13 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test multiple actions on hist trigger
+# requires: set_event synthetic_events events/sched/sched_process_fork/hist
 
 fail() { #msg
     echo $1
     exit_fail
 }
 
-if [ ! -f set_event ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f synthetic_events ]; then
-    echo "synthetic event is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/sched/sched_process_fork/hist ]; then
-    echo "hist trigger is not supported"
-    exit_unsupported
-fi
-
 echo "Test multiple actions on hist trigger"
 echo 'wakeup_latency u64 lat; pid_t pid' >> synthetic_events
 TRIGGER1=events/sched/sched_wakeup/trigger
index c80007a..adaabb8 100644 (file)
@@ -1,19 +1,13 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test inter-event histogram trigger onchange action
+# requires: set_event "onchange(var)":README
 
 fail() { #msg
     echo $1
     exit_fail
 }
 
-if [ ! -f set_event ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-grep -q "onchange(var)" README || exit_unsupported # version issue
-
 echo "Test onchange action"
 
 echo 'hist:keys=comm:newprio=prio:onchange($newprio).save(comm,prio) if comm=="ping"' >> events/sched/sched_waking/trigger
index ebe0ad8..20e3947 100644 (file)
@@ -1,27 +1,13 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test inter-event histogram trigger onmatch action
+# requires: set_event synthetic_events events/sched/sched_process_fork/hist
 
 fail() { #msg
     echo $1
     exit_fail
 }
 
-if [ ! -f set_event ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f synthetic_events ]; then
-    echo "synthetic event is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/sched/sched_process_fork/hist ]; then
-    echo "hist trigger is not supported"
-    exit_unsupported
-fi
-
 echo "Test create synthetic event"
 
 echo 'wakeup_latency  u64 lat pid_t pid char comm[16]' > synthetic_events
index 2a2ef76..f4b03ab 100644 (file)
@@ -1,27 +1,13 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test inter-event histogram trigger onmatch-onmax action
+# requires: set_event synthetic_events events/sched/sched_process_fork/hist
 
 fail() { #msg
     echo $1
     exit_fail
 }
 
-if [ ! -f set_event ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f synthetic_events ]; then
-    echo "synthetic event is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/sched/sched_process_fork/hist ]; then
-    echo "hist trigger is not supported"
-    exit_unsupported
-fi
-
 echo "Test create synthetic event"
 
 echo 'wakeup_latency  u64 lat pid_t pid char comm[16]' > synthetic_events
index 98d73bf..71c9b59 100644 (file)
@@ -1,27 +1,13 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test inter-event histogram trigger onmax action
+# requires: set_event synthetic_events events/sched/sched_process_fork/hist
 
 fail() { #msg
     echo $1
     exit_fail
 }
 
-if [ ! -f set_event ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f synthetic_events ]; then
-    echo "synthetic event is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/sched/sched_process_fork/hist ]; then
-    echo "hist trigger is not supported"
-    exit_unsupported
-fi
-
 echo "Test create synthetic event"
 
 echo 'wakeup_latency  u64 lat pid_t pid char comm[16]' > synthetic_events
index 01b01b9..67fa328 100644 (file)
@@ -1,31 +1,13 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test inter-event histogram trigger snapshot action
+# requires: set_event snapshot events/sched/sched_process_fork/hist "onchange(var)":README "snapshot()":README
 
 fail() { #msg
     echo $1
     exit_fail
 }
 
-if [ ! -f set_event ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/sched/sched_process_fork/hist ]; then
-    echo "hist trigger is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f snapshot ]; then
-    echo "snapshot is not supported"
-    exit_unsupported
-fi
-
-grep -q "onchange(var)" README || exit_unsupported # version issue
-
-grep -q "snapshot()" README || exit_unsupported # version issue
-
 echo "Test snapshot action"
 
 echo 1 > events/sched/enable
index df44b14..a152b55 100644 (file)
@@ -1,22 +1,13 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test synthetic event create remove
+# requires: set_event synthetic_events
 
 fail() { #msg
     echo $1
     exit_fail
 }
 
-if [ ! -f set_event ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f synthetic_events ]; then
-    echo "synthetic event is not supported"
-    exit_unsupported
-fi
-
 echo "Test create synthetic event"
 
 echo 'wakeup_latency  u64 lat pid_t pid char comm[16]' > synthetic_events
index 88e6c3f..59216f3 100644 (file)
@@ -1,6 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test synthetic_events syntax parser
+# requires: set_event synthetic_events
 
 do_reset() {
     reset_trigger
@@ -14,16 +15,6 @@ fail() { #msg
     exit_fail
 }
 
-if [ ! -f set_event ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f synthetic_events ]; then
-    echo "synthetic event is not supported"
-    exit_unsupported
-fi
-
 reset_tracer
 do_reset
 
index c3baa48..c126d23 100644 (file)
@@ -1,29 +1,13 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test inter-event histogram trigger trace action
+# requires: set_event synthetic_events events/sched/sched_process_fork/hist "trace(<synthetic_event>":README
 
 fail() { #msg
     echo $1
     exit_fail
 }
 
-if [ ! -f set_event ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f synthetic_events ]; then
-    echo "synthetic event is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/sched/sched_process_fork/hist ]; then
-    echo "hist trigger is not supported"
-    exit_unsupported
-fi
-
-grep -q "trace(<synthetic_event>" README || exit_unsupported # version issue
-
 echo "Test create synthetic event"
 
 echo 'wakeup_latency  u64 lat pid_t pid char comm[16]' > synthetic_events
index eddb51e..c226ace 100644 (file)
@@ -1,6 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test event enable/disable trigger
+# requires: set_event events/sched/sched_process_fork/trigger
 # flags: instance
 
 fail() { #msg
@@ -8,16 +9,6 @@ fail() { #msg
     exit_fail
 }
 
-if [ ! -f set_event -o ! -d events/sched ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/sched/sched_process_fork/trigger ]; then
-    echo "event trigger is not supported"
-    exit_unsupported
-fi
-
 FEATURE=`grep enable_event events/sched/sched_process_fork/trigger`
 if [ -z "$FEATURE" ]; then
     echo "event enable/disable trigger is not supported"
index 2dcc229..d9a198c 100644 (file)
@@ -1,6 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test trigger filter
+# requires: set_event events/sched/sched_process_fork/trigger
 # flags: instance
 
 fail() { #msg
@@ -8,16 +9,6 @@ fail() { #msg
     exit_fail
 }
 
-if [ ! -f set_event -o ! -d events/sched ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/sched/sched_process_fork/trigger ]; then
-    echo "event trigger is not supported"
-    exit_unsupported
-fi
-
 echo "Test trigger filter"
 echo 1 > tracing_on
 echo 'traceoff if child_pid == 0' > events/sched/sched_process_fork/trigger
index fab4431..4562e13 100644 (file)
@@ -1,6 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test histogram modifiers
+# requires: set_event events/sched/sched_process_fork/trigger events/sched/sched_process_fork/hist
 # flags: instance
 
 fail() { #msg
@@ -8,21 +9,6 @@ fail() { #msg
     exit_fail
 }
 
-if [ ! -f set_event -o ! -d events/sched ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/sched/sched_process_fork/trigger ]; then
-    echo "event trigger is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/sched/sched_process_fork/hist ]; then
-    echo "hist trigger is not supported"
-    exit_unsupported
-fi
-
 echo "Test histogram with execname modifier"
 
 echo 'hist:keys=common_pid.execname' > events/sched/sched_process_fork/trigger
index d44087a..52cfe78 100644 (file)
@@ -1,23 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test histogram parser errors
-
-if [ ! -f set_event -o ! -d events/kmem ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/kmem/kmalloc/trigger ]; then
-    echo "event trigger is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/kmem/kmalloc/hist ]; then
-    echo "hist trigger is not supported"
-    exit_unsupported
-fi
-
-[ -f error_log ] || exit_unsupported
+# requires: set_event events/kmem/kmalloc/trigger events/kmem/kmalloc/hist error_log
 
 check_error() { # command-with-error-pos-by-^
     ftrace_errlog_check 'hist:kmem:kmalloc' "$1" 'events/kmem/kmalloc/trigger'
index 177e8d4..2950bfb 100644 (file)
@@ -1,6 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test histogram trigger
+# requires: set_event events/sched/sched_process_fork/trigger events/sched/sched_process_fork/hist
 # flags: instance
 
 fail() { #msg
@@ -8,22 +9,7 @@ fail() { #msg
     exit_fail
 }
 
-if [ ! -f set_event -o ! -d events/sched ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/sched/sched_process_fork/trigger ]; then
-    echo "event trigger is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/sched/sched_process_fork/hist ]; then
-    echo "hist trigger is not supported"
-    exit_unsupported
-fi
-
-echo "Test histogram basic tigger"
+echo "Test histogram basic trigger"
 
 echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger
 for i in `seq 1 10` ; do ( echo "forked" > /dev/null); done
index 68ff3f4..7129b52 100644 (file)
@@ -1,6 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test multiple histogram triggers
+# requires: set_event events/sched/sched_process_fork/trigger events/sched/sched_process_fork/hist
 # flags: instance
 
 fail() { #msg
@@ -8,21 +9,6 @@ fail() { #msg
     exit_fail
 }
 
-if [ ! -f set_event -o ! -d events/sched ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/sched/sched_process_fork/trigger ]; then
-    echo "event trigger is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/sched/sched_process_fork/hist ]; then
-    echo "hist trigger is not supported"
-    exit_unsupported
-fi
-
 echo "Test histogram multiple triggers"
 
 echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger
index ac73850..33f5bde 100644 (file)
@@ -1,27 +1,13 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test snapshot-trigger
+# requires: set_event events/sched/sched_process_fork/trigger snapshot
 
 fail() { #msg
     echo $1
     exit_fail
 }
 
-if [ ! -f set_event -o ! -d events/sched ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/sched/sched_process_fork/trigger ]; then
-    echo "event trigger is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f snapshot ]; then
-    echo "snapshot is not supported"
-    exit_unsupported
-fi
-
 FEATURE=`grep snapshot events/sched/sched_process_fork/trigger`
 if [ -z "$FEATURE" ]; then
     echo "snapshot trigger is not supported"
index 398c05c..320ea9b 100644 (file)
@@ -1,29 +1,20 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test stacktrace-trigger
+# requires: set_event events/sched/sched_process_fork/trigger
 
 fail() { #msg
     echo $1
     exit_fail
 }
 
-if [ ! -f set_event -o ! -d events/sched ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/sched/sched_process_fork/trigger ]; then
-    echo "event trigger is not supported"
-    exit_unsupported
-fi
-
 FEATURE=`grep stacktrace events/sched/sched_process_fork/trigger`
 if [ -z "$FEATURE" ]; then
     echo "stacktrace trigger is not supported"
     exit_unsupported
 fi
 
-echo "Test stacktrace tigger"
+echo "Test stacktrace trigger"
 echo 0 > trace
 echo 0 > options/stacktrace
 echo 'stacktrace' > events/sched/sched_process_fork/trigger
index ab6bedb..68f3af9 100644 (file)
@@ -1,6 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: trace_marker trigger - test histogram trigger
+# requires: set_event events/ftrace/print/trigger events/ftrace/print/hist
 # flags: instance
 
 fail() { #msg
@@ -8,27 +9,7 @@ fail() { #msg
     exit_fail
 }
 
-if [ ! -f set_event ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -d events/ftrace/print ]; then
-    echo "event trace_marker is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/ftrace/print/trigger ]; then
-    echo "event trigger is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/ftrace/print/hist ]; then
-    echo "hist trigger is not supported"
-    exit_unsupported
-fi
-
-echo "Test histogram trace_marker tigger"
+echo "Test histogram trace_marker trigger"
 
 echo 'hist:keys=common_pid' > events/ftrace/print/trigger
 for i in `seq 1 10` ; do echo "hello" > trace_marker; done
index df246e5..27da2db 100644 (file)
@@ -1,6 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: trace_marker trigger - test snapshot trigger
+# requires: set_event snapshot events/ftrace/print/trigger
 # flags: instance
 
 fail() { #msg
@@ -8,26 +9,6 @@ fail() { #msg
     exit_fail
 }
 
-if [ ! -f set_event ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f snapshot ]; then
-    echo "snapshot is not supported"
-    exit_unsupported
-fi
-
-if [ ! -d events/ftrace/print ]; then
-    echo "event trace_marker is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/ftrace/print/trigger ]; then
-    echo "event trigger is not supported"
-    exit_unsupported
-fi
-
 test_trace() {
     file=$1
     x=$2
@@ -46,7 +27,7 @@ test_trace() {
     done
 }
 
-echo "Test snapshot trace_marker tigger"
+echo "Test snapshot trace_marker trigger"
 
 echo 'snapshot' > events/ftrace/print/trigger
 
index 18b4d1c..531139f 100644 (file)
@@ -1,6 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: trace_marker trigger - test histogram with synthetic event against kernel event
+# requires: set_event synthetic_events events/sched/sched_waking events/ftrace/print/trigger events/ftrace/print/hist
 # flags:
 
 fail() { #msg
@@ -8,36 +9,6 @@ fail() { #msg
     exit_fail
 }
 
-if [ ! -f set_event ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f synthetic_events ]; then
-    echo "synthetic events not supported"
-    exit_unsupported
-fi
-
-if [ ! -d events/ftrace/print ]; then
-    echo "event trace_marker is not supported"
-    exit_unsupported
-fi
-
-if [ ! -d events/sched/sched_waking ]; then
-    echo "event sched_waking is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/ftrace/print/trigger ]; then
-    echo "event trigger is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/ftrace/print/hist ]; then
-    echo "hist trigger is not supported"
-    exit_unsupported
-fi
-
 echo "Test histogram kernel event to trace_marker latency histogram trigger"
 
 echo 'latency u64 lat' > synthetic_events
index dd262d6..cc99cbb 100644 (file)
@@ -1,6 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: trace_marker trigger - test histogram with synthetic event
+# requires: set_event synthetic_events events/ftrace/print/trigger events/ftrace/print/hist
 # flags:
 
 fail() { #msg
@@ -8,31 +9,6 @@ fail() { #msg
     exit_fail
 }
 
-if [ ! -f set_event ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f synthetic_events ]; then
-    echo "synthetic events not supported"
-    exit_unsupported
-fi
-
-if [ ! -d events/ftrace/print ]; then
-    echo "event trace_marker is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/ftrace/print/trigger ]; then
-    echo "event trigger is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/ftrace/print/hist ]; then
-    echo "hist trigger is not supported"
-    exit_unsupported
-fi
-
 echo "Test histogram trace_marker to trace_marker latency histogram trigger"
 
 echo 'latency u64 lat' > synthetic_events
index d5d2dcb..9ca0467 100644 (file)
@@ -1,22 +1,13 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test traceon/off trigger
+# requires: set_event events/sched/sched_process_fork/trigger
 
 fail() { #msg
     echo $1
     exit_fail
 }
 
-if [ ! -f set_event -o ! -d events/sched ]; then
-    echo "event tracing is not supported"
-    exit_unsupported
-fi
-
-if [ ! -f events/sched/sched_process_fork/trigger ]; then
-    echo "event trigger is not supported"
-    exit_unsupported
-fi
-
 echo "Test traceoff trigger"
 echo 1 > tracing_on
 echo 'traceoff' > events/sched/sched_process_fork/trigger
index 3702dbc..c82aa77 100755 (executable)
@@ -63,6 +63,8 @@ ALL_TESTS="$ALL_TESTS 0008:150:1"
 ALL_TESTS="$ALL_TESTS 0009:150:1"
 ALL_TESTS="$ALL_TESTS 0010:1:1"
 ALL_TESTS="$ALL_TESTS 0011:1:1"
+ALL_TESTS="$ALL_TESTS 0012:1:1"
+ALL_TESTS="$ALL_TESTS 0013:1:1"
 
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
@@ -470,6 +472,38 @@ kmod_test_0011()
        echo "$MODPROBE" > /proc/sys/kernel/modprobe
 }
 
+kmod_check_visibility()
+{
+       local name="$1"
+       local cmd="$2"
+
+       modprobe $DEFAULT_KMOD_DRIVER
+
+       local priv=$(eval $cmd)
+       local unpriv=$(capsh --drop=CAP_SYSLOG -- -c "$cmd")
+
+       if [ "$priv" = "$unpriv" ] || \
+          [ "${priv:0:3}" = "0x0" ] || \
+          [ "${unpriv:0:3}" != "0x0" ] ; then
+               echo "${FUNCNAME[0]}: FAIL, $name visible to unpriv: '$priv' vs '$unpriv'" >&2
+               exit 1
+       else
+               echo "${FUNCNAME[0]}: OK!"
+       fi
+}
+
+kmod_test_0012()
+{
+       kmod_check_visibility /proc/modules \
+               "grep '^${DEFAULT_KMOD_DRIVER}\b' /proc/modules | awk '{print \$NF}'"
+}
+
+kmod_test_0013()
+{
+       kmod_check_visibility '/sys/module/*/sections/*' \
+               "cat /sys/module/${DEFAULT_KMOD_DRIVER}/sections/.*text | head -n1"
+}
+
 list_tests()
 {
        echo "Test ID list:"
@@ -489,6 +523,8 @@ list_tests()
        echo "0009 x $(get_test_count 0009) - multithreaded - push kmod_concurrent over max_modprobes for get_fs_type()"
        echo "0010 x $(get_test_count 0010) - test nonexistent modprobe path"
        echo "0011 x $(get_test_count 0011) - test completely disabling module autoloading"
+       echo "0012 x $(get_test_count 0012) - test /proc/modules address visibility under CAP_SYSLOG"
+       echo "0013 x $(get_test_count 0013) - test /sys/module/*/sections/* visibility under CAP_SYSLOG"
 }
 
 usage()
index 0ac49d9..862eee7 100644 (file)
@@ -36,7 +36,7 @@ struct ksft_count {
 static struct ksft_count ksft_cnt;
 static unsigned int ksft_plan;
 
-static inline int ksft_test_num(void)
+static inline unsigned int ksft_test_num(void)
 {
        return ksft_cnt.ksft_pass + ksft_cnt.ksft_fail +
                ksft_cnt.ksft_xfail + ksft_cnt.ksft_xpass +
index 895ec99..9491bba 100644 (file)
@@ -17,6 +17,8 @@ TEST_PROGS += route_localnet.sh
 TEST_PROGS += reuseaddr_ports_exhausted.sh
 TEST_PROGS += txtimestamp.sh
 TEST_PROGS += vrf-xfrm-tests.sh
+TEST_PROGS += rxtimestamp.sh
+TEST_PROGS += devlink_port_split.py
 TEST_PROGS_EXTENDED := in_netns.sh
 TEST_GEN_FILES =  socket nettest
 TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
diff --git a/tools/testing/selftests/net/devlink_port_split.py b/tools/testing/selftests/net/devlink_port_split.py
new file mode 100755 (executable)
index 0000000..58bb7e9
--- /dev/null
@@ -0,0 +1,277 @@
+#!/usr/bin/python3
+# SPDX-License-Identifier: GPL-2.0
+
+from subprocess import PIPE, Popen
+import json
+import time
+import argparse
+import collections
+import sys
+
+#
+# Test port split configuration using devlink-port lanes attribute.
+# The test is skipped in case the attribute is not available.
+#
+# First, check that all the ports with 1 lane fail to split.
+# Second, check that all the ports with more than 1 lane can be split
+# to all valid configurations (e.g., split to 2, split to 4 etc.)
+#
+
+
+Port = collections.namedtuple('Port', 'bus_info name')
+
+
+def run_command(cmd, should_fail=False):
+    """
+    Run a command in subprocess.
+    Return: Tuple of (stdout, stderr).
+    """
+
+    p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
+    stdout, stderr = p.communicate()
+    stdout, stderr = stdout.decode(), stderr.decode()
+
+    if stderr != "" and not should_fail:
+        print("Error sending command: %s" % cmd)
+        print(stdout)
+        print(stderr)
+    return stdout, stderr
+
+
+class devlink_ports(object):
+    """
+    Class that holds information on the devlink ports, required to the tests;
+    if_names: A list of interfaces in the devlink ports.
+    """
+
+    def get_if_names(dev):
+        """
+        Get a list of physical devlink ports.
+        Return: Array of tuples (bus_info/port, if_name).
+        """
+
+        arr = []
+
+        cmd = "devlink -j port show"
+        stdout, stderr = run_command(cmd)
+        assert stderr == ""
+        ports = json.loads(stdout)['port']
+
+        for port in ports:
+            if dev in port:
+                if ports[port]['flavour'] == 'physical':
+                    arr.append(Port(bus_info=port, name=ports[port]['netdev']))
+
+        return arr
+
+    def __init__(self, dev):
+        self.if_names = devlink_ports.get_if_names(dev)
+
+
+def get_max_lanes(port):
+    """
+    Get the $port's maximum number of lanes.
+    Return: number of lanes, e.g. 1, 2, 4 and 8.
+    """
+
+    cmd = "devlink -j port show %s" % port
+    stdout, stderr = run_command(cmd)
+    assert stderr == ""
+    values = list(json.loads(stdout)['port'].values())[0]
+
+    if 'lanes' in values:
+        lanes = values['lanes']
+    else:
+        lanes = 0
+    return lanes
+
+
+def get_split_ability(port):
+    """
+    Get the $port split ability.
+    Return: split ability, true or false.
+    """
+
+    cmd = "devlink -j port show %s" % port.name
+    stdout, stderr = run_command(cmd)
+    assert stderr == ""
+    values = list(json.loads(stdout)['port'].values())[0]
+
+    return values['splittable']
+
+
+def split(k, port, should_fail=False):
+    """
+    Split $port into $k ports.
+    If should_fail == True, the split should fail. Otherwise, should pass.
+    Return: Array of sub ports after splitting.
+            If the $port wasn't split, the array will be empty.
+    """
+
+    cmd = "devlink port split %s count %s" % (port.bus_info, k)
+    stdout, stderr = run_command(cmd, should_fail=should_fail)
+
+    if should_fail:
+        if not test(stderr != "", "%s is unsplittable" % port.name):
+            print("split an unsplittable port %s" % port.name)
+            return create_split_group(port, k)
+    else:
+        if stderr == "":
+            return create_split_group(port, k)
+        print("didn't split a splittable port %s" % port.name)
+
+    return []
+
+
+def unsplit(port):
+    """
+    Unsplit $port.
+    """
+
+    cmd = "devlink port unsplit %s" % port
+    stdout, stderr = run_command(cmd)
+    test(stderr == "", "Unsplit port %s" % port)
+
+
+def exists(port, dev):
+    """
+    Check if $port exists in the devlink ports.
+    Return: True is so, False otherwise.
+    """
+
+    return any(dev_port.name == port
+               for dev_port in devlink_ports.get_if_names(dev))
+
+
+def exists_and_lanes(ports, lanes, dev):
+    """
+    Check if every port in the list $ports exists in the devlink ports and has
+    $lanes number of lanes after splitting.
+    Return: True if both are True, False otherwise.
+    """
+
+    for port in ports:
+        max_lanes = get_max_lanes(port)
+        if not exists(port, dev):
+            print("port %s doesn't exist in devlink ports" % port)
+            return False
+        if max_lanes != lanes:
+            print("port %s has %d lanes, but %s were expected"
+                  % (port, lanes, max_lanes))
+            return False
+    return True
+
+
+def test(cond, msg):
+    """
+    Check $cond and print a message accordingly.
+    Return: True is pass, False otherwise.
+    """
+
+    if cond:
+        print("TEST: %-60s [ OK ]" % msg)
+    else:
+        print("TEST: %-60s [FAIL]" % msg)
+
+    return cond
+
+
+def create_split_group(port, k):
+    """
+    Create the split group for $port.
+    Return: Array with $k elements, which are the split port group.
+    """
+
+    return list(port.name + "s" + str(i) for i in range(k))
+
+
+def split_unsplittable_port(port, k):
+    """
+    Test that splitting of unsplittable port fails.
+    """
+
+    # split to max
+    new_split_group = split(k, port, should_fail=True)
+
+    if new_split_group != []:
+        unsplit(port.bus_info)
+
+
+def split_splittable_port(port, k, lanes, dev):
+    """
+    Test that splitting of splittable port passes correctly.
+    """
+
+    new_split_group = split(k, port)
+
+    # Once the split command ends, it takes some time to the sub ifaces'
+    # to get their names. Use udevadm to continue only when all current udev
+    # events are handled.
+    cmd = "udevadm settle"
+    stdout, stderr = run_command(cmd)
+    assert stderr == ""
+
+    if new_split_group != []:
+        test(exists_and_lanes(new_split_group, lanes/k, dev),
+             "split port %s into %s" % (port.name, k))
+
+    unsplit(port.bus_info)
+
+
+def make_parser():
+    parser = argparse.ArgumentParser(description='A test for port splitting.')
+    parser.add_argument('--dev',
+                        help='The devlink handle of the device under test. ' +
+                             'The default is the first registered devlink ' +
+                             'handle.')
+
+    return parser
+
+
+def main(cmdline=None):
+    parser = make_parser()
+    args = parser.parse_args(cmdline)
+
+    dev = args.dev
+    if not dev:
+        cmd = "devlink -j dev show"
+        stdout, stderr = run_command(cmd)
+        assert stderr == ""
+
+        devs = json.loads(stdout)['dev']
+        dev = list(devs.keys())[0]
+
+    cmd = "devlink dev show %s" % dev
+    stdout, stderr = run_command(cmd)
+    if stderr != "":
+        print("devlink device %s can not be found" % dev)
+        sys.exit(1)
+
+    ports = devlink_ports(dev)
+
+    for port in ports.if_names:
+        max_lanes = get_max_lanes(port.name)
+
+        # If max lanes is 0, do not test port splitting at all
+        if max_lanes == 0:
+            continue
+
+        # If 1 lane, shouldn't be able to split
+        elif max_lanes == 1:
+            test(not get_split_ability(port),
+                 "%s should not be able to split" % port.name)
+            split_unsplittable_port(port, max_lanes)
+
+        # Else, splitting should pass and all the split ports should exist.
+        else:
+            lane = max_lanes
+            test(get_split_ability(port),
+                 "%s should be able to split" % port.name)
+            while lane > 1:
+                split_splittable_port(port, lane, max_lanes, dev)
+
+                lane //= 2
+
+
+if __name__ == "__main__":
+    main()
index dee567f..22dc2f3 100755 (executable)
@@ -747,6 +747,19 @@ ipv6_fcnal_runtime()
        run_cmd "$IP nexthop add id 86 via 2001:db8:91::2 dev veth1"
        run_cmd "$IP ro add 2001:db8:101::1/128 nhid 81"
 
+       # rpfilter and default route
+       $IP nexthop flush >/dev/null 2>&1
+       run_cmd "ip netns exec me ip6tables -t mangle -I PREROUTING 1 -m rpfilter --invert -j DROP"
+       run_cmd "$IP nexthop add id 91 via 2001:db8:91::2 dev veth1"
+       run_cmd "$IP nexthop add id 92 via 2001:db8:92::2 dev veth3"
+       run_cmd "$IP nexthop add id 93 group 91/92"
+       run_cmd "$IP -6 ro add default nhid 91"
+       run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1"
+       log_test $? 0 "Nexthop with default route and rpfilter"
+       run_cmd "$IP -6 ro replace default nhid 93"
+       run_cmd "ip netns exec me ping -c1 -w1 2001:db8:101::1"
+       log_test $? 0 "Nexthop with multipath default route and rpfilter"
+
        # TO-DO:
        # existing route with old nexthop; append route with new nexthop
        # existing route with old nexthop; replace route with new
index f0e6be4..75fe24b 100644 (file)
@@ -98,6 +98,11 @@ devlink_resource_size_set()
        check_err $? "Failed setting path $path to size $size"
 }
 
+devlink_resource_occ_get()
+{
+       devlink_resource_get "$@" | jq '.["occ"]'
+}
+
 devlink_reload()
 {
        local still_pending
index eb8e2a2..ea7a11a 100755 (executable)
@@ -50,23 +50,6 @@ cleanup()
        h1_destroy
 }
 
-different_speeds_get()
-{
-       local dev1=$1; shift
-       local dev2=$1; shift
-       local with_mode=$1; shift
-       local adver=$1; shift
-
-       local -a speeds_arr
-
-       speeds_arr=($(common_speeds_get $dev1 $dev2 $with_mode $adver))
-       if [[ ${#speeds_arr[@]} < 2 ]]; then
-               check_err 1 "cannot check different speeds. There are not enough speeds"
-       fi
-
-       echo ${speeds_arr[0]} ${speeds_arr[1]}
-}
-
 same_speeds_autoneg_off()
 {
        # Check that when each of the reported speeds is forced, the links come
diff --git a/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh b/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh
new file mode 100755 (executable)
index 0000000..4b42dfd
--- /dev/null
@@ -0,0 +1,102 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="
+       autoneg
+       autoneg_force_mode
+       no_cable
+"
+
+NUM_NETIFS=2
+source lib.sh
+source ethtool_lib.sh
+
+setup_prepare()
+{
+       swp1=${NETIFS[p1]}
+       swp2=${NETIFS[p2]}
+       swp3=$NETIF_NO_CABLE
+}
+
+ethtool_extended_state_check()
+{
+       local dev=$1; shift
+       local expected_ext_state=$1; shift
+       local expected_ext_substate=${1:-""}; shift
+
+       local ext_state=$(ethtool $dev | grep "Link detected" \
+               | cut -d "(" -f2 | cut -d ")" -f1)
+       local ext_substate=$(echo $ext_state | cut -sd "," -f2 \
+               | sed -e 's/^[[:space:]]*//')
+       ext_state=$(echo $ext_state | cut -d "," -f1)
+
+       [[ $ext_state == $expected_ext_state ]]
+       check_err $? "Expected \"$expected_ext_state\", got \"$ext_state\""
+
+       [[ $ext_substate == $expected_ext_substate ]]
+       check_err $? "Expected \"$expected_ext_substate\", got \"$ext_substate\""
+}
+
+autoneg()
+{
+       RET=0
+
+       ip link set dev $swp1 up
+
+       sleep 4
+       ethtool_extended_state_check $swp1 "Autoneg" "No partner detected"
+
+       log_test "Autoneg, No partner detected"
+
+       ip link set dev $swp1 down
+}
+
+autoneg_force_mode()
+{
+       RET=0
+
+       ip link set dev $swp1 up
+       ip link set dev $swp2 up
+
+       local -a speeds_arr=($(different_speeds_get $swp1 $swp2 0 0))
+       local speed1=${speeds_arr[0]}
+       local speed2=${speeds_arr[1]}
+
+       ethtool_set $swp1 speed $speed1 autoneg off
+       ethtool_set $swp2 speed $speed2 autoneg off
+
+       sleep 4
+       ethtool_extended_state_check $swp1 "Autoneg" \
+               "No partner detected during force mode"
+
+       ethtool_extended_state_check $swp2 "Autoneg" \
+               "No partner detected during force mode"
+
+       log_test "Autoneg, No partner detected during force mode"
+
+       ethtool -s $swp2 autoneg on
+       ethtool -s $swp1 autoneg on
+
+       ip link set dev $swp2 down
+       ip link set dev $swp1 down
+}
+
+no_cable()
+{
+       RET=0
+
+       ip link set dev $swp3 up
+
+       sleep 1
+       ethtool_extended_state_check $swp3 "No cable"
+
+       log_test "No cable"
+
+       ip link set dev $swp3 down
+}
+
+setup_prepare
+
+tests_run
+
+exit $EXIT_STATUS
index 925d229..9188e62 100644 (file)
@@ -67,3 +67,20 @@ common_speeds_get()
                <(printf '%s\n' "${dev1_speeds[@]}" | sort -u) \
                <(printf '%s\n' "${dev2_speeds[@]}" | sort -u)
 }
+
+different_speeds_get()
+{
+       local dev1=$1; shift
+       local dev2=$1; shift
+       local with_mode=$1; shift
+       local adver=$1; shift
+
+       local -a speeds_arr
+
+       speeds_arr=($(common_speeds_get $dev1 $dev2 $with_mode $adver))
+       if [[ ${#speeds_arr[@]} < 2 ]]; then
+               check_err 1 "cannot check different speeds. There are not enough speeds"
+       fi
+
+       echo ${speeds_arr[0]} ${speeds_arr[1]}
+}
index e2adb53..b802c14 100644 (file)
@@ -14,6 +14,9 @@ NETIFS[p6]=veth5
 NETIFS[p7]=veth6
 NETIFS[p8]=veth7
 
+# Port that does not have a cable connected.
+NETIF_NO_CABLE=eth8
+
 ##############################################################################
 # Defines
 
diff --git a/tools/testing/selftests/net/forwarding/pedit_l4port.sh b/tools/testing/selftests/net/forwarding/pedit_l4port.sh
new file mode 100755 (executable)
index 0000000..5f20d28
--- /dev/null
@@ -0,0 +1,198 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test sends traffic from H1 to H2. Either on ingress of $swp1, or on egress of $swp2, the
+# traffic is acted upon by a pedit action. An ingress filter installed on $h2 verifies that the
+# packet looks like expected.
+#
+# +----------------------+                             +----------------------+
+# | H1                   |                             |                   H2 |
+# |    + $h1             |                             |            $h2 +     |
+# |    | 192.0.2.1/28    |                             |   192.0.2.2/28 |     |
+# +----|-----------------+                             +----------------|-----+
+#      |                                                                |
+# +----|----------------------------------------------------------------|-----+
+# | SW |                                                                |     |
+# |  +-|----------------------------------------------------------------|-+   |
+# |  | + $swp1                       BR                           $swp2 + |   |
+# |  +--------------------------------------------------------------------+   |
+# +---------------------------------------------------------------------------+
+
+ALL_TESTS="
+       ping_ipv4
+       test_udp_sport
+       test_udp_dport
+       test_tcp_sport
+       test_tcp_dport
+"
+
+NUM_NETIFS=4
+source lib.sh
+source tc_common.sh
+
+: ${HIT_TIMEOUT:=2000} # ms
+
+h1_create()
+{
+       simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64
+}
+
+h1_destroy()
+{
+       simple_if_fini $h1 192.0.2.1/28 2001:db8:1::1/64
+}
+
+h2_create()
+{
+       simple_if_init $h2 192.0.2.2/28 2001:db8:1::2/64
+       tc qdisc add dev $h2 clsact
+}
+
+h2_destroy()
+{
+       tc qdisc del dev $h2 clsact
+       simple_if_fini $h2 192.0.2.2/28 2001:db8:1::2/64
+}
+
+switch_create()
+{
+       ip link add name br1 up type bridge vlan_filtering 1
+       ip link set dev $swp1 master br1
+       ip link set dev $swp1 up
+       ip link set dev $swp2 master br1
+       ip link set dev $swp2 up
+
+       tc qdisc add dev $swp1 clsact
+       tc qdisc add dev $swp2 clsact
+}
+
+switch_destroy()
+{
+       tc qdisc del dev $swp2 clsact
+       tc qdisc del dev $swp1 clsact
+
+       ip link set dev $swp2 nomaster
+       ip link set dev $swp1 nomaster
+       ip link del dev br1
+}
+
+setup_prepare()
+{
+       h1=${NETIFS[p1]}
+       swp1=${NETIFS[p2]}
+
+       swp2=${NETIFS[p3]}
+       h2=${NETIFS[p4]}
+
+       h2mac=$(mac_get $h2)
+
+       vrf_prepare
+       h1_create
+       h2_create
+       switch_create
+}
+
+cleanup()
+{
+       pre_cleanup
+
+       switch_destroy
+       h2_destroy
+       h1_destroy
+       vrf_cleanup
+}
+
+ping_ipv4()
+{
+       ping_test $h1 192.0.2.2
+}
+
+ping_ipv6()
+{
+       ping6_test $h1 2001:db8:1::2
+}
+
+do_test_pedit_l4port_one()
+{
+       local pedit_locus=$1; shift
+       local pedit_prot=$1; shift
+       local pedit_action=$1; shift
+       local match_prot=$1; shift
+       local match_flower=$1; shift
+       local mz_flags=$1; shift
+       local saddr=$1; shift
+       local daddr=$1; shift
+
+       tc filter add $pedit_locus handle 101 pref 1 \
+          flower action pedit ex munge $pedit_action
+       tc filter add dev $h2 ingress handle 101 pref 1 prot $match_prot \
+          flower skip_hw $match_flower action pass
+
+       RET=0
+
+       $MZ $mz_flags $h1 -c 10 -d 20msec -p 100 \
+           -a own -b $h2mac -q -t $pedit_prot sp=54321,dp=12345
+
+       local pkts
+       pkts=$(busywait "$TC_HIT_TIMEOUT" until_counter_is ">= 10" \
+                       tc_rule_handle_stats_get "dev $h2 ingress" 101)
+       check_err $? "Expected to get 10 packets, but got $pkts."
+
+       pkts=$(tc_rule_handle_stats_get "$pedit_locus" 101)
+       ((pkts >= 10))
+       check_err $? "Expected to get 10 packets on pedit rule, but got $pkts."
+
+       log_test "$pedit_locus pedit $pedit_action"
+
+       tc filter del dev $h2 ingress pref 1
+       tc filter del $pedit_locus pref 1
+}
+
+do_test_pedit_l4port()
+{
+       local locus=$1; shift
+       local prot=$1; shift
+       local pedit_port=$1; shift
+       local flower_port=$1; shift
+       local port
+
+       for port in 1 11111 65535; do
+               do_test_pedit_l4port_one "$locus" "$prot"                       \
+                                        "$prot $pedit_port set $port"          \
+                                        ip "ip_proto $prot $flower_port $port" \
+                                        "-A 192.0.2.1 -B 192.0.2.2"
+       done
+}
+
+test_udp_sport()
+{
+       do_test_pedit_l4port "dev $swp1 ingress" udp sport src_port
+       do_test_pedit_l4port "dev $swp2 egress"  udp sport src_port
+}
+
+test_udp_dport()
+{
+       do_test_pedit_l4port "dev $swp1 ingress" udp dport dst_port
+       do_test_pedit_l4port "dev $swp2 egress"  udp dport dst_port
+}
+
+test_tcp_sport()
+{
+       do_test_pedit_l4port "dev $swp1 ingress" tcp sport src_port
+       do_test_pedit_l4port "dev $swp2 egress"  tcp sport src_port
+}
+
+test_tcp_dport()
+{
+       do_test_pedit_l4port "dev $swp1 ingress" tcp dport dst_port
+       do_test_pedit_l4port "dev $swp2 egress"  tcp dport dst_port
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/sch_red.sh b/tools/testing/selftests/net/forwarding/sch_red.sh
new file mode 100755 (executable)
index 0000000..e714bae
--- /dev/null
@@ -0,0 +1,492 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# This test sends one stream of traffic from H1 through a TBF shaper, to a RED
+# within TBF shaper on $swp3. The two shapers have the same configuration, and
+# thus the resulting stream should fill all available bandwidth on the latter
+# shaper. A second stream is sent from H2 also via $swp3, and used to inject
+# additional traffic. Since all available bandwidth is taken, this traffic has
+# to go to backlog.
+#
+# +--------------------------+                     +--------------------------+
+# | H1                       |                     | H2                       |
+# |     + $h1                |                     |     + $h2                |
+# |     | 192.0.2.1/28       |                     |     | 192.0.2.2/28       |
+# |     | TBF 10Mbps         |                     |     |                    |
+# +-----|--------------------+                     +-----|--------------------+
+#       |                                                |
+# +-----|------------------------------------------------|--------------------+
+# | SW  |                                                |                    |
+# |  +--|------------------------------------------------|----------------+   |
+# |  |  + $swp1                                          + $swp2          |   |
+# |  |                               BR                                   |   |
+# |  |                                                                    |   |
+# |  |                                + $swp3                             |   |
+# |  |                                | TBF 10Mbps / RED                  |   |
+# |  +--------------------------------|-----------------------------------+   |
+# |                                   |                                       |
+# +-----------------------------------|---------------------------------------+
+#                                     |
+#                               +-----|--------------------+
+#                              | H3  |                    |
+                              |     + $h3                |
+#                              |       192.0.2.3/28       |
+#                              |                          |
+#                              +--------------------------+
+
+ALL_TESTS="
+       ping_ipv4
+       ecn_test
+       ecn_nodrop_test
+       red_test
+       red_qevent_test
+       ecn_qevent_test
+"
+
+NUM_NETIFS=6
+CHECK_TC="yes"
+source lib.sh
+
+BACKLOG=30000
+PKTSZ=1400
+
+h1_create()
+{
+       simple_if_init $h1 192.0.2.1/28
+       mtu_set $h1 10000
+       tc qdisc replace dev $h1 root handle 1: tbf \
+          rate 10Mbit burst 10K limit 1M
+}
+
+h1_destroy()
+{
+       tc qdisc del dev $h1 root
+       mtu_restore $h1
+       simple_if_fini $h1 192.0.2.1/28
+}
+
+h2_create()
+{
+       simple_if_init $h2 192.0.2.2/28
+       mtu_set $h2 10000
+}
+
+h2_destroy()
+{
+       mtu_restore $h2
+       simple_if_fini $h2 192.0.2.2/28
+}
+
+h3_create()
+{
+       simple_if_init $h3 192.0.2.3/28
+       mtu_set $h3 10000
+}
+
+h3_destroy()
+{
+       mtu_restore $h3
+       simple_if_fini $h3 192.0.2.3/28
+}
+
+switch_create()
+{
+       ip link add dev br up type bridge
+       ip link set dev $swp1 up master br
+       ip link set dev $swp2 up master br
+       ip link set dev $swp3 up master br
+
+       mtu_set $swp1 10000
+       mtu_set $swp2 10000
+       mtu_set $swp3 10000
+
+       tc qdisc replace dev $swp3 root handle 1: tbf \
+          rate 10Mbit burst 10K limit 1M
+       ip link add name _drop_test up type dummy
+}
+
+switch_destroy()
+{
+       ip link del dev _drop_test
+       tc qdisc del dev $swp3 root
+
+       mtu_restore $h3
+       mtu_restore $h2
+       mtu_restore $h1
+
+       ip link set dev $swp3 down nomaster
+       ip link set dev $swp2 down nomaster
+       ip link set dev $swp1 down nomaster
+       ip link del dev br
+}
+
+setup_prepare()
+{
+       h1=${NETIFS[p1]}
+       swp1=${NETIFS[p2]}
+
+       h2=${NETIFS[p3]}
+       swp2=${NETIFS[p4]}
+
+       swp3=${NETIFS[p5]}
+       h3=${NETIFS[p6]}
+
+       h3_mac=$(mac_get $h3)
+
+       vrf_prepare
+
+       h1_create
+       h2_create
+       h3_create
+       switch_create
+}
+
+cleanup()
+{
+       pre_cleanup
+
+       switch_destroy
+       h3_destroy
+       h2_destroy
+       h1_destroy
+
+       vrf_cleanup
+}
+
+ping_ipv4()
+{
+       ping_test $h1 192.0.2.3 " from host 1"
+       ping_test $h2 192.0.2.3 " from host 2"
+}
+
+get_qdisc_backlog()
+{
+       qdisc_stats_get $swp3 11: .backlog
+}
+
+get_nmarked()
+{
+       qdisc_stats_get $swp3 11: .marked
+}
+
+get_qdisc_npackets()
+{
+       qdisc_stats_get $swp3 11: .packets
+}
+
+get_nmirrored()
+{
+       link_stats_get _drop_test tx packets
+}
+
+send_packets()
+{
+       local proto=$1; shift
+       local pkts=$1; shift
+
+       $MZ $h2 -p $PKTSZ -a own -b $h3_mac -A 192.0.2.2 -B 192.0.2.3 -t $proto -q -c $pkts "$@"
+}
+
+# This sends traffic in an attempt to build a backlog of $size. Returns 0 on
+# success. After 10 failed attempts it bails out and returns 1. It dumps the
+# backlog size to stdout.
+build_backlog()
+{
+       local size=$1; shift
+       local proto=$1; shift
+
+       local i=0
+
+       while :; do
+               local cur=$(get_qdisc_backlog)
+               local diff=$((size - cur))
+               local pkts=$(((diff + PKTSZ - 1) / PKTSZ))
+
+               if ((cur >= size)); then
+                       echo $cur
+                       return 0
+               elif ((i++ > 10)); then
+                       echo $cur
+                       return 1
+               fi
+
+               send_packets $proto $pkts "$@"
+               sleep 1
+       done
+}
+
+check_marking()
+{
+       local cond=$1; shift
+
+       local npackets_0=$(get_qdisc_npackets)
+       local nmarked_0=$(get_nmarked)
+       sleep 5
+       local npackets_1=$(get_qdisc_npackets)
+       local nmarked_1=$(get_nmarked)
+
+       local nmarked_d=$((nmarked_1 - nmarked_0))
+       local npackets_d=$((npackets_1 - npackets_0))
+       local pct=$((100 * nmarked_d / npackets_d))
+
+       echo $pct
+       ((pct $cond))
+}
+
+check_mirroring()
+{
+       local cond=$1; shift
+
+       local npackets_0=$(get_qdisc_npackets)
+       local nmirrored_0=$(get_nmirrored)
+       sleep 5
+       local npackets_1=$(get_qdisc_npackets)
+       local nmirrored_1=$(get_nmirrored)
+
+       local nmirrored_d=$((nmirrored_1 - nmirrored_0))
+       local npackets_d=$((npackets_1 - npackets_0))
+       local pct=$((100 * nmirrored_d / npackets_d))
+
+       echo $pct
+       ((pct $cond))
+}
+
+ecn_test_common()
+{
+       local name=$1; shift
+       local limit=$1; shift
+       local backlog
+       local pct
+
+       # Build the below-the-limit backlog using UDP. We could use TCP just
+       # fine, but this way we get a proof that UDP is accepted when queue
+       # length is below the limit. The main stream is using TCP, and if the
+       # limit is misconfigured, we would see this traffic being ECN marked.
+       RET=0
+       backlog=$(build_backlog $((2 * limit / 3)) udp)
+       check_err $? "Could not build the requested backlog"
+       pct=$(check_marking "== 0")
+       check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0."
+       log_test "$name backlog < limit"
+
+       # Now push TCP, because non-TCP traffic would be early-dropped after the
+       # backlog crosses the limit, and we want to make sure that the backlog
+       # is above the limit.
+       RET=0
+       backlog=$(build_backlog $((3 * limit / 2)) tcp tos=0x01)
+       check_err $? "Could not build the requested backlog"
+       pct=$(check_marking ">= 95")
+       check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected >= 95."
+       log_test "$name backlog > limit"
+}
+
+do_ecn_test()
+{
+       local limit=$1; shift
+       local name=ECN
+
+       $MZ $h1 -p $PKTSZ -A 192.0.2.1 -B 192.0.2.3 -c 0 \
+               -a own -b $h3_mac -t tcp -q tos=0x01 &
+       sleep 1
+
+       ecn_test_common "$name" $limit
+
+       # Up there we saw that UDP gets accepted when backlog is below the
+       # limit. Now that it is above, it should all get dropped, and backlog
+       # building should fail.
+       RET=0
+       build_backlog $((2 * limit)) udp >/dev/null
+       check_fail $? "UDP traffic went into backlog instead of being early-dropped"
+       log_test "$name backlog > limit: UDP early-dropped"
+
+       stop_traffic
+       sleep 1
+}
+
+do_ecn_nodrop_test()
+{
+       local limit=$1; shift
+       local name="ECN nodrop"
+
+       $MZ $h1 -p $PKTSZ -A 192.0.2.1 -B 192.0.2.3 -c 0 \
+               -a own -b $h3_mac -t tcp -q tos=0x01 &
+       sleep 1
+
+       ecn_test_common "$name" $limit
+
+       # Up there we saw that UDP gets accepted when backlog is below the
+       # limit. Now that it is above, in nodrop mode, make sure it goes to
+       # backlog as well.
+       RET=0
+       build_backlog $((2 * limit)) udp >/dev/null
+       check_err $? "UDP traffic was early-dropped instead of getting into backlog"
+       log_test "$name backlog > limit: UDP not dropped"
+
+       stop_traffic
+       sleep 1
+}
+
+do_red_test()
+{
+       local limit=$1; shift
+       local backlog
+       local pct
+
+       # Use ECN-capable TCP to verify there's no marking even though the queue
+       # is above limit.
+       $MZ $h1 -p $PKTSZ -A 192.0.2.1 -B 192.0.2.3 -c 0 \
+               -a own -b $h3_mac -t tcp -q tos=0x01 &
+
+       # Pushing below the queue limit should work.
+       RET=0
+       backlog=$(build_backlog $((2 * limit / 3)) tcp tos=0x01)
+       check_err $? "Could not build the requested backlog"
+       pct=$(check_marking "== 0")
+       check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0."
+       log_test "RED backlog < limit"
+
+       # Pushing above should not.
+       RET=0
+       backlog=$(build_backlog $((3 * limit / 2)) tcp tos=0x01)
+       check_fail $? "Traffic went into backlog instead of being early-dropped"
+       pct=$(check_marking "== 0")
+       check_err $? "backlog $backlog / $limit Got $pct% marked packets, expected == 0."
+       log_test "RED backlog > limit"
+
+       stop_traffic
+       sleep 1
+}
+
+do_red_qevent_test()
+{
+       local limit=$1; shift
+       local backlog
+       local base
+       local now
+       local pct
+
+       RET=0
+
+       $MZ $h1 -p $PKTSZ -A 192.0.2.1 -B 192.0.2.3 -c 0 \
+               -a own -b $h3_mac -t udp -q &
+       sleep 1
+
+       tc filter add block 10 pref 1234 handle 102 matchall skip_hw \
+          action mirred egress mirror dev _drop_test
+
+       # Push to the queue until it's at the limit. The configured limit is
+       # rounded by the qdisc, so this is the best we can do to get to the real
+       # limit.
+       build_backlog $((3 * limit / 2)) udp >/dev/null
+
+       base=$(get_nmirrored)
+       send_packets udp 100
+       sleep 1
+       now=$(get_nmirrored)
+       ((now >= base + 100))
+       check_err $? "Dropped packets not observed: 100 expected, $((now - base)) seen"
+
+       tc filter del block 10 pref 1234 handle 102 matchall
+
+       base=$(get_nmirrored)
+       send_packets udp 100
+       sleep 1
+       now=$(get_nmirrored)
+       ((now == base))
+       check_err $? "Dropped packets still observed: 0 expected, $((now - base)) seen"
+
+       log_test "RED early_dropped packets mirrored"
+
+       stop_traffic
+       sleep 1
+}
+
+do_ecn_qevent_test()
+{
+       local limit=$1; shift
+       local name=ECN
+
+       RET=0
+
+       $MZ $h1 -p $PKTSZ -A 192.0.2.1 -B 192.0.2.3 -c 0 \
+               -a own -b $h3_mac -t tcp -q tos=0x01 &
+       sleep 1
+
+       tc filter add block 10 pref 1234 handle 102 matchall skip_hw \
+          action mirred egress mirror dev _drop_test
+
+       backlog=$(build_backlog $((2 * limit / 3)) tcp tos=0x01)
+       check_err $? "Could not build the requested backlog"
+       pct=$(check_mirroring "== 0")
+       check_err $? "backlog $backlog / $limit Got $pct% mirrored packets, expected == 0."
+
+       backlog=$(build_backlog $((3 * limit / 2)) tcp tos=0x01)
+       check_err $? "Could not build the requested backlog"
+       pct=$(check_mirroring ">= 95")
+       check_err $? "backlog $backlog / $limit Got $pct% mirrored packets, expected >= 95."
+
+       tc filter del block 10 pref 1234 handle 102 matchall
+
+       log_test "ECN marked packets mirrored"
+
+       stop_traffic
+       sleep 1
+}
+
+install_qdisc()
+{
+       local -a args=("$@")
+
+       tc qdisc replace dev $swp3 parent 1:1 handle 11: red \
+          limit 1M avpkt $PKTSZ probability 1 \
+          min $BACKLOG max $((BACKLOG + 1)) burst 38 "${args[@]}"
+       sleep 1
+}
+
+uninstall_qdisc()
+{
+       tc qdisc del dev $swp3 parent 1:1
+}
+
+ecn_test()
+{
+       install_qdisc ecn
+       do_ecn_test $BACKLOG
+       uninstall_qdisc
+}
+
+ecn_nodrop_test()
+{
+       install_qdisc ecn nodrop
+       do_ecn_nodrop_test $BACKLOG
+       uninstall_qdisc
+}
+
+red_test()
+{
+       install_qdisc
+       do_red_test $BACKLOG
+       uninstall_qdisc
+}
+
+red_qevent_test()
+{
+       install_qdisc qevent early_drop block 10
+       do_red_qevent_test $BACKLOG
+       uninstall_qdisc
+}
+
+ecn_qevent_test()
+{
+       install_qdisc ecn qevent mark block 10
+       do_ecn_qevent_test $BACKLOG
+       uninstall_qdisc
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/forwarding/tc_police.sh b/tools/testing/selftests/net/forwarding/tc_police.sh
new file mode 100755 (executable)
index 0000000..160f9cc
--- /dev/null
@@ -0,0 +1,333 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test tc-police action.
+#
+# +---------------------------------+
+# | H1 (vrf)                        |
+# |    + $h1                        |
+# |    | 192.0.2.1/24               |
+# |    |                            |
+# |    |  default via 192.0.2.2     |
+# +----|----------------------------+
+#      |
+# +----|----------------------------------------------------------------------+
+# | SW |                                                                      |
+# |    + $rp1                                                                 |
+# |        192.0.2.2/24                                                       |
+# |                                                                           |
+# |        198.51.100.2/24                           203.0.113.2/24           |
+# |    + $rp2                                    + $rp3                       |
+# |    |                                         |                            |
+# +----|-----------------------------------------|----------------------------+
+#      |                                         |
+# +----|----------------------------+       +----|----------------------------+
+# |    |  default via 198.51.100.2  |       |    |  default via 203.0.113.2   |
+# |    |                            |       |    |                            |
+# |    | 198.51.100.1/24            |       |    | 203.0.113.1/24             |
+# |    + $h2                        |       |    + $h3                        |
+# | H2 (vrf)                        |       | H3 (vrf)                        |
+# +---------------------------------+       +---------------------------------+
+
+ALL_TESTS="
+       police_rx_test
+       police_tx_test
+       police_shared_test
+       police_rx_mirror_test
+       police_tx_mirror_test
+"
+NUM_NETIFS=6
+source tc_common.sh
+source lib.sh
+
+h1_create()
+{
+       simple_if_init $h1 192.0.2.1/24
+
+       ip -4 route add default vrf v$h1 nexthop via 192.0.2.2
+}
+
+h1_destroy()
+{
+       ip -4 route del default vrf v$h1 nexthop via 192.0.2.2
+
+       simple_if_fini $h1 192.0.2.1/24
+}
+
+h2_create()
+{
+       simple_if_init $h2 198.51.100.1/24
+
+       ip -4 route add default vrf v$h2 nexthop via 198.51.100.2
+
+       tc qdisc add dev $h2 clsact
+}
+
+h2_destroy()
+{
+       tc qdisc del dev $h2 clsact
+
+       ip -4 route del default vrf v$h2 nexthop via 198.51.100.2
+
+       simple_if_fini $h2 198.51.100.1/24
+}
+
+h3_create()
+{
+       simple_if_init $h3 203.0.113.1/24
+
+       ip -4 route add default vrf v$h3 nexthop via 203.0.113.2
+
+       tc qdisc add dev $h3 clsact
+}
+
+h3_destroy()
+{
+       tc qdisc del dev $h3 clsact
+
+       ip -4 route del default vrf v$h3 nexthop via 203.0.113.2
+
+       simple_if_fini $h3 203.0.113.1/24
+}
+
+router_create()
+{
+       ip link set dev $rp1 up
+       ip link set dev $rp2 up
+       ip link set dev $rp3 up
+
+       __addr_add_del $rp1 add 192.0.2.2/24
+       __addr_add_del $rp2 add 198.51.100.2/24
+       __addr_add_del $rp3 add 203.0.113.2/24
+
+       tc qdisc add dev $rp1 clsact
+       tc qdisc add dev $rp2 clsact
+}
+
+router_destroy()
+{
+       tc qdisc del dev $rp2 clsact
+       tc qdisc del dev $rp1 clsact
+
+       __addr_add_del $rp3 del 203.0.113.2/24
+       __addr_add_del $rp2 del 198.51.100.2/24
+       __addr_add_del $rp1 del 192.0.2.2/24
+
+       ip link set dev $rp3 down
+       ip link set dev $rp2 down
+       ip link set dev $rp1 down
+}
+
+police_common_test()
+{
+       local test_name=$1; shift
+
+       RET=0
+
+       # Rule to measure bandwidth on ingress of $h2
+       tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
+               dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \
+               action drop
+
+       mausezahn $h1 -a own -b $(mac_get $rp1) -A 192.0.2.1 -B 198.51.100.1 \
+               -t udp sp=12345,dp=54321 -p 1000 -c 0 -q &
+
+       local t0=$(tc_rule_stats_get $h2 1 ingress .bytes)
+       sleep 10
+       local t1=$(tc_rule_stats_get $h2 1 ingress .bytes)
+
+       local er=$((80 * 1000 * 1000))
+       local nr=$(rate $t0 $t1 10)
+       local nr_pct=$((100 * (nr - er) / er))
+       ((-10 <= nr_pct && nr_pct <= 10))
+       check_err $? "Expected rate $(humanize $er), got $(humanize $nr), which is $nr_pct% off. Required accuracy is +-10%."
+
+       log_test "$test_name"
+
+       { kill %% && wait %%; } 2>/dev/null
+       tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+}
+
+police_rx_test()
+{
+       # Rule to police traffic destined to $h2 on ingress of $rp1
+       tc filter add dev $rp1 ingress protocol ip pref 1 handle 101 flower \
+               dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \
+               action police rate 80mbit burst 16k conform-exceed drop/ok
+
+       police_common_test "police on rx"
+
+       tc filter del dev $rp1 ingress protocol ip pref 1 handle 101 flower
+}
+
+police_tx_test()
+{
+       # Rule to police traffic destined to $h2 on egress of $rp2
+       tc filter add dev $rp2 egress protocol ip pref 1 handle 101 flower \
+               dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \
+               action police rate 80mbit burst 16k conform-exceed drop/ok
+
+       police_common_test "police on tx"
+
+       tc filter del dev $rp2 egress protocol ip pref 1 handle 101 flower
+}
+
+police_shared_common_test()
+{
+       local dport=$1; shift
+       local test_name=$1; shift
+
+       RET=0
+
+       mausezahn $h1 -a own -b $(mac_get $rp1) -A 192.0.2.1 -B 198.51.100.1 \
+               -t udp sp=12345,dp=$dport -p 1000 -c 0 -q &
+
+       local t0=$(tc_rule_stats_get $h2 1 ingress .bytes)
+       sleep 10
+       local t1=$(tc_rule_stats_get $h2 1 ingress .bytes)
+
+       local er=$((80 * 1000 * 1000))
+       local nr=$(rate $t0 $t1 10)
+       local nr_pct=$((100 * (nr - er) / er))
+       ((-10 <= nr_pct && nr_pct <= 10))
+       check_err $? "Expected rate $(humanize $er), got $(humanize $nr), which is $nr_pct% off. Required accuracy is +-10%."
+
+       log_test "$test_name"
+
+       { kill %% && wait %%; } 2>/dev/null
+}
+
+police_shared_test()
+{
+       # Rule to measure bandwidth on ingress of $h2
+       tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
+               dst_ip 198.51.100.1 ip_proto udp src_port 12345 \
+               action drop
+
+       # Rule to police traffic destined to $h2 on ingress of $rp1
+       tc filter add dev $rp1 ingress protocol ip pref 1 handle 101 flower \
+               dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \
+               action police rate 80mbit burst 16k conform-exceed drop/ok \
+               index 10
+
+       # Rule to police a different flow destined to $h2 on egress of $rp2
+       # using same policer
+       tc filter add dev $rp2 egress protocol ip pref 1 handle 101 flower \
+               dst_ip 198.51.100.1 ip_proto udp dst_port 22222 \
+               action police index 10
+
+       police_shared_common_test 54321 "police with shared policer - rx"
+
+       police_shared_common_test 22222 "police with shared policer - tx"
+
+       tc filter del dev $rp2 egress protocol ip pref 1 handle 101 flower
+       tc filter del dev $rp1 ingress protocol ip pref 1 handle 101 flower
+       tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+}
+
+police_mirror_common_test()
+{
+       local pol_if=$1; shift
+       local dir=$1; shift
+       local test_name=$1; shift
+
+       RET=0
+
+       # Rule to measure bandwidth on ingress of $h2
+       tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
+               dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \
+               action drop
+
+       # Rule to measure bandwidth of mirrored traffic on ingress of $h3
+       tc filter add dev $h3 ingress protocol ip pref 1 handle 101 flower \
+               dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \
+               action drop
+
+       # Rule to police traffic destined to $h2 and mirror to $h3
+       tc filter add dev $pol_if $dir protocol ip pref 1 handle 101 flower \
+               dst_ip 198.51.100.1 ip_proto udp dst_port 54321 \
+               action police rate 80mbit burst 16k conform-exceed drop/pipe \
+               action mirred egress mirror dev $rp3
+
+       mausezahn $h1 -a own -b $(mac_get $rp1) -A 192.0.2.1 -B 198.51.100.1 \
+               -t udp sp=12345,dp=54321 -p 1000 -c 0 -q &
+
+       local t0=$(tc_rule_stats_get $h2 1 ingress .bytes)
+       sleep 10
+       local t1=$(tc_rule_stats_get $h2 1 ingress .bytes)
+
+       local er=$((80 * 1000 * 1000))
+       local nr=$(rate $t0 $t1 10)
+       local nr_pct=$((100 * (nr - er) / er))
+       ((-10 <= nr_pct && nr_pct <= 10))
+       check_err $? "Expected rate $(humanize $er), got $(humanize $nr), which is $nr_pct% off. Required accuracy is +-10%."
+
+       local t0=$(tc_rule_stats_get $h3 1 ingress .bytes)
+       sleep 10
+       local t1=$(tc_rule_stats_get $h3 1 ingress .bytes)
+
+       local er=$((80 * 1000 * 1000))
+       local nr=$(rate $t0 $t1 10)
+       local nr_pct=$((100 * (nr - er) / er))
+       ((-10 <= nr_pct && nr_pct <= 10))
+       check_err $? "Expected rate $(humanize $er), got $(humanize $nr), which is $nr_pct% off. Required accuracy is +-10%."
+
+       log_test "$test_name"
+
+       { kill %% && wait %%; } 2>/dev/null
+       tc filter del dev $pol_if $dir protocol ip pref 1 handle 101 flower
+       tc filter del dev $h3 ingress protocol ip pref 1 handle 101 flower
+       tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
+}
+
+police_rx_mirror_test()
+{
+       police_mirror_common_test $rp1 ingress "police rx and mirror"
+}
+
+police_tx_mirror_test()
+{
+       police_mirror_common_test $rp2 egress "police tx and mirror"
+}
+
+setup_prepare()
+{
+       h1=${NETIFS[p1]}
+       rp1=${NETIFS[p2]}
+
+       rp2=${NETIFS[p3]}
+       h2=${NETIFS[p4]}
+
+       rp3=${NETIFS[p5]}
+       h3=${NETIFS[p6]}
+
+       vrf_prepare
+       forwarding_enable
+
+       h1_create
+       h2_create
+       h3_create
+       router_create
+}
+
+cleanup()
+{
+       pre_cleanup
+
+       router_destroy
+       h3_destroy
+       h2_destroy
+       h1_destroy
+
+       forwarding_restore
+       vrf_cleanup
+}
+
+trap cleanup EXIT
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
index f50976e..aa254ae 100644 (file)
@@ -5,7 +5,7 @@ KSFT_KHDR_INSTALL := 1
 
 CFLAGS =  -Wall -Wl,--no-as-needed -O2 -g  -I$(top_srcdir)/usr/include
 
-TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh
+TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh
 
 TEST_GEN_FILES = mptcp_connect pm_nl_ctl
 
diff --git a/tools/testing/selftests/net/mptcp/diag.sh b/tools/testing/selftests/net/mptcp/diag.sh
new file mode 100755 (executable)
index 0000000..39edce4
--- /dev/null
@@ -0,0 +1,121 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
+ns="ns1-$rndh"
+ksft_skip=4
+test_cnt=1
+ret=0
+pids=()
+
+flush_pids()
+{
+       # mptcp_connect in join mode will sleep a bit before completing,
+       # give it some time
+       sleep 1.1
+
+       for pid in ${pids[@]}; do
+               [ -d /proc/$pid ] && kill -SIGUSR1 $pid >/dev/null 2>&1
+       done
+       pids=()
+}
+
+cleanup()
+{
+       ip netns del $ns
+       for pid in ${pids[@]}; do
+               [ -d /proc/$pid ] && kill -9 $pid >/dev/null 2>&1
+       done
+}
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without ip tool"
+       exit $ksft_skip
+fi
+ss -h | grep -q MPTCP
+if [ $? -ne 0 ];then
+       echo "SKIP: ss tool does not support MPTCP"
+       exit $ksft_skip
+fi
+
+__chk_nr()
+{
+       local condition="$1"
+       local expected=$2
+       local msg nr
+
+       shift 2
+       msg=$*
+       nr=$(ss -inmHMN $ns | $condition)
+
+       printf "%-50s" "$msg"
+       if [ $nr != $expected ]; then
+               echo "[ fail ] expected $expected found $nr"
+               ret=$test_cnt
+       else
+               echo "[  ok  ]"
+       fi
+       test_cnt=$((test_cnt+1))
+}
+
+chk_msk_nr()
+{
+       __chk_nr "grep -c token:" $*
+}
+
+chk_msk_fallback_nr()
+{
+               __chk_nr "grep -c fallback" $*
+}
+
+chk_msk_remote_key_nr()
+{
+               __chk_nr "grep -c remote_key" $*
+}
+
+
+trap cleanup EXIT
+ip netns add $ns
+ip -n $ns link set dev lo up
+
+echo "a" | ip netns exec $ns ./mptcp_connect -p 10000 -l 0.0.0.0 -t 100 >/dev/null &
+sleep 0.1
+pids[0]=$!
+chk_msk_nr 0 "no msk on netns creation"
+
+echo "b" | ip netns exec $ns ./mptcp_connect -p 10000 127.0.0.1 -j -t 100 >/dev/null &
+sleep 0.1
+pids[1]=$!
+chk_msk_nr 2 "after MPC handshake "
+chk_msk_remote_key_nr 2 "....chk remote_key"
+chk_msk_fallback_nr 0 "....chk no fallback"
+flush_pids
+
+
+echo "a" | ip netns exec $ns ./mptcp_connect -p 10001 -s TCP -l 0.0.0.0 -t 100 >/dev/null &
+pids[0]=$!
+sleep 0.1
+echo "b" | ip netns exec $ns ./mptcp_connect -p 10001 127.0.0.1 -j -t 100 >/dev/null &
+pids[1]=$!
+sleep 0.1
+chk_msk_fallback_nr 1 "check fallback"
+flush_pids
+
+NR_CLIENTS=100
+for I in `seq 1 $NR_CLIENTS`; do
+       echo "a" | ip netns exec $ns ./mptcp_connect -p $((I+10001)) -l 0.0.0.0 -t 100 -w 10 >/dev/null  &
+       pids[$((I*2))]=$!
+done
+sleep 0.1
+
+for I in `seq 1 $NR_CLIENTS`; do
+       echo "b" | ip netns exec $ns ./mptcp_connect -p $((I+10001)) 127.0.0.1 -t 100 -w 10 >/dev/null &
+       pids[$((I*2 + 1))]=$!
+done
+sleep 1.5
+
+chk_msk_nr $((NR_CLIENTS*2)) "many msk socket present"
+flush_pids
+
+exit $ret
index cedee5b..cad6f73 100644 (file)
@@ -11,6 +11,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <strings.h>
+#include <signal.h>
 #include <unistd.h>
 
 #include <sys/poll.h>
@@ -36,6 +37,7 @@ extern int optind;
 
 static int  poll_timeout = 10 * 1000;
 static bool listen_mode;
+static bool quit;
 
 enum cfg_mode {
        CFG_MODE_POLL,
@@ -52,11 +54,12 @@ static int pf = AF_INET;
 static int cfg_sndbuf;
 static int cfg_rcvbuf;
 static bool cfg_join;
+static int cfg_wait;
 
 static void die_usage(void)
 {
        fprintf(stderr, "Usage: mptcp_connect [-6] [-u] [-s MPTCP|TCP] [-p port] [-m mode]"
-               "[-l] connect_address\n");
+               "[-l] [-w sec] connect_address\n");
        fprintf(stderr, "\t-6 use ipv6\n");
        fprintf(stderr, "\t-t num -- set poll timeout to num\n");
        fprintf(stderr, "\t-S num -- set SO_SNDBUF to num\n");
@@ -65,9 +68,15 @@ static void die_usage(void)
        fprintf(stderr, "\t-m [MPTCP|TCP] -- use tcp or mptcp sockets\n");
        fprintf(stderr, "\t-s [mmap|poll] -- use poll (default) or mmap\n");
        fprintf(stderr, "\t-u -- check mptcp ulp\n");
+       fprintf(stderr, "\t-w num -- wait num sec before closing the socket\n");
        exit(1);
 }
 
+static void handle_signal(int nr)
+{
+       quit = true;
+}
+
 static const char *getxinfo_strerr(int err)
 {
        if (err == EAI_SYSTEM)
@@ -418,8 +427,8 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd)
        }
 
        /* leave some time for late join/announce */
-       if (cfg_join)
-               usleep(400000);
+       if (cfg_wait)
+               usleep(cfg_wait);
 
        close(peerfd);
        return 0;
@@ -812,11 +821,12 @@ static void parse_opts(int argc, char **argv)
 {
        int c;
 
-       while ((c = getopt(argc, argv, "6jlp:s:hut:m:S:R:")) != -1) {
+       while ((c = getopt(argc, argv, "6jlp:s:hut:m:S:R:w:")) != -1) {
                switch (c) {
                case 'j':
                        cfg_join = true;
                        cfg_mode = CFG_MODE_POLL;
+                       cfg_wait = 400000;
                        break;
                case 'l':
                        listen_mode = true;
@@ -850,6 +860,9 @@ static void parse_opts(int argc, char **argv)
                case 'R':
                        cfg_rcvbuf = parse_int(optarg);
                        break;
+               case 'w':
+                       cfg_wait = atoi(optarg)*1000000;
+                       break;
                }
        }
 
@@ -865,6 +878,7 @@ int main(int argc, char *argv[])
 {
        init_rng();
 
+       signal(SIGUSR1, handle_signal);
        parse_opts(argc, argv);
 
        if (tcpulp_audit)
index acf02e1..c0589e0 100755 (executable)
@@ -3,7 +3,7 @@
 
 time_start=$(date +%s)
 
-optstring="S:R:d:e:l:r:h4cm:"
+optstring="S:R:d:e:l:r:h4cm:f:t"
 ret=0
 sin=""
 sout=""
@@ -21,6 +21,8 @@ testmode=""
 sndbuf=0
 rcvbuf=0
 options_log=true
+do_tcp=0
+filesize=0
 
 if [ $tc_loss -eq 100 ];then
        tc_loss=1%
@@ -40,9 +42,11 @@ usage() {
        echo -e "\t-e: ethtool features to disable, e.g.: \"-e tso -e gso\" (default: randomly disable any of tso/gso/gro)"
        echo -e "\t-4: IPv4 only: disable IPv6 tests (default: test both IPv4 and IPv6)"
        echo -e "\t-c: capture packets for each test using tcpdump (default: no capture)"
+       echo -e "\t-f: size of file to transfer in bytes (default random)"
        echo -e "\t-S: set sndbuf value (default: use kernel default)"
        echo -e "\t-R: set rcvbuf value (default: use kernel default)"
        echo -e "\t-m: test mode (poll, sendfile; default: poll)"
+       echo -e "\t-t: also run tests with TCP (use twice to non-fallback tcp)"
 }
 
 while getopts "$optstring" option;do
@@ -94,6 +98,12 @@ while getopts "$optstring" option;do
        "m")
                testmode="$OPTARG"
                ;;
+       "f")
+               filesize="$OPTARG"
+               ;;
+       "t")
+               do_tcp=$((do_tcp+1))
+               ;;
        "?")
                usage $0
                exit 1
@@ -385,10 +395,14 @@ do_transfer()
                        capuser="-Z $SUDO_USER"
                fi
 
-               local capfile="${listener_ns}-${connector_ns}-${cl_proto}-${srv_proto}-${connect_addr}.pcap"
+               local capfile="${rndh}-${connector_ns:0:3}-${listener_ns:0:3}-${cl_proto}-${srv_proto}-${connect_addr}-${port}"
+               local capopt="-i any -s 65535 -B 32768 ${capuser}"
+
+               ip netns exec ${listener_ns}  tcpdump ${capopt} -w "${capfile}-listener.pcap"  >> "${capout}" 2>&1 &
+               local cappid_listener=$!
 
-               ip netns exec ${listener_ns} tcpdump -i any -s 65535 -B 32768 $capuser -w $capfile > "$capout" 2>&1 &
-               local cappid=$!
+               ip netns exec ${connector_ns} tcpdump ${capopt} -w "${capfile}-connector.pcap" >> "${capout}" 2>&1 &
+               local cappid_connector=$!
 
                sleep 1
        fi
@@ -413,7 +427,8 @@ do_transfer()
 
        if $capture; then
                sleep 1
-               kill $cappid
+               kill ${cappid_listener}
+               kill ${cappid_connector}
        fi
 
        local duration
@@ -449,20 +464,25 @@ make_file()
 {
        local name=$1
        local who=$2
+       local SIZE=$filesize
+       local ksize
+       local rem
 
-       local SIZE TSIZE
-       SIZE=$((RANDOM % (1024 * 8)))
-       TSIZE=$((SIZE * 1024))
+       if [ $SIZE -eq 0 ]; then
+               local MAXSIZE=$((1024 * 1024 * 8))
+               local MINSIZE=$((1024 * 256))
+
+               SIZE=$(((RANDOM * RANDOM + MINSIZE) % MAXSIZE))
+       fi
 
-       dd if=/dev/urandom of="$name" bs=1024 count=$SIZE 2> /dev/null
+       ksize=$((SIZE / 1024))
+       rem=$((SIZE - (ksize * 1024)))
 
-       SIZE=$((RANDOM % 1024))
-       SIZE=$((SIZE + 128))
-       TSIZE=$((TSIZE + SIZE))
-       dd if=/dev/urandom conv=notrunc of="$name" bs=1 count=$SIZE 2> /dev/null
+       dd if=/dev/urandom of="$name" bs=1024 count=$ksize 2> /dev/null
+       dd if=/dev/urandom conv=notrunc of="$name" bs=1 count=$rem 2> /dev/null
        echo -e "\nMPTCP_TEST_FILE_END_MARKER" >> "$name"
 
-       echo "Created $name (size $TSIZE) containing data sent by $who"
+       echo "Created $name (size $(du -b "$name")) containing data sent by $who"
 }
 
 run_tests_lo()
@@ -497,9 +517,11 @@ run_tests_lo()
                return 1
        fi
 
-       # don't bother testing fallback tcp except for loopback case.
-       if [ ${listener_ns} != ${connector_ns} ]; then
-               return 0
+       if [ $do_tcp -eq 0 ]; then
+               # don't bother testing fallback tcp except for loopback case.
+               if [ ${listener_ns} != ${connector_ns} ]; then
+                       return 0
+               fi
        fi
 
        do_transfer ${listener_ns} ${connector_ns} MPTCP TCP ${connect_addr} ${local_addr}
@@ -516,6 +538,15 @@ run_tests_lo()
                return 1
        fi
 
+       if [ $do_tcp -gt 1 ] ;then
+               do_transfer ${listener_ns} ${connector_ns} TCP TCP ${connect_addr} ${local_addr}
+               lret=$?
+               if [ $lret -ne 0 ]; then
+                       ret=$lret
+                       return 1
+               fi
+       fi
+
        return 0
 }
 
index 422e776..221fdec 100644 (file)
@@ -44,6 +44,7 @@ struct test_case {
        struct options sockopt;
        struct tstamps expected;
        bool enabled;
+       bool warn_on_fail;
 };
 
 struct sof_flag {
@@ -67,44 +68,44 @@ static struct socket_type socket_types[] = {
 static struct test_case test_cases[] = {
        { {}, {} },
        {
-               { so_timestamp: 1 },
-               { tstamp: true }
+               { .so_timestamp = 1 },
+               { .tstamp = true }
        },
        {
-               { so_timestampns: 1 },
-               { tstampns: true }
+               { .so_timestampns = 1 },
+               { .tstampns = true }
        },
        {
-               { so_timestamp: 1, so_timestampns: 1 },
-               { tstampns: true }
+               { .so_timestamp = 1, .so_timestampns = 1 },
+               { .tstampns = true }
        },
        {
-               { so_timestamping: SOF_TIMESTAMPING_RX_SOFTWARE },
+               { .so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE },
                {}
        },
        {
                /* Loopback device does not support hw timestamps. */
-               { so_timestamping: SOF_TIMESTAMPING_RX_HARDWARE },
+               { .so_timestamping = SOF_TIMESTAMPING_RX_HARDWARE },
                {}
        },
        {
-               { so_timestamping: SOF_TIMESTAMPING_SOFTWARE },
-               {}
+               { .so_timestamping = SOF_TIMESTAMPING_SOFTWARE },
+               .warn_on_fail = true
        },
        {
-               { so_timestamping: SOF_TIMESTAMPING_RX_SOFTWARE
+               { .so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE
                        | SOF_TIMESTAMPING_RX_HARDWARE },
                {}
        },
        {
-               { so_timestamping: SOF_TIMESTAMPING_SOFTWARE
+               { .so_timestamping = SOF_TIMESTAMPING_SOFTWARE
                        | SOF_TIMESTAMPING_RX_SOFTWARE },
-               { swtstamp: true }
+               { .swtstamp = true }
        },
        {
-               { so_timestamp: 1, so_timestamping: SOF_TIMESTAMPING_SOFTWARE
+               { .so_timestamp = 1, .so_timestamping = SOF_TIMESTAMPING_SOFTWARE
                        | SOF_TIMESTAMPING_RX_SOFTWARE },
-               { tstamp: true, swtstamp: true }
+               { .tstamp = true, .swtstamp = true }
        },
 };
 
@@ -115,6 +116,9 @@ static struct option long_options[] = {
        { "tcp", no_argument, 0, 't' },
        { "udp", no_argument, 0, 'u' },
        { "ip", no_argument, 0, 'i' },
+       { "strict", no_argument, 0, 'S' },
+       { "ipv4", no_argument, 0, '4' },
+       { "ipv6", no_argument, 0, '6' },
        { NULL, 0, NULL, 0 },
 };
 
@@ -270,37 +274,55 @@ void config_so_flags(int rcv, struct options o)
                error(1, errno, "Failed to set SO_TIMESTAMPING");
 }
 
-bool run_test_case(struct socket_type s, struct test_case t)
+bool run_test_case(struct socket_type *s, int test_num, char ip_version,
+                  bool strict)
 {
-       int port = (s.type == SOCK_RAW) ? 0 : next_port++;
+       union {
+               struct sockaddr_in6 addr6;
+               struct sockaddr_in addr4;
+               struct sockaddr addr_un;
+       } addr;
        int read_size = op_size;
-       struct sockaddr_in addr;
+       int src, dst, rcv, port;
+       socklen_t addr_size;
        bool failed = false;
-       int src, dst, rcv;
 
-       src = socket(AF_INET, s.type, s.protocol);
+       port = (s->type == SOCK_RAW) ? 0 : next_port++;
+       memset(&addr, 0, sizeof(addr));
+       if (ip_version == '4') {
+               addr.addr4.sin_family = AF_INET;
+               addr.addr4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+               addr.addr4.sin_port = htons(port);
+               addr_size = sizeof(addr.addr4);
+               if (s->type == SOCK_RAW)
+                       read_size += 20;  /* for IPv4 header */
+       } else {
+               addr.addr6.sin6_family = AF_INET6;
+               addr.addr6.sin6_addr = in6addr_loopback;
+               addr.addr6.sin6_port = htons(port);
+               addr_size = sizeof(addr.addr6);
+       }
+       printf("Starting testcase %d over ipv%c...\n", test_num, ip_version);
+       src = socket(addr.addr_un.sa_family, s->type,
+                    s->protocol);
        if (src < 0)
                error(1, errno, "Failed to open src socket");
 
-       dst = socket(AF_INET, s.type, s.protocol);
+       dst = socket(addr.addr_un.sa_family, s->type,
+                    s->protocol);
        if (dst < 0)
                error(1, errno, "Failed to open dst socket");
 
-       memset(&addr, 0, sizeof(addr));
-       addr.sin_family = AF_INET;
-       addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
-       addr.sin_port = htons(port);
-
-       if (bind(dst, (struct sockaddr *)&addr, sizeof(addr)) < 0)
+       if (bind(dst, &addr.addr_un, addr_size) < 0)
                error(1, errno, "Failed to bind to port %d", port);
 
-       if (s.type == SOCK_STREAM && (listen(dst, 1) < 0))
+       if (s->type == SOCK_STREAM && (listen(dst, 1) < 0))
                error(1, errno, "Failed to listen");
 
-       if (connect(src, (struct sockaddr *)&addr, sizeof(addr)) < 0)
+       if (connect(src, &addr.addr_un, addr_size) < 0)
                error(1, errno, "Failed to connect");
 
-       if (s.type == SOCK_STREAM) {
+       if (s->type == SOCK_STREAM) {
                rcv = accept(dst, NULL, NULL);
                if (rcv < 0)
                        error(1, errno, "Failed to accept");
@@ -309,17 +331,22 @@ bool run_test_case(struct socket_type s, struct test_case t)
                rcv = dst;
        }
 
-       config_so_flags(rcv, t.sockopt);
+       config_so_flags(rcv, test_cases[test_num].sockopt);
        usleep(20000); /* setsockopt for SO_TIMESTAMPING is asynchronous */
        do_send(src);
 
-       if (s.type == SOCK_RAW)
-               read_size += 20;  /* for IP header */
-       failed = do_recv(rcv, read_size, t.expected);
+       failed = do_recv(rcv, read_size, test_cases[test_num].expected);
 
        close(rcv);
        close(src);
 
+       if (failed) {
+               printf("FAILURE in testcase %d over ipv%c ", test_num,
+                      ip_version);
+               print_test_case(&test_cases[test_num]);
+               if (!strict && test_cases[test_num].warn_on_fail)
+                       failed = false;
+       }
        return failed;
 }
 
@@ -327,6 +354,9 @@ int main(int argc, char **argv)
 {
        bool all_protocols = true;
        bool all_tests = true;
+       bool cfg_ipv4 = false;
+       bool cfg_ipv6 = false;
+       bool strict = false;
        int arg_index = 0;
        int failures = 0;
        int s, t;
@@ -363,6 +393,15 @@ int main(int argc, char **argv)
                        all_protocols = false;
                        socket_types[0].enabled = true;
                        break;
+               case 'S':
+                       strict = true;
+                       break;
+               case '4':
+                       cfg_ipv4 = true;
+                       break;
+               case '6':
+                       cfg_ipv6 = true;
+                       break;
                default:
                        error(1, 0, "Failed to parse parameters.");
                }
@@ -376,13 +415,14 @@ int main(int argc, char **argv)
                for (t = 0; t < ARRAY_SIZE(test_cases); t++) {
                        if (!all_tests && !test_cases[t].enabled)
                                continue;
-
-                       printf("Starting testcase %d...\n", t);
-                       if (run_test_case(socket_types[s], test_cases[t])) {
-                               failures++;
-                               printf("FAILURE in test case ");
-                               print_test_case(&test_cases[t]);
-                       }
+                       if (cfg_ipv4 || !cfg_ipv6)
+                               if (run_test_case(&socket_types[s], t, '4',
+                                                 strict))
+                                       failures++;
+                       if (cfg_ipv6 || !cfg_ipv4)
+                               if (run_test_case(&socket_types[s], t, '6',
+                                                 strict))
+                                       failures++;
                }
        }
        if (!failures)
diff --git a/tools/testing/selftests/net/rxtimestamp.sh b/tools/testing/selftests/net/rxtimestamp.sh
new file mode 100755 (executable)
index 0000000..91631e8
--- /dev/null
@@ -0,0 +1,4 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+./in_netns.sh ./rxtimestamp $@
index 383bac0..ceaad78 100644 (file)
@@ -15,8 +15,9 @@
 #include <inttypes.h>
 #include <linux/net_tstamp.h>
 #include <linux/errqueue.h>
+#include <linux/if_ether.h>
 #include <linux/ipv6.h>
-#include <linux/tcp.h>
+#include <linux/udp.h>
 #include <stdbool.h>
 #include <stdlib.h>
 #include <stdio.h>
@@ -140,8 +141,8 @@ static void do_recv_errqueue_timeout(int fdt)
 {
        char control[CMSG_SPACE(sizeof(struct sock_extended_err)) +
                     CMSG_SPACE(sizeof(struct sockaddr_in6))] = {0};
-       char data[sizeof(struct ipv6hdr) +
-                 sizeof(struct tcphdr) + 1];
+       char data[sizeof(struct ethhdr) + sizeof(struct ipv6hdr) +
+                 sizeof(struct udphdr) + 1];
        struct sock_extended_err *err;
        struct msghdr msg = {0};
        struct iovec iov = {0};
@@ -159,6 +160,8 @@ static void do_recv_errqueue_timeout(int fdt)
        msg.msg_controllen = sizeof(control);
 
        while (1) {
+               const char *reason;
+
                ret = recvmsg(fdt, &msg, MSG_ERRQUEUE);
                if (ret == -1 && errno == EAGAIN)
                        break;
@@ -176,14 +179,30 @@ static void do_recv_errqueue_timeout(int fdt)
                err = (struct sock_extended_err *)CMSG_DATA(cm);
                if (err->ee_origin != SO_EE_ORIGIN_TXTIME)
                        error(1, 0, "errqueue: origin 0x%x\n", err->ee_origin);
-               if (err->ee_code != ECANCELED)
-                       error(1, 0, "errqueue: code 0x%x\n", err->ee_code);
+
+               switch (err->ee_errno) {
+               case ECANCELED:
+                       if (err->ee_code != SO_EE_CODE_TXTIME_MISSED)
+                               error(1, 0, "errqueue: unknown ECANCELED %u\n",
+                                     err->ee_code);
+                       reason = "missed txtime";
+               break;
+               case EINVAL:
+                       if (err->ee_code != SO_EE_CODE_TXTIME_INVALID_PARAM)
+                               error(1, 0, "errqueue: unknown EINVAL %u\n",
+                                     err->ee_code);
+                       reason = "invalid txtime";
+               break;
+               default:
+                       error(1, 0, "errqueue: errno %u code %u\n",
+                             err->ee_errno, err->ee_code);
+               };
 
                tstamp = ((int64_t) err->ee_data) << 32 | err->ee_info;
                tstamp -= (int64_t) glob_tstart;
                tstamp /= 1000 * 1000;
-               fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped\n",
-                               data[ret - 1], tstamp);
+               fprintf(stderr, "send: pkt %c at %" PRId64 "ms dropped: %s\n",
+                       data[ret - 1], tstamp, reason);
 
                msg.msg_flags = 0;
                msg.msg_controllen = sizeof(control);
diff --git a/tools/testing/selftests/net/vrf_strict_mode_test.sh b/tools/testing/selftests/net/vrf_strict_mode_test.sh
new file mode 100755 (executable)
index 0000000..18b982d
--- /dev/null
@@ -0,0 +1,396 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test is designed for testing the new VRF strict_mode functionality.
+
+ret=0
+
+# identifies the "init" network namespace which is often called root network
+# namespace.
+INIT_NETNS_NAME="init"
+
+PAUSE_ON_FAIL=${PAUSE_ON_FAIL:=no}
+
+log_test()
+{
+       local rc=$1
+       local expected=$2
+       local msg="$3"
+
+       if [ ${rc} -eq ${expected} ]; then
+               nsuccess=$((nsuccess+1))
+               printf "\n    TEST: %-60s  [ OK ]\n" "${msg}"
+       else
+               ret=1
+               nfail=$((nfail+1))
+               printf "\n    TEST: %-60s  [FAIL]\n" "${msg}"
+               if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+                       echo
+                       echo "hit enter to continue, 'q' to quit"
+                       read a
+                       [ "$a" = "q" ] && exit 1
+               fi
+       fi
+}
+
+print_log_test_results()
+{
+       if [ "$TESTS" != "none" ]; then
+               printf "\nTests passed: %3d\n" ${nsuccess}
+               printf "Tests failed: %3d\n"   ${nfail}
+       fi
+}
+
+log_section()
+{
+       echo
+       echo "################################################################################"
+       echo "TEST SECTION: $*"
+       echo "################################################################################"
+}
+
+ip_expand_args()
+{
+       local nsname=$1
+       local nsarg=""
+
+       if [ "${nsname}" != "${INIT_NETNS_NAME}" ]; then
+               nsarg="-netns ${nsname}"
+       fi
+
+       echo "${nsarg}"
+}
+
+vrf_count()
+{
+       local nsname=$1
+       local nsarg="$(ip_expand_args ${nsname})"
+
+       ip ${nsarg} -o link show type vrf | wc -l
+}
+
+count_vrf_by_table_id()
+{
+       local nsname=$1
+       local tableid=$2
+       local nsarg="$(ip_expand_args ${nsname})"
+
+       ip ${nsarg} -d -o link show type vrf | grep "table ${tableid}" | wc -l
+}
+
+add_vrf()
+{
+       local nsname=$1
+       local vrfname=$2
+       local vrftable=$3
+       local nsarg="$(ip_expand_args ${nsname})"
+
+       ip ${nsarg} link add ${vrfname} type vrf table ${vrftable} &>/dev/null
+}
+
+add_vrf_and_check()
+{
+       local nsname=$1
+       local vrfname=$2
+       local vrftable=$3
+       local cnt
+       local rc
+
+       add_vrf ${nsname} ${vrfname} ${vrftable}; rc=$?
+
+       cnt=$(count_vrf_by_table_id ${nsname} ${vrftable})
+
+       log_test ${rc} 0 "${nsname}: add vrf ${vrfname}, ${cnt} vrfs for table ${vrftable}"
+}
+
+add_vrf_and_check_fail()
+{
+       local nsname=$1
+       local vrfname=$2
+       local vrftable=$3
+       local cnt
+       local rc
+
+       add_vrf ${nsname} ${vrfname} ${vrftable}; rc=$?
+
+       cnt=$(count_vrf_by_table_id ${nsname} ${vrftable})
+
+       log_test ${rc} 2 "${nsname}: CANNOT add vrf ${vrfname}, ${cnt} vrfs for table ${vrftable}"
+}
+
+del_vrf_and_check()
+{
+       local nsname=$1
+       local vrfname=$2
+       local nsarg="$(ip_expand_args ${nsname})"
+
+       ip ${nsarg} link del ${vrfname}
+       log_test $? 0 "${nsname}: remove vrf ${vrfname}"
+}
+
+config_vrf_and_check()
+{
+       local nsname=$1
+       local addr=$2
+       local vrfname=$3
+       local nsarg="$(ip_expand_args ${nsname})"
+
+       ip ${nsarg} link set dev ${vrfname} up && \
+               ip ${nsarg} addr add ${addr} dev ${vrfname}
+       log_test $? 0 "${nsname}: vrf ${vrfname} up, addr ${addr}"
+}
+
+read_strict_mode()
+{
+       local nsname=$1
+       local rval
+       local rc=0
+       local nsexec=""
+
+       if [ "${nsname}" != "${INIT_NETNS_NAME}" ]; then
+               # a custom network namespace is provided
+               nsexec="ip netns exec ${nsname}"
+       fi
+
+       rval="$(${nsexec} bash -c "cat /proc/sys/net/vrf/strict_mode" | \
+               grep -E "^[0-1]$")" &> /dev/null
+       if [ $? -ne 0 ]; then
+               # set errors
+               rval=255
+               rc=1
+       fi
+
+       # on success, rval can be only 0 or 1; on error, rval is equal to 255
+       echo ${rval}
+       return ${rc}
+}
+
+read_strict_mode_compare_and_check()
+{
+       local nsname=$1
+       local expected=$2
+       local res
+
+       res="$(read_strict_mode ${nsname})"
+       log_test ${res} ${expected} "${nsname}: check strict_mode=${res}"
+}
+
+set_strict_mode()
+{
+       local nsname=$1
+       local val=$2
+       local nsexec=""
+
+       if [ "${nsname}" != "${INIT_NETNS_NAME}" ]; then
+               # a custom network namespace is provided
+               nsexec="ip netns exec ${nsname}"
+       fi
+
+       ${nsexec} bash -c "echo ${val} >/proc/sys/net/vrf/strict_mode" &>/dev/null
+}
+
+enable_strict_mode()
+{
+       local nsname=$1
+
+       set_strict_mode ${nsname} 1
+}
+
+disable_strict_mode()
+{
+       local nsname=$1
+
+       set_strict_mode ${nsname} 0
+}
+
+disable_strict_mode_and_check()
+{
+       local nsname=$1
+
+       disable_strict_mode ${nsname}
+       log_test $? 0 "${nsname}: disable strict_mode (=0)"
+}
+
+enable_strict_mode_and_check()
+{
+       local nsname=$1
+
+       enable_strict_mode ${nsname}
+       log_test $? 0 "${nsname}: enable strict_mode (=1)"
+}
+
+enable_strict_mode_and_check_fail()
+{
+       local nsname=$1
+
+       enable_strict_mode ${nsname}
+       log_test $? 1 "${nsname}: CANNOT enable strict_mode"
+}
+
+strict_mode_check_default()
+{
+       local nsname=$1
+       local strictmode
+       local vrfcnt
+
+       vrfcnt=$(vrf_count ${nsname})
+       strictmode=$(read_strict_mode ${nsname})
+       log_test ${strictmode} 0 "${nsname}: strict_mode=0 by default, ${vrfcnt} vrfs"
+}
+
+setup()
+{
+       modprobe vrf
+
+       ip netns add testns
+       ip netns exec testns ip link set lo up
+}
+
+cleanup()
+{
+       ip netns del testns 2>/dev/null
+
+       ip link del vrf100 2>/dev/null
+       ip link del vrf101 2>/dev/null
+       ip link del vrf102 2>/dev/null
+
+       echo 0 >/proc/sys/net/vrf/strict_mode 2>/dev/null
+}
+
+vrf_strict_mode_tests_init()
+{
+       vrf_strict_mode_check_support init
+
+       strict_mode_check_default init
+
+       add_vrf_and_check init vrf100 100
+       config_vrf_and_check init 172.16.100.1/24 vrf100
+
+       enable_strict_mode_and_check init
+
+       add_vrf_and_check_fail init vrf101 100
+
+       disable_strict_mode_and_check init
+
+       add_vrf_and_check init vrf101 100
+       config_vrf_and_check init 172.16.101.1/24 vrf101
+
+       enable_strict_mode_and_check_fail init
+
+       del_vrf_and_check init vrf101
+
+       enable_strict_mode_and_check init
+
+       add_vrf_and_check init vrf102 102
+       config_vrf_and_check init 172.16.102.1/24 vrf102
+
+       # the strict_mode is enabled in the init netns
+}
+
+vrf_strict_mode_tests_testns()
+{
+       vrf_strict_mode_check_support testns
+
+       strict_mode_check_default testns
+
+       enable_strict_mode_and_check testns
+
+       add_vrf_and_check testns vrf100 100
+       config_vrf_and_check testns 10.0.100.1/24 vrf100
+
+       add_vrf_and_check_fail testns vrf101 100
+
+       add_vrf_and_check_fail testns vrf102 100
+
+       add_vrf_and_check testns vrf200 200
+
+       disable_strict_mode_and_check testns
+
+       add_vrf_and_check testns vrf101 100
+
+       add_vrf_and_check testns vrf102 100
+
+       # the strict_mode is disabled in the testns
+}
+
+vrf_strict_mode_tests_mix()
+{
+       read_strict_mode_compare_and_check init 1
+
+       read_strict_mode_compare_and_check testns 0
+
+       del_vrf_and_check testns vrf101
+
+       del_vrf_and_check testns vrf102
+
+       disable_strict_mode_and_check init
+
+       enable_strict_mode_and_check testns
+
+       enable_strict_mode_and_check init
+       enable_strict_mode_and_check init
+
+       disable_strict_mode_and_check testns
+       disable_strict_mode_and_check testns
+
+       read_strict_mode_compare_and_check init 1
+
+       read_strict_mode_compare_and_check testns 0
+}
+
+vrf_strict_mode_tests()
+{
+       log_section "VRF strict_mode test on init network namespace"
+       vrf_strict_mode_tests_init
+
+       log_section "VRF strict_mode test on testns network namespace"
+       vrf_strict_mode_tests_testns
+
+       log_section "VRF strict_mode test mixing init and testns network namespaces"
+       vrf_strict_mode_tests_mix
+}
+
+vrf_strict_mode_check_support()
+{
+       local nsname=$1
+       local output
+       local rc
+
+       output="$(lsmod | grep '^vrf' | awk '{print $1}')"
+       if [ -z "${output}" ]; then
+               modinfo vrf || return $?
+       fi
+
+       # we do not care about the value of the strict_mode; we only check if
+       # the strict_mode parameter is available or not.
+       read_strict_mode ${nsname} &>/dev/null; rc=$?
+       log_test ${rc} 0 "${nsname}: net.vrf.strict_mode is available"
+
+       return ${rc}
+}
+
+if [ "$(id -u)" -ne 0 ];then
+       echo "SKIP: Need root privileges"
+       exit 0
+fi
+
+if [ ! -x "$(command -v ip)" ]; then
+       echo "SKIP: Could not run test without ip tool"
+       exit 0
+fi
+
+modprobe vrf &>/dev/null
+if [ ! -e /proc/sys/net/vrf/strict_mode ]; then
+       echo "SKIP: vrf sysctl does not exist"
+       exit 0
+fi
+
+cleanup &> /dev/null
+
+setup
+vrf_strict_mode_tests
+cleanup
+
+print_log_test_results
+
+exit $ret
index 9c0f758..a179f0d 100644 (file)
@@ -3,7 +3,7 @@
 
 TEST_PROGS := nft_trans_stress.sh nft_nat.sh bridge_brouter.sh \
        conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
-       nft_concat_range.sh \
+       nft_concat_range.sh nft_conntrack_helper.sh \
        nft_queue.sh
 
 LDLIBS = -lmnl
diff --git a/tools/testing/selftests/netfilter/nft_conntrack_helper.sh b/tools/testing/selftests/netfilter/nft_conntrack_helper.sh
new file mode 100755 (executable)
index 0000000..edf0a48
--- /dev/null
@@ -0,0 +1,175 @@
+#!/bin/bash
+#
+# This tests connection tracking helper assignment:
+# 1. can attach ftp helper to a connection from nft ruleset.
+# 2. auto-assign still works.
+#
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns1="ns1-$sfx"
+ns2="ns2-$sfx"
+testipv6=1
+
+cleanup()
+{
+       ip netns del ${ns1}
+       ip netns del ${ns2}
+}
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without nft tool"
+       exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without ip tool"
+       exit $ksft_skip
+fi
+
+conntrack -V > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without conntrack tool"
+       exit $ksft_skip
+fi
+
+which nc >/dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without netcat tool"
+       exit $ksft_skip
+fi
+
+trap cleanup EXIT
+
+ip netns add ${ns1}
+ip netns add ${ns2}
+
+ip link add veth0 netns ${ns1} type veth peer name veth0 netns ${ns2} > /dev/null 2>&1
+if [ $? -ne 0 ];then
+    echo "SKIP: No virtual ethernet pair device support in kernel"
+    exit $ksft_skip
+fi
+
+ip -net ${ns1} link set lo up
+ip -net ${ns1} link set veth0 up
+
+ip -net ${ns2} link set lo up
+ip -net ${ns2} link set veth0 up
+
+ip -net ${ns1} addr add 10.0.1.1/24 dev veth0
+ip -net ${ns1} addr add dead:1::1/64 dev veth0
+
+ip -net ${ns2} addr add 10.0.1.2/24 dev veth0
+ip -net ${ns2} addr add dead:1::2/64 dev veth0
+
+load_ruleset_family() {
+       local family=$1
+       local ns=$2
+
+ip netns exec ${ns} nft -f - <<EOF
+table $family raw {
+       ct helper ftp {
+             type "ftp" protocol tcp
+        }
+       chain pre {
+               type filter hook prerouting priority 0; policy accept;
+               tcp dport 2121 ct helper set "ftp"
+       }
+       chain output {
+               type filter hook output priority 0; policy accept;
+               tcp dport 2121 ct helper set "ftp"
+       }
+}
+EOF
+       return $?
+}
+
+check_for_helper()
+{
+       local netns=$1
+       local message=$2
+       local port=$3
+
+       ip netns exec ${netns} conntrack -L -p tcp --dport $port 2> /dev/null |grep -q 'helper=ftp'
+       if [ $? -ne 0 ] ; then
+               echo "FAIL: ${netns} did not show attached helper $message" 1>&2
+               ret=1
+       fi
+
+       echo "PASS: ${netns} connection on port $port has ftp helper attached" 1>&2
+       return 0
+}
+
+test_helper()
+{
+       local port=$1
+       local msg=$2
+
+       sleep 3 | ip netns exec ${ns2} nc -w 2 -l -p $port > /dev/null &
+
+       sleep 1
+       sleep 1 | ip netns exec ${ns1} nc -w 2 10.0.1.2 $port > /dev/null &
+
+       check_for_helper "$ns1" "ip $msg" $port
+       check_for_helper "$ns2" "ip $msg" $port
+
+       wait
+
+       if [ $testipv6 -eq 0 ] ;then
+               return 0
+       fi
+
+       ip netns exec ${ns1} conntrack -F 2> /dev/null
+       ip netns exec ${ns2} conntrack -F 2> /dev/null
+
+       sleep 3 | ip netns exec ${ns2} nc -w 2 -6 -l -p $port > /dev/null &
+
+       sleep 1
+       sleep 1 | ip netns exec ${ns1} nc -w 2 -6 dead:1::2 $port > /dev/null &
+
+       check_for_helper "$ns1" "ipv6 $msg" $port
+       check_for_helper "$ns2" "ipv6 $msg" $port
+
+       wait
+}
+
+load_ruleset_family ip ${ns1}
+if [ $? -ne 0 ];then
+       echo "FAIL: ${ns1} cannot load ip ruleset" 1>&2
+       exit 1
+fi
+
+load_ruleset_family ip6 ${ns1}
+if [ $? -ne 0 ];then
+       echo "SKIP: ${ns1} cannot load ip6 ruleset" 1>&2
+       testipv6=0
+fi
+
+load_ruleset_family inet ${ns2}
+if [ $? -ne 0 ];then
+       echo "SKIP: ${ns1} cannot load inet ruleset" 1>&2
+       load_ruleset_family ip ${ns2}
+       if [ $? -ne 0 ];then
+               echo "FAIL: ${ns2} cannot load ip ruleset" 1>&2
+               exit 1
+       fi
+
+       if [ $testipv6 -eq 1 ] ;then
+               load_ruleset_family ip6 ${ns2}
+               if [ $? -ne 0 ];then
+                       echo "FAIL: ${ns2} cannot load ip6 ruleset" 1>&2
+                       exit 1
+               fi
+       fi
+fi
+
+test_helper 2121 "set via ruleset"
+ip netns exec ${ns1} sysctl -q 'net.netfilter.nf_conntrack_helper=1'
+ip netns exec ${ns2} sysctl -q 'net.netfilter.nf_conntrack_helper=1'
+test_helper 21 "auto-assign"
+
+exit $ret
index c1921a5..8d728ed 100644 (file)
@@ -95,4 +95,9 @@ static inline int sys_pidfd_getfd(int pidfd, int fd, int flags)
        return syscall(__NR_pidfd_getfd, pidfd, fd, flags);
 }
 
+static inline int sys_memfd_create(const char *name, unsigned int flags)
+{
+       return syscall(__NR_memfd_create, name, flags);
+}
+
 #endif /* __PIDFD_H */
index 401a7c1..84b65ec 100644 (file)
@@ -34,11 +34,6 @@ static int sys_kcmp(pid_t pid1, pid_t pid2, int type, unsigned long idx1,
        return syscall(__NR_kcmp, pid1, pid2, type, idx1, idx2);
 }
 
-static int sys_memfd_create(const char *name, unsigned int flags)
-{
-       return syscall(__NR_memfd_create, name, flags);
-}
-
 static int __child(int sk, int memfd)
 {
        int ret;
index 133ec5b..9418108 100644 (file)
@@ -470,4 +470,16 @@ TEST_F(current_nsset, no_foul_play)
        }
 }
 
+TEST(setns_einval)
+{
+       int fd;
+
+       fd = sys_memfd_create("rostock", 0);
+       EXPECT_GT(fd, 0);
+
+       ASSERT_NE(setns(fd, 0), 0);
+       EXPECT_EQ(errno, EINVAL);
+       close(fd);
+}
+
 TEST_HARNESS_MAIN
index ca35dd8..af3df79 100644 (file)
@@ -7,7 +7,7 @@ noarg:
 # The EBB handler is 64-bit code and everything links against it
 CFLAGS += -m64
 
-TMPOUT = $(OUTPUT)/
+TMPOUT = $(OUTPUT)/TMPDIR/
 # Toolchains may build PIE by default which breaks the assembly
 no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
         $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
index da7a9dd..f7911aa 100644 (file)
@@ -35,6 +35,8 @@
 #define CLOCK_INVALID -1
 #endif
 
+#define NSEC_PER_SEC 1000000000LL
+
 /* clock_adjtime is not available in GLIBC < 2.14 */
 #if !__GLIBC_PREREQ(2, 14)
 #include <sys/syscall.h>
@@ -132,6 +134,8 @@ static void usage(char *progname)
                "            1 - external time stamp\n"
                "            2 - periodic output\n"
                " -p val     enable output with a period of 'val' nanoseconds\n"
+               " -H val     set output phase to 'val' nanoseconds (requires -p)\n"
+               " -w val     set output pulse width to 'val' nanoseconds (requires -p)\n"
                " -P val     enable or disable (val=1|0) the system clock PPS\n"
                " -s         set the ptp clock time from the system time\n"
                " -S         set the system time from the ptp clock time\n"
@@ -169,7 +173,6 @@ int main(int argc, char *argv[])
        int list_pins = 0;
        int pct_offset = 0;
        int n_samples = 0;
-       int perout = -1;
        int pin_index = -1, pin_func;
        int pps = -1;
        int seconds = 0;
@@ -177,10 +180,13 @@ int main(int argc, char *argv[])
 
        int64_t t1, t2, tp;
        int64_t interval, offset;
+       int64_t perout_phase = -1;
+       int64_t pulsewidth = -1;
+       int64_t perout = -1;
 
        progname = strrchr(argv[0], '/');
        progname = progname ? 1+progname : argv[0];
-       while (EOF != (c = getopt(argc, argv, "cd:e:f:ghi:k:lL:p:P:sSt:T:z"))) {
+       while (EOF != (c = getopt(argc, argv, "cd:e:f:ghH:i:k:lL:p:P:sSt:T:w:z"))) {
                switch (c) {
                case 'c':
                        capabilities = 1;
@@ -197,6 +203,9 @@ int main(int argc, char *argv[])
                case 'g':
                        gettime = 1;
                        break;
+               case 'H':
+                       perout_phase = atoll(optarg);
+                       break;
                case 'i':
                        index = atoi(optarg);
                        break;
@@ -215,7 +224,7 @@ int main(int argc, char *argv[])
                        }
                        break;
                case 'p':
-                       perout = atoi(optarg);
+                       perout = atoll(optarg);
                        break;
                case 'P':
                        pps = atoi(optarg);
@@ -233,6 +242,9 @@ int main(int argc, char *argv[])
                        settime = 3;
                        seconds = atoi(optarg);
                        break;
+               case 'w':
+                       pulsewidth = atoi(optarg);
+                       break;
                case 'z':
                        flagtest = 1;
                        break;
@@ -391,6 +403,16 @@ int main(int argc, char *argv[])
                }
        }
 
+       if (pulsewidth >= 0 && perout < 0) {
+               puts("-w can only be specified together with -p");
+               return -1;
+       }
+
+       if (perout_phase >= 0 && perout < 0) {
+               puts("-H can only be specified together with -p");
+               return -1;
+       }
+
        if (perout >= 0) {
                if (clock_gettime(clkid, &ts)) {
                        perror("clock_gettime");
@@ -398,11 +420,24 @@ int main(int argc, char *argv[])
                }
                memset(&perout_request, 0, sizeof(perout_request));
                perout_request.index = index;
-               perout_request.start.sec = ts.tv_sec + 2;
-               perout_request.start.nsec = 0;
-               perout_request.period.sec = 0;
-               perout_request.period.nsec = perout;
-               if (ioctl(fd, PTP_PEROUT_REQUEST, &perout_request)) {
+               perout_request.period.sec = perout / NSEC_PER_SEC;
+               perout_request.period.nsec = perout % NSEC_PER_SEC;
+               perout_request.flags = 0;
+               if (pulsewidth >= 0) {
+                       perout_request.flags |= PTP_PEROUT_DUTY_CYCLE;
+                       perout_request.on.sec = pulsewidth / NSEC_PER_SEC;
+                       perout_request.on.nsec = pulsewidth % NSEC_PER_SEC;
+               }
+               if (perout_phase >= 0) {
+                       perout_request.flags |= PTP_PEROUT_PHASE;
+                       perout_request.phase.sec = perout_phase / NSEC_PER_SEC;
+                       perout_request.phase.nsec = perout_phase % NSEC_PER_SEC;
+               } else {
+                       perout_request.start.sec = ts.tv_sec + 2;
+                       perout_request.start.nsec = 0;
+               }
+
+               if (ioctl(fd, PTP_PEROUT_REQUEST2, &perout_request)) {
                        perror("PTP_PEROUT_REQUEST");
                } else {
                        puts("periodic output request okay");
index c0aa46c..252140a 100644 (file)
@@ -1615,6 +1615,7 @@ TEST_F(TRACE_poke, getpid_runs_normally)
 # define ARCH_REGS     s390_regs
 # define SYSCALL_NUM   gprs[2]
 # define SYSCALL_RET   gprs[2]
+# define SYSCALL_NUM_RET_SHARE_REG
 #elif defined(__mips__)
 # define ARCH_REGS     struct pt_regs
 # define SYSCALL_NUM   regs[2]
@@ -1,11 +1,11 @@
 # SPDX-License-Identifier: GPL-2.0
 
-APIDIR := ../../../../include/uapi
+top_srcdir = $(abspath ../../../..)
+APIDIR := $(top_scrdir)/include/uapi
 TEST_GEN_FILES = action.o
 
-top_srcdir = ../../../../..
 KSFT_KHDR_INSTALL := 1
-include ../../lib.mk
+include ../lib.mk
 
 CLANG ?= clang
 LLC   ?= llc
@@ -28,3 +28,6 @@ $(OUTPUT)/%.o: %.c
        $(CLANG) $(CLANG_FLAGS) \
                 -O2 -target bpf -emit-llvm -c $< -o - |      \
        $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
+
+TEST_PROGS += ./tdc.sh
+TEST_FILES := tdc*.py Tdc*.py plugins plugin-lib tc-tests
index 47a3082..503982b 100644 (file)
                 255
             ]
         ],
-        "cmdUnderTest": "$TC action add action bpf bytecode '4,40 0 0 12,21 0 1 2054,6 0 0 262144,6 0 0 0' index 4294967296 cookie 12345",
+        "cmdUnderTest": "$TC action add action bpf bytecode '4,40 0 0 12,21 0 1 2054,6 0 0 262144,6 0 0 0' index 4294967296 cookie 123456",
         "expExitCode": "255",
         "verifyCmd": "$TC action ls action bpf",
-        "matchPattern": "action order [0-9]*: bpf bytecode '4,40 0 0 12,21 0 1 2048,6 0 0 262144,6 0 0 0' default-action pipe.*cookie 12345",
+        "matchPattern": "action order [0-9]*: bpf bytecode '4,40 0 0 12,21 0 1 2048,6 0 0 262144,6 0 0 0' default-action pipe.*cookie 123456",
         "matchCount": "0",
         "teardown": [
             "$TC action flush action bpf"
index 88ec134..072febf 100644 (file)
                 255
             ]
         ],
-        "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie aaabbbcccdddeee \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
+        "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie 123456789abcde \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
         "expExitCode": "0",
         "verifyCmd": "$TC actions ls action csum",
         "matchPattern": "^[ \t]+index [0-9]* ref",
                 1,
                 255
             ],
-            "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie aaabbbcccdddeee \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\""
+            "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum tcp continue index \\$i cookie 123456789abcde \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\""
         ],
         "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action csum index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"",
         "expExitCode": "0",
index fbeb919..d063469 100644 (file)
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022 index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:00880022.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action tunnel_key"
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344 index 1",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:00880022,0408:42:0040007611223344,0111:02:1020304011223344.*index 1",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action tunnel_key"
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 824212:80:00880022 index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 824212:80:00880022.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 824212:80:00880022.*index 1",
         "matchCount": "0",
         "teardown": [
             "$TC actions flush action tunnel_key"
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:4224:00880022 index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:4224:00880022.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:4224:00880022.*index 1",
         "matchCount": "0",
         "teardown": [
             "$TC actions flush action tunnel_key"
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:4288 index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:4288.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:4288.*index 1",
         "matchCount": "0",
         "teardown": [
             "$TC actions flush action tunnel_key"
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:4288428822 index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:4288428822.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:4288428822.*index 1",
         "matchCount": "0",
         "teardown": [
             "$TC actions flush action tunnel_key"
         "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.2 id 42 dst_port 6081 geneve_opts 0102:80:00880022,0408:42: index 1",
         "expExitCode": "255",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt 0102:80:00880022,0408:42:.*index 1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 1.1.1.1.*dst_ip 2.2.2.2.*key_id 42.*dst_port 6081.*geneve_opt[s]? 0102:80:00880022,0408:42:.*index 1",
         "matchCount": "0",
         "teardown": [
             "$TC actions flush action tunnel_key"
                 1,
                 255
             ],
-            "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 index 1 cookie aabbccddeeff112233445566778800a"
+            "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 index 1 cookie 123456"
         ],
-        "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 11.11.11.1 dst_ip 21.21.21.2 dst_port 3129 id 11 csum reclassify index 1 cookie a1b1c1d1",
+        "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 11.11.11.1 dst_ip 21.21.21.2 dst_port 3129 id 11 csum reclassify index 1 cookie 123456",
         "expExitCode": "0",
         "verifyCmd": "$TC actions get action tunnel_key index 1",
-        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 11.11.11.1.*dst_ip 21.21.21.2.*key_id 11.*dst_port 3129.*csum reclassify.*index 1.*cookie a1b1c1d1",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 11.11.11.1.*dst_ip 21.21.21.2.*key_id 11.*dst_port 3129.*csum reclassify.*index 1.*cookie 123456",
         "matchCount": "1",
         "teardown": [
             "$TC actions flush action tunnel_key"
diff --git a/tools/testing/selftests/tc-testing/tdc.sh b/tools/testing/selftests/tc-testing/tdc.sh
new file mode 100755 (executable)
index 0000000..7fe38c7
--- /dev/null
@@ -0,0 +1,5 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+./tdc.py -c actions --nobuildebpf
+./tdc.py -c qdisc
index 080709c..cd4a27e 100644 (file)
@@ -24,7 +24,7 @@ NAMES = {
           # Name of the namespace to use
           'NS': 'tcut',
           # Directory containing eBPF test programs
-          'EBPFDIR': './bpf'
+          'EBPFDIR': './'
         }
 
 
index 6630627..3e5ff29 100755 (executable)
@@ -1,15 +1,10 @@
-#!/bin/bash
+#!/bin/sh
 # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
 
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
 
-[ -f /dev/tpm0 ] || exit $ksft_skip
+[ -e /dev/tpm0 ] || exit $ksft_skip
 
-python -m unittest -v tpm2_tests.SmokeTest
-python -m unittest -v tpm2_tests.AsyncTest
-
-CLEAR_CMD=$(which tpm2_clear)
-if [ -n $CLEAR_CMD ]; then
-       tpm2_clear -T device
-fi
+python3 -m unittest -v tpm2_tests.SmokeTest
+python3 -m unittest -v tpm2_tests.AsyncTest
index 36c9d03..04c47b1 100755 (executable)
@@ -1,9 +1,9 @@
-#!/bin/bash
+#!/bin/sh
 # SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
 
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
 
-[ -f /dev/tpmrm0 ] || exit $ksft_skip
+[ -e /dev/tpmrm0 ] || exit $ksft_skip
 
-python -m unittest -v tpm2_tests.SpaceTest
+python3 -m unittest -v tpm2_tests.SpaceTest
index d0fcb66..f34486c 100644 (file)
@@ -247,14 +247,14 @@ class ProtocolError(Exception):
 class AuthCommand(object):
     """TPMS_AUTH_COMMAND"""
 
-    def __init__(self, session_handle=TPM2_RS_PW, nonce='', session_attributes=0,
-                 hmac=''):
+    def __init__(self, session_handle=TPM2_RS_PW, nonce=bytes(),
+                 session_attributes=0, hmac=bytes()):
         self.session_handle = session_handle
         self.nonce = nonce
         self.session_attributes = session_attributes
         self.hmac = hmac
 
-    def __str__(self):
+    def __bytes__(self):
         fmt = '>I H%us B H%us' % (len(self.nonce), len(self.hmac))
         return struct.pack(fmt, self.session_handle, len(self.nonce),
                            self.nonce, self.session_attributes, len(self.hmac),
@@ -268,11 +268,11 @@ class AuthCommand(object):
 class SensitiveCreate(object):
     """TPMS_SENSITIVE_CREATE"""
 
-    def __init__(self, user_auth='', data=''):
+    def __init__(self, user_auth=bytes(), data=bytes()):
         self.user_auth = user_auth
         self.data = data
 
-    def __str__(self):
+    def __bytes__(self):
         fmt = '>H%us H%us' % (len(self.user_auth), len(self.data))
         return struct.pack(fmt, len(self.user_auth), self.user_auth,
                            len(self.data), self.data)
@@ -296,8 +296,9 @@ class Public(object):
         return '>HHIH%us%usH%us' % \
             (len(self.auth_policy), len(self.parameters), len(self.unique))
 
-    def __init__(self, object_type, name_alg, object_attributes, auth_policy='',
-                 parameters='', unique=''):
+    def __init__(self, object_type, name_alg, object_attributes,
+                 auth_policy=bytes(), parameters=bytes(),
+                 unique=bytes()):
         self.object_type = object_type
         self.name_alg = name_alg
         self.object_attributes = object_attributes
@@ -305,7 +306,7 @@ class Public(object):
         self.parameters = parameters
         self.unique = unique
 
-    def __str__(self):
+    def __bytes__(self):
         return struct.pack(self.__fmt(),
                            self.object_type,
                            self.name_alg,
@@ -343,7 +344,7 @@ def get_algorithm(name):
 
 def hex_dump(d):
     d = [format(ord(x), '02x') for x in d]
-    d = [d[i: i + 16] for i in xrange(0, len(d), 16)]
+    d = [d[i: i + 16] for i in range(0, len(d), 16)]
     d = [' '.join(x) for x in d]
     d = os.linesep.join(d)
 
@@ -401,7 +402,7 @@ class Client:
         pcrsel_len = max((i >> 3) + 1, 3)
         pcrsel = [0] * pcrsel_len
         pcrsel[i >> 3] = 1 << (i & 7)
-        pcrsel = ''.join(map(chr, pcrsel))
+        pcrsel = ''.join(map(chr, pcrsel)).encode()
 
         fmt = '>HII IHB%us' % (pcrsel_len)
         cmd = struct.pack(fmt,
@@ -443,7 +444,7 @@ class Client:
             TPM2_CC_PCR_EXTEND,
             i,
             len(auth_cmd),
-            str(auth_cmd),
+            bytes(auth_cmd),
             1, bank_alg, dig)
 
         self.send_cmd(cmd)
@@ -457,7 +458,7 @@ class Client:
                           TPM2_RH_NULL,
                           TPM2_RH_NULL,
                           16,
-                          '\0' * 16,
+                          ('\0' * 16).encode(),
                           0,
                           session_type,
                           TPM2_ALG_NULL,
@@ -472,7 +473,7 @@ class Client:
 
         for i in pcrs:
             pcr = self.read_pcr(i, bank_alg)
-            if pcr == None:
+            if pcr is None:
                 return None
             x += pcr
 
@@ -489,7 +490,7 @@ class Client:
         pcrsel = [0] * pcrsel_len
         for i in pcrs:
             pcrsel[i >> 3] |= 1 << (i & 7)
-        pcrsel = ''.join(map(chr, pcrsel))
+        pcrsel = ''.join(map(chr, pcrsel)).encode()
 
         fmt = '>HII IH%usIHB3s' % ds
         cmd = struct.pack(fmt,
@@ -497,7 +498,8 @@ class Client:
                           struct.calcsize(fmt),
                           TPM2_CC_POLICY_PCR,
                           handle,
-                          len(dig), str(dig),
+                          len(dig),
+                          bytes(dig),
                           1,
                           bank_alg,
                           pcrsel_len, pcrsel)
@@ -534,7 +536,7 @@ class Client:
 
         self.send_cmd(cmd)
 
-    def create_root_key(self, auth_value = ''):
+    def create_root_key(self, auth_value = bytes()):
         attributes = \
             Public.FIXED_TPM | \
             Public.FIXED_PARENT | \
@@ -570,11 +572,11 @@ class Client:
             TPM2_CC_CREATE_PRIMARY,
             TPM2_RH_OWNER,
             len(auth_cmd),
-            str(auth_cmd),
+            bytes(auth_cmd),
             len(sensitive),
-            str(sensitive),
+            bytes(sensitive),
             len(public),
-            str(public),
+            bytes(public),
             0, 0)
 
         return struct.unpack('>I', self.send_cmd(cmd)[10:14])[0]
@@ -587,7 +589,7 @@ class Client:
         attributes = 0
         if not policy_dig:
             attributes |= Public.USER_WITH_AUTH
-            policy_dig = ''
+            policy_dig = bytes()
 
         auth_cmd =  AuthCommand()
         sensitive = SensitiveCreate(user_auth=auth_value, data=data)
@@ -608,11 +610,11 @@ class Client:
             TPM2_CC_CREATE,
             parent_key,
             len(auth_cmd),
-            str(auth_cmd),
+            bytes(auth_cmd),
             len(sensitive),
-            str(sensitive),
+            bytes(sensitive),
             len(public),
-            str(public),
+            bytes(public),
             0, 0)
 
         rsp = self.send_cmd(cmd)
@@ -635,7 +637,7 @@ class Client:
             TPM2_CC_LOAD,
             parent_key,
             len(auth_cmd),
-            str(auth_cmd),
+            bytes(auth_cmd),
             blob)
 
         data_handle = struct.unpack('>I', self.send_cmd(cmd)[10:14])[0]
@@ -653,7 +655,7 @@ class Client:
             TPM2_CC_UNSEAL,
             data_handle,
             len(auth_cmd),
-            str(auth_cmd))
+            bytes(auth_cmd))
 
         try:
             rsp = self.send_cmd(cmd)
@@ -675,7 +677,7 @@ class Client:
             TPM2_CC_DICTIONARY_ATTACK_LOCK_RESET,
             TPM2_RH_LOCKOUT,
             len(auth_cmd),
-            str(auth_cmd))
+            bytes(auth_cmd))
 
         self.send_cmd(cmd)
 
@@ -693,7 +695,7 @@ class Client:
         more_data, cap, cnt = struct.unpack('>BII', rsp[:9])
         rsp = rsp[9:]
 
-        for i in xrange(0, cnt):
+        for i in range(0, cnt):
             handle = struct.unpack('>I', rsp[:4])[0]
             handles.append(handle)
             rsp = rsp[4:]
index 728be7c..9d76430 100644 (file)
@@ -20,8 +20,8 @@ class SmokeTest(unittest.TestCase):
         self.client.close()
 
     def test_seal_with_auth(self):
-        data = 'X' * 64
-        auth = 'A' * 15
+        data = ('X' * 64).encode()
+        auth = ('A' * 15).encode()
 
         blob = self.client.seal(self.root_key, data, auth, None)
         result = self.client.unseal(self.root_key, blob, auth, None)
@@ -30,8 +30,8 @@ class SmokeTest(unittest.TestCase):
     def test_seal_with_policy(self):
         handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL)
 
-        data = 'X' * 64
-        auth = 'A' * 15
+        data = ('X' * 64).encode()
+        auth = ('A' * 15).encode()
         pcrs = [16]
 
         try:
@@ -58,14 +58,15 @@ class SmokeTest(unittest.TestCase):
         self.assertEqual(data, result)
 
     def test_unseal_with_wrong_auth(self):
-        data = 'X' * 64
-        auth = 'A' * 20
+        data = ('X' * 64).encode()
+        auth = ('A' * 20).encode()
         rc = 0
 
         blob = self.client.seal(self.root_key, data, auth, None)
         try:
-            result = self.client.unseal(self.root_key, blob, auth[:-1] + 'B', None)
-        except ProtocolError, e:
+            result = self.client.unseal(self.root_key, blob,
+                        auth[:-1] + 'B'.encode(), None)
+        except ProtocolError as e:
             rc = e.rc
 
         self.assertEqual(rc, tpm2.TPM2_RC_AUTH_FAIL)
@@ -73,8 +74,8 @@ class SmokeTest(unittest.TestCase):
     def test_unseal_with_wrong_policy(self):
         handle = self.client.start_auth_session(tpm2.TPM2_SE_TRIAL)
 
-        data = 'X' * 64
-        auth = 'A' * 17
+        data = ('X' * 64).encode()
+        auth = ('A' * 17).encode()
         pcrs = [16]
 
         try:
@@ -91,7 +92,7 @@ class SmokeTest(unittest.TestCase):
         # This should succeed.
 
         ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1)
-        self.client.extend_pcr(1, 'X' * ds)
+        self.client.extend_pcr(1, ('X' * ds).encode())
 
         handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY)
 
@@ -108,7 +109,7 @@ class SmokeTest(unittest.TestCase):
 
         # Then, extend a PCR that is part of the policy and try to unseal.
         # This should fail.
-        self.client.extend_pcr(16, 'X' * ds)
+        self.client.extend_pcr(16, ('X' * ds).encode())
 
         handle = self.client.start_auth_session(tpm2.TPM2_SE_POLICY)
 
@@ -119,7 +120,7 @@ class SmokeTest(unittest.TestCase):
             self.client.policy_password(handle)
 
             result = self.client.unseal(self.root_key, blob, auth, handle)
-        except ProtocolError, e:
+        except ProtocolError as e:
             rc = e.rc
             self.client.flush_context(handle)
         except:
@@ -130,13 +131,13 @@ class SmokeTest(unittest.TestCase):
 
     def test_seal_with_too_long_auth(self):
         ds = tpm2.get_digest_size(tpm2.TPM2_ALG_SHA1)
-        data = 'X' * 64
-        auth = 'A' * (ds + 1)
+        data = ('X' * 64).encode()
+        auth = ('A' * (ds + 1)).encode()
 
         rc = 0
         try:
             blob = self.client.seal(self.root_key, data, auth, None)
-        except ProtocolError, e:
+        except ProtocolError as e:
             rc = e.rc
 
         self.assertEqual(rc, tpm2.TPM2_RC_SIZE)
@@ -152,7 +153,7 @@ class SmokeTest(unittest.TestCase):
                               0xDEADBEEF)
 
             self.client.send_cmd(cmd)
-        except IOError, e:
+        except IOError as e:
             rejected = True
         except:
             pass
@@ -212,7 +213,7 @@ class SmokeTest(unittest.TestCase):
             self.client.tpm.write(cmd)
             rsp = self.client.tpm.read()
 
-        except IOError, e:
+        except IOError as e:
             # read the response
             rsp = self.client.tpm.read()
             rejected = True
@@ -283,7 +284,7 @@ class SpaceTest(unittest.TestCase):
         rc = 0
         try:
             space1.send_cmd(cmd)
-        except ProtocolError, e:
+        except ProtocolError as e:
             rc = e.rc
 
         self.assertEqual(rc, tpm2.TPM2_RC_COMMAND_CODE |
index 17a1f53..d77f482 100755 (executable)
@@ -587,9 +587,20 @@ ip0 link set wg0 up
 kill $ncat_pid
 ip0 link del wg0
 
+# Ensure there aren't circular reference loops
+ip1 link add wg1 type wireguard
+ip2 link add wg2 type wireguard
+ip1 link set wg1 netns $netns2
+ip2 link set wg2 netns $netns1
+pp ip netns delete $netns1
+pp ip netns delete $netns2
+pp ip netns add $netns1
+pp ip netns add $netns2
+
+sleep 2 # Wait for cleanup and grace periods
 declare -A objects
 while read -t 0.1 -r line 2>/dev/null || [[ $? -ne 142 ]]; do
-       [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ [0-9]+)\ .*(created|destroyed).* ]] || continue
+       [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ ?[0-9]*)\ .*(created|destroyed).* ]] || continue
        objects["${BASH_REMATCH[1]}"]+="${BASH_REMATCH[2]}"
 done < /dev/kmsg
 alldeleted=1
index 5f16821..d2796ea 100644 (file)
@@ -70,10 +70,10 @@ all_64: $(BINARIES_64)
 
 EXTRA_CLEAN := $(BINARIES_32) $(BINARIES_64)
 
-$(BINARIES_32): $(OUTPUT)/%_32: %.c
+$(BINARIES_32): $(OUTPUT)/%_32: %.c helpers.h
        $(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl -lm
 
-$(BINARIES_64): $(OUTPUT)/%_64: %.c
+$(BINARIES_64): $(OUTPUT)/%_64: %.c helpers.h
        $(CC) -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
 
 # x86_64 users should be encouraged to install 32-bit libraries
diff --git a/tools/testing/selftests/x86/helpers.h b/tools/testing/selftests/x86/helpers.h
new file mode 100644 (file)
index 0000000..f5ff2a2
--- /dev/null
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __SELFTESTS_X86_HELPERS_H
+#define __SELFTESTS_X86_HELPERS_H
+
+#include <asm/processor-flags.h>
+
+static inline unsigned long get_eflags(void)
+{
+       unsigned long eflags;
+
+       asm volatile (
+#ifdef __x86_64__
+               "subq $128, %%rsp\n\t"
+               "pushfq\n\t"
+               "popq %0\n\t"
+               "addq $128, %%rsp"
+#else
+               "pushfl\n\t"
+               "popl %0"
+#endif
+               : "=r" (eflags) :: "memory");
+
+       return eflags;
+}
+
+static inline void set_eflags(unsigned long eflags)
+{
+       asm volatile (
+#ifdef __x86_64__
+               "subq $128, %%rsp\n\t"
+               "pushq %0\n\t"
+               "popfq\n\t"
+               "addq $128, %%rsp"
+#else
+               "pushl %0\n\t"
+               "popfl"
+#endif
+               :: "r" (eflags) : "flags", "memory");
+}
+
+#endif /* __SELFTESTS_X86_HELPERS_H */
index 1063328..120ac74 100644 (file)
@@ -31,6 +31,8 @@
 #include <sys/ptrace.h>
 #include <sys/user.h>
 
+#include "helpers.h"
+
 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
                       int flags)
 {
@@ -67,21 +69,6 @@ static unsigned char altstack_data[SIGSTKSZ];
 # define INT80_CLOBBERS
 #endif
 
-static unsigned long get_eflags(void)
-{
-       unsigned long eflags;
-       asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
-       return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
-       asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
-                     : : "rm" (eflags) : "flags");
-}
-
-#define X86_EFLAGS_TF (1UL << 8)
-
 static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
 {
        ucontext_t *ctx = (ucontext_t*)ctx_void;
index bc0ecc2..5b7abeb 100644 (file)
 #include <setjmp.h>
 #include <errno.h>
 
-#ifdef __x86_64__
-# define WIDTH "q"
-#else
-# define WIDTH "l"
-#endif
+#include "helpers.h"
 
 /* Our sigaltstack scratch space. */
 static unsigned char altstack_data[SIGSTKSZ];
 
-static unsigned long get_eflags(void)
-{
-       unsigned long eflags;
-       asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
-       return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
-       asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
-                     : : "rm" (eflags) : "flags");
-}
-
-#define X86_EFLAGS_TF (1UL << 8)
-
 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
                       int flags)
 {
index 02309a1..a108b80 100644 (file)
 #include <signal.h>
 #include <err.h>
 #include <sys/syscall.h>
-#include <asm/processor-flags.h>
 
-#ifdef __x86_64__
-# define WIDTH "q"
-#else
-# define WIDTH "l"
-#endif
+#include "helpers.h"
 
 static unsigned int nerrs;
 
-static unsigned long get_eflags(void)
-{
-       unsigned long eflags;
-       asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
-       return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
-       asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
-                     : : "rm" (eflags) : "flags");
-}
-
 static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
                       int flags)
 {
@@ -59,6 +41,7 @@ static void do_it(unsigned long extraflags)
        set_eflags(get_eflags() | extraflags);
        syscall(SYS_getpid);
        flags = get_eflags();
+       set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED);
        if ((flags & extraflags) == extraflags) {
                printf("[OK]\tThe syscall worked and flags are still set\n");
        } else {
@@ -73,6 +56,12 @@ int main(void)
        printf("[RUN]\tSet NT and issue a syscall\n");
        do_it(X86_EFLAGS_NT);
 
+       printf("[RUN]\tSet AC and issue a syscall\n");
+       do_it(X86_EFLAGS_AC);
+
+       printf("[RUN]\tSet NT|AC and issue a syscall\n");
+       do_it(X86_EFLAGS_NT | X86_EFLAGS_AC);
+
        /*
         * Now try it again with TF set -- TF forces returns via IRET in all
         * cases except non-ptregs-using 64-bit full fast path syscalls.
@@ -80,8 +69,28 @@ int main(void)
 
        sethandler(SIGTRAP, sigtrap, 0);
 
+       printf("[RUN]\tSet TF and issue a syscall\n");
+       do_it(X86_EFLAGS_TF);
+
        printf("[RUN]\tSet NT|TF and issue a syscall\n");
        do_it(X86_EFLAGS_NT | X86_EFLAGS_TF);
 
+       printf("[RUN]\tSet AC|TF and issue a syscall\n");
+       do_it(X86_EFLAGS_AC | X86_EFLAGS_TF);
+
+       printf("[RUN]\tSet NT|AC|TF and issue a syscall\n");
+       do_it(X86_EFLAGS_NT | X86_EFLAGS_AC | X86_EFLAGS_TF);
+
+       /*
+        * Now try DF.  This is evil and it's plausible that we will crash
+        * glibc, but glibc would have to do something rather surprising
+        * for this to happen.
+        */
+       printf("[RUN]\tSet DF and issue a syscall\n");
+       do_it(X86_EFLAGS_DF);
+
+       printf("[RUN]\tSet TF|DF and issue a syscall\n");
+       do_it(X86_EFLAGS_TF | X86_EFLAGS_DF);
+
        return nerrs == 0 ? 0 : 1;
 }
index a4f4d4c..c41f24b 100644 (file)
@@ -20,6 +20,8 @@
 #include <setjmp.h>
 #include <sys/uio.h>
 
+#include "helpers.h"
+
 #ifdef __x86_64__
 # define VSYS(x) (x)
 #else
@@ -493,21 +495,8 @@ static int test_process_vm_readv(void)
 }
 
 #ifdef __x86_64__
-#define X86_EFLAGS_TF (1UL << 8)
 static volatile sig_atomic_t num_vsyscall_traps;
 
-static unsigned long get_eflags(void)
-{
-       unsigned long eflags;
-       asm volatile ("pushfq\n\tpopq %0" : "=rm" (eflags));
-       return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
-       asm volatile ("pushq %0\n\tpopfq" : : "rm" (eflags) : "flags");
-}
-
 static void sigtrap(int sig, siginfo_t *info, void *ctx_void)
 {
        ucontext_t *ctx = (ucontext_t *)ctx_void;
index 0075ccd..4c311e1 100644 (file)
@@ -11,6 +11,8 @@
 #include <features.h>
 #include <stdio.h>
 
+#include "helpers.h"
+
 #if defined(__GLIBC__) && __GLIBC__ == 2 && __GLIBC_MINOR__ < 16
 
 int main()
@@ -53,27 +55,6 @@ static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
                err(1, "sigaction");
 }
 
-#ifdef __x86_64__
-# define WIDTH "q"
-#else
-# define WIDTH "l"
-#endif
-
-static unsigned long get_eflags(void)
-{
-       unsigned long eflags;
-       asm volatile ("pushf" WIDTH "\n\tpop" WIDTH " %0" : "=rm" (eflags));
-       return eflags;
-}
-
-static void set_eflags(unsigned long eflags)
-{
-       asm volatile ("push" WIDTH " %0\n\tpopf" WIDTH
-                     : : "rm" (eflags) : "flags");
-}
-
-#define X86_EFLAGS_TF (1UL << 8)
-
 static volatile sig_atomic_t nerrs;
 static unsigned long sysinfo;
 static bool got_sysinfo = false;
index 6683b4a..caab980 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/compiler.h>
 #include <linux/types.h>
+#include <linux/list.h>
 #include <linux/printk.h>
 #include <linux/bug.h>
 #include <errno.h>
@@ -135,10 +136,4 @@ static inline void free_page(unsigned long addr)
        (void) (&_min1 == &_min2);              \
        _min1 < _min2 ? _min1 : _min2; })
 
-/* TODO: empty stubs for now. Broken but enough for virtio_ring.c */
-#define list_add_tail(a, b) do {} while (0)
-#define list_del(a) do {} while (0)
-#define list_for_each_entry(a, b, c) while (0)
-/* end of stubs */
-
 #endif /* KERNEL_H */
index b751350..5d90254 100644 (file)
@@ -11,12 +11,11 @@ struct device {
 struct virtio_device {
        struct device dev;
        u64 features;
+       struct list_head vqs;
 };
 
 struct virtqueue {
-       /* TODO: commented as list macros are empty stubs for now.
-        * Broken but enough for virtio_ring.c
-        * struct list_head list; */
+       struct list_head list;
        void (*callback)(struct virtqueue *vq);
        const char *name;
        struct virtio_device *vdev;
index b427def..cb3f29c 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #define _GNU_SOURCE
 #include <getopt.h>
+#include <limits.h>
 #include <string.h>
 #include <poll.h>
 #include <sys/eventfd.h>
@@ -18,6 +19,8 @@
 #include <linux/virtio_ring.h>
 #include "../../drivers/vhost/test.h"
 
+#define RANDOM_BATCH -1
+
 /* Unused */
 void *__kmalloc_fake, *__kfree_ignore_start, *__kfree_ignore_end;
 
@@ -43,6 +46,10 @@ struct vdev_info {
        struct vhost_memory *mem;
 };
 
+static const struct vhost_vring_file no_backend = { .fd = -1 },
+                                    backend = { .fd = 1 };
+static const struct vhost_vring_state null_state = {};
+
 bool vq_notify(struct virtqueue *vq)
 {
        struct vq_info *info = vq->priv;
@@ -88,6 +95,19 @@ void vhost_vq_setup(struct vdev_info *dev, struct vq_info *info)
        assert(r >= 0);
 }
 
+static void vq_reset(struct vq_info *info, int num, struct virtio_device *vdev)
+{
+       if (info->vq)
+               vring_del_virtqueue(info->vq);
+
+       memset(info->ring, 0, vring_size(num, 4096));
+       vring_init(&info->vring, num, info->ring, 4096);
+       info->vq = __vring_new_virtqueue(info->idx, info->vring, vdev, true,
+                                        false, vq_notify, vq_callback, "test");
+       assert(info->vq);
+       info->vq->priv = info;
+}
+
 static void vq_info_add(struct vdev_info *dev, int num)
 {
        struct vq_info *info = &dev->vqs[dev->nvqs];
@@ -97,14 +117,7 @@ static void vq_info_add(struct vdev_info *dev, int num)
        info->call = eventfd(0, EFD_NONBLOCK);
        r = posix_memalign(&info->ring, 4096, vring_size(num, 4096));
        assert(r >= 0);
-       memset(info->ring, 0, vring_size(num, 4096));
-       vring_init(&info->vring, num, info->ring, 4096);
-       info->vq = vring_new_virtqueue(info->idx,
-                                      info->vring.num, 4096, &dev->vdev,
-                                      true, false, info->ring,
-                                      vq_notify, vq_callback, "test");
-       assert(info->vq);
-       info->vq->priv = info;
+       vq_reset(info, num, &dev->vdev);
        vhost_vq_setup(dev, info);
        dev->fds[info->idx].fd = info->call;
        dev->fds[info->idx].events = POLLIN;
@@ -116,6 +129,7 @@ static void vdev_info_init(struct vdev_info* dev, unsigned long long features)
        int r;
        memset(dev, 0, sizeof *dev);
        dev->vdev.features = features;
+       INIT_LIST_HEAD(&dev->vdev.vqs);
        dev->buf_size = 1024;
        dev->buf = malloc(dev->buf_size);
        assert(dev->buf);
@@ -152,41 +166,93 @@ static void wait_for_interrupt(struct vdev_info *dev)
 }
 
 static void run_test(struct vdev_info *dev, struct vq_info *vq,
-                    bool delayed, int bufs)
+                    bool delayed, int batch, int reset_n, int bufs)
 {
        struct scatterlist sl;
-       long started = 0, completed = 0;
-       long completed_before;
+       long started = 0, completed = 0, next_reset = reset_n;
+       long completed_before, started_before;
        int r, test = 1;
        unsigned len;
        long long spurious = 0;
+       const bool random_batch = batch == RANDOM_BATCH;
+
        r = ioctl(dev->control, VHOST_TEST_RUN, &test);
        assert(r >= 0);
+       if (!reset_n) {
+               next_reset = INT_MAX;
+       }
+
        for (;;) {
                virtqueue_disable_cb(vq->vq);
                completed_before = completed;
+               started_before = started;
                do {
-                       if (started < bufs) {
+                       const bool reset = completed > next_reset;
+                       if (random_batch)
+                               batch = (random() % vq->vring.num) + 1;
+
+                       while (started < bufs &&
+                              (started - completed) < batch) {
                                sg_init_one(&sl, dev->buf, dev->buf_size);
                                r = virtqueue_add_outbuf(vq->vq, &sl, 1,
                                                         dev->buf + started,
                                                         GFP_ATOMIC);
-                               if (likely(r == 0)) {
-                                       ++started;
-                                       if (unlikely(!virtqueue_kick(vq->vq)))
+                               if (unlikely(r != 0)) {
+                                       if (r == -ENOSPC &&
+                                           started > started_before)
+                                               r = 0;
+                                       else
                                                r = -1;
+                                       break;
                                }
-                       } else
+
+                               ++started;
+
+                               if (unlikely(!virtqueue_kick(vq->vq))) {
+                                       r = -1;
+                                       break;
+                               }
+                       }
+
+                       if (started >= bufs)
                                r = -1;
 
+                       if (reset) {
+                               r = ioctl(dev->control, VHOST_TEST_SET_BACKEND,
+                                         &no_backend);
+                               assert(!r);
+                       }
+
                        /* Flush out completed bufs if any */
-                       if (virtqueue_get_buf(vq->vq, &len)) {
+                       while (virtqueue_get_buf(vq->vq, &len)) {
                                ++completed;
                                r = 0;
                        }
 
+                       if (reset) {
+                               struct vhost_vring_state s = { .index = 0 };
+
+                               vq_reset(vq, vq->vring.num, &dev->vdev);
+
+                               r = ioctl(dev->control, VHOST_GET_VRING_BASE,
+                                         &s);
+                               assert(!r);
+
+                               s.num = 0;
+                               r = ioctl(dev->control, VHOST_SET_VRING_BASE,
+                                         &null_state);
+                               assert(!r);
+
+                               r = ioctl(dev->control, VHOST_TEST_SET_BACKEND,
+                                         &backend);
+                               assert(!r);
+
+                               started = completed;
+                               while (completed > next_reset)
+                                       next_reset += completed;
+                       }
                } while (r == 0);
-               if (completed == completed_before)
+               if (completed == completed_before && started == started_before)
                        ++spurious;
                assert(completed <= bufs);
                assert(started <= bufs);
@@ -203,7 +269,9 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
        test = 0;
        r = ioctl(dev->control, VHOST_TEST_RUN, &test);
        assert(r >= 0);
-       fprintf(stderr, "spurious wakeups: 0x%llx\n", spurious);
+       fprintf(stderr,
+               "spurious wakeups: 0x%llx started=0x%lx completed=0x%lx\n",
+               spurious, started, completed);
 }
 
 const char optstring[] = "h";
@@ -245,6 +313,16 @@ const struct option longopts[] = {
                .val = 'd',
        },
        {
+               .name = "batch",
+               .val = 'b',
+               .has_arg = required_argument,
+       },
+       {
+               .name = "reset",
+               .val = 'r',
+               .has_arg = optional_argument,
+       },
+       {
        }
 };
 
@@ -255,6 +333,8 @@ static void help(void)
                " [--no-event-idx]"
                " [--no-virtio-1]"
                " [--delayed-interrupt]"
+               " [--batch=random/N]"
+               " [--reset=N]"
                "\n");
 }
 
@@ -263,6 +343,7 @@ int main(int argc, char **argv)
        struct vdev_info dev;
        unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
                (1ULL << VIRTIO_RING_F_EVENT_IDX) | (1ULL << VIRTIO_F_VERSION_1);
+       long batch = 1, reset = 0;
        int o;
        bool delayed = false;
 
@@ -289,6 +370,24 @@ int main(int argc, char **argv)
                case 'D':
                        delayed = true;
                        break;
+               case 'b':
+                       if (0 == strcmp(optarg, "random")) {
+                               batch = RANDOM_BATCH;
+                       } else {
+                               batch = strtol(optarg, NULL, 10);
+                               assert(batch > 0);
+                               assert(batch < (long)INT_MAX + 1);
+                       }
+                       break;
+               case 'r':
+                       if (!optarg) {
+                               reset = 1;
+                       } else {
+                               reset = strtol(optarg, NULL, 10);
+                               assert(reset > 0);
+                               assert(reset < (long)INT_MAX + 1);
+                       }
+                       break;
                default:
                        assert(0);
                        break;
@@ -298,6 +397,6 @@ int main(int argc, char **argv)
 done:
        vdev_info_init(&dev, features);
        vq_info_add(&dev, 256);
-       run_test(&dev, &dev.vqs[0], delayed, 0x100000);
+       run_test(&dev, &dev.vqs[0], delayed, batch, reset, 0x100000);
        return 0;
 }
index 2936534..fa87b58 100644 (file)
@@ -307,6 +307,7 @@ static int parallel_test(u64 features,
                close(to_host[0]);
 
                gvdev.vdev.features = features;
+               INIT_LIST_HEAD(&gvdev.vdev.vqs);
                gvdev.to_host_fd = to_host[1];
                gvdev.notifies = 0;
 
@@ -453,6 +454,7 @@ int main(int argc, char *argv[])
 
        getrange = getrange_iov;
        vdev.features = 0;
+       INIT_LIST_HEAD(&vdev.vqs);
 
        while (argv[1]) {
                if (strcmp(argv[1], "--indirect") == 0)
index a852af5..0a68c9d 100644 (file)
@@ -3350,7 +3350,8 @@ static long kvm_vcpu_compat_ioctl(struct file *filp,
                        if (kvm_sigmask.len != sizeof(compat_sigset_t))
                                goto out;
                        r = -EFAULT;
-                       if (get_compat_sigset(&sigset, (void *)sigmask_arg->sigset))
+                       if (get_compat_sigset(&sigset,
+                                             (compat_sigset_t __user *)sigmask_arg->sigset))
                                goto out;
                        r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
                } else